/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <array>
#include <memory>

#include <folly/concurrency/AtomicSharedPtr.h>
#include <folly/concurrency/CacheLocality.h>
#include <folly/container/Enumerate.h>
#include <folly/synchronization/Hazptr.h>

namespace folly {

/**
 * This class creates core-local caches for a given shared_ptr, to
 * mitigate contention when acquiring/releasing it.
 *
 * It has the same thread-safety guarantees as shared_ptr: it is safe
 * to concurrently call get(), but reset()s must be synchronized with
 * reads and other reset()s.
 *
 * @author Giuseppe Ottaviano <ott@fb.com>
 */
template <class T, size_t kNumSlots = 64>
class CoreCachedSharedPtr {
 public:
  explicit CoreCachedSharedPtr(const std::shared_ptr<T>& p = nullptr) {
    reset(p);
  }

  void reset(const std::shared_ptr<T>& p = nullptr) {
    // Allocate each Holder in a different CoreRawAllocator stripe to
    // prevent false sharing. Their control blocks will be adjacent
    // thanks to allocate_shared().
    for (auto slot : folly::enumerate(slots_)) {
      auto alloc = getCoreAllocator<Holder, kNumSlots>(slot.index);
      auto holder = std::allocate_shared<Holder>(alloc, p);
      // Aliasing constructor: the slot shares ownership with its per-core
      // Holder (which keeps p alive), but exposes the original pointer.
      *slot = std::shared_ptr<T>(holder, p.get());
    }
  }

  std::shared_ptr<T> get() const {
    return slots_[AccessSpreader<>::current(kNumSlots)];
  }

 private:
  using Holder = std::shared_ptr<T>;

  template <class, size_t>
  friend class CoreCachedWeakPtr;

  std::array<std::shared_ptr<T>, kNumSlots> slots_;
};
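
// Example usage (an illustrative sketch, not part of this header; `Config`
// is an assumed type):
//
//   folly::CoreCachedSharedPtr<Config> cached(std::make_shared<Config>());
//
//   // Readers: each get() copies the shared_ptr stored in the current
//   // core's slot, so refcount traffic stays on that core's control block
//   // instead of contending on a single one.
//   std::shared_ptr<Config> config = cached.get();
//
//   // Writers: reset() must be synchronized externally with get()s and
//   // other reset()s.
//   cached.reset(std::make_shared<Config>());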
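
/**
 * Core-local cache of weak_ptrs to the slots of a CoreCachedSharedPtr.
 * Copying or locking the returned weak_ptr only touches the per-core
 * Holder's control block, so observers do not contend on a single
 * reference count.
 */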
template <class T, size_t kNumSlots = 64>
class CoreCachedWeakPtr {
 public:
  explicit CoreCachedWeakPtr(const CoreCachedSharedPtr<T, kNumSlots>& p) {
    for (auto slot : folly::enumerate(slots_)) {
      *slot = p.slots_[slot.index];
    }
  }

  std::weak_ptr<T> get() const {
    return slots_[AccessSpreader<>::current(kNumSlots)];
  }

 private:
  std::array<std::weak_ptr<T>, kNumSlots> slots_;
};
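
// Example usage (an illustrative sketch; `cached` is the hypothetical
// CoreCachedSharedPtr<Config> from the previous example):
//
//   folly::CoreCachedWeakPtr<Config> weak(cached);
//
//   // lock() promotes the core-local weak_ptr; it succeeds as long as the
//   // corresponding slot (or a copy obtained from it) is still alive.
//   if (std::shared_ptr<Config> config = weak.get().lock()) {
//     // use *config
//   }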

/**
 * This class creates core-local caches for a given shared_ptr, to
 * mitigate contention when acquiring/releasing it.
 *
 * All methods are threadsafe. Hazard pointers are used to avoid
 * use-after-free for concurrent reset() and get() operations.
 *
 * Concurrent reset()s are sequenced with respect to each other: the
 * sharded shared_ptrs will always all be set to the same value.
 * get()s will never see a newer pointer on one core, and an older
 * pointer on another after a subsequent thread migration.
 */
template <class T, size_t kNumSlots = 64>
class AtomicCoreCachedSharedPtr {
 public:
  explicit AtomicCoreCachedSharedPtr(const std::shared_ptr<T>& p = nullptr) {
    reset(p);
  }

  ~AtomicCoreCachedSharedPtr() {
    auto slots = slots_.load(std::memory_order_acquire);
    // Destruction of the AtomicCoreCachedSharedPtr must be synchronized
    // with readers, so there is no need for slots->retire() here.
    if (slots) {
      delete slots;
    }
  }

  void reset(const std::shared_ptr<T>& p = nullptr) {
    auto newslots = std::make_unique<Slots>();
    // Allocate each Holder in a different CoreRawAllocator stripe to
    // prevent false sharing. Their control blocks will be adjacent
    // thanks to allocate_shared().
    for (auto slot : folly::enumerate(newslots->slots_)) {
      auto alloc = getCoreAllocator<Holder, kNumSlots>(slot.index);
      auto holder = std::allocate_shared<Holder>(alloc, p);
      // Aliasing constructor, as in CoreCachedSharedPtr::reset().
      *slot = std::shared_ptr<T>(holder, p.get());
    }

    auto oldslots = slots_.exchange(newslots.release());
    if (oldslots) {
      oldslots->retire();
    }
  }

  std::shared_ptr<T> get() const {
    folly::hazptr_local<1> hazptr;
    auto slots = hazptr[0].get_protected(slots_);
    if (!slots) {
      return nullptr;
    }
    return (slots->slots_)[AccessSpreader<>::current(kNumSlots)];
  }

 private:
  using Holder = std::shared_ptr<T>;
  struct Slots : folly::hazptr_obj_base<Slots> {
    std::array<std::shared_ptr<T>, kNumSlots> slots_;
  };
  std::atomic<Slots*> slots_{nullptr};
};
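
// Example usage (an illustrative sketch; `Config` is an assumed type):
//
//   folly::AtomicCoreCachedSharedPtr<Config> cached(std::make_shared<Config>());
//
//   // get() and reset() may run concurrently from any thread: get()
//   // protects the slot array with a hazard pointer, and reset() retires
//   // the old array instead of deleting it in place.
//   std::shared_ptr<Config> snapshot = cached.get();
//   cached.reset(std::make_shared<Config>());
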
} // namespace folly