TEST(AtomicIntrusiveLinkedList, Basic) {
  // ...
}
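The intrusive tests in this file operate on an element type that embeds the list hook. The following is a minimal sketch of such a type, not a verbatim copy of this file's helper; the member names (id_, hook_) and the TestIntrusiveList alias are assumptions for illustration.

#include <folly/AtomicIntrusiveLinkedList.h>

// Element type for the intrusive tests: carries an id and embeds the hook
// that AtomicIntrusiveLinkedList uses to link elements together.
class TestIntrusiveObject {
 public:
  explicit TestIntrusiveObject(size_t id) : id_(id) {}

  size_t id() const { return id_; }

  // Hook member; the list type is parameterized on a pointer to this member.
  folly::AtomicIntrusiveLinkedListHook<TestIntrusiveObject> hook_;

 private:
  size_t id_;
};

// List type used throughout the intrusive tests (alias name is hypothetical).
using TestIntrusiveList = folly::AtomicIntrusiveLinkedList<
    TestIntrusiveObject,
    &TestIntrusiveObject::hook_>;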
TEST(AtomicIntrusiveLinkedList, ReverseSweep) {
  // ...
  size_t next_expected_id = 3;
  // ...
  auto const expected = next_expected_id--;
  // ...
}
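For context on the decrementing expected id above: reverseSweep visits elements in reverse insertion order, most recently inserted first. A minimal usage sketch under that assumption, reusing the TestIntrusiveObject type sketched earlier (the wrapper function name is hypothetical):

#include <folly/AtomicIntrusiveLinkedList.h>
#include <folly/portability/GTest.h>

// Insert ids 1, 2, 3 at the head, then expect reverseSweep to visit them
// most-recently-inserted first: 3, 2, 1.
void reverseSweepSketch() {
  TestIntrusiveObject a(1), b(2), c(3);
  folly::AtomicIntrusiveLinkedList<
      TestIntrusiveObject, &TestIntrusiveObject::hook_>
      list;
  list.insertHead(&a);
  list.insertHead(&b);
  list.insertHead(&c);

  size_t next_expected_id = 3;
  list.reverseSweep([&](TestIntrusiveObject* obj) {
    auto const expected = next_expected_id--;
    EXPECT_EQ(expected, obj->id());
  });
  EXPECT_TRUE(list.empty()); // reverseSweep drains the list
}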
TEST(AtomicIntrusiveLinkedList, Move) {
  // ...
}
TEST(AtomicIntrusiveLinkedList, Stress) {
  static constexpr size_t kNumElements = 100000;

  std::vector<TestIntrusiveObject> elements;
  for (size_t i = 0; i < kNumThreads * kNumElements; ++i) {
    elements.emplace_back(i);
  }

  folly::AtomicIntrusiveLinkedList<
      TestIntrusiveObject, &TestIntrusiveObject::hook_>
      list;

  // Each thread inserts its own slice of elements; ids are interleaved so
  // that elements inserted by thread t satisfy id % kNumThreads == t.
  std::vector<std::thread> threads;
  for (size_t threadId = 0; threadId < kNumThreads; ++threadId) {
    threads.emplace_back([threadId, &list, &elements] {
      for (size_t id = 0; id < kNumElements; ++id) {
        list.insertHead(&elements[threadId + kNumThreads * id]);
      }
    });
  }

  std::vector<size_t> ids;
  TestIntrusiveObject* prev = nullptr;

  // Drain the list concurrently with the inserts. sweep visits elements in
  // insertion order, so two adjacent elements from the same thread must be
  // consecutive for that thread (their ids differ by exactly kNumThreads).
  while (ids.size() < kNumThreads * kNumElements) {
    list.sweep([&](TestIntrusiveObject* current) {
      ids.push_back(current->id());

      if (prev && prev->id() % kNumThreads == current->id() % kNumThreads) {
        EXPECT_EQ(prev->id() + kNumThreads, current->id());
      }

      prev = current;
    });
  }

  // Every element must have been seen exactly once.
  std::sort(ids.begin(), ids.end());
  for (size_t i = 0; i < kNumThreads * kNumElements; ++i) {
    EXPECT_EQ(i, ids[i]);
  }

  for (auto& thread : threads) {
    thread.join();
  }
}
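The non-intrusive AtomicLinkedList test below stores elements by value. A minimal sketch of the value type it appears to use; the member names id_ and ptr_ are assumptions for illustration.

#include <folly/AtomicLinkedList.h>

#include <memory>

// Value type for the non-intrusive test: an id plus a shared_ptr so the
// test can observe use_count() changing as elements are swept out.
class TestObject {
 public:
  TestObject(size_t id, std::shared_ptr<void> ptr)
      : id_(id), ptr_(std::move(ptr)) {}

  size_t id() const { return id_; }

 private:
  size_t id_;
  std::shared_ptr<void> ptr_;
};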
TEST(AtomicLinkedList, Basic) {
  constexpr size_t kNumElements = 10;

  folly::AtomicLinkedList<TestObject> list;

  std::shared_ptr<void> ptr = std::make_shared<int>(42);

  for (size_t id = 0; id < kNumElements; ++id) {
    list.insertHead({id, ptr});
  }

  size_t counter = 0;
  list.sweep([&](TestObject object) {
    EXPECT_EQ(counter, object.id());
    // One reference is held by the local ptr, plus one by each element not
    // yet swept out (the current element's reference has moved into object).
    EXPECT_EQ(1 + kNumElements - counter, ptr.use_count());
    ++counter;
  });
  EXPECT_EQ(kNumElements, counter);
}