IndexedMemPoolTest.cpp
/*
 * Copyright 2014-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <folly/IndexedMemPool.h>
#include <folly/portability/GMock.h>
#include <folly/portability/GTest.h>
#include <folly/portability/Semaphore.h>
#include <folly/portability/Unistd.h>
#include <folly/test/DeterministicSchedule.h>

#include <string>
#include <thread>

using namespace folly;
using namespace folly::test;
using namespace testing;

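// allocElem() hands back a Pool::UniquePtr that recycles its slot on
// destruction, which is why the first loop below can allocate 100000
// times from a pool of capacity 100 without ever running dry.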
TEST(IndexedMemPool, unique_ptr) {
  typedef IndexedMemPool<size_t> Pool;
  Pool pool(100);

  for (size_t i = 0; i < 100000; ++i) {
    auto ptr = pool.allocElem();
    EXPECT_TRUE(!!ptr);
    *ptr = i;
  }

  std::vector<Pool::UniquePtr> leak;
  while (true) {
    auto ptr = pool.allocElem();
    if (!ptr) {
      // good, we finally ran out
      break;
    }
    leak.emplace_back(std::move(ptr));
    EXPECT_LT(leak.size(), 10000u);
  }
}

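// Producer/consumer exercise under DeterministicSchedule: indices flow
// through a pipe, and the two semaphores throttle the threads so that the
// schedule replays deterministically and allocIndex() is only called when
// a free node is guaranteed to exist.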
TEST(IndexedMemPool, no_starvation) {
  const int count = 1000;
  const uint32_t poolSize = 100;

  typedef DeterministicSchedule Sched;
  Sched sched(Sched::uniform(0));

  typedef IndexedMemPool<int, 8, 8, DeterministicAtomic> Pool;
  Pool pool(poolSize);

  for (auto pass = 0; pass < 10; ++pass) {
    int fd[2];
    EXPECT_EQ(pipe(fd), 0);

    // makes sure we wait for available nodes, rather than fail allocIndex
    sem_t allocSem;
    sem_init(&allocSem, 0, poolSize);

    // this semaphore is only needed for deterministic replay, so that we
    // always block in an Sched:: operation rather than in a read() syscall
    sem_t readSem;
    sem_init(&readSem, 0, 0);

    std::thread produce = Sched::thread([&]() {
      for (auto i = 0; i < count; ++i) {
        Sched::wait(&allocSem);
        uint32_t idx = pool.allocIndex();
        EXPECT_NE(idx, 0u);
        EXPECT_LE(
            idx, poolSize + (pool.NumLocalLists - 1) * pool.LocalListLimit);
        pool[idx] = i;
        EXPECT_EQ(write(fd[1], &idx, sizeof(idx)), sizeof(idx));
        Sched::post(&readSem);
      }
    });

    std::thread consume = Sched::thread([&]() {
      for (auto i = 0; i < count; ++i) {
        uint32_t idx;
        Sched::wait(&readSem);
        EXPECT_EQ(read(fd[0], &idx, sizeof(idx)), sizeof(idx));
        EXPECT_NE(idx, 0u);
        EXPECT_GE(idx, 1u);
        EXPECT_LE(
            idx, poolSize + (Pool::NumLocalLists - 1) * Pool::LocalListLimit);
        EXPECT_EQ(pool[idx], i);
        pool.recycleIndex(idx);
        Sched::post(&allocSem);
      }
    });

    Sched::join(produce);
    Sched::join(consume);
    close(fd[0]);
    close(fd[1]);
  }
}

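// A pool may reserve more slots than requested to keep its per-thread
// lists fed (the bound checked above is poolSize + (NumLocalLists - 1) *
// LocalListLimit). With a single local list the capacity is exact.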
TEST(IndexedMemPool, st_capacity) {
  // only one local list => capacity is exact
  typedef IndexedMemPool<int, 1, 32> Pool;
  Pool pool(10);

  EXPECT_EQ(pool.capacity(), 10u);
  EXPECT_EQ(Pool::maxIndexForCapacity(10), 10u);
  for (auto i = 0; i < 10; ++i) {
    EXPECT_NE(pool.allocIndex(), 0u);
  }
  EXPECT_EQ(pool.allocIndex(), 0u);
}

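// Here the slack matters: after the threads take the requested 1000
// indices, up to 16 * 32 further allocations may still succeed, so the
// test drains that many before insisting that allocIndex() returns 0.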
TEST(IndexedMemPool, mt_capacity) {
  typedef IndexedMemPool<int, 16, 32> Pool;
  Pool pool(1000);

  std::thread threads[10];
  for (auto i = 0; i < 10; ++i) {
    threads[i] = std::thread([&]() {
      for (auto j = 0; j < 100; ++j) {
        uint32_t idx = pool.allocIndex();
        EXPECT_NE(idx, 0u);
      }
    });
  }

  for (auto i = 0; i < 10; ++i) {
    threads[i].join();
  }

  for (auto i = 0; i < 16 * 32; ++i) {
    pool.allocIndex();
  }
  EXPECT_EQ(pool.allocIndex(), 0u);
}

TEST(IndexedMemPool, locate_elem) {
  IndexedMemPool<int> pool(1000);

  for (auto i = 0; i < 1000; ++i) {
    auto idx = pool.allocIndex();
    EXPECT_FALSE(idx == 0);
    int* elem = &pool[idx];
    EXPECT_TRUE(idx == pool.locateElem(elem));
  }

  EXPECT_EQ(pool.locateElem(nullptr), 0);
}
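
// NonTrivialStruct keeps a thread-local count of live instances so the
// recycle tests below can observe exactly when elements are constructed
// and destroyed.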
struct NonTrivialStruct {
  static FOLLY_TLS size_t count;

  size_t elem_;

  NonTrivialStruct() {
    elem_ = 0;
    ++count;
  }

  NonTrivialStruct(std::unique_ptr<std::string>&& arg1, size_t arg2) {
    elem_ = arg1->length() + arg2;
    ++count;
  }

  ~NonTrivialStruct() {
    --count;
  }
};

FOLLY_TLS size_t NonTrivialStruct::count;

TEST(IndexedMemPool, eager_recycle) {
  typedef IndexedMemPool<NonTrivialStruct> Pool;
  Pool pool(100);

  EXPECT_EQ(NonTrivialStruct::count, 0);

  for (size_t i = 0; i < 10; ++i) {
    {
      std::unique_ptr<std::string> arg{new std::string{"abc"}};
      auto ptr = pool.allocElem(std::move(arg), 100);
      EXPECT_EQ(NonTrivialStruct::count, 1);
      EXPECT_EQ(ptr->elem_, 103);
      EXPECT_TRUE(!!ptr);
    }
    EXPECT_EQ(NonTrivialStruct::count, 0);
  }
}

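// With lazy-recycle traits, recycling does not run the element's
// destructor; elements are destroyed only when the pool itself is, so the
// live count stays positive between iterations and drops to zero only
// after the pool goes out of scope.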
TEST(IndexedMemPool, late_recycle) {
  {
    using Pool = IndexedMemPool<
        NonTrivialStruct,
        8,
        8,
        std::atomic,
        IndexedMemPoolTraitsLazyRecycle<NonTrivialStruct>>;
    Pool pool(100);

    EXPECT_EQ(NonTrivialStruct::count, 0);

    for (size_t i = 0; i < 10; ++i) {
      {
        auto ptr = pool.allocElem();
        EXPECT_TRUE(NonTrivialStruct::count > 0);
        EXPECT_TRUE(!!ptr);
        ptr->elem_ = i;
      }
      EXPECT_TRUE(NonTrivialStruct::count > 0);
    }
  }
  EXPECT_EQ(NonTrivialStruct::count, 0);
}
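
// Hammer concurrent allocate/recycle cycles; run under a race detector
// such as TSAN, this exercises the index free lists for data races.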
TEST(IndexedMemPool, no_data_races) {
  const int count = 1000;
  const uint32_t poolSize = 100;
  const int nthreads = 10;

  using Pool = IndexedMemPool<int, 8, 8>;
  Pool pool(poolSize);

  std::vector<std::thread> thr(nthreads);
  for (auto i = 0; i < nthreads; ++i) {
    thr[i] = std::thread([&]() {
      for (auto j = 0; j < count; ++j) {
        uint32_t idx = pool.allocIndex();
        EXPECT_NE(idx, 0u);
        EXPECT_LE(
            idx, poolSize + (pool.NumLocalLists - 1) * pool.LocalListLimit);
        pool[idx] = j;
        pool.recycleIndex(idx);
      }
    });
  }
  for (auto& t : thr) {
    t.join();
  }
}

std::atomic<int> cnum{0};
std::atomic<int> dnum{0};

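// Verify that with lazy-recycle traits every Foo the pool constructs is
// destroyed exactly once, even with many threads racing over two slots.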
TEST(IndexedMemPool, construction_destruction) {
  struct Foo {
    Foo() {
      cnum.fetch_add(1);
    }
    ~Foo() {
      dnum.fetch_add(1);
    }
  };

  std::atomic<bool> start{false};
  std::atomic<int> started{0};

  using Pool = IndexedMemPool<
      Foo,
      1,
      1,
      std::atomic,
      IndexedMemPoolTraitsLazyRecycle<Foo>>;
  int nthreads = 20;
  int count = 1000;

  {
    Pool pool(2);
    std::vector<std::thread> thr(nthreads);
    for (auto i = 0; i < nthreads; ++i) {
      thr[i] = std::thread([&]() {
        started.fetch_add(1);
        while (!start.load()) {
          ;
        }
        for (auto j = 0; j < count; ++j) {
          uint32_t idx = pool.allocIndex();
          if (idx != 0) {
            pool.recycleIndex(idx);
          }
        }
      });
    }

    while (started.load() < nthreads) {
      ;
    }
    start.store(true);

    for (auto& t : thr) {
      t.join();
    }
  }

  CHECK_EQ(cnum.load(), dnum.load());
}

// Global Traits mock. It can't be a regular (non-global) mock because we
// don't have access to the instance.
struct MockTraits {
  static MockTraits* instance;

  MockTraits() {
    instance = this;
  }

  ~MockTraits() {
    instance = nullptr;
  }

  MOCK_METHOD2(onAllocate, void(std::string*, std::string));
  MOCK_METHOD1(onRecycle, void(std::string*));

  struct Forwarder {
    static void initialize(std::string* ptr) {
      new (ptr) std::string();
    }

    static void cleanup(std::string* ptr) {
      using std::string;
      ptr->~string();
    }

    static void onAllocate(std::string* ptr, std::string s) {
      instance->onAllocate(ptr, s);
    }

    static void onRecycle(std::string* ptr) {
      instance->onRecycle(ptr);
    }
  };
};

MockTraits* MockTraits::instance;

using TraitsTestPool =
    IndexedMemPool<std::string, 1, 1, std::atomic, MockTraits::Forwarder>;

void testTraits(TraitsTestPool& pool) {
  MockTraits traits;
  const std::string* elem = nullptr;
  EXPECT_CALL(traits, onAllocate(_, _))
      .WillOnce(Invoke([&](std::string* s, auto) {
        EXPECT_FALSE(pool.isAllocated(pool.locateElem(s)));
        elem = s;
      }));
  std::string* ptr = pool.allocElem("foo").release();
  EXPECT_EQ(ptr, elem);

  elem = nullptr;
  EXPECT_CALL(traits, onRecycle(_)).WillOnce(Invoke([&](std::string* s) {
    EXPECT_FALSE(pool.isAllocated(pool.locateElem(s)));
    elem = s;
  }));
  pool.recycleIndex(pool.locateElem(ptr));
  EXPECT_EQ(ptr, elem);
}

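// Recycled indices go to a small thread-local list first and overflow to
// a shared global list; the three tests below route an allocation through
// each source (empty lists, local list, global list) and reuse
// testTraits() to confirm the Traits hooks fire on every path.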
// Test that Traits is used when both local and global lists are empty.
TEST(IndexedMemPool, use_traits_empty) {
  TraitsTestPool pool(10);
  testTraits(pool);
}

// Test that Traits is used when allocating from a local list.
TEST(IndexedMemPool, use_traits_local_list) {
  TraitsTestPool pool(10);
  MockTraits traits;
  EXPECT_CALL(traits, onAllocate(_, _));
  // Allocate and immediately recycle an element to populate the local list.
  pool.allocElem("");
  testTraits(pool);
}

// Test that Traits is used when allocating from a global list.
TEST(IndexedMemPool, use_traits_global_list) {
  TraitsTestPool pool(10);
  MockTraits traits;
  EXPECT_CALL(traits, onAllocate(_, _)).Times(2);
  auto global = pool.allocElem("");
  // Allocate and immediately recycle an element to fill the local list.
  pool.allocElem("");
  // Recycle an element to populate the global list.
  global.reset();
  testTraits(pool);
}

// Test that IndexedMemPool works with incomplete element types.
struct IncompleteTestElement;
using IncompletePool = IndexedMemPool<IncompleteTestElement>;