MicroSpinLock.h
/*
 * Copyright 2015-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * N.B. You most likely do _not_ want to use MicroSpinLock or any
 * other kind of spinlock. Consider MicroLock instead.
 *
 * In short, spinlocks in preemptive multi-tasking operating systems
 * have serious problems, and fast mutexes like std::mutex are almost
 * certainly the better choice: letting the OS scheduler put a thread
 * to sleep is better for system responsiveness and throughput than
 * burning a timeslice repeatedly polling a lock held by a thread
 * that is blocked, and you cannot prevent a userspace thread from
 * being preempted or blocked while it holds the lock.
 *
 * Spinlocks in an operating system kernel make much more sense than
 * they do in userspace.
 */

#pragma once

/*
 * @author Keith Adams <kma@fb.com>
 * @author Jordan DeLong <delong.j@fb.com>
 */

#include <array>
#include <atomic>
#include <cassert>
#include <cstdint>
#include <mutex>
#include <type_traits>

#include <folly/Portability.h>
#include <folly/lang/Align.h>
#include <folly/synchronization/detail/Sleeper.h>

namespace folly {

/*
 * A really, *really* small spinlock for fine-grained locking of lots
 * of teeny-tiny data.
 *
 * Zero-initializing these is guaranteed to be as good as calling
 * init(), since the free state is guaranteed to be all-bits zero.
 *
 * This class should be kept a POD, so we can use it in other packed
 * structs (gcc does not allow __attribute__((__packed__)) on structs that
 * contain non-POD data). This means: avoid adding a constructor,
 * making members private, etc.
 */
struct MicroSpinLock {
  enum { FREE = 0, LOCKED = 1 };
  // lock_ can't be std::atomic<> to preserve POD-ness.
  uint8_t lock_;

  // Initialize this MSL. It is unnecessary to call this if you
  // zero-initialize the MicroSpinLock.
  void init() noexcept {
    payload()->store(FREE);
  }

  bool try_lock() noexcept {
    return cas(FREE, LOCKED);
  }

  void lock() noexcept {
    detail::Sleeper sleeper;
    while (!try_lock()) {
      // Spin on a relaxed load (with Sleeper backoff) until the lock looks
      // free, then retry the acquiring CAS (test-and-test-and-set).
      do {
        sleeper.wait();
      } while (payload()->load(std::memory_order_relaxed) == LOCKED);
    }
    assert(payload()->load() == LOCKED);
  }

  void unlock() noexcept {
    assert(payload()->load() == LOCKED);
    payload()->store(FREE, std::memory_order_release);
  }

 private:
  std::atomic<uint8_t>* payload() noexcept {
    return reinterpret_cast<std::atomic<uint8_t>*>(&this->lock_);
  }

  bool cas(uint8_t compare, uint8_t newVal) noexcept {
    return std::atomic_compare_exchange_strong_explicit(
        payload(),
        &compare,
        newVal,
        std::memory_order_acquire,
        std::memory_order_relaxed);
  }
};
static_assert(
    std::is_pod<MicroSpinLock>::value,
    "MicroSpinLock must be kept a POD type.");

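/*
 * A minimal usage sketch (illustrative only; the Counter struct below is a
 * made-up example, not part of folly): since the all-bits-zero state is
 * FREE, a zero-initialized MicroSpinLock needs no init() call, and it can
 * be driven through the usual lock()/unlock()/try_lock() interface, e.g.
 * via std::lock_guard (or the MSLGuard typedef further down in this file):
 *
 *   struct Counter {
 *     folly::MicroSpinLock lock_; // all-bits zero == FREE
 *     int64_t value_;
 *   };
 *
 *   Counter c = {}; // zero-initializes both the lock and the value
 *   {
 *     std::lock_guard<folly::MicroSpinLock> g(c.lock_);
 *     ++c.value_;
 *   }
 */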

/*
 * An array of spinlocks in which each lock is padded out to its own cache
 * line, so that locks at adjacent indices do not suffer false sharing.
 * Useful for striped/sharded locking schemes.
 */

// TODO: generate it from configure (`getconf LEVEL1_DCACHE_LINESIZE`)
#define FOLLY_CACHE_LINE_SIZE 64

template <class T, size_t N>
struct alignas(max_align_v) SpinLockArray {
  T& operator[](size_t i) noexcept {
    return data_[i].lock;
  }

  const T& operator[](size_t i) const noexcept {
    return data_[i].lock;
  }

  constexpr size_t size() const noexcept {
    return N;
  }

 private:
  struct PaddedSpinLock {
    PaddedSpinLock() : lock() {}
    T lock;
    char padding[FOLLY_CACHE_LINE_SIZE - sizeof(T)];
  };
  static_assert(
      sizeof(PaddedSpinLock) == FOLLY_CACHE_LINE_SIZE,
      "Invalid size of PaddedSpinLock");

  // Check if T can theoretically cross a cache line.
  static_assert(
      max_align_v > 0 && FOLLY_CACHE_LINE_SIZE % max_align_v == 0 &&
          sizeof(T) <= max_align_v,
      "T can cross cache line boundaries");

  char padding_[FOLLY_CACHE_LINE_SIZE];
  std::array<PaddedSpinLock, N> data_;
};

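/*
 * A minimal usage sketch (illustrative only; the hash-based striping scheme
 * is just an example): SpinLockArray suits sharded locking, where a key or
 * address is mapped onto one of the N padded locks so that unrelated shards
 * never contend on the same cache line:
 *
 *   folly::SpinLockArray<folly::MicroSpinLock, 16> locks; // each lock starts FREE
 *
 *   void withShardLock(const void* obj) {
 *     size_t idx = std::hash<const void*>()(obj) % locks.size();
 *     std::lock_guard<folly::MicroSpinLock> g(locks[idx]);
 *     // ... touch the state guarded by this shard ...
 *   }
 */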

typedef std::lock_guard<MicroSpinLock> MSLGuard;

} // namespace folly