#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <stdexcept>
#include <type_traits>

#if defined(_MSC_VER)
#include <intrin.h>
#endif

// Assumed definition: the original listing only references kIsArchAmd64.
#if defined(__x86_64__) || defined(_M_X64)
constexpr bool kIsArchAmd64 = true;
#else
constexpr bool kIsArchAmd64 = false;
#endif

// Portable fallback: set the bit via fetch_or() and report its previous value.
template <typename Atomic>
bool atomic_fetch_set_default(
    Atomic& atomic, std::size_t bit, std::memory_order order) {
  using Integer = decltype(atomic.load());
  auto mask = Integer(Integer{1} << bit);
  return (atomic.fetch_or(mask, order) & mask);
}
// Portable fallback: clear the bit via fetch_and() and report its previous
// value.
template <typename Atomic>
bool atomic_fetch_reset_default(
    Atomic& atomic, std::size_t bit, std::memory_order order) {
  using Integer = decltype(atomic.load());
  auto mask = Integer(Integer{1} << bit);
  return (atomic.fetch_and(Integer(~mask), order) & mask);
}
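// Quick illustration of the return-value contract (hypothetical values, not
// part of the original listing): setting a clear bit reports false, resetting
// a set bit reports true.
inline void fallback_sanity_check() {
  std::atomic<std::uint8_t> flags{0b1010};
  // Bit 0 was clear: the call reports false and sets it.
  assert(!atomic_fetch_set_default(flags, 0, std::memory_order_seq_cst));
  assert(flags.load() == 0b1011);
  // Bit 1 was set: the call reports true and clears it.
  assert(atomic_fetch_reset_default(flags, 1, std::memory_order_seq_cst));
  assert(flags.load() == 0b1001);
}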
// Trait: detects whether a type is an instantiation of std::atomic.
template <typename T>
constexpr auto is_atomic = false;
template <typename Integer>
constexpr auto is_atomic<std::atomic<Integer>> = true;
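// Illustrative checks (not in the original listing): the trait is true only
// for std::atomic instantiations.
static_assert(is_atomic<std::atomic<std::uint32_t>>, "");
static_assert(!is_atomic<std::uint32_t>, "");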
// Assumed platform guards: the original listing's configuration macros are
// not shown, so standard compiler/architecture macros are used here.
#if defined(__x86_64__) || defined(_M_X64)
#if defined(_MSC_VER)

// MSVC: the bit-test-and-set intrinsics compile to lock bts. The order
// argument is only forwarded to the fallback; the interlocked intrinsics are
// always full barriers.
template <typename Integer>
bool atomic_fetch_set_x86(
    std::atomic<Integer>& atomic, std::size_t bit, std::memory_order order) {
  static_assert(alignof(std::atomic<Integer>) == alignof(Integer), "");
  static_assert(sizeof(std::atomic<Integer>) == sizeof(Integer), "");
  assert(atomic.is_lock_free());

  if (sizeof(Integer) == 4) {
    return _interlockedbittestandset(
        reinterpret_cast<volatile long*>(&atomic), static_cast<long>(bit));
  } else if (sizeof(Integer) == 8) {
    return _interlockedbittestandset64(
        reinterpret_cast<volatile long long*>(&atomic),
        static_cast<long long>(bit));
  } else {
    // No intrinsic for this width; fall back to fetch_or().
    return atomic_fetch_set_default(atomic, bit, order);
  }
}
// Non-std::atomic types take the fallback; the widths handled above must not
// reach this overload.
template <typename Atomic>
bool atomic_fetch_set_x86(
    Atomic& atomic, std::size_t bit, std::memory_order order) {
  static_assert(!std::is_same<Atomic, std::atomic<std::uint32_t>>{}, "");
  static_assert(!std::is_same<Atomic, std::atomic<std::uint64_t>>{}, "");
  return atomic_fetch_set_default(atomic, bit, order);
}
// MSVC: the bit-test-and-reset intrinsics compile to lock btr.
template <typename Integer>
bool atomic_fetch_reset_x86(
    std::atomic<Integer>& atomic, std::size_t bit, std::memory_order order) {
  static_assert(alignof(std::atomic<Integer>) == alignof(Integer), "");
  static_assert(sizeof(std::atomic<Integer>) == sizeof(Integer), "");
  assert(atomic.is_lock_free());

  if (sizeof(Integer) == 4) {
    return _interlockedbittestandreset(
        reinterpret_cast<volatile long*>(&atomic), static_cast<long>(bit));
  } else if (sizeof(Integer) == 8) {
    return _interlockedbittestandreset64(
        reinterpret_cast<volatile long long*>(&atomic),
        static_cast<long long>(bit));
  } else {
    // No intrinsic for this width; fall back to fetch_and().
    return atomic_fetch_reset_default(atomic, bit, order);
  }
}
template <typename Atomic>
bool atomic_fetch_reset_x86(
    Atomic& atomic, std::size_t bit, std::memory_order order) {
  static_assert(!std::is_same<Atomic, std::atomic<std::uint32_t>>{}, "");
  static_assert(!std::is_same<Atomic, std::atomic<std::uint64_t>>{}, "");
  return atomic_fetch_reset_default(atomic, bit, order);
}

#else // GCC/Clang: emit lock bts/btr directly via inline assembly. The lock
      // prefix is a full barrier, so order is only forwarded to the fallback.
template <typename Integer>
bool atomic_fetch_set_x86(
    std::atomic<Integer>& atomic, std::size_t bit, std::memory_order order) {
  auto previous = false;

  if (sizeof(Integer) == 2) {
    auto pointer = reinterpret_cast<std::uint16_t*>(&atomic);
    asm volatile("lock; btsw %1, (%2); setc %0"
                 : "=r"(previous)
                 : "ri"(static_cast<std::uint16_t>(bit)), "r"(pointer)
                 : "memory", "flags");
  } else if (sizeof(Integer) == 4) {
    auto pointer = reinterpret_cast<std::uint32_t*>(&atomic);
    asm volatile("lock; btsl %1, (%2); setc %0"
                 : "=r"(previous)
                 : "ri"(static_cast<std::uint32_t>(bit)), "r"(pointer)
                 : "memory", "flags");
  } else if (sizeof(Integer) == 8) {
    auto pointer = reinterpret_cast<std::uint64_t*>(&atomic);
    asm volatile("lock; btsq %1, (%2); setc %0"
                 : "=r"(previous)
                 : "ri"(static_cast<std::uint64_t>(bit)), "r"(pointer)
                 : "memory", "flags");
  } else {
    // bts has no one-byte form; fall back to fetch_or().
    return atomic_fetch_set_default(atomic, bit, order);
  }

  return previous;
}
// Anything that is not a std::atomic instantiation takes the fallback.
template <typename Atomic>
bool atomic_fetch_set_x86(
    Atomic& atomic, std::size_t bit, std::memory_order order) {
  static_assert(!is_atomic<Atomic>, "");
  return atomic_fetch_set_default(atomic, bit, order);
}
template <typename Integer>
bool atomic_fetch_reset_x86(
    std::atomic<Integer>& atomic, std::size_t bit, std::memory_order order) {
  auto previous = false;

  if (sizeof(Integer) == 2) {
    auto pointer = reinterpret_cast<std::uint16_t*>(&atomic);
    asm volatile("lock; btrw %1, (%2); setc %0"
                 : "=r"(previous)
                 : "ri"(static_cast<std::uint16_t>(bit)), "r"(pointer)
                 : "memory", "flags");
  } else if (sizeof(Integer) == 4) {
    auto pointer = reinterpret_cast<std::uint32_t*>(&atomic);
    asm volatile("lock; btrl %1, (%2); setc %0"
                 : "=r"(previous)
                 : "ri"(static_cast<std::uint32_t>(bit)), "r"(pointer)
                 : "memory", "flags");
  } else if (sizeof(Integer) == 8) {
    auto pointer = reinterpret_cast<std::uint64_t*>(&atomic);
    asm volatile("lock; btrq %1, (%2); setc %0"
                 : "=r"(previous)
                 : "ri"(static_cast<std::uint64_t>(bit)), "r"(pointer)
                 : "memory", "flags");
  } else {
    // btr has no one-byte form; fall back to fetch_and().
    return atomic_fetch_reset_default(atomic, bit, order);
  }

  return previous;
}
template <typename Atomic>
bool atomic_fetch_reset_x86(
    Atomic& atomic, std::size_t bit, std::memory_order order) {
  static_assert(!is_atomic<Atomic>, "");
  return atomic_fetch_reset_default(atomic, bit, order);
}

#endif // _MSC_VER
#else // not x86-64

// Stubs so the dispatch below compiles on other architectures; the
// kIsArchAmd64 check guarantees they are never called. (Throwing inside a
// noexcept function would call std::terminate.)
template <typename Atomic>
bool atomic_fetch_set_x86(Atomic&, std::size_t, std::memory_order) noexcept {
  throw std::logic_error{"Incorrect function called"};
}

template <typename Atomic>
bool atomic_fetch_reset_x86(Atomic&, std::size_t, std::memory_order) noexcept {
  throw std::logic_error{"Incorrect function called"};
}

#endif // x86-64
// Public entry point: atomically sets the given bit and returns its previous
// value.
template <typename Atomic>
bool atomic_fetch_set(Atomic& atomic, std::size_t bit, std::memory_order mo) {
  using Integer = decltype(atomic.load());
  static_assert(std::is_unsigned<Integer>{}, "");
  static_assert(!std::is_const<Atomic>{}, "");
  assert(bit < (sizeof(Integer) * 8));

  // Use the single-instruction path on x86-64 builds, the fetch_or()
  // fallback everywhere else.
  if (kIsArchAmd64) {
    return atomic_fetch_set_x86(atomic, bit, mo);
  } else {
    return atomic_fetch_set_default(atomic, bit, mo);
  }
}
// Public entry point: atomically clears the given bit and returns its
// previous value.
template <typename Atomic>
bool atomic_fetch_reset(Atomic& atomic, std::size_t bit, std::memory_order mo) {
  using Integer = decltype(atomic.load());
  static_assert(std::is_unsigned<Integer>{}, "");
  static_assert(!std::is_const<Atomic>{}, "");
  assert(bit < (sizeof(Integer) * 8));

  // Use the single-instruction path on x86-64 builds, the fetch_and()
  // fallback everywhere else.
  if (kIsArchAmd64) {
    return atomic_fetch_reset_x86(atomic, bit, mo);
  } else {
    return atomic_fetch_reset_default(atomic, bit, mo);
  }
}
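// Minimal usage sketch (illustrative values, not part of the original
// listing; assumes the definitions above are in one translation unit).
int main() {
  std::atomic<std::uint64_t> bits{0};

  // Bit 5 starts clear: the fetch-set reports false and sets it.
  bool was_set = atomic_fetch_set(bits, 5, std::memory_order_acq_rel);
  assert(!was_set && bits.load() == (std::uint64_t{1} << 5));

  // Bit 5 is now set: the fetch-reset reports true and clears it.
  was_set = atomic_fetch_reset(bits, 5, std::memory_order_acq_rel);
  assert(was_set && bits.load() == 0);
  return 0;
}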