/** @file
 * @author Bram de Greve (bram@cocamware.com)
 * @author Tom De Muer (tom@cocamware.com)
 *
 * *** BEGIN LICENSE INFORMATION ***
 *
 * The contents of this file are subject to the Common Public Attribution License
 * Version 1.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://lass.sourceforge.net/cpal-license. The License is based on the
 * Mozilla Public License Version 1.1 but Sections 14 and 15 have been added to cover
 * use of software over a computer network and provide for limited attribution for
 * the Original Developer. In addition, Exhibit A has been modified to be consistent
 * with Exhibit B.
 *
 * Software distributed under the License is distributed on an "AS IS" basis, WITHOUT
 * WARRANTY OF ANY KIND, either express or implied. See the License for the specific
 * language governing rights and limitations under the License.
 *
 * The Original Code is LASS - Library of Assembled Shared Sources.
 *
 * The Initial Developer of the Original Code is Bram de Greve and Tom De Muer.
 * The Original Developer is the Initial Developer.
 *
 * All portions of the code written by the Initial Developer are:
 * Copyright (C) 2004-2024 the Initial Developer.
 * All Rights Reserved.
 *
 * Contributor(s):
 *
 * Alternatively, the contents of this file may be used under the terms of the
 * GNU General Public License Version 2 or later (the GPL), in which case the
 * provisions of GPL are applicable instead of those above. If you wish to allow use
 * of your version of this file only under the terms of the GPL and not to allow
 * others to use your version of this file under the CPAL, indicate your decision by
 * deleting the provisions above and replace them with the notice and other
 * provisions required by the GPL License. If you do not delete the provisions above,
 * a recipient may use your version of this file under either the CPAL or the GPL.
 *
 * *** END LICENSE INFORMATION ***
 */

#ifndef LASS_GUARDIAN_OF_INCLUSION_UTIL_ATOMIC_H
#define LASS_GUARDIAN_OF_INCLUSION_UTIL_ATOMIC_H

#include "util_common.h"
#if !defined(LASS_PROCESSOR_ARCHITECTURE_ARM64)
#	include "impl/atomic_impl.h"
#endif

#include <atomic>

#ifdef LASS_PROCESSOR_ARCHITECTURE_x86
#	if LASS_COMPILER_TYPE == LASS_COMPILER_TYPE_MSVC
#		include <intrin.h>
#		define LASS_SPIN_PAUSE _mm_pause()
#	else
#		define LASS_SPIN_PAUSE __builtin_ia32_pause()
#	endif
#else
#	define LASS_SPIN_PAUSE
#endif

#ifdef __cpp_lib_hardware_interference_size
#define LASS_LOCK_FREE_ALIGNMENT std::hardware_destructive_interference_size
#else
#define LASS_LOCK_FREE_ALIGNMENT 64
#endif
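
// Illustrative sketch (ExampleCounter is hypothetical, not part of this
// header): one way to use LASS_LOCK_FREE_ALIGNMENT is to align hot atomics so
// that two of them never share a cache line, avoiding false sharing between
// threads that write them concurrently.
struct alignas(LASS_LOCK_FREE_ALIGNMENT) ExampleCounter
{
	std::atomic<long> value{0};
};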

/** @defgroup atomic
 * @brief atomic operations and tools for lock-free algorithms
 */

namespace lass
{
namespace util
{

#if !defined(LASS_PROCESSOR_ARCHITECTURE_ARM64)

/** @ingroup atomic
 *
 * Performs the following pseudocode in an atomic way (no other threads can intervene):
 * @code
 * if (dest != expectedValue) return false;
 * dest = newValue;
 * return true;
 * @endcode
 *
 * @deprecated Use std::atomic
 */
template <typename T>
[[deprecated("Use std::atomic")]]
bool atomicCompareAndSwap(volatile T& dest, T expectedValue, T newValue)
{
	return impl::AtomicOperations< sizeof(T) >::compareAndSwap(dest, expectedValue, newValue);
}
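
// Illustrative sketch (exampleCompareAndSwap is hypothetical, not part of this
// header): the std::atomic replacement for the deprecated function above.
// compare_exchange_strong returns true iff dest held expected and swapped in
// newValue; on failure it writes the value it observed back into expected.
inline bool exampleCompareAndSwap(std::atomic<int>& dest, int expected, int newValue)
{
	return dest.compare_exchange_strong(expected, newValue);
}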


/** @ingroup atomic
 *
 * Performs the following pseudocode in an atomic way (no other threads can intervene),
 * where dest2 is the T2 value that lives in memory directly after dest1:
 * @code
 * if (dest1 != expected1 || dest2 != expected2) return false;
 * dest1 = new1;
 * dest2 = new2;
 * return true;
 * @endcode
 *
 * Does not exist for 64-bit types, as the implementation provides no 128-bit CAS.
 *
 * @deprecated Use std::atomic
 */
template <typename T1, typename T2>
[[deprecated("Use std::atomic")]]
bool atomicCompareAndSwap(volatile T1& dest1, T1 expected1, T2 expected2, T1 new1, T2 new2)
{
	LASS_META_ASSERT(sizeof(T1) == sizeof(T2), T1_and_T2_must_be_of_same_size);
	return impl::AtomicOperations< sizeof(T1) >::compareAndSwap(
		dest1, expected1, expected2, new1, new2);
}
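
// Illustrative sketch (ExamplePair and exampleDwcas are hypothetical, not part
// of this header): the std::atomic counterpart of the two-value compareAndSwap
// above is a single compare_exchange on a trivially copyable struct that packs
// both values. Whether this compiles to a true 128-bit CAS (cmpxchg16b) is
// target- and flag-dependent; see the LASS_HAVE_STD_ATOMIC_DWCAS_LOCK_FREE
// check further down.
struct ExamplePair
{
	void* pointer = nullptr;
	unsigned long long tag = 0;	// together with the pointer: 16 bytes, no padding
};

inline bool exampleDwcas(std::atomic<ExamplePair>& dest, ExamplePair expected, ExamplePair desired)
{
	// compares and swaps both members in one atomic step (bytewise comparison)
	return dest.compare_exchange_strong(expected, desired);
}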


/** @ingroup atomic
 *
 * Performs the following pseudocode in an atomic way (no other threads can intervene):
 * @code
 * ++value;
 * @endcode
 *
 * @deprecated Use std::atomic
 */
template <typename T>
[[deprecated("Use std::atomic")]]
void atomicIncrement(volatile T& value)
{
	impl::AtomicOperations< sizeof(T) >::increment(value);
}



/** @ingroup atomic
 *
 * Performs the following pseudocode in an atomic way (no other threads can intervene):
 * @code
 * --value;
 * @endcode
 *
 * @deprecated Use std::atomic
 */
template <typename T>
[[deprecated("Use std::atomic")]]
void atomicDecrement(volatile T& value)
{
	impl::AtomicOperations< sizeof(T) >::decrement(value);
}
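
// Illustrative sketch (exampleCounterOps is hypothetical, not part of this
// header): the std::atomic replacements for atomicIncrement and
// atomicDecrement are fetch_add and fetch_sub.
inline void exampleCounterOps(std::atomic<int>& counter)
{
	counter.fetch_add(1, std::memory_order_relaxed);	// atomic ++counter
	counter.fetch_sub(1, std::memory_order_relaxed);	// atomic --counter
}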


/** @ingroup atomic
 *
 * Spins until it can atomically decrement @a semaphore to take a slot; requires
 * @a semaphore to be strictly positive before the decrement.
 *
 * @deprecated Use std::atomic
 */
template <typename T>
[[deprecated("Use std::atomic")]]
void atomicLock(volatile T& semaphore)
{
	T current;
	do
	{
		current = semaphore;
	}
	while (current <= 0 || !atomicCompareAndSwap(semaphore, current, current - 1));
}



/** @ingroup atomic
 *
 * Atomically decrements @a semaphore to take a slot, but returns false
 * immediately if none is available.
 *
 * @deprecated Use std::atomic
 */
template <typename T>
[[deprecated("Use std::atomic")]]
bool atomicTryLock(volatile T& semaphore)
{
	T current;
	do
	{
		current = semaphore;
		if (current <= 0)
		{
			return false;
		}
	}
	while (!atomicCompareAndSwap(semaphore, current, current - 1));
	return true;
}



/** @ingroup atomic
 *
 * Atomically increments @a semaphore to return a slot.
 *
 * @deprecated Use std::atomic
 */
template <typename T>
[[deprecated("Use std::atomic")]]
void atomicUnlock(volatile T& semaphore)
{
	atomicIncrement(semaphore);
}

#endif


/** @ingroup atomic
 *
 * Spins until it can atomically decrement @a semaphore to take a slot.
 */
template <typename T>
void atomicLock(std::atomic<T>& semaphore)
{
	// the load on failure (or when current == 0) may be relaxed, because
	// memory order is not about the atomic variable itself. It's about
	// loads and stores on _other_ locations, and there are none of these
	// inside the loop. https://stackoverflow.com/q/58635264
	T current = semaphore.load(std::memory_order_acquire);
	do
	{
		while (!current)
		{
			LASS_SPIN_PAUSE;
			current = semaphore.load(std::memory_order_relaxed);
		}
		LASS_ASSERT(current >= 0);
	}
	while (!semaphore.compare_exchange_weak(current, current - 1,
		std::memory_order_release, std::memory_order_relaxed));
}



/** @ingroup atomic
 *
 * Atomically decrements @a semaphore to take a slot, but returns false
 * immediately if none is available.
 */
template <typename T>
bool atomicTryLock(std::atomic<T>& semaphore)
{
	// the load on failure (or when current == 0) may be relaxed, because
	// memory order is not about the atomic variable itself. It's about
	// loads and stores on _other_ locations, and there are none of these
	// inside the loop. https://stackoverflow.com/q/58635264
	T current = semaphore.load(std::memory_order_acquire);
	do
	{
		LASS_ASSERT(current >= 0);
		if (!current)
		{
			return false;
		}
	}
	while (!semaphore.compare_exchange_weak(current, current - 1,
		std::memory_order_release, std::memory_order_relaxed));
	return true;
}



/** @ingroup atomic
 *
 * Atomically increments @a semaphore to return a slot.
 */
template <typename T>
void atomicUnlock(std::atomic<T>& semaphore)
{
	semaphore.fetch_add(1, std::memory_order_acq_rel);
}
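
// Usage sketch (exampleWorker is hypothetical, not part of this header): the
// three functions above form a spinning counting semaphore. Initialise the
// atomic with the number of available slots; atomicLock takes a slot (spinning
// while none is free), atomicTryLock only takes one if immediately available,
// and atomicUnlock returns it.
inline void exampleWorker(std::atomic<int>& slots)
{
	atomicLock(slots);	// spins with LASS_SPIN_PAUSE until a slot is free
	// ... critical section: at most the initial slot count of threads run here ...
	atomicUnlock(slots);	// return the slot so another thread can enter
}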


#if LASS_ADDRESS_SIZE == 64
#	if LASS_HAVE_STD_ATOMIC_DWCAS_LOCK_FREE
	// std::atomic is lock free for 16 byte PODs, and will use lock cmpxchg16b
	// (or _InterlockedCompareExchange128) for its compare_exchange (DWCAS).
	// This requires TaggedPtr to be 16-byte aligned.
#		define LASS_TAGGED_PTR_ALIGN alignas(16)
#	elif LASS_ACTUAL_ADDRESS_SIZE == 48
	// std::atomic is *not* lock free for 16 byte PODs, because it will not use
	// lock cmpxchg16b, even though that instruction commonly exists on this
	// architecture. This is to remain ABI compatible with early 64-bit AMD
	// processors that lacked it.
	// Therefore, we need to use the good old pointers-are-actually-only-48-bits
	// trick to pack a pointer and a 16-bit tag in the space of one 8 byte pointer,
	// for which std::atomic will be lock free.
#		define LASS_TAGGED_PTR_64_PACKED 1
#		define LASS_TAGGED_PTR_ALIGN alignas(8)
#	else
#		error not implemented yet
#	endif
#else
#	define LASS_TAGGED_PTR_ALIGN alignas(8)
#endif

/** Pointer with a tag for ABA salvation
 * @ingroup atomic
 * Some lock-free algorithms suffer from the ABA problem when acting on pointers.
 * This can be solved (read: be made very unlikely) by adding a tag to the pointer.
 */
template <typename T>
class LASS_TAGGED_PTR_ALIGN TaggedPtr
{
#if LASS_TAGGED_PTR_64_PACKED

public:
	typedef num::Tuint16 TTag;
	TaggedPtr() = default;
	TaggedPtr(T* ptr, TTag tag): bits_((reinterpret_cast<num::Tint64>(ptr) << 16) | (tag & 0xffff)) {}
	T* get() const
	{
#	if defined(LASS_HAVE_INLINE_ASSEMBLY_GCC) && defined(LASS_PROCESSOR_ARCHITECTURE_x86)
		T* ptr;
		__asm__ ("sarq $16, %0;" : "=q"(ptr) : "0"(bits_) : "cc");
		return ptr;
#	elif LASS_COMPILER_TYPE == LASS_COMPILER_TYPE_MSVC
		return reinterpret_cast<T*>(__ll_rshift(bits_, 16));
#	else
		// portable fallback: shift the tag out and sign-extend bit 63 by hand,
		// so canonical kernel-space addresses survive the round trip.
		return ((bits_ & 0x8000000000000000) == 0) ?
			reinterpret_cast<T*>(bits_ >> 16) :
			reinterpret_cast<T*>((bits_ >> 16) | 0xffff000000000000);
#	endif
	}
	TTag tag() const { return static_cast<TTag>(bits_ & 0xffff); }
	TTag nextTag() const { return static_cast<TTag>(static_cast<size_t>(tag() + 1) & 0xffff); }
	bool operator==(const TaggedPtr& other) const { return bits_ == other.bits_; }
private:
	num::Tint64 bits_ = 0;

#else

public:
	typedef num::TuintPtr TTag;
	TaggedPtr() = default;
	TaggedPtr(T* ptr, TTag tag): ptr_(ptr), tag_(tag) {}
	T* get() const { return ptr_; }
	TTag tag() const { return tag_; }
	TTag nextTag() const { return tag_ + 1; }
	bool operator==(const TaggedPtr& other) const { return ptr_ == other.ptr_ && tag_ == other.tag_; }
private:
	T* ptr_ = nullptr;
	TTag tag_ = 0;

#endif

public:
	T* operator->() const { LASS_ASSERT(get()); return get(); }
	bool operator!() const { return get() == nullptr; }
	explicit operator bool() const { return get() != nullptr; }
	bool operator!=(const TaggedPtr& other) const { return !(*this == other); }
};
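
// Illustrative sketch (ExampleNode and examplePush are hypothetical, not part
// of this header): a lock-free stack push that uses TaggedPtr to sidestep the
// ABA problem. The tag is bumped on every successful swap, so a stale head
// that happens to reuse a recycled node address no longer compares equal.
struct ExampleNode
{
	ExampleNode* next = nullptr;
};

inline void examplePush(std::atomic<TaggedPtr<ExampleNode>>& head, ExampleNode* node)
{
	TaggedPtr<ExampleNode> oldHead = head.load(std::memory_order_relaxed);
	TaggedPtr<ExampleNode> newHead;
	do
	{
		node->next = oldHead.get();                                 // link above the current head
		newHead = TaggedPtr<ExampleNode>(node, oldHead.nextTag());  // fresh tag defeats ABA
	}
	while (!head.compare_exchange_weak(oldHead, newHead,
		std::memory_order_release, std::memory_order_relaxed));
}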


}
}

#endif

// EOF