#if LASS_KUSER_HELPER_VERSION >= 2
#define LASS_HAVE_KUSER_CMPXCHG 1
typedef int (kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define lass_util_impl_kuser_cmpxchg (*(kuser_cmpxchg_t *)0xffff0fc0)
#endif
#if LASS_KUSER_HELPER_VERSION >= 5
#define LASS_HAVE_KUSER_CMPXCHG64 1
typedef int (kuser_cmpxchg64_t)(const int64_t *oldval, const int64_t *newval, volatile int64_t *ptr);
#define lass_util_impl_kuser_cmpxchg64 (*(kuser_cmpxchg64_t *)0xffff0f60)
#endif
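// The kuser helpers are entry points the Linux kernel maps at fixed addresses in the
// high vector page: __kuser_cmpxchg at 0xffff0fc0 (helper version >= 2) and
// __kuser_cmpxchg64 at 0xffff0f60 (helper version >= 5). Both return 0 when *ptr was
// atomically changed from the old to the new value and non-zero otherwise, which is
// why the calls below compare their result against 0.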
#if LASS_HAVE_BIG_ENDIAN
#	error "code below assumes little endian. can easily be adapted for big endian too, but that's not done yet ;-)"
#endif
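// Each ldrex*/strex* based compareAndSwap below makes a single attempt: it
// load-exclusives the current value, presets the failure flag to 1, and only if the
// value equals the expected one issues a conditional store-exclusive whose result
// (0 on success) overwrites that flag. Note that strex* can also fail when the
// exclusive reservation is lost (e.g. on an interrupt or context switch), so a false
// return does not guarantee that the destination differed from the expected value.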
#if LASS_HAVE_LDREXB_STREXB
template <>
template <typename T>
inline bool AtomicOperations<1>::compareAndSwap(volatile T& dest, T expected, T desired)
{
	T tmp;
	bool failure = false;
	__asm__ __volatile__(
		"ldrexb %[tmp], [%[dest]]\n\t"
		"mov %[failure], #1\n\t"
		"teq %[tmp], %[expected]\n\t"
		"it eq\n\t"
		"strexbeq %[failure], %[desired], [%[dest]]\n\t"
		: [tmp] "=&r"(tmp), [failure] "=&r"(failure)
		: [dest] "r"(&dest), [expected] "r"(expected), [desired] "r"(desired)
		: "cc", "memory");
	return !failure;
}
#endif
#if LASS_HAVE_LDREXH_STREXH
template <>
template <typename T>
inline bool AtomicOperations<2>::compareAndSwap(volatile T& dest, T expected, T desired)
{
	LASS_ASSERT((reinterpret_cast<num::TuintPtr>(&dest) & 0x1) == 0);

	T tmp;
	bool failure = false;
	__asm__ __volatile__(
		"ldrexh %[tmp], [%[dest]]\n\t"
		"mov %[failure], #1\n\t"
		"teq %[tmp], %[expected]\n\t"
		"it eq\n\t"
		"strexheq %[failure], %[desired], [%[dest]]\n\t"
		: [tmp] "=&r"(tmp), [failure] "=&r"(failure)
		: [dest] "r"(&dest), [expected] "r"(expected), [desired] "r"(desired)
		: "cc", "memory");
	return !failure;
}
#endif
#if LASS_HAVE_LDREX_STREX
template <>
template <typename T>
inline bool AtomicOperations<4>::compareAndSwap(volatile T& dest, T expected, T desired)
{
	LASS_ASSERT((reinterpret_cast<num::TuintPtr>(&dest) & 0x3) == 0);

	T tmp;
	bool failure = false;
	__asm__ __volatile__(
		"ldrex %[tmp], [%[dest]]\n\t"
		"mov %[failure], #1\n\t"
		"teq %[tmp], %[expected]\n\t"
		"it eq\n\t"
		"strexeq %[failure], %[desired], [%[dest]]\n\t"
		: [tmp] "=&r"(tmp), [failure] "=&r"(failure)
		: [dest] "r"(&dest), [expected] "r"(expected), [desired] "r"(desired)
		: "cc", "memory");
	return !failure;
}
#elif LASS_HAVE_KUSER_CMPXCHG
template <>
template <typename T>
inline bool AtomicOperations<4>::compareAndSwap(volatile T& dest, T expected, T desired)
{
	return lass_util_impl_kuser_cmpxchg(
		*reinterpret_cast<int32_t*>(&expected),
		*reinterpret_cast<int32_t*>(&desired),
		reinterpret_cast<volatile int32_t*>(&dest)) == 0;
}
#endif
#if LASS_HAVE_LDREXD_STREXD
template <>
template <typename T>
inline bool AtomicOperations<8>::compareAndSwap(volatile T& dest, T expected, T desired)
{
	LASS_ASSERT((reinterpret_cast<num::TuintPtr>(&dest) & 0x7) == 0);

	T tmp;
	bool failure = false;
	__asm__ __volatile__(
		"ldrexd %[tmp], %H[tmp], [%[dest]]\n\t"
		"mov %[failure], #1\n\t"
		"teq %[tmp], %[expected]\n\t"
		"itt eq\n\t"
		"teqeq %H[tmp], %H[expected]\n\t"
		"strexdeq %[failure], %[desired], %H[desired], [%[dest]]\n\t"
		: [tmp] "=&r"(tmp), [failure] "=&r"(failure)
		: [dest] "r"(&dest), [expected] "r"(expected), [desired] "r"(desired)
		: "cc", "memory");
	return !failure;
}
#elif LASS_HAVE_KUSER_CMPXCHG64
template <>
template <typename T>
inline bool AtomicOperations<8>::compareAndSwap(volatile T& dest, T expected, T desired)
{
	return lass_util_impl_kuser_cmpxchg64(
		reinterpret_cast<const int64_t*>(&expected),
		reinterpret_cast<const int64_t*>(&desired),
		reinterpret_cast<volatile int64_t*>(&dest)) == 0;
}
#endif
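// The overloads below swap two adjacent values at once by packing them into a single
// wider unit (byte + byte into a halfword, halfword + halfword into a word, word +
// word into a doubleword) and performing one CAS on that unit. The packing puts the
// second value in the high half and the first in the low half, which is why big
// endian targets are rejected by the #error above.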
#if LASS_HAVE_LDREXH_STREXH
template <>
template <typename T1, typename T2>
inline bool AtomicOperations<1>::compareAndSwap(volatile T1& dest1, T1 expected1, T2 expected2, T1 desired1, T2 desired2)
{
	LASS_ASSERT((reinterpret_cast<num::TuintPtr>(&dest1) & 0x1) == 0);

	uint16_t tmp;
	bool failure = false;
	__asm__ __volatile__(
		"orr %[expected1], %[expected1], %[expected2], lsl #8\n\t"
		"orr %[desired1], %[desired1], %[desired2], lsl #8\n\t"
		"ldrexh %[tmp], [%[dest]]\n\t"
		"mov %[failure], #1\n\t"
		"teq %[tmp], %[expected1]\n\t"
		"it eq\n\t"
		"strexheq %[failure], %[desired1], [%[dest]]\n\t"
		: [tmp] "=&r"(tmp), [failure] "=&r"(failure)
		: [dest] "r"(&dest1), [expected1] "r"(expected1), [expected2] "r"(expected2), [desired1] "r"(desired1), [desired2] "r"(desired2)
		: "cc", "memory");
	return !failure;
}
#endif
#if LASS_HAVE_LDREX_STREX
template <>
template <typename T1, typename T2>
inline bool AtomicOperations<2>::compareAndSwap(volatile T1& dest1, T1 expected1, T2 expected2, T1 desired1, T2 desired2)
{
	LASS_ASSERT((reinterpret_cast<num::TuintPtr>(&dest1) & 0x3) == 0);

	uint32_t tmp;
	bool failure = false;
	__asm__ __volatile__(
		"orr %[expected1], %[expected1], %[expected2], lsl #16\n\t"
		"orr %[desired1], %[desired1], %[desired2], lsl #16\n\t"
		"ldrex %[tmp], [%[dest]]\n\t"
		"mov %[failure], #1\n\t"
		"teq %[tmp], %[expected1]\n\t"
		"it eq\n\t"
		"strexeq %[failure], %[desired1], [%[dest]]\n\t"
		: [tmp] "=&r"(tmp), [failure] "=&r"(failure)
		: [dest] "r"(&dest1), [expected1] "r"(expected1), [expected2] "r"(expected2), [desired1] "r"(desired1), [desired2] "r"(desired2)
		: "cc", "memory");
	return !failure;
}
#elif LASS_HAVE_KUSER_CMPXCHG
template <>
template <typename T1, typename T2>
inline bool AtomicOperations<2>::compareAndSwap(volatile T1& dest1, T1 expected1, T2 expected2, T1 desired1, T2 desired2)
{
	// combine the two halfwords as unsigned bits so that sign extension of the low
	// half cannot leak into the high half
	const int32_t expected = static_cast<int32_t>(
		static_cast<uint32_t>(*reinterpret_cast<uint16_t*>(&expected1)) |
		(static_cast<uint32_t>(*reinterpret_cast<uint16_t*>(&expected2)) << 16));
	const int32_t desired = static_cast<int32_t>(
		static_cast<uint32_t>(*reinterpret_cast<uint16_t*>(&desired1)) |
		(static_cast<uint32_t>(*reinterpret_cast<uint16_t*>(&desired2)) << 16));
	return lass_util_impl_kuser_cmpxchg(expected, desired, reinterpret_cast<volatile int32_t*>(&dest1)) == 0;
}
#endif
#if LASS_HAVE_LDREXD_STREXD
template <>
template <typename T1, typename T2>
inline bool AtomicOperations<4>::compareAndSwap(volatile T1& dest1, T1 expected1, T2 expected2, T1 desired1, T2 desired2)
{
	LASS_ASSERT((reinterpret_cast<num::TuintPtr>(&dest1) & 0x7) == 0);

	uint64_t tmp;
	uint64_t desired;
	bool failure = false;
	__asm__ __volatile__(
		"mov %[desired], %[desired1]\n\t"
		"mov %H[desired], %[desired2]\n\t"
		"ldrexd %[tmp], %H[tmp], [%[dest]]\n\t"
		"mov %[failure], #1\n\t"
		"teq %[tmp], %[expected1]\n\t"
		"itt eq\n\t"
		"teqeq %H[tmp], %[expected2]\n\t"
		"strexdeq %[failure], %[desired], %H[desired], [%[dest]]\n\t"
		: [desired] "=&r"(desired), [tmp] "=&r"(tmp), [failure] "=&r"(failure)
		: [dest] "r"(&dest1), [expected1] "r"(expected1), [expected2] "r"(expected2), [desired1] "r"(desired1), [desired2] "r"(desired2)
		: "cc", "memory");
	return !failure;
}
#elif LASS_HAVE_KUSER_CMPXCHG64
template <>
template <typename T1, typename T2>
inline bool AtomicOperations<4>::compareAndSwap(volatile T1& dest1, T1 expected1, T2 expected2, T1 desired1, T2 desired2)
{
	// combine the two words as unsigned bits so that sign extension of the low word
	// cannot leak into the high word
	const int64_t expected = static_cast<int64_t>(
		static_cast<uint64_t>(*reinterpret_cast<uint32_t*>(&expected1)) |
		(static_cast<uint64_t>(*reinterpret_cast<uint32_t*>(&expected2)) << 32));
	const int64_t desired = static_cast<int64_t>(
		static_cast<uint64_t>(*reinterpret_cast<uint32_t*>(&desired1)) |
		(static_cast<uint64_t>(*reinterpret_cast<uint32_t*>(&desired2)) << 32));
	return lass_util_impl_kuser_cmpxchg64(&expected, &desired, reinterpret_cast<volatile int64_t*>(&dest1)) == 0;
}
#endif
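// Unlike compareAndSwap, increment and decrement must not fail spuriously, so each
// implementation below retries its ldrex*/add (or sub)/strex* sequence in a do/while
// loop until the store-exclusive reports success.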
#if LASS_HAVE_LDREXB_STREXB
template <>
template <typename T>
inline void AtomicOperations<1>::increment(volatile T& value)
{
	T tmp;
	bool failure = false;
	do
	{
		__asm__ __volatile__(
			"ldrexb %[tmp], [%[value]]\n\t"
			"add %[tmp], %[tmp], #1\n\t"
			"strexb %[failure], %[tmp], [%[value]]\n\t"
			: [tmp] "=&r"(tmp), [failure] "=&r"(failure)
			: [value] "r"(&value)
			: "cc", "memory");
	}
	while (failure);
}
#endif
#if LASS_HAVE_LDREXH_STREXH
template <>
template <typename T>
inline void AtomicOperations<2>::increment(volatile T& value)
{
	LASS_ASSERT((reinterpret_cast<num::TuintPtr>(&value) & 0x1) == 0);

	T tmp;
	bool failure = false;
	do
	{
		__asm__ __volatile__(
			"ldrexh %[tmp], [%[value]]\n\t"
			"add %[tmp], %[tmp], #1\n\t"
			"strexh %[failure], %[tmp], [%[value]]\n\t"
			: [tmp] "=&r"(tmp), [failure] "=&r"(failure)
			: [value] "r"(&value)
			: "cc", "memory");
	}
	while (failure);
}
#endif
#if LASS_HAVE_LDREX_STREX
template <>
template <typename T>
inline void AtomicOperations<4>::increment(volatile T& value)
{
	LASS_ASSERT((reinterpret_cast<num::TuintPtr>(&value) & 0x3) == 0);

	T tmp;
	bool failure = false;
	do
	{
		__asm__ __volatile__(
			"ldrex %[tmp], [%[value]]\n\t"
			"add %[tmp], %[tmp], #1\n\t"
			"strex %[failure], %[tmp], [%[value]]\n\t"
			: [tmp] "=&r"(tmp), [failure] "=&r"(failure)
			: [value] "r"(&value)
			: "cc", "memory");
	}
	while (failure);
}
#endif
#if LASS_HAVE_LDREXD_STREXD
template <>
template <typename T>
inline void AtomicOperations<8>::increment(volatile T& value)
{
	LASS_ASSERT((reinterpret_cast<num::TuintPtr>(&value) & 0x7) == 0);

	T tmp;
	bool failure = false;
	do
	{
		__asm__ __volatile__(
			"ldrexd %[tmp], %H[tmp], [%[value]]\n\t"
			"adds %[tmp], %[tmp], #1\n\t"
			"adc %H[tmp], %H[tmp], #0\n\t"
			"strexd %[failure], %[tmp], %H[tmp], [%[value]]\n\t"
			: [tmp] "=&r"(tmp), [failure] "=&r"(failure)
			: [value] "r"(&value)
			: "cc", "memory");
	}
	while (failure);
}
#endif
#if LASS_HAVE_LDREXB_STREXB
template <>
template <typename T>
inline void AtomicOperations<1>::decrement(volatile T& value)
{
	T tmp;
	bool failure = false;
	do
	{
		__asm__ __volatile__(
			"ldrexb %[tmp], [%[value]]\n\t"
			"sub %[tmp], %[tmp], #1\n\t"
			"strexb %[failure], %[tmp], [%[value]]\n\t"
			: [tmp] "=&r"(tmp), [failure] "=&r"(failure)
			: [value] "r"(&value)
			: "cc", "memory");
	}
	while (failure);
}
#endif
#if LASS_HAVE_LDREXH_STREXH
template <>
template <typename T>
inline void AtomicOperations<2>::decrement(volatile T& value)
{
	LASS_ASSERT((reinterpret_cast<num::TuintPtr>(&value) & 0x1) == 0);

	T tmp;
	bool failure = false;
	do
	{
		__asm__ __volatile__(
			"ldrexh %[tmp], [%[value]]\n\t"
			"sub %[tmp], %[tmp], #1\n\t"
			"strexh %[failure], %[tmp], [%[value]]\n\t"
			: [tmp] "=&r"(tmp), [failure] "=&r"(failure)
			: [value] "r"(&value)
			: "cc", "memory");
	}
	while (failure);
}
#endif
#if LASS_HAVE_LDREX_STREX
template <>
template <typename T>
inline void AtomicOperations<4>::decrement(volatile T& value)
{
	LASS_ASSERT((reinterpret_cast<num::TuintPtr>(&value) & 0x3) == 0);

	T tmp;
	bool failure = false;
	do
	{
		__asm__ __volatile__(
			"ldrex %[tmp], [%[value]]\n\t"
			"sub %[tmp], %[tmp], #1\n\t"
			"strex %[failure], %[tmp], [%[value]]\n\t"
			: [tmp] "=&r"(tmp), [failure] "=&r"(failure)
			: [value] "r"(&value)
			: "cc", "memory");
	}
	while (failure);
}
#endif
#if LASS_HAVE_LDREXD_STREXD
template <>
template <typename T>
inline void AtomicOperations<8>::decrement(volatile T& value)
{
	LASS_ASSERT((reinterpret_cast<num::TuintPtr>(&value) & 0x7) == 0);

	T tmp;
	bool failure = false;
	do
	{
		__asm__ __volatile__(
			"ldrexd %[tmp], %H[tmp], [%[value]]\n\t"
			"subs %[tmp], %[tmp], #1\n\t"
			"sbc %H[tmp], %H[tmp], #0\n\t"
			"strexd %[failure], %[tmp], %H[tmp], [%[value]]\n\t"
			: [tmp] "=&r"(tmp), [failure] "=&r"(failure)
			: [value] "r"(&value)
			: "cc", "memory");
	}
	while (failure);
}
#endif
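// Hypothetical usage sketch (assuming, as the <1>/<2>/<4>/<8> specializations above
// suggest, that these are static members dispatched on sizeof(T)):
//
//	volatile int32_t counter = 0;
//	AtomicOperations<sizeof(counter)>::increment(counter);
//	const bool swapped = AtomicOperations<sizeof(counter)>::compareAndSwap(counter, int32_t(1), int32_t(2));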