/*
 * This code is mostly taken from FreeBSD machine/atomic.h
 * Changes: Dietmar Hahn <dietmar.hahn@fujitsu-siemens.com>
 *
 ****************************************************************************
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_

/*
 * Various simple arithmetic operations on memory that are atomic in the
 * presence of interrupts and SMP-safe.
 */

#if !defined(__ASSEMBLY__)

#include <types.h>


/*
 * Everything is built out of cmpxchg: the compare value is placed in the
 * ar.ccv application register, and cmpxchg stores the new value only if
 * the current contents of *p still match it.  The previous contents of
 * *p are returned either way.
 */
#define IA64_CMPXCHG(sz, sem, p, cmpval, newval, ret)			\
	__asm __volatile (						\
		"mov ar.ccv=%2;;\n\t"					\
		"cmpxchg" #sz "." #sem " %0=%4,%3,ar.ccv\n\t"		\
		: "=r" (ret), "=m" (*p)					\
		: "r" (cmpval), "r" (newval), "m" (*p)			\
		: "memory")


/*
 * Some common forms of cmpxchg.
 */

static __inline uint8_t
ia64_cmpxchg_acq_8(volatile uint8_t* p, uint8_t cmpval, uint8_t newval)
{
	uint8_t ret;

	IA64_CMPXCHG(1, acq, p, cmpval, newval, ret);
	return (ret);
}

static __inline uint16_t
ia64_cmpxchg_acq_16(volatile uint16_t* p, uint16_t cmpval, uint16_t newval)
{
	uint16_t ret;

	IA64_CMPXCHG(2, acq, p, cmpval, newval, ret);
	return (ret);
}

static __inline uint32_t
ia64_cmpxchg_acq_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
{
	uint32_t ret;

	IA64_CMPXCHG(4, acq, p, cmpval, newval, ret);
	return (ret);
}

static __inline uint32_t
ia64_cmpxchg_rel_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
{
	uint32_t ret;

	IA64_CMPXCHG(4, rel, p, cmpval, newval, ret);
	return (ret);
}

static __inline uint64_t
ia64_cmpxchg_acq_64(volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
{
	uint64_t ret;

	IA64_CMPXCHG(8, acq, p, cmpval, newval, ret);
	return (ret);
}

static __inline uint64_t
ia64_cmpxchg_rel_64(volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
{
	uint64_t ret;

	IA64_CMPXCHG(8, rel, p, cmpval, newval, ret);
	return (ret);
}

#define ATOMIC_STORE_LOAD(type, width, size)				\
static __inline uint##width##_t						\
ia64_ld_acq_##width(volatile uint##width##_t* p)			\
{									\
	uint##width##_t v;						\
									\
	__asm __volatile ("ld" size ".acq %0=%1"			\
			  : "=r" (v)					\
			  : "m" (*p)					\
			  : "memory");					\
	return (v);							\
}									\
									\
static __inline uint##width##_t						\
atomic_load_acq_##width(volatile uint##width##_t* p)			\
{									\
	uint##width##_t v;						\
									\
	__asm __volatile ("ld" size ".acq %0=%1"			\
			  : "=r" (v)					\
			  : "m" (*p)					\
			  : "memory");					\
	return (v);							\
}									\
									\
static __inline uint##width##_t						\
atomic_load_acq_##type(volatile uint##width##_t* p)			\
{									\
	uint##width##_t v;						\
									\
	__asm __volatile ("ld" size ".acq %0=%1"			\
			  : "=r" (v)					\
			  : "m" (*p)					\
			  : "memory");					\
	return (v);							\
}									\
									\
static __inline void							\
ia64_st_rel_##width(volatile uint##width##_t* p, uint##width##_t v)	\
{									\
	__asm __volatile ("st" size ".rel %0=%1"			\
			  : "=m" (*p)					\
			  : "r" (v)					\
			  : "memory");					\
}									\
									\
static __inline void							\
atomic_store_rel_##width(volatile uint##width##_t* p, uint##width##_t v)\
{									\
	__asm __volatile ("st" size ".rel %0=%1"			\
			  : "=m" (*p)					\
			  : "r" (v)					\
			  : "memory");					\
}									\
									\
static __inline void							\
atomic_store_rel_##type(volatile uint##width##_t* p, uint##width##_t v)\
{									\
	__asm __volatile ("st" size ".rel %0=%1"			\
			  : "=m" (*p)					\
			  : "r" (v)					\
			  : "memory");					\
}

ATOMIC_STORE_LOAD(char, 8, "1")
ATOMIC_STORE_LOAD(short, 16, "2")
ATOMIC_STORE_LOAD(int, 32, "4")
ATOMIC_STORE_LOAD(long, 64, "8")
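
/*
 * Each ATOMIC_STORE_LOAD(type, width, size) expansion above defines
 * ia64_ld_acq_<width>, atomic_load_acq_<width>, atomic_load_acq_<type>,
 * ia64_st_rel_<width>, atomic_store_rel_<width> and
 * atomic_store_rel_<type>.  For example, the "int" line yields
 * atomic_load_acq_32/atomic_load_acq_int and
 * atomic_store_rel_32/atomic_store_rel_int.
 */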

#undef ATOMIC_STORE_LOAD

#define IA64_ATOMIC(sz, type, name, width, op)				\
									\
static __inline type							\
atomic_##name##_acq_##width(volatile type *p, type v)			\
{									\
	type old, ret;							\
	do {								\
		old = *p;						\
		IA64_CMPXCHG(sz, acq, p, old, old op v, ret);		\
	} while (ret != old);						\
	return (ret);							\
}									\
									\
static __inline type							\
atomic_##name##_rel_##width(volatile type *p, type v)			\
{									\
	type old, ret;							\
	do {								\
		old = *p;						\
		IA64_CMPXCHG(sz, rel, p, old, old op v, ret);		\
	} while (ret != old);						\
	return (ret);							\
}
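
/*
 * Note: each generated helper returns the value the location held
 * before the update (the loop exits once the cmpxchg result matches
 * the value it re-read).  A minimal usage sketch, with a counter word
 * of our own choosing:
 *
 *	volatile uint32_t refcnt;
 *	uint32_t prev = atomic_add_acq_32(&refcnt, 1);
 *
 * where prev is the count before the increment.
 */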

IA64_ATOMIC(1, uint8_t, set, 8, |)
IA64_ATOMIC(2, uint16_t, set, 16, |)
IA64_ATOMIC(4, uint32_t, set, 32, |)
IA64_ATOMIC(8, uint64_t, set, 64, |)

IA64_ATOMIC(1, uint8_t, clear, 8, &~)
IA64_ATOMIC(2, uint16_t, clear, 16, &~)
IA64_ATOMIC(4, uint32_t, clear, 32, &~)
IA64_ATOMIC(8, uint64_t, clear, 64, &~)

IA64_ATOMIC(1, uint8_t, add, 8, +)
IA64_ATOMIC(2, uint16_t, add, 16, +)
IA64_ATOMIC(4, uint32_t, add, 32, +)
IA64_ATOMIC(8, uint64_t, add, 64, +)

IA64_ATOMIC(1, uint8_t, subtract, 8, -)
IA64_ATOMIC(2, uint16_t, subtract, 16, -)
IA64_ATOMIC(4, uint32_t, subtract, 32, -)
IA64_ATOMIC(8, uint64_t, subtract, 64, -)

#undef IA64_ATOMIC
#undef IA64_CMPXCHG

#define atomic_set_8			atomic_set_acq_8
#define atomic_clear_8			atomic_clear_acq_8
#define atomic_add_8			atomic_add_acq_8
#define atomic_subtract_8		atomic_subtract_acq_8

#define atomic_set_16			atomic_set_acq_16
#define atomic_clear_16			atomic_clear_acq_16
#define atomic_add_16			atomic_add_acq_16
#define atomic_subtract_16		atomic_subtract_acq_16

#define atomic_set_32			atomic_set_acq_32
#define atomic_clear_32			atomic_clear_acq_32
#define atomic_add_32			atomic_add_acq_32
#define atomic_subtract_32		atomic_subtract_acq_32

#define atomic_set_64			atomic_set_acq_64
#define atomic_clear_64			atomic_clear_acq_64
#define atomic_add_64			atomic_add_acq_64
#define atomic_subtract_64		atomic_subtract_acq_64

#define atomic_set_char			atomic_set_8
#define atomic_clear_char		atomic_clear_8
#define atomic_add_char			atomic_add_8
#define atomic_subtract_char		atomic_subtract_8
#define atomic_set_acq_char		atomic_set_acq_8
#define atomic_clear_acq_char		atomic_clear_acq_8
#define atomic_add_acq_char		atomic_add_acq_8
#define atomic_subtract_acq_char	atomic_subtract_acq_8
#define atomic_set_rel_char		atomic_set_rel_8
#define atomic_clear_rel_char		atomic_clear_rel_8
#define atomic_add_rel_char		atomic_add_rel_8
#define atomic_subtract_rel_char	atomic_subtract_rel_8

#define atomic_set_short		atomic_set_16
#define atomic_clear_short		atomic_clear_16
#define atomic_add_short		atomic_add_16
#define atomic_subtract_short		atomic_subtract_16
#define atomic_set_acq_short		atomic_set_acq_16
#define atomic_clear_acq_short		atomic_clear_acq_16
#define atomic_add_acq_short		atomic_add_acq_16
#define atomic_subtract_acq_short	atomic_subtract_acq_16
#define atomic_set_rel_short		atomic_set_rel_16
#define atomic_clear_rel_short		atomic_clear_rel_16
#define atomic_add_rel_short		atomic_add_rel_16
#define atomic_subtract_rel_short	atomic_subtract_rel_16

#define atomic_set_int			atomic_set_32
#define atomic_clear_int		atomic_clear_32
#define atomic_add_int			atomic_add_32
#define atomic_subtract_int		atomic_subtract_32
#define atomic_set_acq_int		atomic_set_acq_32
#define atomic_clear_acq_int		atomic_clear_acq_32
#define atomic_add_acq_int		atomic_add_acq_32
#define atomic_subtract_acq_int		atomic_subtract_acq_32
#define atomic_set_rel_int		atomic_set_rel_32
#define atomic_clear_rel_int		atomic_clear_rel_32
#define atomic_add_rel_int		atomic_add_rel_32
#define atomic_subtract_rel_int		atomic_subtract_rel_32

#define atomic_set_long			atomic_set_64
#define atomic_clear_long		atomic_clear_64
#define atomic_add_long			atomic_add_64
#define atomic_subtract_long		atomic_subtract_64
#define atomic_set_acq_long		atomic_set_acq_64
#define atomic_clear_acq_long		atomic_clear_acq_64
#define atomic_add_acq_long		atomic_add_acq_64
#define atomic_subtract_acq_long	atomic_subtract_acq_64
#define atomic_set_rel_long		atomic_set_rel_64
#define atomic_clear_rel_long		atomic_clear_rel_64
#define atomic_add_rel_long		atomic_add_rel_64
#define atomic_subtract_rel_long	atomic_subtract_rel_64

/*
 * Atomically compare the value stored at *p with cmpval and, if the
 * two values are equal, update the value of *p with newval.  Returns
 * zero if the compare failed, nonzero otherwise.
 */
static __inline int
atomic_cmpset_acq_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
{
	return ia64_cmpxchg_acq_32(p, cmpval, newval) == cmpval;
}

static __inline int
atomic_cmpset_rel_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
{
	return ia64_cmpxchg_rel_32(p, cmpval, newval) == cmpval;
}

static __inline int
atomic_cmpset_acq_64(volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
{
	return ia64_cmpxchg_acq_64(p, cmpval, newval) == cmpval;
}

static __inline int
atomic_cmpset_rel_64(volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
{
	return ia64_cmpxchg_rel_64(p, cmpval, newval) == cmpval;
}

#define atomic_cmpset_32		atomic_cmpset_acq_32
#define atomic_cmpset_64		atomic_cmpset_acq_64
#define atomic_cmpset_int		atomic_cmpset_32
#define atomic_cmpset_long		atomic_cmpset_64
#define atomic_cmpset_acq_int		atomic_cmpset_acq_32
#define atomic_cmpset_rel_int		atomic_cmpset_rel_32
#define atomic_cmpset_acq_long		atomic_cmpset_acq_64
#define atomic_cmpset_rel_long		atomic_cmpset_rel_64
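
/*
 * Usage sketch (illustrative, not part of the original header): a
 * minimal spinlock built from these primitives, assuming a lock word
 * that is initially zero.
 *
 *	volatile uint32_t lock;
 *
 *	while (!atomic_cmpset_acq_32(&lock, 0, 1))
 *		;				(spin until 0 -> 1 succeeds)
 *	(critical section)
 *	atomic_store_rel_32(&lock, 0);	(release: prior stores are visible)
 */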

static __inline int
atomic_cmpset_acq_ptr(volatile void *dst, void *exp, void *src)
{
	return atomic_cmpset_acq_long((volatile u_long *)dst,
				      (u_long)exp, (u_long)src);
}

static __inline int
atomic_cmpset_rel_ptr(volatile void *dst, void *exp, void *src)
{
	return atomic_cmpset_rel_long((volatile u_long *)dst,
				      (u_long)exp, (u_long)src);
}

#define atomic_cmpset_ptr	atomic_cmpset_acq_ptr

static __inline void *
atomic_load_acq_ptr(volatile void *p)
{
	return (void *)atomic_load_acq_long((volatile u_long *)p);
}

static __inline void
atomic_store_rel_ptr(volatile void *p, void *v)
{
	atomic_store_rel_long((volatile u_long *)p, (u_long)v);
}
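
/*
 * Typical publication pattern (illustrative; obj and shared are
 * hypothetical names): the producer initialises an object fully, then
 * publishes it with release semantics, so a consumer that loads the
 * pointer with acquire semantics also sees the initialised contents.
 *
 *	producer:  obj->field = 42;
 *	           atomic_store_rel_ptr(&shared, obj);
 *
 *	consumer:  struct obj *o = atomic_load_acq_ptr(&shared);
 */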

#define IA64_ATOMIC_PTR(NAME)					\
static __inline void						\
atomic_##NAME##_ptr(volatile void *p, uintptr_t v)		\
{								\
	atomic_##NAME##_long((volatile u_long *)p, v);		\
}								\
								\
static __inline void						\
atomic_##NAME##_acq_ptr(volatile void *p, uintptr_t v)		\
{								\
	atomic_##NAME##_acq_long((volatile u_long *)p, v);	\
}								\
								\
static __inline void						\
atomic_##NAME##_rel_ptr(volatile void *p, uintptr_t v)		\
{								\
	atomic_##NAME##_rel_long((volatile u_long *)p, v);	\
}

IA64_ATOMIC_PTR(set)
IA64_ATOMIC_PTR(clear)
IA64_ATOMIC_PTR(add)
IA64_ATOMIC_PTR(subtract)

#undef IA64_ATOMIC_PTR

static __inline uint32_t
atomic_readandclear_32(volatile uint32_t* p)
{
	uint32_t val;

	do {
		val = *p;
	} while (!atomic_cmpset_32(p, val, 0));
	return val;
}

static __inline uint64_t
atomic_readandclear_64(volatile uint64_t* p)
{
	uint64_t val;

	do {
		val = *p;
	} while (!atomic_cmpset_64(p, val, 0));
	return val;
}

#define atomic_readandclear_int		atomic_readandclear_32
#define atomic_readandclear_long	atomic_readandclear_64
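
/*
 * Example (illustrative): drain a word of pending event flags without
 * losing a bit that another CPU sets concurrently.
 *
 *	uint32_t pending = atomic_readandclear_32(&event_mask);
 *	(handle each bit set in pending)
 */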


/*
 * Some bit operations.  These treat addr as an array of 32-bit words;
 * num selects the word (num >> 5) and the bit within it (num & 31).
 * SWAP() is assumed to be supplied by another header; it adjusts a
 * 32-bit mask for the byte order in which these bitmaps are stored.
 */

static inline void
set_bit(int num, volatile void *addr)
{
	uint32_t bit, b, old, new;
	volatile uint32_t *p;

	p = (volatile uint32_t *)addr + (num >> 5);
	b = 1 << (num & 31);
	bit = SWAP(b);
	do {
		old = *p;
		new = old | bit;
	} while (ia64_cmpxchg_acq_32(p, old, new) != old);
}

static inline void
clear_bit(int num, volatile void *addr)
{
	uint32_t mask, m, old, new;
	volatile uint32_t *p;

	p = (volatile uint32_t *)addr + (num >> 5);
	m = ~(1 << (num & 31));
	mask = SWAP(m);
	do {
		old = *p;
		new = old & mask;
	} while (ia64_cmpxchg_acq_32(p, old, new) != old);
}

static inline int
test_bit(int num, const volatile void *addr)
{
	uint32_t val = SWAP(1);

	return val & (((const volatile uint32_t *)addr)[num >> 5] >> (num & 31));
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * num: Bit to set
 * addr: Address to count from
 */
static inline int
test_and_set_bit(int num, volatile void *addr)
{
	uint32_t bit, b, old, new;
	volatile uint32_t *m;

	m = (volatile uint32_t *)addr + (num >> 5);
	b = 1 << (num & 31);
	bit = SWAP(b);
	do {
		old = *m;
		new = old | bit;
	} while (ia64_cmpxchg_acq_32(m, old, new) != old);
	return (old & bit) != 0;
}
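
/*
 * Example (illustrative): because the previous bit value is returned,
 * test_and_set_bit can arbitrate a one-time claim between CPUs.
 *
 *	if (!test_and_set_bit(slot, bitmap))
 *		(the bit was clear before, so this CPU owns the slot)
 */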

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * num: Bit to clear
 * addr: Address to count from
 */
static inline int
test_and_clear_bit(int num, volatile unsigned long *addr)
{
	uint32_t mask, m, old, new;
	volatile uint32_t *a;

	a = (volatile uint32_t *)addr + (num >> 5);
	m = ~(1 << (num & 31));
	mask = SWAP(m);
	do {
		old = *a;
		new = old & mask;
	} while (ia64_cmpxchg_acq_32(a, old, new) != old);
	return (old & ~mask) != 0;
}


#endif /* !defined(__ASSEMBLY__) */

#endif /* ! _MACHINE_ATOMIC_H_ */