/******************************************************************************
 * os.h
 *
 * random collection of macros and definitions
 */

#ifndef _OS_H_
#define _OS_H_

#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
#define __builtin_expect(x, expected_value) (x)
#endif
#define unlikely(x) __builtin_expect((x),0)

#define smp_processor_id() 0

#ifndef __ASSEMBLY__
#include <types.h>
#include <hypervisor.h>

#define USED __attribute__ ((used))

extern void do_exit(void);
#define BUG do_exit

#endif
#include <xen/xen.h>

#define force_evtchn_callback() ((void)HYPERVISOR_xen_version(0, 0))

#define __KERNEL_CS  FLAT_KERNEL_CS
#define __KERNEL_DS  FLAT_KERNEL_DS
#define __KERNEL_SS  FLAT_KERNEL_SS

#define TRAP_divide_error      0
#define TRAP_debug             1
#define TRAP_nmi               2
#define TRAP_int3              3
#define TRAP_overflow          4
#define TRAP_bounds            5
#define TRAP_invalid_op        6
#define TRAP_no_device         7
#define TRAP_double_fault      8
#define TRAP_copro_seg         9
#define TRAP_invalid_tss      10
#define TRAP_no_segment       11
#define TRAP_stack_error      12
#define TRAP_gp_fault         13
#define TRAP_page_fault       14
#define TRAP_spurious_int     15
#define TRAP_copro_error      16
#define TRAP_alignment_check  17
#define TRAP_machine_check    18
#define TRAP_simd_error       19
#define TRAP_deferred_nmi     31

/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__

extern shared_info_t *HYPERVISOR_shared_info;

void trap_init(void);

void arch_init(start_info_t *si);
void arch_print_info(void);

/*
 * The use of 'barrier' in the following reflects their use as local-lock
 * operations. Reentrancy must be prevented (e.g., __cli()) /before/ the
 * following critical operations are executed. All critical operations must
 * complete /before/ reentrancy is permitted (e.g., __sti()). The Alpha
 * architecture also includes these barriers, for example.
 */

#define __cli()                                                             \
do {                                                                        \
    vcpu_info_t *_vcpu;                                                     \
    _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];         \
    _vcpu->evtchn_upcall_mask = 1;                                          \
    barrier();                                                              \
} while (0)

#define __sti()                                                             \
do {                                                                        \
    vcpu_info_t *_vcpu;                                                     \
    barrier();                                                              \
    _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];         \
    _vcpu->evtchn_upcall_mask = 0;                                          \
    barrier(); /* unmask then check (avoid races) */                        \
    if ( unlikely(_vcpu->evtchn_upcall_pending) )                           \
        force_evtchn_callback();                                            \
} while (0)
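
/*
 * Usage sketch (illustrative, not part of the original header): mask event
 * delivery around a short critical section on the local vcpu.
 *
 *     __cli();
 *     shared_counter++;   // 'shared_counter' is a placeholder
 *     __sti();
 *
 * This only masks this vcpu's event upcalls; it is not an SMP lock.
 */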

#define __save_flags(x)                                                     \
do {                                                                        \
    vcpu_info_t *_vcpu;                                                     \
    _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];         \
    (x) = _vcpu->evtchn_upcall_mask;                                        \
} while (0)

#define __restore_flags(x)                                                  \
do {                                                                        \
    vcpu_info_t *_vcpu;                                                     \
    barrier();                                                              \
    _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];         \
    if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {                           \
        barrier(); /* unmask then check (avoid races) */                    \
        if ( unlikely(_vcpu->evtchn_upcall_pending) )                       \
            force_evtchn_callback();                                        \
    }                                                                       \
} while (0)

#define safe_halt() ((void)0)

#define __save_and_cli(x)                                                   \
do {                                                                        \
    vcpu_info_t *_vcpu;                                                     \
    _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];         \
    (x) = _vcpu->evtchn_upcall_mask;                                        \
    _vcpu->evtchn_upcall_mask = 1;                                          \
    barrier();                                                              \
} while (0)

#define local_irq_save(x)     __save_and_cli(x)
#define local_irq_restore(x)  __restore_flags(x)
#define local_save_flags(x)   __save_flags(x)
#define local_irq_disable()   __cli()
#define local_irq_enable()    __sti()
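
/*
 * Usage sketch (illustrative): unlike __cli()/__sti(), the save/restore
 * pair nests safely, because it restores the previous mask state rather
 * than unconditionally re-enabling events.
 *
 *     unsigned long flags;
 *
 *     local_irq_save(flags);
 *     // ... critical section ...
 *     local_irq_restore(flags);
 */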

#define irqs_disabled()                                                     \
    HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].evtchn_upcall_mask

/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")

#if defined(__i386__)
#define mb()   __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#define rmb()  __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#define wmb()  __asm__ __volatile__ ("": : :"memory")
#elif defined(__x86_64__)
#define mb()   __asm__ __volatile__ ("mfence":::"memory")
#define rmb()  __asm__ __volatile__ ("lfence":::"memory")
#define wmb()  __asm__ __volatile__ ("sfence" ::: "memory") /* From CONFIG_UNORDERED_IO (linux) */
#endif
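
/*
 * Illustrative producer/consumer sketch (not from the original header):
 * order a payload write before the flag write that publishes it. 'data',
 * 'ready', 'compute' and 'consume' are placeholders.
 *
 *     data = compute();
 *     wmb();                  // payload reaches memory before the flag
 *     ready = 1;
 *
 * and on the reading side:
 *
 *     if (ready) {
 *         rmb();              // read the flag before reading the payload
 *         consume(data);
 *     }
 */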

#define LOCK_PREFIX ""
#define LOCK ""
#define ADDR (*(volatile long *) addr)
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;


/************************** i386 *******************************/
#if defined(__i386__)

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
    switch (size) {
    case 1:
        __asm__ __volatile__("xchgb %b0,%1"
                             :"=q" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 2:
        __asm__ __volatile__("xchgw %w0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 4:
        __asm__ __volatile__("xchgl %0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    }
    return x;
}
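
/*
 * Usage sketch (illustrative): xchg on a memory operand is locked by the
 * processor even without an explicit lock prefix, so it can implement a
 * simple test-and-set lock. 'lock_word' is a placeholder.
 *
 *     static volatile unsigned long lock_word;
 *
 *     while (xchg(&lock_word, 1) != 0)
 *         ;                       // spin until we stored the 1
 *     // ... critical section ...
 *     lock_word = 0;              // release
 */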

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86, although it
 * may be reordered on other architectures. It also implies a memory
 * barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
    int oldbit;

    __asm__ __volatile__( LOCK
        "btrl %2,%1\n\tsbbl %0,%0"
        :"=r" (oldbit),"=m" (ADDR)
        :"Ir" (nr) : "memory");
    return oldbit;
}
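
/*
 * Usage sketch (illustrative): atomically consume a pending flag, so that
 * exactly one caller sees it set. 'pending' is a placeholder bitmap.
 *
 *     static unsigned long pending;
 *
 *     if (test_and_clear_bit(0, &pending)) {
 *         // the bit was set and we were the ones to clear it
 *     }
 */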

static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
{
    return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
}

static inline int variable_test_bit(int nr, const volatile unsigned long *addr)
{
    int oldbit;

    __asm__ __volatile__(
        "btl %2,%1\n\tsbbl %0,%0"
        :"=r" (oldbit)
        :"m" (ADDR),"Ir" (nr));
    return oldbit;
}

#define test_bit(nr,addr)                   \
    (__builtin_constant_p(nr) ?             \
     constant_test_bit((nr),(addr)) :       \
     variable_test_bit((nr),(addr)))

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
    __asm__ __volatile__( LOCK
        "btsl %1,%0"
        :"=m" (ADDR)
        :"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
    __asm__ __volatile__( LOCK
        "btrl %1,%0"
        :"=m" (ADDR)
        :"Ir" (nr));
}
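
/*
 * Usage sketch (illustrative): a small flag bitmap driven by these
 * primitives. 'flags_word' is a placeholder.
 *
 *     static unsigned long flags_word;
 *
 *     set_bit(3, &flags_word);            // atomically set bit 3
 *     if (test_bit(3, &flags_word))
 *         clear_bit(3, &flags_word);      // atomically clear it again
 */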

/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
    __asm__("bsfl %1,%0"
            :"=r" (word)
            :"rm" (word));
    return word;
}
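
/*
 * Usage sketch (illustrative): visit every set bit in a word, lowest
 * first; the usual pattern for scanning pending-event bitmaps. 'mask'
 * and 'handle' are placeholders.
 *
 *     unsigned long bits = mask;
 *
 *     while (bits != 0) {
 *         unsigned long i = __ffs(bits);
 *         bits &= ~(1UL << i);
 *         handle(i);
 *     }
 */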

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
#define ADDR (*(volatile long *) addr)

#define rdtscll(val) \
    __asm__ __volatile__("rdtsc" : "=A" (val))

#elif defined(__x86_64__) /* ifdef __i386__ */
/************************** x86_64 *******************************/

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
#define __xg(x) ((volatile long *)(x))
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
    switch (size) {
    case 1:
        __asm__ __volatile__("xchgb %b0,%1"
                             :"=q" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 2:
        __asm__ __volatile__("xchgw %w0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 4:
        __asm__ __volatile__("xchgl %k0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 8:
        __asm__ __volatile__("xchgq %0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    }
    return x;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void *addr)
{
    int oldbit;

    __asm__ __volatile__( LOCK_PREFIX
        "btrl %2,%1\n\tsbbl %0,%0"
        :"=r" (oldbit),"=m" (ADDR)
        :"dIr" (nr) : "memory");
    return oldbit;
}

static __inline__ int constant_test_bit(int nr, const volatile void *addr)
{
    return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int variable_test_bit(int nr, volatile const void *addr)
{
    int oldbit;

    __asm__ __volatile__(
        "btl %2,%1\n\tsbbl %0,%0"
        :"=r" (oldbit)
        :"m" (ADDR),"dIr" (nr));
    return oldbit;
}

#define test_bit(nr,addr)                   \
    (__builtin_constant_p(nr) ?             \
     constant_test_bit((nr),(addr)) :       \
     variable_test_bit((nr),(addr)))

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void *addr)
{
    __asm__ __volatile__( LOCK_PREFIX
        "btsl %1,%0"
        :"=m" (ADDR)
        :"dIr" (nr) : "memory");
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void *addr)
{
    __asm__ __volatile__( LOCK_PREFIX
        "btrl %1,%0"
        :"=m" (ADDR)
        :"dIr" (nr));
}

/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long __ffs(unsigned long word)
{
    __asm__("bsfq %1,%0"
            :"=r" (word)
            :"rm" (word));
    return word;
}

#define ADDR (*(volatile long *) addr)

#define rdtscll(val) do {                                           \
    unsigned int __a,__d;                                           \
    asm volatile("rdtsc" : "=a" (__a), "=d" (__d));                 \
    (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32);      \
} while(0)
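
/*
 * Usage sketch (illustrative): take a rough cycle-count measurement.
 * rdtsc is not a serializing instruction, so treat the delta as an
 * approximation.
 *
 *     unsigned long t0, t1;
 *
 *     rdtscll(t0);
 *     // ... code under test ...
 *     rdtscll(t1);
 *     // (t1 - t0) is the approximate elapsed cycle count
 */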

#define wrmsr(msr,val1,val2)                    \
    __asm__ __volatile__("wrmsr"                \
                         : /* no outputs */     \
                         : "c" (msr), "a" (val1), "d" (val2))

#define wrmsrl(msr,val) wrmsr(msr,(u32)((u64)(val)),((u64)(val))>>32)
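
/*
 * Illustrative note: wrmsrl() just splits a 64-bit value into the EDX:EAX
 * pair that the wrmsr instruction expects:
 *
 *     wrmsrl(msr, 0x123456789ULL);
 *     // same as wrmsr(msr, 0x23456789, 0x1): low word in EAX, high in EDX
 *
 * Under a Xen PV guest, MSR writes trap to the hypervisor, which may
 * filter or ignore them.
 */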

#else /* ifdef __x86_64__ */
#error "Unsupported architecture"
#endif

/********************* common i386 and x86_64 ****************************/
struct __synch_xchg_dummy { unsigned long a[100]; };
#define __synch_xg(x) ((struct __synch_xchg_dummy *)(x))

#define synch_cmpxchg(ptr, old, new)                            \
    ((__typeof__(*(ptr)))__synch_cmpxchg((ptr),                 \
                                         (unsigned long)(old),  \
                                         (unsigned long)(new),  \
                                         sizeof(*(ptr))))

static inline unsigned long __synch_cmpxchg(volatile void *ptr,
                                            unsigned long old,
                                            unsigned long new, int size)
{
    unsigned long prev;
    switch (size) {
    case 1:
        __asm__ __volatile__("lock; cmpxchgb %b1,%2"
                             : "=a"(prev)
                             : "q"(new), "m"(*__synch_xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
    case 2:
        __asm__ __volatile__("lock; cmpxchgw %w1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__synch_xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
#ifdef __x86_64__
    case 4:
        __asm__ __volatile__("lock; cmpxchgl %k1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__synch_xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
    case 8:
        __asm__ __volatile__("lock; cmpxchgq %1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__synch_xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
#else
    case 4:
        __asm__ __volatile__("lock; cmpxchgl %1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__synch_xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
#endif
    }
    return old;
}
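
/*
 * Usage sketch (illustrative): the classic compare-and-swap retry loop,
 * here atomically incrementing a shared counter. 'counter' is a
 * placeholder.
 *
 *     static unsigned int counter;
 *     unsigned int old, seen;
 *
 *     do {
 *         old  = counter;
 *         seen = synch_cmpxchg(&counter, old, old + 1);
 *     } while (seen != old);      // retry if another cpu raced with us
 */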

static __inline__ void synch_set_bit(int nr, volatile void *addr)
{
    __asm__ __volatile__ (
        "lock btsl %1,%0"
        : "=m" (ADDR) : "Ir" (nr) : "memory" );
}

static __inline__ void synch_clear_bit(int nr, volatile void *addr)
{
    __asm__ __volatile__ (
        "lock btrl %1,%0"
        : "=m" (ADDR) : "Ir" (nr) : "memory" );
}

static __inline__ int synch_test_and_set_bit(int nr, volatile void *addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "lock btsl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}

static __inline__ int synch_test_and_clear_bit(int nr, volatile void *addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "lock btrl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}

static __inline__ int synch_const_test_bit(int nr, const volatile void *addr)
{
    return ((1UL << (nr & 31)) &
            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int synch_var_test_bit(int nr, volatile void *addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "btl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
    return oldbit;
}

#define synch_test_bit(nr,addr)                 \
    (__builtin_constant_p(nr) ?                 \
     synch_const_test_bit((nr),(addr)) :        \
     synch_var_test_bit((nr),(addr)))
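
/*
 * Usage sketch (illustrative): the synch_* operations always carry a lock
 * prefix, which makes them suitable for memory shared with the hypervisor
 * or other domains, e.g. the event channel pending bitmap. 'port' is a
 * placeholder event channel number.
 *
 *     if (synch_test_and_clear_bit(port,
 *             &HYPERVISOR_shared_info->evtchn_pending[0]))
 *         ;   // this event was pending and we consumed it
 */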

#endif /* not assembly */
#endif /* _OS_H_ */