source: trunk/packages/xen-common/xen-common/xen/include/asm-x86/bitops.h @ 34
#ifndef _X86_BITOPS_H
#define _X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <xen/config.h>

#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif

/*
 * We specify the memory operand as both input and output because the memory
 * operand is both read from and written to. Since the operand is in fact a
 * word array, we also specify "memory" in the clobbers list to indicate that
 * words other than the one directly addressed by the memory operand may be
 * modified. We don't use "+m" because the gcc manual says that it should be
 * used only when the constraint allows the operand to reside in a register.
 */

#define ADDR (*(volatile long *) addr)
#define CONST_ADDR (*(const volatile long *) addr)

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btsl %1,%0"
                :"=m" (ADDR)
                :"dIr" (nr), "m" (ADDR) : "memory");
}

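/*
 * Editor's illustrative sketch (not part of the original file): marking a
 * bit in a shared bitmap. The wrapper name is hypothetical. Because
 * set_bit() is a locked read-modify-write, concurrent updates to other
 * bits in the same word are safe.
 */
static __inline__ void example_mark_pending(volatile unsigned long *bitmap,
                                            int nr)
{
        set_bit(nr, bitmap);
}
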
/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
        __asm__(
                "btsl %1,%0"
                :"=m" (ADDR)
                :"dIr" (nr), "m" (ADDR) : "memory");
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btrl %1,%0"
                :"=m" (ADDR)
                :"dIr" (nr), "m" (ADDR) : "memory");
}

/**
 * __clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __clear_bit(int nr, volatile void * addr)
{
        __asm__(
                "btrl %1,%0"
                :"=m" (ADDR)
                :"dIr" (nr), "m" (ADDR) : "memory");
}

#define smp_mb__before_clear_bit()      barrier()
#define smp_mb__after_clear_bit()       barrier()

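/*
 * Editor's illustrative sketch (not part of the original file): releasing a
 * hypothetical bit lock. The barrier keeps stores from inside the critical
 * section from being reordered past the clearing of the lock bit.
 */
static __inline__ void example_unlock_bit(volatile unsigned long *lockword)
{
        smp_mb__before_clear_bit();
        clear_bit(0, lockword);
}
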
/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__(
                "btcl %1,%0"
                :"=m" (ADDR)
                :"dIr" (nr), "m" (ADDR) : "memory");
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btcl %1,%0"
                :"=m" (ADDR)
                :"dIr" (nr), "m" (ADDR) : "memory");
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__( LOCK_PREFIX
                "btsl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"dIr" (nr), "m" (ADDR) : "memory");
        return oldbit;
}

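/*
 * Editor's illustrative sketch (not part of the original file): a try-lock
 * built on test_and_set_bit(). Returns 1 if bit 0 was previously clear,
 * i.e. if this caller acquired the hypothetical lock.
 */
static __inline__ int example_trylock_bit(volatile unsigned long *lockword)
{
        return !test_and_set_bit(0, lockword);
}
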
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__(
                "btsl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"dIr" (nr), "m" (ADDR) : "memory");
        return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__( LOCK_PREFIX
                "btrl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"dIr" (nr), "m" (ADDR) : "memory");
        return oldbit;
}

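/*
 * Editor's illustrative sketch (not part of the original file): consuming a
 * pending-work flag exactly once. Even if several CPUs race here, only the
 * one that observed the bit as set acts on it.
 */
static __inline__ int example_take_pending(volatile unsigned long *flags, int nr)
{
        return test_and_clear_bit(nr, flags);
}
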
/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__(
                "btrl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"dIr" (nr), "m" (ADDR) : "memory");
        return oldbit;
}

/* WARNING: non-atomic and can be reordered! */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__(
                "btcl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"dIr" (nr), "m" (ADDR) : "memory");
        return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__( LOCK_PREFIX
                "btcl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"dIr" (nr), "m" (ADDR) : "memory");
        return oldbit;
}

/* Helper for test_bit() when the bit number is a compile-time constant. */
static __inline__ int constant_test_bit(int nr, const volatile void * addr)
{
        return ((1U << (nr & 31)) &
                (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

/* Helper for test_bit() when the bit number is only known at run time. */
static __inline__ int variable_test_bit(int nr, const volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__(
                "btl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit)
                :"m" (CONST_ADDR),"dIr" (nr));
        return oldbit;
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))

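/*
 * Editor's illustrative sketch (not part of the original file): both calls
 * below are valid uses of test_bit(); the first dispatches to
 * constant_test_bit() at compile time, the second to variable_test_bit().
 */
static __inline__ int example_test_bits(const volatile unsigned long *map, int nr)
{
        return test_bit(5, map) || test_bit(nr, map);
}
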
extern unsigned int __find_first_bit(
    const unsigned long *addr, unsigned int size);
extern unsigned int __find_next_bit(
    const unsigned long *addr, unsigned int size, unsigned int offset);
extern unsigned int __find_first_zero_bit(
    const unsigned long *addr, unsigned int size);
extern unsigned int __find_next_zero_bit(
    const unsigned long *addr, unsigned int size, unsigned int offset);

/*
 * Return the index of the first set bit in val, or BITS_PER_LONG when no
 * bit is set.
 */
static inline unsigned int __scanbit(unsigned long val)
{
        /*
         * The "0" constraint preloads the result register with BITS_PER_LONG.
         * BSF leaves its destination unchanged when the source is zero (the
         * manuals call the result undefined, but this is the behaviour of
         * real x86 parts and is relied on here), so a zero input yields
         * BITS_PER_LONG.
         */
        __asm__ ( "bsf %1,%0" : "=r" (val) : "r" (val), "0" (BITS_PER_LONG) );
        return (unsigned int)val;
}

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr,size) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  (__scanbit(*(const unsigned long *)addr)) : \
  __find_first_bit(addr,size)))

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
#define find_next_bit(addr,size,off) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  ((off) + (__scanbit((*(const unsigned long *)addr) >> (off)))) : \
  __find_next_bit(addr,size,off)))

/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
#define find_first_zero_bit(addr,size) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  (__scanbit(~*(const unsigned long *)addr)) : \
  __find_first_zero_bit(addr,size)))

/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
#define find_next_zero_bit(addr,size,off) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  ((off)+(__scanbit(~(((*(const unsigned long *)addr)) >> (off))))) : \
  __find_next_zero_bit(addr,size,off)))

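/*
 * Editor's illustrative sketch (not part of the original file): visiting
 * every set bit in a bitmap with find_first_bit()/find_next_bit(). The
 * names and callback are hypothetical; nbits is a run-time value, so the
 * out-of-line __find_*_bit() helpers are what actually run.
 */
static __inline__ void example_for_each_set_bit(
    const unsigned long *bitmap, unsigned int nbits,
    void (*visit)(unsigned int))
{
        unsigned int bit = find_first_bit(bitmap, nbits);

        while (bit < nbits) {
                visit(bit);
                bit = find_next_bit(bitmap, nbits, bit + 1);
        }
}
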
/**
 * find_first_set_bit - find the first set bit in @word
 * @word: the word to search
 *
 * Returns the bit-number of the first set bit. If no bits are set then the
 * result is undefined.
 */
static __inline__ unsigned int find_first_set_bit(unsigned long word)
{
        __asm__ ( "bsf %1,%0" : "=r" (word) : "r" (word) );
        return (unsigned int)word;
}

/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
        __asm__("bsf %1,%0"
                :"=r" (word)
                :"r" (~word));
        return word;
}

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines; it therefore differs in spirit from ffz() above (man ffs).
 */
static inline int ffs(unsigned long x)
{
        long r;

        __asm__("bsf %1,%0\n\t"
                "jnz 1f\n\t"
                "mov $-1,%0\n"
                "1:" : "=r" (r) : "rm" (x));
        return (int)r+1;
}

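/*
 * Editor's illustrative sketch (not part of the original file): ffs() is
 * 1-based and returns 0 for a zero argument (the libc convention), whereas
 * find_first_set_bit() is 0-based and undefined for zero. For example,
 * ffs(0x8) == 4 while find_first_set_bit(0x8) == 3.
 */
static __inline__ int example_lowest_set_bit(unsigned long x)
{
        return x ? (int)find_first_set_bit(x) : -1;
}
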
/**
 * fls - find last bit set
 * @x: the word to search
 *
 * This is defined the same way as ffs.
 */
static inline int fls(unsigned long x)
{
        long r;

        __asm__("bsr %1,%0\n\t"
                "jnz 1f\n\t"
                "mov $-1,%0\n"
                "1:" : "=r" (r) : "rm" (x));
        return (int)r+1;
}

/**
 * hweightN - returns the hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */
#define hweight64(x) generic_hweight64(x)
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

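/*
 * Editor's note with a worked value: hweight8(0xA5) == 4, since
 * 0xA5 == 10100101 in binary has four bits set. The generic_hweightN()
 * helpers themselves are defined elsewhere, not in this header.
 */
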
#endif /* _X86_BITOPS_H */