#ifndef _ASM_IA64_SPINLOCK_H
#define _ASM_IA64_SPINLOCK_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * This file is used for SMP configurations only.
 */

#include <linux/compiler.h>
#include <linux/kernel.h>

#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/intrinsics.h>
#include <asm/system.h>

#define DEBUG_SPINLOCK

typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
#ifdef DEBUG_SPINLOCK
	void *locker;
#endif
#ifdef XEN
	unsigned char recurse_cpu;
	unsigned char recurse_cnt;
#endif
} spinlock_t;

#define SPIN_LOCK_UNLOCKED	/*(spinlock_t)*/ { 0 }
#define spin_lock_init(x)	((x)->lock = 0)
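
/*
 * A minimal usage sketch (illustrative only -- callers normally take
 * these locks through the generic spin_lock()/spin_unlock() wrappers
 * rather than the _raw_* operations defined below; "my_lock" is a
 * hypothetical example name):
 *
 *	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 *	spin_lock(&my_lock);
 *	... critical section ...
 *	spin_unlock(&my_lock);
 */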

#ifdef ASM_SUPPORTED
/*
 * Try to get the lock.  If we fail to get the lock, make a non-standard call to
 * ia64_spinlock_contention().  We do not use a normal call because that would force all
 * callers of spin_lock() to be non-leaf routines.  Instead, ia64_spinlock_contention() is
 * carefully coded to touch only those registers that spin_lock() marks "clobbered".
 */

#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"

static inline void
_raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
{
	register volatile unsigned int *ptr asm ("r31") = &lock->lock;

#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
# ifdef CONFIG_ITANIUM
	/* don't use brl on Itanium... */
	asm volatile ("{\n\t"
		      "  mov ar.ccv = r0\n\t"
		      "  mov r28 = ip\n\t"
		      "  mov r30 = 1;;\n\t"
		      "}\n\t"
		      "cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t"
		      "movl r29 = ia64_spinlock_contention_pre3_4;;\n\t"
		      "cmp4.ne p14, p0 = r30, r0\n\t"
		      "mov b6 = r29;;\n\t"
		      "mov r27=%2\n\t"
		      "(p14) br.cond.spnt.many b6"
		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# else
	asm volatile ("{\n\t"
		      "  mov ar.ccv = r0\n\t"
		      "  mov r28 = ip\n\t"
		      "  mov r30 = 1;;\n\t"
		      "}\n\t"
		      "cmpxchg4.acq r30 = [%1], r30, ar.ccv;;\n\t"
		      "cmp4.ne p14, p0 = r30, r0\n\t"
		      "mov r27=%2\n\t"
		      "(p14) brl.cond.spnt.many ia64_spinlock_contention_pre3_4;;"
		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# endif /* CONFIG_MCKINLEY */
#else
# ifdef CONFIG_ITANIUM
	/* don't use brl on Itanium... */
	/* mis-declare, so we get the entry-point, not its function descriptor: */
	asm volatile ("mov r30 = 1\n\t"
		      "mov r27=%2\n\t"
		      "mov ar.ccv = r0;;\n\t"
		      "cmpxchg4.acq r30 = [%0], r30, ar.ccv\n\t"
		      "movl r29 = ia64_spinlock_contention;;\n\t"
		      "cmp4.ne p14, p0 = r30, r0\n\t"
		      "mov b6 = r29;;\n\t"
		      "(p14) br.call.spnt.many b6 = b6"
		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# else
	asm volatile ("mov r30 = 1\n\t"
		      "mov r27=%2\n\t"
		      "mov ar.ccv = r0;;\n\t"
		      "cmpxchg4.acq r30 = [%0], r30, ar.ccv;;\n\t"
		      "cmp4.ne p14, p0 = r30, r0\n\t"
		      "(p14) brl.call.spnt.many b6=ia64_spinlock_contention;;"
		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# endif /* CONFIG_MCKINLEY */
#endif

#ifdef DEBUG_SPINLOCK
	asm volatile ("mov %0=ip" : "=r" (lock->locker));
#endif
}
#define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0)
#else /* !ASM_SUPPORTED */
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
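/*
 * Plain-C fallback: a test-and-test-and-set loop.  Try the cmpxchg once;
 * on contention, spin on plain reads of the lock word (cheap and
 * cache-local) and retry the atomic only once the lock reads as free.
 */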
# define _raw_spin_lock(x)								\
do {											\
	__u32 *ia64_spinlock_ptr = (__u32 *) (x);					\
	__u64 ia64_spinlock_val;							\
	ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);			\
	if (unlikely(ia64_spinlock_val)) {						\
		do {									\
			while (*ia64_spinlock_ptr)					\
				ia64_barrier();						\
			ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);	\
		} while (ia64_spinlock_val);						\
	}										\
} while (0)
#endif /* !ASM_SUPPORTED */

#define spin_is_locked(x)	((x)->lock != 0)
#define _raw_spin_unlock(x)	do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
#define _raw_spin_trylock(x)	(cmpxchg_acq(&(x)->lock, 0, 1) == 0)
#define spin_unlock_wait(x)	do { barrier(); } while ((x)->lock)
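
/*
 * Hedged trylock sketch (illustrative only; real callers use the generic
 * spin_trylock() wrapper, which builds on _raw_spin_trylock()):
 *
 *	spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 *	if (spin_trylock(&my_lock)) {
 *		... got the lock, do the work ...
 *		spin_unlock(&my_lock);
 *	} else {
 *		... contended: back off or do something else ...
 *	}
 */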

typedef struct {
	volatile unsigned int read_counter	: 31;
	volatile unsigned int write_lock	:  1;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;
#define RW_LOCK_UNLOCKED	/*(rwlock_t)*/ { 0, 0 }

#define rwlock_init(x)		do { *(x) = (rwlock_t) RW_LOCK_UNLOCKED; } while(0)
#define read_can_lock(rw)	(*(volatile int *)(rw) >= 0)
#define write_can_lock(rw)	(*(volatile int *)(rw) == 0)

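/*
 * Reader acquire: optimistically fetchadd +1 on the lock word; a negative
 * result means a writer holds the lock (bit 31 set), so undo the increment
 * and spin on plain reads until the writer is gone before retrying.
 */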
#define _raw_read_lock(rw)							\
do {										\
	rwlock_t *__read_lock_ptr = (rw);					\
										\
	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {	\
		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);		\
		while (*(volatile int *)__read_lock_ptr < 0)			\
			cpu_relax();						\
	}									\
} while (0)

#define _raw_read_unlock(rw)					\
do {								\
	rwlock_t *__read_lock_ptr = (rw);			\
	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);	\
} while (0)
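
/*
 * Reader/writer usage sketch (illustrative only; normal code goes through
 * the generic read_lock()/write_lock() wrappers, and "my_rwlock" is a
 * hypothetical example name):
 *
 *	static rwlock_t my_rwlock = RW_LOCK_UNLOCKED;
 *
 *	read_lock(&my_rwlock);
 *	... any number of concurrent readers ...
 *	read_unlock(&my_rwlock);
 *
 *	write_lock(&my_rwlock);
 *	... exactly one writer, no readers ...
 *	write_unlock(&my_rwlock);
 */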

#ifdef ASM_SUPPORTED
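/*
 * Writer acquire: spin until the whole lock word reads 0 (no readers, no
 * writer), then try to cmpxchg it from 0 to 0x80000000 (the write_lock
 * bit); on failure, go back to spinning.
 */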
#define _raw_write_lock(rw)							\
do {										\
	__asm__ __volatile__ (							\
		"mov ar.ccv = r0\n"						\
		"dep r29 = -1, r0, 31, 1;;\n"					\
		"1:\n"								\
		"ld4 r2 = [%0];;\n"						\
		"cmp4.eq p0,p7 = r0,r2\n"					\
		"(p7) br.cond.spnt.few 1b \n"					\
		"cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"			\
		"cmp4.eq p0,p7 = r0, r2\n"					\
		"(p7) br.cond.spnt.few 1b;;\n"					\
		:: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory");		\
} while(0)

#define _raw_write_trylock(rw)							\
({										\
	register long result;							\
										\
	__asm__ __volatile__ (							\
		"mov ar.ccv = r0\n"						\
		"dep r29 = -1, r0, 31, 1;;\n"					\
		"cmpxchg4.acq %0 = [%1], r29, ar.ccv\n"				\
		: "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory");		\
	(result == 0);								\
})

#else /* !ASM_SUPPORTED */

#define _raw_write_lock(l)								\
({											\
	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
	__u32 *ia64_write_lock_ptr = (__u32 *) (l);					\
	do {										\
		while (*ia64_write_lock_ptr)						\
			ia64_barrier();							\
		ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0);	\
	} while (ia64_val);								\
})

#define _raw_write_trylock(rw)						\
({									\
	__u64 ia64_val;							\
	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
	ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0);	\
	(ia64_val == 0);						\
})

#endif /* !ASM_SUPPORTED */

#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)

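/*
 * Bit 31 of the lock word is the write_lock bitfield declared above, so
 * clearing it releases the writer while leaving the 31-bit reader count
 * untouched.
 */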
#define _raw_write_unlock(x)								\
({											\
	smp_mb__before_clear_bit();	/* need barrier before releasing lock... */	\
	clear_bit(31, (x));								\
})

#ifdef XEN
#include <asm/xenspinlock.h>
#endif
#endif /* _ASM_IA64_SPINLOCK_H */