source: trunk/packages/xen-common/xen-common/xen/include/asm-x86/spinlock.h @ 34

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <xen/config.h>
#include <xen/lib.h>
#include <asm/atomic.h>
#include <asm/rwlock.h>

typedef struct {
    volatile s16 lock;
    s8 recurse_cpu;
    u8 recurse_cnt;
} spinlock_t;

#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { 1, -1, 0 }

#define spin_lock_init(x)       do { *(x) = (spinlock_t) SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(x)       (*(volatile char *)(&(x)->lock) <= 0)

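/*
 * Illustrative usage (not part of the original header): the low byte of
 * ->lock starts at 1, meaning "free"; _raw_spin_lock() (below) decrements
 * it, so 0 means "held" and a negative value means waiters are contending.
 */
#if 0   /* example only */
static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;

static void spin_example(void)
{
    spin_lock_init(&example_lock);          /* reset to { 1, -1, 0 } */
    _raw_spin_lock(&example_lock);          /* ->lock byte: 1 -> 0 */
    ASSERT(spin_is_locked(&example_lock));  /* true: byte <= 0 */
    _raw_spin_unlock(&example_lock);        /* ->lock byte: back to 1 */
}
#endif
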
static inline void _raw_spin_lock(spinlock_t *lock)
{
    __asm__ __volatile__ (
        "1:  lock; decb %0         \n"
        "    js 2f                 \n"
        ".section .text.lock,\"ax\"\n"
        "2:  cmpb $0,%0            \n"
        "    rep; nop              \n"
        "    jle 2b                \n"
        "    jmp 1b                \n"
        ".previous"
        : "=m" (lock->lock) : : "memory" );
}

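/*
 * The asm above is a byte spinlock: "lock; decb" atomically drops the byte
 * from 1 to 0 to acquire; a negative result means the lock was already
 * held, so the out-of-line slow path (in section .text.lock) spins --
 * "rep; nop" is the PAUSE hint -- until the byte goes positive, then
 * retries the decrement. A portable sketch of the same protocol using GCC
 * __sync builtins (illustration only, not the Xen implementation):
 */
#if 0   /* example only */
static inline void example_spin_lock(spinlock_t *lock)
{
    for ( ; ; )
    {
        /* Atomically decrement the lock byte; >= 0 means it was free. */
        if ( __sync_sub_and_fetch((volatile char *)&lock->lock, 1) >= 0 )
            return;
        /* Contended: wait until the owner stores 1 on unlock. */
        while ( *(volatile char *)&lock->lock <= 0 )
            __asm__ __volatile__ ( "rep; nop" );   /* PAUSE */
    }
}
#endif
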
static inline void _raw_spin_unlock(spinlock_t *lock)
{
#if !defined(CONFIG_X86_OOSTORE)
    /* With x86's ordered stores, a plain byte store releases the lock. */
    ASSERT(spin_is_locked(lock));
    __asm__ __volatile__ (
        "movb $1,%0"
        : "=m" (lock->lock) : : "memory" );
#else
    /* Out-of-order-store CPUs need a serialising xchg to release. */
    char oldval = 1;
    ASSERT(spin_is_locked(lock));
    __asm__ __volatile__ (
        "xchgb %b0, %1"
        : "=q" (oldval), "=m" (lock->lock) : "0" (oldval) : "memory" );
#endif
}

static inline int _raw_spin_trylock(spinlock_t *lock)
{
    char oldval;
    __asm__ __volatile__(
        "xchgb %b0,%1"
        :"=q" (oldval), "=m" (lock->lock)
        :"0" (0) : "memory");
    return oldval > 0;
}

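/*
 * _raw_spin_trylock() swaps 0 into the lock byte; a positive old value
 * means the lock was free and is now held by the caller. Illustrative
 * usage (not part of the original header):
 */
#if 0   /* example only */
    if ( _raw_spin_trylock(&example_lock) )
    {
        /* ... critical section ... */
        _raw_spin_unlock(&example_lock);
    }
#endif
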
/*
 * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
 * reentered recursively on the same CPU. All critical regions that may form
 * part of a recursively-nested set must be protected by these forms. If there
 * are any critical regions that cannot form part of such a set, they can use
 * standard spin_[un]lock().
 */
#define _raw_spin_lock_recursive(_lock)            \
    do {                                           \
        int cpu = smp_processor_id();              \
        if ( likely((_lock)->recurse_cpu != cpu) ) \
        {                                          \
            spin_lock(_lock);                      \
            (_lock)->recurse_cpu = cpu;            \
        }                                          \
        (_lock)->recurse_cnt++;                    \
    } while ( 0 )

#define _raw_spin_unlock_recursive(_lock)          \
    do {                                           \
        if ( likely(--(_lock)->recurse_cnt == 0) ) \
        {                                          \
            (_lock)->recurse_cpu = -1;             \
            spin_unlock(_lock);                    \
        }                                          \
    } while ( 0 )


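/*
 * Illustrative nesting (not part of the original header); spin_lock() and
 * spin_unlock() are the generic wrappers defined outside this header. Only
 * the first acquisition on a CPU takes the underlying lock; only the last
 * release drops it:
 */
#if 0   /* example only */
    _raw_spin_lock_recursive(&example_lock);   /* takes lock, recurse_cnt = 1 */
    _raw_spin_lock_recursive(&example_lock);   /* same CPU re-enters, cnt = 2 */
    _raw_spin_unlock_recursive(&example_lock); /* cnt = 1, lock still held */
    _raw_spin_unlock_recursive(&example_lock); /* cnt = 0, lock released */
#endif
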
typedef struct {
    volatile unsigned int lock;
} rwlock_t;

#define RW_LOCK_UNLOCKED /*(rwlock_t)*/ { RW_LOCK_BIAS }

#define rwlock_init(x)  do { *(x) = (rwlock_t) RW_LOCK_UNLOCKED; } while(0)

/*
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
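/*
 * Concretely (RW_LOCK_BIAS is defined in asm/rwlock.h): the counter holds
 * RW_LOCK_BIAS when the lock is free, RW_LOCK_BIAS - n while n readers
 * hold it, and 0 while a writer holds it. A writer subtracts the whole
 * bias, so any reader/writer overlap drives the counter negative and the
 * sign bit diverts the loser to __read_lock_failed/__write_lock_failed.
 */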
static inline void _raw_read_lock(rwlock_t *rw)
{
    __build_read_lock(rw, "__read_lock_failed");
}

static inline void _raw_write_lock(rwlock_t *rw)
{
    __build_write_lock(rw, "__write_lock_failed");
}

#define _raw_read_unlock(rw)                       \
    __asm__ __volatile__ (                         \
        "lock ; incl %0" :                         \
        "=m" ((rw)->lock) : : "memory" )
#define _raw_write_unlock(rw)                      \
    __asm__ __volatile__ (                         \
        "lock ; addl $" RW_LOCK_BIAS_STR ",%0" :   \
        "=m" ((rw)->lock) : : "memory" )

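/*
 * Illustrative pairing (not part of the original header): readers share,
 * a writer is exclusive, and unlock adds back exactly what lock subtracted.
 */
#if 0   /* example only */
static rwlock_t example_rw = RW_LOCK_UNLOCKED;

static void rw_example(void)
{
    _raw_read_lock(&example_rw);     /* counter: BIAS -> BIAS - 1 */
    _raw_read_lock(&example_rw);     /* second reader: BIAS - 2 */
    _raw_read_unlock(&example_rw);
    _raw_read_unlock(&example_rw);   /* back to BIAS (unlocked) */

    _raw_write_lock(&example_rw);    /* BIAS -> 0: exclusive */
    _raw_write_unlock(&example_rw);  /* adds BIAS back */
}
#endif
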
#endif /* __ASM_SPINLOCK_H */