source: trunk/packages/xen-common/xen-common/xen/include/asm-ia64/linux/seqlock.h @ 34

Last change on this file since 34 was 34, checked in by hartmans, 17 years ago

Add xen and xen-common

#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes.  Readers never
 * block but they may have to retry if a writer is in
 * progress. Writers do not wait for readers.
 *
 * This is not as cache friendly as brlock. Also, this will not work
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected reader usage:
 *      do {
 *          seq = read_seqbegin(&foo);
 *      ...
 *      } while (read_seqretry(&foo, seq));
 *
 *
 * On non-SMP the spin locks disappear but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */
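
/*
 * For symmetry with the reader example above, a matching writer sketch
 * (illustrative only; 'foo' is the same hypothetical seqlock_t):
 *
 *      write_seqlock(&foo);
 *      ...update the protected data...
 *      write_sequnlock(&foo);
 */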

#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/preempt.h>

typedef struct {
        unsigned sequence;
        spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems.  We think these are
 * OK now.  Be cautious.
 */
#define SEQLOCK_UNLOCKED { 0, SPIN_LOCK_UNLOCKED }
#define seqlock_init(x) do { *(x) = (seqlock_t) SEQLOCK_UNLOCKED; } while (0)
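
/*
 * Illustrative initialization (a sketch; 'foo' is a hypothetical lock,
 * not defined in this header):
 *
 *      static seqlock_t foo = SEQLOCK_UNLOCKED;
 * or, for a lock whose storage is set up at run time:
 *      seqlock_init(&foo);
 */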

/* Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void write_seqlock(seqlock_t *sl)
{
        spin_lock(&sl->lock);
        ++sl->sequence;
        smp_wmb();
}

static inline void write_sequnlock(seqlock_t *sl)
{
        smp_wmb();
        sl->sequence++;
        spin_unlock(&sl->lock);
}

static inline int write_tryseqlock(seqlock_t *sl)
{
        int ret = spin_trylock(&sl->lock);

        if (ret) {
                ++sl->sequence;
                smp_wmb();
        }
        return ret;
}
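
/*
 * Illustrative use of write_tryseqlock() (a sketch; 'foo' is a
 * hypothetical seqlock_t):
 *
 *      if (write_tryseqlock(&foo)) {
 *              ...update the protected data...
 *              write_sequnlock(&foo);
 *      }
 *
 * If the lock is contended, no update is made and the caller may retry
 * or fall back as it sees fit.
 */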

/* Start of read calculation -- fetch last complete writer token */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
        unsigned ret = sl->sequence;
        smp_rmb();
        return ret;
}

/* Test if reader processed invalid data.
 * If the initial value is odd,
 *      then a writer had already started when the section was entered.
 * If the sequence value changed,
 *      then a writer changed the data while in the section.
 *
 * Using xor saves one conditional branch.
 */
static inline int read_seqretry(const seqlock_t *sl, unsigned iv)
{
        smp_rmb();
        return (iv & 1) | (sl->sequence ^ iv);
}
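
/*
 * Putting both sides together, in the spirit of the gettimeofday code
 * this was based on (a sketch; 'tv', 'tv_lock' and the local variables
 * are hypothetical, not part of this header):
 *
 *      writer:
 *              write_seqlock(&tv_lock);
 *              tv.sec = new_sec;
 *              tv.nsec = new_nsec;
 *              write_sequnlock(&tv_lock);
 *
 *      reader:
 *              unsigned seq;
 *              do {
 *                      seq = read_seqbegin(&tv_lock);
 *                      sec = tv.sec;
 *                      nsec = tv.nsec;
 *              } while (read_seqretry(&tv_lock, seq));
 */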

/*
 * Version using sequence counter only.
 * This can be used when code has its own mutex protecting the
 * update, starting before write_seqcount_begin() and ending
 * after write_seqcount_end().
 */

typedef struct seqcount {
        unsigned sequence;
} seqcount_t;

#define SEQCNT_ZERO { 0 }
#define seqcount_init(x)        do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)
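
/*
 * Illustrative initialization (a sketch; 'my_seq' is hypothetical):
 *
 *      static seqcount_t my_seq = SEQCNT_ZERO;
 * or:
 *      seqcount_init(&my_seq);
 */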

/* Start of read using pointer to a sequence counter only.  */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
        unsigned ret = s->sequence;
        smp_rmb();
        return ret;
}

/* Test if reader processed invalid data.
 * Equivalent to: iv is odd or sequence number has changed.
 *                (iv & 1) || (*s != iv)
 * Using xor saves one conditional branch.
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned iv)
{
        smp_rmb();
        return (iv & 1) | (s->sequence ^ iv);
}


/*
 * Sequence counter only version assumes that callers are using their
 * own mutexing.
 */
static inline void write_seqcount_begin(seqcount_t *s)
{
        s->sequence++;
        smp_wmb();
}

static inline void write_seqcount_end(seqcount_t *s)
{
        smp_wmb();
        s->sequence++;
}
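
/*
 * Illustrative pairing with the caller's own lock (a sketch; 'my_lock'
 * and 'my_seq' are hypothetical):
 *
 *      spin_lock(&my_lock);
 *      write_seqcount_begin(&my_seq);
 *      ...update the data...
 *      write_seqcount_end(&my_seq);
 *      spin_unlock(&my_lock);
 */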

/*
 * Possible sw/hw IRQ protected versions of the interfaces.
 */
#define write_seqlock_irqsave(lock, flags)                              \
        do { local_irq_save(flags); write_seqlock(lock); } while (0)
#define write_seqlock_irq(lock)                                         \
        do { local_irq_disable();   write_seqlock(lock); } while (0)
#define write_seqlock_bh(lock)                                          \
        do { local_bh_disable();    write_seqlock(lock); } while (0)

#define write_sequnlock_irqrestore(lock, flags)                         \
        do { write_sequnlock(lock); local_irq_restore(flags); } while (0)
#define write_sequnlock_irq(lock)                                       \
        do { write_sequnlock(lock); local_irq_enable(); } while (0)
#define write_sequnlock_bh(lock)                                        \
        do { write_sequnlock(lock); local_bh_enable(); } while (0)

#define read_seqbegin_irqsave(lock, flags)                              \
        ({ local_irq_save(flags);   read_seqbegin(lock); })

#define read_seqretry_irqrestore(lock, iv, flags)                       \
        ({                                                              \
                int ret = read_seqretry(lock, iv);                      \
                local_irq_restore(flags);                               \
                ret;                                                    \
        })
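
/*
 * Illustrative reader loop with IRQs disabled across the read section
 * (a sketch; 'foo' and the locals are hypothetical):
 *
 *      unsigned long flags;
 *      unsigned seq;
 *      do {
 *              seq = read_seqbegin_irqsave(&foo, flags);
 *              ...read the data...
 *      } while (read_seqretry_irqrestore(&foo, seq, flags));
 */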

#endif /* __LINUX_SEQLOCK_H */