/******************************************************************************
 * timer.c
 *
 * Copyright (c) 2002-2003 Rolf Neugebauer
 * Copyright (c) 2002-2005 K A Fraser
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/types.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/lib.h>
#include <xen/smp.h>
#include <xen/perfc.h>
#include <xen/time.h>
#include <xen/softirq.h>
#include <xen/timer.h>
#include <xen/keyhandler.h>
#include <xen/percpu.h>
#include <asm/system.h>
#include <asm/desc.h>

/*
 * We pull handlers off the timer list this far in the future, rather than
 * reprogramming the timer hardware.
 */
#define TIMER_SLOP (50*1000) /* ns */

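/*
 * Per-CPU timer state. 'heap' is a 1-based binary min-heap of pending
 * timers ordered by expiry time; 'running' names the timer whose handler
 * is currently executing on this CPU (kill_timer() spins on it).
 */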
struct timers {
    spinlock_t     lock;
    struct timer **heap;
    struct timer  *running;
} __cacheline_aligned;

static DEFINE_PER_CPU(struct timers, timers);

extern int reprogram_timer(s_time_t timeout);

/****************************************************************************
 * HEAP OPERATIONS.
 */
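/*
 * A heap is an array of timer pointers, indexed from 1. Element 0 is not
 * a timer: its first two 16-bit words hold the heap's current size and
 * its allocated limit, packed and unpacked by the macros below.
 */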

#define GET_HEAP_SIZE(_h)     ((int)(((u16 *)(_h))[0]))
#define SET_HEAP_SIZE(_h,_v)  (((u16 *)(_h))[0] = (u16)(_v))

#define GET_HEAP_LIMIT(_h)    ((int)(((u16 *)(_h))[1]))
#define SET_HEAP_LIMIT(_h,_v) (((u16 *)(_h))[1] = (u16)(_v))

/* Sink down element @pos of @heap. */
static void down_heap(struct timer **heap, int pos)
{
    int sz = GET_HEAP_SIZE(heap), nxt;
    struct timer *t = heap[pos];

    while ( (nxt = (pos << 1)) <= sz )
    {
        if ( ((nxt+1) <= sz) && (heap[nxt+1]->expires < heap[nxt]->expires) )
            nxt++;
        if ( heap[nxt]->expires > t->expires )
            break;
        heap[pos] = heap[nxt];
        heap[pos]->heap_offset = pos;
        pos = nxt;
    }

    heap[pos] = t;
    t->heap_offset = pos;
}

/* Float element @pos up @heap. */
static void up_heap(struct timer **heap, int pos)
{
    struct timer *t = heap[pos];

    while ( (pos > 1) && (t->expires < heap[pos>>1]->expires) )
    {
        heap[pos] = heap[pos>>1];
        heap[pos]->heap_offset = pos;
        pos >>= 1;
    }

    heap[pos] = t;
    t->heap_offset = pos;
}


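/*
 * Both sift operations keep each timer's heap_offset field in sync with
 * its array index, so remove_entry() can locate an entry in O(1) without
 * searching the heap.
 */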
/* Delete @t from @heap. Return TRUE if new top of heap. */
static int remove_entry(struct timer **heap, struct timer *t)
{
    int sz = GET_HEAP_SIZE(heap);
    int pos = t->heap_offset;

    t->heap_offset = 0;

    if ( unlikely(pos == sz) )
    {
        SET_HEAP_SIZE(heap, sz-1);
        goto out;
    }

    heap[pos] = heap[sz];
    heap[pos]->heap_offset = pos;

    SET_HEAP_SIZE(heap, --sz);

    if ( (pos > 1) && (heap[pos]->expires < heap[pos>>1]->expires) )
        up_heap(heap, pos);
    else
        down_heap(heap, pos);

 out:
    return (pos == 1);
}


/* Add new entry @t to @heap. Return TRUE if new top of heap. */
static int add_entry(struct timer ***pheap, struct timer *t)
{
    struct timer **heap = *pheap;
    int sz = GET_HEAP_SIZE(heap);

    /* Copy the heap if it is full. */
    if ( unlikely(sz == GET_HEAP_LIMIT(heap)) )
    {
        /* old_limit == (2^n)-1; new_limit == (2^(n+4))-1 */
        int old_limit = GET_HEAP_LIMIT(heap);
        int new_limit = ((old_limit + 1) << 4) - 1;
        heap = xmalloc_array(struct timer *, new_limit + 1);
        BUG_ON(heap == NULL);
        memcpy(heap, *pheap, (old_limit + 1) * sizeof(*heap));
        SET_HEAP_LIMIT(heap, new_limit);
        if ( old_limit != 0 )
            xfree(*pheap);
        *pheap = heap;
    }

    SET_HEAP_SIZE(heap, ++sz);
    heap[sz] = t;
    t->heap_offset = sz;
    up_heap(heap, sz);
    return (t->heap_offset == 1);
}
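/*
 * Growth note: heap limits follow the sequence 0, 15, 255, 4095, ..., so
 * the first add_entry() against the statically shared dummy heap (limit 0,
 * see timer_init()) always allocates a private heap, and the 'old_limit'
 * check ensures the static dummy heap itself is never xfree()d.
 */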


/****************************************************************************
 * TIMER OPERATIONS.
 */

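/*
 * Raise TIMER_SOFTIRQ whenever the top of a heap changes: the earliest
 * deadline is now different, so timer_softirq_action() must reprogram
 * the hardware timer on the owning CPU.
 */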
static inline void __add_timer(struct timer *timer)
{
    int cpu = timer->cpu;
    if ( add_entry(&per_cpu(timers, cpu).heap, timer) )
        cpu_raise_softirq(cpu, TIMER_SOFTIRQ);
}


static inline void __stop_timer(struct timer *timer)
{
    int cpu = timer->cpu;
    if ( remove_entry(per_cpu(timers, cpu).heap, timer) )
        cpu_raise_softirq(cpu, TIMER_SOFTIRQ);
}

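/*
 * Lock the per-CPU queue that currently owns @timer. The timer may be
 * migrated between reading timer->cpu and acquiring that CPU's lock, so
 * ownership is re-checked under the lock and the attempt retried if it
 * has moved.
 */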
static inline void timer_lock(struct timer *timer)
{
    unsigned int cpu;

    for ( ; ; )
    {
        cpu = timer->cpu;
        spin_lock(&per_cpu(timers, cpu).lock);
        if ( likely(timer->cpu == cpu) )
            break;
        spin_unlock(&per_cpu(timers, cpu).lock);
    }
}

#define timer_lock_irq(t) \
    do { local_irq_disable(); timer_lock(t); } while ( 0 )
#define timer_lock_irqsave(t, flags) \
    do { local_irq_save(flags); timer_lock(t); } while ( 0 )

static inline void timer_unlock(struct timer *timer)
{
    spin_unlock(&per_cpu(timers, timer->cpu).lock);
}

#define timer_unlock_irq(t) \
    do { timer_unlock(t); local_irq_enable(); } while ( 0 )
#define timer_unlock_irqrestore(t, flags) \
    do { timer_unlock(t); local_irq_restore(flags); } while ( 0 )


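/*
 * (Re)arm @timer to fire at @expires. A pending timer is first removed
 * from its heap; a killed timer is never re-armed.
 */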
void set_timer(struct timer *timer, s_time_t expires)
{
    unsigned long flags;

    timer_lock_irqsave(timer, flags);

    if ( active_timer(timer) )
        __stop_timer(timer);

    timer->expires = expires;

    if ( likely(!timer->killed) )
        __add_timer(timer);

    timer_unlock_irqrestore(timer, flags);
}
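
/*
 * Illustrative sketch (not part of the original file): a handler that
 * re-arms itself to obtain a periodic tick. init_timer() and MILLISECS()
 * are assumed to have their usual Xen declarations from xen/timer.h and
 * xen/time.h.
 *
 *     static struct timer ticker;
 *
 *     static void tick_fn(void *unused)
 *     {
 *         do_periodic_work();  // hypothetical work function
 *         set_timer(&ticker, NOW() + MILLISECS(10));
 *     }
 *
 *     void start_ticker(unsigned int cpu)  // hypothetical helper
 *     {
 *         init_timer(&ticker, tick_fn, NULL, cpu);
 *         set_timer(&ticker, NOW() + MILLISECS(10));
 *     }
 */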


void stop_timer(struct timer *timer)
{
    unsigned long flags;

    timer_lock_irqsave(timer, flags);

    if ( active_timer(timer) )
        __stop_timer(timer);

    timer_unlock_irqrestore(timer, flags);
}


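/*
 * Move @timer to @new_cpu's queue. Both per-CPU locks are needed, and
 * they are always taken in ascending CPU order so that two concurrent
 * migrations over the same pair of CPUs cannot deadlock.
 */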
void migrate_timer(struct timer *timer, unsigned int new_cpu)
{
    int old_cpu;
    unsigned long flags;

    for ( ; ; )
    {
        if ( (old_cpu = timer->cpu) == new_cpu )
            return;

        if ( old_cpu < new_cpu )
        {
            spin_lock_irqsave(&per_cpu(timers, old_cpu).lock, flags);
            spin_lock(&per_cpu(timers, new_cpu).lock);
        }
        else
        {
            spin_lock_irqsave(&per_cpu(timers, new_cpu).lock, flags);
            spin_lock(&per_cpu(timers, old_cpu).lock);
        }

        if ( likely(timer->cpu == old_cpu) )
            break;

        spin_unlock(&per_cpu(timers, old_cpu).lock);
        spin_unlock_irqrestore(&per_cpu(timers, new_cpu).lock, flags);
    }

    if ( active_timer(timer) )
    {
        __stop_timer(timer);
        timer->cpu = new_cpu;
        __add_timer(timer);
    }
    else
    {
        timer->cpu = new_cpu;
    }

    spin_unlock(&per_cpu(timers, old_cpu).lock);
    spin_unlock_irqrestore(&per_cpu(timers, new_cpu).lock, flags);
}


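/*
 * Permanently deactivate @timer: once marked killed it can never be
 * re-armed (see set_timer()), and on return its handler is guaranteed
 * not to be running on any CPU.
 */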
void kill_timer(struct timer *timer)
{
    int cpu;
    unsigned long flags;

    BUG_ON(this_cpu(timers).running == timer);

    timer_lock_irqsave(timer, flags);

    if ( active_timer(timer) )
        __stop_timer(timer);
    timer->killed = 1;

    timer_unlock_irqrestore(timer, flags);

    for_each_online_cpu ( cpu )
        while ( per_cpu(timers, cpu).running == timer )
            cpu_relax();
}


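/*
 * Softirq handler: run every timer on this CPU's heap that expires within
 * TIMER_SLOP of now, then program the hardware for the next deadline (or
 * for no deadline, 0, if the heap is empty). If reprogram_timer() fails
 * because the requested deadline has already passed, loop and run the
 * newly expired timers.
 */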
static void timer_softirq_action(void)
{
    struct timer *t, **heap;
    struct timers *ts;
    s_time_t now;
    void (*fn)(void *);
    void *data;

    ts = &this_cpu(timers);

    spin_lock_irq(&ts->lock);

    do {
        heap = ts->heap;
        now  = NOW();

        while ( (GET_HEAP_SIZE(heap) != 0) &&
                ((t = heap[1])->expires < (now + TIMER_SLOP)) )
        {
            remove_entry(heap, t);

            ts->running = t;

            fn   = t->function;
            data = t->data;

            spin_unlock_irq(&ts->lock);
            (*fn)(data);
            spin_lock_irq(&ts->lock);

            /* Heap may have grown while the lock was released. */
            heap = ts->heap;
        }

        ts->running = NULL;
    }
    while ( !reprogram_timer(GET_HEAP_SIZE(heap) ? heap[1]->expires : 0) );

    spin_unlock_irq(&ts->lock);
}


void process_pending_timers(void)
{
    unsigned int cpu = smp_processor_id();
    ASSERT(!in_irq() && local_irq_is_enabled());
    if ( test_and_clear_bit(TIMER_SOFTIRQ, &softirq_pending(cpu)) )
        timer_softirq_action();
}


static void dump_timerq(unsigned char key)
{
    struct timer *t;
    struct timers *ts;
    unsigned long flags;
    s_time_t now = NOW();
    int i, j;

    printk("Dumping timer queues: NOW=0x%08X%08X\n",
           (u32)(now>>32), (u32)now);

    for_each_online_cpu ( i )
    {
        ts = &per_cpu(timers, i);

        printk("CPU[%02d] ", i);
        spin_lock_irqsave(&ts->lock, flags);
        for ( j = 1; j <= GET_HEAP_SIZE(ts->heap); j++ )
        {
            t = ts->heap[j];
            printk(" %d : %p ex=0x%08X%08X %p\n",
                   j, t, (u32)(t->expires>>32), (u32)t->expires, t->data);
        }
        spin_unlock_irqrestore(&ts->lock, flags);
        printk("\n");
    }
}


void __init timer_init(void)
{
    static struct timer *dummy_heap;
    int i;

    open_softirq(TIMER_SOFTIRQ, timer_softirq_action);

    /*
     * All CPUs initially share an empty dummy heap. Only those CPUs that
     * are brought online will be dynamically allocated their own heap.
     */
    SET_HEAP_SIZE(&dummy_heap, 0);
    SET_HEAP_LIMIT(&dummy_heap, 0);

    for_each_cpu ( i )
    {
        spin_lock_init(&per_cpu(timers, i).lock);
        per_cpu(timers, i).heap = &dummy_heap;
    }

    register_keyhandler('a', dump_timerq, "dump timer queues");
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */