/*
 * linux/arch/ia64/kernel/irq.c
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 *  6/10/99: Updated to bring in sync with x86 version to facilitate
 *	     support for SMP and different interrupt controllers.
 *
 * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector
 *	    PCI to vector allocation routine.
 * 04/14/2004 Ashok Raj <ashok.raj@intel.com>
 *	    Added CPU Hotplug handling for IPF.
 */

#include <linux/module.h>

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>
#include <linux/ptrace.h>
#include <linux/random.h>	/* for rand_initialize_irq() */
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/threads.h>
#include <linux/bitops.h>
#ifdef CONFIG_XEN
#include <linux/cpu.h>
#endif

#include <asm/delay.h>
#include <asm/intrinsics.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm/system.h>

#ifdef CONFIG_PERFMON
# include <asm/perfmon.h>
#endif

#define IRQ_DEBUG	0

/* These can be overridden in platform_irq_init */
int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;

/* default base addr of IPI table */
void __iomem *ipi_base_addr = ((void __iomem *)
			       (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));

/*
 * Legacy IRQ to IA-64 vector translation table.
 */
__u8 isa_irq_to_vector_map[16] = {
	/* 8259 IRQ translation, first 16 entries */
	0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
};
EXPORT_SYMBOL(isa_irq_to_vector_map);
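
/*
 * Example (illustrative): entry 0 of the table above maps ISA IRQ 0 (the
 * PIT timer) to vector 0x2f, and entry 1 maps ISA IRQ 1 (the keyboard) to
 * vector 0x20, so isa_irq_to_vector_map[1] yields 0x20.
 */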

static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_MAX_DEVICE_VECTORS)];

int
assign_irq_vector (int irq)
{
	int pos, vector;

#ifdef CONFIG_XEN
	if (is_running_on_xen()) {
		extern int xen_assign_irq_vector(int);
		return xen_assign_irq_vector(irq);
	}
#endif
 again:
	pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
	vector = IA64_FIRST_DEVICE_VECTOR + pos;
	if (vector > IA64_LAST_DEVICE_VECTOR)
		return -ENOSPC;
	if (test_and_set_bit(pos, ia64_vector_mask))
		goto again;
	return vector;
}

void
free_irq_vector (int vector)
{
	int pos;

	if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR)
		return;

#ifdef CONFIG_XEN
	if (is_running_on_xen()) {
		extern void xen_free_irq_vector(int);
		xen_free_irq_vector(vector);
		return;
	}
#endif
	pos = vector - IA64_FIRST_DEVICE_VECTOR;
	if (!test_and_clear_bit(pos, ia64_vector_mask))
		printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
}

int
reserve_irq_vector (int vector)
{
	int pos;

	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return -EINVAL;

	pos = vector - IA64_FIRST_DEVICE_VECTOR;
	return test_and_set_bit(pos, ia64_vector_mask);
}
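
/*
 * Illustrative sketch (not part of the original file): how a caller might
 * drive the allocator above.  The function names are hypothetical; the irq
 * argument of assign_irq_vector() is only consulted on Xen.
 */
#if 0
static int example_grab_vector(void)
{
	int vector = assign_irq_vector(0);

	if (vector < 0)
		return vector;		/* -ENOSPC: all device vectors busy */
	/* ... program the device/IOSAPIC with 'vector' ... */
	return vector;
}

static void example_release_vector(int vector)
{
	free_irq_vector(vector);	/* warns on double free */
}
#endif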

#ifdef CONFIG_SMP
# define IS_RESCHEDULE(vec)	(vec == IA64_IPI_RESCHEDULE)
#else
# define IS_RESCHEDULE(vec)	(0)
#endif
/*
 * This is where the IVT branches when we get an external interrupt.
 * It dispatches to the correct hardware IRQ handler via a function
 * pointer.
 */
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
	unsigned long saved_tpr;

#if IRQ_DEBUG
	{
		unsigned long bsp, sp;

		/*
		 * Note: if the interrupt happened while executing in
		 * the context switch routine (ia64_switch_to), we may
		 * get a spurious stack overflow here.  This is
		 * because the register and the memory stack are not
		 * switched atomically.
		 */
		bsp = ia64_getreg(_IA64_REG_AR_BSP);
		sp = ia64_getreg(_IA64_REG_SP);

		if ((sp - bsp) < 1024) {
			static unsigned char count;
			static long last_time;

			if (jiffies - last_time > 5*HZ)
				count = 0;
			if (++count < 5) {
				last_time = jiffies;
				printk("ia64_handle_irq: DANGER: less than "
				       "1KB of free stack space!!\n"
				       "(bsp=0x%lx, sp=0x%lx)\n", bsp, sp);
			}
		}
	}
#endif /* IRQ_DEBUG */

	/*
	 * Always set TPR to limit maximum interrupt nesting depth to
	 * 16 (without this, it would be ~240, which could easily lead
	 * to kernel stack overflows).  Vectors are grouped into 16
	 * priority classes of 16 vectors each, and setting TPR to the
	 * vector in service masks that class and all lower ones.
	 */
	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		if (!IS_RESCHEDULE(vector)) {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			__do_IRQ(local_vector_to_irq(vector), regs);

			/*
			 * Disable interrupts and send EOI:
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	/*
	 * This must be done *after* the ia64_eoi().  For example, the keyboard
	 * softirq handler needs to be able to wait for further keyboard
	 * interrupts, which can't come through until ia64_eoi() has been done.
	 */
	irq_exit();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * This function emulates interrupt processing when a CPU is about to be
 * brought down.
 */
void ia64_process_pending_intr(void)
{
	ia64_vector vector;
	unsigned long saved_tpr;
	extern unsigned int vectors_in_migration[NR_IRQS];

	vector = ia64_get_ivr();

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();

	/*
	 * Perform normal interrupt style processing
	 */
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		if (!IS_RESCHEDULE(vector)) {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			/*
			 * Call __do_IRQ just as a real interrupt handler
			 * would, passing NULL for pt_regs since we are not
			 * in real interrupt context.  This loop duplicates
			 * ia64_handle_irq() and could probably share code
			 * with it (see the sketch after this function).
			 */
			vectors_in_migration[local_vector_to_irq(vector)] = 0;
			__do_IRQ(local_vector_to_irq(vector), NULL);

			/*
			 * Disable interrupts and send EOI
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
}
#endif
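
/*
 * Sketch (illustrative, not in the original source): ia64_handle_irq() and
 * ia64_process_pending_intr() above run the same drain loop; a shared
 * helper could look roughly like this, with 'migrating' selecting the
 * CPU-hotplug variant.
 */
#if 0
static void ia64_drain_vectors(ia64_vector vector, struct pt_regs *regs,
			       int migrating)
{
	unsigned long saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);

	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		if (!IS_RESCHEDULE(vector)) {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();
			if (migrating)
				vectors_in_migration[local_vector_to_irq(vector)] = 0;
			__do_IRQ(local_vector_to_irq(vector), regs);
			/* disable interrupts, restore TPR, then EOI */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
}
#endif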


#ifdef CONFIG_SMP
extern irqreturn_t handle_IPI (int irq, void *dev_id, struct pt_regs *regs);

static struct irqaction ipi_irqaction = {
	.handler =	handle_IPI,
	.flags =	IRQF_DISABLED,
	.name =		"IPI"
};
#endif

#ifdef CONFIG_XEN
#include <xen/evtchn.h>
#include <xen/interface/callback.h>

static DEFINE_PER_CPU(int, timer_irq) = -1;
static DEFINE_PER_CPU(int, ipi_irq) = -1;
static DEFINE_PER_CPU(int, resched_irq) = -1;
static DEFINE_PER_CPU(int, cmc_irq) = -1;
static DEFINE_PER_CPU(int, cmcp_irq) = -1;
static DEFINE_PER_CPU(int, cpep_irq) = -1;
static char timer_name[NR_CPUS][15];
static char ipi_name[NR_CPUS][15];
static char resched_name[NR_CPUS][15];
static char cmc_name[NR_CPUS][15];
static char cmcp_name[NR_CPUS][15];
static char cpep_name[NR_CPUS][15];

struct saved_irq {
	unsigned int irq;
	struct irqaction *action;
};
/* 16 should be a generous value, since only a handful of percpu irqs
 * are registered early.
 */
#define MAX_LATE_IRQ	16
static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ];
static unsigned short late_irq_cnt = 0;
static unsigned short saved_irq_cnt = 0;
static int xen_slab_ready = 0;

#ifdef CONFIG_SMP
/* Dummy handler.  We could check for RESCHEDULE_VECTOR before calling
 * __do_IRQ, but that would require several memory accesses to percpu
 * data and thus add unnecessary traffic to the other paths.
 */
static irqreturn_t
handle_reschedule(int irq, void *dev_id, struct pt_regs *regs)
{
	return IRQ_HANDLED;
}

static struct irqaction resched_irqaction = {
	.handler =	handle_reschedule,
	.flags =	IRQF_DISABLED,
	.name =		"RESCHED"
};
#endif

/*
 * This is the Xen version of percpu irq registration, which needs to
 * bind to the Xen-specific evtchn subsystem.  One trick here is that
 * the evtchn binding interface depends on kmalloc, because the
 * associated port needs to be freed at device/cpu teardown.  So we
 * cache registrations made on the BSP before the slab allocator is
 * ready and deal with them later; registrations that happen after
 * slab is ready are hooked up to Xen evtchns immediately.
 *
 * FIXME: MCA is not supported so far, and thus the "nomca" boot param
 * is required.
 */
static void
xen_register_percpu_irq (unsigned int vec, struct irqaction *action, int save)
{
	unsigned int cpu = smp_processor_id();
	irq_desc_t *desc;
	int irq = 0;

	if (xen_slab_ready) {
		switch (vec) {
		case IA64_TIMER_VECTOR:
			sprintf(timer_name[cpu], "%s%d", action->name, cpu);
			irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
				action->handler, action->flags,
				timer_name[cpu], action->dev_id);
			per_cpu(timer_irq, cpu) = irq;
			break;
		case IA64_IPI_RESCHEDULE:
			sprintf(resched_name[cpu], "%s%d", action->name, cpu);
			irq = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR, cpu,
				action->handler, action->flags,
				resched_name[cpu], action->dev_id);
			per_cpu(resched_irq, cpu) = irq;
			break;
		case IA64_IPI_VECTOR:
			sprintf(ipi_name[cpu], "%s%d", action->name, cpu);
			irq = bind_ipi_to_irqhandler(IPI_VECTOR, cpu,
				action->handler, action->flags,
				ipi_name[cpu], action->dev_id);
			per_cpu(ipi_irq, cpu) = irq;
			break;
		case IA64_CMC_VECTOR:
			sprintf(cmc_name[cpu], "%s%d", action->name, cpu);
			irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
				action->handler, action->flags,
				cmc_name[cpu], action->dev_id);
			per_cpu(cmc_irq, cpu) = irq;
			break;
		case IA64_CMCP_VECTOR:
			sprintf(cmcp_name[cpu], "%s%d", action->name, cpu);
			irq = bind_ipi_to_irqhandler(CMCP_VECTOR, cpu,
				action->handler, action->flags,
				cmcp_name[cpu], action->dev_id);
			per_cpu(cmcp_irq, cpu) = irq;
			break;
		case IA64_CPEP_VECTOR:
			sprintf(cpep_name[cpu], "%s%d", action->name, cpu);
			irq = bind_ipi_to_irqhandler(CPEP_VECTOR, cpu,
				action->handler, action->flags,
				cpep_name[cpu], action->dev_id);
			per_cpu(cpep_irq, cpu) = irq;
			break;
		case IA64_CPE_VECTOR:
		case IA64_MCA_RENDEZ_VECTOR:
		case IA64_PERFMON_VECTOR:
		case IA64_MCA_WAKEUP_VECTOR:
		case IA64_SPURIOUS_INT_VECTOR:
			/* No need to complain, these aren't supported. */
			break;
		default:
			printk(KERN_WARNING "Percpu irq %d is unsupported "
			       "by xen!\n", vec);
			break;
		}
		BUG_ON(irq < 0);

		if (irq > 0) {
			/*
			 * Mark the interrupt as per-cpu.  Without this,
			 * migrate_irqs() would mark it for migration and
			 * trigger it on cpu hotplug.
			 */
			desc = irq_desc + irq;
			desc->status |= IRQ_PER_CPU;
		}
	}

	/* On the BSP, we cache registered percpu irqs and then re-walk
	 * them when initializing the APs.
	 */
	if (!cpu && save) {
		BUG_ON(saved_irq_cnt == MAX_LATE_IRQ);
		saved_percpu_irqs[saved_irq_cnt].irq = vec;
		saved_percpu_irqs[saved_irq_cnt].action = action;
		saved_irq_cnt++;
		if (!xen_slab_ready)
			late_irq_cnt++;
	}
}
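
/*
 * Note: when running on Xen, register_percpu_irq() below forwards here with
 * save=1, so the registrations cached in saved_percpu_irqs[] can later be
 * replayed by xen_smp_intr_init() when the application processors come up.
 */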

static void
xen_bind_early_percpu_irq (void)
{
	int i;

	xen_slab_ready = 1;
	/* There's no race when accessing this cached array, since only
	 * the BSP performs this step, shortly after slab becomes ready.
	 */
	for (i = 0; i < late_irq_cnt; i++)
		xen_register_percpu_irq(saved_percpu_irqs[i].irq,
					saved_percpu_irqs[i].action, 0);
}

/* FIXME: There's no obvious hook for noticing that slab is ready, so we
 * resort to a hack that reuses the late_time_init hook.
 */
extern void (*late_time_init)(void);
extern char xen_event_callback;
extern void xen_init_IRQ(void);

#ifdef CONFIG_HOTPLUG_CPU
static int __devinit
unbind_evtchn_callback(struct notifier_block *nfb,
		       unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	if (action == CPU_DEAD) {
		/* Unregister evtchn. */
		if (per_cpu(cpep_irq, cpu) >= 0) {
			unbind_from_irqhandler(per_cpu(cpep_irq, cpu), NULL);
			per_cpu(cpep_irq, cpu) = -1;
		}
		if (per_cpu(cmcp_irq, cpu) >= 0) {
			unbind_from_irqhandler(per_cpu(cmcp_irq, cpu), NULL);
			per_cpu(cmcp_irq, cpu) = -1;
		}
		if (per_cpu(cmc_irq, cpu) >= 0) {
			unbind_from_irqhandler(per_cpu(cmc_irq, cpu), NULL);
			per_cpu(cmc_irq, cpu) = -1;
		}
		if (per_cpu(ipi_irq, cpu) >= 0) {
			unbind_from_irqhandler(per_cpu(ipi_irq, cpu), NULL);
			per_cpu(ipi_irq, cpu) = -1;
		}
		if (per_cpu(resched_irq, cpu) >= 0) {
			unbind_from_irqhandler(per_cpu(resched_irq, cpu),
					       NULL);
			per_cpu(resched_irq, cpu) = -1;
		}
		if (per_cpu(timer_irq, cpu) >= 0) {
			unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
			per_cpu(timer_irq, cpu) = -1;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block unbind_evtchn_notifier = {
	.notifier_call = unbind_evtchn_callback,
	.priority = 0
};
#endif

DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
void xen_smp_intr_init(void)
{
#ifdef CONFIG_SMP
	unsigned int cpu = smp_processor_id();
	unsigned int i = 0;
	struct callback_register event = {
		.type = CALLBACKTYPE_event,
		.address = (unsigned long)&xen_event_callback,
	};

	if (cpu == 0) {
		/* Initialization was already done for the boot cpu. */
#ifdef CONFIG_HOTPLUG_CPU
		/* Register the notifier only once. */
		register_cpu_notifier(&unbind_evtchn_notifier);
#endif
		return;
	}

	/* This should be piggybacked onto vcpu guest-context setup. */
	BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));

	for (i = 0; i < saved_irq_cnt; i++)
		xen_register_percpu_irq(saved_percpu_irqs[i].irq,
					saved_percpu_irqs[i].action, 0);
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_XEN */

void
register_percpu_irq (ia64_vector vec, struct irqaction *action)
{
	irq_desc_t *desc;
	unsigned int irq;

#ifdef CONFIG_XEN
	if (is_running_on_xen()) {
		xen_register_percpu_irq(vec, action, 1);
		return;
	}
#endif

	for (irq = 0; irq < NR_IRQS; ++irq)
		if (irq_to_vector(irq) == vec) {
			desc = irq_desc + irq;
			desc->status |= IRQ_PER_CPU;
			desc->chip = &irq_type_ia64_lsapic;
			if (action)
				setup_irq(irq, action);
		}
}

void __init
init_IRQ (void)
{
#ifdef CONFIG_XEN
	/* Maybe move this into platform_irq_init later. */
	if (is_running_on_xen()) {
		struct callback_register event = {
			.type = CALLBACKTYPE_event,
			.address = (unsigned long)&xen_event_callback,
		};
		xen_init_IRQ();
		BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
		late_time_init = xen_bind_early_percpu_irq;
#ifdef CONFIG_SMP
		register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
#endif /* CONFIG_SMP */
	}
#endif /* CONFIG_XEN */
	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
#ifdef CONFIG_SMP
	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
#endif
#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif
	platform_irq_init();
}

void
ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
{
	void __iomem *ipi_addr;
	unsigned long ipi_data;
	unsigned long phys_cpu_id;

#ifdef CONFIG_XEN
	if (is_running_on_xen()) {
		int irq = -1;

#ifdef CONFIG_SMP
		/* TODO: we need to call vcpu_up here */
		if (unlikely(vector == ap_wakeup_vector)) {
			extern void xen_send_ipi (int cpu, int vec);
			xen_send_ipi (cpu, vector);
			/* vcpu_prepare_and_up(cpu); */
			return;
		}
#endif

		switch (vector) {
		case IA64_IPI_VECTOR:
			irq = per_cpu(ipi_to_irq, cpu)[IPI_VECTOR];
			break;
		case IA64_IPI_RESCHEDULE:
			irq = per_cpu(ipi_to_irq, cpu)[RESCHEDULE_VECTOR];
			break;
		case IA64_CMCP_VECTOR:
			irq = per_cpu(ipi_to_irq, cpu)[CMCP_VECTOR];
			break;
		case IA64_CPEP_VECTOR:
			irq = per_cpu(ipi_to_irq, cpu)[CPEP_VECTOR];
			break;
		default:
			printk(KERN_WARNING "Unsupported IPI type 0x%x\n",
			       vector);
			irq = 0;
			break;
		}

		BUG_ON(irq < 0);
		notify_remote_via_irq(irq);
		return;
	}
#endif /* CONFIG_XEN */

#ifdef CONFIG_SMP
	phys_cpu_id = cpu_physical_id(cpu);
#else
	phys_cpu_id = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
#endif

	/*
	 * The physical cpu number consists of an 8-bit ID and an 8-bit EID.
	 */

	ipi_data = (delivery_mode << 8) | (vector & 0xff);
	ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));
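
	/*
	 * Worked example (illustrative): for phys_cpu_id 0x0100 (ID 1,
	 * EID 0), delivery mode 0 and vector 0xd0 with no redirection,
	 * this computes ipi_data 0x00d0 and writes it to
	 * ipi_base_addr + 0x1000.
	 */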

	writeq(ipi_data, ipi_addr);
}
---|