/*
 * Platform dependent support for SGI SN
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2000-2006 Silicon Graphics, Inc. All Rights Reserved.
 */

#include <linux/irq.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#ifdef XEN
#include <linux/pci.h>
#include <asm/hw_irq.h>
#endif
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#ifndef XEN
#include <asm/sn/pcidev.h>
#endif
#include <asm/sn/shub_mmr.h>
#include <asm/sn/sn_sal.h>

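/* Xen does not provide move_native_irq(), so stub it out as a no-op. */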
#ifdef XEN
#define move_native_irq(foo)    do {} while (0)
#endif

static void force_interrupt(int irq);
#ifndef XEN
static void register_intr_pda(struct sn_irq_info *sn_irq_info);
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
#endif

int sn_force_interrupt_flag = 1;
extern int sn_ioif_inited;
struct list_head **sn_irq_lh;
static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */

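/*
 * Ask the PROM, via the SN_SAL_IOIF_INTERRUPT SAL call, to allocate an
 * interrupt resource for the given nasid/widget and fill in sn_irq_info.
 * Returns the SAL status word (0 on success).
 */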
u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
                  struct sn_irq_info *sn_irq_info,
                  int req_irq, nasid_t req_nasid,
                  int req_slice)
{
        struct ia64_sal_retval ret_stuff;
        ret_stuff.status = 0;
        ret_stuff.v0 = 0;

        SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
                        (u64) SAL_INTR_ALLOC, (u64) local_nasid,
                        (u64) local_widget, __pa(sn_irq_info), (u64) req_irq,
                        (u64) req_nasid, (u64) req_slice);

        return ret_stuff.status;
}

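/*
 * Release a PROM interrupt resource previously obtained from
 * sn_intr_alloc(), identified by the irq/cookie pair in sn_irq_info.
 */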
void sn_intr_free(nasid_t local_nasid, int local_widget,
                  struct sn_irq_info *sn_irq_info)
{
        struct ia64_sal_retval ret_stuff;
        ret_stuff.status = 0;
        ret_stuff.v0 = 0;

        SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
                        (u64) SAL_INTR_FREE, (u64) local_nasid,
                        (u64) local_widget, (u64) sn_irq_info->irq_irq,
                        (u64) sn_irq_info->irq_cookie, 0, 0);
}

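/*
 * The SN hub needs no special startup/shutdown or enable/disable
 * handling, so the next four callbacks are stubs; the real work happens
 * in the ack/end hooks below.
 */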
static unsigned int sn_startup_irq(unsigned int irq)
{
        return 0;
}

static void sn_shutdown_irq(unsigned int irq)
{
}

static void sn_disable_irq(unsigned int irq)
{
}

static void sn_enable_irq(unsigned int irq)
{
}

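/*
 * Acknowledge an interrupt: write the pending bits back to the SHub
 * event-occurred alias register to clear them, then mark the vector as
 * in service in the per-node data area.
 */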
static void sn_ack_irq(unsigned int irq)
{
        u64 event_occurred, mask;

        irq = irq & 0xff;
        event_occurred = HUB_L((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
        mask = event_occurred & SH_ALL_INT_MASK;
        HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask);
        __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);

        move_native_irq(irq);
}

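/*
 * End-of-interrupt processing: clear the in-service bit and, when
 * sn_force_interrupt_flag is set, replay anything the bridge still
 * shows pending for this IRQ.
 */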
static void sn_end_irq(unsigned int irq)
{
        int ivec;
        u64 event_occurred;

        ivec = irq & 0xff;
        if (ivec == SGI_UART_VECTOR) {
                event_occurred = HUB_L((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
                /* If the UART bit is set here, we may have received an
                 * interrupt from the UART that the driver missed. To
                 * make sure, we IPI ourselves to force us to look again.
                 */
                if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
                        platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR,
                                          IA64_IPI_DM_INT, 0);
                }
        }
        __clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs);
        if (sn_force_interrupt_flag)
                force_interrupt(irq);
}

#ifndef XEN
static void sn_irq_info_free(struct rcu_head *head);

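/*
 * Retarget a device interrupt to a new nasid/slice: free the PROM
 * resources for the old target, allocate new ones, and ask the bridge
 * provider to redirect line interrupts (MSIs are already targeted).
 * Returns the replacement sn_irq_info, or NULL on failure.
 */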
struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
                                       nasid_t nasid, int slice)
{
        int vector;
        int cpuphys;
        int64_t bridge;
        int local_widget, status;
        nasid_t local_nasid;
        struct sn_irq_info *new_irq_info;
        struct sn_pcibus_provider *pci_provider;

        new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
        if (new_irq_info == NULL)
                return NULL;

        memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));

        bridge = (u64) new_irq_info->irq_bridge;
        if (!bridge) {
                kfree(new_irq_info);
                return NULL; /* irq is not a device interrupt */
        }

        local_nasid = NASID_GET(bridge);

        if (local_nasid & 1)
                local_widget = TIO_SWIN_WIDGETNUM(bridge);
        else
                local_widget = SWIN_WIDGETNUM(bridge);

        vector = sn_irq_info->irq_irq;
        /* Free the old PROM new_irq_info structure */
        sn_intr_free(local_nasid, local_widget, new_irq_info);
        /* Update the kernel's new_irq_info with the new target info */
        unregister_intr_pda(new_irq_info);

        /* allocate a new PROM new_irq_info struct */
        status = sn_intr_alloc(local_nasid, local_widget,
                               new_irq_info, vector,
                               nasid, slice);

        /* SAL call failed */
        if (status) {
                kfree(new_irq_info);
                return NULL;
        }

        cpuphys = nasid_slice_to_cpuid(nasid, slice);
        new_irq_info->irq_cpuid = cpuphys;
        register_intr_pda(new_irq_info);

        pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];

        /*
         * If this represents a line interrupt, target it. If it's
         * an MSI (irq_int_bit < 0), it's already targeted.
         */
        if (new_irq_info->irq_int_bit >= 0 &&
            pci_provider && pci_provider->target_interrupt)
                (pci_provider->target_interrupt)(new_irq_info);

        spin_lock(&sn_irq_info_lock);
#ifdef XEN
        list_replace(&sn_irq_info->list, &new_irq_info->list);
#else
        list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
#endif
        spin_unlock(&sn_irq_info_lock);
#ifndef XEN
        call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
#endif

#ifdef CONFIG_SMP
        set_irq_affinity_info((vector & 0xff), cpuphys, 0);
#endif

        return new_irq_info;
}

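/*
 * Set the CPU affinity of an IRQ by retargeting every vector registered
 * on it to the first CPU in the requested mask.
 */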
static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
{
        struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
        nasid_t nasid;
        int slice;

        nasid = cpuid_to_nasid(first_cpu(mask));
        slice = cpuid_to_slice(first_cpu(mask));

        list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
                                 sn_irq_lh[irq], list)
                (void)sn_retarget_vector(sn_irq_info, nasid, slice);
}
#endif

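/*
 * Interrupt controller descriptor for the SN hub. The Xen build of
 * hw_interrupt_type names its controller via the older "typename" field
 * and has no set_affinity hook, hence the conditionals below.
 */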
struct hw_interrupt_type irq_type_sn = {
#ifndef XEN
        .name = "SN hub",
#else
        .typename = "SN hub",
#endif
        .startup = sn_startup_irq,
        .shutdown = sn_shutdown_irq,
        .enable = sn_enable_irq,
        .disable = sn_disable_irq,
        .ack = sn_ack_irq,
        .end = sn_end_irq,
#ifndef XEN
        .set_affinity = sn_set_affinity_irq
#endif
};

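/* Translate a vector on the current CPU into its global IRQ number. */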
unsigned int sn_local_vector_to_irq(u8 vector)
{
        return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector));
}

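/*
 * Claim every IRQ descriptor that no other controller has taken for the
 * SN hub interrupt type.
 */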
void sn_irq_init(void)
{
        int i;
        irq_desc_t *base_desc = irq_desc;

#ifndef XEN
        ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
        ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;
#endif

        for (i = 0; i < NR_IRQS; i++) {
#ifdef XEN
                if (base_desc[i].handler == &no_irq_type) {
                        base_desc[i].handler = &irq_type_sn;
#else
                if (base_desc[i].chip == &no_irq_type) {
                        base_desc[i].chip = &irq_type_sn;
#endif
                }
        }
}

#ifndef XEN
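/*
 * register_intr_pda()/unregister_intr_pda() maintain the
 * [sn_first_irq, sn_last_irq] window in the per-CPU data area that
 * sn_lb_int_war_check() scans; unregistering rescans the remaining
 * per-IRQ lists to shrink the window.
 */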
static void register_intr_pda(struct sn_irq_info *sn_irq_info)
{
        int irq = sn_irq_info->irq_irq;
        int cpu = sn_irq_info->irq_cpuid;

        if (pdacpu(cpu)->sn_last_irq < irq) {
                pdacpu(cpu)->sn_last_irq = irq;
        }

        if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq)
                pdacpu(cpu)->sn_first_irq = irq;
}

static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
{
        int irq = sn_irq_info->irq_irq;
        int cpu = sn_irq_info->irq_cpuid;
        struct sn_irq_info *tmp_irq_info;
        int i, foundmatch;

#ifndef XEN
        rcu_read_lock();
#else
        spin_lock(&sn_irq_info_lock);
#endif
        if (pdacpu(cpu)->sn_last_irq == irq) {
                foundmatch = 0;
                for (i = pdacpu(cpu)->sn_last_irq - 1;
                     i && !foundmatch; i--) {
#ifdef XEN
                        list_for_each_entry(tmp_irq_info,
                                            sn_irq_lh[i],
                                            list) {
#else
                        list_for_each_entry_rcu(tmp_irq_info,
                                                sn_irq_lh[i],
                                                list) {
#endif
                                if (tmp_irq_info->irq_cpuid == cpu) {
                                        foundmatch = 1;
                                        break;
                                }
                        }
                }
                pdacpu(cpu)->sn_last_irq = i;
        }

        if (pdacpu(cpu)->sn_first_irq == irq) {
                foundmatch = 0;
                for (i = pdacpu(cpu)->sn_first_irq + 1;
                     i < NR_IRQS && !foundmatch; i++) {
#ifdef XEN
                        list_for_each_entry(tmp_irq_info,
                                            sn_irq_lh[i],
                                            list) {
#else
                        list_for_each_entry_rcu(tmp_irq_info,
                                                sn_irq_lh[i],
                                                list) {
#endif
                                if (tmp_irq_info->irq_cpuid == cpu) {
                                        foundmatch = 1;
                                        break;
                                }
                        }
                }
                pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i);
        }
#ifndef XEN
        rcu_read_unlock();
#else
        spin_unlock(&sn_irq_info_lock);
#endif
}
#endif /* !XEN */

#ifndef XEN
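/* RCU callback that frees an sn_irq_info once all readers have finished. */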
static void sn_irq_info_free(struct rcu_head *head)
{
        struct sn_irq_info *sn_irq_info;

        sn_irq_info = container_of(head, struct sn_irq_info, rcu);
        kfree(sn_irq_info);
}
#endif

#ifndef XEN
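/*
 * Bind a PROM-allocated interrupt to a PCI device: record the target
 * CPU, link the sn_irq_info onto the per-IRQ list, reserve its vector,
 * and update the per-CPU IRQ window.
 */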
void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
{
        nasid_t nasid = sn_irq_info->irq_nasid;
        int slice = sn_irq_info->irq_slice;
        int cpu = nasid_slice_to_cpuid(nasid, slice);

        pci_dev_get(pci_dev);
        sn_irq_info->irq_cpuid = cpu;
        sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev);

        /* link it into the sn_irq[irq] list */
        spin_lock(&sn_irq_info_lock);
#ifdef XEN
        list_add(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
#else
        list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
#endif
#ifndef XEN
        reserve_irq_vector(sn_irq_info->irq_irq);
#endif
        spin_unlock(&sn_irq_info_lock);

        register_intr_pda(sn_irq_info);
}

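/*
 * Undo sn_irq_fixup(): unlink the sn_irq_info, release the vector once
 * its list drains, and drop the device reference taken at fixup time.
 */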
void sn_irq_unfixup(struct pci_dev *pci_dev)
{
        struct sn_irq_info *sn_irq_info;

        /* Only cleanup IRQ stuff if this device has a host bus context */
        if (!SN_PCIDEV_BUSSOFT(pci_dev))
                return;

        sn_irq_info = SN_PCIDEV_INFO(pci_dev)->pdi_sn_irq_info;
        if (!sn_irq_info)
                return;
        if (!sn_irq_info->irq_irq) {
                kfree(sn_irq_info);
                return;
        }

        unregister_intr_pda(sn_irq_info);
        spin_lock(&sn_irq_info_lock);
#ifdef XEN
        list_del(&sn_irq_info->list);
#else
        list_del_rcu(&sn_irq_info->list);
#endif
        spin_unlock(&sn_irq_info_lock);
        if (list_empty(sn_irq_lh[sn_irq_info->irq_irq]))
                free_irq_vector(sn_irq_info->irq_irq);
#ifndef XEN
        call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
#endif
        pci_dev_put(pci_dev);
}
#endif

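/* Ask the bus provider, if it supplies a hook, to replay this interrupt. */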
static inline void
sn_call_force_intr_provider(struct sn_irq_info *sn_irq_info)
{
        struct sn_pcibus_provider *pci_provider;

        pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type];
        if (pci_provider && pci_provider->force_interrupt)
                (*pci_provider->force_interrupt)(sn_irq_info);
}

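/*
 * Replay every interrupt registered on an IRQ. Under Xen the per-IRQ
 * list is protected by sn_irq_info_lock instead of RCU.
 */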
static void force_interrupt(int irq)
{
        struct sn_irq_info *sn_irq_info;

#ifndef XEN
        if (!sn_ioif_inited)
                return;
#endif

#ifdef XEN
        spin_lock(&sn_irq_info_lock);
#else
        rcu_read_lock();
#endif
#ifdef XEN
        list_for_each_entry(sn_irq_info, sn_irq_lh[irq], list)
#else
        list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list)
#endif
                sn_call_force_intr_provider(sn_irq_info);

#ifdef XEN
        spin_unlock(&sn_irq_info_lock);
#else
        rcu_read_unlock();
#endif
}

#ifndef XEN
/*
 * Check for lost interrupts. If the PIC int_status reg. says that
 * an interrupt has been sent, but not handled, and the interrupt
 * is not pending in either the cpu irr regs or in the soft irr regs,
 * and the interrupt is not in service, then the interrupt may have
 * been lost. Force an interrupt on that pin. It is possible that
 * the interrupt is in flight, so we may generate a spurious interrupt,
 * but we should never miss a real lost interrupt.
 */
static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
{
        u64 regval;
        struct pcidev_info *pcidev_info;
        struct pcibus_info *pcibus_info;

        /*
         * Bridge types attached to TIO (anything but PIC) do not need this WAR
         * since they do not target Shub II interrupt registers. If that
         * ever changes, this check needs to accommodate them.
         */
        if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_PIC)
                return;

        pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
        if (!pcidev_info)
                return;

        pcibus_info =
            (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
            pdi_pcibus_info;
        regval = pcireg_intr_status_get(pcibus_info);

        if (!ia64_get_irr(irq_to_vector(irq))) {
                if (!test_bit(irq, pda->sn_in_service_ivecs)) {
                        regval &= 0xff;
                        if (sn_irq_info->irq_int_bit & regval &
                            sn_irq_info->irq_last_intr) {
                                regval &= ~(sn_irq_info->irq_int_bit & regval);
                                sn_call_force_intr_provider(sn_irq_info);
                        }
                }
        }
        sn_irq_info->irq_last_intr = regval;
}
#endif

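/*
 * Lost-interrupt workaround scan: walk every IRQ in this CPU's
 * registered [sn_first_irq, sn_last_irq] window and run sn_check_intr()
 * on each vector. The body compiles away under Xen.
 */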
void sn_lb_int_war_check(void)
{
#ifndef XEN
        struct sn_irq_info *sn_irq_info;
        int i;

#ifdef XEN
        if (pda->sn_first_irq == 0)
#else
        if (!sn_ioif_inited || pda->sn_first_irq == 0)
#endif
                return;

#ifdef XEN
        spin_lock(&sn_irq_info_lock);
#else
        rcu_read_lock();
#endif
        for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
#ifdef XEN
                list_for_each_entry(sn_irq_info, sn_irq_lh[i], list) {
#else
                list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) {
#endif
                        sn_check_intr(i, sn_irq_info);
                }
        }
#ifdef XEN
        spin_unlock(&sn_irq_info_lock);
#else
        rcu_read_unlock();
#endif
#endif
}

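/*
 * Allocate and initialise the per-IRQ list heads that anchor the
 * sn_irq_info structures registered on each IRQ.
 */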
void __init sn_irq_lh_init(void)
{
        int i;

        sn_irq_lh = kmalloc(sizeof(struct list_head *) * NR_IRQS, GFP_KERNEL);
        if (!sn_irq_lh)
                panic("SN PCI INIT: Failed to allocate memory for PCI init\n");

        for (i = 0; i < NR_IRQS; i++) {
                sn_irq_lh[i] = kmalloc(sizeof(struct list_head), GFP_KERNEL);
                if (!sn_irq_lh[i])
                        panic("SN PCI INIT: Failed IRQ memory allocation\n");

                INIT_LIST_HEAD(sn_irq_lh[i]);
        }
}
---|