source: trunk/packages/xen-3.1/xen-3.1/xen/arch/ia64/linux-xen/mca.c @ 34

Last change on this file since 34 was 34, checked in by hartmans, 18 years ago

Add xen and xen-common

1/*
2 * File:        mca.c
3 * Purpose:     Generic MCA handling layer
4 *
5 * Updated for latest kernel
6 * Copyright (C) 2003 Hewlett-Packard Co
7 *      David Mosberger-Tang <davidm@hpl.hp.com>
8 *
9 * Copyright (C) 2002 Dell Inc.
10 * Copyright (C) Matt Domsch (Matt_Domsch@dell.com)
11 *
12 * Copyright (C) 2002 Intel
13 * Copyright (C) Jenna Hall (jenna.s.hall@intel.com)
14 *
15 * Copyright (C) 2001 Intel
16 * Copyright (C) Fred Lewis (frederick.v.lewis@intel.com)
17 *
18 * Copyright (C) 2000 Intel
19 * Copyright (C) Chuck Fleckenstein (cfleck@co.intel.com)
20 *
21 * Copyright (C) 1999, 2004 Silicon Graphics, Inc.
22 * Copyright (C) Vijay Chander(vijay@engr.sgi.com)
23 *
24 * 03/04/15 D. Mosberger Added INIT backtrace support.
25 * 02/03/25 M. Domsch   GUID cleanups
26 *
27 * 02/01/04 J. Hall     Aligned MCA stack to 16 bytes, added platform vs. CPU
28 *                      error flag, set SAL default return values, changed
29 *                      error record structure to linked list, added init call
30 *                      to sal_get_state_info_size().
31 *
32 * 01/01/03 F. Lewis    Added setup of CMCI and CPEI IRQs, logging of corrected
33 *                      platform errors, completed code for logging of
34 *                      corrected & uncorrected machine check errors, and
35 *                      updated for conformance with Nov. 2000 revision of the
36 *                      SAL 3.0 spec.
37 * 00/03/29 C. Fleckenstein  Fixed PAL/SAL update issues, began MCA bug fixes, logging issues,
38 *                           added min save state dump, added INIT handler.
39 *
40 * 2003-12-08 Keith Owens <kaos@sgi.com>
41 *            smp_call_function() must not be called from interrupt context (can
42 *            deadlock on tasklist_lock).  Use keventd to call smp_call_function().
43 *
44 * 2004-02-01 Keith Owens <kaos@sgi.com>
45 *            Avoid deadlock when using printk() for MCA and INIT records.
46 *            Delete all record printing code, moved to salinfo_decode in user space.
47 *            Mark variables and functions static where possible.
48 *            Delete dead variables and functions.
49 *            Reorder to remove the need for forward declarations and to consolidate
50 *            related code.
51 */
52#include <linux/config.h>
53#include <linux/types.h>
54#include <linux/init.h>
55#include <linux/sched.h>
56#include <linux/interrupt.h>
57#include <linux/irq.h>
58#include <linux/kallsyms.h>
59#include <linux/smp_lock.h>
60#include <linux/bootmem.h>
61#include <linux/acpi.h>
62#include <linux/timer.h>
63#include <linux/module.h>
64#include <linux/kernel.h>
65#include <linux/smp.h>
66#include <linux/workqueue.h>
67
68#include <asm/delay.h>
69#include <asm/machvec.h>
70#include <asm/meminit.h>
71#include <asm/page.h>
72#include <asm/ptrace.h>
73#include <asm/system.h>
74#include <asm/sal.h>
75#include <asm/mca.h>
76
77#include <asm/irq.h>
78#include <asm/hw_irq.h>
79
80#ifdef XEN
81#include <xen/symbols.h>
82#include <xen/mm.h>
83#include <xen/console.h>
84#include <xen/event.h>
85#include <xen/softirq.h>
86#include <asm/xenmca.h>
87#include <linux/shutdown.h>
88#endif
89
90#if defined(IA64_MCA_DEBUG_INFO)
91# define IA64_MCA_DEBUG(fmt...) printk(fmt)
92#else
93# define IA64_MCA_DEBUG(fmt...)
94#endif
95
96/* Used by mca_asm.S */
97#ifndef XEN
98ia64_mca_sal_to_os_state_t      ia64_sal_to_os_handoff_state;
99#else
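/*
 * Under Xen each physical CPU keeps its own SAL-to-OS handoff area so that
 * concurrent machine checks on different CPUs do not clobber each other's
 * state; the per-CPU variable below holds the address of this CPU's entry
 * for use by the MCA assembly entry code (mca_asm.S).
 */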
100ia64_mca_sal_to_os_state_t      ia64_sal_to_os_handoff_state[NR_CPUS];
101DEFINE_PER_CPU(u64, ia64_sal_to_os_handoff_state_addr); 
102#endif
103ia64_mca_os_to_sal_state_t      ia64_os_to_sal_handoff_state;
104u64                             ia64_mca_serialize;
105DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
106DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
107DEFINE_PER_CPU(u64, ia64_mca_pal_pte);      /* PTE to map PAL code */
108DEFINE_PER_CPU(u64, ia64_mca_pal_base);    /* vaddr PAL code granule */
109
110unsigned long __per_cpu_mca[NR_CPUS];
111
112/* In mca_asm.S */
113extern void                     ia64_monarch_init_handler (void);
114extern void                     ia64_slave_init_handler (void);
115#ifdef XEN
116extern void setup_vector (unsigned int vec, struct irqaction *action);
117#define setup_irq(irq, action)  setup_vector(irq, action)
118#endif
119
120static ia64_mc_info_t           ia64_mc_info;
121
122#ifdef XEN
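/*
 * Xen has no jiffies counter: NOW() returns the current system time in
 * nanoseconds, so HZ is redefined to 10^9 to keep the *_POLL_INTERVAL
 * expressions below meaningful as arguments to set_timer() (which takes an
 * absolute time in nanoseconds via the mod_timer() wrapper).
 */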
123#define jiffies                 NOW()
124#undef HZ
125#define HZ                      1000000000UL
126#endif
127
128#define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */
129#define MIN_CPE_POLL_INTERVAL (2*60*HZ)  /* 2 minutes */
130#define CMC_POLL_INTERVAL     (1*60*HZ)  /* 1 minute */
131#define CPE_HISTORY_LENGTH    5
132#define CMC_HISTORY_LENGTH    5
133
134#ifndef XEN
135static struct timer_list cpe_poll_timer;
136static struct timer_list cmc_poll_timer;
137#else
138#define mod_timer(timer, expires)       set_timer(timer, expires)
139static struct timer cpe_poll_timer;
140static struct timer cmc_poll_timer;
141#endif
142/*
143 * This variable tells whether we are currently in polling mode.
144 * Start with this in the wrong state so we won't play w/ timers
145 * before the system is ready.
146 */
147static int cmc_polling_enabled = 1;
148
149/*
150 * Clearing this variable prevents CPE polling from getting activated
151 * in mca_late_init.  Use it if your system doesn't provide a CPEI,
152 * but encounters problems retrieving CPE logs.  This should only be
153 * necessary for debugging.
154 */
155static int cpe_poll_enabled = 1;
156
157extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
158
159static int mca_init;
160
161/*
162 * IA64_MCA log support
163 */
164#define IA64_MAX_LOGS           2       /* Double-buffering for nested MCAs */
165#define IA64_MAX_LOG_TYPES      4   /* MCA, INIT, CMC, CPE */
166
167typedef struct ia64_state_log_s
168{
169        spinlock_t      isl_lock;
170        int             isl_index;
171        unsigned long   isl_count;
172        ia64_err_rec_t  *isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */
173} ia64_state_log_t;
174
175static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
176
177#ifndef XEN
178#define IA64_LOG_ALLOCATE(it, size) \
179        {ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
180                (ia64_err_rec_t *)alloc_bootmem(size); \
181        ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
182                (ia64_err_rec_t *)alloc_bootmem(size);}
183#else
184#define IA64_LOG_ALLOCATE(it, size) \
185        do { \
186                unsigned int pageorder; \
187                pageorder  = get_order_from_bytes(sizeof(struct ia64_mca_cpu)); \
188                ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
189                  (ia64_err_rec_t *)alloc_xenheap_pages(pageorder); \
190                ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
191                  (ia64_err_rec_t *)alloc_xenheap_pages(pageorder); \
192        } while(0)
193#endif
194
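/*
 * Each log type is double-buffered: isl_index names the buffer that the
 * next record will be read into (NEXT), while 1 - isl_index names the most
 * recently captured record (CURR).  IA64_LOG_INDEX_INC flips the two roles
 * and bumps the running record count.
 */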
195#define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
196#define IA64_LOG_LOCK(it)      spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
197#define IA64_LOG_UNLOCK(it)    spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
198#define IA64_LOG_NEXT_INDEX(it)    ia64_state_log[it].isl_index
199#define IA64_LOG_CURR_INDEX(it)    1 - ia64_state_log[it].isl_index
200#define IA64_LOG_INDEX_INC(it) \
201    {ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \
202    ia64_state_log[it].isl_count++;}
203#define IA64_LOG_INDEX_DEC(it) \
204    ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index
205#define IA64_LOG_NEXT_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]))
206#define IA64_LOG_CURR_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
207#define IA64_LOG_COUNT(it)         ia64_state_log[it].isl_count
208
209#ifdef XEN
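/*
 * Per-record-type lists of SAL error records that have been pulled out of
 * SAL and queued for later retrieval (e.g. by dom0), protected by
 * sal_queue_lock; sal_record is a scratch buffer for SAL records,
 * allocated in ia64_log_init().
 */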
210struct list_head *sal_queue, sal_log_queues[IA64_MAX_LOG_TYPES];
211sal_log_record_header_t *sal_record;
212DEFINE_SPINLOCK(sal_queue_lock);
213#endif
214
215/*
216 * ia64_log_init
217 *      Reset the OS ia64 log buffer
218 * Inputs   :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
219 * Outputs      :       None
220 */
221static void
222ia64_log_init(int sal_info_type)
223{
224        u64     max_size = 0;
225
226        IA64_LOG_NEXT_INDEX(sal_info_type) = 0;
227        IA64_LOG_LOCK_INIT(sal_info_type);
228
229        // SAL will tell us the maximum size of any error record of this type
230        max_size = ia64_sal_get_state_info_size(sal_info_type);
231        if (!max_size)
232                /* alloc_bootmem() doesn't like zero-sized allocations! */
233                return;
234
235        // set up OS data structures to hold error info
236        IA64_LOG_ALLOCATE(sal_info_type, max_size);
237        memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
238        memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
239
240#ifdef XEN
241        if (sal_record == NULL) {
242                unsigned int pageorder;
243                pageorder  = get_order_from_bytes(max_size);
244                sal_record = (sal_log_record_header_t *)
245                             alloc_xenheap_pages(pageorder);
246                BUG_ON(sal_record == NULL);
247        }
248#endif
249}
250
251#ifndef XEN
252/*
253 * ia64_log_get
254 *
255 *      Get the current MCA log from SAL and copy it into the OS log buffer.
256 *
257 *  Inputs  :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
258 *              irq_safe    whether you can use printk at this point
259 *  Outputs :   size        (total record length)
260 *              *buffer     (ptr to error record)
261 *
262 */
263static u64
264ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
265{
266        sal_log_record_header_t     *log_buffer;
267        u64                         total_len = 0;
268        int                         s;
269
270        IA64_LOG_LOCK(sal_info_type);
271
272        /* Get the process state information */
273        log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);
274
275        total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);
276
277        if (total_len) {
278                IA64_LOG_INDEX_INC(sal_info_type);
279                IA64_LOG_UNLOCK(sal_info_type);
280                if (irq_safe) {
281                        IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. "
282                                       "Record length = %ld\n", __FUNCTION__, sal_info_type, total_len);
283                }
284                *buffer = (u8 *) log_buffer;
285                return total_len;
286        } else {
287                IA64_LOG_UNLOCK(sal_info_type);
288                return 0;
289        }
290}
291
292/*
293 *  ia64_mca_log_sal_error_record
294 *
295 *  This function retrieves a specified error record type from SAL
296 *  and wakes up any processes waiting for error records.
297 *
298 *  Inputs  :   sal_info_type   (Type of error record MCA/CMC/CPE/INIT)
299 */
300static void
301ia64_mca_log_sal_error_record(int sal_info_type)
302{
303        u8 *buffer;
304        sal_log_record_header_t *rh;
305        u64 size;
306        int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA && sal_info_type != SAL_INFO_TYPE_INIT;
307#ifdef IA64_MCA_DEBUG_INFO
308        static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
309#endif
310
311        size = ia64_log_get(sal_info_type, &buffer, irq_safe);
312        if (!size)
313                return;
314
315        salinfo_log_wakeup(sal_info_type, buffer, size, irq_safe);
316
317        if (irq_safe)
318                IA64_MCA_DEBUG("CPU %d: SAL log contains %s error record\n",
319                        smp_processor_id(),
320                        sal_info_type < ARRAY_SIZE(rec_name) ? rec_name[sal_info_type] : "UNKNOWN");
321
322        /* Clear logs from corrected errors in case there's no user-level logger */
323        rh = (sal_log_record_header_t *)buffer;
324        if (rh->severity == sal_log_severity_corrected)
325                ia64_sal_clear_state_info(sal_info_type);
326}
327#else /* !XEN */
328/*
329 * ia64_log_queue
330 *
331 *      Get the current error log from SAL, copy it into the OS log buffer,
332 *      and queue an entry on sal_queue so the record can be retrieved later.
333 *
334 *  Inputs  :   sal_info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
335 *              virq            (virtual IRQ associated with the record)
336 *  Outputs :   total record length (0 if no record was retrieved)
337 */
338static u64
339ia64_log_queue(int sal_info_type, int virq)
340{
341        sal_log_record_header_t     *log_buffer;
342        u64                         total_len = 0;
343        int                         s;
344        sal_queue_entry_t           *e;
345        unsigned long               flags;
346
347        IA64_LOG_LOCK(sal_info_type);
348
349        /* Get the process state information */
350        log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);
351
352        total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);
353
354        if (total_len) {
355                int queue_type;
356
357                spin_lock_irqsave(&sal_queue_lock, flags);
358
359                if (sal_info_type == SAL_INFO_TYPE_MCA && virq == VIRQ_MCA_CMC)
360                        queue_type = SAL_INFO_TYPE_CMC;
361                else
362                        queue_type = sal_info_type;
363
364                e = xmalloc(sal_queue_entry_t);
365                BUG_ON(e == NULL);
366                e->cpuid = smp_processor_id();
367                e->sal_info_type = sal_info_type;
368                e->vector = IA64_CMC_VECTOR;
369                e->virq = virq;
370                e->length = total_len;
371
372                list_add_tail(&e->list, &sal_queue[queue_type]);
373                spin_unlock_irqrestore(&sal_queue_lock, flags);
374
375                IA64_LOG_INDEX_INC(sal_info_type);
376                IA64_LOG_UNLOCK(sal_info_type);
377                if (sal_info_type != SAL_INFO_TYPE_MCA &&
378                    sal_info_type != SAL_INFO_TYPE_INIT) {
379                        IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. "
380                                       "Record length = %ld\n", __FUNCTION__,
381                                       sal_info_type, total_len);
382                }
383                return total_len;
384        } else {
385                IA64_LOG_UNLOCK(sal_info_type);
386                return 0;
387        }
388}
389#endif /* !XEN */
390
391/*
392 * platform dependent error handling
393 */
394#ifndef PLATFORM_MCA_HANDLERS
395
396#ifdef CONFIG_ACPI
397
398#ifdef XEN
399/**
400 *      Copy from linux/kernel/irq/manage.c
401 *
402 *      disable_irq_nosync - disable an irq without waiting
403 *      @irq: Interrupt to disable
404 *
405 *      Disable the selected interrupt line.  Disables and Enables are
406 *      nested.
407 *      Unlike disable_irq(), this function does not ensure existing
408 *      instances of the IRQ handler have completed before returning.
409 *
410 *      This function may be called from IRQ context.
411 */
412void disable_irq_nosync(unsigned int irq)
413{
414        irq_desc_t *desc = irq_desc + irq;
415        unsigned long flags;
416
417        if (irq >= NR_IRQS)
418                return;
419
420        spin_lock_irqsave(&desc->lock, flags);
421        if (!desc->depth++) {
422                desc->status |= IRQ_DISABLED;
423                desc->handler->disable(irq);
424        }
425        spin_unlock_irqrestore(&desc->lock, flags);
426}
427
428/**
429 *      Copy from linux/kernel/irq/manage.c
430 *
431 *      enable_irq - enable handling of an irq
432 *      @irq: Interrupt to enable
433 *
434 *      Undoes the effect of one call to disable_irq().  If this
435 *      matches the last disable, processing of interrupts on this
436 *      IRQ line is re-enabled.
437 *
438 *      This function may be called from IRQ context.
439 */
440void enable_irq(unsigned int irq)
441{
442        irq_desc_t *desc = irq_desc + irq;
443        unsigned long flags;
444
445        if (irq >= NR_IRQS)
446                return;
447
448        spin_lock_irqsave(&desc->lock, flags);
449        switch (desc->depth) {
450        case 0:
451                WARN_ON(1);
452                break;
453        case 1: {
454                unsigned int status = desc->status & ~IRQ_DISABLED;
455
456                desc->status = status;
457                if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
458                        desc->status = status | IRQ_REPLAY;
459                        hw_resend_irq(desc->handler,irq);
460                }
461                desc->handler->enable(irq);
462                /* fall-through */
463        }
464        default:
465                desc->depth--;
466        }
467        spin_unlock_irqrestore(&desc->lock, flags);
468}
469#endif  /* XEN */
470
471int cpe_vector = -1;
472
473static irqreturn_t
474ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
475{
476        static unsigned long    cpe_history[CPE_HISTORY_LENGTH];
477        static int              index;
478        static DEFINE_SPINLOCK(cpe_history_lock);
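        /* cpe_history records the timestamps of recent CPE interrupts so
         * that an interrupt storm can be detected and the handler switched
         * over to timer-based polling (see the threshold check below). */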
479
480        IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
481                       __FUNCTION__, cpe_irq, smp_processor_id());
482
483        /* SAL spec states this should run w/ interrupts enabled */
484        local_irq_enable();
485
486#ifndef XEN
487        /* Get the CPE error record and log it */
488        ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
489#else
490        ia64_log_queue(SAL_INFO_TYPE_CPE, VIRQ_MCA_CPE);
491        /* CPE errors are not yet reported to dom0; the following code is
492           reserved for a future implementation. */
493/*      send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CPE); */
494#endif
495
496        spin_lock(&cpe_history_lock);
497        if (!cpe_poll_enabled && cpe_vector >= 0) {
498
499                int i, count = 1; /* we know 1 happened now */
500                unsigned long now = jiffies;
501
502                for (i = 0; i < CPE_HISTORY_LENGTH; i++) {
503                        if (now - cpe_history[i] <= HZ)
504                                count++;
505                }
506
507                IA64_MCA_DEBUG(KERN_INFO "CPE threshold %d/%d\n", count, CPE_HISTORY_LENGTH);
508                if (count >= CPE_HISTORY_LENGTH) {
509
510                        cpe_poll_enabled = 1;
511                        spin_unlock(&cpe_history_lock);
512                        disable_irq_nosync(local_vector_to_irq(IA64_CPE_VECTOR));
513
514                        /*
515                         * Corrected errors will still be corrected, but
516                         * make sure there's a log somewhere that indicates
517                         * something is generating more than we can handle.
518                         */
519                        printk(KERN_WARNING "WARNING: Switching to polling CPE handler; error records may be lost\n");
520
521                        mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);
522
523                        /* lock already released, get out now */
524                        return IRQ_HANDLED;
525                } else {
526                        cpe_history[index++] = now;
527                        if (index == CPE_HISTORY_LENGTH)
528                                index = 0;
529                }
530        }
531        spin_unlock(&cpe_history_lock);
532        return IRQ_HANDLED;
533}
534
535#endif /* CONFIG_ACPI */
536
537static void
538show_min_state (pal_min_state_area_t *minstate)
539{
540        u64 iip = minstate->pmsa_iip + ((struct ia64_psr *)(&minstate->pmsa_ipsr))->ri;
541        u64 xip = minstate->pmsa_xip + ((struct ia64_psr *)(&minstate->pmsa_xpsr))->ri;
542
543        printk("NaT bits\t%016lx\n", minstate->pmsa_nat_bits);
544        printk("pr\t\t%016lx\n", minstate->pmsa_pr);
545        printk("b0\t\t%016lx ", minstate->pmsa_br0); print_symbol("%s\n", minstate->pmsa_br0);
546        printk("ar.rsc\t\t%016lx\n", minstate->pmsa_rsc);
547        printk("cr.iip\t\t%016lx ", iip); print_symbol("%s\n", iip);
548        printk("cr.ipsr\t\t%016lx\n", minstate->pmsa_ipsr);
549        printk("cr.ifs\t\t%016lx\n", minstate->pmsa_ifs);
550        printk("xip\t\t%016lx ", xip); print_symbol("%s\n", xip);
551        printk("xpsr\t\t%016lx\n", minstate->pmsa_xpsr);
552        printk("xfs\t\t%016lx\n", minstate->pmsa_xfs);
553        printk("b1\t\t%016lx ", minstate->pmsa_br1);
554        print_symbol("%s\n", minstate->pmsa_br1);
555
556        printk("\nstatic registers r0-r15:\n");
557        printk(" r0- 3 %016lx %016lx %016lx %016lx\n",
558               0UL, minstate->pmsa_gr[0], minstate->pmsa_gr[1], minstate->pmsa_gr[2]);
559        printk(" r4- 7 %016lx %016lx %016lx %016lx\n",
560               minstate->pmsa_gr[3], minstate->pmsa_gr[4],
561               minstate->pmsa_gr[5], minstate->pmsa_gr[6]);
562        printk(" r8-11 %016lx %016lx %016lx %016lx\n",
563               minstate->pmsa_gr[7], minstate->pmsa_gr[8],
564               minstate->pmsa_gr[9], minstate->pmsa_gr[10]);
565        printk("r12-15 %016lx %016lx %016lx %016lx\n",
566               minstate->pmsa_gr[11], minstate->pmsa_gr[12],
567               minstate->pmsa_gr[13], minstate->pmsa_gr[14]);
568
569        printk("\nbank 0:\n");
570        printk("r16-19 %016lx %016lx %016lx %016lx\n",
571               minstate->pmsa_bank0_gr[0], minstate->pmsa_bank0_gr[1],
572               minstate->pmsa_bank0_gr[2], minstate->pmsa_bank0_gr[3]);
573        printk("r20-23 %016lx %016lx %016lx %016lx\n",
574               minstate->pmsa_bank0_gr[4], minstate->pmsa_bank0_gr[5],
575               minstate->pmsa_bank0_gr[6], minstate->pmsa_bank0_gr[7]);
576        printk("r24-27 %016lx %016lx %016lx %016lx\n",
577               minstate->pmsa_bank0_gr[8], minstate->pmsa_bank0_gr[9],
578               minstate->pmsa_bank0_gr[10], minstate->pmsa_bank0_gr[11]);
579        printk("r28-31 %016lx %016lx %016lx %016lx\n",
580               minstate->pmsa_bank0_gr[12], minstate->pmsa_bank0_gr[13],
581               minstate->pmsa_bank0_gr[14], minstate->pmsa_bank0_gr[15]);
582
583        printk("\nbank 1:\n");
584        printk("r16-19 %016lx %016lx %016lx %016lx\n",
585               minstate->pmsa_bank1_gr[0], minstate->pmsa_bank1_gr[1],
586               minstate->pmsa_bank1_gr[2], minstate->pmsa_bank1_gr[3]);
587        printk("r20-23 %016lx %016lx %016lx %016lx\n",
588               minstate->pmsa_bank1_gr[4], minstate->pmsa_bank1_gr[5],
589               minstate->pmsa_bank1_gr[6], minstate->pmsa_bank1_gr[7]);
590        printk("r24-27 %016lx %016lx %016lx %016lx\n",
591               minstate->pmsa_bank1_gr[8], minstate->pmsa_bank1_gr[9],
592               minstate->pmsa_bank1_gr[10], minstate->pmsa_bank1_gr[11]);
593        printk("r28-31 %016lx %016lx %016lx %016lx\n",
594               minstate->pmsa_bank1_gr[12], minstate->pmsa_bank1_gr[13],
595               minstate->pmsa_bank1_gr[14], minstate->pmsa_bank1_gr[15]);
596}
597
598static void
599fetch_min_state (pal_min_state_area_t *ms, struct pt_regs *pt, struct switch_stack *sw)
600{
601        u64 *dst_banked, *src_banked, bit, shift, nat_bits;
602        int i;
603
604        /*
605         * First, update the pt-regs and switch-stack structures with the contents stored
606         * in the min-state area:
607         */
608        if (((struct ia64_psr *) &ms->pmsa_ipsr)->ic == 0) {
609                pt->cr_ipsr = ms->pmsa_xpsr;
610                pt->cr_iip = ms->pmsa_xip;
611                pt->cr_ifs = ms->pmsa_xfs;
612        } else {
613                pt->cr_ipsr = ms->pmsa_ipsr;
614                pt->cr_iip = ms->pmsa_iip;
615                pt->cr_ifs = ms->pmsa_ifs;
616        }
617        pt->ar_rsc = ms->pmsa_rsc;
618        pt->pr = ms->pmsa_pr;
619        pt->r1 = ms->pmsa_gr[0];
620        pt->r2 = ms->pmsa_gr[1];
621        pt->r3 = ms->pmsa_gr[2];
622        sw->r4 = ms->pmsa_gr[3];
623        sw->r5 = ms->pmsa_gr[4];
624        sw->r6 = ms->pmsa_gr[5];
625        sw->r7 = ms->pmsa_gr[6];
626        pt->r8 = ms->pmsa_gr[7];
627        pt->r9 = ms->pmsa_gr[8];
628        pt->r10 = ms->pmsa_gr[9];
629        pt->r11 = ms->pmsa_gr[10];
630        pt->r12 = ms->pmsa_gr[11];
631        pt->r13 = ms->pmsa_gr[12];
632        pt->r14 = ms->pmsa_gr[13];
633        pt->r15 = ms->pmsa_gr[14];
634        dst_banked = &pt->r16;          /* r16-r31 are contiguous in struct pt_regs */
635        src_banked = ms->pmsa_bank1_gr;
636        for (i = 0; i < 16; ++i)
637                dst_banked[i] = src_banked[i];
638        pt->b0 = ms->pmsa_br0;
639        sw->b1 = ms->pmsa_br1;
640
641        /* construct the NaT bits for the pt-regs structure: */
642#       define PUT_NAT_BIT(dst, addr)                                   \
643        do {                                                            \
644                bit = nat_bits & 1; nat_bits >>= 1;                     \
645                shift = ((unsigned long) addr >> 3) & 0x3f;             \
646                dst = ((dst) & ~(1UL << shift)) | (bit << shift);       \
647        } while (0)
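        /*
         * A register spilled at address 'addr' has its NaT bit kept at bit
         * position (addr >> 3) & 0x3f of the corresponding UNaT collection
         * register, hence the shift computation above; each invocation of
         * the macro consumes one bit from the rotated nat_bits value.
         */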
648
649        /* Rotate the saved NaT bits such that bit 0 corresponds to pmsa_gr[0]: */
650        shift = ((unsigned long) &ms->pmsa_gr[0] >> 3) & 0x3f;
651        nat_bits = (ms->pmsa_nat_bits >> shift) | (ms->pmsa_nat_bits << (64 - shift));
652
653        PUT_NAT_BIT(sw->caller_unat, &pt->r1);
654        PUT_NAT_BIT(sw->caller_unat, &pt->r2);
655        PUT_NAT_BIT(sw->caller_unat, &pt->r3);
656        PUT_NAT_BIT(sw->ar_unat, &sw->r4);
657        PUT_NAT_BIT(sw->ar_unat, &sw->r5);
658        PUT_NAT_BIT(sw->ar_unat, &sw->r6);
659        PUT_NAT_BIT(sw->ar_unat, &sw->r7);
660        PUT_NAT_BIT(sw->caller_unat, &pt->r8);  PUT_NAT_BIT(sw->caller_unat, &pt->r9);
661        PUT_NAT_BIT(sw->caller_unat, &pt->r10); PUT_NAT_BIT(sw->caller_unat, &pt->r11);
662        PUT_NAT_BIT(sw->caller_unat, &pt->r12); PUT_NAT_BIT(sw->caller_unat, &pt->r13);
663        PUT_NAT_BIT(sw->caller_unat, &pt->r14); PUT_NAT_BIT(sw->caller_unat, &pt->r15);
664        nat_bits >>= 16;        /* skip over bank0 NaT bits */
665        PUT_NAT_BIT(sw->caller_unat, &pt->r16); PUT_NAT_BIT(sw->caller_unat, &pt->r17);
666        PUT_NAT_BIT(sw->caller_unat, &pt->r18); PUT_NAT_BIT(sw->caller_unat, &pt->r19);
667        PUT_NAT_BIT(sw->caller_unat, &pt->r20); PUT_NAT_BIT(sw->caller_unat, &pt->r21);
668        PUT_NAT_BIT(sw->caller_unat, &pt->r22); PUT_NAT_BIT(sw->caller_unat, &pt->r23);
669        PUT_NAT_BIT(sw->caller_unat, &pt->r24); PUT_NAT_BIT(sw->caller_unat, &pt->r25);
670        PUT_NAT_BIT(sw->caller_unat, &pt->r26); PUT_NAT_BIT(sw->caller_unat, &pt->r27);
671        PUT_NAT_BIT(sw->caller_unat, &pt->r28); PUT_NAT_BIT(sw->caller_unat, &pt->r29);
672        PUT_NAT_BIT(sw->caller_unat, &pt->r30); PUT_NAT_BIT(sw->caller_unat, &pt->r31);
673}
674
675#ifdef XEN
676static spinlock_t init_dump_lock = SPIN_LOCK_UNLOCKED;
677static spinlock_t show_stack_lock = SPIN_LOCK_UNLOCKED;
678static atomic_t num_stopped_cpus = ATOMIC_INIT(0);
679extern void show_stack (struct task_struct *, unsigned long *);
680
681#define CPU_FLUSH_RETRY_MAX 5
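/*
 * Flush this CPU's instruction and data caches via PAL_CACHE_FLUSH,
 * retrying up to CPU_FLUSH_RETRY_MAX times; save_ksp() below relies on
 * this to push the just-saved kernel stack pointer out to memory.
 */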
682static void
683init_cache_flush (void)
684{
685        unsigned long flags;
686        int i;
687        s64 rval = 0;
688        u64 vector, progress = 0;
689
690        for (i = 0; i < CPU_FLUSH_RETRY_MAX; i++) {
691                local_irq_save(flags);
692                rval = ia64_pal_cache_flush(PAL_CACHE_TYPE_INSTRUCTION_DATA,
693                                            0, &progress, &vector);
694                local_irq_restore(flags);
695                if (rval == 0){
696                        printk("\nPAL cache flush success\n");
697                        return;
698                }
699        }
700        printk("\nPAL cache flush failed. status=%ld\n",rval);
701}
702
703static void inline
704save_ksp (struct unw_frame_info *info)
705{
706        current->arch._thread.ksp = (__u64)(info->sw) - 16;
707        wmb();
708        init_cache_flush();
709}       
710
711static void
712freeze_cpu_osinit (struct unw_frame_info *info, void *arg)
713{
714        save_ksp(info);
715        atomic_inc(&num_stopped_cpus);
716        printk("%s: CPU%d init handler done\n",
717               __FUNCTION__, smp_processor_id());
718        for (;;)
719                local_irq_disable();
720}
721
722/* FIXME */
723static void
724try_crashdump(struct unw_frame_info *info, void *arg)
725{ 
726        save_ksp(info);
727        printk("\nINIT dump complete.  Please reboot now.\n");
728        for (;;)
729                local_irq_disable();
730}
731#endif /* XEN */
732
733static void
734init_handler_platform (pal_min_state_area_t *ms,
735                       struct pt_regs *pt, struct switch_stack *sw)
736{
737        struct unw_frame_info info;
738
739        /* if a kernel debugger is available call it here else just dump the registers */
740
741        /*
742         * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000), INIT can be
743         * generated via the BMC's command-line interface, but since the console is on the
744         * same serial line, the user will need some time to switch out of the BMC before
745         * the dump begins.
746         */
747        printk("Delaying for 5 seconds...\n");
748        udelay(5*1000000);
749#ifdef XEN
750        fetch_min_state(ms, pt, sw);
751        spin_lock(&show_stack_lock);
752#endif
753        show_min_state(ms);
754
755#ifdef XEN
756        printk("Backtrace of current vcpu (vcpu_id %d of domid %d)\n",
757               current->vcpu_id, current->domain->domain_id);
758#else
759        printk("Backtrace of current task (pid %d, %s)\n", current->pid, current->comm);
760        fetch_min_state(ms, pt, sw);
761#endif
762        unw_init_from_interruption(&info, current, pt, sw);
763        ia64_do_show_stack(&info, NULL);
764#ifdef XEN
765        spin_unlock(&show_stack_lock);
766
767        if (spin_trylock(&init_dump_lock)) {
768                struct domain *d;
769                struct vcpu *v;
770#ifdef CONFIG_SMP
771                int other_cpus = num_online_cpus() - 1;
772                int wait = 1000 * other_cpus;
773
774                while ((atomic_read(&num_stopped_cpus) != other_cpus) && wait--)
775                        udelay(1000);
776                if (other_cpus && wait < 0)
777                        printk("timeout %d\n", atomic_read(&num_stopped_cpus));
778#endif
779                if (opt_noreboot) {
780                        /* this route is for dump routine */
781                        unw_init_running(try_crashdump, pt);
782                } else {
783                        rcu_read_lock(&domlist_read_lock);
784                        for_each_domain(d) {
785                                for_each_vcpu(d, v) {
786                                        printk("Backtrace of current vcpu "
787                                               "(vcpu_id %d of domid %d)\n",
788                                               v->vcpu_id, d->domain_id);
789                                        show_stack(v, NULL);
790                                }
791                        }
792                        rcu_read_unlock(&domlist_read_lock);
793                }
794        }
795        unw_init_running(freeze_cpu_osinit, NULL);
796#else /* XEN */
797#ifdef CONFIG_SMP
798        /* read_trylock() would be handy... */
799        if (!tasklist_lock.write_lock)
800                read_lock(&tasklist_lock);
801#endif
802        {
803                struct task_struct *g, *t;
804                do_each_thread (g, t) {
805                        if (t == current)
806                                continue;
807
808                        printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
809                        show_stack(t, NULL);
810                } while_each_thread (g, t);
811        }
812#ifdef CONFIG_SMP
813        if (!tasklist_lock.write_lock)
814                read_unlock(&tasklist_lock);
815#endif
816
817        printk("\nINIT dump complete.  Please reboot now.\n");
818#endif /* XEN */
819        while (1);                      /* hang city if no debugger */
820}
821
822#ifdef CONFIG_ACPI
823/*
824 * ia64_mca_register_cpev
825 *
826 *  Register the corrected platform error vector with SAL.
827 *
828 *  Inputs
829 *      cpev        Corrected Platform Error Vector number
830 *
831 *  Outputs
832 *      None
833 */
834static void
835ia64_mca_register_cpev (int cpev)
836{
837        /* Register the CPE interrupt vector with SAL */
838        struct ia64_sal_retval isrv;
839
840        isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_CPE_INT, SAL_MC_PARAM_MECHANISM_INT, cpev, 0, 0);
841        if (isrv.status) {
842                printk(KERN_ERR "Failed to register Corrected Platform "
843                       "Error interrupt vector with SAL (status %ld)\n", isrv.status);
844                return;
845        }
846
847        IA64_MCA_DEBUG("%s: corrected platform error "
848                       "vector %#x registered\n", __FUNCTION__, cpev);
849}
850#endif /* CONFIG_ACPI */
851
852#endif /* PLATFORM_MCA_HANDLERS */
853
854/*
855 * ia64_mca_cmc_vector_setup
856 *
857 *  Set up the corrected machine check vector register in the processor.
858 *  (The interrupt is masked at boot; ia64_mca_late_init() unmasks it.)
859 *  This function is invoked on a per-processor basis.
860 *
861 * Inputs
862 *      None
863 *
864 * Outputs
865 *      None
866 */
867void
868ia64_mca_cmc_vector_setup (void)
869{
870        cmcv_reg_t      cmcv;
871
872        cmcv.cmcv_regval        = 0;
873        cmcv.cmcv_mask          = 1;        /* Mask/disable interrupt at first */
874        cmcv.cmcv_vector        = IA64_CMC_VECTOR;
875        ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
876
877        IA64_MCA_DEBUG("%s: CPU %d corrected "
878                       "machine check vector %#x registered.\n",
879                       __FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR);
880
881        IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
882                       __FUNCTION__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
883}
884
885/*
886 * ia64_mca_cmc_vector_disable
887 *
888 *  Mask the corrected machine check vector register in the processor.
889 *  This function is invoked on a per-processor basis.
890 *
891 * Inputs
892 *      dummy(unused)
893 *
894 * Outputs
895 *      None
896 */
897static void
898ia64_mca_cmc_vector_disable (void *dummy)
899{
900        cmcv_reg_t      cmcv;
901
902        cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);
903
904        cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
905        ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
906
907        IA64_MCA_DEBUG("%s: CPU %d corrected "
908                       "machine check vector %#x disabled.\n",
909                       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
910}
911
912/*
913 * ia64_mca_cmc_vector_enable
914 *
915 *  Unmask the corrected machine check vector register in the processor.
916 *  This function is invoked on a per-processor basis.
917 *
918 * Inputs
919 *      dummy(unused)
920 *
921 * Outputs
922 *      None
923 */
924static void
925ia64_mca_cmc_vector_enable (void *dummy)
926{
927        cmcv_reg_t      cmcv;
928
929        cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);
930
931        cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
932        ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
933
934        IA64_MCA_DEBUG("%s: CPU %d corrected "
935                       "machine check vector %#x enabled.\n",
936                       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
937}
938
939#ifndef XEN
940/*
941 * ia64_mca_cmc_vector_disable_keventd
942 *
943 * Called via keventd (smp_call_function() is not safe in interrupt context) to
944 * disable the cmc interrupt vector.
945 */
946static void
947ia64_mca_cmc_vector_disable_keventd(void *unused)
948{
949        on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
950}
951
952/*
953 * ia64_mca_cmc_vector_enable_keventd
954 *
955 * Called via keventd (smp_call_function() is not safe in interrupt context) to
956 * enable the cmc interrupt vector.
957 */
958static void
959ia64_mca_cmc_vector_enable_keventd(void *unused)
960{
961        on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
962}
963#endif /* !XEN  */
964
965/*
966 * ia64_mca_wakeup_ipi_wait
967 *
968 *      Wait for the inter-cpu interrupt to be sent by the
969 *      monarch processor once it is done with handling the
970 *      MCA.
971 *
972 *  Inputs  :   None
973 *  Outputs :   None
974 */
975static void
976ia64_mca_wakeup_ipi_wait(void)
977{
978        int     irr_num = (IA64_MCA_WAKEUP_VECTOR >> 6);
979        int     irr_bit = (IA64_MCA_WAKEUP_VECTOR & 0x3f);
980        u64     irr = 0;
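        /* The wakeup vector selects one of the four 64-bit IRR registers
         * (vector >> 6) and a bit within it (vector & 0x3f); spin below
         * until the monarch's wakeup IPI sets that bit. */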
981
982        do {
983                switch(irr_num) {
984                      case 0:
985                        irr = ia64_getreg(_IA64_REG_CR_IRR0);
986                        break;
987                      case 1:
988                        irr = ia64_getreg(_IA64_REG_CR_IRR1);
989                        break;
990                      case 2:
991                        irr = ia64_getreg(_IA64_REG_CR_IRR2);
992                        break;
993                      case 3:
994                        irr = ia64_getreg(_IA64_REG_CR_IRR3);
995                        break;
996                }
997                cpu_relax();
998        } while (!(irr & (1UL << irr_bit))) ;
999}
1000
1001/*
1002 * ia64_mca_wakeup
1003 *
1004 *      Send an inter-cpu interrupt to wake up a particular cpu
1005 *      and mark that cpu as out of rendezvous.
1006 *
1007 *  Inputs  :   cpuid
1008 *  Outputs :   None
1009 */
1010static void
1011ia64_mca_wakeup(int cpu)
1012{
1013        platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
1014        ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
1015
1016}
1017
1018/*
1019 * ia64_mca_wakeup_all
1020 *
1021 *      Wake up all the cpus which have previously rendezvoused.
1022 *
1023 *  Inputs  :   None
1024 *  Outputs :   None
1025 */
1026static void
1027ia64_mca_wakeup_all(void)
1028{
1029        int cpu;
1030
1031        /* Clear the Rendez checkin flag for all cpus */
1032        for(cpu = 0; cpu < NR_CPUS; cpu++) {
1033                if (!cpu_online(cpu))
1034                        continue;
1035                if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
1036                        ia64_mca_wakeup(cpu);
1037        }
1038
1039}
1040
1041/*
1042 * ia64_mca_rendez_interrupt_handler
1043 *
1044 *      This is the handler used to put slave processors into a spin loop
1045 *      while the monarch processor does the MCA handling; each slave is
1046 *      woken up once the monarch is done.
1047 *
1048 *  Inputs  :   None
1049 *  Outputs :   None
1050 */
1051static irqreturn_t
1052ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
1053{
1054        unsigned long flags;
1055        int cpu = smp_processor_id();
1056
1057        /* Mask all interrupts */
1058        local_irq_save(flags);
1059
1060        ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
1061        /* Register with the SAL monarch that the slave has
1062         * reached SAL
1063         */
1064        ia64_sal_mc_rendez();
1065
1066        /* Wait for the wakeup IPI from the monarch
1067         * This waiting is done by polling on the wakeup-interrupt
1068         * vector bit in the processor's IRRs
1069         */
1070        ia64_mca_wakeup_ipi_wait();
1071
1072        /* Enable all interrupts */
1073        local_irq_restore(flags);
1074        return IRQ_HANDLED;
1075}
1076
1077/*
1078 * ia64_mca_wakeup_int_handler
1079 *
1080 *      The interrupt handler for processing the inter-cpu interrupt to the
1081 *      slave cpu which was spinning in the rendez loop.
1082 *      Since this spinning is done by turning off the interrupts and
1083 *      polling on the wakeup-interrupt bit in the IRR, there is
1084 *      nothing useful to be done in the handler.
1085 *
1086 *  Inputs  :   wakeup_irq  (Wakeup-interrupt bit)
1087 *      arg             (Interrupt handler specific argument)
1088 *      ptregs          (Exception frame at the time of the interrupt)
1089 *  Outputs :   None
1090 *
1091 */
1092static irqreturn_t
1093ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg, struct pt_regs *ptregs)
1094{
1095        return IRQ_HANDLED;
1096}
1097
1098/*
1099 * ia64_return_to_sal_check
1100 *
1101 *      This function is called before returning from the OS_MCA handler
1102 *      to the OS_MCA dispatch code, which finally hands control back
1103 *      to SAL.
1104 *      Its main purpose is to set up the OS_MCA-to-SAL return state
1105 *      that the OS_MCA dispatch code uses just before going back
1106 *      to SAL.
1107 *
1108 *  Inputs  :   None
1109 *  Outputs :   None
1110 */
1111
1112static void
1113ia64_return_to_sal_check(int recover)
1114{
1115#ifdef XEN
1116        int cpu = smp_processor_id();
1117#endif
1118
1119        /* Copy over some relevant stuff from the sal_to_os_mca_handoff
1120         * so that it can be used at the time of os_mca_to_sal_handoff
1121         */
1122#ifdef XEN
1123        ia64_os_to_sal_handoff_state.imots_sal_gp =
1124                ia64_sal_to_os_handoff_state[cpu].imsto_sal_gp;
1125
1126        ia64_os_to_sal_handoff_state.imots_sal_check_ra =
1127                ia64_sal_to_os_handoff_state[cpu].imsto_sal_check_ra;
1128#else
1129        ia64_os_to_sal_handoff_state.imots_sal_gp =
1130                ia64_sal_to_os_handoff_state.imsto_sal_gp;
1131
1132        ia64_os_to_sal_handoff_state.imots_sal_check_ra =
1133                ia64_sal_to_os_handoff_state.imsto_sal_check_ra;
1134#endif
1135
1136        if (recover)
1137                ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_CORRECTED;
1138        else
1139                ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_COLD_BOOT;
1140
1141        /* Default = tell SAL to return to same context */
1142        ia64_os_to_sal_handoff_state.imots_context = IA64_MCA_SAME_CONTEXT;
1143
1144#ifdef XEN
1145        ia64_os_to_sal_handoff_state.imots_new_min_state =
1146                (u64 *)ia64_sal_to_os_handoff_state[cpu].pal_min_state;
1147#else
1148        ia64_os_to_sal_handoff_state.imots_new_min_state =
1149                (u64 *)ia64_sal_to_os_handoff_state.pal_min_state;
1150#endif
1151
1152}
1153
1154/* Function pointer for extra MCA recovery */
1155int (*ia64_mca_ucmc_extension)
1156        (void*,ia64_mca_sal_to_os_state_t*,ia64_mca_os_to_sal_state_t*)
1157        = NULL;
1158
1159int
1160ia64_reg_MCA_extension(void *fn)
1161{
1162        if (ia64_mca_ucmc_extension)
1163                return 1;
1164
1165        ia64_mca_ucmc_extension = fn;
1166        return 0;
1167}
1168
1169void
1170ia64_unreg_MCA_extension(void)
1171{
1172        if (ia64_mca_ucmc_extension)
1173                ia64_mca_ucmc_extension = NULL;
1174}
1175
1176EXPORT_SYMBOL(ia64_reg_MCA_extension);
1177EXPORT_SYMBOL(ia64_unreg_MCA_extension);
1178
1179/*
1180 * ia64_mca_ucmc_handler
1181 *
1182 *      This is the uncorrectable machine check handler, called from the
1183 *      OS_MCA dispatch code, which is in turn called from SAL_CHECK().
1184 *      This is where the core of OS MCA handling is done.
1185 *      Right now the logs are extracted and displayed in a well-defined
1186 *      format.  This handler is supposed to run only on the monarch
1187 *      processor.  Once the monarch is done with MCA handling, further
1188 *      MCA logging is enabled by clearing the logs.
1189 *      The monarch also has the duty of sending wakeup IPIs to pull the
1190 *      slave processors out of the rendezvous spin loop.
1191 *
1192 *  Inputs  :   None
1193 *  Outputs :   None
1194 */
1195void
1196ia64_mca_ucmc_handler(void)
1197{
1198#ifdef XEN
1199        int cpu = smp_processor_id();
1200        pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
1201                &ia64_sal_to_os_handoff_state[cpu].proc_state_param;
1202#else
1203        pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
1204                &ia64_sal_to_os_handoff_state.proc_state_param;
1205#endif
1206        int recover; 
1207
1208#ifndef XEN
1209        /* Get the MCA error record and log it */
1210        ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
1211#else
1212        ia64_log_queue(SAL_INFO_TYPE_MCA, VIRQ_MCA_CMC);
1213        send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CMC);
1214#endif
1215
1216        /* Recover if a TLB error is the only error reported in this SAL error record */
1217        recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc))
1218        /* other error recovery */
1219#ifndef XEN
1220           || (ia64_mca_ucmc_extension
1221                && ia64_mca_ucmc_extension(
1222                        IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
1223                        &ia64_sal_to_os_handoff_state,
1224                        &ia64_os_to_sal_handoff_state)); 
1225#else
1226        ;
1227#endif
1228
1229#ifndef XEN
1230        if (recover) {
1231                sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
1232                rh->severity = sal_log_severity_corrected;
1233                ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
1234        }
1235#endif
1236        /*
1237         *  Wakeup all the processors which are spinning in the rendezvous
1238         *  loop.
1239         */
1240        ia64_mca_wakeup_all();
1241
1242        /* Return to SAL */
1243        ia64_return_to_sal_check(recover);
1244}
1245
1246#ifndef XEN
1247static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
1248static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
1249#endif
1250
1251/*
1252 * ia64_mca_cmc_int_handler
1253 *
1254 *  This is the corrected machine check interrupt handler.
1255 *      Right now the logs are extracted and displayed in a well-defined
1256 *      format.
1257 *
1258 * Inputs
1259 *      interrupt number
1260 *      client data arg ptr
1261 *      saved registers ptr
1262 *
1263 * Outputs
1264 *      None
1265 */
1266static irqreturn_t
1267ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
1268{
1269        static unsigned long    cmc_history[CMC_HISTORY_LENGTH];
1270        static int              index;
1271        static DEFINE_SPINLOCK(cmc_history_lock);
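        /* cmc_history holds the timestamps of the last CMC_HISTORY_LENGTH
         * interrupts; if that many arrive within roughly one second, the
         * vector is disabled and we fall back to timer-driven polling. */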
1272
1273        IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
1274                       __FUNCTION__, cmc_irq, smp_processor_id());
1275
1276        /* SAL spec states this should run w/ interrupts enabled */
1277        local_irq_enable();
1278
1279#ifndef XEN     
1280        /* Get the CMC error record and log it */
1281        ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);
1282#else
1283        ia64_log_queue(SAL_INFO_TYPE_CMC, VIRQ_MCA_CMC);
1284        send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CMC);
1285#endif
1286
1287        spin_lock(&cmc_history_lock);
1288        if (!cmc_polling_enabled) {
1289                int i, count = 1; /* we know 1 happened now */
1290                unsigned long now = jiffies;
1291
1292                for (i = 0; i < CMC_HISTORY_LENGTH; i++) {
1293                        if (now - cmc_history[i] <= HZ)
1294                                count++;
1295                }
1296
1297                IA64_MCA_DEBUG(KERN_INFO "CMC threshold %d/%d\n", count, CMC_HISTORY_LENGTH);
1298                if (count >= CMC_HISTORY_LENGTH) {
1299
1300                        cmc_polling_enabled = 1;
1301                        spin_unlock(&cmc_history_lock);
1302#ifndef XEN     /* XXX FIXME */
1303                        schedule_work(&cmc_disable_work);
1304#else
1305                        cpumask_raise_softirq(cpu_online_map,
1306                                              CMC_DISABLE_SOFTIRQ);
1307#endif
1308
1309                        /*
1310                         * Corrected errors will still be corrected, but
1311                         * make sure there's a log somewhere that indicates
1312                         * something is generating more than we can handle.
1313                         */
1314                        printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n");
1315
1316                        mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
1317
1318                        /* lock already released, get out now */
1319                        return IRQ_HANDLED;
1320                } else {
1321                        cmc_history[index++] = now;
1322                        if (index == CMC_HISTORY_LENGTH)
1323                                index = 0;
1324                }
1325        }
1326        spin_unlock(&cmc_history_lock);
1327        return IRQ_HANDLED;
1328}
1329
1330/*
1331 *  ia64_mca_cmc_int_caller
1332 *
1333 *      Triggered by sw interrupt from CMC polling routine.  Calls
1334 *      real interrupt handler and either triggers a sw interrupt
1335 *      on the next cpu or does cleanup at the end.
1336 *
1337 * Inputs
1338 *      interrupt number
1339 *      client data arg ptr
1340 *      saved registers ptr
1341 * Outputs
1342 *      handled
1343 */
1344static irqreturn_t
1345ia64_mca_cmc_int_caller(int cmc_irq, void *arg, struct pt_regs *ptregs)
1346{
1347        static int start_count = -1;
1348        unsigned int cpuid;
1349
1350        cpuid = smp_processor_id();
1351
1352        /* If first cpu, update count */
1353        if (start_count == -1)
1354                start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);
1355
1356#ifndef XEN
1357        ia64_mca_cmc_int_handler(cmc_irq, arg, ptregs);
1358#else
1359        IA64_MCA_DEBUG("%s: received polling vector = %#x on CPU %d\n",
1360                       __FUNCTION__, cmc_irq, smp_processor_id());
1361        ia64_log_queue(SAL_INFO_TYPE_CMC, VIRQ_MCA_CMC);
1362#endif
1363
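        /* Pass the polling interrupt on to the next online CPU; the last
         * CPU in the cascade decides whether to re-arm the poll timer or
         * return to interrupt-driven mode. */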
1364        for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
1365
1366        if (cpuid < NR_CPUS) {
1367                platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
1368        } else {
1369                /* If no log record, switch out of polling mode */
1370                if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {
1371
1372                        printk(KERN_WARNING "Returning to interrupt driven CMC handler\n");
1373#ifndef XEN     /* XXX FIXME */
1374                        schedule_work(&cmc_enable_work);
1375#else
1376                        cpumask_raise_softirq(cpu_online_map,
1377                                              CMC_ENABLE_SOFTIRQ);
1378#endif
1379                        cmc_polling_enabled = 0;
1380
1381                } else {
1382
1383                        mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
1384                }
1385
1386                start_count = -1;
1387        }
1388        return IRQ_HANDLED;
1389}
1390
1391/*
1392 *  ia64_mca_cmc_poll
1393 *
1394 *      Poll for Corrected Machine Checks (CMCs)
1395 *
1396 * Inputs   :   dummy(unused)
1397 * Outputs  :   None
1398 *
1399 */
1400static void
1401#ifndef XEN
1402ia64_mca_cmc_poll (unsigned long dummy)
1403#else
1404ia64_mca_cmc_poll (void *dummy)
1405#endif
1406{
1407        /* Trigger a CMC interrupt cascade  */
1408        platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
1409}
1410
1411/*
1412 *  ia64_mca_cpe_int_caller
1413 *
1414 *      Triggered by sw interrupt from CPE polling routine.  Calls
1415 *      real interrupt handler and either triggers a sw interrupt
1416 *      on the next cpu or does cleanup at the end.
1417 *
1418 * Inputs
1419 *      interrupt number
1420 *      client data arg ptr
1421 *      saved registers ptr
1422 * Outputs
1423 *      handled
1424 */
1425#ifdef CONFIG_ACPI
1426
1427static irqreturn_t
1428ia64_mca_cpe_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
1429{
1430        static int start_count = -1;
1431#ifdef XEN
1432        static unsigned long poll_time = MIN_CPE_POLL_INTERVAL;
1433#else
1434        static int poll_time = MIN_CPE_POLL_INTERVAL;
1435#endif
1436        unsigned int cpuid;
1437
1438        cpuid = smp_processor_id();
1439
1440        /* If first cpu, update count */
1441        if (start_count == -1)
1442                start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);
1443
1444#ifndef XEN
1445        ia64_mca_cpe_int_handler(cpe_irq, arg, ptregs);
1446#else
1447        IA64_MCA_DEBUG("%s: received polling vector = %#x on CPU %d\n",
1448                       __FUNCTION__, cpe_irq, smp_processor_id());
1449        ia64_log_queue(SAL_INFO_TYPE_CPE, VIRQ_MCA_CPE);
1450#endif
1451
1452        for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
1453
1454        if (cpuid < NR_CPUS) {
1455                platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
1456        } else {
1457                /*
1458                 * If a log was recorded, increase our polling frequency,
1459                 * otherwise, backoff or return to interrupt mode.
1460                 */
1461                if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) {
1462                        poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time / 2);
1463                } else if (cpe_vector < 0) {
1464                        poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2);
1465                } else {
1466                        poll_time = MIN_CPE_POLL_INTERVAL;
1467
1468                        printk(KERN_WARNING "Returning to interrupt driven CPE handler\n");
1469                        enable_irq(local_vector_to_irq(IA64_CPE_VECTOR));
1470                        cpe_poll_enabled = 0;
1471                }
1472
1473                if (cpe_poll_enabled)
1474                        mod_timer(&cpe_poll_timer, jiffies + poll_time);
1475                start_count = -1;
1476        }
1477        return IRQ_HANDLED;
1478}
1479
1480/*
1481 *  ia64_mca_cpe_poll
1482 *
1483 *      Poll for Corrected Platform Errors (CPEs); trigger an interrupt on
1484 *      the first cpu, and from there it will trickle through all the cpus.
1485 *
1486 * Inputs   :   dummy(unused)
1487 * Outputs  :   None
1488 *
1489 */
1490static void
1491#ifndef XEN
1492ia64_mca_cpe_poll (unsigned long dummy)
1493#else
1494ia64_mca_cpe_poll (void *dummy)
1495#endif
1496{
1497        /* Trigger a CPE interrupt cascade  */
1498        platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
1499}
1500
1501#endif /* CONFIG_ACPI */
1502
1503/*
1504 * C portion of the OS INIT handler
1505 *
1506 * Called from ia64_monarch_init_handler
1507 *
1508 * Inputs: pointer to pt_regs where processor info was saved.
1509 *
1510 * Returns:
1511 *   0 if SAL must warm boot the System
1512 *   1 if SAL must return to interrupted context using PAL_MC_RESUME
1513 *
1514 */
1515void
1516ia64_init_handler (struct pt_regs *pt, struct switch_stack *sw)
1517{
1518        pal_min_state_area_t *ms;
1519
1520#ifndef XEN
1521        oops_in_progress = 1;   /* avoid deadlock in printk, but it makes recovery dodgy */
1522        console_loglevel = 15;  /* make sure printks make it to console */
1523
1524        printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n",
1525                ia64_sal_to_os_handoff_state.proc_state_param);
1526
1527        /*
1528         * Address of minstate area provided by PAL is physical,
1529         * uncacheable (bit 63 set). Convert to Linux virtual
1530         * address in region 6.
1531         */
1532        ms = (pal_min_state_area_t *)(ia64_sal_to_os_handoff_state.pal_min_state | (6ul<<61));
1533#else
1534        int cpu = smp_processor_id();
1535
1536        console_start_sync();
1537        printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n",
1538               ia64_sal_to_os_handoff_state[cpu].proc_state_param);
1539
1540        /* Xen virtual address in region 7. */
1541        ms = __va((pal_min_state_area_t *)(ia64_sal_to_os_handoff_state[cpu].pal_min_state));
1542#endif
1543
1544        init_handler_platform(ms, pt, sw);      /* call platform specific routines */
1545}
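
/*
 * For illustration, a sketch of the two address conversions used above.  On
 * ia64 the region number of a virtual address lives in bits 63:61, so the
 * native path forms an uncached identity mapping by OR-ing in (6ul << 61),
 * while the Xen path uses __va() to get the cached region-7 mapping.  The
 * array indexing below follows the Xen form and is shown only as an example:
 *
 *      u64 min_state_pa = ia64_sal_to_os_handoff_state[cpu].pal_min_state;
 *      pal_min_state_area_t *ms_uc =
 *              (pal_min_state_area_t *)(min_state_pa | (6ul << 61)); /* region 6, uncached */
 *      pal_min_state_area_t *ms_wb = __va(min_state_pa);             /* region 7, cached */
 */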
1546
1547static int __init
1548ia64_mca_disable_cpe_polling(char *str)
1549{
1550        cpe_poll_enabled = 0;
1551        return 1;
1552}
1553
1554__setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);
1555
1556static struct irqaction cmci_irqaction = {
1557        .handler =      ia64_mca_cmc_int_handler,
1558#ifndef XEN
1559        .flags =        SA_INTERRUPT,
1560#endif
1561        .name =         "cmc_hndlr"
1562};
1563
1564static struct irqaction cmcp_irqaction = {
1565        .handler =      ia64_mca_cmc_int_caller,
1566#ifndef XEN
1567        .flags =        SA_INTERRUPT,
1568#endif
1569        .name =         "cmc_poll"
1570};
1571
1572static struct irqaction mca_rdzv_irqaction = {
1573        .handler =      ia64_mca_rendez_int_handler,
1574#ifndef XEN
1575        .flags =        SA_INTERRUPT,
1576#endif
1577        .name =         "mca_rdzv"
1578};
1579
1580static struct irqaction mca_wkup_irqaction = {
1581        .handler =      ia64_mca_wakeup_int_handler,
1582#ifndef XEN
1583        .flags =        SA_INTERRUPT,
1584#endif
1585        .name =         "mca_wkup"
1586};
1587
1588#ifdef CONFIG_ACPI
1589static struct irqaction mca_cpe_irqaction = {
1590        .handler =      ia64_mca_cpe_int_handler,
1591#ifndef XEN
1592        .flags =        SA_INTERRUPT,
1593#endif
1594        .name =         "cpe_hndlr"
1595};
1596
1597static struct irqaction mca_cpep_irqaction = {
1598        .handler =      ia64_mca_cpe_int_caller,
1599#ifndef XEN
1600        .flags =        SA_INTERRUPT,
1601#endif
1602        .name =         "cpe_poll"
1603};
1604#endif /* CONFIG_ACPI */
1605
1606/* Do per-CPU MCA-related initialization.  */
1607
1608void __devinit
1609ia64_mca_cpu_init(void *cpu_data)
1610{
1611        void *pal_vaddr;
1612
1613        if (smp_processor_id() == 0) {
1614                void *mca_data;
1615                int cpu;
1616
1617#ifdef XEN
1618                unsigned int pageorder;
1619                pageorder  = get_order_from_bytes(sizeof(struct ia64_mca_cpu));
1620#else
1621                mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu)
1622                                         * NR_CPUS);
1623#endif
1624                for (cpu = 0; cpu < NR_CPUS; cpu++) {
1625#ifdef XEN
1626                        mca_data = alloc_xenheap_pages(pageorder);
1627                        __per_cpu_mca[cpu] = __pa(mca_data);
1628                        IA64_MCA_DEBUG("%s: __per_cpu_mca[%d]=%lx"
1629                                       "(mca_data[%d]=%lx)\n",
1630                                       __FUNCTION__, cpu, __per_cpu_mca[cpu],
1631                                       cpu, (u64)mca_data);
1632#else
1633                        __per_cpu_mca[cpu] = __pa(mca_data);
1634                        mca_data += sizeof(struct ia64_mca_cpu);
1635#endif
1636                }
1637        }
1638#ifdef XEN
1639        else if (sal_queue) {
1640                int i;
1641                for (i = 0; i < IA64_MAX_LOG_TYPES; i++)
1642                        ia64_log_queue(i, 0);
1643        }
1644#endif
1645
1646        /*
1647         * The MCA info structure was allocated earlier and its
1648         * physical address saved in __per_cpu_mca[cpu].  Copy that
1649         * address to ia64_mca_data so we can access it as a per-CPU
1650         * variable.
1651         */
1652        __get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()];
1653#ifdef XEN
1654        IA64_MCA_DEBUG("%s: CPU#%d, ia64_mca_data=%lx\n", __FUNCTION__,
1655                       smp_processor_id(), __get_cpu_var(ia64_mca_data));
1656
1657        /* sal_to_os_handoff for smp support */
1658        __get_cpu_var(ia64_sal_to_os_handoff_state_addr) =
1659                      __pa(&ia64_sal_to_os_handoff_state[smp_processor_id()]);
1660        IA64_MCA_DEBUG("%s: CPU#%d, ia64_sal_to_os=%lx\n", __FUNCTION__,
1661                       smp_processor_id(),
1662                       __get_cpu_var(ia64_sal_to_os_handoff_state_addr));
1663#endif
1664
1665        /*
1666         * Stash away a copy of the PTE needed to map the per-CPU page.
1667         * We may need it during MCA recovery.
1668         */
1669        __get_cpu_var(ia64_mca_per_cpu_pte) =
1670                pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL));
1671
1672        /*
1673         * Also, stash away a copy of the PAL address and the PTE
1674         * needed to map it.
1675         */
1676        pal_vaddr = efi_get_pal_addr();
1677        if (!pal_vaddr)
1678                return;
1679        __get_cpu_var(ia64_mca_pal_base) =
1680                GRANULEROUNDDOWN((unsigned long) pal_vaddr);
1681        __get_cpu_var(ia64_mca_pal_pte) = pte_val(mk_pte_phys(__pa(pal_vaddr),
1682                                                              PAGE_KERNEL));
1683}
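
/*
 * A rough sketch of what the Xen allocation above amounts to, assuming the
 * usual meaning of get_order_from_bytes() (the smallest order such that
 * PAGE_SIZE << order covers the requested size); the helper name here is
 * hypothetical:
 *
 *      static unsigned int order_for_bytes(unsigned long bytes)
 *      {
 *              unsigned int order = 0;
 *
 *              while ((PAGE_SIZE << order) < bytes)
 *                      order++;
 *              return order;
 *      }
 *
 * Each CPU then gets its own xenheap block of that order, and its physical
 * address is published in __per_cpu_mca[cpu] so the MCA code can reach it by
 * physical address.
 */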
1684
1685/*
1686 * ia64_mca_init
1687 *
1688 *  Do all the system-level MCA-specific initialization.
1689 *
1690 *      1. Register spinloop and wakeup request interrupt vectors
1691 *
1692 *      2. Register OS_MCA handler entry point
1693 *
1694 *      3. Register OS_INIT handler entry point
1695 *
1696 *      4. Initialize MCA/CMC/INIT-related log buffers maintained by the OS.
1697 *
1698 *  Note that this initialization is done very early before some kernel
1699 *  services are available.
1700 *
1701 *  Inputs  :   None
1702 *
1703 *  Outputs :   None
1704 */
1705void __init
1706ia64_mca_init(void)
1707{
1708        ia64_fptr_t *mon_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
1709        ia64_fptr_t *slave_init_ptr = (ia64_fptr_t *)ia64_slave_init_handler;
1710        ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
1711        int i;
1712        s64 rc;
1713        struct ia64_sal_retval isrv;
1714        u64 timeout = IA64_MCA_RENDEZ_TIMEOUT;  /* platform specific */
1715
1716#ifdef XEN
1717        slave_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
1718#endif
1719
1720        IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);
1721
1722        /* Clear the Rendez checkin flag for all cpus */
1723        for(i = 0 ; i < NR_CPUS; i++)
1724                ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
1725
1726        /*
1727         * Register the rendezvous spinloop and wakeup mechanism with SAL
1728         */
1729
1730        /* Register the rendezvous interrupt vector with SAL */
1731        while (1) {
1732                isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT,
1733                                              SAL_MC_PARAM_MECHANISM_INT,
1734                                              IA64_MCA_RENDEZ_VECTOR,
1735                                              timeout,
1736                                              SAL_MC_PARAM_RZ_ALWAYS);
1737                rc = isrv.status;
1738                if (rc == 0)
1739                        break;
1740                if (rc == -2) {
1741                        printk(KERN_INFO "Increasing MCA rendezvous timeout from "
1742                                "%ld to %ld milliseconds\n", timeout, isrv.v0);
1743                        timeout = isrv.v0;
1744                        continue;
1745                }
1746                printk(KERN_ERR "Failed to register rendezvous interrupt "
1747                       "with SAL (status %ld)\n", rc);
1748                return;
1749        }
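
        /*
         * The while-loop just above follows a small handshake with SAL;
         * summarized from the code itself rather than the SAL spec:
         *
         *      status == 0     rendezvous vector registered, proceed
         *      status == -2    SAL wants a longer rendezvous timeout; retry
         *                      with the value it suggests in isrv.v0
         *      otherwise       give up; MCA handling stays unregistered
         */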
1750
1751        /* Register the wakeup interrupt vector with SAL */
1752        isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_WAKEUP,
1753                                      SAL_MC_PARAM_MECHANISM_INT,
1754                                      IA64_MCA_WAKEUP_VECTOR,
1755                                      0, 0);
1756        rc = isrv.status;
1757        if (rc) {
1758                printk(KERN_ERR "Failed to register wakeup interrupt with SAL "
1759                       "(status %ld)\n", rc);
1760                return;
1761        }
1762
1763        IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __FUNCTION__);
1764
1765        ia64_mc_info.imi_mca_handler        = ia64_tpa(mca_hldlr_ptr->fp);
1766        /*
1767         * XXX - disable SAL checksum by setting size to 0; should be
1768         *      ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch);
1769         */
1770        ia64_mc_info.imi_mca_handler_size       = 0;
1771
1772        /* Register the os mca handler with SAL */
1773        if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
1774                                       ia64_mc_info.imi_mca_handler,
1775                                       ia64_tpa(mca_hldlr_ptr->gp),
1776                                       ia64_mc_info.imi_mca_handler_size,
1777                                       0, 0, 0)))
1778        {
1779                printk(KERN_ERR "Failed to register OS MCA handler with SAL "
1780                       "(status %ld)\n", rc);
1781                return;
1782        }
1783
1784        IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __FUNCTION__,
1785                       ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));
1786
1787        /*
1788         * XXX - disable SAL checksum by setting size to 0, should be
1789         * size of the actual init handler in mca_asm.S.
1790         */
1791        ia64_mc_info.imi_monarch_init_handler           = ia64_tpa(mon_init_ptr->fp);
1792        ia64_mc_info.imi_monarch_init_handler_size      = 0;
1793        ia64_mc_info.imi_slave_init_handler             = ia64_tpa(slave_init_ptr->fp);
1794        ia64_mc_info.imi_slave_init_handler_size        = 0;
1795
1796        IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__,
1797                       ia64_mc_info.imi_monarch_init_handler);
1798
1799        /* Register the os init handler with SAL */
1800        if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
1801                                       ia64_mc_info.imi_monarch_init_handler,
1802                                       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
1803                                       ia64_mc_info.imi_monarch_init_handler_size,
1804                                       ia64_mc_info.imi_slave_init_handler,
1805                                       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
1806                                       ia64_mc_info.imi_slave_init_handler_size)))
1807        {
1808                printk(KERN_ERR "Failed to register m/s INIT handlers with SAL "
1809                       "(status %ld)\n", rc);
1810                return;
1811        }
1812
1813        IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__);
1814
1815        /*
1816         *  Configure the CMCI/P vector and handler. Interrupts for CMC are
1817 *  per-processor, so AP CMC interrupts are set up in smp_callin() (smpboot.c).
1818         */
1819        register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
1820        register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
1821        ia64_mca_cmc_vector_setup();       /* Setup vector on BSP */
1822
1823        /* Setup the MCA rendezvous interrupt vector */
1824        register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);
1825
1826        /* Setup the MCA wakeup interrupt vector */
1827        register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);
1828
1829#ifdef CONFIG_ACPI
1830        /* Setup the CPEI/P handler */
1831        register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
1832#endif
1833
1834        /* Initialize the areas set aside by the OS to buffer the
1835         * platform/processor error states for MCA/INIT/CMC/CPE
1836         * handling.
1837         */
1838        ia64_log_init(SAL_INFO_TYPE_MCA);
1839        ia64_log_init(SAL_INFO_TYPE_INIT);
1840        ia64_log_init(SAL_INFO_TYPE_CMC);
1841        ia64_log_init(SAL_INFO_TYPE_CPE);
1842
1843#ifdef XEN
1844        INIT_LIST_HEAD(&sal_log_queues[SAL_INFO_TYPE_MCA]);
1845        INIT_LIST_HEAD(&sal_log_queues[SAL_INFO_TYPE_INIT]);
1846        INIT_LIST_HEAD(&sal_log_queues[SAL_INFO_TYPE_CMC]);
1847        INIT_LIST_HEAD(&sal_log_queues[SAL_INFO_TYPE_CPE]);
1848
1849        /* NULL sal_queue used elsewhere to determine MCA init state */
1850        sal_queue = sal_log_queues;
1851
1852        open_softirq(CMC_DISABLE_SOFTIRQ,
1853                     (softirq_handler)ia64_mca_cmc_vector_disable);
1854        open_softirq(CMC_ENABLE_SOFTIRQ,
1855                     (softirq_handler)ia64_mca_cmc_vector_enable);
1856
1857        for (i = 0; i < IA64_MAX_LOG_TYPES; i++)
1858                ia64_log_queue(i, 0);
1859#endif
1860
1861        mca_init = 1;
1862        printk(KERN_INFO "MCA related initialization done\n");
1863}
1864
1865/*
1866 * ia64_mca_late_init
1867 *
1868 *      Opportunity to set up things that require initialization later
1869 *      than ia64_mca_init.  Setup a timer to poll for CPEs if the
1870 *      platform doesn't support an interrupt driven mechanism.
1871 *
1872 *  Inputs  :   None
1873 *  Outputs :   Status
1874 */
1875static int __init
1876ia64_mca_late_init(void)
1877{
1878        if (!mca_init)
1879                return 0;
1880
1881        /* Setup the CMCI/P vector and handler */
1882#ifndef XEN
1883        init_timer(&cmc_poll_timer);
1884        cmc_poll_timer.function = ia64_mca_cmc_poll;
1885#else
1886        init_timer(&cmc_poll_timer, ia64_mca_cmc_poll,
1887                   NULL, smp_processor_id());
1888#endif
1889
1890        /* Unmask/enable the vector */
1891        cmc_polling_enabled = 0;
1892#ifndef XEN     /* XXX FIXME */
1893        schedule_work(&cmc_enable_work);
1894#else
1895        cpumask_raise_softirq(cpu_online_map, CMC_ENABLE_SOFTIRQ);
1896#endif
1897
1898        IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__);
1899
1900#ifdef CONFIG_ACPI
1901        /* Setup the CPEI/P vector and handler */
1902        cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
1903#ifndef XEN
1904        init_timer(&cpe_poll_timer);
1905        cpe_poll_timer.function = ia64_mca_cpe_poll;
1906#else
1907        init_timer(&cpe_poll_timer, ia64_mca_cpe_poll,
1908                   NULL, smp_processor_id());
1909#endif
1910
1911        {
1912                irq_desc_t *desc;
1913                unsigned int irq;
1914
1915                if (cpe_vector >= 0) {
1916                        /* If platform supports CPEI, enable the irq. */
1917                        cpe_poll_enabled = 0;
1918                        for (irq = 0; irq < NR_IRQS; ++irq)
1919                                if (irq_to_vector(irq) == cpe_vector) {
1920                                        desc = irq_descp(irq);
1921                                        desc->status |= IRQ_PER_CPU;
1922                                        setup_irq(irq, &mca_cpe_irqaction);
1923                                }
1924                        ia64_mca_register_cpev(cpe_vector);
1925                        IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", __FUNCTION__);
1926                } else {
1927                        /* If platform doesn't support CPEI, get the timer going. */
1928                        if (cpe_poll_enabled) {
1929                                ia64_mca_cpe_poll(0UL);
1930                                IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __FUNCTION__);
1931                        }
1932                }
1933        }
1934#endif
1935
1936        return 0;
1937}
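
/*
 * A minimal sketch of the vector-to-irq reverse lookup used for the CPEI
 * case above; there is no direct reverse map here, so the code simply scans
 * every irq.  The helper name is hypothetical, and it returns only the first
 * match, whereas the code above sets up every matching irq:
 *
 *      static int irq_for_vector(int vector)
 *      {
 *              unsigned int irq;
 *
 *              for (irq = 0; irq < NR_IRQS; ++irq)
 *                      if (irq_to_vector(irq) == vector)
 *                              return irq;
 *              return -1;      /* no irq is routed to this vector */
 *      }
 */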
1938
1939#ifndef XEN
1940device_initcall(ia64_mca_late_init);
1941#else
1942__initcall(ia64_mca_late_init);
1943#endif