arch/ia64/kernel/mca.c
/*
 * File:        mca.c
 * Purpose:     Generic MCA handling layer
 *
 * Copyright (C) 2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Copyright (C) 2002 Dell Inc.
 * Copyright (C) Matt Domsch <Matt_Domsch@dell.com>
 *
 * Copyright (C) 2002 Intel
 * Copyright (C) Jenna Hall <jenna.s.hall@intel.com>
 *
 * Copyright (C) 2001 Intel
 * Copyright (C) Fred Lewis <frederick.v.lewis@intel.com>
 *
 * Copyright (C) 2000 Intel
 * Copyright (C) Chuck Fleckenstein <cfleck@co.intel.com>
 *
 * Copyright (C) 1999, 2004-2008 Silicon Graphics, Inc.
 * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
 *
 * Copyright (C) 2006 FUJITSU LIMITED
 * Copyright (C) Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
 *
 * 2000-03-29 Chuck Fleckenstein <cfleck@co.intel.com>
 *            Fixed PAL/SAL update issues, began MCA bug fixes, logging issues,
 *            added min save state dump, added INIT handler.
 *
 * 2001-01-03 Fred Lewis <frederick.v.lewis@intel.com>
 *            Added setup of CMCI and CPEI IRQs, logging of corrected platform
 *            errors, completed code for logging of corrected & uncorrected
 *            machine check errors, and updated for conformance with Nov. 2000
 *            revision of the SAL 3.0 spec.
 *
 * 2002-01-04 Jenna Hall <jenna.s.hall@intel.com>
 *            Aligned MCA stack to 16 bytes, added platform vs. CPU error flag,
 *            set SAL default return values, changed error record structure to
 *            linked list, added init call to sal_get_state_info_size().
 *
 * 2002-03-25 Matt Domsch <Matt_Domsch@dell.com>
 *            GUID cleanups.
 *
 * 2003-04-15 David Mosberger-Tang <davidm@hpl.hp.com>
 *            Added INIT backtrace support.
 *
 * 2003-12-08 Keith Owens <kaos@sgi.com>
 *            smp_call_function() must not be called from interrupt context
 *            (can deadlock on tasklist_lock).
 *            Use keventd to call smp_call_function().
 *
 * 2004-02-01 Keith Owens <kaos@sgi.com>
 *            Avoid deadlock when using printk() for MCA and INIT records.
 *            Delete all record printing code, moved to salinfo_decode in user
 *            space.  Mark variables and functions static where possible.
 *            Delete dead variables and functions.  Reorder to remove the need
 *            for forward declarations and to consolidate related code.
 *
 * 2005-08-12 Keith Owens <kaos@sgi.com>
 *            Convert MCA/INIT handlers to use per event stacks and SAL/OS
 *            state.
 *
 * 2005-10-07 Keith Owens <kaos@sgi.com>
 *            Add notify_die() hooks.
 *
 * 2006-09-15 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
 *            Add printing support for MCA/INIT.
 *
 * 2007-04-27 Russ Anderson <rja@sgi.com>
 *            Support multiple cpus going through OS_MCA in the same event.
 */
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>
#include <linux/cpumask.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>

#include <asm/delay.h>
#include <asm/machvec.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/sal.h>
#include <asm/mca.h>
#include <asm/kexec.h>

#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/tlb.h>

#include "mca_drv.h"
#include "entry.h"
#if defined(IA64_MCA_DEBUG_INFO)
# define IA64_MCA_DEBUG(fmt...) printk(fmt)
#else
# define IA64_MCA_DEBUG(fmt...)
#endif

#define NOTIFY_INIT(event, regs, arg, spin)                             \
do {                                                                    \
        if ((notify_die((event), "INIT", (regs), (arg), 0, 0)           \
                        == NOTIFY_STOP) && ((spin) == 1))               \
                ia64_mca_spin(__func__);                                \
} while (0)

#define NOTIFY_MCA(event, regs, arg, spin)                              \
do {                                                                    \
        if ((notify_die((event), "MCA", (regs), (arg), 0, 0)            \
                        == NOTIFY_STOP) && ((spin) == 1))               \
                ia64_mca_spin(__func__);                                \
} while (0)
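
/*
 * Both macros route events through the kdebug notifier chain.  If a
 * registered handler (typically a debugger hooked via register_die_notifier)
 * returns NOTIFY_STOP and the call site passed spin == 1, the cpu parks
 * itself in ia64_mca_spin() instead of returning to SAL.
 */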

/* Used by mca_asm.S */
DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
DEFINE_PER_CPU(u64, ia64_mca_pal_pte);     /* PTE to map PAL code */
DEFINE_PER_CPU(u64, ia64_mca_pal_base);    /* vaddr PAL code granule */
DEFINE_PER_CPU(u64, ia64_mca_tr_reload);   /* Flag for TR reload */

unsigned long __per_cpu_mca[NR_CPUS];

/* In mca_asm.S */
extern void                     ia64_os_init_dispatch_monarch (void);
extern void                     ia64_os_init_dispatch_slave (void);

static int monarch_cpu = -1;

static ia64_mc_info_t           ia64_mc_info;

#define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */
#define MIN_CPE_POLL_INTERVAL (2*60*HZ)  /* 2 minutes */
#define CMC_POLL_INTERVAL     (1*60*HZ)  /* 1 minute */
#define CPE_HISTORY_LENGTH    5
#define CMC_HISTORY_LENGTH    5
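
/*
 * The *_HISTORY_LENGTH values double as rate-limit thresholds: if that
 * many interrupts of one type arrive within a second (see the checks in
 * the CMC/CPE interrupt handlers below), the interrupt is disabled and
 * the kernel falls back to timer-driven polling at the *_POLL_INTERVAL
 * rates above.
 */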

#ifdef CONFIG_ACPI
static struct timer_list cpe_poll_timer;
#endif
static struct timer_list cmc_poll_timer;
/*
 * This variable tells whether we are currently in polling mode.
 * Start with this in the wrong state so we won't play w/ timers
 * before the system is ready.
 */
static int cmc_polling_enabled = 1;

/*
 * Clearing this variable prevents CPE polling from getting activated
 * in mca_late_init.  Use it if your system doesn't provide a CPEI,
 * but encounters problems retrieving CPE logs.  This should only be
 * necessary for debugging.
 */
static int cpe_poll_enabled = 1;

extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);

static int mca_init __initdata;

/*
 * limited & delayed printing support for MCA/INIT handler
 */

#define mprintk(fmt...) ia64_mca_printk(fmt)

#define MLOGBUF_SIZE (512+256*NR_CPUS)
#define MLOGBUF_MSGMAX 256
static char mlogbuf[MLOGBUF_SIZE];
static DEFINE_SPINLOCK(mlogbuf_wlock);  /* mca context only */
static DEFINE_SPINLOCK(mlogbuf_rlock);  /* normal context only */
static unsigned long mlogbuf_start;
static unsigned long mlogbuf_end;
static unsigned int mlogbuf_finished = 0;
static unsigned long mlogbuf_timestamp = 0;
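
/*
 * mlogbuf is a plain circular buffer: mlogbuf_start == mlogbuf_end means
 * empty, and the writer keeps one slot free so that a full buffer is
 * distinguishable from an empty one.  Writers run in MCA/INIT context
 * under mlogbuf_wlock; the reader runs in normal context under
 * mlogbuf_rlock.
 */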

static int loglevel_save = -1;
#define BREAK_LOGLEVEL(__console_loglevel)              \
        oops_in_progress = 1;                           \
        if (loglevel_save < 0)                          \
                loglevel_save = __console_loglevel;     \
        __console_loglevel = 15;

#define RESTORE_LOGLEVEL(__console_loglevel)            \
        if (loglevel_save >= 0) {                       \
                __console_loglevel = loglevel_save;     \
                loglevel_save = -1;                     \
        }                                               \
        mlogbuf_finished = 0;                           \
        oops_in_progress = 0;
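
/*
 * BREAK_LOGLEVEL/RESTORE_LOGLEVEL are used as a pair by the monarch.
 * Raising console_loglevel to 15 forces every message to the console,
 * and setting oops_in_progress lets the console code bypass locks that
 * may be held by the interrupted context, at the risk of garbled output.
 */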

/*
 * Push messages into buffer, print them later if not urgent.
 */
void ia64_mca_printk(const char *fmt, ...)
{
        va_list args;
        int printed_len;
        char temp_buf[MLOGBUF_MSGMAX];
        char *p;

        va_start(args, fmt);
        printed_len = vscnprintf(temp_buf, sizeof(temp_buf), fmt, args);
        va_end(args);

        /* Copy the output into mlogbuf */
        if (oops_in_progress) {
                /* mlogbuf was abandoned, use printk directly instead. */
                printk("%s", temp_buf);
        } else {
                spin_lock(&mlogbuf_wlock);
                for (p = temp_buf; *p; p++) {
                        unsigned long next = (mlogbuf_end + 1) % MLOGBUF_SIZE;
                        if (next != mlogbuf_start) {
                                mlogbuf[mlogbuf_end] = *p;
                                mlogbuf_end = next;
                        } else {
                                /* buffer full */
                                break;
                        }
                }
                mlogbuf[mlogbuf_end] = '\0';
                spin_unlock(&mlogbuf_wlock);
        }
}
EXPORT_SYMBOL(ia64_mca_printk);

/*
 * Print buffered messages.
 *  NOTE: call this after returning to normal context (e.g. from salinfod).
 */
void ia64_mlogbuf_dump(void)
{
        char temp_buf[MLOGBUF_MSGMAX];
        char *p;
        unsigned long index;
        unsigned long flags;
        unsigned int printed_len;

        /* Get output from mlogbuf */
        while (mlogbuf_start != mlogbuf_end) {
                temp_buf[0] = '\0';
                p = temp_buf;
                printed_len = 0;

                spin_lock_irqsave(&mlogbuf_rlock, flags);

                index = mlogbuf_start;
                while (index != mlogbuf_end) {
                        *p = mlogbuf[index];
                        index = (index + 1) % MLOGBUF_SIZE;
                        if (!*p)
                                break;
                        p++;
                        if (++printed_len >= MLOGBUF_MSGMAX - 1)
                                break;
                }
                *p = '\0';
                if (temp_buf[0])
                        printk("%s", temp_buf);
                mlogbuf_start = index;

                mlogbuf_timestamp = 0;
                spin_unlock_irqrestore(&mlogbuf_rlock, flags);
        }
}
EXPORT_SYMBOL(ia64_mlogbuf_dump);

/*
 * Call this if the system is going down or if messages must be flushed to
 * the console immediately (e.g. recovery failed, a crash dump is about to
 * be invoked, long-wait rendezvous etc.).
 *  NOTE: this should be called from the monarch.
 */
static void ia64_mlogbuf_finish(int wait)
{
        BREAK_LOGLEVEL(console_loglevel);

        spin_lock_init(&mlogbuf_rlock);
        ia64_mlogbuf_dump();
        printk(KERN_EMERG "mlogbuf_finish: printing switched to urgent mode, "
                "MCA/INIT might be dodgy or fail.\n");

        if (!wait)
                return;

        /* wait for console */
        printk("Delaying for 5 seconds...\n");
        udelay(5*1000000);

        mlogbuf_finished = 1;
}

/*
 * Print buffered messages from INIT context.
 */
static void ia64_mlogbuf_dump_from_init(void)
{
        if (mlogbuf_finished)
                return;

        if (mlogbuf_timestamp &&
                        time_before(jiffies, mlogbuf_timestamp + 30 * HZ)) {
                printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT "
                        "and the system seems to be messed up.\n");
                ia64_mlogbuf_finish(0);
                return;
        }

        if (!spin_trylock(&mlogbuf_rlock)) {
                printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT. "
                        "Generated messages other than stack dump will be "
                        "buffered to mlogbuf and will be printed later.\n");
                printk(KERN_ERR "INIT: If messages are not printed after "
                        "this INIT, wait 30sec and assert INIT again.\n");
                if (!mlogbuf_timestamp)
                        mlogbuf_timestamp = jiffies;
                return;
        }
        spin_unlock(&mlogbuf_rlock);
        ia64_mlogbuf_dump();
}

static inline void
ia64_mca_spin(const char *func)
{
        if (monarch_cpu == smp_processor_id())
                ia64_mlogbuf_finish(0);
        mprintk(KERN_EMERG "%s: spinning here, not returning to SAL\n", func);
        while (1)
                cpu_relax();
}

/*
 * IA64_MCA log support
 */
#define IA64_MAX_LOGS           2       /* Double-buffering for nested MCAs */
#define IA64_MAX_LOG_TYPES      4       /* MCA, INIT, CMC, CPE */

typedef struct ia64_state_log_s
{
        spinlock_t      isl_lock;
        int             isl_index;
        unsigned long   isl_count;
        ia64_err_rec_t  *isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */
} ia64_state_log_t;

static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];

#define IA64_LOG_ALLOCATE(it, size) \
        {ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
                (ia64_err_rec_t *)alloc_bootmem(size); \
        ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
                (ia64_err_rec_t *)alloc_bootmem(size);}
#define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
#define IA64_LOG_LOCK(it)      spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
#define IA64_LOG_UNLOCK(it)    spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
#define IA64_LOG_NEXT_INDEX(it)    ia64_state_log[it].isl_index
#define IA64_LOG_CURR_INDEX(it)    1 - ia64_state_log[it].isl_index
#define IA64_LOG_INDEX_INC(it) \
    {ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \
    ia64_state_log[it].isl_count++;}
#define IA64_LOG_INDEX_DEC(it) \
    ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index
#define IA64_LOG_NEXT_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]))
#define IA64_LOG_CURR_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
#define IA64_LOG_COUNT(it)         ia64_state_log[it].isl_count
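
/*
 * Each log type is double-buffered: isl_index names the buffer that SAL
 * will fill next, so IA64_LOG_CURR_INDEX is the other one (1 - index).
 * IA64_LOG_INDEX_INC flips the buffers and bumps isl_count once a record
 * has actually been retrieved, which lets a nested MCA be captured while
 * the previous record is still being consumed.
 */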

/*
 * ia64_log_init
 *      Reset the OS ia64 log buffer
 * Inputs   :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
 * Outputs  :   None
 */
static void __init
ia64_log_init(int sal_info_type)
{
        u64     max_size = 0;

        IA64_LOG_NEXT_INDEX(sal_info_type) = 0;
        IA64_LOG_LOCK_INIT(sal_info_type);

        /* SAL will tell us the maximum size of any error record of this type */
        max_size = ia64_sal_get_state_info_size(sal_info_type);
        if (!max_size)
                /* alloc_bootmem() doesn't like zero-sized allocations! */
                return;

        /* set up OS data structures to hold error info */
        IA64_LOG_ALLOCATE(sal_info_type, max_size);
        memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
        memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
}

/*
 * ia64_log_get
 *
 *      Get the current MCA log from SAL and copy it into the OS log buffer.
 *
 *  Inputs  :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
 *              irq_safe    whether you can use printk at this point
 *  Outputs :   size        (total record length)
 *              *buffer     (ptr to error record)
 *
 */
static u64
ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
{
        sal_log_record_header_t     *log_buffer;
        u64                         total_len = 0;
        unsigned long               s;

        IA64_LOG_LOCK(sal_info_type);

        /* Get the process state information */
        log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);

        total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);

        if (total_len) {
                IA64_LOG_INDEX_INC(sal_info_type);
                IA64_LOG_UNLOCK(sal_info_type);
                if (irq_safe) {
                        IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. Record length = %ld\n",
                                       __func__, sal_info_type, total_len);
                }
                *buffer = (u8 *) log_buffer;
                return total_len;
        } else {
                IA64_LOG_UNLOCK(sal_info_type);
                return 0;
        }
}
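
/*
 * Note the ordering above: SAL writes into the NEXT buffer, and the index
 * is flipped only when ia64_sal_get_state_info() actually returned a
 * record, at which point the new record becomes the CURR buffer.  The
 * lock is dropped before any printing so the debug printk cannot
 * deadlock against another cpu holding the same log lock.
 */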

/*
 *  ia64_mca_log_sal_error_record
 *
 *  This function retrieves a specified error record type from SAL
 *  and wakes up any processes waiting for error records.
 *
 *  Inputs  :   sal_info_type   (Type of error record MCA/CMC/CPE)
 *              FIXME: remove MCA and irq_safe.
 */
static void
ia64_mca_log_sal_error_record(int sal_info_type)
{
        u8 *buffer;
        sal_log_record_header_t *rh;
        u64 size;
        int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA;
#ifdef IA64_MCA_DEBUG_INFO
        static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
#endif

        size = ia64_log_get(sal_info_type, &buffer, irq_safe);
        if (!size)
                return;

        salinfo_log_wakeup(sal_info_type, buffer, size, irq_safe);

        if (irq_safe)
                IA64_MCA_DEBUG("CPU %d: SAL log contains %s error record\n",
                        smp_processor_id(),
                        sal_info_type < ARRAY_SIZE(rec_name) ? rec_name[sal_info_type] : "UNKNOWN");

        /* Clear logs from corrected errors in case there's no user-level logger */
        rh = (sal_log_record_header_t *)buffer;
        if (rh->severity == sal_log_severity_corrected)
                ia64_sal_clear_state_info(sal_info_type);
}

/*
 * search_mca_table
 *  See if the MCA surfaced in an instruction range
 *  that has been tagged as recoverable.
 *
 *  Inputs
 *      first   First address range to check
 *      last    Last address range to check
 *      ip      Instruction pointer, address we are looking for
 *
 * Return value:
 *      1 on Success (in the table) / 0 on Failure (not in the table)
 */
int
search_mca_table (const struct mca_table_entry *first,
                const struct mca_table_entry *last,
                unsigned long ip)
{
        const struct mca_table_entry *curr;
        u64 curr_start, curr_end;

        curr = first;
        while (curr <= last) {
                curr_start = (u64) &curr->start_addr + curr->start_addr;
                curr_end = (u64) &curr->end_addr + curr->end_addr;

                if ((ip >= curr_start) && (ip <= curr_end)) {
                        return 1;
                }
                curr++;
        }
        return 0;
}
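
/*
 * Table entries store self-relative offsets rather than absolute
 * addresses: the start (end) of a recoverable range is recovered by
 * adding the stored offset to the address of the field itself, as the
 * curr_start/curr_end computation above shows.  This keeps the
 * __mca_table section position independent.
 */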

/* Given an address, look for it in the mca tables. */
int mca_recover_range(unsigned long addr)
{
        extern struct mca_table_entry __start___mca_table[];
        extern struct mca_table_entry __stop___mca_table[];

        return search_mca_table(__start___mca_table, __stop___mca_table-1, addr);
}
EXPORT_SYMBOL_GPL(mca_recover_range);

#ifdef CONFIG_ACPI

int cpe_vector = -1;
int ia64_cpe_irq = -1;

static irqreturn_t
ia64_mca_cpe_int_handler (int cpe_irq, void *arg)
{
        static unsigned long    cpe_history[CPE_HISTORY_LENGTH];
        static int              index;
        static DEFINE_SPINLOCK(cpe_history_lock);

        IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
                       __func__, cpe_irq, smp_processor_id());

        /* SAL spec states this should run w/ interrupts enabled */
        local_irq_enable();

        spin_lock(&cpe_history_lock);
        if (!cpe_poll_enabled && cpe_vector >= 0) {
                int i, count = 1; /* we know 1 happened now */
                unsigned long now = jiffies;

                for (i = 0; i < CPE_HISTORY_LENGTH; i++) {
                        if (now - cpe_history[i] <= HZ)
                                count++;
                }

                IA64_MCA_DEBUG(KERN_INFO "CPE threshold %d/%d\n", count, CPE_HISTORY_LENGTH);
                if (count >= CPE_HISTORY_LENGTH) {
                        cpe_poll_enabled = 1;
                        spin_unlock(&cpe_history_lock);
                        disable_irq_nosync(local_vector_to_irq(IA64_CPE_VECTOR));

                        /*
                         * Corrected errors will still be corrected, but
                         * make sure there's a log somewhere that indicates
                         * something is generating more than we can handle.
                         */
                        printk(KERN_WARNING "WARNING: Switching to polling CPE handler; error records may be lost\n");

                        mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);

                        /* lock already released, get out now */
                        goto out;
                } else {
                        cpe_history[index++] = now;
                        if (index == CPE_HISTORY_LENGTH)
                                index = 0;
                }
        }
        spin_unlock(&cpe_history_lock);
out:
        /* Get the CPE error record and log it */
        ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);

        return IRQ_HANDLED;
}

#endif /* CONFIG_ACPI */

#ifdef CONFIG_ACPI
/*
 * ia64_mca_register_cpev
 *
 *  Register the corrected platform error vector with SAL.
 *
 *  Inputs
 *      cpev        Corrected Platform Error Vector number
 *
 *  Outputs
 *      None
 */
void
ia64_mca_register_cpev (int cpev)
{
        /* Register the CPE interrupt vector with SAL */
        struct ia64_sal_retval isrv;

        isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_CPE_INT, SAL_MC_PARAM_MECHANISM_INT, cpev, 0, 0);
        if (isrv.status) {
                printk(KERN_ERR "Failed to register Corrected Platform "
                       "Error interrupt vector with SAL (status %ld)\n", isrv.status);
                return;
        }

        IA64_MCA_DEBUG("%s: corrected platform error "
                       "vector %#x registered\n", __func__, cpev);
}
#endif /* CONFIG_ACPI */

/*
 * ia64_mca_cmc_vector_setup
 *
 *  Setup the corrected machine check vector register in the processor.
 *  (The interrupt is masked on boot; ia64_mca_late_init() unmasks it.)
 *  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      None
 *
 * Outputs
 *      None
 */
void __cpuinit
ia64_mca_cmc_vector_setup (void)
{
        cmcv_reg_t      cmcv;

        cmcv.cmcv_regval        = 0;
        cmcv.cmcv_mask          = 1;        /* Mask/disable interrupt at first */
        cmcv.cmcv_vector        = IA64_CMC_VECTOR;
        ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

        IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x registered.\n",
                       __func__, smp_processor_id(), IA64_CMC_VECTOR);

        IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
                       __func__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
}

/*
 * ia64_mca_cmc_vector_disable
 *
 *  Mask the corrected machine check vector register in the processor.
 *  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      dummy(unused)
 *
 * Outputs
 *      None
 */
static void
ia64_mca_cmc_vector_disable (void *dummy)
{
        cmcv_reg_t      cmcv;

        cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);

        cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
        ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

        IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x disabled.\n",
                       __func__, smp_processor_id(), cmcv.cmcv_vector);
}

/*
 * ia64_mca_cmc_vector_enable
 *
 *  Unmask the corrected machine check vector register in the processor.
 *  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      dummy(unused)
 *
 * Outputs
 *      None
 */
static void
ia64_mca_cmc_vector_enable (void *dummy)
{
        cmcv_reg_t      cmcv;

        cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);

        cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
        ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

        IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x enabled.\n",
                       __func__, smp_processor_id(), cmcv.cmcv_vector);
}

/*
 * ia64_mca_cmc_vector_disable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * disable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
{
        on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 0);
}

/*
 * ia64_mca_cmc_vector_enable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * enable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
{
        on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 0);
}

/*
 * ia64_mca_wakeup
 *
 *      Send an inter-cpu interrupt to wake up a particular cpu.
 *
 *  Inputs  :   cpuid
 *  Outputs :   None
 */
static void
ia64_mca_wakeup(int cpu)
{
        platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
}

/*
 * ia64_mca_wakeup_all
 *
 *      Wake up all the slave cpus that rendezvoused previously.
 *
 *  Inputs  :   None
 *  Outputs :   None
 */
static void
ia64_mca_wakeup_all(void)
{
        int cpu;

        /* Send a wakeup IPI to every cpu that checked in to the rendezvous */
        for_each_online_cpu(cpu) {
                if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
                        ia64_mca_wakeup(cpu);
        }
}

/*
 * ia64_mca_rendez_interrupt_handler
 *
 *      This is the handler used to put slave processors into a spin loop
 *      while the monarch processor does the mca handling, and later to
 *      wake each slave up once the monarch is done.  The state
 *      IA64_MCA_RENDEZ_CHECKIN_DONE indicates the cpu has rendezvoused
 *      in SAL.  The state IA64_MCA_RENDEZ_CHECKIN_NOTDONE indicates
 *      the cpu has come out of OS rendezvous.
 *
 *  Inputs  :   None
 *  Outputs :   None
 */
static irqreturn_t
ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
{
        unsigned long flags;
        int cpu = smp_processor_id();
        struct ia64_mca_notify_die nd =
                { .sos = NULL, .monarch_cpu = &monarch_cpu };

        /* Mask all interrupts */
        local_irq_save(flags);

        NOTIFY_MCA(DIE_MCA_RENDZVOUS_ENTER, get_irq_regs(), (long)&nd, 1);

        ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
        /* Register with the SAL monarch that the slave has
         * reached SAL
         */
        ia64_sal_mc_rendez();

        NOTIFY_MCA(DIE_MCA_RENDZVOUS_PROCESS, get_irq_regs(), (long)&nd, 1);

        /* Wait for the monarch cpu to exit. */
        while (monarch_cpu != -1)
               cpu_relax();     /* spin until monarch leaves */

        NOTIFY_MCA(DIE_MCA_RENDZVOUS_LEAVE, get_irq_regs(), (long)&nd, 1);

        ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
        /* Enable all interrupts */
        local_irq_restore(flags);
        return IRQ_HANDLED;
}

/*
 * ia64_mca_wakeup_int_handler
 *
 *      The interrupt handler for processing the inter-cpu interrupt to the
 *      slave cpu which was spinning in the rendez loop.
 *      Since this spinning is done by turning off the interrupts and
 *      polling on the wakeup-interrupt bit in the IRR, there is
 *      nothing useful to be done in the handler.
 *
 *  Inputs  :   wakeup_irq  (Wakeup-interrupt bit)
 *      arg             (Interrupt handler specific argument)
 *  Outputs :   None
 *
 */
static irqreturn_t
ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg)
{
        return IRQ_HANDLED;
}

/* Function pointer for extra MCA recovery */
int (*ia64_mca_ucmc_extension)
        (void*,struct ia64_sal_os_state*)
        = NULL;

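/*
 * Register/unregister an optional recovery hook.  ia64_reg_MCA_extension
 * returns 0 on success and 1 if an extension is already present; the
 * in-tree user is the mca_drv recovery module.  The hook is called from
 * ia64_mca_handler() with the current error record and the SAL/OS state,
 * and returns nonzero if it managed to recover from the error.
 */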
int
ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *))
{
        if (ia64_mca_ucmc_extension)
                return 1;

        ia64_mca_ucmc_extension = fn;
        return 0;
}

void
ia64_unreg_MCA_extension(void)
{
        if (ia64_mca_ucmc_extension)
                ia64_mca_ucmc_extension = NULL;
}

EXPORT_SYMBOL(ia64_reg_MCA_extension);
EXPORT_SYMBOL(ia64_unreg_MCA_extension);

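/* Copy one register from PAL minstate to its target save location,
 * propagating the matching NaT bit.  The NaT bit for a register saved at
 * address A lives at bit (A >> 3) & 63 of the corresponding NaT
 * collection, hence the fslot/tslot arithmetic below.
 */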
static inline void
copy_reg(const u64 *fr, u64 fnat, unsigned long *tr, unsigned long *tnat)
{
        u64 fslot, tslot, nat;
        *tr = *fr;
        fslot = ((unsigned long)fr >> 3) & 63;
        tslot = ((unsigned long)tr >> 3) & 63;
        *tnat &= ~(1UL << tslot);
        nat = (fnat >> fslot) & 1;
        *tnat |= (nat << tslot);
}

/* Change the comm field on the MCA/INIT task to include the pid that
 * was interrupted, which makes for easier debugging.  If that pid was 0
 * (swapper or nested MCA/INIT) then use the start of the previous comm
 * field suffixed with its cpu.
 */

static void
ia64_mca_modify_comm(const struct task_struct *previous_current)
{
        char *p, comm[sizeof(current->comm)];
        if (previous_current->pid)
                snprintf(comm, sizeof(comm), "%s %d",
                        current->comm, previous_current->pid);
        else {
                int l;
                if ((p = strchr(previous_current->comm, ' ')))
                        l = p - previous_current->comm;
                else
                        l = strlen(previous_current->comm);
                snprintf(comm, sizeof(comm), "%s %*s %d",
                        current->comm, l, previous_current->comm,
                        task_thread_info(previous_current)->cpu);
        }
        memcpy(current->comm, comm, sizeof(current->comm));
}

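/* Fill in the parts of pt_regs that mca_asm.S could not build, using PAL
 * minstate.  If psr.ic was set at the time of the event the pmsa_iip set
 * of registers describes the interrupted context; otherwise the event
 * arrived with interruption collection disabled, so the pmsa_xip set is
 * used and the pmsa_iip values are saved in sos for return to SAL.
 */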
static void
finish_pt_regs(struct pt_regs *regs, struct ia64_sal_os_state *sos,
                unsigned long *nat)
{
        const pal_min_state_area_t *ms = sos->pal_min_state;
        const u64 *bank;

        /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
         * pmsa_{xip,xpsr,xfs}
         */
        if (ia64_psr(regs)->ic) {
                regs->cr_iip = ms->pmsa_iip;
                regs->cr_ipsr = ms->pmsa_ipsr;
                regs->cr_ifs = ms->pmsa_ifs;
        } else {
                regs->cr_iip = ms->pmsa_xip;
                regs->cr_ipsr = ms->pmsa_xpsr;
                regs->cr_ifs = ms->pmsa_xfs;

                sos->iip = ms->pmsa_iip;
                sos->ipsr = ms->pmsa_ipsr;
                sos->ifs = ms->pmsa_ifs;
        }
        regs->pr = ms->pmsa_pr;
        regs->b0 = ms->pmsa_br0;
        regs->ar_rsc = ms->pmsa_rsc;
        copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &regs->r1, nat);
        copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &regs->r2, nat);
        copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &regs->r3, nat);
        copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &regs->r8, nat);
        copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &regs->r9, nat);
        copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &regs->r10, nat);
        copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &regs->r11, nat);
        copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &regs->r12, nat);
        copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &regs->r13, nat);
        copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &regs->r14, nat);
        copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &regs->r15, nat);
        if (ia64_psr(regs)->bn)
                bank = ms->pmsa_bank1_gr;
        else
                bank = ms->pmsa_bank0_gr;
        copy_reg(&bank[16-16], ms->pmsa_nat_bits, &regs->r16, nat);
        copy_reg(&bank[17-16], ms->pmsa_nat_bits, &regs->r17, nat);
        copy_reg(&bank[18-16], ms->pmsa_nat_bits, &regs->r18, nat);
        copy_reg(&bank[19-16], ms->pmsa_nat_bits, &regs->r19, nat);
        copy_reg(&bank[20-16], ms->pmsa_nat_bits, &regs->r20, nat);
        copy_reg(&bank[21-16], ms->pmsa_nat_bits, &regs->r21, nat);
        copy_reg(&bank[22-16], ms->pmsa_nat_bits, &regs->r22, nat);
        copy_reg(&bank[23-16], ms->pmsa_nat_bits, &regs->r23, nat);
        copy_reg(&bank[24-16], ms->pmsa_nat_bits, &regs->r24, nat);
        copy_reg(&bank[25-16], ms->pmsa_nat_bits, &regs->r25, nat);
        copy_reg(&bank[26-16], ms->pmsa_nat_bits, &regs->r26, nat);
        copy_reg(&bank[27-16], ms->pmsa_nat_bits, &regs->r27, nat);
        copy_reg(&bank[28-16], ms->pmsa_nat_bits, &regs->r28, nat);
        copy_reg(&bank[29-16], ms->pmsa_nat_bits, &regs->r29, nat);
        copy_reg(&bank[30-16], ms->pmsa_nat_bits, &regs->r30, nat);
        copy_reg(&bank[31-16], ms->pmsa_nat_bits, &regs->r31, nat);
}

/* On entry to this routine, we are running on the per cpu stack, see
 * mca_asm.h.  The original stack has not been touched by this event.  Some of
 * the original stack's registers will be in the RBS on this stack.  This stack
 * also contains a partial pt_regs and switch_stack, the rest of the data is in
 * PAL minstate.
 *
 * The first thing to do is modify the original stack to look like a blocked
 * task so we can run backtrace on the original task.  Also mark the per cpu
 * stack as current to ensure that we use the correct task state; it also means
 * that we can do backtrace on the MCA/INIT handler code itself.
 */

static struct task_struct *
ia64_mca_modify_original_stack(struct pt_regs *regs,
                const struct switch_stack *sw,
                struct ia64_sal_os_state *sos,
                const char *type)
{
        char *p;
        ia64_va va;
        extern char ia64_leave_kernel[];        /* Need asm address, not function descriptor */
        const pal_min_state_area_t *ms = sos->pal_min_state;
        struct task_struct *previous_current;
        struct pt_regs *old_regs;
        struct switch_stack *old_sw;
        unsigned size = sizeof(struct pt_regs) +
                        sizeof(struct switch_stack) + 16;
        unsigned long *old_bspstore, *old_bsp;
        unsigned long *new_bspstore, *new_bsp;
        unsigned long old_unat, old_rnat, new_rnat, nat;
        u64 slots, loadrs = regs->loadrs;
        u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1];
        u64 ar_bspstore = regs->ar_bspstore;
        u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16);
        const char *msg;
        int cpu = smp_processor_id();

        previous_current = curr_task(cpu);
        set_curr_task(cpu, current);
        if ((p = strchr(current->comm, ' ')))
                *p = '\0';

        /* Best effort attempt to cope with MCA/INIT delivered while in
         * physical mode.  If psr.dt or psr.rt was clear, the stack and RSE
         * pointers are physical (region 0) addresses; rewrite them into
         * region 7, the kernel's identity-mapped virtual region.
         */
        regs->cr_ipsr = ms->pmsa_ipsr;
        if (ia64_psr(regs)->dt == 0) {
                va.l = r12;
                if (va.f.reg == 0) {
                        va.f.reg = 7;
                        r12 = va.l;
                }
                va.l = r13;
                if (va.f.reg == 0) {
                        va.f.reg = 7;
                        r13 = va.l;
                }
        }
        if (ia64_psr(regs)->rt == 0) {
                va.l = ar_bspstore;
                if (va.f.reg == 0) {
                        va.f.reg = 7;
                        ar_bspstore = va.l;
                }
                va.l = ar_bsp;
                if (va.f.reg == 0) {
                        va.f.reg = 7;
                        ar_bsp = va.l;
                }
        }

        /* mca_asm.S ia64_old_stack() cannot assume that the dirty registers
         * have been copied to the old stack, the old stack may fail the
         * validation tests below.  So ia64_old_stack() must restore the dirty
         * registers from the new stack.  The old and new bspstore probably
         * have different alignments, so loadrs calculated on the old bsp
         * cannot be used to restore from the new bsp.  Calculate a suitable
         * loadrs for the new stack and save it in the new pt_regs, where
         * ia64_old_stack() can get it.
         */
        old_bspstore = (unsigned long *)ar_bspstore;
        old_bsp = (unsigned long *)ar_bsp;
        slots = ia64_rse_num_regs(old_bspstore, old_bsp);
        new_bspstore = (unsigned long *)((u64)current + IA64_RBS_OFFSET);
        new_bsp = ia64_rse_skip_regs(new_bspstore, slots);
        regs->loadrs = (new_bsp - new_bspstore) * 8 << 16;

        /* Verify the previous stack state before we change it */
        if (user_mode(regs)) {
                msg = "occurred in user space";
                /* previous_current is guaranteed to be valid when the task was
                 * in user space, so ...
                 */
                ia64_mca_modify_comm(previous_current);
                goto no_mod;
        }

        if (r13 != sos->prev_IA64_KR_CURRENT) {
                msg = "inconsistent previous current and r13";
                goto no_mod;
        }

        if (!mca_recover_range(ms->pmsa_iip)) {
                if ((r12 - r13) >= KERNEL_STACK_SIZE) {
                        msg = "inconsistent r12 and r13";
                        goto no_mod;
                }
                if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) {
                        msg = "inconsistent ar.bspstore and r13";
                        goto no_mod;
                }
                va.p = old_bspstore;
                if (va.f.reg < 5) {
                        msg = "old_bspstore is in the wrong region";
                        goto no_mod;
                }
                if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) {
                        msg = "inconsistent ar.bsp and r13";
                        goto no_mod;
                }
                size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8;
                if (ar_bspstore + size > r12) {
                        msg = "no room for blocked state";
                        goto no_mod;
                }
        }

        ia64_mca_modify_comm(previous_current);

        /* Make the original task look blocked.  First stack a struct pt_regs,
         * describing the state at the time of interrupt.  mca_asm.S built a
         * partial pt_regs, copy it and fill in the blanks using minstate.
         */
        p = (char *)r12 - sizeof(*regs);
        old_regs = (struct pt_regs *)p;
        memcpy(old_regs, regs, sizeof(*regs));
        old_regs->loadrs = loadrs;
        old_unat = old_regs->ar_unat;
        finish_pt_regs(old_regs, sos, &old_unat);

        /* Next stack a struct switch_stack.  mca_asm.S built a partial
         * switch_stack, copy it and fill in the blanks using pt_regs and
         * minstate.
         *
         * In the synthesized switch_stack, b0 points to ia64_leave_kernel,
         * ar.pfs is set to 0.
         *
         * unwind.c::unw_unwind() does special processing for interrupt frames.
         * It checks if the PRED_NON_SYSCALL predicate is set, if the predicate
         * is clear then unw_unwind() does _not_ adjust bsp over pt_regs.  Not
         * that this is documented, of course.  Set PRED_NON_SYSCALL in the
         * switch_stack on the original stack so it will unwind correctly when
         * unwind.c reads pt_regs.
         *
         * thread.ksp is updated to point to the synthesized switch_stack.
         */
        p -= sizeof(struct switch_stack);
        old_sw = (struct switch_stack *)p;
        memcpy(old_sw, sw, sizeof(*sw));
        old_sw->caller_unat = old_unat;
        old_sw->ar_fpsr = old_regs->ar_fpsr;
        copy_reg(&ms->pmsa_gr[4-1], ms->pmsa_nat_bits, &old_sw->r4, &old_unat);
        copy_reg(&ms->pmsa_gr[5-1], ms->pmsa_nat_bits, &old_sw->r5, &old_unat);
        copy_reg(&ms->pmsa_gr[6-1], ms->pmsa_nat_bits, &old_sw->r6, &old_unat);
        copy_reg(&ms->pmsa_gr[7-1], ms->pmsa_nat_bits, &old_sw->r7, &old_unat);
        old_sw->b0 = (u64)ia64_leave_kernel;
        old_sw->b1 = ms->pmsa_br1;
        old_sw->ar_pfs = 0;
        old_sw->ar_unat = old_unat;
        old_sw->pr = old_regs->pr | (1UL << PRED_NON_SYSCALL);
        previous_current->thread.ksp = (u64)p - 16;

        /* Finally copy the original stack's registers back to its RBS.
         * Registers from ar.bspstore through ar.bsp at the time of the event
         * are in the current RBS, copy them back to the original stack.  The
         * copy must be done register by register because the original bspstore
         * and the current one have different alignments, so the saved RNAT
         * data occurs at different places.
         *
         * mca_asm does cover, so the old_bsp already includes all registers at
         * the time of MCA/INIT.  It also does flushrs, so all registers before
         * this function have been written to backing store on the MCA/INIT
         * stack.
         */
        new_rnat = ia64_get_rnat(ia64_rse_rnat_addr(new_bspstore));
        old_rnat = regs->ar_rnat;
        while (slots--) {
                if (ia64_rse_is_rnat_slot(new_bspstore)) {
                        new_rnat = ia64_get_rnat(new_bspstore++);
                }
                if (ia64_rse_is_rnat_slot(old_bspstore)) {
                        *old_bspstore++ = old_rnat;
                        old_rnat = 0;
                }
                nat = (new_rnat >> ia64_rse_slot_num(new_bspstore)) & 1UL;
                old_rnat &= ~(1UL << ia64_rse_slot_num(old_bspstore));
                old_rnat |= (nat << ia64_rse_slot_num(old_bspstore));
                *old_bspstore++ = *new_bspstore++;
        }
        old_sw->ar_bspstore = (unsigned long)old_bspstore;
        old_sw->ar_rnat = old_rnat;

        sos->prev_task = previous_current;
        return previous_current;

no_mod:
        mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
                        smp_processor_id(), type, msg);
        old_unat = regs->ar_unat;
        finish_pt_regs(regs, sos, &old_unat);
        return previous_current;
}

/* The monarch/slave interaction is based on monarch_cpu and requires that all
 * slaves have entered rendezvous before the monarch leaves.  If any cpu has
 * not entered rendezvous yet then wait a bit.  The assumption is that any
 * slave that has not rendezvoused after a reasonable time is never going to do
 * so.  In this context, slave includes cpus that respond to the MCA rendezvous
 * interrupt, as well as cpus that receive the INIT slave event.
 */

static void
ia64_wait_for_slaves(int monarch, const char *type)
{
        int c, i, wait;

        /*
         * wait 5 seconds total for slaves (arbitrary)
         */
        for (i = 0; i < 5000; i++) {
                wait = 0;
                for_each_online_cpu(c) {
                        if (c == monarch)
                                continue;
                        if (ia64_mc_info.imi_rendez_checkin[c]
                                        == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
                                udelay(1000);           /* short wait */
                                wait = 1;
                                break;
                        }
                }
                if (!wait)
                        goto all_in;
        }

        /*
         * Some slave(s) may be dead.  Print buffered messages immediately.
         */
        ia64_mlogbuf_finish(0);
        mprintk(KERN_INFO "OS %s slave did not rendezvous on cpu", type);
        for_each_online_cpu(c) {
                if (c == monarch)
                        continue;
                if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE)
                        mprintk(" %d", c);
        }
        mprintk("\n");
        return;

all_in:
        mprintk(KERN_INFO "All OS %s slaves have reached rendezvous\n", type);
        return;
}

/*  mca_insert_tr
 *
 *  Switch the region id when reloading a TR, if needed.
 *  iord: 0x1: itr, 0x2: dtr
 *
 */
static void mca_insert_tr(u64 iord)
{
        int i;
        u64 old_rr;
        struct ia64_tr_entry *p;
        unsigned long psr;
        int cpu = smp_processor_id();

        psr = ia64_clear_ic();
        for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) {
                p = &__per_cpu_idtrs[cpu][iord-1][i];
                if (p->pte & 0x1) {
                        old_rr = ia64_get_rr(p->ifa);
                        if (old_rr != p->rr) {
                                ia64_set_rr(p->ifa, p->rr);
                                ia64_srlz_d();
                        }
                        ia64_ptr(iord, p->ifa, p->itir >> 2);
                        ia64_srlz_i();
                        if (iord & 0x1) {
                                ia64_itr(0x1, i, p->ifa, p->pte, p->itir >> 2);
                                ia64_srlz_i();
                        }
                        if (iord & 0x2) {
                                ia64_itr(0x2, i, p->ifa, p->pte, p->itir >> 2);
                                ia64_srlz_i();
                        }
                        if (old_rr != p->rr) {
                                ia64_set_rr(p->ifa, old_rr);
                                ia64_srlz_d();
                        }
                }
        }
        ia64_set_psr(psr);
}

/*
 * ia64_mca_handler
 *
 *      This is the uncorrectable machine check handler, called from the
 *      OS_MCA dispatch code which is in turn called from SAL_CHECK().
 *      This is the place where the core of OS MCA handling is done.
 *      Right now the logs are extracted and displayed in a well-defined
 *      format.  This handler code is supposed to be run only on the
 *      monarch processor.  Once the monarch is done with MCA handling,
 *      further MCA logging is enabled by clearing logs.
 *      The monarch also has the duty of sending wakeup-IPIs to pull the
 *      slave processors out of the rendezvous spinloop.
 *
 *      If multiple processors call into OS_MCA, the first will become
 *      the monarch.  Subsequent cpus will be recorded in the mca_cpu
 *      bitmask.  After the first monarch has processed its MCA, it
 *      will wake up the next cpu in the mca_cpu bitmask and then go
 *      into the rendezvous loop.  When all processors have serviced
 *      their MCA, the last monarch frees up the rest of the processors.
 */
void
ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
                 struct ia64_sal_os_state *sos)
{
        int recover, cpu = smp_processor_id();
        struct task_struct *previous_current;
        struct ia64_mca_notify_die nd =
                { .sos = sos, .monarch_cpu = &monarch_cpu, .data = &recover };
        static atomic_t mca_count;
        static cpumask_t mca_cpu;

        if (atomic_add_return(1, &mca_count) == 1) {
                monarch_cpu = cpu;
                sos->monarch = 1;
        } else {
                cpu_set(cpu, mca_cpu);
                sos->monarch = 0;
        }
        mprintk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d "
                "monarch=%ld\n", sos->proc_state_param, cpu, sos->monarch);

        previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");

        NOTIFY_MCA(DIE_MCA_MONARCH_ENTER, regs, (long)&nd, 1);

        ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
        if (sos->monarch) {
                ia64_wait_for_slaves(cpu, "MCA");

                /* Wakeup all the processors which are spinning in the
                 * rendezvous loop.  They will leave SAL, then spin in the OS
                 * with interrupts disabled until this monarch cpu leaves the
                 * MCA handler.  That gets control back to the OS so we can
                 * backtrace the other cpus, backtrace when spinning in SAL
                 * does not work.
                 */
                ia64_mca_wakeup_all();
        } else {
                while (cpu_isset(cpu, mca_cpu))
                        cpu_relax();    /* spin until monarch wakes us */
        }

        NOTIFY_MCA(DIE_MCA_MONARCH_PROCESS, regs, (long)&nd, 1);

        /* Get the MCA error record and log it */
        ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);

        /* MCA error recovery */
        recover = (ia64_mca_ucmc_extension
                && ia64_mca_ucmc_extension(
                        IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
                        sos));

        if (recover) {
                sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
                rh->severity = sal_log_severity_corrected;
                ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
                sos->os_status = IA64_MCA_CORRECTED;
        } else {
                /* Dump buffered message to console */
                ia64_mlogbuf_finish(1);
        }

        if (__get_cpu_var(ia64_mca_tr_reload)) {
                mca_insert_tr(0x1); /* Reload dynamic itrs */
                mca_insert_tr(0x2); /* Reload dynamic dtrs */
        }

        NOTIFY_MCA(DIE_MCA_MONARCH_LEAVE, regs, (long)&nd, 1);

        if (atomic_dec_return(&mca_count) > 0) {
                int i;

                /* wake up the next monarch cpu,
                 * and put this cpu in the rendez loop.
                 */
                for_each_online_cpu(i) {
                        if (cpu_isset(i, mca_cpu)) {
                                monarch_cpu = i;
                                cpu_clear(i, mca_cpu);  /* wake next cpu */
                                while (monarch_cpu != -1)
                                        cpu_relax();    /* spin until last cpu leaves */
                                set_curr_task(cpu, previous_current);
                                ia64_mc_info.imi_rendez_checkin[cpu]
                                                = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
                                return;
                        }
                }
        }
        set_curr_task(cpu, previous_current);
        ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
        monarch_cpu = -1;       /* This frees the slaves and previous monarchs */
}

static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd);
static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd);

/*
 * ia64_mca_cmc_int_handler
 *
 *  This is the corrected machine check interrupt handler.
 *      Right now the logs are extracted and displayed in a well-defined
 *      format.
 *
 * Inputs
 *      interrupt number
 *      client data arg ptr
 *
 * Outputs
 *      None
 */
1387 static irqreturn_t
1388 ia64_mca_cmc_int_handler(int cmc_irq, void *arg)
1389 {
1390         static unsigned long    cmc_history[CMC_HISTORY_LENGTH];
1391         static int              index;
1392         static DEFINE_SPINLOCK(cmc_history_lock);
1393
1394         IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
1395                        __func__, cmc_irq, smp_processor_id());
1396
1397         /* SAL spec states this should run w/ interrupts enabled */
1398         local_irq_enable();
1399
1400         spin_lock(&cmc_history_lock);
1401         if (!cmc_polling_enabled) {
1402                 int i, count = 1; /* we know 1 happened now */
1403                 unsigned long now = jiffies;
1404
1405                 for (i = 0; i < CMC_HISTORY_LENGTH; i++) {
1406                         if (now - cmc_history[i] <= HZ)
1407                                 count++;
1408                 }
1409
1410                 IA64_MCA_DEBUG(KERN_INFO "CMC threshold %d/%d\n", count, CMC_HISTORY_LENGTH);
1411                 if (count >= CMC_HISTORY_LENGTH) {
1412
1413                         cmc_polling_enabled = 1;
1414                         spin_unlock(&cmc_history_lock);
1415                         /* If we're being hit with CMC interrupts, we won't
1416                          * ever execute the schedule_work() below.  Need to
1417                          * disable CMC interrupts on this processor now.
1418                          */
1419                         ia64_mca_cmc_vector_disable(NULL);
1420                         schedule_work(&cmc_disable_work);
1421
1422                         /*
1423                          * Corrected errors will still be corrected, but
1424                          * make sure there's a log somewhere that indicates
1425                          * something is generating more than we can handle.
1426                          */
1427                         printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n");
1428
1429                         mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
1430
1431                         /* lock already released, get out now */
1432                         goto out;
1433                 } else {
1434                         cmc_history[index++] = now;
1435                         if (index == CMC_HISTORY_LENGTH)
1436                                 index = 0;
1437                 }
1438         }
1439         spin_unlock(&cmc_history_lock);
1440 out:
1441         /* Get the CMC error record and log it */
1442         ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);
1443
1444         return IRQ_HANDLED;
1445 }
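
/*
 * Illustrative sketch (hypothetical helper, not in this file): the
 * threshold test above is a generic storm detector -- it trips when
 * CMC_HISTORY_LENGTH interrupts land within one second.  Factored out,
 * under the same locking rule (cmc_history_lock held), it would read:
 *
 *	static int cmc_storm(unsigned long *hist, int len, int *idx)
 *	{
 *		unsigned long now = jiffies;
 *		int i, count = 1;       // count the interrupt we are in
 *
 *		for (i = 0; i < len; i++)
 *			if (now - hist[i] <= HZ)
 *				count++;
 *		if (count >= len)
 *			return 1;       // len CMCs inside one second
 *		hist[*idx] = now;
 *		*idx = (*idx + 1) % len;
 *		return 0;
 *	}
 */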
1446
1447 /*
1448  *  ia64_mca_cmc_int_caller
1449  *
1450  *      Triggered by sw interrupt from CMC polling routine.  Calls
1451  *      real interrupt handler and either triggers a sw interrupt
1452  *      on the next cpu or does cleanup at the end.
1453  *
1454  * Inputs
1455  *      interrupt number
1456  *      client data arg ptr
1457  * Outputs
1458  *      handled
1459  */
1460 static irqreturn_t
1461 ia64_mca_cmc_int_caller(int cmc_irq, void *arg)
1462 {
1463         static int start_count = -1;
1464         unsigned int cpuid;
1465
1466         cpuid = smp_processor_id();
1467
1468         /* If first cpu, update count */
1469         if (start_count == -1)
1470                 start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);
1471
1472         ia64_mca_cmc_int_handler(cmc_irq, arg);
1473
1474         cpuid = cpumask_next(cpuid, cpu_online_mask);
1475
1476         if (cpuid < nr_cpu_ids) {
1477                 platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
1478         } else {
1479                 /* If no log record, switch out of polling mode */
1480                 if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {
1481
1482                         printk(KERN_WARNING "Returning to interrupt driven CMC handler\n");
1483                         schedule_work(&cmc_enable_work);
1484                         cmc_polling_enabled = 0;
1485
1486                 } else {
1487
1488                         mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
1489                 }
1490
1491                 start_count = -1;
1492         }
1493
1494         return IRQ_HANDLED;
1495 }
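
/*
 * Illustrative sketch (not part of the original source): the polling
 * pass above is a daisy chain, not a broadcast -- each cpu extracts its
 * own SAL records and then kicks the next online cpu, so only one cpu
 * talks to SAL at a time.  The walk is the usual cpumask iteration:
 *
 *	cpu = cpumask_first(cpu_online_mask);   // started by the timer
 *	...log local records...
 *	cpu = cpumask_next(cpu, cpu_online_mask);
 *	if (cpu < nr_cpu_ids)                   // more cpus to visit
 *		platform_send_ipi(cpu, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
 *	else
 *		...last cpu decides: stay in polling mode or re-arm irqs...
 */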
1496
1497 /*
1498  *  ia64_mca_cmc_poll
1499  *
1500  *      Poll for Corrected Machine Checks (CMCs)
1501  *
1502  * Inputs   :   dummy(unused)
1503  * Outputs  :   None
1504  *
1505  */
1506 static void
1507 ia64_mca_cmc_poll (unsigned long dummy)
1508 {
1509         /* Trigger a CMC interrupt cascade  */
1510         platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
1511 }
1512
1513 /*
1514  *  ia64_mca_cpe_int_caller
1515  *
1516  *      Triggered by sw interrupt from CPE polling routine.  Calls
1517  *      real interrupt handler and either triggers a sw interrupt
1518  *      on the next cpu or does cleanup at the end.
1519  *
1520  * Inputs
1521  *      interrupt number
1522  *      client data arg ptr
1523  * Outputs
1524  *      handled
1525  */
1526 #ifdef CONFIG_ACPI
1527
1528 static irqreturn_t
1529 ia64_mca_cpe_int_caller(int cpe_irq, void *arg)
1530 {
1531         static int start_count = -1;
1532         static int poll_time = MIN_CPE_POLL_INTERVAL;
1533         unsigned int cpuid;
1534
1535         cpuid = smp_processor_id();
1536
1537         /* If first cpu, update count */
1538         if (start_count == -1)
1539                 start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);
1540
1541         ia64_mca_cpe_int_handler(cpe_irq, arg);
1542
1543         cpuid = cpumask_next(cpuid, cpu_online_mask);
1544
1545         if (cpuid < nr_cpu_ids) {
1546                 platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
1547         } else {
1548                 /*
1549                  * If a log was recorded, increase our polling frequency,
1550                  * otherwise, backoff or return to interrupt mode.
1551                  * otherwise back off or return to interrupt mode.
1552                 if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) {
1553                         poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time / 2);
1554                 } else if (cpe_vector < 0) {
1555                         poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2);
1556                 } else {
1557                         poll_time = MIN_CPE_POLL_INTERVAL;
1558
1559                         printk(KERN_WARNING "Returning to interrupt driven CPE handler\n");
1560                         enable_irq(local_vector_to_irq(IA64_CPE_VECTOR));
1561                         cpe_poll_enabled = 0;
1562                 }
1563
1564                 if (cpe_poll_enabled)
1565                         mod_timer(&cpe_poll_timer, jiffies + poll_time);
1566                 start_count = -1;
1567         }
1568
1569         return IRQ_HANDLED;
1570 }
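
/*
 * Illustrative sketch (hypothetical helper, not in this file): the CPE
 * poll interval above follows a clamped exponential backoff -- halve it
 * when a pass logged new records, double it when the machine was quiet:
 *
 *	static int next_cpe_poll_time(int poll_time, int saw_records)
 *	{
 *		if (saw_records)
 *			return max(MIN_CPE_POLL_INTERVAL, poll_time / 2);
 *		return min(MAX_CPE_POLL_INTERVAL, poll_time * 2);
 *	}
 *
 * so a noisy platform converges on MIN_CPE_POLL_INTERVAL and a quiet one
 * on MAX_CPE_POLL_INTERVAL (or back to interrupt mode when CPEI exists).
 */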
1571
1572 /*
1573  *  ia64_mca_cpe_poll
1574  *
1575  *      Poll for Corrected Platform Errors (CPEs), trigger interrupt
1576  *      on first cpu, from there it will trickle through all the cpus.
1577  *
1578  * Inputs   :   dummy(unused)
1579  * Outputs  :   None
1580  *
1581  */
1582 static void
1583 ia64_mca_cpe_poll (unsigned long dummy)
1584 {
1585         /* Trigger a CPE interrupt cascade  */
1586         platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
1587 }
1588
1589 #endif /* CONFIG_ACPI */
1590
1591 static int
1592 default_monarch_init_process(struct notifier_block *self, unsigned long val, void *data)
1593 {
1594         int c;
1595         struct task_struct *g, *t;
1596         if (val != DIE_INIT_MONARCH_PROCESS)
1597                 return NOTIFY_DONE;
1598 #ifdef CONFIG_KEXEC
1599         if (atomic_read(&kdump_in_progress))
1600                 return NOTIFY_DONE;
1601 #endif
1602
1603         /*
1604          * FIXME: mlogbuf will brim over with INIT stack dumps.
1605          * To enable show_stack from INIT we reuse oops_in_progress, which is
1606          * meant for real oopses; this may leave things broken after INIT.
1607          */
1608         BREAK_LOGLEVEL(console_loglevel);
1609         ia64_mlogbuf_dump_from_init();
1610
1611         printk(KERN_ERR "Processes interrupted by INIT -");
1612         for_each_online_cpu(c) {
1613                 struct ia64_sal_os_state *s;
1614                 t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET);
1615                 s = (struct ia64_sal_os_state *)((char *)t + MCA_SOS_OFFSET);
1616                 g = s->prev_task;
1617                 if (g) {
1618                         if (g->pid)
1619                                 printk(" %d", g->pid);
1620                         else
1621                                 printk(" %d (cpu %d task 0x%p)", g->pid, task_cpu(g), g);
1622                 }
1623         }
1624         printk("\n\n");
1625         if (read_trylock(&tasklist_lock)) {
1626                 do_each_thread (g, t) {
1627                         printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
1628                         show_stack(t, NULL);
1629                 } while_each_thread (g, t);
1630                 read_unlock(&tasklist_lock);
1631         }
1632         /* FIXME: This will not restore zapped printk locks. */
1633         RESTORE_LOGLEVEL(console_loglevel);
1634         return NOTIFY_DONE;
1635 }
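
/*
 * Illustrative sketch (hypothetical module code, not in this file): a
 * debugger or dump facility can run ahead of the default handler above
 * by registering its own die notifier with a priority above 0.
 * Returning NOTIFY_DONE lets the chain fall through to
 * default_monarch_init_process(); returning NOTIFY_STOP both ends the
 * chain and, via the NOTIFY_INIT() wrapper, parks the cpu in
 * ia64_mca_spin() -- which is what a kernel debugger wants.
 *
 *	static int my_init_hook(struct notifier_block *self,
 *				unsigned long val, void *data)
 *	{
 *		if (val != DIE_INIT_MONARCH_PROCESS)
 *			return NOTIFY_DONE;
 *		...capture a dump here...
 *		return NOTIFY_DONE;     // let the default dump run too
 *	}
 *	static struct notifier_block my_init_nb = {
 *		.notifier_call = my_init_hook,
 *		.priority = 1,          // ahead of the priority 0 default
 *	};
 *	// register_die_notifier(&my_init_nb) at init time
 */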
1636
1637 /*
1638  * C portion of the OS INIT handler
1639  *
1640  * Called from ia64_os_init_dispatch
1641  *
1642  * Inputs: pointer to pt_regs where processor info was saved.  SAL/OS state for
1643  * this event.  This code is used for both monarch and slave INIT events, see
1644  * sos->monarch.
1645  *
1646  * All INIT events switch to the INIT stack and change the previous process to
1647  * blocked status.  If one of the INIT events is the monarch then we are
1648  * probably processing the nmi button/command.  Use the monarch cpu to dump all
1649  * the processes.  The slave INIT events all spin until the monarch cpu
1650  * returns.  We can also get INIT slave events for MCA, in which case the MCA
1651  * process is the monarch.
1652  */
1653
1654 void
1655 ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
1656                   struct ia64_sal_os_state *sos)
1657 {
1658         static atomic_t slaves;
1659         static atomic_t monarchs;
1660         struct task_struct *previous_current;
1661         int cpu = smp_processor_id();
1662         struct ia64_mca_notify_die nd =
1663                 { .sos = sos, .monarch_cpu = &monarch_cpu };
1664
1665         NOTIFY_INIT(DIE_INIT_ENTER, regs, (long)&nd, 0);
1666
1667         mprintk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
1668                 sos->proc_state_param, cpu, sos->monarch);
1669         salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0);
1670
1671         previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "INIT");
1672         sos->os_status = IA64_INIT_RESUME;
1673
1674         /* FIXME: Workaround for broken proms that drive all INIT events as
1675          * slaves.  The last slave that enters is promoted to be a monarch.
1676          * Remove this code in September 2006; that gives platforms a year to
1677          * fix their proms and get their customers updated.
1678          */
1679         if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) {
1680                 mprintk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n",
1681                         __func__, cpu);
1682                 atomic_dec(&slaves);
1683                 sos->monarch = 1;
1684         }
1685
1686         /* FIXME: Workaround for broken proms that drive all INIT events as
1687          * monarchs.  Second and subsequent monarchs are demoted to slaves.
1688          * Remove this code in September 2006; that gives platforms a year to
1689          * fix their proms and get their customers updated.
1690          */
1691         if (sos->monarch && atomic_add_return(1, &monarchs) > 1) {
1692                 mprintk(KERN_WARNING "%s: Demoting cpu %d to slave.\n",
1693                                __func__, cpu);
1694                 atomic_dec(&monarchs);
1695                 sos->monarch = 0;
1696         }
1697
1698         if (!sos->monarch) {
1699                 ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
1700
1701 #ifdef CONFIG_KEXEC
1702                 while (monarch_cpu == -1 && !atomic_read(&kdump_in_progress))
1703                         udelay(1000);
1704 #else
1705                 while (monarch_cpu == -1)
1706                         cpu_relax();    /* spin until monarch enters */
1707 #endif
1708
1709                 NOTIFY_INIT(DIE_INIT_SLAVE_ENTER, regs, (long)&nd, 1);
1710                 NOTIFY_INIT(DIE_INIT_SLAVE_PROCESS, regs, (long)&nd, 1);
1711
1712 #ifdef CONFIG_KEXEC
1713                 while (monarch_cpu != -1 && !atomic_read(&kdump_in_progress))
1714                         udelay(1000);
1715 #else
1716                 while (monarch_cpu != -1)
1717                         cpu_relax();    /* spin until monarch leaves */
1718 #endif
1719
1720                 NOTIFY_INIT(DIE_INIT_SLAVE_LEAVE, regs, (long)&nd, 1);
1721
1722                 mprintk("Slave on cpu %d returning to normal service.\n", cpu);
1723                 set_curr_task(cpu, previous_current);
1724                 ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
1725                 atomic_dec(&slaves);
1726                 return;
1727         }
1728
1729         monarch_cpu = cpu;
1730         NOTIFY_INIT(DIE_INIT_MONARCH_ENTER, regs, (long)&nd, 1);
1731
1732         /*
1733          * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000), INIT can be
1734          * generated via the BMC's command-line interface, but since the console is on the
1735          * same serial line, the user will need some time to switch out of the BMC before
1736          * the dump begins.
1737          */
1738         mprintk("Delaying for 5 seconds...\n");
1739         udelay(5*1000000);
1740         ia64_wait_for_slaves(cpu, "INIT");
1741         /* If nobody intercepts DIE_INIT_MONARCH_PROCESS then we drop through
1742          * to default_monarch_init_process() above and just print all the
1743          * tasks.
1744          */
1745         NOTIFY_INIT(DIE_INIT_MONARCH_PROCESS, regs, (long)&nd, 1);
1746         NOTIFY_INIT(DIE_INIT_MONARCH_LEAVE, regs, (long)&nd, 1);
1747
1748         mprintk("\nINIT dump complete.  Monarch on cpu %d returning to normal service.\n", cpu);
1749         atomic_dec(&monarchs);
1750         set_curr_task(cpu, previous_current);
1751         monarch_cpu = -1;
1752         return;
1753 }
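
/*
 * Illustrative sketch (not part of the original source): stripped of the
 * prom workarounds, the INIT rendezvous is driven by one shared word,
 * monarch_cpu:
 *
 *	slave                                   monarch
 *	-----                                   -------
 *	checkin = RENDEZ_CHECKIN_INIT           monarch_cpu = cpu
 *	spin while (monarch_cpu == -1)          ia64_wait_for_slaves()
 *	SLAVE_ENTER / SLAVE_PROCESS             MONARCH_PROCESS (task dump)
 *	spin while (monarch_cpu != -1)          MONARCH_LEAVE
 *	SLAVE_LEAVE, resume                     monarch_cpu = -1, resume
 */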
1754
1755 static int __init
1756 ia64_mca_disable_cpe_polling(char *str)
1757 {
1758         cpe_poll_enabled = 0;
1759         return 1;
1760 }
1761
1762 __setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);
1763
1764 static struct irqaction cmci_irqaction = {
1765         .handler =      ia64_mca_cmc_int_handler,
1766         .flags =        IRQF_DISABLED,
1767         .name =         "cmc_hndlr"
1768 };
1769
1770 static struct irqaction cmcp_irqaction = {
1771         .handler =      ia64_mca_cmc_int_caller,
1772         .flags =        IRQF_DISABLED,
1773         .name =         "cmc_poll"
1774 };
1775
1776 static struct irqaction mca_rdzv_irqaction = {
1777         .handler =      ia64_mca_rendez_int_handler,
1778         .flags =        IRQF_DISABLED,
1779         .name =         "mca_rdzv"
1780 };
1781
1782 static struct irqaction mca_wkup_irqaction = {
1783         .handler =      ia64_mca_wakeup_int_handler,
1784         .flags =        IRQF_DISABLED,
1785         .name =         "mca_wkup"
1786 };
1787
1788 #ifdef CONFIG_ACPI
1789 static struct irqaction mca_cpe_irqaction = {
1790         .handler =      ia64_mca_cpe_int_handler,
1791         .flags =        IRQF_DISABLED,
1792         .name =         "cpe_hndlr"
1793 };
1794
1795 static struct irqaction mca_cpep_irqaction = {
1796         .handler =      ia64_mca_cpe_int_caller,
1797         .flags =        IRQF_DISABLED,
1798         .name =         "cpe_poll"
1799 };
1800 #endif /* CONFIG_ACPI */
1801
1802 /* Minimal format of the MCA/INIT stacks.  The pseudo processes that run on
1803  * these stacks can never sleep, cannot return from the kernel to user
1804  * space, and do not appear in a normal ps listing.  So there is no need to
1805  * format most of the fields.
1806  */
1807
1808 static void __cpuinit
1809 format_mca_init_stack(void *mca_data, unsigned long offset,
1810                 const char *type, int cpu)
1811 {
1812         struct task_struct *p = (struct task_struct *)((char *)mca_data + offset);
1813         struct thread_info *ti;
1814         memset(p, 0, KERNEL_STACK_SIZE);
1815         ti = task_thread_info(p);
1816         ti->flags = _TIF_MCA_INIT;
1817         ti->preempt_count = 1;
1818         ti->task = p;
1819         ti->cpu = cpu;
1820         p->stack = ti;
1821         p->state = TASK_UNINTERRUPTIBLE;
1822         cpu_set(cpu, p->cpus_allowed);
1823         INIT_LIST_HEAD(&p->tasks);
1824         p->parent = p->real_parent = p->group_leader = p;
1825         INIT_LIST_HEAD(&p->children);
1826         INIT_LIST_HEAD(&p->sibling);
1827         strncpy(p->comm, type, sizeof(p->comm)-1);
1828 }
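
/*
 * Illustrative sketch (not part of the original source): because the
 * pseudo task sits at a fixed offset inside the per-cpu MCA area, a
 * handler can find any cpu's MCA or INIT task with nothing more than
 * __per_cpu_mca[] -- the INIT task dump above does exactly this:
 *
 *	struct task_struct *t = __va(__per_cpu_mca[cpu] +
 *				     IA64_MCA_CPU_INIT_STACK_OFFSET);
 *
 * task_struct, thread_info and stack share one KERNEL_STACK_SIZE block,
 * mirroring the layout of a normal ia64 kernel stack.
 */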
1829
1830 /* Caller prevents this from being called after init */
1831 static void * __init_refok mca_bootmem(void)
1832 {
1833         return __alloc_bootmem(sizeof(struct ia64_mca_cpu),
1834                             KERNEL_STACK_SIZE, 0);
1835 }
1836
1837 /* Do per-CPU MCA-related initialization.  */
1838 void __cpuinit
1839 ia64_mca_cpu_init(void *cpu_data)
1840 {
1841         void *pal_vaddr;
1842         void *data;
1843         long sz = sizeof(struct ia64_mca_cpu);
1844         int cpu = smp_processor_id();
1845         static int first_time = 1;
1846
1847         /*
1848          * Structure will already be allocated if cpu has been online,
1849          * then offlined.
1850          */
1851         if (__per_cpu_mca[cpu]) {
1852                 data = __va(__per_cpu_mca[cpu]);
1853         } else {
1854                 if (first_time) {
1855                         data = mca_bootmem();
1856                         first_time = 0;
1857                 } else
1858                         data = __get_free_pages(GFP_KERNEL, get_order(sz));
1859                 if (!data)
1860                         panic("Could not allocate MCA memory for cpu %d\n",
1861                                         cpu);
1862         }
1863         format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, mca_stack),
1864                 "MCA", cpu);
1865         format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, init_stack),
1866                 "INIT", cpu);
1867         __get_cpu_var(ia64_mca_data) = __per_cpu_mca[cpu] = __pa(data);
1868
1869         /*
1870          * Stash away a copy of the PTE needed to map the per-CPU page.
1871          * We may need it during MCA recovery.
1872          */
1873         __get_cpu_var(ia64_mca_per_cpu_pte) =
1874                 pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL));
1875
1876         /*
1877          * Also, stash away a copy of the PAL address and the PTE
1878          * needed to map it.
1879          */
1880         pal_vaddr = efi_get_pal_addr();
1881         if (!pal_vaddr)
1882                 return;
1883         __get_cpu_var(ia64_mca_pal_base) =
1884                 GRANULEROUNDDOWN((unsigned long) pal_vaddr);
1885         __get_cpu_var(ia64_mca_pal_pte) = pte_val(mk_pte_phys(__pa(pal_vaddr),
1886                                                               PAGE_KERNEL));
1887 }
1888
1889 static void __cpuinit ia64_mca_cmc_vector_adjust(void *dummy)
1890 {
1891         unsigned long flags;
1892
1893         local_irq_save(flags);
1894         if (!cmc_polling_enabled)
1895                 ia64_mca_cmc_vector_enable(NULL);
1896         local_irq_restore(flags);
1897 }
1898
1899 static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
1900                                       unsigned long action,
1901                                       void *hcpu)
1902 {
1903         int hotcpu = (unsigned long) hcpu;
1904
1905         switch (action) {
1906         case CPU_ONLINE:
1907         case CPU_ONLINE_FROZEN:
1908                 smp_call_function_single(hotcpu, ia64_mca_cmc_vector_adjust,
1909                                          NULL, 0);
1910                 break;
1911         }
1912         return NOTIFY_OK;
1913 }
1914
1915 static struct notifier_block mca_cpu_notifier __cpuinitdata = {
1916         .notifier_call = mca_cpu_callback
1917 };
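
/*
 * Illustrative sketch (generic pattern, nothing new here): the block
 * above is the standard cpu-hotplug notifier shape of this kernel -- a
 * callback switching on the hotplug action, wrapped in a notifier_block;
 * ia64_mca_late_init() wires it up with:
 *
 *	register_hotcpu_notifier(&mca_cpu_notifier);
 *
 * so every cpu that comes online gets its CMC vector re-enabled via
 * smp_call_function_single(), unless the system is in polling mode.
 */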
1918
1919 /*
1920  * ia64_mca_init
1921  *
1922  *  Do all the system level mca specific initialization.
1923  *
1924  *      1. Register spinloop and wakeup request interrupt vectors
1925  *
1926  *      2. Register OS_MCA handler entry point
1927  *
1928  *      3. Register OS_INIT handler entry point
1929  *
1930  *      4. Initialize MCA/CMC/INIT related log buffers maintained by the OS.
1931  *
1932  *  Note that this initialization is done very early before some kernel
1933  *  services are available.
1934  *
1935  *  Inputs  :   None
1936  *
1937  *  Outputs :   None
1938  */
1939 void __init
1940 ia64_mca_init(void)
1941 {
1942         ia64_fptr_t *init_hldlr_ptr_monarch = (ia64_fptr_t *)ia64_os_init_dispatch_monarch;
1943         ia64_fptr_t *init_hldlr_ptr_slave = (ia64_fptr_t *)ia64_os_init_dispatch_slave;
1944         ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
1945         int i;
1946         long rc;
1947         struct ia64_sal_retval isrv;
1948         unsigned long timeout = IA64_MCA_RENDEZ_TIMEOUT; /* platform specific */
1949         static struct notifier_block default_init_monarch_nb = {
1950                 .notifier_call = default_monarch_init_process,
1951                 .priority = 0, /* we need to be notified last */
1952         };
1953
1954         IA64_MCA_DEBUG("%s: begin\n", __func__);
1955
1956         /* Clear the Rendez checkin flag for all cpus */
1957         for (i = 0; i < NR_CPUS; i++)
1958                 ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
1959
1960         /*
1961          * Register the rendezvous spinloop and wakeup mechanism with SAL
1962          */
1963
1964         /* Register the rendezvous interrupt vector with SAL */
1965         while (1) {
1966                 isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT,
1967                                               SAL_MC_PARAM_MECHANISM_INT,
1968                                               IA64_MCA_RENDEZ_VECTOR,
1969                                               timeout,
1970                                               SAL_MC_PARAM_RZ_ALWAYS);
1971                 rc = isrv.status;
1972                 if (rc == 0)
1973                         break;
1974                 if (rc == -2) {
1975                         printk(KERN_INFO "Increasing MCA rendezvous timeout from "
1976                                 "%ld to %ld milliseconds\n", timeout, isrv.v0);
1977                         timeout = isrv.v0;
1978                         NOTIFY_MCA(DIE_MCA_NEW_TIMEOUT, NULL, timeout, 0);
1979                         continue;
1980                 }
1981                 printk(KERN_ERR "Failed to register rendezvous interrupt "
1982                        "with SAL (status %ld)\n", rc);
1983                 return;
1984         }
1985
1986         /* Register the wakeup interrupt vector with SAL */
1987         isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_WAKEUP,
1988                                       SAL_MC_PARAM_MECHANISM_INT,
1989                                       IA64_MCA_WAKEUP_VECTOR,
1990                                       0, 0);
1991         rc = isrv.status;
1992         if (rc) {
1993                 printk(KERN_ERR "Failed to register wakeup interrupt with SAL "
1994                        "(status %ld)\n", rc);
1995                 return;
1996         }
1997
1998         IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __func__);
1999
2000         ia64_mc_info.imi_mca_handler        = ia64_tpa(mca_hldlr_ptr->fp);
2001         /*
2002          * XXX - disable SAL checksum by setting size to 0; should be
2003          *      ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch);
2004          */
2005         ia64_mc_info.imi_mca_handler_size       = 0;
2006
2007         /* Register the os mca handler with SAL */
2008         if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
2009                                        ia64_mc_info.imi_mca_handler,
2010                                        ia64_tpa(mca_hldlr_ptr->gp),
2011                                        ia64_mc_info.imi_mca_handler_size,
2012                                        0, 0, 0)))
2013         {
2014                 printk(KERN_ERR "Failed to register OS MCA handler with SAL "
2015                        "(status %ld)\n", rc);
2016                 return;
2017         }
2018
2019         IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __func__,
2020                        ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));
2021
2022         /*
2023          * XXX - disable SAL checksum by setting size to 0; should be
2024          * size of the actual init handler in mca_asm.S.
2025          */
2026         ia64_mc_info.imi_monarch_init_handler           = ia64_tpa(init_hldlr_ptr_monarch->fp);
2027         ia64_mc_info.imi_monarch_init_handler_size      = 0;
2028         ia64_mc_info.imi_slave_init_handler             = ia64_tpa(init_hldlr_ptr_slave->fp);
2029         ia64_mc_info.imi_slave_init_handler_size        = 0;
2030
2031         IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __func__,
2032                        ia64_mc_info.imi_monarch_init_handler);
2033
2034         /* Register the os init handler with SAL */
2035         if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
2036                                        ia64_mc_info.imi_monarch_init_handler,
2037                                        ia64_tpa(ia64_getreg(_IA64_REG_GP)),
2038                                        ia64_mc_info.imi_monarch_init_handler_size,
2039                                        ia64_mc_info.imi_slave_init_handler,
2040                                        ia64_tpa(ia64_getreg(_IA64_REG_GP)),
2041                                        ia64_mc_info.imi_slave_init_handler_size)))
2042         {
2043                 printk(KERN_ERR "Failed to register m/s INIT handlers with SAL "
2044                        "(status %ld)\n", rc);
2045                 return;
2046         }
2047         if (register_die_notifier(&default_init_monarch_nb)) {
2048                 printk(KERN_ERR "Failed to register default monarch INIT process\n");
2049                 return;
2050         }
2051
2052         IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __func__);
2053
2054         /*
2055          *  Configure the CMCI/P vector and handler. Interrupts for CMC are
2056          *  per-processor, so AP CMC interrupts are set up in smp_callin() (smpboot.c).
2057          */
2058         register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
2059         register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
2060         ia64_mca_cmc_vector_setup();       /* Setup vector on BSP */
2061
2062         /* Setup the MCA rendezvous interrupt vector */
2063         register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);
2064
2065         /* Setup the MCA wakeup interrupt vector */
2066         register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);
2067
2068 #ifdef CONFIG_ACPI
2069         /* Setup the CPEI/P handler */
2070         register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
2071 #endif
2072
2073         /* Initialize the areas set aside by the OS to buffer the
2074          * platform/processor error states for MCA/INIT/CMC
2075          * handling.
2076          */
2077         ia64_log_init(SAL_INFO_TYPE_MCA);
2078         ia64_log_init(SAL_INFO_TYPE_INIT);
2079         ia64_log_init(SAL_INFO_TYPE_CMC);
2080         ia64_log_init(SAL_INFO_TYPE_CPE);
2081
2082         mca_init = 1;
2083         printk(KERN_INFO "MCA related initialization done\n");
2084 }
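
/*
 * Illustrative sketch (not part of the original source): the rendezvous
 * registration above is a negotiate-and-retry loop.  SAL may reject the
 * requested timeout with status -2 and propose its own value in v0:
 *
 *	for (;;) {
 *		isrv = ia64_sal_mc_set_params(..., timeout, ...);
 *		if (isrv.status == 0)
 *			break;                  // accepted
 *		if (isrv.status == -2) {
 *			timeout = isrv.v0;      // retry with SAL's value
 *			continue;
 *		}
 *		return;                         // hard failure, no OS MCA
 *	}
 */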
2085
2086 /*
2087  * ia64_mca_late_init
2088  *
2089  *      Opportunity to set up things that require initialization later
2090  *      than ia64_mca_init.  Setup a timer to poll for CPEs if the
2091  *      platform doesn't support an interrupt driven mechanism.
2092  *
2093  *  Inputs  :   None
2094  *  Outputs :   Status
2095  */
2096 static int __init
2097 ia64_mca_late_init(void)
2098 {
2099         if (!mca_init)
2100                 return 0;
2101
2102         register_hotcpu_notifier(&mca_cpu_notifier);
2103
2104         /* Setup the CMCI/P vector and handler */
2105         init_timer(&cmc_poll_timer);
2106         cmc_poll_timer.function = ia64_mca_cmc_poll;
2107
2108         /* Unmask/enable the vector */
2109         cmc_polling_enabled = 0;
2110         schedule_work(&cmc_enable_work);
2111
2112         IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __func__);
2113
2114 #ifdef CONFIG_ACPI
2115         /* Setup the CPEI/P vector and handler */
2116         cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
2117         init_timer(&cpe_poll_timer);
2118         cpe_poll_timer.function = ia64_mca_cpe_poll;
2119
2120         {
2121                 struct irq_desc *desc;
2122                 unsigned int irq;
2123
2124                 if (cpe_vector >= 0) {
2125                         /* If platform supports CPEI, enable the irq. */
2126                         irq = local_vector_to_irq(cpe_vector);
2127                         if (irq > 0) {
2128                                 cpe_poll_enabled = 0;
2129                                 desc = irq_desc + irq;
2130                                 desc->status |= IRQ_PER_CPU;
2131                                 setup_irq(irq, &mca_cpe_irqaction);
2132                                 ia64_cpe_irq = irq;
2133                                 ia64_mca_register_cpev(cpe_vector);
2134                                 IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n",
2135                                         __func__);
2136                                 return 0;
2137                         }
2138                         printk(KERN_ERR "%s: Failed to find irq for CPE "
2139                                         "interrupt handler, vector %d\n",
2140                                         __func__, cpe_vector);
2141                 }
2142                 /* If platform doesn't support CPEI, get the timer going. */
2143                 if (cpe_poll_enabled) {
2144                         ia64_mca_cpe_poll(0UL);
2145                         IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __func__);
2146                 }
2147         }
2148 #endif
2149
2150         return 0;
2151 }
2152
2153 device_initcall(ia64_mca_late_init);