1 /* $Id: traps.c,v 1.85 2002/02/09 19:49:31 davem Exp $
2 * arch/sparc64/kernel/traps.c
4 * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
9 * I like traps on v9, :))))
12 #include <linux/config.h>
13 #include <linux/module.h>
14 #include <linux/sched.h> /* for jiffies */
15 #include <linux/kernel.h>
16 #include <linux/kallsyms.h>
17 #include <linux/signal.h>
18 #include <linux/smp.h>
19 #include <linux/smp_lock.h>
21 #include <linux/init.h>
23 #include <asm/delay.h>
24 #include <asm/system.h>
25 #include <asm/ptrace.h>
26 #include <asm/oplib.h>
28 #include <asm/pgtable.h>
29 #include <asm/unistd.h>
30 #include <asm/uaccess.h>
31 #include <asm/fpumacro.h>
34 #include <asm/estate.h>
35 #include <asm/chafsr.h>
36 #include <asm/sfafsr.h>
37 #include <asm/psrcompat.h>
38 #include <asm/processor.h>
39 #include <asm/timer.h>
40 #include <asm/kdebug.h>
43 #include <linux/kmod.h>
/* Chain of callbacks invoked on fatal events (see notify_die() calls
 * throughout this file); registration is serialized by die_notifier_lock.
 */
46 struct notifier_block *sparc64die_chain;
47 static DEFINE_SPINLOCK(die_notifier_lock);
/* Add 'nb' to sparc64die_chain under the spinlock and return the
 * notifier_chain_register() result.
 */
49 int register_die_notifier(struct notifier_block *nb)
53 spin_lock_irqsave(&die_notifier_lock, flags);
54 err = notifier_chain_register(&sparc64die_chain, nb);
55 spin_unlock_irqrestore(&die_notifier_lock, flags);
59 /* When an irrecoverable trap occurs at tl > 0, the trap entry
60 * code logs the trap state registers at every level in the trap
61 * stack. It is found at (pt_regs + sizeof(pt_regs)) and the layout
/* Print every saved trap level's TSTATE/TPC/TNPC/TT from the traplog
 * that the trap entry code stores after pt_regs (see comment above).
 */
74 static void dump_tl1_traplog(struct tl1_traplog *p)
78 printk("TRAPLOG: Error at trap level 0x%lx, dumping track stack.\n",
/* NOTE(review): the message says "track stack"; presumably "trap stack"
 * was intended -- user-visible log text, fix deliberately.
 */
/* Walks a fixed 4 entries -- assumes trapstack[] has at least 4 slots;
 * confirm against the struct tl1_traplog definition.
 */
80 for (i = 0; i < 4; i++) {
82 "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
83 "TNPC[%016lx] TT[%lx]\n",
85 p->trapstack[i].tstate, p->trapstack[i].tpc,
86 p->trapstack[i].tnpc, p->trapstack[i].tt);
/* Debug-call trap entry point: just fan out to the die notifier chain. */
90 void do_call_debug(struct pt_regs *regs)
92 notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT);
/* Handle a trap vector with no dedicated handler, taken at TL == 0.
 * Kernel-mode traps are fatal (die_if_kernel); user tasks receive
 * SIGILL/ILL_ILLTRP with si_trapno set to the offending trap number.
 */
95 void bad_trap(struct pt_regs *regs, long lvl)
/* Give die-chain listeners first crack; they may swallow the trap. */
100 if (notify_die(DIE_TRAP, "bad trap", regs,
101 0, lvl, SIGTRAP) == NOTIFY_STOP)
105 sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
106 die_if_kernel(buffer, regs);
110 if (regs->tstate & TSTATE_PRIV) {
111 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
112 die_if_kernel(buffer, regs);
/* 32-bit tasks only have a 32-bit PC; mask off the high bits. */
114 if (test_thread_flag(TIF_32BIT)) {
115 regs->tpc &= 0xffffffff;
116 regs->tnpc &= 0xffffffff;
118 info.si_signo = SIGILL;
120 info.si_code = ILL_ILLTRP;
121 info.si_addr = (void __user *)regs->tpc;
122 info.si_trapno = lvl;
123 force_sig_info(SIGILL, &info, current);
/* Same as bad_trap() but taken at trap level > 0: dump the saved trap
 * stack first, then die if we were in kernel mode.
 */
126 void bad_trap_tl1(struct pt_regs *regs, long lvl)
130 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
131 0, lvl, SIGTRAP) == NOTIFY_STOP)
/* Trap entry code stashes the traplog immediately after pt_regs. */
134 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
136 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
137 die_if_kernel (buffer, regs);
140 #ifdef CONFIG_DEBUG_BUGVERBOSE
/* Verbose BUG() helper: report the source location of the failed check. */
141 void do_BUG(const char *file, int line)
144 printk("kernel BUG at %s:%d!\n", file, line);
/* Spitfire instruction access exception (trap 0x8).  Kernel-mode faults
 * are fatal; user faults become SIGSEGV/SEGV_MAPERR at the faulting PC.
 * sfsr/sfar are the I-MMU synchronous fault status/address registers.
 */
148 void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
152 if (notify_die(DIE_TRAP, "instruction access exception", regs,
153 0, 0x8, SIGTRAP) == NOTIFY_STOP)
156 if (regs->tstate & TSTATE_PRIV) {
157 printk("spitfire_insn_access_exception: SFSR[%016lx] "
158 "SFAR[%016lx], going.\n", sfsr, sfar);
159 die_if_kernel("Iax", regs);
/* 32-bit tasks only have a 32-bit PC; mask off the high bits. */
161 if (test_thread_flag(TIF_32BIT)) {
162 regs->tpc &= 0xffffffff;
163 regs->tnpc &= 0xffffffff;
165 info.si_signo = SIGSEGV;
167 info.si_code = SEGV_MAPERR;
168 info.si_addr = (void __user *)regs->tpc;
170 force_sig_info(SIGSEGV, &info, current);
/* TL>0 flavor: dump the trap stack, then reuse the TL==0 handler. */
173 void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
175 if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
176 0, 0x8, SIGTRAP) == NOTIFY_STOP)
179 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
180 spitfire_insn_access_exception(regs, sfsr, sfar);
/* Spitfire data access exception (trap 0x30).  For kernel faults the
 * exception table is consulted first so uaccess faults can resume at
 * their fixup handler; unrecoverable kernel faults die.  User faults
 * get SIGSEGV/SEGV_MAPERR with si_addr set to the fault address (SFAR).
 */
183 void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
187 if (notify_die(DIE_TRAP, "data access exception", regs,
188 0, 0x30, SIGTRAP) == NOTIFY_STOP)
191 if (regs->tstate & TSTATE_PRIV) {
192 /* Test if this comes from uaccess places. */
193 const struct exception_table_entry *entry;
195 entry = search_exception_tables(regs->tpc);
197 /* Ouch, somebody is trying VM hole tricks on us... */
198 #ifdef DEBUG_EXCEPTIONS
199 printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
200 printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
201 regs->tpc, entry->fixup);
/* Resume at the fixup handler instead of the faulting instruction. */
203 regs->tpc = entry->fixup;
204 regs->tnpc = regs->tpc + 4;
208 printk("spitfire_data_access_exception: SFSR[%016lx] "
209 "SFAR[%016lx], going.\n", sfsr, sfar);
210 die_if_kernel("Dax", regs);
213 info.si_signo = SIGSEGV;
215 info.si_code = SEGV_MAPERR;
216 info.si_addr = (void __user *)sfar;
218 force_sig_info(SIGSEGV, &info, current);
/* TL>0 flavor: dump the trap stack, then reuse the TL==0 handler. */
221 void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
223 if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
224 0, 0x30, SIGTRAP) == NOTIFY_STOP)
227 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
228 spitfire_data_access_exception(regs, sfsr, sfar);
232 /* This is really pathetic... */
233 extern volatile int pci_poke_in_progress;
234 extern volatile int pci_poke_cpu;
235 extern volatile int pci_poke_faulted;
238 /* When access exceptions happen, we must do this. */
/* Invalidate all L1 I/D cache tags via the diagnostic put-tag helpers,
 * then re-enable both caches and their parity checking through the LSU
 * control register.  Only meaningful on Spitfire-class cpus.
 */
239 static void spitfire_clean_and_reenable_l1_caches(void)
243 if (tlb_type != spitfire)
/* One tag write per 32-byte line; 2*PAGE_SIZE covers the tag space of
 * both caches -- presumably matches the L1 sizes, confirm vs. the
 * UltraSPARC manual.
 */
247 for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
248 spitfire_put_icache_tag(va, 0x0);
249 spitfire_put_dcache_tag(va, 0x0);
252 /* Re-enable in LSU. */
253 __asm__ __volatile__("flush %%g6\n\t"
255 "stxa %0, [%%g0] %1\n\t"
258 : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
259 LSU_CONTROL_IM | LSU_CONTROL_DM),
260 "i" (ASI_LSU_CONTROL)
/* Re-arm all E-state error reporting (ESTATE_ERR_ALL) after an error
 * trap has been serviced.
 */
264 static void spitfire_enable_estate_errors(void)
266 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
269 : "r" (ESTATE_ERR_ALL),
270 "i" (ASI_ESTATE_ERROR_EN));
/* Map an 8-bit UDB ECC syndrome to a decode code passed on to
 * prom_getunumber() (see spitfire_log_udb_syndrome()).  Values >= 0x40
 * appear to be non-single-bit/sentinel cases -- confirm against the
 * UltraSPARC UDB documentation.
 */
273 static char ecc_syndrome_table[] = {
274 0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
275 0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
276 0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
277 0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
278 0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
279 0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
280 0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
281 0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
282 0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
283 0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
284 0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
285 0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
286 0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
287 0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
288 0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
289 0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
290 0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
291 0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
292 0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
293 0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
294 0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
295 0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
296 0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
297 0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
298 0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
299 0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
300 0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
301 0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
302 0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
303 0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
304 0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
305 0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
/* Fallback module name when the PROM cannot resolve a syndrome. */
308 static char *syndrome_unknown = "<Unknown>";
/* Decode the UDBL and UDBH ECC syndromes for the fault address 'afar'
 * into memory module names via prom_getunumber(), and log each one at
 * KERN_WARNING.  'bit' selects which UDB error bit (CE/UE) is relevant.
 */
310 static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
312 unsigned short scode;
313 char memmod_str[64], *p;
/* Low UDB first. */
316 scode = ecc_syndrome_table[udbl & 0xff];
317 if (prom_getunumber(scode, afar,
318 memmod_str, sizeof(memmod_str)) == -1)
319 p = syndrome_unknown;
322 printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
323 "Memory Module \"%s\"\n",
324 smp_processor_id(), scode, p);
/* Then the high UDB. */
328 scode = ecc_syndrome_table[udbh & 0xff];
329 if (prom_getunumber(scode, afar,
330 memmod_str, sizeof(memmod_str)) == -1)
331 p = syndrome_unknown;
334 printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
335 "Memory Module \"%s\"\n",
336 smp_processor_id(), scode, p);
/* Log a correctable ECC error (CEE): dump AFSR/AFAR/UDB state, decode
 * the syndrome, notify die-chain listeners, then re-arm E-state error
 * reporting.  The trap leaves the caches enabled, so nothing else needs
 * restoring.
 */
341 static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
344 printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
345 "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
346 smp_processor_id(), afsr, afar, udbl, udbh, tl1);
348 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);
350 /* We always log it, even if someone is listening for this
353 notify_die(DIE_TRAP, "Correctable ECC Error", regs,
354 0, TRAP_TYPE_CEE, SIGTRAP);
356 /* The Correctable ECC Error trap does not disable I/D caches. So
357 * we only have to restore the ESTATE Error Enable register.
359 spitfire_enable_estate_errors();
/* Log an uncorrectable error (UE).  Kernel-mode UEs are fatal (after
 * dumping the trap stack when tl1 != 0); user tasks receive
 * SIGBUS/BUS_OBJERR once the L1 caches and E-state error reporting
 * have been restored.
 */
362 static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
366 printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
/* NOTE(review): UDBH is printed with %ld while its siblings use %lx --
 * looks like a format typo; confirm and fix upstream.
 */
367 "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n",
368 smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
370 /* XXX add more human friendly logging of the error status
371 * XXX as is implemented for cheetah
374 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
376 /* We always log it, even if someone is listening for this
379 notify_die(DIE_TRAP, "Uncorrectable Error", regs,
382 if (regs->tstate & TSTATE_PRIV) {
384 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
385 die_if_kernel("UE", regs);
388 /* XXX need more intelligent processing here, such as is implemented
389 * XXX for cheetah errors, in fact if the E-cache still holds the
390 * XXX line with bad parity this will loop
393 spitfire_clean_and_reenable_l1_caches();
394 spitfire_enable_estate_errors();
/* 32-bit tasks only have a 32-bit PC; mask off the high bits. */
396 if (test_thread_flag(TIF_32BIT)) {
397 regs->tpc &= 0xffffffff;
398 regs->tnpc &= 0xffffffff;
400 info.si_signo = SIGBUS;
402 info.si_code = BUS_OBJERR;
403 info.si_addr = (void *)0;
405 force_sig_info(SIGBUS, &info, current);
/* Top-level Spitfire access-error handler.  'status_encoded' packs the
 * AFSR, trap type, TL>1 flag and both UDB error registers; unpack them,
 * special-case faults taken during PCI config-space pokes, then
 * dispatch to the UE and/or CEE loggers.
 */
408 void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
410 unsigned long afsr, tt, udbh, udbl;
413 afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
414 tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
415 tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
416 udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
417 udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;
/* A data fault during a PCI probe poke is expected: flag it for the
 * poker and step past the faulting instruction instead of logging.
 */
420 if (tt == TRAP_TYPE_DAE &&
421 pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
422 spitfire_clean_and_reenable_l1_caches();
423 spitfire_enable_estate_errors();
425 pci_poke_faulted = 1;
426 regs->tnpc = regs->tpc + 4;
431 if (afsr & SFAFSR_UE)
432 spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);
434 if (tt == TRAP_TYPE_CEE) {
435 /* Handle the case where we took a CEE trap, but ACK'd
436 * only the UE state in the UDB error registers.
/* Acknowledge any leftover CE bits in UDBH/UDBL by hand. */
438 if (afsr & SFAFSR_UE) {
439 if (udbh & UDBE_CE) {
440 __asm__ __volatile__(
441 "stxa %0, [%1] %2\n\t"
444 : "r" (udbh & UDBE_CE),
445 "r" (0x0), "i" (ASI_UDB_ERROR_W));
447 if (udbl & UDBE_CE) {
448 __asm__ __volatile__(
449 "stxa %0, [%1] %2\n\t"
452 : "r" (udbl & UDBE_CE),
453 "r" (0x18), "i" (ASI_UDB_ERROR_W));
457 spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
/* Non-zero when P-cache use is being forced on; set/read outside this
 * chunk.
 */
461 int cheetah_pcache_forced_on;
/* Enable the Cheetah prefetch cache: read the DCU control register,
 * OR in the PE/HPE/SPE/SL bits, and write it back.
 */
463 void cheetah_enable_pcache(void)
467 printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
470 __asm__ __volatile__("ldxa [%%g0] %1, %0"
472 : "i" (ASI_DCU_CONTROL_REG));
473 dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
474 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
477 : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
480 /* Cheetah error trap handling. */
/* Parameters of the physical E-cache flush area, probed at boot by
 * cheetah_ecache_flush_init() below.
 */
481 static unsigned long ecache_flush_physbase;
482 static unsigned long ecache_flush_linesize;
483 static unsigned long ecache_flush_size;
485 /* WARNING: The error trap handlers in assembly know the precise
486 * layout of the following structure.
488 * C-level handlers below use this information to log the error
489 * and then determine how to recover (if possible).
/* Per-error snapshot record filled in by the assembly trap handlers;
 * two such records exist per cpu (TL==0 and TL>0, see
 * cheetah_get_error_log()).  Offsets are noted inline.
 */
491 struct cheetah_err_info {
/* D-cache snapshot at the time of the error. */
496 /*0x10*/u64 dcache_data[4]; /* The actual data */
497 /*0x30*/u64 dcache_index; /* D-cache index */
498 /*0x38*/u64 dcache_tag; /* D-cache tag/valid */
499 /*0x40*/u64 dcache_utag; /* D-cache microtag */
500 /*0x48*/u64 dcache_stag; /* D-cache snooptag */
/* I-cache snapshot. */
503 /*0x50*/u64 icache_data[8]; /* The actual insns + predecode */
504 /*0x90*/u64 icache_index; /* I-cache index */
505 /*0x98*/u64 icache_tag; /* I-cache phys tag */
506 /*0xa0*/u64 icache_utag; /* I-cache microtag */
507 /*0xa8*/u64 icache_stag; /* I-cache snooptag */
508 /*0xb0*/u64 icache_upper; /* I-cache upper-tag */
509 /*0xb8*/u64 icache_lower; /* I-cache lower-tag */
/* E-cache snapshot. */
512 /*0xc0*/u64 ecache_data[4]; /* 32 bytes from staging registers */
513 /*0xe0*/u64 ecache_index; /* E-cache index */
514 /*0xe8*/u64 ecache_tag; /* E-cache tag/state */
/* Pad the record out to 0x100 bytes. */
516 /*0xf0*/u64 __pad[32 - 30];
/* Sentinel AFSR value meaning "slot free / no logged error". */
518 #define CHAFSR_INVALID ((u64)-1L)
520 /* This table is ordered in priority of errors and matches the
521 * AFAR overwrite policy as well.
/* One AFSR bit mask plus its human-readable description. */
524 struct afsr_error_table {
/* Human-readable descriptions for the plain Cheetah AFSR error bits. */
529 static const char CHAFSR_PERR_msg[] =
530 "System interface protocol error";
531 static const char CHAFSR_IERR_msg[] =
532 "Internal processor error";
533 static const char CHAFSR_ISAP_msg[] =
/* NOTE(review): "addresss" typo below is user-visible log text; fix
 * deliberately, not silently.
 */
534 "System request parity error on incoming addresss";
535 static const char CHAFSR_UCU_msg[] =
536 "Uncorrectable E-cache ECC error for ifetch/data";
537 static const char CHAFSR_UCC_msg[] =
538 "SW Correctable E-cache ECC error for ifetch/data";
539 static const char CHAFSR_UE_msg[] =
540 "Uncorrectable system bus data ECC error for read";
541 static const char CHAFSR_EDU_msg[] =
542 "Uncorrectable E-cache ECC error for stmerge/blkld";
543 static const char CHAFSR_EMU_msg[] =
544 "Uncorrectable system bus MTAG error";
545 static const char CHAFSR_WDU_msg[] =
546 "Uncorrectable E-cache ECC error for writeback";
547 static const char CHAFSR_CPU_msg[] =
548 "Uncorrectable ECC error for copyout";
549 static const char CHAFSR_CE_msg[] =
550 "HW corrected system bus data ECC error for read";
551 static const char CHAFSR_EDC_msg[] =
552 "HW corrected E-cache ECC error for stmerge/blkld";
553 static const char CHAFSR_EMC_msg[] =
554 "HW corrected system bus MTAG ECC error";
555 static const char CHAFSR_WDC_msg[] =
556 "HW corrected E-cache ECC error for writeback";
557 static const char CHAFSR_CPC_msg[] =
558 "HW corrected ECC error for copyout";
559 static const char CHAFSR_TO_msg[] =
560 "Unmapped error from system bus";
561 static const char CHAFSR_BERR_msg[] =
562 "Bus error response from system bus";
563 static const char CHAFSR_IVC_msg[] =
564 "HW corrected system bus data ECC error for ivec read";
565 static const char CHAFSR_IVU_msg[] =
566 "Uncorrectable system bus data ECC error for ivec read";
/* Plain Cheetah AFSR decode table, highest-priority error first (see
 * priority/AFAR-overwrite comment above).
 */
567 static struct afsr_error_table __cheetah_error_table[] = {
568 { CHAFSR_PERR, CHAFSR_PERR_msg },
569 { CHAFSR_IERR, CHAFSR_IERR_msg },
570 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
571 { CHAFSR_UCU, CHAFSR_UCU_msg },
572 { CHAFSR_UCC, CHAFSR_UCC_msg },
573 { CHAFSR_UE, CHAFSR_UE_msg },
574 { CHAFSR_EDU, CHAFSR_EDU_msg },
575 { CHAFSR_EMU, CHAFSR_EMU_msg },
576 { CHAFSR_WDU, CHAFSR_WDU_msg },
577 { CHAFSR_CPU, CHAFSR_CPU_msg },
578 { CHAFSR_CE, CHAFSR_CE_msg },
579 { CHAFSR_EDC, CHAFSR_EDC_msg },
580 { CHAFSR_EMC, CHAFSR_EMC_msg },
581 { CHAFSR_WDC, CHAFSR_WDC_msg },
582 { CHAFSR_CPC, CHAFSR_CPC_msg },
583 { CHAFSR_TO, CHAFSR_TO_msg },
584 { CHAFSR_BERR, CHAFSR_BERR_msg },
585 /* These two do not update the AFAR. */
586 { CHAFSR_IVC, CHAFSR_IVC_msg },
587 { CHAFSR_IVU, CHAFSR_IVU_msg },
/* Additional error-bit descriptions that only exist on Cheetah+. */
590 static const char CHPAFSR_DTO_msg[] =
591 "System bus unmapped error for prefetch/storequeue-read";
592 static const char CHPAFSR_DBERR_msg[] =
593 "System bus error for prefetch/storequeue-read";
594 static const char CHPAFSR_THCE_msg[] =
595 "Hardware corrected E-cache Tag ECC error";
596 static const char CHPAFSR_TSCE_msg[] =
597 "SW handled correctable E-cache Tag ECC error";
598 static const char CHPAFSR_TUE_msg[] =
599 "Uncorrectable E-cache Tag ECC error";
600 static const char CHPAFSR_DUE_msg[] =
601 "System bus uncorrectable data ECC error due to prefetch/store-fill";
/* Cheetah+ decode table: the plain Cheetah entries plus the CHPAFSR_*
 * bits, still highest-priority first.
 */
602 static struct afsr_error_table __cheetah_plus_error_table[] = {
603 { CHAFSR_PERR, CHAFSR_PERR_msg },
604 { CHAFSR_IERR, CHAFSR_IERR_msg },
605 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
606 { CHAFSR_UCU, CHAFSR_UCU_msg },
607 { CHAFSR_UCC, CHAFSR_UCC_msg },
608 { CHAFSR_UE, CHAFSR_UE_msg },
609 { CHAFSR_EDU, CHAFSR_EDU_msg },
610 { CHAFSR_EMU, CHAFSR_EMU_msg },
611 { CHAFSR_WDU, CHAFSR_WDU_msg },
612 { CHAFSR_CPU, CHAFSR_CPU_msg },
613 { CHAFSR_CE, CHAFSR_CE_msg },
614 { CHAFSR_EDC, CHAFSR_EDC_msg },
615 { CHAFSR_EMC, CHAFSR_EMC_msg },
616 { CHAFSR_WDC, CHAFSR_WDC_msg },
617 { CHAFSR_CPC, CHAFSR_CPC_msg },
618 { CHAFSR_TO, CHAFSR_TO_msg },
619 { CHAFSR_BERR, CHAFSR_BERR_msg },
620 { CHPAFSR_DTO, CHPAFSR_DTO_msg },
621 { CHPAFSR_DBERR, CHPAFSR_DBERR_msg },
622 { CHPAFSR_THCE, CHPAFSR_THCE_msg },
623 { CHPAFSR_TSCE, CHPAFSR_TSCE_msg },
624 { CHPAFSR_TUE, CHPAFSR_TUE_msg },
625 { CHPAFSR_DUE, CHPAFSR_DUE_msg },
626 /* These two do not update the AFAR. */
627 { CHAFSR_IVC, CHAFSR_IVC_msg },
628 { CHAFSR_IVU, CHAFSR_IVU_msg },
/* Error-bit descriptions specific to Jalapeno/Serrano (JBUS) cpus. */
631 static const char JPAFSR_JETO_msg[] =
632 "System interface protocol error, hw timeout caused";
633 static const char JPAFSR_SCE_msg[] =
634 "Parity error on system snoop results";
635 static const char JPAFSR_JEIC_msg[] =
636 "System interface protocol error, illegal command detected";
637 static const char JPAFSR_JEIT_msg[] =
638 "System interface protocol error, illegal ADTYPE detected";
639 static const char JPAFSR_OM_msg[] =
640 "Out of range memory error has occurred";
641 static const char JPAFSR_ETP_msg[] =
642 "Parity error on L2 cache tag SRAM";
643 static const char JPAFSR_UMS_msg[] =
644 "Error due to unsupported store";
645 static const char JPAFSR_RUE_msg[] =
646 "Uncorrectable ECC error from remote cache/memory";
647 static const char JPAFSR_RCE_msg[] =
648 "Correctable ECC error from remote cache/memory";
649 static const char JPAFSR_BP_msg[] =
650 "JBUS parity error on returned read data";
651 static const char JPAFSR_WBP_msg[] =
652 "JBUS parity error on data for writeback or block store";
653 static const char JPAFSR_FRC_msg[] =
654 "Foreign read to DRAM incurring correctable ECC error";
655 static const char JPAFSR_FRU_msg[] =
656 "Foreign read to DRAM incurring uncorrectable ECC error";
/* Jalapeno/Serrano decode table, highest-priority error first. */
657 static struct afsr_error_table __jalapeno_error_table[] = {
658 { JPAFSR_JETO, JPAFSR_JETO_msg },
659 { JPAFSR_SCE, JPAFSR_SCE_msg },
660 { JPAFSR_JEIC, JPAFSR_JEIC_msg },
661 { JPAFSR_JEIT, JPAFSR_JEIT_msg },
662 { CHAFSR_PERR, CHAFSR_PERR_msg },
663 { CHAFSR_IERR, CHAFSR_IERR_msg },
664 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
665 { CHAFSR_UCU, CHAFSR_UCU_msg },
666 { CHAFSR_UCC, CHAFSR_UCC_msg },
667 { CHAFSR_UE, CHAFSR_UE_msg },
668 { CHAFSR_EDU, CHAFSR_EDU_msg },
669 { JPAFSR_OM, JPAFSR_OM_msg },
670 { CHAFSR_WDU, CHAFSR_WDU_msg },
671 { CHAFSR_CPU, CHAFSR_CPU_msg },
672 { CHAFSR_CE, CHAFSR_CE_msg },
673 { CHAFSR_EDC, CHAFSR_EDC_msg },
674 { JPAFSR_ETP, JPAFSR_ETP_msg },
675 { CHAFSR_WDC, CHAFSR_WDC_msg },
676 { CHAFSR_CPC, CHAFSR_CPC_msg },
677 { CHAFSR_TO, CHAFSR_TO_msg },
678 { CHAFSR_BERR, CHAFSR_BERR_msg },
679 { JPAFSR_UMS, JPAFSR_UMS_msg },
680 { JPAFSR_RUE, JPAFSR_RUE_msg },
681 { JPAFSR_RCE, JPAFSR_RCE_msg },
682 { JPAFSR_BP, JPAFSR_BP_msg },
683 { JPAFSR_WBP, JPAFSR_WBP_msg },
684 { JPAFSR_FRC, JPAFSR_FRC_msg },
685 { JPAFSR_FRU, JPAFSR_FRU_msg },
686 /* These two do not update the AFAR. */
687 { CHAFSR_IVU, CHAFSR_IVU_msg },
/* Selected at boot by cheetah_ecache_flush_init() to match the cpu
 * implementation (plain Cheetah, Cheetah+, or Jalapeno/Serrano).
 */
690 static struct afsr_error_table *cheetah_error_table;
691 static unsigned long cheetah_afsr_errors;
693 /* This is allocated at boot time based upon the largest hardware
694 * cpu ID in the system. We allocate two entries per cpu, one for
695 * TL==0 logging and one for TL >= 1 logging.
697 struct cheetah_err_info *cheetah_error_log;
/* Return the calling cpu's error-log slot for the given AFSR: slots are
 * laid out two per cpu starting at cheetah_error_log + 2*cpu, with the
 * CHAFSR_TL1 bit selecting the TL>0 entry.  Bails out if the log has
 * not been allocated yet.
 */
699 static __inline__ struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
701 struct cheetah_err_info *p;
702 int cpu = smp_processor_id();
704 if (!cheetah_error_log)
707 p = cheetah_error_log + (cpu * 2);
708 if ((afsr & CHAFSR_TL1) != 0UL)
714 extern unsigned int tl0_icpe[], tl1_icpe[];
715 extern unsigned int tl0_dcpe[], tl1_dcpe[];
716 extern unsigned int tl0_fecc[], tl1_fecc[];
717 extern unsigned int tl0_cee[], tl1_cee[];
718 extern unsigned int tl0_iae[], tl1_iae[];
719 extern unsigned int tl0_dae[], tl1_dae[];
720 extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
721 extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
722 extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
723 extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
724 extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
/* Boot-time setup for Cheetah error handling:
 *  1) probe all cpus' E-cache geometry via the PROM and pick a flush
 *     region (2x the largest E-cache, using the smallest line size);
 *  2) allocate and initialize the per-cpu error-log scoreboard;
 *  3) select the AFSR decode table for this cpu implementation;
 *  4) patch the error trap vectors with the real handlers.
 */
726 void __init cheetah_ecache_flush_init(void)
728 unsigned long largest_size, smallest_linesize, order, ver;
729 int node, i, instance;
731 /* Scan all cpu device tree nodes, note two values:
732 * 1) largest E-cache size
733 * 2) smallest E-cache line size
736 smallest_linesize = ~0UL;
739 while (!cpu_find_by_instance(instance, &node, NULL)) {
742 val = prom_getintdefault(node, "ecache-size",
744 if (val > largest_size)
746 val = prom_getintdefault(node, "ecache-line-size", 64);
747 if (val < smallest_linesize)
748 smallest_linesize = val;
752 if (largest_size == 0UL || smallest_linesize == ~0UL) {
753 prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
758 ecache_flush_size = (2 * largest_size);
759 ecache_flush_linesize = smallest_linesize;
761 ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);
763 if (ecache_flush_physbase == ~0UL) {
764 prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
765 "contiguous physical memory.\n",
770 /* Now allocate error trap reporting scoreboard. */
/* 'node' is reused here as a byte count (2 log entries per cpu). */
771 node = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
772 for (order = 0; order < MAX_ORDER; order++) {
773 if ((PAGE_SIZE << order) >= node)
776 cheetah_error_log = (struct cheetah_err_info *)
777 __get_free_pages(GFP_KERNEL, order);
778 if (!cheetah_error_log) {
779 prom_printf("cheetah_ecache_flush_init: Failed to allocate "
780 "error logging scoreboard (%d bytes).\n", node);
783 memset(cheetah_error_log, 0, PAGE_SIZE << order);
785 /* Mark all AFSRs as invalid so that the trap handler will
786 * log new information there.
788 for (i = 0; i < 2 * NR_CPUS; i++)
789 cheetah_error_log[i].afsr = CHAFSR_INVALID;
/* Pick the decode table from the cpu implementation field of %ver. */
791 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
792 if ((ver >> 32) == __JALAPENO_ID ||
793 (ver >> 32) == __SERRANO_ID) {
794 cheetah_error_table = &__jalapeno_error_table[0];
795 cheetah_afsr_errors = JPAFSR_ERRORS;
796 } else if ((ver >> 32) == 0x003e0015) {
797 cheetah_error_table = &__cheetah_plus_error_table[0];
798 cheetah_afsr_errors = CHPAFSR_ERRORS;
800 cheetah_error_table = &__cheetah_error_table[0];
801 cheetah_afsr_errors = CHAFSR_ERRORS;
804 /* Now patch trap tables. */
805 memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
806 memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
807 memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
808 memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
809 memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
810 memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
811 memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
812 memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
/* D-cache/I-cache parity error vectors exist only on Cheetah+. */
813 if (tlb_type == cheetah_plus) {
814 memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
815 memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
816 memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
817 memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
/* Displace the entire E-cache by reading 2x its size of physical
 * memory through ASI_PHYS_USE_EC, one line at a time, counting down
 * from the top of the flush area.
 */
822 static void cheetah_flush_ecache(void)
824 unsigned long flush_base = ecache_flush_physbase;
825 unsigned long flush_linesize = ecache_flush_linesize;
826 unsigned long flush_size = ecache_flush_size;
828 __asm__ __volatile__("1: subcc %0, %4, %0\n\t"
829 " bne,pt %%xcc, 1b\n\t"
830 " ldxa [%2 + %0] %3, %%g0\n\t"
832 : "0" (flush_size), "r" (flush_base),
833 "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
/* Displace the single E-cache line holding 'physaddr' by reading the
 * two flush-area addresses (one in each half) that index the same line.
 */
836 static void cheetah_flush_ecache_line(unsigned long physaddr)
/* Align to 8 bytes, then map into the flush area modulo half its size. */
840 physaddr &= ~(8UL - 1UL);
841 physaddr = (ecache_flush_physbase +
842 (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
843 alias = physaddr + (ecache_flush_size >> 1UL);
844 __asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
845 "ldxa [%1] %2, %%g0\n\t"
848 : "r" (physaddr), "r" (alias),
849 "i" (ASI_PHYS_USE_EC));
852 /* Unfortunately, the diagnostic access to the I-cache tags we need to
853 * use to clear the thing interferes with I-cache coherency transactions.
855 * So we must only flush the I-cache when it is disabled.
/* Invalidate every I-cache tag via the diagnostic ASI; caller must
 * have disabled the I-cache first (see cheetah_flush_icache()).
 */
857 static void __cheetah_flush_icache(void)
859 unsigned int icache_size, icache_line_size;
862 icache_size = local_cpu_data().icache_size;
863 icache_line_size = local_cpu_data().icache_line_size;
865 /* Clear the valid bits in all the tags. */
866 for (addr = 0; addr < icache_size; addr += icache_line_size) {
867 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
870 : "r" (addr | (2 << 3)),
/* Safe I-cache flush: disable the I-cache via the DCU control register,
 * run the diagnostic tag invalidation, then restore the saved DCU value
 * (see comment above __cheetah_flush_icache()).
 */
875 static void cheetah_flush_icache(void)
877 unsigned long dcu_save;
879 /* Save current DCU, disable I-cache. */
880 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
881 "or %0, %2, %%g1\n\t"
882 "stxa %%g1, [%%g0] %1\n\t"
885 : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
888 __cheetah_flush_icache();
890 /* Restore DCU register */
891 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
894 : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
/* Invalidate every D-cache tag via ASI_DCACHE_TAG, one line at a time. */
897 static void cheetah_flush_dcache(void)
899 unsigned int dcache_size, dcache_line_size;
902 dcache_size = local_cpu_data().dcache_size;
903 dcache_line_size = local_cpu_data().dcache_line_size;
905 for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
906 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
909 : "r" (addr), "i" (ASI_DCACHE_TAG));
913 /* In order to make the even parity correct we must do two things.
914 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
915 * Next, we clear out all 32-bytes of data for that line. Data of
916 * all-zero + tag parity value of zero == correct parity.
/* Rewrite every D-cache line's utag and data so parity is consistent
 * again after a parity-error trap (Cheetah+ only; see comment above).
 */
918 static void cheetah_plus_zap_dcache_parity(void)
920 unsigned int dcache_size, dcache_line_size;
923 dcache_size = local_cpu_data().dcache_size;
924 dcache_line_size = local_cpu_data().dcache_line_size;
926 for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
/* Derived utag value -- appears tied to the cache indexing; confirm
 * the >> 14 against the Cheetah+ D-cache documentation.
 */
927 unsigned long tag = (addr >> 14);
930 __asm__ __volatile__("membar #Sync\n\t"
931 "stxa %0, [%1] %2\n\t"
934 : "r" (tag), "r" (addr),
935 "i" (ASI_DCACHE_UTAG));
/* Zero all 8-byte words of data in the line. */
936 for (line = addr; line < addr + dcache_line_size; line += 8)
937 __asm__ __volatile__("membar #Sync\n\t"
938 "stxa %%g0, [%0] %1\n\t"
942 "i" (ASI_DCACHE_DATA));
946 /* Conversion tables used to frob Cheetah AFSR syndrome values into
947 * something palatable to the memory controller driver get_unumber
/* E-cache data ECC syndrome decode: numeric entries name the failing
 * bit; the C*/M* codes (macros defined outside this view) mark check
 * bits and multi-bit cases.  Indexed by the 9-bit AFSR E_SYND field.
 */
971 static unsigned char cheetah_ecc_syntab[] = {
972 /*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
973 /*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
974 /*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
975 /*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
976 /*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
977 /*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
978 /*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
979 /*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
980 /*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
981 /*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
982 /*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
983 /*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
984 /*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
985 /*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
986 /*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
987 /*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
988 /*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
989 /*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
990 /*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
991 /*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
992 /*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
993 /*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
994 /*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
995 /*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
996 /*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
997 /*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
998 /*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
999 /*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
1000 /*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
1001 /*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
1002 /*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
1003 /*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
/* MTAG syndrome decode table (contents elided in this listing). */
1005 static unsigned char cheetah_mtag_syntab[] = {
1016 /* Return the highest priority error condition mentioned. */
/* Scan the priority-ordered decode table and return the first AFSR
 * error bit present in 'afsr' (the table ends with a zero mask).
 */
1017 static __inline__ unsigned long cheetah_get_hipri(unsigned long afsr)
1019 unsigned long tmp = 0;
1022 for (i = 0; cheetah_error_table[i].mask; i++) {
1023 if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
/* Map an AFSR error bit back to its human-readable description from
 * the currently selected decode table.
 */
1029 static const char *cheetah_get_string(unsigned long bit)
1033 for (i = 0; cheetah_error_table[i].mask; i++) {
1034 if ((bit & cheetah_error_table[i].mask) != 0UL)
1035 return cheetah_error_table[i].name;
1040 extern int chmc_getunumber(int, unsigned long, char *, int);
/* Emit a full console report for a Cheetah error trap:
 *  - AFSR/AFAR, trap registers, and M/E syndromes,
 *  - the highest-priority error bit (via cheetah_get_hipri) by name,
 *  - the DIMM "unumber" via chmc_getunumber() when the AFSR carries an
 *    E-syndrome or M-syndrome error,
 *  - D-cache, I-cache, and E-cache snapshot data from *info,
 *  - every remaining AFSR error bit, highest priority first.
 * All lines are printed at KERN_WARNING when @recoverable is non-zero,
 * KERN_CRIT otherwise; an extra "not recoverable" line is printed for
 * the irrecoverable case.
 */
1042 static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
1043 unsigned long afsr, unsigned long afar, int recoverable)
1045 unsigned long hipri;
1048 printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
1049 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1051 (afsr & CHAFSR_TL1) ? 1 : 0);
1052 printk("%s" "ERROR(%d): TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1053 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1054 regs->tpc, regs->tnpc, regs->tstate);
1055 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
1056 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1057 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
1058 (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
1059 (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
1060 (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
1061 hipri = cheetah_get_hipri(afsr);
1062 printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
1063 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1064 hipri, cheetah_get_string(hipri));
1066 /* Try to get unumber if relevant. */
1067 #define ESYND_ERRORS (CHAFSR_IVC | CHAFSR_IVU | \
1068 CHAFSR_CPC | CHAFSR_CPU | \
1069 CHAFSR_UE | CHAFSR_CE | \
1070 CHAFSR_EDC | CHAFSR_EDU | \
1071 CHAFSR_UCC | CHAFSR_UCU | \
1072 CHAFSR_WDU | CHAFSR_WDC)
1073 #define MSYND_ERRORS (CHAFSR_EMC | CHAFSR_EMU)
1074 if (afsr & ESYND_ERRORS) {
/* E-syndrome path: map the raw syndrome through cheetah_ecc_syntab
 * before asking the memory controller driver for the unumber.
 */
1078 syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
1079 syndrome = cheetah_ecc_syntab[syndrome];
1080 ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
1082 printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
1083 (recoverable ? KERN_WARNING : KERN_CRIT),
1084 smp_processor_id(), unum);
1085 } else if (afsr & MSYND_ERRORS) {
/* M-syndrome path: same lookup, but through cheetah_mtag_syntab. */
1089 syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
1090 syndrome = cheetah_mtag_syntab[syndrome];
1091 ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
1093 printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
1094 (recoverable ? KERN_WARNING : KERN_CRIT),
1095 smp_processor_id(), unum);
1098 /* Now dump the cache snapshots. */
1099 printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
1100 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1101 (int) info->dcache_index,
1105 printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
1106 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1107 info->dcache_data[0],
1108 info->dcache_data[1],
1109 info->dcache_data[2],
1110 info->dcache_data[3]);
1111 printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
1112 "u[%016lx] l[%016lx]\n",
1113 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1114 (int) info->icache_index,
1119 info->icache_lower);
1120 printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
1121 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1122 info->icache_data[0],
1123 info->icache_data[1],
1124 info->icache_data[2],
1125 info->icache_data[3]);
1126 printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
1127 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1128 info->icache_data[4],
1129 info->icache_data[5],
1130 info->icache_data[6],
1131 info->icache_data[7]);
1132 printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
1133 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1134 (int) info->ecache_index, info->ecache_tag);
1135 printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
1136 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1137 info->ecache_data[0],
1138 info->ecache_data[1],
1139 info->ecache_data[2],
1140 info->ecache_data[3]);
/* Strip the already-reported highest-priority bit, then walk the rest
 * of the error bits in priority order.
 */
1142 afsr = (afsr & ~hipri) & cheetah_afsr_errors;
1143 while (afsr != 0UL) {
1144 unsigned long bit = cheetah_get_hipri(afsr);
1146 printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
1147 (recoverable ? KERN_WARNING : KERN_CRIT),
1148 bit, cheetah_get_string(bit));
1154 printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
/* Re-read the hardware AFSR (and, when error bits from
 * cheetah_afsr_errors are present, the AFAR) via ldxa, then clear the
 * observed bits by writing AFSR back with stxa.  Presumably records
 * the values into *logp and returns non-zero when a new error was
 * found -- TODO confirm: the logging/return lines are not visible in
 * this view.
 */
1157 static int cheetah_recheck_errors(struct cheetah_err_info *logp)
1159 unsigned long afsr, afar;
1162 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
1165 if ((afsr & cheetah_afsr_errors) != 0) {
1167 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
/* Write-one-to-clear the error bits we just sampled. */
1175 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
1177 : : "r" (afsr), "i" (ASI_AFSR));
/* Fast-ECC error trap handler.  Flushes the E-cache, snapshots the
 * per-cpu error log entry for this AFSR (invalidating whichever copy
 * is stale), flushes and re-enables the I/D caches, re-enables NCEEN
 * and CEEN error reporting, then decides recoverability: PERR, IERR,
 * or ISAP in the AFSR -- or a new asynchronous error logged while
 * reporting traps were disabled -- makes the trap irrecoverable, in
 * which case we panic after logging.
 */
1182 void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1184 struct cheetah_err_info local_snapshot, *p;
1188 cheetah_flush_ecache();
1190 p = cheetah_get_error_log(afsr);
/* No log slot: report via PROM console, presumably before dying. */
1192 prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
1194 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1195 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1199 /* Grab snapshot of logged error. */
1200 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1202 /* If the current trap snapshot does not match what the
1203 * trap handler passed along into our args, big trouble.
1204 * In such a case, mark the local copy as invalid.
1206 * Else, it matches and we mark the afsr in the non-local
1207 * copy as invalid so we may log new error traps there.
1209 if (p->afsr != afsr || p->afar != afar)
1210 local_snapshot.afsr = CHAFSR_INVALID;
1212 p->afsr = CHAFSR_INVALID;
1214 cheetah_flush_icache();
1215 cheetah_flush_dcache();
1217 /* Re-enable I-cache/D-cache */
1218 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1219 "or %%g1, %1, %%g1\n\t"
1220 "stxa %%g1, [%%g0] %0\n\t"
1223 : "i" (ASI_DCU_CONTROL_REG),
1224 "i" (DCU_DC | DCU_IC)
1227 /* Re-enable error reporting */
1228 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1229 "or %%g1, %1, %%g1\n\t"
1230 "stxa %%g1, [%%g0] %0\n\t"
1233 : "i" (ASI_ESTATE_ERROR_EN),
1234 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1237 /* Decide if we can continue after handling this trap and
1238 * logging the error.
1241 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1244 /* Re-check AFSR/AFAR. What we are looking for here is whether a new
1245 * error was logged while we had error reporting traps disabled.
1247 if (cheetah_recheck_errors(&local_snapshot)) {
1248 unsigned long new_afsr = local_snapshot.afsr;
1250 /* If we got a new asynchronous error, die... */
1251 if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
1252 CHAFSR_WDU | CHAFSR_CPU |
1253 CHAFSR_IVU | CHAFSR_UE |
1254 CHAFSR_BERR | CHAFSR_TO))
1259 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1262 panic("Irrecoverable Fast-ECC error trap.\n");
1264 /* Flush E-cache to kick the error trap handlers out. */
1265 cheetah_flush_ecache();
1268 /* Try to fix a correctable error by pushing the line out from
1269 * the E-cache. Recheck error reporting registers to see if the
1270 * problem is intermittent.
/* @physaddr is 8-byte aligned below.  CEEN is disabled for the
 * duration so the displacement loads/casxa don't recursively trap;
 * the original ESTATE value is restored at the end.  Return statements
 * are not visible in this view -- presumably non-zero means the error
 * persisted after the displacement sequence; TODO confirm.
 */
1272 static int cheetah_fix_ce(unsigned long physaddr)
1274 unsigned long orig_estate;
1275 unsigned long alias1, alias2;
1278 /* Make sure correctable error traps are disabled. */
1279 __asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
1280 "andn %0, %1, %%g1\n\t"
1281 "stxa %%g1, [%%g0] %2\n\t"
1283 : "=&r" (orig_estate)
1284 : "i" (ESTATE_ERROR_CEEN),
1285 "i" (ASI_ESTATE_ERROR_EN)
1288 /* We calculate alias addresses that will force the
1289 * cache line in question out of the E-cache. Then
1290 * we bring it back in with an atomic instruction so
1291 * that we get it in some modified/exclusive state,
1292 * then we displace it again to try and get proper ECC
1293 * pushed back into the system.
1295 physaddr &= ~(8UL - 1UL);
1296 alias1 = (ecache_flush_physbase +
1297 (physaddr & ((ecache_flush_size >> 1) - 1)));
1298 alias2 = alias1 + (ecache_flush_size >> 1);
1299 __asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
1300 "ldxa [%1] %3, %%g0\n\t"
1301 "casxa [%2] %3, %%g0, %%g0\n\t"
1302 "membar #StoreLoad | #StoreStore\n\t"
1303 "ldxa [%0] %3, %%g0\n\t"
1304 "ldxa [%1] %3, %%g0\n\t"
1307 : "r" (alias1), "r" (alias2),
1308 "r" (physaddr), "i" (ASI_PHYS_USE_EC));
1310 /* Did that trigger another error? */
1311 if (cheetah_recheck_errors(NULL)) {
1312 /* Try one more time. */
1313 __asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
1315 : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
1316 if (cheetah_recheck_errors(NULL))
1321 /* No new error, intermittent problem. */
1325 /* Restore error enables. */
1326 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
1328 : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));
1333 /* Return non-zero if PADDR is a valid physical memory address. */
/* Map @paddr into the kernel linear mapping and reject addresses at or
 * beyond high_memory; otherwise defer to kern_addr_valid().
 */
1334 static int cheetah_check_main_memory(unsigned long paddr)
1336 unsigned long vaddr = PAGE_OFFSET + paddr;
1338 if (vaddr > (unsigned long) high_memory)
1341 return kern_addr_valid(vaddr);
/* Correctable-ECC error trap handler.  Snapshots the per-cpu error
 * log, attempts an in-place fix via cheetah_fix_ce() when the AFAR is
 * valid main memory and CHAFSR_CE is set, flushes whichever caches the
 * specific error (EDC/CPC) requires, re-enables the I-cache and CEEN
 * reporting, and logs.  Only PERR/IERR/ISAP make this irrecoverable
 * (panic after logging).
 */
1344 void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1346 struct cheetah_err_info local_snapshot, *p;
1347 int recoverable, is_memory;
1349 p = cheetah_get_error_log(afsr);
/* No log slot: report via PROM console. */
1351 prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
1353 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1354 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1358 /* Grab snapshot of logged error. */
1359 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1361 /* If the current trap snapshot does not match what the
1362 * trap handler passed along into our args, big trouble.
1363 * In such a case, mark the local copy as invalid.
1365 * Else, it matches and we mark the afsr in the non-local
1366 * copy as invalid so we may log new error traps there.
1368 if (p->afsr != afsr || p->afar != afar)
1369 local_snapshot.afsr = CHAFSR_INVALID;
1371 p->afsr = CHAFSR_INVALID;
1373 is_memory = cheetah_check_main_memory(afar);
1375 if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
1376 /* XXX Might want to log the results of this operation
1377 * XXX somewhere... -DaveM
1379 cheetah_fix_ce(afar);
1383 int flush_all, flush_line;
1385 flush_all = flush_line = 0;
1386 if ((afsr & CHAFSR_EDC) != 0UL) {
/* EDC alone -> a single-line flush suffices; otherwise flush all. */
1387 if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
1391 } else if ((afsr & CHAFSR_CPC) != 0UL) {
1392 if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
1398 /* Trap handler only disabled I-cache, flush it. */
1399 cheetah_flush_icache();
1401 /* Re-enable I-cache */
1402 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1403 "or %%g1, %1, %%g1\n\t"
1404 "stxa %%g1, [%%g0] %0\n\t"
1407 : "i" (ASI_DCU_CONTROL_REG),
1412 cheetah_flush_ecache();
1413 else if (flush_line)
1414 cheetah_flush_ecache_line(afar);
1417 /* Re-enable error reporting */
1418 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1419 "or %%g1, %1, %%g1\n\t"
1420 "stxa %%g1, [%%g0] %0\n\t"
1423 : "i" (ASI_ESTATE_ERROR_EN),
1424 "i" (ESTATE_ERROR_CEEN)
1427 /* Decide if we can continue after handling this trap and
1428 * logging the error.
1431 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1434 /* Re-check AFSR/AFAR */
1435 (void) cheetah_recheck_errors(&local_snapshot);
1438 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1441 panic("Irrecoverable Correctable-ECC error trap.\n");
/* Deferred (asynchronous) error trap handler.  First services the
 * special PCI config-space "poke" sequence: if this CPU is mid-poke,
 * flush and re-enable the caches and error reporting, clear the error
 * state, flag pci_poke_faulted, and skip the faulting instruction.
 * Otherwise: snapshot the per-cpu error log, flush caches as required
 * by EDU/BERR, re-enable reporting, and decide recoverability --
 * PERR/IERR/ISAP or a new asynchronous error makes it irrecoverable.
 * For a recoverable main-memory error, the afflicted page is pinned
 * (get_page) so it is never handed out again, and kernel faults are
 * only survivable via an exception-table fixup.
 */
1444 void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1446 struct cheetah_err_info local_snapshot, *p;
1447 int recoverable, is_memory;
1450 /* Check for the special PCI poke sequence. */
1451 if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
1452 cheetah_flush_icache();
1453 cheetah_flush_dcache();
1455 /* Re-enable I-cache/D-cache */
1456 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1457 "or %%g1, %1, %%g1\n\t"
1458 "stxa %%g1, [%%g0] %0\n\t"
1461 : "i" (ASI_DCU_CONTROL_REG),
1462 "i" (DCU_DC | DCU_IC)
1465 /* Re-enable error reporting */
1466 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1467 "or %%g1, %1, %%g1\n\t"
1468 "stxa %%g1, [%%g0] %0\n\t"
1471 : "i" (ASI_ESTATE_ERROR_EN),
1472 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1475 (void) cheetah_recheck_errors(NULL);
1477 pci_poke_faulted = 1;
/* Step past the instruction that faulted during the poke. */
1479 regs->tnpc = regs->tpc + 4;
1484 p = cheetah_get_error_log(afsr);
/* No log slot: report via PROM console. */
1486 prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
1488 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1489 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1493 /* Grab snapshot of logged error. */
1494 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1496 /* If the current trap snapshot does not match what the
1497 * trap handler passed along into our args, big trouble.
1498 * In such a case, mark the local copy as invalid.
1500 * Else, it matches and we mark the afsr in the non-local
1501 * copy as invalid so we may log new error traps there.
1503 if (p->afsr != afsr || p->afar != afar)
1504 local_snapshot.afsr = CHAFSR_INVALID;
1506 p->afsr = CHAFSR_INVALID;
1508 is_memory = cheetah_check_main_memory(afar);
1511 int flush_all, flush_line;
1513 flush_all = flush_line = 0;
1514 if ((afsr & CHAFSR_EDU) != 0UL) {
/* EDU/BERR alone -> single-line flush; otherwise flush all. */
1515 if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
1519 } else if ((afsr & CHAFSR_BERR) != 0UL) {
1520 if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
1526 cheetah_flush_icache();
1527 cheetah_flush_dcache();
1529 /* Re-enable I/D caches */
1530 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1531 "or %%g1, %1, %%g1\n\t"
1532 "stxa %%g1, [%%g0] %0\n\t"
1535 : "i" (ASI_DCU_CONTROL_REG),
1536 "i" (DCU_IC | DCU_DC)
1540 cheetah_flush_ecache();
1541 else if (flush_line)
1542 cheetah_flush_ecache_line(afar);
1545 /* Re-enable error reporting */
1546 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1547 "or %%g1, %1, %%g1\n\t"
1548 "stxa %%g1, [%%g0] %0\n\t"
1551 : "i" (ASI_ESTATE_ERROR_EN),
1552 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1555 /* Decide if we can continue after handling this trap and
1556 * logging the error.
1559 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1562 /* Re-check AFSR/AFAR. What we are looking for here is whether a new
1563 * error was logged while we had error reporting traps disabled.
1565 if (cheetah_recheck_errors(&local_snapshot)) {
1566 unsigned long new_afsr = local_snapshot.afsr;
1568 /* If we got a new asynchronous error, die... */
1569 if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
1570 CHAFSR_WDU | CHAFSR_CPU |
1571 CHAFSR_IVU | CHAFSR_UE |
1572 CHAFSR_BERR | CHAFSR_TO))
1577 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1579 /* "Recoverable" here means we try to yank the page from ever
1580 * being newly used again. This depends upon a few things:
1581 * 1) Must be main memory, and AFAR must be valid.
1582 * 2) If we trapped from user, OK.
1583 * 3) Else, if we trapped from kernel we must find exception
1584 * table entry (ie. we have to have been accessing user
1587 * If AFAR is not in main memory, or we trapped from kernel
1588 * and cannot find an exception table entry, it is unacceptable
1589 * to try and continue.
1591 if (recoverable && is_memory) {
1592 if ((regs->tstate & TSTATE_PRIV) == 0UL) {
1593 /* OK, usermode access. */
1596 const struct exception_table_entry *entry;
1598 entry = search_exception_tables(regs->tpc);
1600 /* OK, kernel access to userspace. */
1604 /* BAD, privileged state is corrupted. */
/* Pin the bad page so the allocator never reuses it. */
1609 if (pfn_valid(afar >> PAGE_SHIFT))
1610 get_page(pfn_to_page(afar >> PAGE_SHIFT));
1614 /* Only perform fixup if we still have a
1615 * recoverable condition.
1618 regs->tpc = entry->fixup;
1619 regs->tnpc = regs->tpc + 4;
1628 panic("Irrecoverable deferred error trap.\n");
1631 /* Handle a D/I cache parity error trap. TYPE is encoded as:
1633 * Bit0: 0=dcache,1=icache
1634 * Bit1: 0=recoverable,1=unrecoverable
1636 * The hardware has disabled both the I-cache and D-cache in
1637 * the %dcr register.
/* See the comment above for the TYPE bit encoding.  Flush the
 * offending cache (zapping D-cache parity state for D-cache errors),
 * re-enable both caches in the DCU control register, then either panic
 * (type bit 1 set = unrecoverable) or warn and continue.
 */
1639 void cheetah_plus_parity_error(int type, struct pt_regs *regs)
1642 __cheetah_flush_icache();
1644 cheetah_plus_zap_dcache_parity();
1645 cheetah_flush_dcache();
1647 /* Re-enable I-cache/D-cache */
1648 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1649 "or %%g1, %1, %%g1\n\t"
1650 "stxa %%g1, [%%g0] %0\n\t"
1653 : "i" (ASI_DCU_CONTROL_REG),
1654 "i" (DCU_DC | DCU_IC)
1658 printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1660 (type & 0x1) ? 'I' : 'D',
1662 panic("Irrecoverable Cheetah+ parity error.");
1665 printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1667 (type & 0x1) ? 'I' : 'D',
/* Common FPU-exception disposition.  A kernel-mode trap just advances
 * past the faulting instruction; a user-mode trap decodes the FSR trap
 * type / exception bits into an FPE_* si_code and delivers SIGFPE at
 * the trapping PC (PCs masked to 32 bits for compat tasks).
 */
1671 void do_fpe_common(struct pt_regs *regs)
1673 if (regs->tstate & TSTATE_PRIV) {
1674 regs->tpc = regs->tnpc;
1677 unsigned long fsr = current_thread_info()->xfsr[0];
1680 if (test_thread_flag(TIF_32BIT)) {
1681 regs->tpc &= 0xffffffff;
1682 regs->tnpc &= 0xffffffff;
1684 info.si_signo = SIGFPE;
1686 info.si_addr = (void __user *)regs->tpc;
1688 info.si_code = __SI_FAULT;
/* fsr bits 14-16 = ftt; 1 = IEEE_754_exception, then decode cexc. */
1689 if ((fsr & 0x1c000) == (1 << 14)) {
1691 info.si_code = FPE_FLTINV;
1692 else if (fsr & 0x08)
1693 info.si_code = FPE_FLTOVF;
1694 else if (fsr & 0x04)
1695 info.si_code = FPE_FLTUND;
1696 else if (fsr & 0x02)
1697 info.si_code = FPE_FLTDIV;
1698 else if (fsr & 0x01)
1699 info.si_code = FPE_FLTRES;
1701 force_sig_info(SIGFPE, &info, current);
/* IEEE FPU exception trap (tt 0x24): offer it to the die notifier
 * chain, then fall into the common SIGFPE delivery path.
 */
1705 void do_fpieee(struct pt_regs *regs)
1707 if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
1708 0, 0x24, SIGFPE) == NOTIFY_STOP)
1711 do_fpe_common(regs);
1714 extern int do_mathemu(struct pt_regs *, struct fpustate *);
/* "FPU exception other" trap (tt 0x25).  For unfinished/unimplemented
 * FPops, try software emulation via do_mathemu(); otherwise (or when
 * emulation declines) fall through to the common SIGFPE path.
 */
1716 void do_fpother(struct pt_regs *regs)
1718 struct fpustate *f = FPUSTATE;
1721 if (notify_die(DIE_TRAP, "fpu exception other", regs,
1722 0, 0x25, SIGFPE) == NOTIFY_STOP)
1725 switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
1726 case (2 << 14): /* unfinished_FPop */
1727 case (3 << 14): /* unimplemented_FPop */
1728 ret = do_mathemu(regs, f);
1733 do_fpe_common(regs);
/* Tagged-arithmetic overflow trap (tt 0x26): die if it came from
 * kernel mode, otherwise deliver SIGEMT/EMT_TAGOVF to the task.
 */
1736 void do_tof(struct pt_regs *regs)
1740 if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
1741 0, 0x26, SIGEMT) == NOTIFY_STOP)
1744 if (regs->tstate & TSTATE_PRIV)
1745 die_if_kernel("Penguin overflow trap from kernel mode", regs);
1746 if (test_thread_flag(TIF_32BIT)) {
1747 regs->tpc &= 0xffffffff;
1748 regs->tnpc &= 0xffffffff;
1750 info.si_signo = SIGEMT;
1752 info.si_code = EMT_TAGOVF;
1753 info.si_addr = (void __user *)regs->tpc;
1755 force_sig_info(SIGEMT, &info, current);
/* Integer divide-by-zero trap (tt 0x28): die if from kernel mode,
 * otherwise deliver SIGFPE/FPE_INTDIV to the task.
 */
1758 void do_div0(struct pt_regs *regs)
1762 if (notify_die(DIE_TRAP, "integer division by zero", regs,
1763 0, 0x28, SIGFPE) == NOTIFY_STOP)
1766 if (regs->tstate & TSTATE_PRIV)
1767 die_if_kernel("TL0: Kernel divide by zero.", regs);
1768 if (test_thread_flag(TIF_32BIT)) {
1769 regs->tpc &= 0xffffffff;
1770 regs->tnpc &= 0xffffffff;
1772 info.si_signo = SIGFPE;
1774 info.si_code = FPE_INTDIV;
1775 info.si_addr = (void __user *)regs->tpc;
1777 force_sig_info(SIGFPE, &info, current);
/* Dump nine kernel instruction words around @pc (3 before, 6 after),
 * marking the word at @pc with angle brackets.  Bails out silently on
 * a misaligned pc.
 */
1780 void instruction_dump (unsigned int *pc)
1784 if ((((unsigned long) pc) & 3))
1787 printk("Instruction DUMP:");
1788 for (i = -3; i < 6; i++)
1789 printk("%c%08x%c",i?' ':'<',pc[i],i?' ':'>');
/* User-space variant of instruction_dump(): copy nine words around
 * @pc via copy_from_user into a local buffer first, bailing out on a
 * misaligned pc or a faulting copy; buf[3] (the word at pc) is marked.
 */
1793 static void user_instruction_dump (unsigned int __user *pc)
1796 unsigned int buf[9];
1798 if ((((unsigned long) pc) & 3))
1801 if (copy_from_user(buf, pc - 3, sizeof(buf)))
1804 printk("Instruction DUMP:");
1805 for (i = 0; i < 9; i++)
1806 printk("%c%08x%c",i==3?' ':'<',buf[i],i==3?' ':'>');
/* Print a call trace for @tsk starting from kernel stack pointer
 * @_ksp.  Walks saved frame pointers (register-window ins[6], with the
 * sparc64 STACK_BIAS applied) for up to 16 frames, stopping at the
 * first frame pointer that falls outside the task's thread area.
 * Symbol names are printed only when CONFIG_KALLSYMS is enabled.
 */
1810 void show_stack(struct task_struct *tsk, unsigned long *_ksp)
1812 unsigned long pc, fp, thread_base, ksp;
1813 void *tp = task_stack_page(tsk);
1814 struct reg_window *rw;
1817 ksp = (unsigned long) _ksp;
1819 if (tp == current_thread_info())
1822 fp = ksp + STACK_BIAS;
1823 thread_base = (unsigned long) tp;
1825 printk("Call Trace:");
1826 #ifdef CONFIG_KALLSYMS
1830 /* Bogus frame pointer? */
1831 if (fp < (thread_base + sizeof(struct thread_info)) ||
1832 fp >= (thread_base + THREAD_SIZE))
1834 rw = (struct reg_window *)fp;
1836 printk(" [%016lx] ", pc);
1837 print_symbol("%s\n", pc);
1838 fp = rw->ins[6] + STACK_BIAS;
1839 } while (++count < 16);
1840 #ifndef CONFIG_KALLSYMS
/* Exported helper: capture the current frame pointer with inline asm
 * and hand it to show_stack() for the running task.
 */
1845 void dump_stack(void)
1849 __asm__ __volatile__("mov %%fp, %0"
1851 show_stack(current, ksp);
1854 EXPORT_SYMBOL(dump_stack);
/* Return non-zero when register window @rw lies on a valid kernel
 * stack for @task: either inside the task's own thread area, or (for
 * init_task only) at an address below PAGE_OFFSET.
 */
1856 static inline int is_kernel_stack(struct task_struct *task,
1857 struct reg_window *rw)
1859 unsigned long rw_addr = (unsigned long) rw;
1860 unsigned long thread_base, thread_end;
1862 if (rw_addr < PAGE_OFFSET) {
1863 if (task != &init_task)
1867 thread_base = (unsigned long) task_stack_page(task);
1868 thread_end = thread_base + sizeof(union thread_union);
1869 if (rw_addr >= thread_base &&
1870 rw_addr < thread_end &&
/* Step up one stack frame: follow the saved frame pointer in ins[6]
 * and re-apply STACK_BIAS to get the caller's register window.
 */
1877 static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
1879 unsigned long fp = rw->ins[6];
1884 return (struct reg_window *) (fp + STACK_BIAS);
/* Oops path.  Prints the message banner with a die counter, notifies
 * the DIE_OOPS chain, flushes register windows to the stack, then:
 * for a kernel-mode trap, walks the saved register windows printing
 * each caller until the walk leaves the kernel stack, and dumps the
 * instructions at the trapping PC; for a user-mode trap, dumps the
 * user instructions instead (PC masked for 32-bit compat tasks).
 */
1887 void die_if_kernel(char *str, struct pt_regs *regs)
1889 static int die_counter;
1890 extern void __show_regs(struct pt_regs * regs);
1891 extern void smp_report_regs(void);
1894 /* Amuse the user. */
1897 " \"@'/ .. \\`@\"\n"
1901 printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter);
1902 notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
1903 __asm__ __volatile__("flushw");
1905 if (regs->tstate & TSTATE_PRIV) {
1906 struct reg_window *rw = (struct reg_window *)
1907 (regs->u_regs[UREG_FP] + STACK_BIAS);
1909 /* Stop the back trace when we hit userland or we
1910 * find some badly aligned kernel stack.
1914 is_kernel_stack(current, rw)) {
1915 printk("Caller[%016lx]", rw->ins[7]);
1916 print_symbol(": %s", rw->ins[7]);
1919 rw = kernel_stack_up(rw);
1921 instruction_dump ((unsigned int *) regs->tpc);
1923 if (test_thread_flag(TIF_32BIT)) {
1924 regs->tpc &= 0xffffffff;
1925 regs->tnpc &= 0xffffffff;
1927 user_instruction_dump ((unsigned int __user *) regs->tpc);
1933 if (regs->tstate & TSTATE_PRIV)
1938 extern int handle_popc(u32 insn, struct pt_regs *regs);
1939 extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
/* Illegal-instruction trap (tt 0x10).  Kernel-mode: die.  User-mode:
 * fetch the opcode and attempt in-kernel emulation of POPC and
 * LDQ/STQ via handle_popc()/handle_ldf_stq(); if neither applies (or
 * emulation fails), deliver SIGILL/ILL_ILLOPC at the trapping PC.
 */
1941 void do_illegal_instruction(struct pt_regs *regs)
1943 unsigned long pc = regs->tpc;
1944 unsigned long tstate = regs->tstate;
1948 if (notify_die(DIE_TRAP, "illegal instruction", regs,
1949 0, 0x10, SIGILL) == NOTIFY_STOP)
1952 if (tstate & TSTATE_PRIV)
1953 die_if_kernel("Kernel illegal instruction", regs);
1954 if (test_thread_flag(TIF_32BIT))
1956 if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
1957 if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
1958 if (handle_popc(insn, regs))
1960 } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
1961 if (handle_ldf_stq(insn, regs))
1965 info.si_signo = SIGILL;
1967 info.si_code = ILL_ILLOPC;
1968 info.si_addr = (void __user *)pc;
1970 force_sig_info(SIGILL, &info, current);
/* Memory-address-not-aligned trap (tt 0x34).  Kernel-mode faults are
 * handed to kernel_unaligned_trap() for fixup/emulation; user-mode
 * faults deliver SIGBUS/BUS_ADRALN with the faulting address (sfar).
 */
1973 void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
1977 if (notify_die(DIE_TRAP, "memory address unaligned", regs,
1978 0, 0x34, SIGSEGV) == NOTIFY_STOP)
1981 if (regs->tstate & TSTATE_PRIV) {
1982 extern void kernel_unaligned_trap(struct pt_regs *regs,
1985 unsigned long sfsr);
1987 kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc),
1991 info.si_signo = SIGBUS;
1993 info.si_code = BUS_ADRALN;
1994 info.si_addr = (void __user *)sfar;
1996 force_sig_info(SIGBUS, &info, current);
/* Privileged-opcode trap (tt 0x11) from user mode: deliver
 * SIGILL/ILL_PRVOPC at the trapping PC (masked for compat tasks).
 */
1999 void do_privop(struct pt_regs *regs)
2003 if (notify_die(DIE_TRAP, "privileged operation", regs,
2004 0, 0x11, SIGILL) == NOTIFY_STOP)
2007 if (test_thread_flag(TIF_32BIT)) {
2008 regs->tpc &= 0xffffffff;
2009 regs->tnpc &= 0xffffffff;
2011 info.si_signo = SIGILL;
2013 info.si_code = ILL_PRVOPC;
2014 info.si_addr = (void __user *)regs->tpc;
2016 force_sig_info(SIGILL, &info, current);
2019 void do_privact(struct pt_regs *regs)
2024 /* Trap level 1 stuff or other traps we should never see... */
/* Each handler below is terminal: the TL1 variants first dump the
 * saved trap-level stack (dump_tl1_traplog on the tl1_traplog that the
 * trap entry code placed just past pt_regs), then all of them die via
 * die_if_kernel() with a message naming the trap.
 */
2025 void do_cee(struct pt_regs *regs)
2027 die_if_kernel("TL0: Cache Error Exception", regs);
2030 void do_cee_tl1(struct pt_regs *regs)
2032 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2033 die_if_kernel("TL1: Cache Error Exception", regs);
2036 void do_dae_tl1(struct pt_regs *regs)
2038 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2039 die_if_kernel("TL1: Data Access Exception", regs);
2042 void do_iae_tl1(struct pt_regs *regs)
2044 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2045 die_if_kernel("TL1: Instruction Access Exception", regs);
2048 void do_div0_tl1(struct pt_regs *regs)
2050 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2051 die_if_kernel("TL1: DIV0 Exception", regs);
2054 void do_fpdis_tl1(struct pt_regs *regs)
2056 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2057 die_if_kernel("TL1: FPU Disabled", regs);
2060 void do_fpieee_tl1(struct pt_regs *regs)
2062 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2063 die_if_kernel("TL1: FPU IEEE Exception", regs);
2066 void do_fpother_tl1(struct pt_regs *regs)
2068 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2069 die_if_kernel("TL1: FPU Other Exception", regs);
2072 void do_ill_tl1(struct pt_regs *regs)
2074 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2075 die_if_kernel("TL1: Illegal Instruction Exception", regs);
2078 void do_irq_tl1(struct pt_regs *regs)
2080 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2081 die_if_kernel("TL1: IRQ Exception", regs);
2084 void do_lddfmna_tl1(struct pt_regs *regs)
2086 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2087 die_if_kernel("TL1: LDDF Exception", regs);
2090 void do_stdfmna_tl1(struct pt_regs *regs)
2092 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2093 die_if_kernel("TL1: STDF Exception", regs);
2096 void do_paw(struct pt_regs *regs)
2098 die_if_kernel("TL0: Phys Watchpoint Exception", regs);
2101 void do_paw_tl1(struct pt_regs *regs)
2103 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2104 die_if_kernel("TL1: Phys Watchpoint Exception", regs);
2107 void do_vaw(struct pt_regs *regs)
2109 die_if_kernel("TL0: Virt Watchpoint Exception", regs);
2112 void do_vaw_tl1(struct pt_regs *regs)
2114 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2115 die_if_kernel("TL1: Virt Watchpoint Exception", regs);
2118 void do_tof_tl1(struct pt_regs *regs)
2120 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2121 die_if_kernel("TL1: Tag Overflow Exception", regs);
/* SunOS/compat getpsr trap: return a v8-style PSR built from TSTATE
 * in %o0, then advance past the trap instruction (PCs masked to 32
 * bits for compat tasks).
 */
2124 void do_getpsr(struct pt_regs *regs)
2126 regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
2127 regs->tpc = regs->tnpc;
2129 if (test_thread_flag(TIF_32BIT)) {
2130 regs->tpc &= 0xffffffff;
2131 regs->tnpc &= 0xffffffff;
2135 struct trap_per_cpu trap_block[NR_CPUS];
2137 /* This can get invoked before sched_init() so play it super safe
2138 * and use hard_smp_processor_id().
/* Record this CPU's thread_info in its trap_block[] slot; uses
 * hard_smp_processor_id() because this can run before sched_init()
 * (see the comment above).
 */
2140 void init_cur_cpu_trap(void)
2142 int cpu = hard_smp_processor_id();
2143 struct trap_per_cpu *p = &trap_block[cpu];
2145 p->thread = current_thread_info();
2149 extern void thread_info_offsets_are_bolixed_dave(void);
2150 extern void trap_per_cpu_offsets_are_bolixed_dave(void);
2152 /* Only invoked on boot processor. */
/* Boot-time trap initialization.  Verifies that the TI_* and
 * TRAP_PER_CPU_* assembler offset constants match the real C structure
 * layouts (the assembly trap entry code relies on them; a mismatch
 * calls the deliberately-undefined *_are_bolixed_dave() functions so
 * the build/link fails loudly), and that TI_FPREGS is 64-byte aligned.
 * Finally attaches the boot CPU to init_mm; secondary CPUs do the same
 * in smp.c:smp_callin().
 */
2153 void __init trap_init(void)
2155 /* Compile time sanity check. */
2156 if (TI_TASK != offsetof(struct thread_info, task) ||
2157 TI_FLAGS != offsetof(struct thread_info, flags) ||
2158 TI_CPU != offsetof(struct thread_info, cpu) ||
2159 TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
2160 TI_KSP != offsetof(struct thread_info, ksp) ||
2161 TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
2162 TI_KREGS != offsetof(struct thread_info, kregs) ||
2163 TI_UTRAPS != offsetof(struct thread_info, utraps) ||
2164 TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
2165 TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
2166 TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
2167 TI_GSR != offsetof(struct thread_info, gsr) ||
2168 TI_XFSR != offsetof(struct thread_info, xfsr) ||
2169 TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
2170 TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
2171 TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
2172 TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
2173 TI_PCR != offsetof(struct thread_info, pcr_reg) ||
2174 TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
2175 TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
2176 TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
2177 TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
2178 TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
2179 TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
2180 TI_FPREGS != offsetof(struct thread_info, fpregs) ||
2181 (TI_FPREGS & (64 - 1)))
2182 thread_info_offsets_are_bolixed_dave();
2184 if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) ||
2185 (TRAP_PER_CPU_PGD_PADDR !=
2186 offsetof(struct trap_per_cpu, pgd_paddr)) ||
2187 (TRAP_PER_CPU_CPU_MONDO_PA !=
2188 offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
2189 (TRAP_PER_CPU_DEV_MONDO_PA !=
2190 offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
2191 (TRAP_PER_CPU_RESUM_MONDO_PA !=
2192 offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
2193 (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
2194 offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
2195 (TRAP_PER_CPU_FAULT_INFO !=
2196 offsetof(struct trap_per_cpu, fault_info)))
2197 trap_per_cpu_offsets_are_bolixed_dave();
2199 /* Attach to the address space of init_task. On SMP we
2200 * do this in smp.c:smp_callin for other cpus.
2202 atomic_inc(&init_mm.mm_count);
2203 current->active_mm = &init_mm;