/*
 * unaligned.c: Unaligned load/store trap handling with special
 *              cases for the kernel to do them more quickly.
 *
 * Copyright (C) 1996,2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/asi.h>
#include <asm/ptrace.h>
#include <asm/pstate.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
#include <asm/fpumacro.h>

enum direction {
	load,    /* ld, ldd, ldh, ldsh */
	store,   /* st, std, sth, stsh */
	both,    /* Swap, ldstub, cas, ... */
};

static inline enum direction decode_direction(unsigned int insn)
{
	unsigned long tmp = (insn >> 21) & 1;

	if (!tmp)
		return load;

	switch ((insn >> 19) & 0xf) {
	case 15: /* swap* */
		return both;
	default:
		return store;
	}
}

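/* Illustrative example for decode_direction() (added commentary, not
 * from the original source): op3 lives in insn bits 24:19, and bit 21
 * is op3 bit 2, which is set for the store-class opcodes.  So ldx
 * (op3 = 0x0b) decodes as 'load', stx (op3 = 0x0e) falls into the
 * default case and decodes as 'store', and swap (op3 = 0x0f) hits
 * case 15 and decodes as 'both', since it reads and writes memory in
 * one instruction.
 */
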
/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
static inline int decode_access_size(struct pt_regs *regs, unsigned int insn)
{
	unsigned int tmp;

	tmp = ((insn >> 19) & 0xf);
	if (tmp == 11 || tmp == 14) /* ldx/stx */
		return 8;
	tmp &= 3;
	if (!tmp)
		return 4;
	else if (tmp == 3)
		return 16;	/* ldd/std - Although it is actually 8 */
	else if (tmp == 2)
		return 2;

	printk("Impossible unaligned trap. insn=%08x\n", insn);
	die_if_kernel("Byte sized unaligned access?!?!", regs);

	/* GCC should never warn that control reaches the end
	 * of this function without returning a value because
	 * die_if_kernel() is marked with attribute 'noreturn'.
	 * Alas, some versions do...
	 */
	return 0;
}

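/* Illustrative example (added): lduh has op3 = 0x02, so tmp & 3 == 2
 * and decode_access_size() returns 2; ldd has op3 = 0x03, so tmp & 3
 * == 3 and 16 is returned as a flag value meaning "a 32-bit register
 * pair", even though only 8 bytes actually move.
 */
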
static inline int decode_asi(unsigned int insn, struct pt_regs *regs)
{
	if (!(insn & 0x800000))
		return ASI_P;
	if (insn & 0x2000)
		return (unsigned char)(regs->tstate >> 24);	/* %asi */
	return (unsigned char)(insn >> 5);	/* imm_asi */
}

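/* Added note: for alternate-space forms (insn bit 23 set) the ASI is
 * either the %asi register - which the trap code keeps in tstate bits
 * 31:24 - when the i bit (insn bit 13) is set, or the immediate ASI
 * field in insn bits 12:5.  Everything else uses the default primary
 * address space, ASI_P.
 */
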
/* 0x400000 = signed, 0 = unsigned */
static inline int decode_signedness(unsigned int insn)
{
	return (insn & 0x400000);
}

static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
				       unsigned int rd, int from_kernel)
{
	if (rs2 >= 16 || rs1 >= 16 || rd >= 16) {
		if (from_kernel != 0)
			__asm__ __volatile__("flushw");
		else
			flushw_user();
	}
}

static inline long sign_extend_imm13(long imm)
{
	return imm << 51 >> 51;
}

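/* Worked example (added): on a 64-bit long, imm << 51 moves the
 * immediate's sign bit (bit 12) into bit 63, and the arithmetic shift
 * right smears it back down, so 0x1fff becomes -1 while 0x0fff stays
 * 4095.
 */
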
static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
	unsigned long value;

	if (reg < 16)
		return (!reg ? 0 : regs->u_regs[reg]);
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *win;
		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		value = win->locals[reg - 16];
	} else if (test_thread_flag(TIF_32BIT)) {
		struct reg_window32 __user *win32;
		win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
		get_user(value, &win32->locals[reg - 16]);
	} else {
		struct reg_window __user *win;
		win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		get_user(value, &win->locals[reg - 16]);
	}
	return value;
}

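/* Background note (added): registers %g0-%o7 (0-15) are saved in
 * pt_regs, but the locals and ins (16-31) live in the register window
 * spilled to the stack.  Kernel frames are read directly (the 64-bit
 * stack pointer is offset by STACK_BIAS); user frames must go through
 * get_user(), using the 32-bit window layout for compat tasks.
 */
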
static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
	if (reg < 16)
		return &regs->u_regs[reg];
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *win;
		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		return &win->locals[reg - 16];
	} else if (test_thread_flag(TIF_32BIT)) {
		struct reg_window32 *win32;
		win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
		return (unsigned long *)&win32->locals[reg - 16];
	} else {
		struct reg_window *win;
		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		return &win->locals[reg - 16];
	}
}

unsigned long compute_effective_address(struct pt_regs *regs,
					unsigned int insn, unsigned int rd)
{
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	unsigned int rs1 = (insn >> 14) & 0x1f;
	unsigned int rs2 = insn & 0x1f;
	unsigned long addr;

	if (insn & 0x2000) {
		maybe_flush_windows(rs1, 0, rd, from_kernel);
		addr = (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
	} else {
		maybe_flush_windows(rs1, rs2, rd, from_kernel);
		addr = (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
	}

	if (!from_kernel && test_thread_flag(TIF_32BIT))
		addr &= 0xffffffff;

	return addr;
}

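/* Worked example (added, illustrative): for "ldx [%g1 + 8], %g2" the
 * i bit is set, so the address is the value of %g1 plus the
 * sign-extended immediate 8; for "ldx [%g1 + %g2], %g3" it is the sum
 * of the two register operands.  The 32-bit truncation for compat
 * tasks matches the address masking such processes execute under.
 */
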
/* This is just to make gcc think die_if_kernel does return... */
static void __used unaligned_panic(char *str, struct pt_regs *regs)
{
	die_if_kernel(str, regs);
}

extern int do_int_load(unsigned long *dest_reg, int size,
		       unsigned long *saddr, int is_signed, int asi);

extern int __do_int_store(unsigned long *dst_addr, int size,
			  unsigned long src_val, int asi);

static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
			       struct pt_regs *regs, int asi, int orig_asi)
{
	unsigned long zero = 0;
	unsigned long *src_val_p = &zero;
	unsigned long src_val;

	if (size == 16) {
		size = 8;
		zero = (((long)(reg_num ?
			(unsigned)fetch_reg(reg_num, regs) : 0)) << 32) |
			(unsigned)fetch_reg(reg_num + 1, regs);
	} else if (reg_num) {
		src_val_p = fetch_reg_addr(reg_num, regs);
	}
	src_val = *src_val_p;
	if (unlikely(asi != orig_asi)) {
		switch (size) {
		case 2:
			src_val = swab16(src_val);
			break;
		case 4:
			src_val = swab32(src_val);
			break;
		case 8:
			src_val = swab64(src_val);
			break;
		default:
			BUG();
		}
	}
	return __do_int_store(dst_addr, size, src_val, asi);
}

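/* Added note: for std (size == 16) the even/odd integer register pair
 * is fetched as two 32-bit halves and glued into a single 64-bit
 * value, and size is folded back to 8 before the real store - the 16
 * was only ever a flag value from decode_access_size().
 */
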
static inline void advance(struct pt_regs *regs)
{
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
}

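/* Added note: traps expose both the faulting PC (tpc) and its
 * successor (tnpc), so retiring an emulated instruction is
 * tpc = tnpc; tnpc += 4 rather than a simple pc += 4.  This stays
 * correct even when the trapping instruction sits in a branch delay
 * slot, where tnpc already points at the branch target.
 */
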
static inline int floating_point_load_or_store_p(unsigned int insn)
{
	return (insn >> 24) & 1;
}

static inline int ok_for_kernel(unsigned int insn)
{
	return !floating_point_load_or_store_p(insn);
}

static void kernel_mna_trap_fault(int fixup_tstate_asi)
{
	struct pt_regs *regs = current_thread_info()->kern_una_regs;
	unsigned int insn = current_thread_info()->kern_una_insn;
	const struct exception_table_entry *entry;

	entry = search_exception_tables(regs->tpc);
	if (!entry) {
		unsigned long address;

		address = compute_effective_address(regs, insn,
						    ((insn >> 25) & 0x1f));
		if (address < PAGE_SIZE) {
			printk(KERN_ALERT "Unable to handle kernel NULL "
			       "pointer dereference in mna handler");
		} else
			printk(KERN_ALERT "Unable to handle kernel paging "
			       "request in mna handler");
		printk(KERN_ALERT " at virtual address %016lx\n", address);
		printk(KERN_ALERT "current->{active_,}mm->context = %016lx\n",
		       (current->mm ? CTX_HWBITS(current->mm->context) :
			CTX_HWBITS(current->active_mm->context)));
		printk(KERN_ALERT "current->{active_,}mm->pgd = %016lx\n",
		       (current->mm ? (unsigned long) current->mm->pgd :
			(unsigned long) current->active_mm->pgd));
		die_if_kernel("Oops", regs);
		/* Not reached */
	}
	regs->tpc = entry->fixup;
	regs->tnpc = regs->tpc + 4;

	if (fixup_tstate_asi) {
		regs->tstate &= ~TSTATE_ASI;
		regs->tstate |= (ASI_AIUS << 24UL);
	}
}

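/* Added note: this mirrors the page-fault path for exception-table
 * fixups - if the trapping kernel PC has a fixup entry (e.g. from
 * get_user()/put_user()), execution resumes at the fixup instead of
 * oopsing, and tstate's %asi field can be reset to ASI_AIUS for the
 * user-access case.
 */
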
static void log_unaligned(struct pt_regs *regs)
{
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

	if (__ratelimit(&ratelimit)) {
		printk("Kernel unaligned access at TPC[%lx] %pS\n",
		       regs->tpc, (void *) regs->tpc);
	}
}

asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
	enum direction dir = decode_direction(insn);
	int size = decode_access_size(regs, insn);
	int orig_asi, asi;

	current_thread_info()->kern_una_regs = regs;
	current_thread_info()->kern_una_insn = insn;

	orig_asi = asi = decode_asi(insn, regs);

	/* If this is a {get,put}_user() on an unaligned userspace pointer,
	 * just signal a fault and do not log the event.
	 */
	if (asi == ASI_AIUS) {
		kernel_mna_trap_fault(0);
		return;
	}

	log_unaligned(regs);

	if (!ok_for_kernel(insn) || dir == both) {
		printk("Unsupported unaligned load/store trap for kernel "
		       "at <%016lx>.\n", regs->tpc);
		unaligned_panic("Kernel does fpu/atomic "
				"unaligned load/store.", regs);

		kernel_mna_trap_fault(0);
	} else {
		unsigned long addr, *reg_addr;
		int err;

		addr = compute_effective_address(regs, insn,
						 ((insn >> 25) & 0x1f));
		perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
		switch (asi) {
		case ASI_NL:
		case ASI_AIUPL:
		case ASI_AIUSL:
		case ASI_PL:
		case ASI_SL:
		case ASI_PNFL:
		case ASI_SNFL:
			asi &= ~0x08;
			break;
		}
		switch (dir) {
		case load:
			reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs);
			err = do_int_load(reg_addr, size,
					  (unsigned long *) addr,
					  decode_signedness(insn), asi);
			if (likely(!err) && unlikely(asi != orig_asi)) {
				unsigned long val_in = *reg_addr;
				switch (size) {
				case 2:
					val_in = swab16(val_in);
					break;
				case 4:
					val_in = swab32(val_in);
					break;
				case 8:
					val_in = swab64(val_in);
					break;
				default:
					BUG();
				}
				*reg_addr = val_in;
			}
			break;

		case store:
			err = do_int_store(((insn>>25)&0x1f), size,
					   (unsigned long *) addr, regs,
					   asi, orig_asi);
			break;

		default:
			panic("Impossible kernel unaligned trap.");
			/* Not reached... */
		}
		if (unlikely(err))
			kernel_mna_trap_fault(1);
		else
			advance(regs);
	}
}

int handle_popc(u32 insn, struct pt_regs *regs)
{
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	int ret, rd = ((insn >> 25) & 0x1f);
	u64 value;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
	if (insn & 0x2000) {
		maybe_flush_windows(0, 0, rd, from_kernel);
		value = sign_extend_imm13(insn);
	} else {
		maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
		value = fetch_reg(insn & 0x1f, regs);
	}
	ret = hweight64(value);
	if (rd < 16) {
		if (rd)
			regs->u_regs[rd] = ret;
	} else {
		if (test_thread_flag(TIF_32BIT)) {
			struct reg_window32 __user *win32;
			win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
			put_user(ret, &win32->locals[rd - 16]);
		} else {
			struct reg_window __user *win;
			win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
			put_user(ret, &win->locals[rd - 16]);
		}
	}
	advance(regs);
	return 1;
}

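/* Added note: popc (population count) traps on CPUs that do not
 * implement it, so it is emulated with hweight64(); popc of 0xf0, for
 * instance, yields 4.  A destination of %g0 (rd == 0) only needs
 * advance(), since %g0 always reads as zero.
 */
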
extern void do_fpother(struct pt_regs *regs);
extern void do_privact(struct pt_regs *regs);
extern void spitfire_data_access_exception(struct pt_regs *regs,
					   unsigned long sfsr,
					   unsigned long sfar);
extern void sun4v_data_access_exception(struct pt_regs *regs,
					unsigned long addr,
					unsigned long type_ctx);

int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{
	unsigned long addr = compute_effective_address(regs, insn, 0);
	int freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
	struct fpustate *f = FPUSTATE;
	int asi = decode_asi(insn, regs);
	int flag = (freg < 32) ? FPRS_DL : FPRS_DU;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

	save_and_clear_fpu();
	current_thread_info()->xfsr[0] &= ~0x1c000;
	if (freg & 3) {
		current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
		do_fpother(regs);
		return 0;
	}
	if (insn & 0x200000) {
		/* STQ */
		u64 first = 0, second = 0;

		if (current_thread_info()->fpsaved[0] & flag) {
			first = *(u64 *)&f->regs[freg];
			second = *(u64 *)&f->regs[freg+2];
		}
		if (asi < 0x80) {
			do_privact(regs);
			return 1;
		}
		switch (asi) {
		case ASI_P:
		case ASI_S: break;
		case ASI_PL:
		case ASI_SL:
			{
				/* Need to convert endians */
				u64 tmp = __swab64p(&first);

				first = __swab64p(&second);
				second = tmp;
				break;
			}
		default:
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		if (put_user (first >> 32, (u32 __user *)addr) ||
		    __put_user ((u32)first, (u32 __user *)(addr + 4)) ||
		    __put_user (second >> 32, (u32 __user *)(addr + 8)) ||
		    __put_user ((u32)second, (u32 __user *)(addr + 12))) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
	} else {
		/* LDF, LDDF, LDQF */
		u32 data[4] __attribute__ ((aligned(8)));
		int size, i;
		int err;

		if (asi < 0x80) {
			do_privact(regs);
			return 1;
		} else if (asi > ASI_SNFL) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		switch (insn & 0x180000) {
		case 0x000000: size = 1; break;
		case 0x100000: size = 4; break;
		default: size = 2; break;
		}
		for (i = 0; i < size; i++)
			data[i] = 0;

		err = get_user (data[0], (u32 __user *) addr);
		if (!err) {
			for (i = 1; i < size; i++)
				err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
		}
		if (err && !(asi & 0x2 /* NF */)) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		if (asi & 0x8) /* Little */ {
			u64 tmp;

			switch (size) {
			case 1: data[0] = le32_to_cpup(data + 0); break;
			default:*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 0));
				break;
			case 4: tmp = le64_to_cpup((u64 *)(data + 0));
				*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 2));
				*(u64 *)(data + 2) = tmp;
				break;
			}
		}
		if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
			current_thread_info()->fpsaved[0] = FPRS_FEF;
			current_thread_info()->gsr[0] = 0;
		}
		if (!(current_thread_info()->fpsaved[0] & flag)) {
			if (freg < 32)
				memset(f->regs, 0, 32*sizeof(u32));
			else
				memset(f->regs+32, 0, 32*sizeof(u32));
		}
		memcpy(f->regs + freg, data, size * 4);
		current_thread_info()->fpsaved[0] |= flag;
	}
	advance(regs);
	return 1;
}

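/* Added note on the freg decoding above: SPARC V9 encodes bit 5 of a
 * double/quad FP register number in bit 0 of the rd field, so merging
 * (rd & 0x1e) with that bit shifted up to 0x20 always yields an even
 * register in %f0-%f62.  A quad whose number is not a multiple of 4
 * (freg & 3) is rejected with invalid_fp_register via do_fpother().
 */
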
void handle_ld_nf(u32 insn, struct pt_regs *regs)
{
	int rd = ((insn >> 25) & 0x1f);
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	unsigned long *reg;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

	maybe_flush_windows(0, 0, rd, from_kernel);
	reg = fetch_reg_addr(rd, regs);
	if (from_kernel || rd < 16) {
		reg[0] = 0;
		if ((insn & 0x780000) == 0x180000)
			reg[1] = 0;
	} else if (test_thread_flag(TIF_32BIT)) {
		put_user(0, (int __user *) reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, ((int __user *) reg) + 1);
	} else {
		put_user(0, (unsigned long __user *) reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, (unsigned long __user *) reg + 1);
	}
	advance(regs);
}

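/* Added note: this services faulting no-fault loads (the *_NF ASIs),
 * whose contract is to return zero rather than fault.  The
 * (insn & 0x780000) == 0x180000 test picks out the ldd forms, which
 * must also zero the second register of the pair.
 */
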
void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	u64 value;
	u8 freg;
	int flag;
	struct fpustate *f = FPUSTATE;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("lddfmna from kernel", regs);
	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		int asi = decode_asi(insn, regs);
		u32 first, second;
		int err;

		if ((asi > ASI_SNFL) ||
		    (asi < ASI_P))
			goto daex;
		first = second = 0;
		err = get_user(first, (u32 __user *)sfar);
		if (!err)
			err = get_user(second, (u32 __user *)(sfar + 4));
		if (err) {
			if (!(asi & 0x2))
				goto daex;
			first = second = 0;
		}
		save_and_clear_fpu();
		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		value = (((u64)first) << 32) | second;
		if (asi & 0x8) /* Little */
			value = __swab64p(&value);
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
			current_thread_info()->fpsaved[0] = FPRS_FEF;
			current_thread_info()->gsr[0] = 0;
		}
		if (!(current_thread_info()->fpsaved[0] & flag)) {
			if (freg < 32)
				memset(f->regs, 0, 32*sizeof(u32));
			else
				memset(f->regs+32, 0, 32*sizeof(u32));
		}
		*(u64 *)(f->regs + freg) = value;
		current_thread_info()->fpsaved[0] |= flag;
	} else {
daex:
		if (tlb_type == hypervisor)
			sun4v_data_access_exception(regs, sfar, sfsr);
		else
			spitfire_data_access_exception(regs, sfsr, sfar);
		return;
	}
	advance(regs);
}

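/* Added note: lddf only requires 4-byte alignment architecturally, so
 * an 8-byte-misaligned (but word-aligned) access traps here and is
 * emulated by loading the two word halves separately and merging them
 * into the saved FPU image, byte-swapped first for little-endian ASIs.
 */
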
void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	u64 value;
	u8 freg;
	int flag;
	struct fpustate *f = FPUSTATE;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("stdfmna from kernel", regs);
	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		int asi = decode_asi(insn, regs);

		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		value = 0;
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		if ((asi > ASI_SNFL) ||
		    (asi < ASI_P))
			goto daex;
		save_and_clear_fpu();
		if (current_thread_info()->fpsaved[0] & flag)
			value = *(u64 *)&f->regs[freg];
		switch (asi) {
		case ASI_P:
		case ASI_S: break;
		case ASI_PL:
		case ASI_SL:
			value = __swab64p(&value); break;
		default: goto daex;
		}
		if (put_user (value >> 32, (u32 __user *) sfar) ||
		    __put_user ((u32)value, (u32 __user *)(sfar + 4)))
			goto daex;
	} else {
daex:
		if (tlb_type == hypervisor)
			sun4v_data_access_exception(regs, sfar, sfsr);
		else
			spitfire_data_access_exception(regs, sfsr, sfar);
		return;
	}
	advance(regs);
}