#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/war.h>
+#include <asm/page.h>
#define PANIC_PIC(msg) \
.set push; \
.align 5
NESTED(handle_int, PT_SIZE, sp)
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /*
+ * Check to see if the interrupted code has just disabled
+ * interrupts and ignore this interrupt for now if so.
+ *
+ * local_irq_disable() disables interrupts and then calls
+ * trace_hardirqs_off() to track the state. If an interrupt is taken
+ * after interrupts are disabled but before the state is updated,
+ * it will appear to restore_all that it is incorrectly returning
+ * with interrupts disabled.
+ */
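+ /*
+ * A sketch of the window, assuming the generic local_irq_disable()
+ * from include/linux/irqflags.h:
+ *
+ *	raw_local_irq_disable();   hardware IE cleared
+ *	                           <-- interrupt taken here: IE is off,
+ *	                               but the trace state still says "on"
+ *	trace_hardirqs_off();      trace state catches up
+ */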
+ .set push
+ .set noat
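+ /*
+ * IE (IEP on R3000-style CPUs) still set means the interrupted code
+ * had interrupts enabled, so the race cannot have happened; handle
+ * the interrupt normally.
+ */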
+ mfc0 k0, CP0_STATUS
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+ and k0, ST0_IEP
+ bnez k0, 1f
+
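+ /*
+ * Interrupts were already disabled; return to EPC with rfe in the
+ * delay slot restoring the previous (interrupts-off) status.
+ */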
+ mfc0 k0, CP0_EPC
+ .set noreorder
+ j k0
+ rfe
+#else
+ and k0, ST0_IE
+ bnez k0, 1f
+
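+ /* Interrupts were already disabled; just return to the interrupted code. */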
+ eret
+#endif
+1:
+ .set pop
+#endif
SAVE_ALL
CLI
TRACE_IRQS_OFF
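+ /*
+ * Save the old thread_info->regs pointer in s0 and publish this trap
+ * frame in its place; ret_from_irq restores the old value from s0.
+ */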
+ LONG_L s0, TI_REGS($28)
+ LONG_S sp, TI_REGS($28)
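+ /*
+ * Pre-load the return address: plat_irq_dispatch no longer takes a
+ * pt_regs argument and returns straight to ret_from_irq.
+ */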
PTR_LA ra, ret_from_irq
- move a0, sp
j plat_irq_dispatch
END(handle_int)
* during service by the SMTC kernel, we also want to
* pass the IM value to be cleared.
*/
-EXPORT(except_vec_vi_mori)
+FEXPORT(except_vec_vi_mori)
ori a0, $0, 0
#endif /* CONFIG_MIPS_MT_SMTC */
-EXPORT(except_vec_vi_lui)
+FEXPORT(except_vec_vi_lui)
lui v0, 0 /* Patched */
j except_vec_vi_handler
-EXPORT(except_vec_vi_ori)
+FEXPORT(except_vec_vi_ori)
ori v0, 0 /* Patched */
.set pop
END(except_vec_vi)
_ehb
#endif /* CONFIG_MIPS_MT_SMTC */
CLI
+#ifdef CONFIG_TRACE_IRQFLAGS
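+ /*
+ * TRACE_IRQS_OFF calls into C code, so preserve the vectored handler
+ * address in v0 (and the SMTC IM value in a0) across it.
+ */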
+ move s0, v0
+#ifdef CONFIG_MIPS_MT_SMTC
+ move s1, a0
+#endif
TRACE_IRQS_OFF
- move a0, sp
- jalr v0
- j ret_from_irq
+#ifdef CONFIG_MIPS_MT_SMTC
+ move a0, s1
+#endif
+ move v0, s0
+#endif
+
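+ /*
+ * Same TI_REGS bookkeeping as in handle_int; v0 still holds the
+ * handler address patched in at except_vec_vi_lui/except_vec_vi_ori.
+ */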
+ LONG_L s0, TI_REGS($28)
+ LONG_S sp, TI_REGS($28)
+ PTR_LA ra, ret_from_irq
+ jr v0
END(except_vec_vi_handler)
/*
.set at
__BUILD_\verbose \exception
move a0, sp
- jal do_\handler
- j ret_from_exception
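+ /*
+ * Tail call: with ra pre-loaded, do_\handler returns directly to
+ * ret_from_exception instead of coming back through this stub.
+ */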
+ PTR_LA ra, ret_from_exception
+ j do_\handler
END(handle_\exception)
.endm
BUILD_HANDLER dsp dsp sti silent /* #26 */
BUILD_HANDLER reserved reserved sti verbose /* others */
+ .align 5
+ LEAF(handle_ri_rdhwr_vivt)
+#ifdef CONFIG_MIPS_MT_SMTC
+ PANIC_PIC("handle_ri_rdhwr_vivt called")
+#else
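+ /*
+ * Before handle_ri_rdhwr loads the instruction word at EPC, make sure
+ * the TLB actually maps that address; if not, branch to the generic
+ * RI handler, which can safely take the fault.
+ */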
+ .set push
+ .set noat
+ .set noreorder
+ /* check if the TLB contains an entry for EPC */
+ MFC0 k1, CP0_ENTRYHI
+ andi k1, 0xff /* ASID_MASK */
+ MFC0 k0, CP0_EPC
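+ /* EntryHi.VPN2 names an even/odd page pair, hence PAGE_SHIFT + 1 */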
+ PTR_SRL k0, PAGE_SHIFT + 1
+ PTR_SLL k0, PAGE_SHIFT + 1
+ or k1, k0
+ MTC0 k1, CP0_ENTRYHI
+ mtc0_tlbw_hazard
+ tlbp
+ tlb_probe_hazard
+ mfc0 k1, CP0_INDEX
+ .set pop
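+ /* a negative Index means tlbp found no matching TLB entry */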
+ bltz k1, handle_ri /* slow path */
+ /* fall thru */
+#endif
+ END(handle_ri_rdhwr_vivt)
+
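+ /*
+ * Fast path emulating "rdhwr v1,$29" (the TLS pointer read used by
+ * userland) on CPUs that raise Reserved Instruction for it.
+ */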
+ LEAF(handle_ri_rdhwr)
+ .set push
+ .set noat
+ .set noreorder
+ /* 0x7c03e83b: rdhwr v1,$29 */
+ MFC0 k1, CP0_EPC
+ lui k0, 0x7c03
+ lw k1, (k1)
+ ori k0, 0xe83b
+ .set reorder
+ bne k0, k1, handle_ri /* if not ours */
+ /* The insn is rdhwr. No need to check CAUSE.BD here. */
+ get_saved_sp /* k1 := current_thread_info */
+ .set noreorder
+ MFC0 k0, CP0_EPC
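+ /*
+ * Round the saved sp down to the thread_info base, fetch tp_value
+ * into v1, and return to EPC + 4 to skip the trapping rdhwr.
+ */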
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+ ori k1, _THREAD_MASK
+ xori k1, _THREAD_MASK
+ LONG_L v1, TI_TP_VALUE(k1)
+ LONG_ADDIU k0, 4
+ jr k0
+ rfe
+#else
+ LONG_ADDIU k0, 4 /* stall on $k0 */
+ MTC0 k0, CP0_EPC
+ /* I hope three instructions between MTC0 and ERET are enough... */
+ ori k1, _THREAD_MASK
+ xori k1, _THREAD_MASK
+ LONG_L v1, TI_TP_VALUE(k1)
+ .set mips3
+ eret
+ .set mips0
+#endif
+ .set pop
+ END(handle_ri_rdhwr)
+
#ifdef CONFIG_64BIT
/* A temporary overflow handler used by check_daddi(). */