/*
 * Kernel execution entry point code.
 *
 *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 *	Initial PowerPC version.
 *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
 *	Rewritten for PReP
 *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *	Low-level exception handlers, MMU support, and rewrite.
 *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
 *	PowerPC 8xx modifications.
 *    Copyright (c) 1998-1999 TiVo, Inc.
 *	PowerPC 403GCX modifications.
 *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *	PowerPC 403GCX/405GP modifications.
 *    Copyright 2000 MontaVista Software Inc.
 *	PPC405 modifications
 *	PowerPC 403GCX/405GP modifications.
 *	Author: MontaVista Software, Inc.
 *		frank_rowand@mvista.com or source@mvista.com
 *		debbie_chu@mvista.com
 *    Copyright 2002-2004 MontaVista Software, Inc.
 *	PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
 *    Copyright 2004 Freescale Semiconductor, Inc
 *	PowerPC e500 modifications, Kumar Gala <galak@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/init.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/ptrace.h>
#include "head_booke.h"
/* As with the other PowerPC ports, it is expected that when code
 * execution begins here, the following registers contain valid, yet
 * optional, information:
 *
 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
 *   r4 - Starting address of the init RAM disk
 *   r5 - Ending address of the init RAM disk
 *   r6 - Start of kernel command line string (e.g. "mem=128")
 *   r7 - End of kernel command line string
 */
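/*
 * Illustrative only: in C terms the boot contract above is roughly
 *
 *	void _start(bd_t *bd, void *initrd_start, void *initrd_end,
 *		    char *cmdline_start, char *cmdline_end);
 *
 * a hypothetical prototype, since entry is via this assembly code,
 * not a C call, and each value is optional as noted above.
 */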
	/*
	 * Reserve a word at a fixed location to store the address
	 * of abatron_pteptrs.
	 */
	nop
	/* Translate device tree address to physical, save in r30/r31 */
	bl	get_phys_addr
	mr	r30,r3
	mr	r31,r4

	li	r25,0			/* phys kernel start (low) */
	li	r24,0			/* CPU number */
	li	r23,0			/* phys kernel start (high) */
#ifdef CONFIG_RELOCATABLE
	LOAD_REG_ADDR_PIC(r3, _stext)	/* Get our current runtime base */

	/* Translate _stext address to physical, save in r23/r25 */
	bl	get_phys_addr
	mr	r23,r3
	mr	r25,r4

	bl	0f
0:	mflr	r8
	addis	r3,r8,(is_second_reloc - 0b)@ha
	lwz	r19,(is_second_reloc - 0b)@l(r3)

	/* Check if this is the second relocation. */
	cmpwi	r19,1
	bne	1f
	/*
	 * For the second relocation, we already got the real memstart_addr
	 * from the device tree. So we will map PAGE_OFFSET to memstart_addr,
	 * and the virtual address of the kernel start should then be:
	 *   PAGE_OFFSET + (kernstart_addr - memstart_addr)
	 * Since the offset between kernstart_addr and memstart_addr should
	 * never exceed 1G, we can just use their lower 32 bits
	 * for the calculation.
	 */
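	/*
	 * Worked example (illustrative numbers): with memstart_addr =
	 * 0x20000000 and kernstart_addr = 0x24000000, the kernel's start
	 * virtual address is PAGE_OFFSET + (0x24000000 - 0x20000000) =
	 * PAGE_OFFSET + 0x04000000, computed entirely in the low 32 bits.
	 */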
	addis	r4,r8,(kernstart_addr - 0b)@ha
	addi	r4,r4,(kernstart_addr - 0b)@l

	addis	r6,r8,(memstart_addr - 0b)@ha
	addi	r6,r6,(memstart_addr - 0b)@l
	/*
	 * We have the runtime (virtual) address of our base.
	 * We calculate our offset from a 64M page boundary.
	 * We could map the 64M page we belong to at PAGE_OFFSET and
	 * get going from there.
	 */
	lis	r4,KERNELBASE@h
	ori	r4,r4,KERNELBASE@l
	rlwinm	r6,r25,0,0x3ffffff	/* r6 = PHYS_START % 64M */
	rlwinm	r5,r4,0,0x3ffffff	/* r5 = KERNELBASE % 64M */
	subf	r3,r5,r6		/* r3 = r6 - r5 */
	add	r3,r4,r3		/* Required Virtual Address */
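	/*
	 * Worked example (illustrative numbers): if the kernel was loaded
	 * at PHYS_START = 0x05000000 with KERNELBASE = 0xc0000000, then
	 * r6 = 0x01000000, r5 = 0x00000000, and the required virtual
	 * address is r3 = 0xc0000000 + 0x01000000 = 0xc1000000.
	 */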
	/*
	 * For the second relocation, we already set the right tlb entries
	 * for the kernel space, so skip the code in fsl_booke_entry_mapping.S
	 */
/* We try to not make any assumptions about how the boot loader
 * set up or used the TLBs. We invalidate all mappings from the
 * boot loader and load a single entry in TLB1[0] to map the
 * first 64M of kernel memory. Any boot info passed from the
 * bootloader needs to live in this first 64M.
 *
 * Requirement on bootloader:
 *  - The page we're executing in needs to reside in TLB1 and
 *    have IPROT=1. If not, an invalidate broadcast could
 *    evict the entry we're currently executing in.
 *
 *  r3 = Index of TLB1 we're executing in
 *  r4 = Current MSR[IS]
 *  r5 = Index of TLB1 temp mapping
 *
 * Later in mapin_ram we will correctly map lowmem, and resize TLB1[0]
 * based on the amount of memory being mapped.
 */
_ENTRY(__early_start)

#define ENTRY_MAPPING_BOOT_SETUP
#include "fsl_booke_entry_mapping.S"
#undef ENTRY_MAPPING_BOOT_SETUP
	/* Establish the interrupt vector offsets */
	SET_IVOR(0, CriticalInput);
	SET_IVOR(1, MachineCheck);
	SET_IVOR(2, DataStorage);
	SET_IVOR(3, InstructionStorage);
	SET_IVOR(4, ExternalInput);
	SET_IVOR(5, Alignment);
	SET_IVOR(6, Program);
	SET_IVOR(7, FloatingPointUnavailable);
	SET_IVOR(8, SystemCall);
	SET_IVOR(9, AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError);
	SET_IVOR(14, InstructionTLBError);
	SET_IVOR(15, DebugCrit);

	/* Establish the interrupt vector base */
	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
	mtspr	SPRN_IVPR,r4
	/* Setup the defaults for TLB entries */
	li	r2,(MAS4_TSIZED(BOOK3E_PAGESZ_4K))@l
#ifdef CONFIG_E200
	oris	r2,r2,MAS4_TLBSELD(1)@h
#endif
	mtspr	SPRN_MAS4, r2

#if 0
	/* Enable DOZE */
	mfspr	r2,SPRN_HID0
	oris	r2,r2,HID0_DOZE@h
	mtspr	SPRN_HID0, r2
#endif
#if !defined(CONFIG_BDI_SWITCH)
	/*
	 * The Abatron BDI JTAG debugger does not tolerate others
	 * mucking with the debug registers.
	 */
	lis	r2,DBCR0_IDM@h
	mtspr	SPRN_DBCR0,r2
	isync
	/* clear any residual debug events */
	li	r2,-1
	mtspr	SPRN_DBSR,r2
#endif
#ifdef CONFIG_SMP
	/* Check to see if we're the second processor, and jump
	 * to the secondary_start code if so
	 */
	lis	r24, boot_cpuid@h
	ori	r24, r24, boot_cpuid@l
	lwz	r24, 0(r24)
	cmpwi	r24, -1
	mfspr	r24,SPRN_PIR
	bne	__secondary_start
#endif
/*
 * This is where the main kernel code starts.
 */

	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to current thread */
	addi	r4,r2,THREAD		/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4

	/* stack */
	lis	r1,init_thread_union@h
	ori	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)

	CURRENT_THREAD_INFO(r22, r1)
#ifdef CONFIG_RELOCATABLE
	mr	r3,r30
	mr	r4,r31
#ifdef CONFIG_PHYS_64BIT
	mr	r5,r23
	mr	r6,r25
#else
	mr	r5,r25
#endif
	bl	relocate_init
#endif

#ifdef CONFIG_DYNAMIC_MEMSTART
	lis	r3,kernstart_addr@ha
	la	r3,kernstart_addr@l(r3)
#ifdef CONFIG_PHYS_64BIT
	stw	r23,0(r3)
	stw	r25,4(r3)
#else
	stw	r25,0(r3)
#endif
#endif
/*
 * Decide what sort of machine this is and initialize the MMU.
 */
	mr	r3,r30
	mr	r4,r31
	bl	machine_init
	bl	MMU_init

	/* Setup PTE pointers for the Abatron bdiGDB */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	lis	r4, KERNELBASE@h
	ori	r4, r4, KERNELBASE@l
	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
	stw	r6, 0(r5)

	lis	r4,start_kernel@h
	ori	r4,r4,start_kernel@l
	lis	r3,MSR_KERNEL@h
	ori	r3,r3,MSR_KERNEL@l
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi			/* change context and jump to start_kernel */
/* Macros to hide the PTE size differences
 *
 * FIND_PTE -- walks the page tables given EA & pgdir pointer
 *   r10 -- EA of fault
 *   r11 -- PGDIR pointer
 *   r12 -- free
 *   label 2: is the bailout case
 *
 * if we find the pte (fall through):
 *   r11 is low pte word
 *   r12 is pointer to the pte
 *   r10 is the pshift from the PGD, if we're a hugepage
 */
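/*
 * Rough C sketch of the non-huge walk (illustrative only; pgd_index,
 * pte_index and pte_low_word are hypothetical helper names, and the
 * exact shift/mask values live in the rlwinm/rlwimi operands below):
 *
 *	pmd = pgdir[pgd_index(ea)];		// lwzx / lwz
 *	if ((pmd & PAGE_MASK) == 0)
 *		goto bail;			// beq 2f
 *	pte = (pte_t *)(pmd & PAGE_MASK) + pte_index(ea);
 *	r11 = pte_low_word(*pte);		// lwz
 */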
#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_HUGETLB_PAGE
#define FIND_PTE	\
	rlwinm	r12, r10, 13, 19, 29;	/* Compute pgdir/pmd offset */	\
	lwzx	r11, r12, r11;		/* Get pgd/pmd entry */		\
	rlwinm.	r12, r11, 0, 0, 20;	/* Extract pt base address */	\
	blt	1000f;			/* Normal non-huge page */	\
	beq	2f;			/* Bail if no table */		\
	oris	r11, r11, PD_HUGE@h;	/* Put back address bit */	\
	andi.	r10, r11, HUGEPD_SHIFT_MASK@l; /* extract size field */	\
	xor	r12, r10, r11;		/* drop size bits from pointer */ \
	b	1001f;							\
1000:	rlwimi	r12, r10, 23, 20, 28;	/* Compute pte address */	\
	li	r10, 0;			/* clear r10 */			\
1001:	lwz	r11, 4(r12);		/* Get pte entry */
#else
#define FIND_PTE	\
	rlwinm	r12, r10, 13, 19, 29;	/* Compute pgdir/pmd offset */	\
	lwzx	r11, r12, r11;		/* Get pgd/pmd entry */		\
	rlwinm.	r12, r11, 0, 0, 20;	/* Extract pt base address */	\
	beq	2f;			/* Bail if no table */		\
	rlwimi	r12, r10, 23, 20, 28;	/* Compute pte address */	\
	lwz	r11, 4(r12);		/* Get pte entry */
#endif /* HUGEPAGE */
#else /* !PTE_64BIT */
#define FIND_PTE	\
	rlwimi	r11, r10, 12, 20, 29;	/* Create L1 (pgdir/pmd) address */ \
	lwz	r11, 0(r11);		/* Get L1 entry */		\
	rlwinm.	r12, r11, 0, 0, 19;	/* Extract L2 (pte) base address */ \
	beq	2f;			/* Bail if no table */		\
	rlwimi	r12, r10, 22, 20, 29;	/* Compute PTE address */	\
	lwz	r11, 0(r12);		/* Get Linux PTE */
#endif
/*
 * Interrupt vector entry code
 *
 * The Book E MMUs are always on so we don't need to handle
 * interrupts in real mode as with previous PPC processors. In
 * this case we handle interrupts in the kernel virtual address
 * space.
 *
 * Interrupt vectors are dynamically placed relative to the
 * interrupt prefix as determined by the address of interrupt_base.
 * The interrupt vector offsets are programmed using the labels
 * for each interrupt vector entry.
 *
 * Interrupt vectors must be aligned on a 16 byte boundary.
 * We align on a 32 byte cache line boundary for good measure.
 */
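/*
 * Illustrative example: on Book E the vector address is formed from
 * IVPR and the per-interrupt IVOR, e.g. a data TLB miss traps to
 * roughly (IVPR & 0xffff0000) | (IVOR13 & 0xfff0). That is why
 * interrupt_base only needs its high 16 bits programmed into IVPR
 * and each handler offset must be 16 byte aligned.
 */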
interrupt_base:
	/* Critical Input Interrupt */
	CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception)

	/* Machine Check Interrupt */
#ifdef CONFIG_E200
	/* no RFMCI, MCSRRs on E200 */
	CRITICAL_EXCEPTION(0x0200, MACHINE_CHECK, MachineCheck, \
			   machine_check_exception)
#else
	MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
#endif

	/* Data Storage Interrupt */
	START_EXCEPTION(DataStorage)
	NORMAL_EXCEPTION_PROLOG(DATA_STORAGE)
	mfspr	r5,SPRN_ESR		/* Grab the ESR, save it, pass arg3 */
	stw	r5,_ESR(r11)
	mfspr	r4,SPRN_DEAR		/* Grab the DEAR, save it, pass arg2 */
	andis.	r10,r5,(ESR_ILK|ESR_DLK)@h
	bne	1f
	EXC_XFER_LITE(0x0300, handle_page_fault)
1:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0x0300, CacheLockingException)
	/* Instruction Storage Interrupt */
	INSTRUCTION_STORAGE_EXCEPTION

	/* External Input Interrupt */
	EXCEPTION(0x0500, EXTERNAL, ExternalInput, do_IRQ, EXC_XFER_LITE)

	/* Alignment Interrupt */
	ALIGNMENT_EXCEPTION

	/* Program Interrupt */
	PROGRAM_EXCEPTION

	/* Floating Point Unavailable Interrupt */
#ifdef CONFIG_PPC_FPU
	FP_UNAVAILABLE_EXCEPTION
#else
#ifdef CONFIG_E200
	/* E200 treats 'normal' floating point instructions as FP Unavail exception */
	EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, \
		  program_check_exception, EXC_XFER_EE)
#else
	EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, \
		  unknown_exception, EXC_XFER_EE)
#endif
#endif
	/* System Call Interrupt */
	START_EXCEPTION(SystemCall)
	NORMAL_EXCEPTION_PROLOG(SYSCALL)
	EXC_XFER_EE_LITE(0x0c00, DoSyscall)

	/* Auxiliary Processor Unavailable Interrupt */
	EXCEPTION(0x2900, AP_UNAVAIL, AuxillaryProcessorUnavailable, \
		  unknown_exception, EXC_XFER_EE)

	/* Decrementer Interrupt */
	DECREMENTER_EXCEPTION

	/* Fixed Interval Timer Interrupt */
	/* TODO: Add FIT support */
	EXCEPTION(0x3100, FIT, FixedIntervalTimer, \
		  unknown_exception, EXC_XFER_EE)

	/* Watchdog Timer Interrupt */
#ifdef CONFIG_BOOKE_WDT
	CRITICAL_EXCEPTION(0x3200, WATCHDOG, WatchdogTimer, WatchdogException)
#else
	CRITICAL_EXCEPTION(0x3200, WATCHDOG, WatchdogTimer, unknown_exception)
#endif
	/* Data TLB Error Interrupt */
	START_EXCEPTION(DataTLBError)
	mtspr	SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
	mfspr	r10, SPRN_SPRG_THREAD
	stw	r11, THREAD_NORMSAVE(0)(r10)
#ifdef CONFIG_KVM_BOOKE_HV
BEGIN_FTR_SECTION
	mfspr	r11, SPRN_SRR1
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
#endif
	stw	r12, THREAD_NORMSAVE(1)(r10)
	stw	r13, THREAD_NORMSAVE(2)(r10)
	mfcr	r13
	stw	r13, THREAD_NORMSAVE(3)(r10)
	DO_KVM	BOOKE_INTERRUPT_DTLB_MISS SPRN_SRR1
	mfspr	r10, SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	5, r10, r11
	blt	5, 3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MAS1		/* Set TID to 0 */
	rlwinm	r12,r12,0,16,1
	mtspr	SPRN_MAS1,r12
	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)

4:
	/* Mask of required permission bits. Note that while we
	 * do copy ESR:ST to _PAGE_RW position as trying to write
	 * to an RO page is pretty common, we don't do it with
	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
	 * event so I'd rather take the overhead when it happens
	 * rather than adding an instruction here. We should measure
	 * whether the whole thing is worth it in the first place,
	 * as we could then avoid loading SPRN_ESR completely.
	 *
	 * TODO: Is it worth doing that mfspr & rlwimi in the first
	 * place or can we save a couple of instructions here ?
	 */
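	/*
	 * In rough C terms the check below is (illustrative only):
	 *
	 *	need = _PAGE_PRESENT | _PAGE_ACCESSED
	 *	     | (esr_st ? _PAGE_RW : 0);	// rlwimi copies ESR[ST]
	 *	if (need & ~pte_low)
	 *		goto bail;		// andc. ... bne 2f
	 */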
	mfspr	r12,SPRN_ESR
#ifdef CONFIG_PTE_64BIT
	li	r13,_PAGE_PRESENT
	oris	r13,r13,_PAGE_ACCESSED@h
#else
	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
#endif
	rlwimi	r13,r12,11,29,29

	FIND_PTE
	andc.	r13,r13,r11		/* Check permission */

#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r13,r11,r12		/* create false data dep */
	lwzx	r13,r11,r13		/* Get upper pte bits */
#else
	lwz	r13,0(r12)		/* Get upper pte bits */
#endif
#endif

	bne	2f			/* Bail if permission/valid mismatch */

	/* Jump to common tlb load */
	b	finish_tlb_load
2:
	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r10, SPRN_SPRG_THREAD
	lwz	r11, THREAD_NORMSAVE(3)(r10)
	mtcr	r11
	lwz	r13, THREAD_NORMSAVE(2)(r10)
	lwz	r12, THREAD_NORMSAVE(1)(r10)
	lwz	r11, THREAD_NORMSAVE(0)(r10)
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	DataStorage
	/* Instruction TLB Error Interrupt */
	/*
	 * Nearly the same as above, except we get our
	 * information from different registers and bail out
	 * to a different point.
	 */
	START_EXCEPTION(InstructionTLBError)
	mtspr	SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
	mfspr	r10, SPRN_SPRG_THREAD
	stw	r11, THREAD_NORMSAVE(0)(r10)
#ifdef CONFIG_KVM_BOOKE_HV
BEGIN_FTR_SECTION
	mfspr	r11, SPRN_SRR1
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
#endif
	stw	r12, THREAD_NORMSAVE(1)(r10)
	stw	r13, THREAD_NORMSAVE(2)(r10)
	mfcr	r13
	stw	r13, THREAD_NORMSAVE(3)(r10)
	DO_KVM	BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR1
	mfspr	r10, SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	5, r10, r11
	blt	5, 3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MAS1		/* Set TID to 0 */
	rlwinm	r12,r12,0,16,1
	mtspr	SPRN_MAS1,r12

	/* Make up the required permissions for kernel code */
#ifdef CONFIG_PTE_64BIT
	li	r13,_PAGE_PRESENT | _PAGE_BAP_SX
	oris	r13,r13,_PAGE_ACCESSED@h
#else
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
#endif
	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)

	/* Make up the required permissions for user code */
#ifdef CONFIG_PTE_64BIT
	li	r13,_PAGE_PRESENT | _PAGE_BAP_UX
	oris	r13,r13,_PAGE_ACCESSED@h
#else
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
#endif

4:
	FIND_PTE
	andc.	r13,r13,r11		/* Check permission */

#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r13,r11,r12		/* create false data dep */
	lwzx	r13,r11,r13		/* Get upper pte bits */
#else
	lwz	r13,0(r12)		/* Get upper pte bits */
#endif
#endif

	bne	2f			/* Bail if permission mismatch */

	/* Jump to common TLB load point */
	b	finish_tlb_load
2:
	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r10, SPRN_SPRG_THREAD
	lwz	r11, THREAD_NORMSAVE(3)(r10)
	mtcr	r11
	lwz	r13, THREAD_NORMSAVE(2)(r10)
	lwz	r12, THREAD_NORMSAVE(1)(r10)
	lwz	r11, THREAD_NORMSAVE(0)(r10)
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	InstructionStorage
#ifdef CONFIG_SPE
	/* SPE Unavailable */
	START_EXCEPTION(SPEUnavailable)
	NORMAL_EXCEPTION_PROLOG(SPE_ALTIVEC_UNAVAIL)
	beq	1f
	bl	load_up_spe
	b	fast_exception_return
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0x2010, KernelSPE)
#else
	EXCEPTION(0x2020, SPE_ALTIVEC_UNAVAIL, SPEUnavailable, \
		  unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_SPE */
	/* SPE Floating Point Data */
#ifdef CONFIG_SPE
	EXCEPTION(0x2030, SPE_FP_DATA_ALTIVEC_ASSIST, SPEFloatingPointData, \
		  SPEFloatingPointException, EXC_XFER_EE)

	/* SPE Floating Point Round */
	EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
		  SPEFloatingPointRoundException, EXC_XFER_EE)
#else
	EXCEPTION(0x2040, SPE_FP_DATA_ALTIVEC_ASSIST, SPEFloatingPointData, \
		  unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
		  unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_SPE */
	/* Performance Monitor */
	EXCEPTION(0x2060, PERFORMANCE_MONITOR, PerformanceMonitor, \
		  performance_monitor_exception, EXC_XFER_STD)

	EXCEPTION(0x2070, DOORBELL, Doorbell, doorbell_exception, EXC_XFER_STD)

	CRITICAL_EXCEPTION(0x2080, DOORBELL_CRITICAL, \
			   CriticalDoorbell, unknown_exception)

	/* Debug Interrupt */
	DEBUG_DEBUG_EXCEPTION

	GUEST_DOORBELL_EXCEPTION

	CRITICAL_EXCEPTION(0, GUEST_DBELL_CRIT, CriticalGuestDoorbell, \
			   unknown_exception)

	/* Hypercall */
	EXCEPTION(0, HV_SYSCALL, Hypercall, unknown_exception, EXC_XFER_EE)

	/* Embedded Hypervisor Privilege */
	EXCEPTION(0, HV_PRIV, Ehvpriv, unknown_exception, EXC_XFER_EE)
/*
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *	r10 - tsize encoding (if HUGETLB_PAGE) or available to use
 *	r11 - TLB (info from Linux PTE)
 *	r12 - available to use
 *	r13 - upper bits of PTE (if PTE_64BIT) or available to use
 *	CR5 - results of addr >= PAGE_OFFSET
 *	MAS0, MAS1 - loaded with proper value when we get here
 *	MAS2, MAS3 - will need additional info from Linux PTE
 *	Upon exit, we reload everything and RFI.
 */
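/*
 * Summary of what gets programmed below, in rough C terms
 * (illustrative only):
 *
 *	MAS3 = (paddr & PAGE_MASK) | perm_bits;   // from the Linux PTE
 *	MAS7 = upper_32_bits(paddr);              // only with MMU_FTR_BIG_PHYS
 *	MAS2 = (ea & PAGE_MASK) | wimge_bits;
 */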
finish_tlb_load:
#ifdef CONFIG_HUGETLB_PAGE
	cmpwi	6, r10, 0		/* check for huge page */
	beq	6, finish_tlb_load_cont	/* !huge */

	/* Alas, we need more scratch registers for hugepages */
	mfspr	r12, SPRN_SPRG_THREAD
	stw	r14, THREAD_NORMSAVE(4)(r12)
	stw	r15, THREAD_NORMSAVE(5)(r12)
	stw	r16, THREAD_NORMSAVE(6)(r12)
	stw	r17, THREAD_NORMSAVE(7)(r12)

	/* Get the next_tlbcam_idx percpu var */
#ifdef CONFIG_SMP
	lwz	r12, THREAD_INFO-THREAD(r12)
	lwz	r15, TI_CPU(r12)
	lis	r14, __per_cpu_offset@h
	ori	r14, r14, __per_cpu_offset@l
	rlwinm	r15, r15, 2, 0, 29
	lwzx	r16, r14, r15
#else
	li	r16, 0
#endif
	lis	r17, next_tlbcam_idx@h
	ori	r17, r17, next_tlbcam_idx@l
	add	r17, r17, r16		/* r17 = *next_tlbcam_idx */
	lwz	r15, 0(r17)		/* r15 = next_tlbcam_idx */

	lis	r14, MAS0_TLBSEL(1)@h	/* select TLB1 (TLBCAM) */
	rlwimi	r14, r15, 16, 4, 15	/* next_tlbcam_idx entry */
	mtspr	SPRN_MAS0, r14

	/* Extract TLB1CFG(NENTRY) */
	mfspr	r16, SPRN_TLB1CFG
	andi.	r16, r16, 0xfff

	/* Update next_tlbcam_idx, wrapping when necessary */
	addi	r15, r15, 1
	cmpw	r15, r16
	blt	100f
	lis	r14, tlbcam_index@h
	ori	r14, r14, tlbcam_index@l
	lwz	r15, 0(r14)
100:	stw	r15, 0(r17)
	/*
	 * Calc MAS1_TSIZE from r10 (which has pshift encoded)
	 * tlb_enc = (pshift - 10).
	 */
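	/*
	 * Worked example (illustrative): a 16M huge page has pshift = 24,
	 * so tlb_enc = 24 - 10 = 14, i.e. 2^14 KiB = 16M in the
	 * MAS1[TSIZE] encoding used here.
	 */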
	subi	r15, r10, 10
	mfspr	r16, SPRN_MAS1
	rlwimi	r16, r15, 7, 20, 24
	mtspr	SPRN_MAS1, r16

	/* copy the pshift for use later */
	mr	r14, r10
#endif /* CONFIG_HUGETLB_PAGE */
	/*
	 * We set execute, because we don't have the granularity to
	 * properly set this at the page level (Linux problem).
	 * Many of these bits are software only. Bits we don't set
	 * here are (or properly should be) assumed to have the
	 * appropriate value.
	 */
finish_tlb_load_cont:
#ifdef CONFIG_PTE_64BIT
	rlwinm	r12, r11, 32-2, 26, 31	/* Move in perm bits */
	andi.	r10, r11, _PAGE_DIRTY
	bne	1f
	li	r10, MAS3_SW | MAS3_UW
	andc	r12, r12, r10
1:	rlwimi	r12, r13, 20, 0, 11	/* grab RPN[32:43] */
	rlwimi	r12, r11, 20, 12, 19	/* grab RPN[44:51] */
2:	mtspr	SPRN_MAS3, r12
BEGIN_MMU_FTR_SECTION
	srwi	r10, r13, 12		/* grab RPN[12:31] */
	mtspr	SPRN_MAS7, r10
END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
#else
	li	r10, (_PAGE_EXEC | _PAGE_PRESENT)
	mr	r13, r11
	rlwimi	r10, r11, 31, 29, 29	/* extract _PAGE_DIRTY into SW */
	and	r12, r11, r10
	andi.	r10, r11, _PAGE_USER	/* Test for _PAGE_USER */
	slwi	r10, r12, 1
	or	r10, r10, r12
	iseleq	r12, r12, r10
	rlwimi	r13, r12, 0, 20, 31	/* Get RPN from PTE, merge w/ perms */
	mtspr	SPRN_MAS3, r13
#endif
	mfspr	r12, SPRN_MAS2
#ifdef CONFIG_PTE_64BIT
	rlwimi	r12, r11, 32-19, 27, 31	/* extract WIMGE from pte */
#else
	rlwimi	r12, r11, 26, 27, 31	/* extract WIMGE from pte */
#endif
#ifdef CONFIG_HUGETLB_PAGE
	beq	6, 3f			/* don't mask if page isn't huge */
	li	r13, 1
	slw	r13, r13, r14
	subi	r13, r13, 1
	rlwinm	r13, r13, 0, 0, 19	/* bottom bits used for WIMGE/etc */
	andc	r12, r12, r13		/* mask off ea bits within the page */
#endif
3:	mtspr	SPRN_MAS2, r12
#ifdef CONFIG_E200
	/* Round robin TLB1 entries assignment */
	mfspr	r12, SPRN_MAS0

	/* Extract TLB1CFG(NENTRY) */
	mfspr	r11, SPRN_TLB1CFG
	andi.	r11, r11, 0xfff

	/* Extract MAS0(NV) */
	andi.	r13, r12, 0xfff
	addi	r13, r13, 1
	cmpw	0, r13, r11
	addi	r12, r12, 1

	/* check if we need to wrap */
	blt	7f

	/* wrap back to first free tlbcam entry */
	lis	r13, tlbcam_index@ha
	lwz	r13, tlbcam_index@l(r13)
	rlwimi	r12, r13, 0, 20, 31
7:
	mtspr	SPRN_MAS0, r12
#endif /* CONFIG_E200 */
	tlbwe

	/* Done...restore registers and get out of here. */
	mfspr	r10, SPRN_SPRG_THREAD
#ifdef CONFIG_HUGETLB_PAGE
	beq	6, 8f			/* skip restore for 4k page faults */
	lwz	r14, THREAD_NORMSAVE(4)(r10)
	lwz	r15, THREAD_NORMSAVE(5)(r10)
	lwz	r16, THREAD_NORMSAVE(6)(r10)
	lwz	r17, THREAD_NORMSAVE(7)(r10)
#endif
8:	lwz	r11, THREAD_NORMSAVE(3)(r10)
	mtcr	r11
	lwz	r13, THREAD_NORMSAVE(2)(r10)
	lwz	r12, THREAD_NORMSAVE(1)(r10)
	lwz	r11, THREAD_NORMSAVE(0)(r10)
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	rfi					/* Force context change */
#ifdef CONFIG_SPE
/* Note that the SPE support is closely modeled after the AltiVec
 * support. Changes to one are likely to be applicable to the
 * other!
 */
_GLOBAL(load_up_spe)
/*
 * Disable SPE for the task which had SPE previously,
 * and save its SPE registers in its thread_struct.
 * Enables SPE for use in the kernel on return.
 * On SMP we know the SPE units are free, since we give it up every
 * switch. -- Kumar
 */
	mfmsr	r5
	oris	r5,r5,MSR_SPE@h
	mtmsr	r5			/* enable use of SPE now */
	isync
/*
 * For SMP, we don't do lazy SPE switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another. Instead we call giveup_spe in switch_to.
 */
#ifndef CONFIG_SMP
	lis	r3,last_task_used_spe@ha
	lwz	r4,last_task_used_spe@l(r3)
	cmpi	0,r4,0
	beq	1f
	addi	r4,r4,THREAD		/* want THREAD of last_task_used_spe */
	SAVE_32EVRS(0,r10,r4,THREAD_EVR0)
	evxor	evr10, evr10, evr10	/* clear out evr10 */
	evmwumiaa evr10, evr10, evr10	/* evr10 <- ACC = 0 * 0 + ACC */
	li	r5,THREAD_ACC
	evstddx	evr10, r4, r5		/* save off accumulator */
	lwz	r5,PT_REGS(r4)
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r10,MSR_SPE@h
	andc	r4,r4,r10		/* disable SPE for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* !CONFIG_SMP */
	/* enable use of SPE after return */
	oris	r9,r9,MSR_SPE@h
	mfspr	r5,SPRN_SPRG_THREAD	/* current task's THREAD (phys) */
	li	r4,1
	li	r10,THREAD_ACC
	stw	r4,THREAD_USED_SPE(r5)
	evlddx	evr4,r10,r5
	evmra	evr4,evr4
	REST_32EVRS(0,r10,r5,THREAD_EVR0)
#ifndef CONFIG_SMP
	subi	r4,r5,THREAD
	stw	r4,last_task_used_spe@l(r3)
#endif /* !CONFIG_SMP */
	blr
/*
 * SPE unavailable trap from kernel - print a message, but let
 * the task use SPE in the kernel until it returns to user mode.
 */
KernelSPE:
	lwz	r3,_MSR(r1)
	oris	r3,r3,MSR_SPE@h
	stw	r3,_MSR(r1)	/* enable use of SPE after return */
	lis	r3,87f@h
	ori	r3,r3,87f@l
	mr	r4,r2		/* current */
	lwz	r5,_NIP(r1)
	bl	printk
	b	ret_from_except
87:	.string	"SPE used in kernel (task=%p, pc=%x) \n"
	.align	4,0

#endif /* CONFIG_SPE */
/*
 * Translate the effective address in r3 to a physical address. The
 * physical address will be put into r3 (higher 32 bits) and r4
 * (lower 32 bits).
 */
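/*
 * In effect (illustrative only):
 *
 *	paddr = (MAS3 & ~(page_size - 1)) | (ea & (page_size - 1));
 *
 * with page_size decoded from MAS1[TSIZE] by the shifts below, and the
 * upper 32 bits taken from MAS7 when CONFIG_PHYS_64BIT is set.
 */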
get_phys_addr:
	mfmsr	r8
	mfspr	r9,SPRN_PID
	rlwinm	r9,r9,16,0x3fff0000	/* turn PID into MAS6[SPID] */
	rlwimi	r9,r8,28,0x00000001	/* turn MSR[DS] into MAS6[SAS] */
	mtspr	SPRN_MAS6,r9

	tlbsx	0,r3			/* must succeed */

	mfspr	r8,SPRN_MAS1
	mfspr	r12,SPRN_MAS3
	rlwinm	r9,r8,25,0x1f		/* r9 = log2(page size) */
	li	r10,1024
	slw	r10,r10,r9		/* r10 = page size */
	addi	r10,r10,-1
	and	r11,r3,r10		/* r11 = page offset */
	andc	r4,r12,r10		/* r4 = page base */
	or	r4,r4,r11		/* r4 = devtree phys addr */
#ifdef CONFIG_PHYS_64BIT
	mfspr	r3,SPRN_MAS7
#else
	li	r3,0
#endif
	blr
/* Adjust or setup IVORs for e200 */
_GLOBAL(__setup_e200_ivors)
	li	r3,SPEUnavailable@l
	mtspr	SPRN_IVOR32,r3
	li	r3,SPEFloatingPointData@l
	mtspr	SPRN_IVOR33,r3
	li	r3,SPEFloatingPointRound@l
	mtspr	SPRN_IVOR34,r3
	sync
	blr

/* Adjust or setup IVORs for e500v1/v2 */
_GLOBAL(__setup_e500_ivors)
	li	r3,SPEUnavailable@l
	mtspr	SPRN_IVOR32,r3
	li	r3,SPEFloatingPointData@l
	mtspr	SPRN_IVOR33,r3
	li	r3,SPEFloatingPointRound@l
	mtspr	SPRN_IVOR34,r3
	li	r3,PerformanceMonitor@l
	mtspr	SPRN_IVOR35,r3
	sync
	blr

/* Adjust or setup IVORs for e500mc */
_GLOBAL(__setup_e500mc_ivors)
	li	r3,PerformanceMonitor@l
	mtspr	SPRN_IVOR35,r3
	li	r3,Doorbell@l
	mtspr	SPRN_IVOR36,r3
	li	r3,CriticalDoorbell@l
	mtspr	SPRN_IVOR37,r3
	sync
	blr

/* Adjust or setup IVORs for embedded hypervisor */
_GLOBAL(__setup_ehv_ivors)
	li	r3,GuestDoorbell@l
	mtspr	SPRN_IVOR38,r3
	li	r3,CriticalGuestDoorbell@l
	mtspr	SPRN_IVOR39,r3
	li	r3,Hypercall@l
	mtspr	SPRN_IVOR40,r3
	li	r3,Ehvpriv@l
	mtspr	SPRN_IVOR41,r3
	sync
	blr
#ifdef CONFIG_SPE
/*
 * extern void giveup_spe(struct task_struct *prev)
 */
_GLOBAL(giveup_spe)
	mfmsr	r5
	oris	r5,r5,MSR_SPE@h
	mtmsr	r5			/* enable use of SPE now */
	isync
	cmpi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	lwz	r5,PT_REGS(r3)
	cmpi	0,r5,0
	SAVE_32EVRS(0, r4, r3, THREAD_EVR0)
	evxor	evr6, evr6, evr6	/* clear out evr6 */
	evmwumiaa evr6, evr6, evr6	/* evr6 <- ACC = 0 * 0 + ACC */
	li	r4,THREAD_ACC
	evstddx	evr6, r4, r3		/* save off accumulator */
	beq	1f
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_SPE@h
	andc	r4,r4,r3		/* disable SPE for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	lis	r4,last_task_used_spe@ha
	stw	r5,last_task_used_spe@l(r4)
#endif /* !CONFIG_SMP */
	blr
#endif /* CONFIG_SPE */
/*
 * extern void abort(void)
 *
 * At present, this routine just applies a system reset.
 */
_GLOBAL(abort)
	li	r13,0
	mtspr	SPRN_DBCR0,r13		/* disable all debug events */
	isync
	mfmsr	r13
	ori	r13,r13,MSR_DE@l	/* Enable Debug Events */
	mtmsr	r13
	isync
	mfspr	r13,SPRN_DBCR0
	lis	r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h
	mtspr	SPRN_DBCR0,r13
	isync
_GLOBAL(set_context)

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is the second parameter.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r4, 0x4(r5)
#endif
	mtspr	SPRN_PID,r3
	isync			/* Force context change */
	blr
_GLOBAL(flush_dcache_L1)
	mfspr	r3,SPRN_L1CFG0

	rlwinm	r5,r3,9,3	/* Extract cache block size */
	twlgti	r5,1		/* Only 32 and 64 byte cache blocks
				 * are currently defined.
				 */
	li	r4,32
	subfic	r6,r5,2		/* r6 = log2(1KiB / cache block size) -
				 * log2(number of ways)
				 */
	slw	r5,r4,r5	/* r5 = cache block size */

	rlwinm	r7,r3,0,0xff	/* Extract number of KiB in the cache */
	mulli	r7,r7,13	/* An 8-way cache will require 13
				 * loads per set.
				 */
	slw	r7,r7,r6
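	/*
	 * Worked example (illustrative numbers): a 32 KiB, 8-way cache
	 * with 64 byte blocks gives r6 = log2(1024/64) - log2(8) = 1 and
	 * r7 = 32 * 13 = 416, shifted left by r6 to 832 total: 13 loads
	 * for each of the cache's 64 sets.
	 */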
	/* save off HID0 and set DCFA */
	mfspr	r8,SPRN_HID0
	ori	r9,r8,HID0_DCFA@l
	mtspr	SPRN_HID0,r9
	isync

	lis	r4,KERNELBASE@h
	mtctr	r7

1:	lwz	r3,0(r4)	/* Load... */
	add	r4,r4,r5
	bdnz	1b

	msync
	lis	r4,KERNELBASE@h
	mtctr	r7

1:	dcbf	0,r4		/* ...and flush. */
	add	r4,r4,r5
	bdnz	1b

	/* restore HID0 */
	mtspr	SPRN_HID0,r8
	isync

	blr
/* Flush L1 d-cache, invalidate and disable d-cache and i-cache */
_GLOBAL(__flush_disable_L1)
	mflr	r10
	bl	flush_dcache_L1	/* Flush L1 d-cache */
	mtlr	r10

	mfspr	r4, SPRN_L1CSR0	/* Invalidate and disable d-cache */
	li	r5, 2
	rlwimi	r4, r5, 0, 3

	msync
	isync
	mtspr	SPRN_L1CSR0, r4
	isync

1:	mfspr	r4, SPRN_L1CSR0	/* Wait for the invalidate to finish */
	andi.	r4, r4, 2
	bne	1b

	mfspr	r4, SPRN_L1CSR1	/* Invalidate and disable i-cache */
	li	r5, 2
	rlwimi	r4, r5, 0, 3

	mtspr	SPRN_L1CSR1, r4
	isync

	blr
#ifdef CONFIG_SMP
/* When we get here, r24 needs to hold the CPU # */
	.globl __secondary_start
__secondary_start:
	lis	r3,__secondary_hold_acknowledge@h
	ori	r3,r3,__secondary_hold_acknowledge@l
	stw	r24,0(r3)

	li	r3,0
	mr	r4,r24		/* Why? */
	bl	call_setup_cpu

	lis	r3,tlbcam_index@ha
	lwz	r3,tlbcam_index@l(r3)
	mtctr	r3
	li	r26,0		/* r26 safe? */

	/* Load each CAM entry */
1:	mr	r3,r26
	bl	loadcam_entry
	addi	r26,r26,1
	bdnz	1b

	/* get current_thread_info and current */
	lis	r1,secondary_ti@ha
	lwz	r1,secondary_ti@l(r1)
	lwz	r2,TI_TASK(r1)

	/* stack */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	stw	r0,0(r1)

	/* ptr to current thread */
	addi	r4,r2,THREAD	/* address of our thread_struct */
	mtspr	SPRN_SPRG_THREAD,r4

	/* Setup the defaults for TLB entries */
	li	r4,(MAS4_TSIZED(BOOK3E_PAGESZ_4K))@l
	mtspr	SPRN_MAS4,r4

	/* Jump to start_secondary */
	lis	r4,MSR_KERNEL@h
	ori	r4,r4,MSR_KERNEL@l
	lis	r3,start_secondary@h
	ori	r3,r3,start_secondary@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	sync
	rfi
	sync

	.globl __secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.long	-1
#endif
/*
 * Create a tlb entry with the same effective and physical address as
 * the tlb entry used by the current running code. But set the TS to 1.
 * Then switch to the address space 1. It will return with the r3 set to
 * the ESEL of the newly created tlb entry.
 */
_GLOBAL(switch_to_as1)
	mflr	r5

	/* Find an entry not used */
	mfspr	r3,SPRN_TLB1CFG
	andi.	r3,r3,0xfff
	mfspr	r4,SPRN_PID
	rlwinm	r4,r4,16,0x3fff0000	/* turn PID into MAS6[SPID] */
	mtspr	SPRN_MAS6,r4
1:	lis	r4,0x1000		/* Set MAS0(TLBSEL) = 1 */
	addi	r3,r3,-1
	rlwimi	r4,r3,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r3) */
	mtspr	SPRN_MAS0,r4
	tlbre
	mfspr	r4,SPRN_MAS1
	andis.	r4,r4,MAS1_VALID@h
	bne	1b

	/* Get the tlb entry used by the current running code */
	bl	0f
0:	mflr	r4
	tlbsx	0,r4

	mfspr	r4,SPRN_MAS1
	ori	r4,r4,MAS1_TS		/* Set the TS = 1 */
	mtspr	SPRN_MAS1,r4

	mfspr	r4,SPRN_MAS0
	rlwinm	r4,r4,0,~MAS0_ESEL_MASK
	rlwimi	r4,r3,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r3) */
	mtspr	SPRN_MAS0,r4
	tlbwe

	mfmsr	r4
	ori	r4,r4,MSR_IS | MSR_DS
	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r4
	sync
	rfi
/*
 * Restore to the address space 0 and also invalidate the tlb entry created
 * by switch_to_as1.
 * r3 - the tlb entry which should be invalidated
 * r4 - __pa(PAGE_OFFSET in AS1) - __pa(PAGE_OFFSET in AS0)
 * r5 - device tree virtual address. If r4 is 0, r5 is ignored.
 */
_GLOBAL(restore_to_as0)
	mflr	r0

	bl	0f
0:	mflr	r9
	addi	r9,r9,1f - 0b

	/*
	 * We may map the PAGE_OFFSET in AS0 to a different physical address,
	 * so we need to calculate the right jump and device tree address
	 * based on the offset passed in r4.
	 */
	add	r9,r9,r4
	add	r5,r5,r4

	mfmsr	r7
	li	r8,(MSR_IS | MSR_DS)
	andc	r7,r7,r8

	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r7
	sync
	rfi

	/* Invalidate the temporary tlb entry for AS1 */
1:	lis	r9,0x1000		/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r9,r3,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r3) */
	mtspr	SPRN_MAS0,r9
	tlbre
	mfspr	r9,SPRN_MAS1
	rlwinm	r9,r9,0,2,31		/* Clear MAS1 Valid and IPPROT */
	mtspr	SPRN_MAS1,r9
	tlbwe
	isync

	cmpwi	r4,0
	bne	3f
	mtlr	r0
	blr

	/*
	 * The PAGE_OFFSET will map to a different physical address,
	 * jump to _start to do another relocation again.
	 */
3:	mr	r3,r5
	bl	_start
/*
 * We put a few things here that have to be page-aligned. This stuff
 * goes at the beginning of the data segment, which is page-aligned.
 */
	.data
	.align	12
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096
	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

/*
 * Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8