/*
 * linux/arch/arm/boot/compressed/head.S
 *
 * Copyright (C) 1996-2002 Russell King
 * Copyright (C) 2004 Hyok S. Choi (MPU support)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
/*
 * Note that these macros must not contain any code which is not
 * 100% relocatable.  Any attempt to do so will result in a crash.
 * Please select one of the following when turning on debugging.
 */
#if defined(CONFIG_DEBUG_ICEDCC)
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
        .macro  loadsp, rb, tmp
        .endm
        .macro  writeb, ch, rb
        mcr     p14, 0, \ch, c0, c5, 0
        .endm
#elif defined(CONFIG_CPU_XSCALE)
        .macro  loadsp, rb, tmp
        .endm
        .macro  writeb, ch, rb
        mcr     p14, 0, \ch, c8, c0, 0
        .endm
#else
        .macro  loadsp, rb, tmp
        .endm
        .macro  writeb, ch, rb
        mcr     p14, 0, \ch, c1, c0, 0
        .endm
#endif
#include <mach/debug-macro.S>
#if defined(CONFIG_ARCH_SA1100)
        .macro  loadsp, rb, tmp
        mov     \rb, #0x80000000        @ physical base address
#ifdef CONFIG_DEBUG_LL_SER3
        add     \rb, \rb, #0x00050000   @ Ser3
#else
        add     \rb, \rb, #0x00010000   @ Ser1
#endif
        .endm
#elif defined(CONFIG_ARCH_S3C2410)
        .macro  loadsp, rb, tmp
        mov     \rb, #0x50000000        @ S3C24xx UART base
        add     \rb, \rb, #0x4000 * CONFIG_S3C_LOWLEVEL_UART_PORT
        .endm
#else
        .macro  loadsp, rb, tmp
        addruart \rb, \tmp
        .endm
#endif
        .macro  debug_reloc_start
        kphex   r6, 8           /* processor id */
        kphex   r7, 8           /* architecture id */
#ifdef CONFIG_CPU_CP15
        mrc     p15, 0, r0, c1, c0
        kphex   r0, 8           /* control reg */
#endif
        kphex   r5, 8           /* decompressed kernel start */
        kphex   r9, 8           /* decompressed kernel end */
        kphex   r4, 8           /* kernel execution address */
        .endm

        .macro  debug_reloc_end
        kphex   r5, 8           /* end of kernel */
        bl      memdump         /* dump 256 bytes at start of kernel */
        .endm
        .section ".start", #alloc, #execinstr
/*
 * sort out different calling conventions
 */
        .arm                            @ Always enter in ARM state
        .type   start,#function
start:
 THUMB( adr     r12, BSYM(1f)   )
        .word   0x016f2818              @ Magic numbers to help the loader
        .word   start                   @ absolute load/run zImage address
        .word   _edata                  @ zImage end address
1:      mov     r7, r1                  @ save architecture ID
        mov     r8, r2                  @ save atags pointer
#ifndef __ARM_ARCH_2__
        /*
         * Booting from Angel - need to enter SVC mode and disable
         * FIQs/IRQs (numeric definitions from angel arm.h source).
         * We only do this if we were in user mode on entry.
         */
        mrs     r2, cpsr                @ get current mode
        tst     r2, #3                  @ not user?
        bne     not_angel
        mov     r0, #0x17               @ angel_SWIreason_EnterSVC
 ARM(   swi     0x123456        )       @ angel_SWI_ARM
 THUMB( svc     0xab            )       @ angel_SWI_THUMB
not_angel:
        mrs     r2, cpsr                @ turn off interrupts to
        orr     r2, r2, #0xc0           @ prevent angel from running
        msr     cpsr_c, r2
#else
        teqp    pc, #0x0c000003         @ turn off interrupts
#endif
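        /*
         * A note on the teqp in the 26-bit (ARMv2) branch above: on
         * 26-bit ARMs the PSR shares r15 with the PC, and a
         * data-processing instruction with Rd = pc and the S bit set
         * (the "p" suffix) writes the PSR bits directly.  Here
         * #0x0c000003 sets I and F (bits 27 and 26, interrupts off)
         * and mode bits 0-1 = 3, i.e. SVC26 mode.
         */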
        /*
         * Note that some cache flushing and other stuff may
         * be needed here - is there an Angel SWI call for this?
         */

        /*
         * Some architecture-specific code can be inserted by the
         * linker here, but it should preserve r7, r8, and r9.
         */
#ifdef CONFIG_AUTO_ZRELADDR
        @ determine final kernel image address
        mov     r4, pc
        and     r4, r4, #0xf8000000
        add     r4, r4, #TEXT_OFFSET
#else
        ldr     r4, =zreladdr
#endif
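        /*
         * A note on the masking above: the current PC is rounded down
         * to a 128MiB boundary (only the top five address bits survive
         * #0xf8000000), on the assumption that RAM starts on a 128MiB
         * boundary and the zImage was loaded within the first 128MiB
         * of it; adding TEXT_OFFSET then yields the final kernel
         * image address.
         */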
restart:        adr     r0, LC0
        ldmia   r0, {r1, r2, r3, r6, r10, r11, r12}
        ldr     sp, [r0, #28]

        /*
         * We might be running at a different address.  We need
         * to fix up various pointers.
         */
        sub     r0, r0, r1              @ calculate the delta offset
        add     r6, r6, r0              @ _edata
        add     r10, r10, r0            @ inflated kernel size location
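        /*
         * r0 holds the run-time address of LC0 and r1 its link-time
         * address (the first word stored at LC0 is LC0 itself), so
         * r0 - r1 is the relocation delta that must be added to every
         * link-time pointer loaded from the table.
         */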
        /*
         * The kernel build system appends the size of the
         * decompressed kernel at the end of the compressed data
         * in little-endian form.
         */
        ldrb    r9, [r10, #0]
        ldrb    lr, [r10, #1]
        orr     r9, r9, lr, lsl #8
        ldrb    lr, [r10, #2]
        ldrb    r10, [r10, #3]
        orr     r9, r9, lr, lsl #16
        orr     r9, r9, r10, lsl #24
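        /*
         * The size word is read a byte at a time because it sits
         * wherever the compressed data happens to end: it is not
         * guaranteed to be word aligned, and with the caches/MMU
         * possibly off an unaligned ldr cannot be relied upon, so
         * the bytes are assembled into r9 in little-endian order
         * by hand.
         */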
#ifndef CONFIG_ZBOOT_ROM
        /* malloc space is above the relocated stack (64k max) */
        add     sp, sp, r0
        add     r10, sp, #0x10000
#else
        /*
         * With ZBOOT_ROM the bss/stack is non-relocatable,
         * but someone could still run this code from RAM,
         * in which case our reference is _edata.
         */
        mov     r10, r6
#endif

        mov     r5, #0                  @ init dtb size to 0
#ifdef CONFIG_ARM_APPENDED_DTB
/*
 *   r4  = final kernel address
 *   r5  = appended dtb size (still unknown)
 *   r7  = architecture ID
 *   r8  = atags/device tree pointer
 *   r9  = size of decompressed image
 *   r10 = end of this image, including bss/stack/malloc space if non XIP
 */
        /*
         * If there are device trees (dtb) appended to zImage, advance
         * r10 so that the dtb data will get relocated along with the
         * kernel if necessary.
         */
        ldr     lr, [r6, #0]
        ldr     r1, =0xedfe0dd0         @ sig is 0xd00dfeed big endian
        cmp     lr, r1
        bne     dtb_check_done          @ not found
        mov     r8, r6                  @ use the appended device tree
        /*
         * Make sure that the DTB doesn't end up in the final
         * kernel's .bss area.  To do so, we adjust the decompressed
         * kernel size to compensate if that .bss size is larger
         * than the relocated code.
         */
        ldr     r5, =_kernel_bss_size
        adr     r1, wont_overwrite
        sub     r1, r6, r1
        subs    r1, r5, r1
        addhi   r9, r9, r1

        /* Get the dtb's size (totalsize, at offset 4 in the header) */
        ldr     r5, [r6, #4]

        /* convert r5 (dtb size) to little endian */
        eor     r1, r5, r5, ror #16
        bic     r1, r1, #0x00ff0000
        mov     r5, r5, ror #8
        eor     r5, r5, r1, lsr #8
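        /*
         * The four instructions above are the classic pre-ARMv6
         * 32-bit byte swap: for bytes A.B.C.D, (r5 ROR #16) is
         * C.D.A.B, so r1 = (A^C).(B^D).(C^A).(D^B); clearing byte 2
         * of r1 and XORing (r1 LSR #8) into (r5 ROR #8) leaves
         * D.C.B.A, i.e. the dtb size in little-endian order.
         */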
        /* preserve 64-bit alignment */
        add     r5, r5, #7
        bic     r5, r5, #7

        /* relocate some pointers past the appended dtb */
        add     r6, r6, r5
        add     r10, r10, r5
        add     sp, sp, r5

dtb_check_done:
#endif
/*
 * Check to see if we will overwrite ourselves.
 *   r4  = final kernel address
 *   r9  = size of decompressed image
 *   r10 = end of this image, including bss/stack/malloc space if non XIP
 * We basically want:
 *   r4 - 16k page directory >= r10 -> OK
 *   r4 + image length <= address of wont_overwrite -> OK
 */
        add     r10, r10, #16384
        cmp     r4, r10
        bhs     wont_overwrite
        add     r10, r4, r9
        adr     r9, wont_overwrite
        cmp     r10, r9
        bls     wont_overwrite
/*
 * Relocate ourselves past the end of the decompressed kernel.
 *   r10 = end of the decompressed kernel
 * Because we always copy ahead, we need to do it from the end and go
 * backward in case the source and destination overlap.
 */
        /*
         * Bump to the next 256-byte boundary with the size of
         * the relocation code added.  This avoids overwriting
         * ourselves when the offset is small.
         */
        add     r10, r10, #((reloc_code_end - restart + 256) & ~255)
        bic     r10, r10, #255
        /* Get start of code we want to copy and align it down. */
        adr     r5, restart
        bic     r5, r5, #31

        sub     r9, r6, r5              @ size to copy
        add     r9, r9, #31             @ rounded up to a multiple
        bic     r9, r9, #31             @ ... of 32 bytes
        add     r6, r9, r5
        add     r9, r9, r10

1:      ldmdb   r6!, {r0 - r3, r10 - r12, lr}
        cmp     r6, r5
        stmdb   r9!, {r0 - r3, r10 - r12, lr}
        bhi     1b
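        /*
         * Each pass of the loop above moves 32 bytes: eight registers,
         * four bytes each, hence the earlier rounding of the copy size
         * up to a multiple of 32.  The copy runs downwards
         * (ldmdb/stmdb decrement before access) because the
         * destination lies above the source, and overlapping regions
         * are only safe when copied from the end.
         */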
        /* Preserve offset to relocated code. */
        sub     r6, r9, r6

#ifndef CONFIG_ZBOOT_ROM
        /* cache_clean_flush may use the stack, so relocate it */
        add     sp, sp, r6
#endif

        bl      cache_clean_flush

        adr     r0, BSYM(restart)
        add     r0, r0, r6
        mov     pc, r0
wont_overwrite:
/*
 * If delta is zero, we are running at the address we were linked at.
 *   r0  = delta
 *   r4  = kernel execution address
 *   r5  = appended dtb size (0 if not present)
 *   r7  = architecture ID
 */
        orrs    r1, r0, r5
        beq     not_relocated
#ifndef CONFIG_ZBOOT_ROM
        /*
         * If we're running fully PIC (i.e. CONFIG_ZBOOT_ROM = n),
         * we need to fix up pointers into the BSS region.
         * Note that the stack pointer has already been fixed up.
         */
        add     r2, r2, r0
        add     r3, r3, r0

        /*
         * Relocate all entries in the GOT table.
         * Bump bss entries to _edata + dtb size
         */
1:      ldr     r1, [r11, #0]           @ relocate entries in the GOT
        add     r1, r1, r0              @ This fixes up C references
        cmp     r1, r2                  @ if entry >= bss_start &&
        cmphs   r3, r1                  @    bss_end > entry
        addhi   r1, r1, r5              @    entry += dtb size
        str     r1, [r11], #4           @ next entry
        cmp     r11, r12
        blo     1b
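        /*
         * The cmp/cmphs pair above chains two unsigned compares:
         * cmp leaves HS when entry >= bss_start, and only then does
         * cmphs compare bss_end against the entry, leaving HI when
         * bss_end > entry.  So addhi bumps exactly those GOT entries
         * that point into the BSS, which moves up by the (64-bit
         * aligned) dtb size when a dtb is appended.
         */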
        /* bump our bss pointers too */
        add     r2, r2, r5
        add     r3, r3, r5
#else

        /*
         * Relocate entries in the GOT table.  We only relocate
         * the entries that are outside the (relocated) BSS region.
         */
1:      ldr     r1, [r11, #0]           @ relocate entries in the GOT
        cmp     r1, r2                  @ entry < bss_start ||
        cmphs   r3, r1                  @ _end < entry
        addlo   r1, r1, r0              @ table.  This fixes up the
        str     r1, [r11], #4           @ C references.
        cmp     r11, r12
        blo     1b
#endif
not_relocated:  mov     r0, #0
1:      str     r0, [r2], #4            @ clear bss
        cmp     r2, r3
        blo     1b
/*
 * The C runtime environment should now be set up sufficiently.
 * Set up some pointers, and start decompressing.
 *   r4  = kernel execution address
 *   r7  = architecture ID
 *   r8  = atags pointer
 */
        bl      cache_on

        mov     r0, r4
        mov     r1, sp                  @ malloc space above stack
        add     r2, sp, #0x10000        @ 64k max
        mov     r3, r7
        bl      decompress_kernel
        bl      cache_clean_flush
        bl      cache_off
        mov     r0, #0                  @ must be zero
        mov     r1, r7                  @ restore architecture number
        mov     r2, r8                  @ restore atags pointer
 ARM(   mov     pc, r4          )       @ call kernel
 THUMB( bx      r4              )       @ entry point is always ARM
        .align  2
        .type   LC0, #object
LC0:    .word   LC0                     @ r1
        .word   __bss_start             @ r2
        .word   _end                    @ r3
        .word   _edata                  @ r6
        .word   input_data_end - 4      @ r10 (inflated size location)
        .word   _got_start              @ r11
        .word   _got_end                @ ip
        .word   .L_user_stack_end       @ sp
        .size   LC0, . - LC0
#ifdef CONFIG_ARCH_RPC
        .type   params, #object
params: ldr     r0, =0x10000100         @ params_phys for RPC
        mov     pc, lr
        .size   params, . - params
#endif
/*
 * Turn on the cache.  We need to set up some page tables so that we
 * can have both the I and D caches on.
 *
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it.  If we're using it, we
 * will go pop!
 *
 * On entry,
 *   r4 = kernel execution address
 *   r7 = architecture number
 *   r8 = atags pointer
 * On exit,
 *   r0, r1, r2, r3, r9, r10, r12 corrupted
 * This routine must preserve:
 *   r4, r7, r8
 */
        .align  5
cache_on:       mov     r3, #8          @ cache_on function
        b       call_cache_fn
/*
 * Initialize the highest-priority protection region, PR7,
 * to cover the whole 32-bit address space, cacheable and bufferable.
 */
__armv4_mpu_cache_on:
        mov     r0, #0x3f               @ 4G, the whole
        mcr     p15, 0, r0, c6, c7, 0   @ PR7 Area Setting
        mcr     p15, 0, r0, c6, c7, 1

        mov     r0, #0x80               @ PR7
        mcr     p15, 0, r0, c2, c0, 0   @ D-cache on
        mcr     p15, 0, r0, c2, c0, 1   @ I-cache on
        mcr     p15, 0, r0, c3, c0, 0   @ write-buffer on

        mov     r0, #0xc000
        mcr     p15, 0, r0, c5, c0, 1   @ I-access permission
        mcr     p15, 0, r0, c5, c0, 0   @ D-access permission

        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4  @ drain write buffer
        mcr     p15, 0, r0, c7, c5, 0   @ flush(inval) I-Cache
        mcr     p15, 0, r0, c7, c6, 0   @ flush(inval) D-Cache
        mrc     p15, 0, r0, c1, c0, 0   @ read control reg
                                        @ ...I .... ..D. WC.M
        orr     r0, r0, #0x002d         @ .... .... ..1. 11.1
        orr     r0, r0, #0x1000         @ ...1 .... .... ....

        mcr     p15, 0, r0, c1, c0, 0   @ write control reg

        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 0   @ flush(inval) I-Cache
        mcr     p15, 0, r0, c7, c6, 0   @ flush(inval) D-Cache
        mov     pc, lr
__armv3_mpu_cache_on:
        mov     r0, #0x3f               @ 4G, the whole
        mcr     p15, 0, r0, c6, c7, 0   @ PR7 Area Setting

        mov     r0, #0x80               @ PR7
        mcr     p15, 0, r0, c2, c0, 0   @ cache on
        mcr     p15, 0, r0, c3, c0, 0   @ write-buffer on

        mov     r0, #0xc000
        mcr     p15, 0, r0, c5, c0, 0   @ access permission

        mov     r0, #0
        mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
        /*
         * ?? ARMv3 MMU does not allow reading the control register,
         * does this really work on ARMv3 MPU?
         */
        mrc     p15, 0, r0, c1, c0, 0   @ read control reg
                                        @ .... .... .... WC.M
        orr     r0, r0, #0x000d         @ .... .... .... 11.1
        /* ?? this overwrites the value constructed above? */
        mov     r0, #0
        mcr     p15, 0, r0, c1, c0, 0   @ write control reg

        /* ?? invalidate for the second time? */
        mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
        mov     pc, lr
__setup_mmu:    sub     r3, r4, #16384          @ Page directory size
        bic     r3, r3, #0xff           @ Align the pointer
        bic     r3, r3, #0x3f00
/*
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
 */
        mov     r0, r3
        mov     r9, r0, lsr #18
        mov     r9, r9, lsl #18         @ start of RAM
        add     r10, r9, #0x10000000    @ a reasonable RAM size
        mov     r1, #0x12
        orr     r1, r1, #3 << 10
        add     r2, r3, #16384
1:      cmp     r1, r9                  @ if virt > start of RAM
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
        orrhs   r1, r1, #0x08           @ set cacheable
#else
        orrhs   r1, r1, #0x0c           @ set cacheable, bufferable
#endif
        cmp     r1, r10                 @ if virt > end of RAM
        bichs   r1, r1, #0x0c           @ clear cacheable, bufferable
        str     r1, [r0], #4            @ 1:1 mapping
        add     r1, r1, #1048576
        teq     r0, r2
        bne     1b
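        /*
         * Each of the 4096 words written above is a first-level
         * section descriptor mapping 1MB virtual == 1MB physical.
         * In the descriptor, bit 3 (0x08) is C (cacheable) and bit 2
         * (0x04) is B (bufferable), which is why #0x0c is ORed in for
         * the presumed RAM window and cleared again for everything
         * above it.
         */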
/*
 * If ever we are running from Flash, then we surely want the cache
 * to be enabled also for our execution instance...  We map 2MB of it
 * so there is no map overlap problem for up to 1 MB compressed kernel.
 * If the execution is in RAM then we would only be duplicating the above.
 */
        mov     r1, #0x1e               @ section, cacheable, bufferable
        orr     r1, r1, #3 << 10
        mov     r2, pc
        mov     r2, r2, lsr #20
        orr     r1, r1, r2, lsl #20
        add     r0, r3, r2, lsl #2
        str     r1, [r0], #4
        add     r1, r1, #1048576
        str     r1, [r0]
        mov     pc, lr
__arm926ejs_mmu_cache_on:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
        mov     r0, #4                  @ put dcache in WT mode
        mcr     p15, 7, r0, c15, c0, 0
#endif
__armv4_mmu_cache_on:
        mov     r12, lr
#ifdef CONFIG_MMU
        bl      __setup_mmu
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4  @ drain write buffer
        mcr     p15, 0, r0, c8, c7, 0   @ flush I,D TLBs
        mrc     p15, 0, r0, c1, c0, 0   @ read control reg
        orr     r0, r0, #0x5000         @ I-cache enable, RR cache replacement
        orr     r0, r0, #0x0030
#ifdef CONFIG_CPU_ENDIAN_BE8
        orr     r0, r0, #1 << 25        @ big-endian page tables
#endif
        bl      __common_mmu_cache_on
        mov     r0, #0
        mcr     p15, 0, r0, c8, c7, 0   @ flush I,D TLBs
#endif
        mov     pc, r12
__armv7_mmu_cache_on:
        mov     r12, lr
#ifdef CONFIG_MMU
        mrc     p15, 0, r11, c0, c1, 4  @ read ID_MMFR0
        tst     r11, #0xf               @ VMSA
        blne    __setup_mmu
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4  @ drain write buffer
        tst     r11, #0xf               @ VMSA
        mcrne   p15, 0, r0, c8, c7, 0   @ flush I,D TLBs
#endif
        mrc     p15, 0, r0, c1, c0, 0   @ read control reg
        orr     r0, r0, #0x5000         @ I-cache enable, RR cache replacement
        orr     r0, r0, #0x003c         @ write buffer
#ifdef CONFIG_MMU
#ifdef CONFIG_CPU_ENDIAN_BE8
        orr     r0, r0, #1 << 25        @ big-endian page tables
#endif
        orrne   r0, r0, #1              @ MMU enabled
        movne   r1, #-1
        mcrne   p15, 0, r3, c2, c0, 0   @ load page table pointer
        mcrne   p15, 0, r1, c3, c0, 0   @ load domain access control
#endif
        mcr     p15, 0, r0, c1, c0, 0   @ load control register
        mrc     p15, 0, r0, c1, c0, 0   @ and read it back
        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 4   @ ISB
        mov     pc, r12
__fa526_cache_on:
        mov     r12, lr
        bl      __setup_mmu
        mov     r0, #0
        mcr     p15, 0, r0, c7, c7, 0   @ Invalidate whole cache
        mcr     p15, 0, r0, c7, c10, 4  @ drain write buffer
        mcr     p15, 0, r0, c8, c7, 0   @ flush UTLB
        mrc     p15, 0, r0, c1, c0, 0   @ read control reg
        orr     r0, r0, #0x1000         @ I-cache enable
        bl      __common_mmu_cache_on
        mov     r0, #0
        mcr     p15, 0, r0, c8, c7, 0   @ flush UTLB
        mov     pc, r12
__arm6_mmu_cache_on:
        mov     r12, lr
        bl      __setup_mmu
        mov     r0, #0
        mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
        mcr     p15, 0, r0, c5, c0, 0   @ invalidate whole TLB v3
        mov     r0, #0x30
        bl      __common_mmu_cache_on
        mov     r0, #0
        mcr     p15, 0, r0, c5, c0, 0   @ invalidate whole TLB v3
        mov     pc, r12
__common_mmu_cache_on:
#ifndef CONFIG_THUMB2_KERNEL
        orr     r0, r0, #0x000d         @ Write buffer, mmu
        mov     r1, #-1
        mcr     p15, 0, r3, c2, c0, 0   @ load page table pointer
        mcr     p15, 0, r1, c3, c0, 0   @ load domain access control
        b       1f
        .align  5                       @ cache line aligned
1:      mcr     p15, 0, r0, c1, c0, 0   @ load control register
        mrc     p15, 0, r0, c1, c0, 0   @ and read it back to
        sub     pc, lr, r0, lsr #32     @ properly flush pipeline
#endif
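        /*
         * The "sub pc, lr, r0, lsr #32" above is a return (LSR #32
         * always yields zero, so pc := lr - 0) with a twist: making
         * the result depend on r0 forces the mrc read-back of the
         * control register to complete before the branch, so the
         * MMU/cache enable has taken effect by the time execution
         * continues at the caller.
         */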
#define PROC_ENTRY_SIZE (4*5)
/*
 * Here follow the relocatable cache support functions for the
 * various processors.  This is a generic hook for locating an
 * entry and jumping to an instruction at the specified offset
 * from the start of the block.  Please note this is all position
 * independent code.
 */
call_cache_fn:  adr     r12, proc_types
#ifdef CONFIG_CPU_CP15
        mrc     p15, 0, r9, c0, c0      @ get processor ID
#else
        ldr     r9, =CONFIG_PROCESSOR_ID
#endif
1:      ldr     r1, [r12, #0]           @ get value
        ldr     r2, [r12, #4]           @ get mask
        eor     r1, r1, r9              @ (real ^ match)
        tst     r1, r2                  @       & mask
 ARM(   addeq   pc, r12, r3     )       @ call cache function
 THUMB( addeq   r12, r3         )
 THUMB( moveq   pc, r12         )       @ call cache function
        add     r12, r12, #PROC_ENTRY_SIZE
        b       1b
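/*
 * A note on the dispatch above: r3 is the byte offset into a
 * proc_types entry that the caller selected (8 for the 'on' slot,
 * 12 for 'off', 16 for 'flush', following the two ID words), so
 * "addeq pc, r12, r3" branches into the matching entry's slot,
 * which in turn is a branch to the real routine.
 */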
/*
 * Table for cache operations.  This is basically:
 *   - CPU ID match
 *   - CPU ID mask
 *   - 'cache on' method instruction
 *   - 'cache off' method instruction
 *   - 'cache flush' method instruction
 *
 * We match an entry using: ((real_id ^ match) & mask) == 0
 *
 * Writethrough caches generally only need 'on' and 'off'
 * methods.  Writeback caches _must_ have the flush method
 * defined.
 */
        .type   proc_types,#object
proc_types:
        .word   0x41560600              @ ARM6/610
        W(b)    __arm6_mmu_cache_off    @ works, but slow
        W(b)    __arm6_mmu_cache_off
@       b       __arm6_mmu_cache_on     @ untested
@       b       __arm6_mmu_cache_off
@       b       __armv3_mmu_cache_flush

        .word   0x00000000              @ old ARM ID
        .word   0x41007000              @ ARM7/710
        W(b)    __arm7_mmu_cache_off
        W(b)    __arm7_mmu_cache_off

        .word   0x41807200              @ ARM720T (writethrough)
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off

        .word   0x41007400              @ ARM74x
        W(b)    __armv3_mpu_cache_on
        W(b)    __armv3_mpu_cache_off
        W(b)    __armv3_mpu_cache_flush

        .word   0x41009400              @ ARM94x
        W(b)    __armv4_mpu_cache_on
        W(b)    __armv4_mpu_cache_off
        W(b)    __armv4_mpu_cache_flush

        .word   0x41069260              @ ARM926EJ-S (v5TEJ)
        W(b)    __arm926ejs_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv5tej_mmu_cache_flush

        .word   0x00007000              @ ARM7 IDs
        @ Everything from here on will be the new ID system.

        .word   0x4401a100              @ sa110 / sa1100
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv4_mmu_cache_flush

        .word   0x6901b110              @ sa1110
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv4_mmu_cache_flush

        .word   0xffffff00              @ PXA9xx
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv4_mmu_cache_flush

        .word   0x56158000              @ PXA168
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv5tej_mmu_cache_flush

        .word   0x56050000              @ Feroceon
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv5tej_mmu_cache_flush
#ifdef CONFIG_CPU_FEROCEON_OLD_ID
        /* this conflicts with the standard ARMv5TE entry */
        .long   0x41009260              @ Old Feroceon
        b       __armv4_mmu_cache_on
        b       __armv4_mmu_cache_off
        b       __armv5tej_mmu_cache_flush
#endif
        .word   0x66015261              @ FA526
        W(b)    __fa526_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __fa526_cache_flush
        @ These match on the architecture ID

        .word   0x00020000              @ ARMv4T
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv4_mmu_cache_flush

        .word   0x00050000              @ ARMv5TE
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv4_mmu_cache_flush

        .word   0x00060000              @ ARMv5TEJ
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv5tej_mmu_cache_flush

        .word   0x0007b000              @ ARMv6
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv6_mmu_cache_flush

        .word   0x000f0000              @ new CPU Id
        W(b)    __armv7_mmu_cache_on
        W(b)    __armv7_mmu_cache_off
        W(b)    __armv7_mmu_cache_flush

        .word   0                       @ unrecognised type
        .size   proc_types, . - proc_types
        /*
         * If you get a "non-constant expression in ".if" statement"
         * error from the assembler on this line, check that you have
         * not accidentally written a "b" instruction where you should
         * have written W(b).
         */
        .if (. - proc_types) % PROC_ENTRY_SIZE != 0
        .error "The size of one or more proc_types entries is wrong."
        .endif
/*
 * Turn off the cache and MMU.  ARMv3 does not support
 * reading the control register, but ARMv4 does.
 *
 * On exit,
 *   r0, r1, r2, r3, r9, r12 corrupted
 * This routine must preserve:
 *   r4, r7, r8
 */
        .align  5
cache_off:      mov     r3, #12         @ cache_off function
        b       call_cache_fn
__armv4_mpu_cache_off:
        mrc     p15, 0, r0, c1, c0
        bic     r0, r0, #0x000d
        mcr     p15, 0, r0, c1, c0      @ turn MPU and cache off
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4  @ drain write buffer
        mcr     p15, 0, r0, c7, c6, 0   @ flush D-Cache
        mcr     p15, 0, r0, c7, c5, 0   @ flush I-Cache
        mov     pc, lr
__armv3_mpu_cache_off:
        mrc     p15, 0, r0, c1, c0
        bic     r0, r0, #0x000d
        mcr     p15, 0, r0, c1, c0, 0   @ turn MPU and cache off
        mov     r0, #0
        mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
        mov     pc, lr
__armv4_mmu_cache_off:
#ifdef CONFIG_MMU
        mrc     p15, 0, r0, c1, c0
        bic     r0, r0, #0x000d
        mcr     p15, 0, r0, c1, c0      @ turn MMU and cache off
        mov     r0, #0
        mcr     p15, 0, r0, c7, c7      @ invalidate whole cache v4
        mcr     p15, 0, r0, c8, c7      @ invalidate whole TLB v4
#endif
        mov     pc, lr
__armv7_mmu_cache_off:
        mrc     p15, 0, r0, c1, c0
#ifdef CONFIG_MMU
        bic     r0, r0, #0x000d
#else
        bic     r0, r0, #0x000c
#endif
        mcr     p15, 0, r0, c1, c0      @ turn MMU and cache off
        mov     r12, lr
        bl      __armv7_mmu_cache_flush
        mov     r0, #0
#ifdef CONFIG_MMU
        mcr     p15, 0, r0, c8, c7, 0   @ invalidate whole TLB
#endif
        mcr     p15, 0, r0, c7, c5, 6   @ invalidate BTC
        mcr     p15, 0, r0, c7, c10, 4  @ DSB
        mcr     p15, 0, r0, c7, c5, 4   @ ISB
        mov     pc, r12
__arm6_mmu_cache_off:
        mov     r0, #0x00000030         @ ARM6 control reg.
        b       __armv3_mmu_cache_off

__arm7_mmu_cache_off:
        mov     r0, #0x00000070         @ ARM7 control reg.
        b       __armv3_mmu_cache_off

__armv3_mmu_cache_off:
        mcr     p15, 0, r0, c1, c0, 0   @ turn MMU and cache off
        mov     r0, #0
        mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
        mcr     p15, 0, r0, c5, c0, 0   @ invalidate whole TLB v3
        mov     pc, lr
/*
 * Clean and flush the cache to maintain consistency.
 *
 * On exit,
 *   r1, r2, r3, r9, r10, r11, r12 corrupted
 * This routine must preserve:
 *   r4, r6, r7, r8
 */
        .align  5
cache_clean_flush:
        mov     r3, #16
        b       call_cache_fn

__armv4_mpu_cache_flush:
        mcr     p15, 0, ip, c7, c6, 0   @ invalidate D cache
        mov     r1, #7 << 5             @ 8 segments
1:      orr     r3, r1, #63 << 26       @ 64 entries
2:      mcr     p15, 0, r3, c7, c14, 2  @ clean & invalidate D index
        subs    r3, r3, #1 << 26
        bcs     2b                      @ entries 63 to 0
        subs    r1, r1, #1 << 5
        bcs     1b                      @ segments 7 to 0

        teq     r2, #0
        mcrne   p15, 0, ip, c7, c5, 0   @ invalidate I cache
        mcr     p15, 0, ip, c7, c10, 4  @ drain WB
        mov     pc, lr
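        /*
         * The index value written to c7, c14, 2 encodes the set/way
         * target: bits 31..26 select one of 64 cache entries and
         * bits 7..5 one of 8 segments, so the two nested countdowns
         * above walk every line of the D cache, cleaning and
         * invalidating each in turn.
         */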
__fa526_cache_flush:
        mov     r1, #0
        mcr     p15, 0, r1, c7, c14, 0  @ clean and invalidate D cache
        mcr     p15, 0, r1, c7, c5, 0   @ flush I cache
        mcr     p15, 0, r1, c7, c10, 4  @ drain WB
        mov     pc, lr
__armv6_mmu_cache_flush:
        mov     r1, #0
        mcr     p15, 0, r1, c7, c14, 0  @ clean+invalidate D
        mcr     p15, 0, r1, c7, c5, 0   @ invalidate I+BTB
        mcr     p15, 0, r1, c7, c15, 0  @ clean+invalidate unified
        mcr     p15, 0, r1, c7, c10, 4  @ drain WB
        mov     pc, lr
__armv7_mmu_cache_flush:
        mrc     p15, 0, r10, c0, c1, 5  @ read ID_MMFR1
        tst     r10, #0xf << 16         @ hierarchical cache (ARMv7)
        mov     r10, #0
        beq     hierarchical
        mcr     p15, 0, r10, c7, c14, 0 @ clean+invalidate D
        b       iflush
hierarchical:
        mcr     p15, 0, r10, c7, c10, 5 @ DMB
        stmfd   sp!, {r0-r7, r9-r11}
        mrc     p15, 1, r0, c0, c0, 1   @ read clidr
        ands    r3, r0, #0x7000000      @ extract loc from clidr
        mov     r3, r3, lsr #23         @ left align loc bit field
        beq     finished                @ if loc is 0, then no need to clean
        mov     r10, #0                 @ start clean at cache level 0
loop1:
        add     r2, r10, r10, lsr #1    @ work out 3x current cache level
        mov     r1, r0, lsr r2          @ extract cache type bits from clidr
        and     r1, r1, #7              @ mask off the bits for current cache only
        cmp     r1, #2                  @ see what cache we have at this level
        blt     skip                    @ skip if no cache, or just i-cache
        mcr     p15, 2, r10, c0, c0, 0  @ select current cache level in cssr
        mcr     p15, 0, r10, c7, c5, 4  @ isb to sync the new cssr & csidr
        mrc     p15, 1, r1, c0, c0, 0   @ read the new csidr
        and     r2, r1, #7              @ extract the length of the cache lines
        add     r2, r2, #4              @ add 4 (line length offset)
        ldr     r4, =0x3ff
        ands    r4, r4, r1, lsr #3      @ find maximum number of the way size
        clz     r5, r4                  @ find bit position of way size increment
        ldr     r7, =0x7fff
        ands    r7, r7, r1, lsr #13     @ extract maximum number of the index size
loop2:
        mov     r9, r4                  @ create working copy of max way size
loop3:
 ARM(   orr     r11, r10, r9, lsl r5    )       @ factor way and cache number into r11
 ARM(   orr     r11, r11, r7, lsl r2    )       @ factor index number into r11
 THUMB( lsl     r6, r9, r5              )
 THUMB( orr     r11, r10, r6            )       @ factor way and cache number into r11
 THUMB( lsl     r6, r7, r2              )
 THUMB( orr     r11, r11, r6            )       @ factor index number into r11
        mcr     p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
        subs    r9, r9, #1              @ decrement the way
        bge     loop3
        subs    r7, r7, #1              @ decrement the index
        bge     loop2
skip:
        add     r10, r10, #2            @ increment cache number
        cmp     r3, r10
        bgt     loop1
finished:
        ldmfd   sp!, {r0-r7, r9-r11}
        mov     r10, #0                 @ switch back to cache level 0
        mcr     p15, 2, r10, c0, c0, 0  @ select current cache level in cssr
iflush:
        mcr     p15, 0, r10, c7, c10, 4 @ DSB
        mcr     p15, 0, r10, c7, c5, 0  @ invalidate I+BTB
        mcr     p15, 0, r10, c7, c10, 4 @ DSB
        mcr     p15, 0, r10, c7, c5, 4  @ ISB
        mov     pc, lr
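        /*
         * In the set/way loop above, the value written by the
         * "mcr ... c7, c14, 2" is built as the ARMv7 ARM specifies:
         * the cache level sits in bits [3:1] (r10), the way number is
         * shifted left by 32-log2(ways) (the clz result in r5), and
         * the set number by log2(linelen) (r2).  Iterating all ways
         * and sets at every level up to LoC cleans the entire
         * data/unified cache hierarchy.
         */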
__armv5tej_mmu_cache_flush:
1:      mrc     p15, 0, r15, c7, c14, 3 @ test,clean,invalidate D cache
        bne     1b
        mcr     p15, 0, r0, c7, c5, 0   @ flush I cache
        mcr     p15, 0, r0, c7, c10, 4  @ drain WB
        mov     pc, lr
__armv4_mmu_cache_flush:
        mov     r2, #64*1024            @ default: 32K dcache size (*2)
        mov     r11, #32                @ default: 32 byte line size
        mrc     p15, 0, r3, c0, c0, 1   @ read cache type
        teq     r3, r9                  @ cache ID register present?
        beq     no_cache_id
        mov     r1, r3, lsr #18
        and     r1, r1, #7
        mov     r2, #1024
        mov     r2, r2, lsl r1          @ base dcache size *2
        tst     r3, #1 << 14            @ test M bit
        addne   r2, r2, r2, lsr #1      @ +1/2 size if M == 1
        mov     r3, r3, lsr #12
        and     r3, r3, #3
        mov     r11, #8
        mov     r11, r11, lsl r3        @ cache line size in bytes
no_cache_id:
        mov     r1, pc
        bic     r1, r1, #63             @ align to longest cache line
        add     r2, r1, r2
1:
 ARM(   ldr     r3, [r1], r11   )       @ s/w flush D cache
 THUMB( ldr     r3, [r1]        )       @ s/w flush D cache
 THUMB( add     r1, r1, r11     )
        teq     r1, r2
        bne     1b

        mcr     p15, 0, r1, c7, c5, 0   @ flush I cache
        mcr     p15, 0, r1, c7, c6, 0   @ flush D cache
        mcr     p15, 0, r1, c7, c10, 4  @ drain WB
        mov     pc, lr
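        /*
         * This is the ARMv4 "software flush": with no
         * clean-by-set/way operation available, loading one word per
         * line from a dummy region (here: our own code) as large as
         * the D cache forces every dirty line to be evicted and
         * written back, after which the whole cache can safely be
         * invalidated.
         */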
__armv3_mmu_cache_flush:
__armv3_mpu_cache_flush:
        mov     r1, #0
        mcr     p15, 0, r1, c7, c0, 0   @ invalidate whole cache v3
        mov     pc, lr
/*
 * Various debugging routines for printing hex characters and
 * memory, which again must be relocatable.
 */
        .align  2
        .type   phexbuf,#object
phexbuf:        .space  12
        .size   phexbuf, . - phexbuf

@ phex corrupts {r0, r1, r2, r3}
phex:   adr     r3, phexbuf
@ puts corrupts {r0, r1, r2, r3}
puts:   loadsp  r3, r1
1:      ldrb    r2, [r0], #1
@ putc corrupts {r0, r1, r2, r3}
@ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
memdump:        mov     r12, r0
2:      mov     r0, r11, lsl #2
        ldr     r0, [r12, r11, lsl #2]
reloc_code_end:

        .align
        .section ".stack", "aw", %nobits
.L_user_stack:  .space  4096
.L_user_stack_end: