 * This file contains miscellaneous low-level functions.
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.

#include <linux/config.h>
#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
 * Returns (address we're running at) - (address we were linked at)
 * for use before the text and data are mapped to KERNELBASE.

 * add_reloc_offset(x) returns x + reloc_offset().
_GLOBAL(add_reloc_offset)

 * sub_reloc_offset(x) returns x - reloc_offset().
_GLOBAL(sub_reloc_offset)
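/*
 * Usage sketch (illustrative, not from this file): before the kernel
 * runs at its linked address, static pointers must be fixed up by
 * hand.  A hypothetical early-boot caller might do:
 *
 *	struct cpu_spec *s;
 *	s = (struct cpu_spec *)add_reloc_offset((unsigned long)cpu_specs);
 *
 * assuming the one-argument form described above.
 */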
 * reloc_got2 runs through the .got2 section adding an offset
 * to each entry.
	lis	r7,__got2_start@ha
	addi	r7,r7,__got2_start@l
	addi	r8,r8,__got2_end@l

 * called with r3 = data offset and r4 = CPU number
_GLOBAL(identify_cpu)
	addis	r8,r3,cpu_specs@ha
	addi	r8,r8,cpu_specs@l
	lwz	r5,CPU_SPEC_PVR_MASK(r8)
	lwz	r6,CPU_SPEC_PVR_VALUE(r8)
	addi	r8,r8,CPU_SPEC_ENTRY_SIZE
	addis	r6,r3,cur_cpu_spec@ha
	addi	r6,r6,cur_cpu_spec@l
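/*
 * In C terms the match loop above is roughly the following (a sketch;
 * field names follow the CPU_SPEC_* asm-offsets used above, and the
 * PVR read itself is elided from this excerpt):
 *
 *	struct cpu_spec *s = cpu_specs;
 *	while ((pvr & s->pvr_mask) != s->pvr_value)
 *		s++;
 *	cur_cpu_spec[cpu] = s;
 */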
 * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
 * and writes nops over sections of code that don't apply for this cpu.
 * r3 = data offset (not changed)
_GLOBAL(do_cpu_ftr_fixups)
	/* Get CPU 0 features */
	addis	r6,r3,cur_cpu_spec@ha
	addi	r6,r6,cur_cpu_spec@l
	lwz	r4,CPU_SPEC_FEATURES(r4)
	/* Get the fixup table */
	addis	r6,r3,__start___ftr_fixup@ha
	addi	r6,r6,__start___ftr_fixup@l
	addis	r7,r3,__stop___ftr_fixup@ha
	addi	r7,r7,__stop___ftr_fixup@l
	lwz	r8,-16(r6)	/* mask */
	lwz	r9,-12(r6)	/* value */
	lwz	r8,-8(r6)	/* section begin */
	lwz	r9,-4(r6)	/* section end */
	/* write nops over the section of code */
	/* todo: if large section, add a branch at the start of it */
	lis	r0,0x60000000@h	/* nop */
	andi.	r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
	dcbst	0,r8		/* suboptimal, but simpler */
	sync			/* additional sync needed on g4 */
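/*
 * Each __ftr_fixup record, as read at offsets -16..-4 above, has the
 * shape below (a sketch: the struct name is hypothetical, but the
 * field order follows the loads above):
 *
 *	struct ftr_fixup_entry {
 *		unsigned long mask;	   CPU feature bits to test
 *		unsigned long value;	   patch unless (features & mask) == value
 *		unsigned long start;	   first insn of the alternative section
 *		unsigned long end;	   one past the last insn
 *	};
 *
 * When an entry does not match, every instruction in [start, end) is
 * overwritten with nop (0x60000000) and pushed out with dcbst (the
 * matching icbi side is elided from this excerpt) so instruction
 * fetch sees the patch.
 */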
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 * Setup function is called with:
 * r4 = ptr to CPU spec (relocated)
_GLOBAL(call_setup_cpu)
	addis	r4,r3,cur_cpu_spec@ha
	addi	r4,r4,cur_cpu_spec@l
	lwz	r5,CPU_SPEC_SETUP(r4)
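/*
 * In C terms the dispatch above is roughly (a sketch; the argument
 * registers are per the comment above, and the setup member is
 * whatever CPU_SPEC_SETUP points at):
 *
 *	struct cpu_spec *s = cur_cpu_spec[cpu];
 *	if (s->cpu_setup)
 *		s->cpu_setup(offset, cpu, s);
 */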
#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)

/* This gets called by via-pmu.c to switch the PLL selection
 * on the 750fx CPU. This function should really be moved to some
 * other place (as most of the cpufreq code in via-pmu should be).
 */
_GLOBAL(low_choose_750fx_pll)
	/* If switching to PLL1, disable HID0:BTIC */

	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1	/* Read the current HID1 value */
	rlwinm	r5,r3,16,15,15	/* Build a HID1:PS bit from the parameter */
	rlwinm	r4,r4,0,16,14	/* Clear out HID1:PS from the value read
				   (rlwimi could have merged these two) */

	/* Store new HID1 image */
	addis	r6,r6,nap_save_hid1@ha
	stw	r4,nap_save_hid1@l(r6)

	/* If switching to PLL0, enable HID0:BTIC */

_GLOBAL(low_choose_7447a_dfs)
	/* Calc new HID1 value */
	insrwi	r4,r3,1,9	/* insert parameter into bit 9 */
#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */
 * Complement the mask on the MSR, then "or" some values on:
 *	_nmask_and_or_msr(nmask, value_to_or)
_GLOBAL(_nmask_and_or_msr)
	mfmsr	r0		/* Get current msr */
	andc	r0,r0,r3	/* And off the bits set in r3 (first parm) */
	or	r0,r0,r4	/* Or on the bits in r4 (second parm) */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
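/*
 * Semantically this is just (a sketch; it must live in assembler
 * because the MSR is not reachable from C):
 *
 *	msr = (msr & ~nmask) | value_to_or;
 */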
#if defined(CONFIG_40x)
	sync			/* Flush to memory before changing mapping */
	isync			/* Flush shadow TLB */
#elif defined(CONFIG_44x)
	/* Load high watermark */
	lis	r4,tlb_44x_hwater@ha
	lwz	r5,tlb_44x_hwater@l(r4)
1:	tlbwe	r3,r3,PPC44x_TLB_PAGEID
#elif defined(CONFIG_FSL_BOOKE)
	/* Invalidate all entries in TLB0 */
	/* Invalidate all entries in TLB1 */
	/* Invalidate all entries in TLB2 */
	/* Invalidate all entries in TLB3 */
#endif /* CONFIG_SMP */
#else	/* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r0,r10,0,17,15	/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26	/* clear DR */
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	stw	r0,0(r9)	/* clear mmu_hash_lock */
#else /* CONFIG_SMP */
#endif /* CONFIG_SMP */
#endif /* ! defined(CONFIG_40x) */
 * Flush MMU TLB for a particular address
#if defined(CONFIG_40x)
/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
 * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
 * the TLB entry.
 */
	tlbwe	r3, r3, TLB_TAG
#elif defined(CONFIG_44x)
	mfspr	r5,SPRN_PID		/* Get PID */
	rlwimi	r4,r5,0,24,31		/* Set TID */
/* There are only 64 TLB entries, so r3 < 64,
 * which means bit 22 is clear.  Since 22 is
 * the V bit in the TLB_PAGEID, loading this
 * value will invalidate the TLB entry.
 */
	tlbwe	r3, r3, PPC44x_TLB_PAGEID
#elif defined(CONFIG_FSL_BOOKE)
	rlwinm	r4, r3, 0, 0, 19
	ori	r5, r4, 0x08	/* TLBSEL = 1 */
	ori	r6, r4, 0x10	/* TLBSEL = 2 */
	ori	r7, r4, 0x18	/* TLBSEL = 3 */
#if defined(CONFIG_SMP)
#endif /* CONFIG_SMP */
#else	/* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r0,r10,0,17,15	/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26	/* clear DR */
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	stw	r0,0(r9)	/* clear mmu_hash_lock */
#else /* CONFIG_SMP */
#endif /* CONFIG_SMP */
#endif /* ! CONFIG_40x */
 * Flush instruction cache.
 * This is a no-op on the 601.
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_8xx)
	mtspr	SPRN_IC_CST, r5
#elif defined(CONFIG_4xx)
#elif defined(CONFIG_FSL_BOOKE)
	ori	r3,r3,L1CSR0_CFI|L1CSR0_CLFC
	/* msync; isync recommended here */
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	ori	r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
	rlwinm	r3,r3,16,16,31
	beqlr			/* for 601, do nothing */
	/* 603/604 processor - use invalidate-all bit in HID0 */
#endif /* CONFIG_8xx/4xx */
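/*
 * Caller's view (a sketch): no arguments, the whole I-cache is
 * invalidated, so after patching kernel text one would do e.g.
 *
 *	apply_patches();		   hypothetical helper
 *	flush_instruction_cache();
 *
 * On the 601 the unified cache makes this unnecessary, hence the
 * early return.
 */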
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
_GLOBAL(flush_icache_range)
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	li	r5,L1_CACHE_BYTES-1
	srwi.	r4,r4,L1_CACHE_SHIFT
	addi	r3,r3,L1_CACHE_BYTES
	sync				/* wait for dcbst's to get to ram */
	addi	r6,r6,L1_CACHE_BYTES
	sync				/* additional sync needed on g4 */
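/*
 * Typical use (a sketch): after writing instructions into memory,
 * make them visible to instruction fetch before jumping to them:
 *
 *	memcpy(dst, src, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 *
 * The first loop (dcbst) pushes dirty data to memory; the second loop
 * (its icbi is elided from this excerpt) then drops the stale
 * instruction lines.
 */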
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 *
 * clean_dcache_range(unsigned long start, unsigned long stop)
_GLOBAL(clean_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	srwi.	r4,r4,L1_CACHE_SHIFT
	addi	r3,r3,L1_CACHE_BYTES
	sync				/* wait for dcbst's to get to ram */

 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
_GLOBAL(flush_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	srwi.	r4,r4,L1_CACHE_SHIFT
	addi	r3,r3,L1_CACHE_BYTES
	sync				/* wait for dcbst's to get to ram */

 * Like above, but invalidate the D-cache.  This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 *
 * invalidate_dcache_range(unsigned long start, unsigned long stop)
_GLOBAL(invalidate_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	srwi.	r4,r4,L1_CACHE_SHIFT
	addi	r3,r3,L1_CACHE_BYTES
	sync				/* wait for dcbi's to get to ram */
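/*
 * Rule of thumb for the three range primitives above when managing
 * non-coherent DMA buffers (a sketch, not a formal API contract):
 *
 *	clean_dcache_range(start, stop);	   before the device reads memory
 *	invalidate_dcache_range(start, stop);	   before the CPU reads DMA'd-in data
 *	flush_dcache_range(start, stop);	   when both directions apply
 */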
#ifdef CONFIG_NOT_COHERENT_CACHE
 * 40x cores have 8K or 16K dcache and 32 byte line size.
 * 44x has a 32K dcache and 32 byte line size.
 * 8xx has 1, 2, 4, 8K variants.
 * For now, cover the worst case of the 44x.
 * Must be called with external interrupts disabled.
#define CACHE_NWAYS	64
#define CACHE_NLINES	16

_GLOBAL(flush_dcache_all)
	li	r4, (2 * CACHE_NWAYS * CACHE_NLINES)
1:	lwz	r3, 0(r5)		/* Load one word from every line */
	addi	r5, r5, L1_CACHE_BYTES
#endif /* CONFIG_NOT_COHERENT_CACHE */
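/*
 * flush_dcache_all works by displacement: loading one word from every
 * line-sized step of a known-cacheable region forces the cache to
 * evict, and hence write back, whatever it currently holds; the count
 * walks twice the cache's capacity for good measure.  In C terms
 * (a sketch; "base" stands for the safe address the full routine picks):
 *
 *	for (i = 0; i < 2 * CACHE_NWAYS * CACHE_NLINES; i++)
 *		(void)*(volatile unsigned long *)(base + i * L1_CACHE_BYTES);
 */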
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 *
 *	void __flush_dcache_icache(void *page)
_GLOBAL(__flush_dcache_icache)
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	rlwinm	r3,r3,0,0,19		/* Get page base address */
	li	r4,4096/L1_CACHE_BYTES	/* Number of lines in a page */
0:	dcbst	0,r3			/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	addi	r6,r6,L1_CACHE_BYTES
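/*
 * The rlwinm above is page-base masking: rotate by 0 and keep bits
 * 0-19 (big-endian numbering), clearing the low 12 bits.  Equivalent
 * C (sketch):
 *
 *	addr &= ~(unsigned long)(4096 - 1);
 */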
 * Flush a particular page from the data cache to RAM, identified
 * by its physical address.  We turn off the MMU so we can just use
 * the physical address (this may be a highmem page without a kernel
 * mapping).
 *
 *	void __flush_dcache_icache_phys(unsigned long physaddr)
_GLOBAL(__flush_dcache_icache_phys)
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	rlwinm	r0,r10,0,28,26		/* clear DR */
	rlwinm	r3,r3,0,0,19		/* Get page base address */
	li	r4,4096/L1_CACHE_BYTES	/* Number of lines in a page */
0:	dcbst	0,r3			/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	addi	r6,r6,L1_CACHE_BYTES
	mtmsr	r10			/* restore DR */
 * Clear pages using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced).  This only works on cacheable memory.
 *
 * void clear_pages(void *page, int order);
	li	r0,4096/L1_CACHE_BYTES
	addi	r3,r3,L1_CACHE_BYTES
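/*
 * The dcbz trick (a sketch): establishing each destination line as
 * zero directly in the cache avoids ever reading the old contents
 * from memory.  The loop amounts to
 *
 *	for (i = 0; i < (4096 << order) / L1_CACHE_BYTES; i++)
 *		dcbz(page + i * L1_CACHE_BYTES);	   zero one cache line
 *
 * where dcbz() stands in for the machine instruction and the order
 * scaling of the count is elided from this excerpt.
 */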
 * Copy a whole page.  We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache).  This requires that the destination
 * is cacheable.
#define COPY_16_BYTES \

	/* don't use prefetch on 8xx */
	li	r0,4096/L1_CACHE_BYTES
#else	/* not 8xx, we can prefetch */
#if MAX_COPY_PREFETCH > 1
	li	r0,MAX_COPY_PREFETCH
	addi	r11,r11,L1_CACHE_BYTES
#else /* MAX_COPY_PREFETCH == 1 */
	li	r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
	li	r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
#if L1_CACHE_BYTES >= 32
#if L1_CACHE_BYTES >= 64
#if L1_CACHE_BYTES >= 128
	crnot	4*cr0+eq,4*cr0+eq
	li	r0,MAX_COPY_PREFETCH
#endif /* CONFIG_8xx */
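/*
 * The copy loop software-pipelines its prefetch (a sketch of the
 * structure; details vary with MAX_COPY_PREFETCH): a first loop
 * touches MAX_COPY_PREFETCH source lines ahead, the main loop of
 * 4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH iterations keeps fetching
 * one stride ahead while it copies, and the final MAX_COPY_PREFETCH
 * lines are copied with no prefetch left to issue:
 *
 *	for (i = 0; i < MAX_COPY_PREFETCH; i++)
 *		prefetch(src + i * L1_CACHE_BYTES);
 *	for (i = 0; i < NLINES - MAX_COPY_PREFETCH; i++)
 *		prefetch one line ahead; dcbz the dst line; COPY_16_BYTES xN;
 *	copy the last MAX_COPY_PREFETCH lines without prefetch;
 */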
 * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
 * void atomic_set_mask(atomic_t mask, atomic_t *addr)
_GLOBAL(atomic_clear_mask)
_GLOBAL(atomic_set_mask)

 * I/O string operations
 *
 * insb(port, buf, len)
 * outsb(port, buf, len)
 * insw(port, buf, len)
 * outsw(port, buf, len)
 * insl(port, buf, len)
 * outsl(port, buf, len)
 * insw_ns(port, buf, len)
 * outsw_ns(port, buf, len)
 * insl_ns(port, buf, len)
 * outsl_ns(port, buf, len)
 *
 * The *_ns versions don't do byte-swapping.
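/*
 * PPC is big-endian while PCI I/O is little-endian, so the word/long
 * variants byte-swap each item on the way through; the *_ns ("no
 * swap") variants move raw data.  For an IDE data port, for example
 * (a sketch):
 *
 *	insw_ns(port, buffer, count);	   raw 16-bit stream, unswapped
 *	insw(port, buffer, count);	   each halfword swapped to cpu order
 */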
_GLOBAL(__ide_mm_insw)
_GLOBAL(__ide_mm_outsw)
_GLOBAL(__ide_mm_insl)
_GLOBAL(__ide_mm_outsl)
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 *
 * R3/R4 holds the 64-bit value
 *
 *  ashrdi3: arithmetic right shift (sign propagation)
 *  lshrdi3: logical right shift
 *  ashldi3: left shift

_GLOBAL(__ashrdi3)
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2

_GLOBAL(__ashldi3)
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2

_GLOBAL(__lshrdi3)
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
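/*
 * The same decomposition in C, for __lshrdi3 (a sketch; msw/lsw are
 * the two 32-bit halves, count is 0-63):
 *
 *	t1  = (count == 0 || count > 31) ? 0 : msw << (32 - count);
 *	t2  = (count < 32) ? 0 : msw >> (count - 32);
 *	lsw = ((count > 31) ? 0 : lsw >> count) | t1 | t2;
 *	msw = (count > 31) ? 0 : msw >> count;
 *
 * The assembler needs no branches or compares: srw/slw take their
 * count modulo 64 and yield 0 for counts 32-63, so the selects above
 * fall out of the shifts themselves.
 */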
	mr	r3,r1		/* Close enough */

 * These are used in the alignment trap handler when emulating
 * single-precision loads and stores.
 * We restore and save the fpscr so the task gets the same result
 * and exceptions as if the cpu had performed the load or store.

#ifdef CONFIG_PPC_FPU
	lfd	0,-4(r5)	/* load up fpscr value */
	mffs	0		/* save new fpscr value */
	lfd	0,-4(r5)	/* load up fpscr value */
	mffs	0		/* save new fpscr value */
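/*
 * Mechanism sketch: r5 points at the saved fpscr word, so the
 * lfd -4(r5) pulls it in as the low half of an FP register for mtfsf;
 * with the task's rounding and exception modes live, the FP load/store
 * pair performs the single<->double conversion in hardware; mffs then
 * captures any exception bits the operation raised so they can be
 * stored back for the task.
 */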
 * Create a kernel thread
 *   kernel_thread(fn, arg, flags)
_GLOBAL(kernel_thread)
	mr	r30,r3		/* function */
	mr	r31,r4		/* argument */
	ori	r3,r5,CLONE_VM	/* flags */
	oris	r3,r3,CLONE_UNTRACED>>16
	li	r4,0		/* new sp (unused) */
	cmpwi	0,r3,0		/* parent or child? */
	bne	1f		/* return if parent */
	li	r0,0		/* make top-level stack frame */
	mtlr	r30		/* fn addr in lr */
	mr	r3,r31		/* load arg and call fn */
	li	r0,__NR_exit	/* exit if function returns */
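/*
 * In effect (a sketch; do_clone() stands in for the syscall sequence
 * the asm performs):
 *
 *	pid = do_clone(flags | CLONE_VM | CLONE_UNTRACED);
 *	if (pid != 0)
 *		return pid;		   parent path
 *	sys_exit(fn(arg));		   child: call fn, exit on return
 */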
 * This routine is just here to keep GCC happy - sigh...