2 * arch/sparc64/mm/init.c
4 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 #include <linux/module.h>
9 #include <linux/kernel.h>
10 #include <linux/sched.h>
11 #include <linux/string.h>
12 #include <linux/init.h>
13 #include <linux/bootmem.h>
15 #include <linux/hugetlb.h>
16 #include <linux/initrd.h>
17 #include <linux/swap.h>
18 #include <linux/pagemap.h>
19 #include <linux/poison.h>
21 #include <linux/seq_file.h>
22 #include <linux/kprobes.h>
23 #include <linux/cache.h>
24 #include <linux/sort.h>
25 #include <linux/percpu.h>
26 #include <linux/memblock.h>
27 #include <linux/mmzone.h>
28 #include <linux/gfp.h>
31 #include <asm/system.h>
33 #include <asm/pgalloc.h>
34 #include <asm/pgtable.h>
35 #include <asm/oplib.h>
36 #include <asm/iommu.h>
38 #include <asm/uaccess.h>
39 #include <asm/mmu_context.h>
40 #include <asm/tlbflush.h>
42 #include <asm/starfire.h>
44 #include <asm/spitfire.h>
45 #include <asm/sections.h>
47 #include <asm/hypervisor.h>
49 #include <asm/mdesc.h>
50 #include <asm/cpudata.h>
55 unsigned long kern_linear_pte_xor[2] __read_mostly;
57 /* A bitmap, one bit for every 256MB of physical memory. If the bit
58 * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else
59 * if set we should use a 256MB page (via kern_linear_pte_xor[1]).
61 unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
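/* For reference: a physical address selects its bit simply by its 256MB
 * chunk number, i.e. bit index = paddr >> 28, which is exactly the
 * computation mark_kpte_bitmap() below performs with shift_256MB.
 */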
63 #ifndef CONFIG_DEBUG_PAGEALLOC
64 /* A special kernel TSB for 4MB and 256MB linear mappings.
65 * Space is allocated for this right after the trap table
66 * in arch/sparc64/kernel/head.S
68 extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
73 static struct linux_prom64_registers pavail[MAX_BANKS] __devinitdata;
74 static int pavail_ents __devinitdata;
76 static int cmp_p64(const void *a, const void *b)
78 const struct linux_prom64_registers *x = a, *y = b;
80 if (x->phys_addr > y->phys_addr)
82 if (x->phys_addr < y->phys_addr)
87 static void __init read_obp_memory(const char *property,
88 struct linux_prom64_registers *regs,
91 phandle node = prom_finddevice("/memory");
92 int prop_size = prom_getproplen(node, property);
95 ents = prop_size / sizeof(struct linux_prom64_registers);
96 if (ents > MAX_BANKS) {
97 prom_printf("The machine has more %s property entries than "
98 "this kernel can support (%d).\n",
103 ret = prom_getproperty(node, property, (char *) regs, prop_size);
105 prom_printf("Couldn't get %s property from /memory.\n");
109 /* Sanitize what we got from the firmware, by page aligning
112 for (i = 0; i < ents; i++) {
113 unsigned long base, size;
115 base = regs[i].phys_addr;
116 size = regs[i].reg_size;
119 if (base & ~PAGE_MASK) {
120 unsigned long new_base = PAGE_ALIGN(base);
122 size -= new_base - base;
123 if ((long) size < 0L)
128 /* If it is empty, simply get rid of it.
129 * This simplifies the logic of the other
130 * functions that process these arrays.
132 memmove(&regs[i], &regs[i + 1],
133 (ents - i - 1) * sizeof(regs[0]));
138 regs[i].phys_addr = base;
139 regs[i].reg_size = size;
144 sort(regs, ents, sizeof(struct linux_prom64_registers),
148 unsigned long sparc64_valid_addr_bitmap[VALID_ADDR_BITMAP_BYTES /
149 sizeof(unsigned long)];
150 EXPORT_SYMBOL(sparc64_valid_addr_bitmap);
152 /* Kernel physical address base and size in bytes. */
153 unsigned long kern_base __read_mostly;
154 unsigned long kern_size __read_mostly;
156 /* Initial ramdisk setup */
157 extern unsigned long sparc_ramdisk_image64;
158 extern unsigned int sparc_ramdisk_image;
159 extern unsigned int sparc_ramdisk_size;
161 struct page *mem_map_zero __read_mostly;
162 EXPORT_SYMBOL(mem_map_zero);
164 unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
166 unsigned long sparc64_kern_pri_context __read_mostly;
167 unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
168 unsigned long sparc64_kern_sec_context __read_mostly;
170 int num_kernel_image_mappings;
172 #ifdef CONFIG_DEBUG_DCFLUSH
173 atomic_t dcpage_flushes = ATOMIC_INIT(0);
175 atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
179 inline void flush_dcache_page_impl(struct page *page)
181 BUG_ON(tlb_type == hypervisor);
182 #ifdef CONFIG_DEBUG_DCFLUSH
183 atomic_inc(&dcpage_flushes);
186 #ifdef DCACHE_ALIASING_POSSIBLE
187 __flush_dcache_page(page_address(page),
188 ((tlb_type == spitfire) &&
189 page_mapping(page) != NULL));
191 if (page_mapping(page) != NULL &&
192 tlb_type == spitfire)
193 __flush_icache_page(__pa(page_address(page)));
197 #define PG_dcache_dirty PG_arch_1
198 #define PG_dcache_cpu_shift 32UL
199 #define PG_dcache_cpu_mask \
200 ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)
202 #define dcache_dirty_cpu(page) \
203 (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
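/* Worked example (assuming NR_CPUS == 64): roundup_pow_of_two(64) == 64,
 * so PG_dcache_cpu_mask is 0x3f and the id of the cpu that dirtied the
 * page lives in page->flags bits 32..37, while PG_dcache_dirty (PG_arch_1)
 * flags that a deferred D-cache flush is still owed.
 */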
205 static inline void set_dcache_dirty(struct page *page, int this_cpu)
207 unsigned long mask = this_cpu;
208 unsigned long non_cpu_bits;
210 non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
211 mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);
213 __asm__ __volatile__("1:\n\t"
215 "and %%g7, %1, %%g1\n\t"
216 "or %%g1, %0, %%g1\n\t"
217 "casx [%2], %%g7, %%g1\n\t"
219 "bne,pn %%xcc, 1b\n\t"
222 : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
226 static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
228 unsigned long mask = (1UL << PG_dcache_dirty);
230 __asm__ __volatile__("! test_and_clear_dcache_dirty\n"
233 "srlx %%g7, %4, %%g1\n\t"
234 "and %%g1, %3, %%g1\n\t"
236 "bne,pn %%icc, 2f\n\t"
237 " andn %%g7, %1, %%g1\n\t"
238 "casx [%2], %%g7, %%g1\n\t"
240 "bne,pn %%xcc, 1b\n\t"
244 : "r" (cpu), "r" (mask), "r" (&page->flags),
245 "i" (PG_dcache_cpu_mask),
246 "i" (PG_dcache_cpu_shift)
250 static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
252 unsigned long tsb_addr = (unsigned long) ent;
254 if (tlb_type == cheetah_plus || tlb_type == hypervisor)
255 tsb_addr = __pa(tsb_addr);
257 __tsb_insert(tsb_addr, tag, pte);
260 unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
261 unsigned long _PAGE_SZBITS __read_mostly;
263 static void flush_dcache(unsigned long pfn)
267 page = pfn_to_page(pfn);
269 unsigned long pg_flags;
271 pg_flags = page->flags;
272 if (pg_flags & (1UL << PG_dcache_dirty)) {
273 int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
275 int this_cpu = get_cpu();
277 /* This is just to optimize away some function calls
281 flush_dcache_page_impl(page);
283 smp_flush_dcache_page_impl(page, cpu);
285 clear_dcache_dirty_cpu(page, cpu);
292 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
294 struct mm_struct *mm;
296 unsigned long tag, flags;
297 unsigned long tsb_index, tsb_hash_shift;
300 if (tlb_type != hypervisor) {
301 unsigned long pfn = pte_pfn(pte);
309 tsb_index = MM_TSB_BASE;
310 tsb_hash_shift = PAGE_SHIFT;
312 /* Don't insert a non-valid PTE into the TSB, we'll deadlock. */
313 if (!(pte_val(pte) & _PAGE_VALID))
316 spin_lock_irqsave(&mm->context.lock, flags);
318 #ifdef CONFIG_HUGETLB_PAGE
319 if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
320 if ((tlb_type == hypervisor &&
321 (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
322 (tlb_type != hypervisor &&
323 (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
324 tsb_index = MM_TSB_HUGE;
325 tsb_hash_shift = HPAGE_SHIFT;
330 tsb = mm->context.tsb_block[tsb_index].tsb;
331 tsb += ((address >> tsb_hash_shift) &
332 (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
333 tag = (address >> 22UL);
334 tsb_insert(tsb, tag, pte_val(pte));
336 spin_unlock_irqrestore(&mm->context.lock, flags);
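/* Lazy D-cache flushing: when a page cache page has a mapping but is not
 * yet mapped into any user address space, flush_dcache_page() below only
 * records (PG_dcache_dirty, cpu) in page->flags; the real flush is deferred
 * until the page is actually faulted in, at which point flush_dcache()
 * performs it on (or cross-calls to) the recorded cpu and clears the bit.
 */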
339 void flush_dcache_page(struct page *page)
341 struct address_space *mapping;
344 if (tlb_type == hypervisor)
347 /* Do not bother with the expensive D-cache flush if it
348 * is merely the zero page. The 'bigcore' testcase in GDB
349 * causes this case to run millions of times.
351 if (page == ZERO_PAGE(0))
354 this_cpu = get_cpu();
356 mapping = page_mapping(page);
357 if (mapping && !mapping_mapped(mapping)) {
358 int dirty = test_bit(PG_dcache_dirty, &page->flags);
360 int dirty_cpu = dcache_dirty_cpu(page);
362 if (dirty_cpu == this_cpu)
364 smp_flush_dcache_page_impl(page, dirty_cpu);
366 set_dcache_dirty(page, this_cpu);
368 /* We could delay the flush for the !page_mapping
369 * case too. But that case is for exec env/arg
370 * pages and those are 99% certainly going to get
371 * faulted into the tlb (and thus flushed) anyway.
373 flush_dcache_page_impl(page);
379 EXPORT_SYMBOL(flush_dcache_page);
381 void __kprobes flush_icache_range(unsigned long start, unsigned long end)
383 /* Cheetah and Hypervisor platform cpus have coherent I-cache. */
384 if (tlb_type == spitfire) {
387 /* This code only runs on Spitfire cpus so this is
388 * why we can assume _PAGE_PADDR_4U.
390 for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
391 unsigned long paddr, mask = _PAGE_PADDR_4U;
393 if (kaddr >= PAGE_OFFSET)
394 paddr = kaddr & mask;
396 pgd_t *pgdp = pgd_offset_k(kaddr);
397 pud_t *pudp = pud_offset(pgdp, kaddr);
398 pmd_t *pmdp = pmd_offset(pudp, kaddr);
399 pte_t *ptep = pte_offset_kernel(pmdp, kaddr);
401 paddr = pte_val(*ptep) & mask;
403 __flush_icache_page(paddr);
407 EXPORT_SYMBOL(flush_icache_range);
409 void mmu_info(struct seq_file *m)
411 if (tlb_type == cheetah)
412 seq_printf(m, "MMU Type\t: Cheetah\n");
413 else if (tlb_type == cheetah_plus)
414 seq_printf(m, "MMU Type\t: Cheetah+\n");
415 else if (tlb_type == spitfire)
416 seq_printf(m, "MMU Type\t: Spitfire\n");
417 else if (tlb_type == hypervisor)
418 seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
420 seq_printf(m, "MMU Type\t: ???\n");
422 #ifdef CONFIG_DEBUG_DCFLUSH
423 seq_printf(m, "DCPageFlushes\t: %d\n",
424 atomic_read(&dcpage_flushes));
426 seq_printf(m, "DCPageFlushesXC\t: %d\n",
427 atomic_read(&dcpage_flushes_xcall));
428 #endif /* CONFIG_SMP */
429 #endif /* CONFIG_DEBUG_DCFLUSH */
432 struct linux_prom_translation prom_trans[512] __read_mostly;
433 unsigned int prom_trans_ents __read_mostly;
435 unsigned long kern_locked_tte_data;
437 /* The obp translations are saved based on 8k pagesize, since obp can
438 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
439 * HI_OBP_ADDRESS range are handled in ktlb.S.
441 static inline int in_obp_range(unsigned long vaddr)
443 return (vaddr >= LOW_OBP_ADDRESS &&
444 vaddr < HI_OBP_ADDRESS);
447 static int cmp_ptrans(const void *a, const void *b)
449 const struct linux_prom_translation *x = a, *y = b;
451 if (x->virt > y->virt)
453 if (x->virt < y->virt)
458 /* Read OBP translations property into 'prom_trans[]'. */
459 static void __init read_obp_translations(void)
461 int n, node, ents, first, last, i;
463 node = prom_finddevice("/virtual-memory");
464 n = prom_getproplen(node, "translations");
465 if (unlikely(n == 0 || n == -1)) {
466 prom_printf("prom_mappings: Couldn't get size.\n");
469 if (unlikely(n > sizeof(prom_trans))) {
470 prom_printf("prom_mappings: Size %Zd is too big.\n", n);
474 if ((n = prom_getproperty(node, "translations",
475 (char *)&prom_trans[0],
476 sizeof(prom_trans))) == -1) {
477 prom_printf("prom_mappings: Couldn't get property.\n");
481 n = n / sizeof(struct linux_prom_translation);
485 sort(prom_trans, ents, sizeof(struct linux_prom_translation),
488 /* Now kick out all the non-OBP entries. */
489 for (i = 0; i < ents; i++) {
490 if (in_obp_range(prom_trans[i].virt))
494 for (; i < ents; i++) {
495 if (!in_obp_range(prom_trans[i].virt))
500 for (i = 0; i < (last - first); i++) {
501 struct linux_prom_translation *src = &prom_trans[i + first];
502 struct linux_prom_translation *dest = &prom_trans[i];
506 for (; i < ents; i++) {
507 struct linux_prom_translation *dest = &prom_trans[i];
508 dest->virt = dest->size = dest->data = 0x0UL;
511 prom_trans_ents = last - first;
513 if (tlb_type == spitfire) {
514 /* Clear diag TTE bits. */
515 for (i = 0; i < prom_trans_ents; i++)
516 prom_trans[i].data &= ~0x0003fe0000000000UL;
519 /* Force execute bit on. */
520 for (i = 0; i < prom_trans_ents; i++)
521 prom_trans[i].data |= (tlb_type == hypervisor ?
522 _PAGE_EXEC_4V : _PAGE_EXEC_4U);
525 static void __init hypervisor_tlb_lock(unsigned long vaddr,
529 unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);
532 prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
533 "errors with %lx\n", vaddr, 0, pte, mmu, ret);
538 static unsigned long kern_large_tte(unsigned long paddr);
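/* remap_kernel() locks the kernel image into the TLB using
 * num_kernel_image_mappings consecutive 4MB (0x400000 byte) translations,
 * installed in both the I- and D-TLB: via sun4v_mmu_map_perm_addr() on
 * hypervisor platforms, or via OBP dtlb/itlb load calls otherwise.
 */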
540 static void __init remap_kernel(void)
542 unsigned long phys_page, tte_vaddr, tte_data;
543 int i, tlb_ent = sparc64_highest_locked_tlbent();
545 tte_vaddr = (unsigned long) KERNBASE;
546 phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
547 tte_data = kern_large_tte(phys_page);
549 kern_locked_tte_data = tte_data;
551 /* Now lock us into the TLBs via Hypervisor or OBP. */
552 if (tlb_type == hypervisor) {
553 for (i = 0; i < num_kernel_image_mappings; i++) {
554 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
555 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
556 tte_vaddr += 0x400000;
557 tte_data += 0x400000;
560 for (i = 0; i < num_kernel_image_mappings; i++) {
561 prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
562 prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
563 tte_vaddr += 0x400000;
564 tte_data += 0x400000;
566 sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
568 if (tlb_type == cheetah_plus) {
569 sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
570 CTX_CHEETAH_PLUS_NUC);
571 sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
572 sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
577 static void __init inherit_prom_mappings(void)
579 /* Now fixup OBP's idea about where we really are mapped. */
580 printk("Remapping the kernel... ");
585 void prom_world(int enter)
588 set_fs((mm_segment_t) { get_thread_current_ds() });
590 __asm__ __volatile__("flushw");
593 void __flush_dcache_range(unsigned long start, unsigned long end)
597 if (tlb_type == spitfire) {
600 for (va = start; va < end; va += 32) {
601 spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
605 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
608 for (va = start; va < end; va += 32)
609 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
613 "i" (ASI_DCACHE_INVALIDATE));
616 EXPORT_SYMBOL(__flush_dcache_range);
618 /* get_new_mmu_context() uses "cache + 1". */
619 DEFINE_SPINLOCK(ctx_alloc_lock);
620 unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
621 #define MAX_CTX_NR (1UL << CTX_NR_BITS)
622 #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
623 DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
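/* A context value packs a "version" in the bits above CTX_NR_BITS
 * (CTX_VERSION_MASK) and the context number itself in the low CTX_NR_BITS
 * bits.  When every number in the current version has been handed out,
 * get_new_mmu_context() bumps the version, resets mmu_context_bmap and
 * starts handing out numbers again; stale contexts are then caught by the
 * version mis-match tests in mmu_context.h.
 */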
625 /* Caller does TLB context flushing on local CPU if necessary.
626 * The caller also ensures that CTX_VALID(mm->context) is false.
628 * We must be careful about boundary cases so that we never
629 * let the user have CTX 0 (nucleus), and never use a CTX
630 * version of zero (otherwise NO_CONTEXT would not be caught
631 * by the version mis-match tests in mmu_context.h).
633 * Always invoked with interrupts disabled.
635 void get_new_mmu_context(struct mm_struct *mm)
637 unsigned long ctx, new_ctx;
638 unsigned long orig_pgsz_bits;
642 spin_lock_irqsave(&ctx_alloc_lock, flags);
643 orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
644 ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
645 new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
647 if (new_ctx >= (1 << CTX_NR_BITS)) {
648 new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
649 if (new_ctx >= ctx) {
651 new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
654 new_ctx = CTX_FIRST_VERSION;
656 /* Don't call memset, for 16 entries that's just
659 mmu_context_bmap[0] = 3;
660 mmu_context_bmap[1] = 0;
661 mmu_context_bmap[2] = 0;
662 mmu_context_bmap[3] = 0;
663 for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
664 mmu_context_bmap[i + 0] = 0;
665 mmu_context_bmap[i + 1] = 0;
666 mmu_context_bmap[i + 2] = 0;
667 mmu_context_bmap[i + 3] = 0;
673 mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
674 new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
676 tlb_context_cache = new_ctx;
677 mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
678 spin_unlock_irqrestore(&ctx_alloc_lock, flags);
680 if (unlikely(new_version))
681 smp_new_mmu_context_version();
684 static int numa_enabled = 1;
685 static int numa_debug;
687 static int __init early_numa(char *p)
692 if (strstr(p, "off"))
695 if (strstr(p, "debug"))
700 early_param("numa", early_numa);
702 #define numadbg(f, a...) \
703 do { if (numa_debug) \
704 printk(KERN_INFO f, ## a); \
707 static void __init find_ramdisk(unsigned long phys_base)
709 #ifdef CONFIG_BLK_DEV_INITRD
710 if (sparc_ramdisk_image || sparc_ramdisk_image64) {
711 unsigned long ramdisk_image;
713 /* Older versions of the bootloader only supported a
714 * 32-bit physical address for the ramdisk image
715 * location, stored at sparc_ramdisk_image. Newer
716 * SILO versions set sparc_ramdisk_image to zero and
717 * provide a full 64-bit physical address at
718 * sparc_ramdisk_image64.
720 ramdisk_image = sparc_ramdisk_image;
722 ramdisk_image = sparc_ramdisk_image64;
724 /* Another bootloader quirk. The bootloader normalizes
725 * the physical address to KERNBASE, so we have to
726 * factor that back out and add in the lowest valid
727 * physical page address to get the true physical address.
729 ramdisk_image -= KERNBASE;
730 ramdisk_image += phys_base;
732 numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
733 ramdisk_image, sparc_ramdisk_size);
735 initrd_start = ramdisk_image;
736 initrd_end = ramdisk_image + sparc_ramdisk_size;
738 memblock_reserve(initrd_start, sparc_ramdisk_size);
740 initrd_start += PAGE_OFFSET;
741 initrd_end += PAGE_OFFSET;
746 struct node_mem_mask {
749 unsigned long bootmem_paddr;
751 static struct node_mem_mask node_masks[MAX_NUMNODES];
752 static int num_node_masks;
754 int numa_cpu_lookup_table[NR_CPUS];
755 cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
757 #ifdef CONFIG_NEED_MULTIPLE_NODES
759 struct mdesc_mblock {
762 u64 offset; /* RA-to-PA */
764 static struct mdesc_mblock *mblocks;
765 static int num_mblocks;
767 static unsigned long ra_to_pa(unsigned long addr)
771 for (i = 0; i < num_mblocks; i++) {
772 struct mdesc_mblock *m = &mblocks[i];
774 if (addr >= m->base &&
775 addr < (m->base + m->size)) {
783 static int find_node(unsigned long addr)
787 addr = ra_to_pa(addr);
788 for (i = 0; i < num_node_masks; i++) {
789 struct node_mem_mask *p = &node_masks[i];
791 if ((addr & p->mask) == p->val)
797 u64 memblock_nid_range(u64 start, u64 end, int *nid)
799 *nid = find_node(start);
801 while (start < end) {
802 int n = find_node(start);
815 u64 memblock_nid_range(u64 start, u64 end, int *nid)
822 /* This must be invoked after performing all of the necessary
823 * add_active_range() calls for 'nid'. We need to be able to get
824 * correct data from get_pfn_range_for_nid().
826 static void __init allocate_node_data(int nid)
828 unsigned long paddr, num_pages, start_pfn, end_pfn;
829 struct pglist_data *p;
831 #ifdef CONFIG_NEED_MULTIPLE_NODES
832 paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
834 prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
837 NODE_DATA(nid) = __va(paddr);
838 memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
840 NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
845 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
846 p->node_start_pfn = start_pfn;
847 p->node_spanned_pages = end_pfn - start_pfn;
849 if (p->node_spanned_pages) {
850 num_pages = bootmem_bootmap_pages(p->node_spanned_pages);
852 paddr = memblock_alloc_try_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid);
854 prom_printf("Cannot allocate bootmap for nid[%d]\n",
858 node_masks[nid].bootmem_paddr = paddr;
862 static void init_node_masks_nonnuma(void)
866 numadbg("Initializing tables for non-numa.\n");
868 node_masks[0].mask = node_masks[0].val = 0;
871 for (i = 0; i < NR_CPUS; i++)
872 numa_cpu_lookup_table[i] = 0;
874 cpumask_setall(&numa_cpumask_lookup_table[0]);
877 #ifdef CONFIG_NEED_MULTIPLE_NODES
878 struct pglist_data *node_data[MAX_NUMNODES];
880 EXPORT_SYMBOL(numa_cpu_lookup_table);
881 EXPORT_SYMBOL(numa_cpumask_lookup_table);
882 EXPORT_SYMBOL(node_data);
884 struct mdesc_mlgroup {
890 static struct mdesc_mlgroup *mlgroups;
891 static int num_mlgroups;
893 static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
898 mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
899 u64 target = mdesc_arc_target(md, arc);
902 val = mdesc_get_property(md, target,
904 if (val && *val == cfg_handle)
910 static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
913 u64 arc, candidate, best_latency = ~(u64)0;
915 candidate = MDESC_NODE_NULL;
916 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
917 u64 target = mdesc_arc_target(md, arc);
918 const char *name = mdesc_node_name(md, target);
921 if (strcmp(name, "pio-latency-group"))
924 val = mdesc_get_property(md, target, "latency", NULL);
928 if (*val < best_latency) {
934 if (candidate == MDESC_NODE_NULL)
937 return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
940 int of_node_to_nid(struct device_node *dp)
942 const struct linux_prom64_registers *regs;
943 struct mdesc_handle *md;
948 /* This is the right thing to do on currently supported
949 * SUN4U NUMA platforms as well, as the PCI controller does
950 * not sit behind any particular memory controller.
955 regs = of_get_property(dp, "reg", NULL);
959 cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;
965 mdesc_for_each_node_by_name(md, grp, "group") {
966 if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
978 static void __init add_node_ranges(void)
980 struct memblock_region *reg;
982 for_each_memblock(memory, reg) {
983 unsigned long size = reg->size;
984 unsigned long start, end;
988 while (start < end) {
989 unsigned long this_end;
992 this_end = memblock_nid_range(start, end, &nid);
994 numadbg("Adding active range nid[%d] "
995 "start[%lx] end[%lx]\n",
996 nid, start, this_end);
998 add_active_range(nid,
1000 this_end >> PAGE_SHIFT);
1007 static int __init grab_mlgroups(struct mdesc_handle *md)
1009 unsigned long paddr;
1013 mdesc_for_each_node_by_name(md, node, "memory-latency-group")
1018 paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
1023 mlgroups = __va(paddr);
1024 num_mlgroups = count;
1027 mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
1028 struct mdesc_mlgroup *m = &mlgroups[count++];
1033 val = mdesc_get_property(md, node, "latency", NULL);
1035 val = mdesc_get_property(md, node, "address-match", NULL);
1037 val = mdesc_get_property(md, node, "address-mask", NULL);
1040 numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
1041 "match[%llx] mask[%llx]\n",
1042 count - 1, m->node, m->latency, m->match, m->mask);
1048 static int __init grab_mblocks(struct mdesc_handle *md)
1050 unsigned long paddr;
1054 mdesc_for_each_node_by_name(md, node, "mblock")
1059 paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
1064 mblocks = __va(paddr);
1065 num_mblocks = count;
1068 mdesc_for_each_node_by_name(md, node, "mblock") {
1069 struct mdesc_mblock *m = &mblocks[count++];
1072 val = mdesc_get_property(md, node, "base", NULL);
1074 val = mdesc_get_property(md, node, "size", NULL);
1076 val = mdesc_get_property(md, node,
1077 "address-congruence-offset", NULL);
1079 /* The address-congruence-offset property is optional.
1080 * Explicitly zero it to identify this.
1087 numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
1088 count - 1, m->base, m->size, m->offset);
1094 static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
1095 u64 grp, cpumask_t *mask)
1099 cpumask_clear(mask);
1101 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
1102 u64 target = mdesc_arc_target(md, arc);
1103 const char *name = mdesc_node_name(md, target);
1106 if (strcmp(name, "cpu"))
1108 id = mdesc_get_property(md, target, "id", NULL);
1109 if (*id < nr_cpu_ids)
1110 cpumask_set_cpu(*id, mask);
1114 static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
1118 for (i = 0; i < num_mlgroups; i++) {
1119 struct mdesc_mlgroup *m = &mlgroups[i];
1120 if (m->node == node)
1126 static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
1129 struct mdesc_mlgroup *candidate = NULL;
1130 u64 arc, best_latency = ~(u64)0;
1131 struct node_mem_mask *n;
1133 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1134 u64 target = mdesc_arc_target(md, arc);
1135 struct mdesc_mlgroup *m = find_mlgroup(target);
1138 if (m->latency < best_latency) {
1140 best_latency = m->latency;
1146 if (num_node_masks != index) {
1147 printk(KERN_ERR "Inconsistent NUMA state, "
1148 "index[%d] != num_node_masks[%d]\n",
1149 index, num_node_masks);
1153 n = &node_masks[num_node_masks++];
1155 n->mask = candidate->mask;
1156 n->val = candidate->match;
1158 numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n",
1159 index, n->mask, n->val, candidate->latency);
1164 static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
1170 numa_parse_mdesc_group_cpus(md, grp, &mask);
1172 for_each_cpu(cpu, &mask)
1173 numa_cpu_lookup_table[cpu] = index;
1174 cpumask_copy(&numa_cpumask_lookup_table[index], &mask);
1177 printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
1178 for_each_cpu(cpu, &mask)
1183 return numa_attach_mlgroup(md, grp, index);
1186 static int __init numa_parse_mdesc(void)
1188 struct mdesc_handle *md = mdesc_grab();
1192 node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
1193 if (node == MDESC_NODE_NULL) {
1198 err = grab_mblocks(md);
1202 err = grab_mlgroups(md);
1207 mdesc_for_each_node_by_name(md, node, "group") {
1208 err = numa_parse_mdesc_group(md, node, count);
1216 for (i = 0; i < num_node_masks; i++) {
1217 allocate_node_data(i);
1227 static int __init numa_parse_jbus(void)
1229 unsigned long cpu, index;
1231 /* NUMA node id is encoded in bits 36 and higher, and there is
1232 * a 1-to-1 mapping from CPU ID to NUMA node ID.
1235 for_each_present_cpu(cpu) {
1236 numa_cpu_lookup_table[cpu] = index;
1237 cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
1238 node_masks[index].mask = ~((1UL << 36UL) - 1UL);
1239 node_masks[index].val = cpu << 36UL;
1243 num_node_masks = index;
1247 for (index = 0; index < num_node_masks; index++) {
1248 allocate_node_data(index);
1249 node_set_online(index);
1255 static int __init numa_parse_sun4u(void)
1257 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1260 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
1261 if ((ver >> 32UL) == __JALAPENO_ID ||
1262 (ver >> 32UL) == __SERRANO_ID)
1263 return numa_parse_jbus();
1268 static int __init bootmem_init_numa(void)
1272 numadbg("bootmem_init_numa()\n");
1275 if (tlb_type == hypervisor)
1276 err = numa_parse_mdesc();
1278 err = numa_parse_sun4u();
1285 static int bootmem_init_numa(void)
1292 static void __init bootmem_init_nonnuma(void)
1294 unsigned long top_of_ram = memblock_end_of_DRAM();
1295 unsigned long total_ram = memblock_phys_mem_size();
1296 struct memblock_region *reg;
1298 numadbg("bootmem_init_nonnuma()\n");
1300 printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
1301 top_of_ram, total_ram);
1302 printk(KERN_INFO "Memory hole size: %ldMB\n",
1303 (top_of_ram - total_ram) >> 20);
1305 init_node_masks_nonnuma();
1307 for_each_memblock(memory, reg) {
1308 unsigned long start_pfn, end_pfn;
1313 start_pfn = memblock_region_memory_base_pfn(reg);
1314 end_pfn = memblock_region_memory_end_pfn(reg);
1315 add_active_range(0, start_pfn, end_pfn);
1318 allocate_node_data(0);
1323 static void __init reserve_range_in_node(int nid, unsigned long start,
1326 numadbg(" reserve_range_in_node(nid[%d],start[%lx],end[%lx]\n",
1328 while (start < end) {
1329 unsigned long this_end;
1332 this_end = memblock_nid_range(start, end, &n);
1334 numadbg(" MATCH reserving range [%lx:%lx]\n",
1336 reserve_bootmem_node(NODE_DATA(nid), start,
1337 (this_end - start), BOOTMEM_DEFAULT);
1339 numadbg(" NO MATCH, advancing start to %lx\n",
1346 static void __init trim_reserved_in_node(int nid)
1348 struct memblock_region *reg;
1350 numadbg(" trim_reserved_in_node(%d)\n", nid);
1352 for_each_memblock(reserved, reg)
1353 reserve_range_in_node(nid, reg->base, reg->base + reg->size);
1356 static void __init bootmem_init_one_node(int nid)
1358 struct pglist_data *p;
1360 numadbg("bootmem_init_one_node(%d)\n", nid);
1364 if (p->node_spanned_pages) {
1365 unsigned long paddr = node_masks[nid].bootmem_paddr;
1366 unsigned long end_pfn;
1368 end_pfn = p->node_start_pfn + p->node_spanned_pages;
1370 numadbg(" init_bootmem_node(%d, %lx, %lx, %lx)\n",
1371 nid, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);
1373 init_bootmem_node(p, paddr >> PAGE_SHIFT,
1374 p->node_start_pfn, end_pfn);
1376 numadbg(" free_bootmem_with_active_regions(%d, %lx)\n",
1378 free_bootmem_with_active_regions(nid, end_pfn);
1380 trim_reserved_in_node(nid);
1382 numadbg(" sparse_memory_present_with_active_regions(%d)\n",
1384 sparse_memory_present_with_active_regions(nid);
1388 static unsigned long __init bootmem_init(unsigned long phys_base)
1390 unsigned long end_pfn;
1393 end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
1394 max_pfn = max_low_pfn = end_pfn;
1395 min_low_pfn = (phys_base >> PAGE_SHIFT);
1397 if (bootmem_init_numa() < 0)
1398 bootmem_init_nonnuma();
1400 /* XXX cpu notifier XXX */
1402 for_each_online_node(nid)
1403 bootmem_init_one_node(nid);
1410 static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
1411 static int pall_ents __initdata;
1413 #ifdef CONFIG_DEBUG_PAGEALLOC
1414 static unsigned long __ref kernel_map_range(unsigned long pstart,
1415 unsigned long pend, pgprot_t prot)
1417 unsigned long vstart = PAGE_OFFSET + pstart;
1418 unsigned long vend = PAGE_OFFSET + pend;
1419 unsigned long alloc_bytes = 0UL;
1421 if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
1422 prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
1427 while (vstart < vend) {
1428 unsigned long this_end, paddr = __pa(vstart);
1429 pgd_t *pgd = pgd_offset_k(vstart);
1434 pud = pud_offset(pgd, vstart);
1435 if (pud_none(*pud)) {
1438 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1439 alloc_bytes += PAGE_SIZE;
1440 pud_populate(&init_mm, pud, new);
1443 pmd = pmd_offset(pud, vstart);
1444 if (!pmd_present(*pmd)) {
1447 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1448 alloc_bytes += PAGE_SIZE;
1449 pmd_populate_kernel(&init_mm, pmd, new);
1452 pte = pte_offset_kernel(pmd, vstart);
1453 this_end = (vstart + PMD_SIZE) & PMD_MASK;
1454 if (this_end > vend)
1457 while (vstart < this_end) {
1458 pte_val(*pte) = (paddr | pgprot_val(prot));
1460 vstart += PAGE_SIZE;
1469 extern unsigned int kvmap_linear_patch[1];
1470 #endif /* CONFIG_DEBUG_PAGEALLOC */
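/* Only 256MB chunks that are both 256MB-aligned and fully covered by a
 * physical memory bank get their bit set here; partially covered chunks at
 * either end of a bank are skipped and keep using 4MB linear mappings.
 */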
1472 static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
1474 const unsigned long shift_256MB = 28;
1475 const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL);
1476 const unsigned long size_256MB = (1UL << shift_256MB);
1478 while (start < end) {
1481 remains = end - start;
1482 if (remains < size_256MB)
1485 if (start & mask_256MB) {
1486 start = (start + size_256MB) & ~mask_256MB;
1490 while (remains >= size_256MB) {
1491 unsigned long index = start >> shift_256MB;
1493 __set_bit(index, kpte_linear_bitmap);
1495 start += size_256MB;
1496 remains -= size_256MB;
1501 static void __init init_kpte_bitmap(void)
1505 for (i = 0; i < pall_ents; i++) {
1506 unsigned long phys_start, phys_end;
1508 phys_start = pall[i].phys_addr;
1509 phys_end = phys_start + pall[i].reg_size;
1511 mark_kpte_bitmap(phys_start, phys_end);
1515 static void __init kernel_physical_mapping_init(void)
1517 #ifdef CONFIG_DEBUG_PAGEALLOC
1518 unsigned long i, mem_alloced = 0UL;
1520 for (i = 0; i < pall_ents; i++) {
1521 unsigned long phys_start, phys_end;
1523 phys_start = pall[i].phys_addr;
1524 phys_end = phys_start + pall[i].reg_size;
1526 mem_alloced += kernel_map_range(phys_start, phys_end,
1530 printk("Allocated %ld bytes for kernel page tables.\n",
1533 kvmap_linear_patch[0] = 0x01000000; /* nop */
1534 flushi(&kvmap_linear_patch[0]);
1540 #ifdef CONFIG_DEBUG_PAGEALLOC
1541 void kernel_map_pages(struct page *page, int numpages, int enable)
1543 unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
1544 unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
1546 kernel_map_range(phys_start, phys_end,
1547 (enable ? PAGE_KERNEL : __pgprot(0)));
1549 flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
1550 PAGE_OFFSET + phys_end);
1552 /* we should perform an IPI and flush all tlbs,
1553 * but that can deadlock, so flush only the current cpu.
1555 __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
1556 PAGE_OFFSET + phys_end);
1560 unsigned long __init find_ecache_flush_span(unsigned long size)
1564 for (i = 0; i < pavail_ents; i++) {
1565 if (pavail[i].reg_size >= size)
1566 return pavail[i].phys_addr;
1572 static void __init tsb_phys_patch(void)
1574 struct tsb_ldquad_phys_patch_entry *pquad;
1575 struct tsb_phys_patch_entry *p;
1577 pquad = &__tsb_ldquad_phys_patch;
1578 while (pquad < &__tsb_ldquad_phys_patch_end) {
1579 unsigned long addr = pquad->addr;
1581 if (tlb_type == hypervisor)
1582 *(unsigned int *) addr = pquad->sun4v_insn;
1584 *(unsigned int *) addr = pquad->sun4u_insn;
1586 __asm__ __volatile__("flush %0"
1593 p = &__tsb_phys_patch;
1594 while (p < &__tsb_phys_patch_end) {
1595 unsigned long addr = p->addr;
1597 *(unsigned int *) addr = p->insn;
1599 __asm__ __volatile__("flush %0"
1607 /* Don't mark as init, we give this to the Hypervisor. */
1608 #ifndef CONFIG_DEBUG_PAGEALLOC
1609 #define NUM_KTSB_DESCR 2
1611 #define NUM_KTSB_DESCR 1
1613 static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
1614 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
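/* Each entry in the phys-patch tables points at a sethi/or style
 * instruction pair (judging from the 22-bit and 10-bit immediate fields
 * patched below).  After shifting the physical address down by
 * KTSB_PHYS_SHIFT, the upper bits (pa >> 10) are spliced into the first
 * instruction's 22-bit immediate and the low 10 bits into the second one,
 * so the TSB miss handlers reference the kernel TSB by physical address.
 */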
1616 static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
1618 pa >>= KTSB_PHYS_SHIFT;
1620 while (start < end) {
1621 unsigned int *ia = (unsigned int *)(unsigned long)*start;
1623 ia[0] = (ia[0] & ~0x3fffff) | (pa >> 10);
1624 __asm__ __volatile__("flush %0" : : "r" (ia));
1626 ia[1] = (ia[1] & ~0x3ff) | (pa & 0x3ff);
1627 __asm__ __volatile__("flush %0" : : "r" (ia + 1));
1633 static void ktsb_phys_patch(void)
1635 extern unsigned int __swapper_tsb_phys_patch;
1636 extern unsigned int __swapper_tsb_phys_patch_end;
1637 unsigned long ktsb_pa;
1639 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
1640 patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
1641 &__swapper_tsb_phys_patch_end, ktsb_pa);
1642 #ifndef CONFIG_DEBUG_PAGEALLOC
1644 extern unsigned int __swapper_4m_tsb_phys_patch;
1645 extern unsigned int __swapper_4m_tsb_phys_patch_end;
1646 ktsb_pa = (kern_base +
1647 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
1648 patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
1649 &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
1654 static void __init sun4v_ktsb_init(void)
1656 unsigned long ktsb_pa;
1658 /* First KTSB for PAGE_SIZE mappings. */
1659 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
1661 switch (PAGE_SIZE) {
1664 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
1665 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
1669 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
1670 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
1674 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
1675 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
1678 case 4 * 1024 * 1024:
1679 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
1680 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
1684 ktsb_descr[0].assoc = 1;
1685 ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
1686 ktsb_descr[0].ctx_idx = 0;
1687 ktsb_descr[0].tsb_base = ktsb_pa;
1688 ktsb_descr[0].resv = 0;
1690 #ifndef CONFIG_DEBUG_PAGEALLOC
1691 /* Second KTSB for 4MB/256MB mappings. */
1692 ktsb_pa = (kern_base +
1693 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
1695 ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
1696 ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB |
1697 HV_PGSZ_MASK_256MB);
1698 ktsb_descr[1].assoc = 1;
1699 ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
1700 ktsb_descr[1].ctx_idx = 0;
1701 ktsb_descr[1].tsb_base = ktsb_pa;
1702 ktsb_descr[1].resv = 0;
1706 void __cpuinit sun4v_ktsb_register(void)
1708 unsigned long pa, ret;
1710 pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
1712 ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
1714 prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
1715 "errors with %lx\n", pa, ret);
1720 /* paging_init() sets up the page tables */
1722 static unsigned long last_valid_pfn;
1723 pgd_t swapper_pg_dir[2048];
1725 static void sun4u_pgprot_init(void);
1726 static void sun4v_pgprot_init(void);
1728 void __init paging_init(void)
1730 unsigned long end_pfn, shift, phys_base;
1731 unsigned long real_end, i;
1733 /* These build time checks make sure that the dcache_dirty_cpu()
1734 * page->flags usage will work.
1736 * When a page gets marked as dcache-dirty, we store the
1737 * cpu number starting at bit 32 in the page->flags. Also,
1738 * functions like clear_dcache_dirty_cpu use the cpu mask
1739 * in 13-bit signed-immediate instruction fields.
1743 * Page flags must not reach into upper 32 bits that are used
1744 * for the cpu number
1746 BUILD_BUG_ON(NR_PAGEFLAGS > 32);
1749 * The bit fields placed in the high range must not reach below
1750 * the 32 bit boundary. Otherwise we cannot place the cpu field
1751 * at the 32 bit boundary.
1753 BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
1754 ilog2(roundup_pow_of_two(NR_CPUS)) > 32);
1756 BUILD_BUG_ON(NR_CPUS > 4096);
1758 kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
1759 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
1761 /* Invalidate both kernel TSBs. */
1762 memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
1763 #ifndef CONFIG_DEBUG_PAGEALLOC
1764 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
1767 if (tlb_type == hypervisor)
1768 sun4v_pgprot_init();
1770 sun4u_pgprot_init();
1772 if (tlb_type == cheetah_plus ||
1773 tlb_type == hypervisor) {
1778 if (tlb_type == hypervisor) {
1779 sun4v_patch_tlb_handlers();
1785 /* Find available physical memory...
1787 * Read it twice in order to work around a bug in openfirmware.
1788 * The call to grab this table itself can cause openfirmware to
1789 * allocate memory, which in turn can take away some space from
1790 * the list of available memory. Reading it twice makes sure
1791 * we really do get the final value.
1793 read_obp_translations();
1794 read_obp_memory("reg", &pall[0], &pall_ents);
1795 read_obp_memory("available", &pavail[0], &pavail_ents);
1796 read_obp_memory("available", &pavail[0], &pavail_ents);
1798 phys_base = 0xffffffffffffffffUL;
1799 for (i = 0; i < pavail_ents; i++) {
1800 phys_base = min(phys_base, pavail[i].phys_addr);
1801 memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
1804 memblock_reserve(kern_base, kern_size);
1806 find_ramdisk(phys_base);
1808 memblock_enforce_memory_limit(cmdline_memory_size);
1811 memblock_dump_all();
1813 set_bit(0, mmu_context_bmap);
1815 shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
1817 real_end = (unsigned long)_end;
1818 num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
1819 printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
1820 num_kernel_image_mappings);
1822 /* Set kernel pgd to upper alias so physical page computations
1825 init_mm.pgd += ((shift) / (sizeof(pgd_t)));
1827 memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));
1829 /* Now can init the kernel/bad page tables. */
1830 pud_set(pud_offset(&swapper_pg_dir[0], 0),
1831 swapper_low_pmd_dir + (shift / sizeof(pgd_t)));
1833 inherit_prom_mappings();
1837 /* Ok, we can use our TLB miss and window trap handlers safely. */
1842 if (tlb_type == hypervisor)
1843 sun4v_ktsb_register();
1845 prom_build_devicetree();
1846 of_populate_present_mask();
1848 of_fill_in_cpu_data();
1851 if (tlb_type == hypervisor) {
1853 mdesc_populate_present_mask(cpu_all_mask);
1855 mdesc_fill_in_cpu_data(cpu_all_mask);
1859 /* Once the OF device tree and MDESC have been set up, we know
1860 * the list of possible cpus. Therefore we can allocate the
1863 for_each_possible_cpu(i) {
1864 /* XXX Use node local allocations... XXX */
1865 softirq_stack[i] = __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
1866 hardirq_stack[i] = __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
1869 /* Setup bootmem... */
1870 last_valid_pfn = end_pfn = bootmem_init(phys_base);
1872 #ifndef CONFIG_NEED_MULTIPLE_NODES
1873 max_mapnr = last_valid_pfn;
1875 kernel_physical_mapping_init();
1878 unsigned long max_zone_pfns[MAX_NR_ZONES];
1880 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
1882 max_zone_pfns[ZONE_NORMAL] = end_pfn;
1884 free_area_init_nodes(max_zone_pfns);
1887 printk("Booting Linux...\n");
1890 int __devinit page_in_phys_avail(unsigned long paddr)
1896 for (i = 0; i < pavail_ents; i++) {
1897 unsigned long start, end;
1899 start = pavail[i].phys_addr;
1900 end = start + pavail[i].reg_size;
1902 if (paddr >= start && paddr < end)
1905 if (paddr >= kern_base && paddr < (kern_base + kern_size))
1907 #ifdef CONFIG_BLK_DEV_INITRD
1908 if (paddr >= __pa(initrd_start) &&
1909 paddr < __pa(PAGE_ALIGN(initrd_end)))
1916 static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
1917 static int pavail_rescan_ents __initdata;
1919 /* Certain OBP calls, such as fetching "available" properties, can
1920 * claim physical memory. So, along with initializing the valid
1921 * address bitmap, what we do here is refetch the physical available
1922 * memory list again, and make sure it provides at least as much
1923 * memory as 'pavail' does.
1925 static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap)
1929 read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);
1931 for (i = 0; i < pavail_ents; i++) {
1932 unsigned long old_start, old_end;
1934 old_start = pavail[i].phys_addr;
1935 old_end = old_start + pavail[i].reg_size;
1936 while (old_start < old_end) {
1939 for (n = 0; n < pavail_rescan_ents; n++) {
1940 unsigned long new_start, new_end;
1942 new_start = pavail_rescan[n].phys_addr;
1943 new_end = new_start +
1944 pavail_rescan[n].reg_size;
1946 if (new_start <= old_start &&
1947 new_end >= (old_start + PAGE_SIZE)) {
1948 set_bit(old_start >> 22, bitmap);
1953 prom_printf("mem_init: Lost memory in pavail\n");
1954 prom_printf("mem_init: OLD start[%lx] size[%lx]\n",
1955 pavail[i].phys_addr,
1956 pavail[i].reg_size);
1957 prom_printf("mem_init: NEW start[%lx] size[%lx]\n",
1958 pavail_rescan[i].phys_addr,
1959 pavail_rescan[i].reg_size);
1960 prom_printf("mem_init: Cannot continue, aborting.\n");
1964 old_start += PAGE_SIZE;
1969 static void __init patch_tlb_miss_handler_bitmap(void)
1971 extern unsigned int valid_addr_bitmap_insn[];
1972 extern unsigned int valid_addr_bitmap_patch[];
1974 valid_addr_bitmap_insn[1] = valid_addr_bitmap_patch[1];
1976 valid_addr_bitmap_insn[0] = valid_addr_bitmap_patch[0];
1977 flushi(&valid_addr_bitmap_insn[0]);
1980 void __init mem_init(void)
1982 unsigned long codepages, datapages, initpages;
1983 unsigned long addr, last;
1985 addr = PAGE_OFFSET + kern_base;
1986 last = PAGE_ALIGN(kern_size) + addr;
1987 while (addr < last) {
1988 set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
1992 setup_valid_addr_bitmap_from_pavail(sparc64_valid_addr_bitmap);
1993 patch_tlb_miss_handler_bitmap();
1995 high_memory = __va(last_valid_pfn << PAGE_SHIFT);
1997 #ifdef CONFIG_NEED_MULTIPLE_NODES
2000 for_each_online_node(i) {
2001 if (NODE_DATA(i)->node_spanned_pages != 0) {
2003 free_all_bootmem_node(NODE_DATA(i));
2008 totalram_pages = free_all_bootmem();
2011 /* We subtract one to account for the mem_map_zero page
2014 totalram_pages -= 1;
2015 num_physpages = totalram_pages;
2018 * Set up the zero page, mark it reserved, so that page count
2019 * is not manipulated when freeing the page from user ptes.
2021 mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
2022 if (mem_map_zero == NULL) {
2023 prom_printf("paging_init: Cannot alloc zero page.\n");
2026 SetPageReserved(mem_map_zero);
2028 codepages = (((unsigned long) _etext) - ((unsigned long) _start));
2029 codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
2030 datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
2031 datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
2032 initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
2033 initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;
2035 printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
2036 nr_free_pages() << (PAGE_SHIFT-10),
2037 codepages << (PAGE_SHIFT-10),
2038 datapages << (PAGE_SHIFT-10),
2039 initpages << (PAGE_SHIFT-10),
2040 PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));
2042 if (tlb_type == cheetah || tlb_type == cheetah_plus)
2043 cheetah_ecache_flush_init();
2046 void free_initmem(void)
2048 unsigned long addr, initend;
2051 /* If the physical memory maps were trimmed by kernel command
2052 * line options, don't even try freeing this initmem stuff up.
2053 * The kernel image could have been in the trimmed out region
2054 * and if so the freeing below will free invalid page structs.
2056 if (cmdline_memory_size)
2060 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
2062 addr = PAGE_ALIGN((unsigned long)(__init_begin));
2063 initend = (unsigned long)(__init_end) & PAGE_MASK;
2064 for (; addr < initend; addr += PAGE_SIZE) {
2069 ((unsigned long) __va(kern_base)) -
2070 ((unsigned long) KERNBASE));
2071 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
2074 p = virt_to_page(page);
2076 ClearPageReserved(p);
2085 #ifdef CONFIG_BLK_DEV_INITRD
2086 void free_initrd_mem(unsigned long start, unsigned long end)
2089 printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
2090 for (; start < end; start += PAGE_SIZE) {
2091 struct page *p = virt_to_page(start);
2093 ClearPageReserved(p);
2102 #define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
2103 #define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
2104 #define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
2105 #define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
2106 #define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
2107 #define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
2109 pgprot_t PAGE_KERNEL __read_mostly;
2110 EXPORT_SYMBOL(PAGE_KERNEL);
2112 pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
2113 pgprot_t PAGE_COPY __read_mostly;
2115 pgprot_t PAGE_SHARED __read_mostly;
2116 EXPORT_SYMBOL(PAGE_SHARED);
2118 unsigned long pg_iobits __read_mostly;
2120 unsigned long _PAGE_IE __read_mostly;
2121 EXPORT_SYMBOL(_PAGE_IE);
2123 unsigned long _PAGE_E __read_mostly;
2124 EXPORT_SYMBOL(_PAGE_E);
2126 unsigned long _PAGE_CACHE __read_mostly;
2127 EXPORT_SYMBOL(_PAGE_CACHE);
2129 #ifdef CONFIG_SPARSEMEM_VMEMMAP
2130 unsigned long vmemmap_table[VMEMMAP_SIZE];
2132 static long __meminitdata addr_start, addr_end;
2133 static int __meminitdata node_start;
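/* Each slot of vmemmap_table[] shadows one VMEMMAP_CHUNK of the virtual
 * memmap: vmemmap_populate() fills it on demand with a 4MB TTE (the
 * size/cacheable/privileged bits ORed with the physical address of a
 * freshly allocated 4MB block), so the vmemmap TLB miss path can resolve
 * translations straight from this table.
 */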
2135 int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
2137 unsigned long vstart = (unsigned long) start;
2138 unsigned long vend = (unsigned long) (start + nr);
2139 unsigned long phys_start = (vstart - VMEMMAP_BASE);
2140 unsigned long phys_end = (vend - VMEMMAP_BASE);
2141 unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
2142 unsigned long end = VMEMMAP_ALIGN(phys_end);
2143 unsigned long pte_base;
2145 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2146 _PAGE_CP_4U | _PAGE_CV_4U |
2147 _PAGE_P_4U | _PAGE_W_4U);
2148 if (tlb_type == hypervisor)
2149 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2150 _PAGE_CP_4V | _PAGE_CV_4V |
2151 _PAGE_P_4V | _PAGE_W_4V);
2153 for (; addr < end; addr += VMEMMAP_CHUNK) {
2154 unsigned long *vmem_pp =
2155 vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT);
2158 if (!(*vmem_pp & _PAGE_VALID)) {
2159 block = vmemmap_alloc_block(1UL << 22, node);
2163 *vmem_pp = pte_base | __pa(block);
2165 /* check to see if we have contiguous blocks */
2166 if (addr_end != addr || node_start != node) {
2168 printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
2169 addr_start, addr_end-1, node_start);
2173 addr_end = addr + VMEMMAP_CHUNK;
2179 void __meminit vmemmap_populate_print_last(void)
2182 printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
2183 addr_start, addr_end-1, node_start);
2189 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
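/* protection_map[] below is indexed by the low four vm_flags bits
 * (VM_READ, VM_WRITE, VM_EXEC, VM_SHARED).  Entries without VM_EXEC have
 * page_exec_bit stripped; a writable shared mapping resolves to
 * page_shared, while a writable private mapping gets the copy-on-write
 * page_copy protection.
 */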
2191 static void prot_init_common(unsigned long page_none,
2192 unsigned long page_shared,
2193 unsigned long page_copy,
2194 unsigned long page_readonly,
2195 unsigned long page_exec_bit)
2197 PAGE_COPY = __pgprot(page_copy);
2198 PAGE_SHARED = __pgprot(page_shared);
2200 protection_map[0x0] = __pgprot(page_none);
2201 protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
2202 protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
2203 protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
2204 protection_map[0x4] = __pgprot(page_readonly);
2205 protection_map[0x5] = __pgprot(page_readonly);
2206 protection_map[0x6] = __pgprot(page_copy);
2207 protection_map[0x7] = __pgprot(page_copy);
2208 protection_map[0x8] = __pgprot(page_none);
2209 protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
2210 protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
2211 protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
2212 protection_map[0xc] = __pgprot(page_readonly);
2213 protection_map[0xd] = __pgprot(page_readonly);
2214 protection_map[0xe] = __pgprot(page_shared);
2215 protection_map[0xf] = __pgprot(page_shared);
2218 static void __init sun4u_pgprot_init(void)
2220 unsigned long page_none, page_shared, page_copy, page_readonly;
2221 unsigned long page_exec_bit;
2223 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2224 _PAGE_CACHE_4U | _PAGE_P_4U |
2225 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2227 PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2228 _PAGE_CACHE_4U | _PAGE_P_4U |
2229 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2230 _PAGE_EXEC_4U | _PAGE_L_4U);
2232 _PAGE_IE = _PAGE_IE_4U;
2233 _PAGE_E = _PAGE_E_4U;
2234 _PAGE_CACHE = _PAGE_CACHE_4U;
2236 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
2237 __ACCESS_BITS_4U | _PAGE_E_4U);
2239 #ifdef CONFIG_DEBUG_PAGEALLOC
2240 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4U) ^
2241 0xfffff80000000000UL;
2243 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
2244 0xfffff80000000000UL;
2246 kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
2247 _PAGE_P_4U | _PAGE_W_4U);
2249 /* XXX Should use 256MB on Panther. XXX */
2250 kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
2252 _PAGE_SZBITS = _PAGE_SZBITS_4U;
2253 _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
2254 _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
2255 _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
2258 page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
2259 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2260 __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
2261 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2262 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2263 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2264 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2266 page_exec_bit = _PAGE_EXEC_4U;
2268 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2272 static void __init sun4v_pgprot_init(void)
2274 unsigned long page_none, page_shared, page_copy, page_readonly;
2275 unsigned long page_exec_bit;
2277 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
2278 _PAGE_CACHE_4V | _PAGE_P_4V |
2279 __ACCESS_BITS_4V | __DIRTY_BITS_4V |
2281 PAGE_KERNEL_LOCKED = PAGE_KERNEL;
2283 _PAGE_IE = _PAGE_IE_4V;
2284 _PAGE_E = _PAGE_E_4V;
2285 _PAGE_CACHE = _PAGE_CACHE_4V;
2287 #ifdef CONFIG_DEBUG_PAGEALLOC
2288 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
2289 0xfffff80000000000UL;
2291 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
2292 0xfffff80000000000UL;
2294 kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
2295 _PAGE_P_4V | _PAGE_W_4V);
2297 #ifdef CONFIG_DEBUG_PAGEALLOC
2298 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
2299 0xfffff80000000000UL;
2301 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
2302 0xfffff80000000000UL;
2304 kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
2305 _PAGE_P_4V | _PAGE_W_4V);
2307 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
2308 __ACCESS_BITS_4V | _PAGE_E_4V);
2310 _PAGE_SZBITS = _PAGE_SZBITS_4V;
2311 _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
2312 _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
2313 _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
2314 _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
2316 page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
2317 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
2318 __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
2319 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
2320 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2321 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
2322 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2324 page_exec_bit = _PAGE_EXEC_4V;
2326 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2330 unsigned long pte_sz_bits(unsigned long sz)
2332 if (tlb_type == hypervisor) {
2336 return _PAGE_SZ8K_4V;
2338 return _PAGE_SZ64K_4V;
2340 return _PAGE_SZ512K_4V;
2341 case 4 * 1024 * 1024:
2342 return _PAGE_SZ4MB_4V;
2348 return _PAGE_SZ8K_4U;
2350 return _PAGE_SZ64K_4U;
2352 return _PAGE_SZ512K_4U;
2353 case 4 * 1024 * 1024:
2354 return _PAGE_SZ4MB_4U;
2359 pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
2363 pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
2364 pte_val(pte) |= (((unsigned long)space) << 32);
2365 pte_val(pte) |= pte_sz_bits(page_size);
2370 static unsigned long kern_large_tte(unsigned long paddr)
2374 val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2375 _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
2376 _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
2377 if (tlb_type == hypervisor)
2378 val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2379 _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
2380 _PAGE_EXEC_4V | _PAGE_W_4V);
2385 /* If not locked, zap it. */
2386 void __flush_tlb_all(void)
2388 unsigned long pstate;
2391 __asm__ __volatile__("flushw\n\t"
2392 "rdpr %%pstate, %0\n\t"
2393 "wrpr %0, %1, %%pstate"
2396 if (tlb_type == hypervisor) {
2397 sun4v_mmu_demap_all();
2398 } else if (tlb_type == spitfire) {
2399 for (i = 0; i < 64; i++) {
2400 /* Spitfire Errata #32 workaround */
2401 /* NOTE: Always runs on spitfire, so no
2402 * cheetah+ page size encodings.
2404 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
2408 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2410 if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
2411 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2414 : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
2415 spitfire_put_dtlb_data(i, 0x0UL);
2418 /* Spitfire Errata #32 workaround */
2419 /* NOTE: Always runs on spitfire, so no
2420 * cheetah+ page size encodings.
2422 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
2426 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2428 if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
2429 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2432 : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
2433 spitfire_put_itlb_data(i, 0x0UL);
2436 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
2437 cheetah_flush_dtlb_all();
2438 cheetah_flush_itlb_all();
2440 __asm__ __volatile__("wrpr %0, 0, %%pstate"
2445 #define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range
2447 #define do_flush_tlb_kernel_range __flush_tlb_kernel_range
2450 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
2452 if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
2453 if (start < LOW_OBP_ADDRESS) {
2454 flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
2455 do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
2457 if (end > HI_OBP_ADDRESS) {
2458 flush_tsb_kernel_range(end, HI_OBP_ADDRESS);
2459 do_flush_tlb_kernel_range(end, HI_OBP_ADDRESS);
2462 flush_tsb_kernel_range(start, end);
2463 do_flush_tlb_kernel_range(start, end);