/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU does not use a hash table to store virtual to
 * physical translations (i.e., SW loaded TLBs or Book3E compliant processors,
 * this does -not- include 603 however which shares the implementation with
 * hash based processors)
 *
 * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/code-patching.h>
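/*
 * Page size table for Book3E: for each Linux MMU page size, .enc holds the
 * Book3E TSIZE encoding programmed into the MAS registers (and passed to the
 * low-level _tlbil_va/_tlbivax_bcast helpers) for translations of that size.
 */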
#ifdef CONFIG_PPC_BOOK3E
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K]	= { .shift = 12, .enc = BOOK3E_PAGESZ_4K,   },
	[MMU_PAGE_16K]	= { .shift = 14, .enc = BOOK3E_PAGESZ_16K,  },
	[MMU_PAGE_64K]	= { .shift = 16, .enc = BOOK3E_PAGESZ_64K,  },
	[MMU_PAGE_1M]	= { .shift = 20, .enc = BOOK3E_PAGESZ_1M,   },
	[MMU_PAGE_16M]	= { .shift = 24, .enc = BOOK3E_PAGESZ_16M,  },
	[MMU_PAGE_256M]	= { .shift = 28, .enc = BOOK3E_PAGESZ_256M, },
	[MMU_PAGE_1G]	= { .shift = 30, .enc = BOOK3E_PAGESZ_1GB,  },
};
static inline int mmu_get_tsize(int psize)
{
	return mmu_psize_defs[psize].enc;
}
#else
static inline int mmu_get_tsize(int psize)
{
	/* This isn't used on !Book3E for now */
	return 0;
}
#endif /* CONFIG_PPC_BOOK3E */
/* The variables below are currently only used on 64-bit Book3E
 * though this will probably be made common with other nohash
 * implementations at some point
 */
#ifdef CONFIG_PPC64

int mmu_linear_psize;		/* Page size used for the linear mapping */
int mmu_pte_psize;		/* Page size used for PTE pages */
int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
int book3e_htw_enabled;		/* Is HW tablewalk enabled? */
unsigned long linear_map_top;	/* Top of linear mapping */

#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC_FSL_BOOK3E
/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
DEFINE_PER_CPU(int, next_tlbcam_idx);
EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
#endif /* CONFIG_PPC_FSL_BOOK3E */
/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */

/*
 * These are the base non-SMP variants of page and mm flushing
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_pid(pid);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_tlb_mm);
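/*
 * Flush a single page (or a single indirect entry when "ind" is set) from
 * the TLB of the local CPU only. A NULL mm means a kernel address, which
 * is handled under PID 0.
 */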
void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			    int tsize, int ind)
{
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_va(vmaddr, pid, tsize, ind);
	preempt_enable();
}
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			       mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(local_flush_tlb_page);
/*
 * And here are the SMP non-local implementations
 */
#ifdef CONFIG_SMP

static DEFINE_RAW_SPINLOCK(tlbivax_lock);
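/*
 * Returns true when all CPUs that have used this mm are hardware threads of
 * the current core, in which case a core-local flush is sufficient and no
 * broadcast or IPI is needed.
 */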
static int mm_is_core_local(struct mm_struct *mm)
{
	return cpumask_subset(mm_cpumask(mm),
			      topology_thread_cpumask(smp_processor_id()));
}
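/*
 * Arguments marshalled for the flush IPIs below: the target PID, plus the
 * address, page size (tsize) and indirect flag for single-page flushes.
 */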
struct tlb_flush_param {
	unsigned long addr;
	unsigned int pid;
	unsigned int tsize;
	unsigned int ind;
};
static void do_flush_tlb_mm_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_pid(p ? p->pid : 0);
}
static void do_flush_tlb_page_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);
}
/* Note on invalidations and PID:
 *
 * We snapshot the PID with preempt disabled. At this point, it can still
 * change either because:
 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 * - we are invalidating some target that isn't currently running here
 *   and is concurrently acquiring a new PID on another CPU
 * - some other CPU is re-acquiring a lost PID for this mm
 *
 * However, this shouldn't be a problem as we only guarantee
 * invalidation of TLB entries present prior to this call, so we
 * don't care about the PID changing, and invalidating a stale PID
 * is generally harmless.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
	if (!mm_is_core_local(mm)) {
		struct tlb_flush_param p = { .pid = pid };
		/* Ignores smp_processor_id() even if set. */
		smp_call_function_many(mm_cpumask(mm),
				       do_flush_tlb_mm_ipi, &p, 1);
	}
	_tlbil_pid(pid);
 no_context:
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);
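/*
 * Flush one page (or one indirect entry when "ind" is set) on behalf of mm
 * on every CPU that may hold a stale translation: use a broadcast tlbivax
 * when the MMU supports it (serialized by tlbivax_lock if required),
 * otherwise IPI the other CPUs in mm_cpumask and invalidate the local TLB
 * entry directly.
 */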
void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
		      int tsize, int ind)
{
	struct cpumask *cpu_mask;
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	cpu_mask = mm_cpumask(mm);
	if (!mm_is_core_local(mm)) {
		/* If broadcast tlbivax is supported, use it */
		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
			if (lock)
				raw_spin_lock(&tlbivax_lock);
			_tlbivax_bcast(vmaddr, pid, tsize, ind);
			if (lock)
				raw_spin_unlock(&tlbivax_lock);
			goto bail;
		} else {
			struct tlb_flush_param p = {
				.pid = pid,
				.addr = vmaddr,
				.tsize = tsize,
				.ind = ind,
			};
			/* Ignores smp_processor_id() even if set in cpu_mask */
			smp_call_function_many(cpu_mask,
					       do_flush_tlb_page_ipi, &p, 1);
		}
	}
	_tlbil_va(vmaddr, pid, tsize, ind);
 bail:
	preempt_enable();
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			 mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(flush_tlb_page);
#endif /* CONFIG_SMP */

/*
 * Flush kernel TLB entries in the given range
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
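	/* A NULL parameter makes do_flush_tlb_mm_ipi fall back to PID 0
	 * (_tlbil_pid(0)), which covers kernel translations on each CPU.
	 */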
	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
	_tlbil_pid(0);
}
EXPORT_SYMBOL(flush_tlb_kernel_range);
/*
 * Currently, for range flushing, we just do a full mm flush. This should
 * be optimized based on a threshold on the size of the range, since
 * some implementations can stack multiple tlbivax before a tlbsync, but
 * for now, we keep it that way
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);
void tlb_flush(struct mmu_gather *tlb)
{
	flush_tlb_mm(tlb->mm);
}
/*
 * Below are functions specific to the 64-bit variant of Book3E though that
 * may change in the future
 */

#ifdef CONFIG_PPC64

/*
 * Handling of virtual linear page tables or indirect TLB entries
 * flushing when PTE pages are freed
 */
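/*
 * With HW tablewalk enabled we invalidate the indirect TLB entries covering
 * the PMD range that the freed PTE page was mapping; without it we
 * invalidate the single entry covering this page's slot in the virtual
 * linear page table.
 */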
void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	int tsize = mmu_psize_defs[mmu_pte_psize].enc;

	if (book3e_htw_enabled) {
		unsigned long start = address & PMD_MASK;
		unsigned long end = address + PMD_SIZE;
		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;

		/* This isn't the most optimal, ideally we would factor out the
		 * whole preempt & CPU mask mucking around, or even the IPI but
		 * it will do for now
		 */
		while (start < end) {
			__flush_tlb_page(tlb->mm, start, tsize, 1);
			start += size;
		}
	} else {
		unsigned long rmask = 0xf000000000000000ul;
		unsigned long rid = (address & rmask) | 0x1000000000000000ul;
		unsigned long vpte = address & ~rmask;

#ifdef CONFIG_PPC_64K_PAGES
		vpte = (vpte >> (PAGE_SHIFT - 4)) & ~0xfffful;
#else
		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
#endif
		vpte |= rid;
		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
	}
}
static void setup_page_sizes(void)
{
	unsigned int tlb0cfg;
	unsigned int tlb0ps;
	unsigned int eptcfg;
	int i, psize;

#ifdef CONFIG_PPC_FSL_BOOK3E
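	/* On Freescale cores implementing MMU architecture version 1.0,
	 * TLB1CFG advertises the minimum and maximum supported direct page
	 * sizes, encoded as powers of 4KB (hence the "4^shift Kb"
	 * adjustment below).
	 */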
	unsigned int mmucfg = mfspr(SPRN_MMUCFG);

	if (((mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) &&
	    (mmu_has_feature(MMU_FTR_TYPE_FSL_E))) {
		unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
		unsigned int min_pg, max_pg;

		min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
		max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def;
			unsigned int shift;

			def = &mmu_psize_defs[psize];
			shift = def->shift;
			if (shift == 0)
				continue;

			/* adjust to be in terms of 4^shift Kb */
			shift = (shift - 10) >> 1;

			if ((shift >= min_pg) && (shift <= max_pg))
				def->flags |= MMU_PAGE_SIZE_DIRECT;
		}
	}
#endif /* CONFIG_PPC_FSL_BOOK3E */
	tlb0cfg = mfspr(SPRN_TLB0CFG);
	tlb0ps = mfspr(SPRN_TLB0PS);
	eptcfg = mfspr(SPRN_EPTCFG);

	/* Look for supported direct sizes */
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];

		if (tlb0ps & (1U << (def->shift - 10)))
			def->flags |= MMU_PAGE_SIZE_DIRECT;
	}

	/* Indirect page sizes supported? */
	if ((tlb0cfg & TLBnCFG_IND) == 0)
	/* Now, we only deal with one IND page size for each
	 * direct size. Hopefully all implementations today are
	 * unambiguous, but we might want to be careful in the
	 * future
	 */
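	/* EPTCFG packs up to three page size pairs (ps, sps) describing the
	 * indirect entry sizes the hardware supports; both values are log2
	 * of the size in KB, which is why they are compared against
	 * def->shift - 10 below.
	 */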
	for (i = 0; i < 3; i++) {
		unsigned int ps, sps;

		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (ps == (def->shift - 10))
				def->flags |= MMU_PAGE_SIZE_INDIRECT;
			if (sps == (def->shift - 10))
	/* Cleanup array and print summary */
	pr_info("MMU: Supported page sizes\n");
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];
		const char *__page_type_names[] = {
			"unsupported", "direct", "indirect", "direct & indirect"
		};
		if (def->flags == 0) {
			def->shift = 0;
			continue;
		}
		pr_info(" %8ld KB as %s\n", 1ul << (def->shift - 10),
			__page_type_names[def->flags & 0x3]);
	}
}
static void __patch_exception(int exc, unsigned long addr)
{
	extern unsigned int interrupt_base_book3e;
	unsigned int *ibase = &interrupt_base_book3e;

	/* Our exception vectors start with a NOP and -then- a branch
	 * to deal with single stepping from userspace which stops on
	 * the second instruction. Thus we need to patch the second
	 * instruction of the exception, not the first one
	 */
	patch_branch(ibase + (exc / 4) + 1, addr, 0);
}
#define patch_exception(exc, name) do { \
	extern unsigned int name; \
	__patch_exception((exc), (unsigned long)&name); \
} while (0)
static void setup_mmu_htw(void)
{
	/* Check if HW tablewalk is present, and if yes, enable it by:
	 *
	 * - patching the TLB miss handlers to branch to the
	 *   one dedicated to it
	 *
	 * - setting the global book3e_htw_enabled
	 */
	unsigned int tlb0cfg = mfspr(SPRN_TLB0CFG);
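	/* HW tablewalk is usable only if TLB0 advertises both indirect
	 * entries (IND) and page table support (PT).
	 */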
	if ((tlb0cfg & TLBnCFG_IND) &&
	    (tlb0cfg & TLBnCFG_PT)) {
		patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
		book3e_htw_enabled = 1;
	}
	pr_info("MMU: Book3E HW tablewalk %s\n",
		book3e_htw_enabled ? "enabled" : "not supported");
}
/*
 * Early initialization of the MMU TLB code
 */
static void __early_init_mmu(int boot_cpu)
{
	unsigned int mas4;

	/* XXX This will have to be decided at runtime, but right
	 * now our boot and TLB miss code hard wires it. Ideally
	 * we should find out a suitable page size and patch the
	 * TLB miss code (either that or use the PACA to store
	 * the value we want)
	 */
	mmu_linear_psize = MMU_PAGE_1G;

	/* XXX This should be decided at runtime based on supported
	 * page sizes in the TLB, but for now let's assume 16M is
	 * always there and a good fit (which it probably is)
	 */
	mmu_vmemmap_psize = MMU_PAGE_16M;
	/* XXX This code only checks for TLB 0 capabilities and doesn't
	 * check what page size combos are supported by the HW. It
	 * also doesn't handle the case where a separate array holds
	 * the IND entries from the array loaded by the PT.
	 */
	if (boot_cpu) {
		/* Look for supported page sizes */
		setup_page_sizes();

		/* Look for HW tablewalk support */
		setup_mmu_htw();
	}
	/* Set MAS4 based on page table setting */
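	/* MAS4 provides the defaults loaded into the MAS registers on a TLB
	 * miss: the WIMGE storage attributes (0x4 = M, coherency required)
	 * and the default page size (TSIZED) used for the reload.
	 */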
	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
	if (book3e_htw_enabled) {
		mas4 |= MAS4_INDD;
#ifdef CONFIG_PPC_64K_PAGES
		mas4 |= BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_256M;
#else
		mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_1M;
#endif
	} else {
#ifdef CONFIG_PPC_64K_PAGES
		mas4 |= BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT;
#else
		mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
#endif
		mmu_pte_psize = mmu_virtual_psize;
	}
	mtspr(SPRN_MAS4, mas4);
	/* Set the global containing the top of the linear mapping
	 * for use by the TLB miss code
	 */
	linear_map_top = memblock_end_of_DRAM();
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned int num_cams;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
		linear_map_top = map_mem_in_cams(linear_map_top, num_cams);

		/* limit memory so we don't have linear faults */
		memblock_enforce_memory_limit(linear_map_top);

		patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_bolted_book3e);
	}
#endif /* CONFIG_PPC_FSL_BOOK3E */
	/* A sync won't hurt us after mucking around with
	 * the MMU configuration
	 */
	mb();

	memblock_set_current_limit(linear_map_top);
}
void __init early_init_mmu(void)
{
	__early_init_mmu(1);
}

void __cpuinit early_init_mmu_secondary(void)
{
	__early_init_mmu(0);
}
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* On Embedded 64-bit, we adjust the RMA size to match
	 * the bolted TLB entry. We know for now that only 1G
	 * entries are supported though that may eventually
	 * change. We crop it to the size of the first MEMBLOCK to
	 * avoid going over total available memory just in case...
	 */
	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

	/* Finally limit subsequent allocations */
	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
}
#endif /* CONFIG_PPC64 */