/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/tsb.h>
#include <asm/tlb.h>
#include <asm/oplib.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
{
        vaddr >>= hash_shift;
        return vaddr & (nentries - 1);
}
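/* Note: TSB tags hold bits 63:22 of the virtual address (see
 * tag_compare() below and the tag computation in
 * __flush_tsb_one_entry()), so a single compare matches an entry
 * regardless of which page size backs it.
 */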
static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
        return (tag == (vaddr >> 22));
}
/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.  Here we do the actual
 * TSB flush.
 */
void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long v;

        for (v = start; v < end; v += PAGE_SIZE) {
                unsigned long hash = tsb_hash(v, PAGE_SHIFT,
                                              KERNEL_TSB_NENTRIES);
                struct tsb *ent = &swapper_tsb[hash];

                if (tag_compare(ent->tag, v))
                        ent->tag = (1UL << TSB_TAG_INVALID_BIT);
        }
}
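/* Note the asymmetry: the kernel TSB above is invalidated by storing an
 * invalid tag directly, while the user TSB paths below go through the
 * tsb_flush() assembler helper, which also honors the lock bit that the
 * TLB miss handlers set while updating an entry (the same lock bit the
 * copy_tsb() comment in tsb_grow() refers to).
 */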
static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
                                  unsigned long hash_shift,
                                  unsigned long nentries)
{
        unsigned long tag, ent, hash;

        v &= ~0x1UL;
        hash = tsb_hash(v, hash_shift, nentries);
        ent = tsb + (hash * sizeof(struct tsb));
        tag = (v >> 22UL);

        tsb_flush(ent, tag);
}
static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
                            unsigned long tsb, unsigned long nentries)
{
        unsigned long i;

        for (i = 0; i < tb->tlb_nr; i++)
                __flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
}
void flush_tsb_user(struct tlb_batch *tb)
{
        struct mm_struct *mm = tb->mm;
        unsigned long nentries, base, flags;

        spin_lock_irqsave(&mm->context.lock, flags);

        base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
        nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
        if (tlb_type == cheetah_plus || tlb_type == hypervisor)
                base = __pa(base);
        __flush_tsb_one(tb, PAGE_SHIFT, base, nentries);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
        if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
                base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
                nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
                if (tlb_type == cheetah_plus || tlb_type == hypervisor)
                        base = __pa(base);
                __flush_tsb_one(tb, HPAGE_SHIFT, base, nentries);
        }
#endif
        spin_unlock_irqrestore(&mm->context.lock, flags);
}
void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
{
        unsigned long nentries, base, flags;

        spin_lock_irqsave(&mm->context.lock, flags);

        base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
        nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
        if (tlb_type == cheetah_plus || tlb_type == hypervisor)
                base = __pa(base);
        __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
        if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
                base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
                nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
                if (tlb_type == cheetah_plus || tlb_type == hypervisor)
                        base = __pa(base);
                __flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
        }
#endif
        spin_unlock_irqrestore(&mm->context.lock, flags);
}
#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K
#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_64K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_64K
#else
#error Broken base page size setting...
#endif
#ifdef CONFIG_HUGETLB_PAGE
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_64K
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_64K
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_512K
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_512K
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_4MB
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_4MB
#else
#error Broken huge page size setting...
#endif
#endif
static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
{
        unsigned long tsb_reg, base, tsb_paddr;
        unsigned long page_sz, tte;

        mm->context.tsb_block[tsb_idx].tsb_nentries =
                tsb_bytes / sizeof(struct tsb);

        switch (tsb_idx) {
        case MM_TSB_BASE:
                base = TSBMAP_8K_BASE;
                break;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
        case MM_TSB_HUGE:
                base = TSBMAP_4M_BASE;
                break;
#endif
        default:
                BUG();
        }
        tte = pgprot_val(PAGE_KERNEL_LOCKED);
        tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
        BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
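        /* Resulting mapping size by TSB size in the switch below: an 8K
         * TSB maps with an 8K page, 16K-64K TSBs with a 64K page,
         * 128K-512K TSBs with a 512K page, and a 1MB TSB with a 4MB page.
         */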
        /* Use the smallest page size that can map the whole TSB
         * in one TLB entry.
         */
        switch (tsb_bytes) {
        case 8192 << 0:
                tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
                base += (tsb_paddr & 8192);
#endif
                page_sz = 8192;
                break;
        case 8192 << 1:
                tsb_reg = 0x1UL;
                page_sz = 64 * 1024;
                break;
        case 8192 << 2:
                tsb_reg = 0x2UL;
                page_sz = 64 * 1024;
                break;
        case 8192 << 3:
                tsb_reg = 0x3UL;
                page_sz = 64 * 1024;
                break;
        case 8192 << 4:
                tsb_reg = 0x4UL;
                page_sz = 512 * 1024;
                break;
        case 8192 << 5:
                tsb_reg = 0x5UL;
                page_sz = 512 * 1024;
                break;
        case 8192 << 6:
                tsb_reg = 0x6UL;
                page_sz = 512 * 1024;
                break;
        case 8192 << 7:
                tsb_reg = 0x7UL;
                page_sz = 4 * 1024 * 1024;
                break;
        default:
                printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n",
                       current->comm, current->pid, tsb_bytes);
                do_exit(SIGSEGV);
        }
        tte |= pte_sz_bits(page_sz);
        if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
                /* Physical mapping, no locked TLB entry for TSB.  */
                tsb_reg |= tsb_paddr;

                mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
                mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
                mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
        } else {
                tsb_reg |= base;
                tsb_reg |= (tsb_paddr & (page_sz - 1UL));
                tte |= (tsb_paddr & ~(page_sz - 1UL));

                mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
                mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
                mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
        }
        /* Setup the Hypervisor TSB descriptor.  */
        if (tlb_type == hypervisor) {
                struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];

                switch (tsb_idx) {
                case MM_TSB_BASE:
                        hp->pgsz_idx = HV_PGSZ_IDX_BASE;
                        break;
#ifdef CONFIG_HUGETLB_PAGE
                case MM_TSB_HUGE:
                        hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
                        break;
#endif
                default:
                        BUG();
                }
                hp->assoc = 1;
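                /* Each struct tsb entry is 16 bytes (an 8-byte tag plus
                 * an 8-byte TTE), hence the TTE count handed to the
                 * hypervisor below is tsb_bytes / 16.
                 */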
                hp->num_ttes = tsb_bytes / 16;
                hp->ctx_idx = 0;
                switch (tsb_idx) {
                case MM_TSB_BASE:
                        hp->pgsz_mask = HV_PGSZ_MASK_BASE;
                        break;
#ifdef CONFIG_HUGETLB_PAGE
                case MM_TSB_HUGE:
                        hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
                        break;
#endif
                default:
                        BUG();
                }
                hp->tsb_base = tsb_paddr;
                hp->resv = 0;
        }
}
struct kmem_cache *pgtable_cache __read_mostly;

static struct kmem_cache *tsb_caches[8] __read_mostly;

static const char *tsb_cache_names[8] = {
        "tsb_8KB",
        "tsb_16KB",
        "tsb_32KB",
        "tsb_64KB",
        "tsb_128KB",
        "tsb_256KB",
        "tsb_512KB",
        "tsb_1MB",
};
void __init pgtable_cache_init(void)
{
        unsigned long i;

        pgtable_cache = kmem_cache_create("pgtable_cache",
                                          PAGE_SIZE, PAGE_SIZE,
                                          0,
                                          _clear_page);
        if (!pgtable_cache) {
                prom_printf("pgtable_cache_init(): Could not create!\n");
                prom_halt();
        }

        for (i = 0; i < 8; i++) {
                unsigned long size = 8192 << i;
                const char *name = tsb_cache_names[i];

                tsb_caches[i] = kmem_cache_create(name,
                                                  size, size,
                                                  0, NULL);
                if (!tsb_caches[i]) {
                        prom_printf("Could not create %s cache\n", name);
                        prom_halt();
                }
        }
}
int sysctl_tsb_ratio = -2;

static unsigned long tsb_size_to_rss_limit(unsigned long new_size)
{
        unsigned long num_ents = (new_size / sizeof(struct tsb));

        if (sysctl_tsb_ratio < 0)
                return num_ents - (num_ents >> -sysctl_tsb_ratio);
        else
                return num_ents + (num_ents >> sysctl_tsb_ratio);
}
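/* Worked example with the default sysctl_tsb_ratio of -2: a 512K TSB
 * holds 512K / sizeof(struct tsb) = 32768 entries, so the limit is
 * 32768 - (32768 >> 2) = 24576.  In other words, RSS may reach 3/4 of
 * TSB capacity before a grow is triggered, matching the comment above
 * tsb_grow() below.
 */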
/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
 * do_sparc64_fault() invokes this routine to try to grow it.
 *
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.  It also must be physically contiguous, so we
 * cannot use vmalloc().
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
{
        unsigned long max_tsb_size = 1 * 1024 * 1024;
        unsigned long new_size, old_size, flags;
        struct tsb *old_tsb, *new_tsb;
        unsigned long new_cache_index, old_cache_index;
        unsigned long new_rss_limit;
        gfp_t gfp_flags;

        if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
                max_tsb_size = (PAGE_SIZE << MAX_ORDER);

        new_cache_index = 0;
        for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
                new_rss_limit = tsb_size_to_rss_limit(new_size);
                if (new_rss_limit > rss)
                        break;
                new_cache_index++;
        }

        if (new_size == max_tsb_size)
                new_rss_limit = ~0UL;
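        /* For example, with the default sysctl_tsb_ratio an RSS of 10000
         * pages walks the sizing loop above until the 256K TSB (16384
         * entries, RSS limit 12288) is the first size whose limit exceeds
         * the RSS, leaving new_cache_index at 5.
         */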
retry_tsb_alloc:
        gfp_flags = GFP_KERNEL;
        if (new_size > (PAGE_SIZE * 2))
                gfp_flags |= __GFP_NOWARN | __GFP_NORETRY;

        new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index],
                                        gfp_flags, numa_node_id());
        if (unlikely(!new_tsb)) {
                /* Not being able to fork due to a high-order TSB
                 * allocation failure is very bad behavior.  Just back
                 * down to a 0-order allocation and force no TSB
                 * growing for this address space.
                 */
                if (mm->context.tsb_block[tsb_index].tsb == NULL &&
                    new_cache_index > 0) {
                        new_cache_index = 0;
                        new_size = 8192;
                        new_rss_limit = ~0UL;
                        goto retry_tsb_alloc;
                }

                /* If we failed on a TSB grow, we are under serious
                 * memory pressure so don't try to grow any more.
                 */
                if (mm->context.tsb_block[tsb_index].tsb != NULL)
                        mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
                return;
        }
        /* Mark all tags as invalid.  */
        tsb_init(new_tsb, new_size);
        /* Ok, we are about to commit the changes.  If we are
         * growing an existing TSB the locking is very tricky,
         * so WATCH OUT!
         *
         * We have to hold mm->context.lock while committing to the
         * new TSB, this synchronizes us with processors in
         * flush_tsb_user() and switch_mm() for this address space.
         *
         * But even with that lock held, processors run asynchronously
         * accessing the old TSB via TLB miss handling.  This is OK
         * because those actions are just propagating state from the
         * Linux page tables into the TSB, page table mappings are not
         * being changed.  If a real fault occurs, the processor will
         * synchronize with us when it hits flush_tsb_user(), this is
         * also true for the case where vmscan is modifying the page
         * tables.  The only thing we need to be careful with is to
         * skip any locked TSB entries during copy_tsb().
         *
         * When we finish committing to the new TSB, we have to drop
         * the lock and ask all other cpus running this address space
         * to run tsb_context_switch() to see the new TSB table.
         */
        spin_lock_irqsave(&mm->context.lock, flags);

        old_tsb = mm->context.tsb_block[tsb_index].tsb;
        old_cache_index =
                (mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
        old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
                    sizeof(struct tsb));
        /* Handle multiple threads trying to grow the TSB at the same time.
         * One will get in here first, and bump the size and the RSS limit.
         * The others will get in here next and hit this check.
         */
        if (unlikely(old_tsb &&
                     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
                spin_unlock_irqrestore(&mm->context.lock, flags);

                kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
                return;
        }

        mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;
        if (old_tsb) {
                extern void copy_tsb(unsigned long old_tsb_base,
                                     unsigned long old_tsb_size,
                                     unsigned long new_tsb_base,
                                     unsigned long new_tsb_size);
                unsigned long old_tsb_base = (unsigned long) old_tsb;
                unsigned long new_tsb_base = (unsigned long) new_tsb;

                if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
                        old_tsb_base = __pa(old_tsb_base);
                        new_tsb_base = __pa(new_tsb_base);
                }
                copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
        }
        mm->context.tsb_block[tsb_index].tsb = new_tsb;
        setup_tsb_params(mm, tsb_index, new_size);

        spin_unlock_irqrestore(&mm->context.lock, flags);

        /* If old_tsb is NULL, we're being invoked for the first time
         * from init_new_context().
         */
        if (old_tsb) {
                /* Reload it on the local cpu.  */
                tsb_context_switch(mm);

                /* Now force other processors to do the same.  */
                preempt_disable();
                smp_tsb_sync(mm);
                preempt_enable();

                /* Now it is safe to free the old tsb.  */
                kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
        }
}
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#ifdef CONFIG_HUGETLB_PAGE
        unsigned long huge_pte_count;
#endif
        unsigned int i;

        spin_lock_init(&mm->context.lock);

        mm->context.sparc64_ctx_val = 0UL;

#ifdef CONFIG_HUGETLB_PAGE
        /* We reset it to zero because the fork() page copying
         * will re-increment the counters as the parent PTEs are
         * copied into the child address space.
         */
        huge_pte_count = mm->context.huge_pte_count;
        mm->context.huge_pte_count = 0;
#endif

        /* copy_mm() copies over the parent's mm_struct before calling
         * us, so we need to zero out the TSB pointer or else tsb_grow()
         * will be confused and think there is an older TSB to free up.
         */
        for (i = 0; i < MM_NUM_TSBS; i++)
                mm->context.tsb_block[i].tsb = NULL;

        /* If this is fork, inherit the parent's TSB size.  We would
         * grow it to that size on the first page fault anyway.
         */
        tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));

#ifdef CONFIG_HUGETLB_PAGE
        if (unlikely(huge_pte_count))
                tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
#endif

        if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
                return -ENOMEM;

        return 0;
}
static void tsb_destroy_one(struct tsb_config *tp)
{
        unsigned long cache_index;

        if (!tp->tsb)
                return;
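        /* The low three bits of tsb_reg_val hold the size index that
         * setup_tsb_params() assigned (0x0 for 8K up to 0x7 for 1MB),
         * which doubles as the index into tsb_caches[].
         */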
        cache_index = tp->tsb_reg_val & 0x7UL;
        kmem_cache_free(tsb_caches[cache_index], tp->tsb);
        tp->tsb = NULL;
        tp->tsb_reg_val = 0UL;
}
void destroy_context(struct mm_struct *mm)
{
        unsigned long flags, i;

        for (i = 0; i < MM_NUM_TSBS; i++)
                tsb_destroy_one(&mm->context.tsb_block[i]);

        spin_lock_irqsave(&ctx_alloc_lock, flags);

        if (CTX_VALID(mm->context)) {
                unsigned long nr = CTX_NRBITS(mm->context);
                mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
        }

        spin_unlock_irqrestore(&ctx_alloc_lock, flags);
}