/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#define NUM_LOW_AREAS	(0x100000000UL >> SID_SHIFT)
#define NUM_HIGH_AREAS	(PGTABLE_RANGE >> HTLB_AREA_SHIFT)
#ifdef CONFIG_PPC_64K_PAGES
#define HUGEPTE_INDEX_SIZE	(PMD_SHIFT-HPAGE_SHIFT)
#else
#define HUGEPTE_INDEX_SIZE	(PUD_SHIFT-HPAGE_SHIFT)
#endif
#define PTRS_PER_HUGEPTE	(1 << HUGEPTE_INDEX_SIZE)
#define HUGEPTE_TABLE_SIZE	(sizeof(pte_t) << HUGEPTE_INDEX_SIZE)

#define HUGEPD_SHIFT		(HPAGE_SHIFT + HUGEPTE_INDEX_SIZE)
#define HUGEPD_SIZE		(1UL << HUGEPD_SHIFT)
#define HUGEPD_MASK		(~(HUGEPD_SIZE-1))
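
/*
 * Worked example (illustrative only; the real values come from the
 * page-table headers): with 16MB huge pages HPAGE_SHIFT is 24.  If
 * PUD_SHIFT were 30, a hugepte table would have
 * HUGEPTE_INDEX_SIZE = 30 - 24 = 6, so PTRS_PER_HUGEPTE = 64 entries
 * of 8 bytes each (HUGEPTE_TABLE_SIZE = 512), and a single table
 * would map HUGEPD_SIZE = 1GB of address space.
 */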
#define huge_pgtable_cache	(pgtable_cache[HUGEPTE_CACHE_NUM])
/* Flag to mark huge PD pointers.  This means pmd_bad() and pud_bad()
 * will choke on pointers to hugepte tables, which is handy for
 * catching screwups early. */
#define HUGEPD_OK	0x1

typedef struct { unsigned long pd; } hugepd_t;

#define hugepd_none(hpd)	((hpd).pd == 0)
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!(hpd.pd & HUGEPD_OK));
	return (pte_t *)(hpd.pd & ~HUGEPD_OK);
}
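
/*
 * Sketch of the tag encoding (addresses invented for illustration):
 * hugepte tables come from a slab cache and are therefore at least
 * word aligned, so bit 0 is always free to carry HUGEPD_OK:
 *
 *	hpdp->pd = (unsigned long)new | HUGEPD_OK;   stores ...123400 | 1
 *	hugepd_page() masks the bit back off, giving ...123400 again
 */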
static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr)
{
	unsigned long idx = ((addr >> HPAGE_SHIFT) & (PTRS_PER_HUGEPTE-1));
	pte_t *dir = hugepd_page(*hpdp);

	return dir + idx;
}
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address)
{
	pte_t *new = kmem_cache_alloc(huge_pgtable_cache,
				      GFP_KERNEL|__GFP_REPEAT);

	if (! new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
	if (!hugepd_none(*hpdp))
		kmem_cache_free(huge_pgtable_cache, new);
	else
		hpdp->pd = (unsigned long)new | HUGEPD_OK;
	spin_unlock(&mm->page_table_lock);
	return 0;
}
/* Modelled after find_linux_pte() */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pg;
	pud_t *pu;

	BUG_ON(! in_hugepage_area(mm->context, addr));

	addr &= HPAGE_MASK;

	pg = pgd_offset(mm, addr);
	if (!pgd_none(*pg)) {
		pu = pud_offset(pg, addr);
		if (!pud_none(*pu)) {
#ifdef CONFIG_PPC_64K_PAGES
			pmd_t *pm;
			pm = pmd_offset(pu, addr);
			if (!pmd_none(*pm))
				return hugepte_offset((hugepd_t *)pm, addr);
#else
			return hugepte_offset((hugepd_t *)pu, addr);
#endif
		}
	}

	return NULL;
}
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pg;
	pud_t *pu;
	hugepd_t *hpdp = NULL;

	BUG_ON(! in_hugepage_area(mm->context, addr));

	addr &= HPAGE_MASK;

	pg = pgd_offset(mm, addr);
	pu = pud_alloc(mm, pg, addr);

	if (pu) {
#ifdef CONFIG_PPC_64K_PAGES
		pmd_t *pm;
		pm = pmd_alloc(mm, pu, addr);
		if (pm)
			hpdp = (hugepd_t *)pm;
#else
		hpdp = (hugepd_t *)pu;
#endif
	}

	if (! hpdp)
		return NULL;

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr))
		return NULL;

	return hugepte_offset(hpdp, addr);
}
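
/*
 * Illustrative caller sketch (the real fault path lives in the
 * generic hugetlb code; this only shows how the helpers pair up):
 *
 *	pte_t *ptep = huge_pte_alloc(mm, addr);
 *	if (!ptep)
 *		return VM_FAULT_OOM;
 *	set_huge_pte_at(mm, addr, ptep, mk_pte(page, vma->vm_page_prot));
 *
 * huge_pte_alloc() fills in the pgd/pud (and pmd on 64K-page configs)
 * as needed and returns a pointer into the hugepte table;
 * set_huge_pte_at() below installs the translation.
 */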
static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp)
{
	pte_t *hugepte = hugepd_page(*hpdp);

	hpdp->pd = 0;
	tlb->need_flush = 1;
	pgtable_free_tlb(tlb, pgtable_free_cache(hugepte, HUGEPTE_CACHE_NUM,
						 PGF_CACHENUM_MASK));
}
#ifdef CONFIG_PPC_64K_PAGES
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		free_hugepte_range(tlb, (hugepd_t *)pmd);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd);
}
#endif
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
#ifdef CONFIG_PPC_64K_PAGES
		if (pud_none_or_clear_bad(pud))
			continue;
		hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling);
#else
		if (pud_none(*pud))
			continue;
		free_hugepte_range(tlb, (hugepd_t *)pud);
#endif
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud);
}
/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather **tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start;
	/*
	 * Comments below taken from the normal free_pgd_range().  They
	 * apply here too.  The tests against HUGEPD_MASK below are
	 * essential, because we *don't* test for this at the bottom
	 * level.  Without them we'll attempt to free a hugepte table
	 * when we unmap just part of it, even if there are other
	 * active mappings using it.
	 *
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing HUGEPD* at this top level?  Because
	 * often there will be no work to do at all, and we'd prefer
	 * not to go all the way down to the bottom just to discover
	 * that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we
	 * must be careful to reject "the opposite 0" before it
	 * confuses the subsequent tests.  But what about where end is
	 * brought down by HUGEPD_SIZE below? no, end can't go down to
	 * 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */
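
	/*
	 * Worked example of the "- 1" idiom (illustrative numbers,
	 * pretending HUGEPD_SIZE is 0x10000000): with end = 0x20000000
	 * and ceiling = 0 ("top of address space"), the test
	 * "end - 1 > ceiling - 1" compares 0x1fffffff against ~0UL,
	 * is false, and the final hugepte table may be freed.  With a
	 * real ceiling that masks down to 0x10000000, the comparison
	 * 0x1fffffff > 0x0fffffff is true, and end is pulled back by
	 * HUGEPD_SIZE so the table straddling the ceiling survives.
	 */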
	addr &= HUGEPD_MASK;
	if (addr < floor) {
		addr += HUGEPD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= HUGEPD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= HUGEPD_SIZE;
	if (addr > end - 1)
		return;
	start = addr;
	pgd = pgd_offset((*tlb)->mm, addr);
	do {
		BUG_ON(! in_hugepage_area((*tlb)->mm->context, addr));
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		hugetlb_free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	if (pte_present(*ptep)) {
		/* We open-code pte_clear because we need to pass the right
		 * argument to hpte_update (huge / !huge) */
		unsigned long old = pte_update(ptep, ~0UL);
		if (old & _PAGE_HASHPTE)
			hpte_update(mm, addr & HPAGE_MASK, ptep, old, 1);
		flush_tlb_pending();
	}
	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
}
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long old = pte_update(ptep, ~0UL);

	if (old & _PAGE_HASHPTE)
		hpte_update(mm, addr & HPAGE_MASK, ptep, old, 1);
	*ptep = __pte(0);

	return __pte(old);
}
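
/*
 * Both helpers above rely on pte_update(ptep, ~0UL), which atomically
 * clears the whole pte and returns its old value.  When the old pte
 * had _PAGE_HASHPTE set, a hash table entry may exist for the
 * mapping, so hpte_update() is called with the final "huge" argument
 * set to 1 to queue invalidation of a hugepage-sized hash entry
 * rather than a normal-page one.
 */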
struct slb_flush_info {
	struct mm_struct *mm;
	u16 newareas;
};
static void flush_low_segments(void *parm)
{
	struct slb_flush_info *fi = parm;
	unsigned long i;

	BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_LOW_AREAS);

	/* Only need to do anything if this CPU is working in the same
	 * mm as the one which has changed */
	if (current->active_mm != fi->mm)
		return;

	/* update the paca copy of the context struct */
	get_paca()->context = current->active_mm->context;

	asm volatile("isync" : : : "memory");
	for (i = 0; i < NUM_LOW_AREAS; i++) {
		if (! (fi->newareas & (1U << i)))
			continue;
		asm volatile("slbie %0"
			     : : "r" ((i << SID_SHIFT) | SLBIE_C));
	}
	asm volatile("isync" : : : "memory");
}
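
/*
 * Illustrative note: SID_SHIFT is 28, so each low area is one 256MB
 * segment.  For i = 2 the slbie operand is (2 << 28) | SLBIE_C, i.e.
 * drop the SLB entry covering 0x20000000-0x2fffffff; SLBIE_C selects
 * the entry class used for hugepage segments.  The high-area variant
 * below does the same, but loops over every 256MB segment contained
 * in each (larger) high area.
 */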
static void flush_high_segments(void *parm)
{
	struct slb_flush_info *fi = parm;
	unsigned long i, j;

	BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_HIGH_AREAS);

	/* Only need to do anything if this CPU is working in the same
	 * mm as the one which has changed */
	if (current->active_mm != fi->mm)
		return;

	/* update the paca copy of the context struct */
	get_paca()->context = current->active_mm->context;

	asm volatile("isync" : : : "memory");
	for (i = 0; i < NUM_HIGH_AREAS; i++) {
		if (! (fi->newareas & (1U << i)))
			continue;
		for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
			asm volatile("slbie %0"
				     :: "r" (((i << HTLB_AREA_SHIFT)
					      + (j << SID_SHIFT)) | SLBIE_C));
	}
	asm volatile("isync" : : : "memory");
}
static int prepare_low_area_for_htlb(struct mm_struct *mm, unsigned long area)
{
	unsigned long start = area << SID_SHIFT;
	unsigned long end = (area+1) << SID_SHIFT;
	struct vm_area_struct *vma;

	BUG_ON(area >= NUM_LOW_AREAS);

	/* Check no VMAs are in the region */
	vma = find_vma(mm, start);
	if (vma && (vma->vm_start < end))
		return -EBUSY;

	return 0;
}
static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
{
	unsigned long start = area << HTLB_AREA_SHIFT;
	unsigned long end = (area+1) << HTLB_AREA_SHIFT;
	struct vm_area_struct *vma;

	BUG_ON(area >= NUM_HIGH_AREAS);

	/* Hack, so that each address is controlled by exactly one
	 * of the high or low area bitmaps, the first high area starts
	 * at 4GB, not 0 */
	if (start == 0)
		start = 0x100000000UL;

	/* Check no VMAs are in the region */
	vma = find_vma(mm, start);
	if (vma && (vma->vm_start < end))
		return -EBUSY;

	return 0;
}
static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
{
	unsigned long i;
	struct slb_flush_info fi;

	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS);
	BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS);

	newareas &= ~(mm->context.low_htlb_areas);
	if (! newareas)
		return 0; /* The segments we want are already open */

	for (i = 0; i < NUM_LOW_AREAS; i++)
		if ((1 << i) & newareas)
			if (prepare_low_area_for_htlb(mm, i) != 0)
				return -EBUSY;

	mm->context.low_htlb_areas |= newareas;

	/* the context change must make it to memory before the flush,
	 * so that further SLB misses do the right thing. */
	mb();

	fi.mm = mm;
	fi.newareas = newareas;
	on_each_cpu(flush_low_segments, &fi, 0, 1);

	return 0;
}
static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
{
	unsigned long i;
	struct slb_flush_info fi;

	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS);
	BUILD_BUG_ON((sizeof(mm->context.high_htlb_areas)*8)
		     != NUM_HIGH_AREAS);

	newareas &= ~(mm->context.high_htlb_areas);
	if (! newareas)
		return 0; /* The areas we want are already open */

	for (i = 0; i < NUM_HIGH_AREAS; i++)
		if ((1 << i) & newareas)
			if (prepare_high_area_for_htlb(mm, i) != 0)
				return -EBUSY;

	mm->context.high_htlb_areas |= newareas;

	/* the context change must make it to memory before the flush,
	 * so that further SLB misses do the right thing. */
	mb();

	fi.mm = mm;
	fi.newareas = newareas;
	on_each_cpu(flush_high_segments, &fi, 0, 1);

	return 0;
}
int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
	int err = 0;

	if ( (addr+len) < addr )
		return -EINVAL;

	if (addr < 0x100000000UL)
		err = open_low_hpage_areas(current->mm,
					  LOW_ESID_MASK(addr, len));
	if ((addr + len) > 0x100000000UL)
		err = open_high_hpage_areas(current->mm,
					    HTLB_AREA_MASK(addr, len));
	if (err) {
		printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)"
		       " failed (lowmask: 0x%04hx, highmask: 0x%04hx)\n",
		       addr, len,
		       LOW_ESID_MASK(addr, len), HTLB_AREA_MASK(addr, len));
		return err;
	}

	return 0;
}
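
/*
 * Example (illustrative): a hint of addr = 0x20000000 with
 * len = 0x20000000 touches 256MB segments 2 and 3, so
 * LOW_ESID_MASK(addr, len) is 0x000c and only those two low areas
 * need to be opened.  A range straddling the 4GB boundary is split:
 * the low mask covers the segments below 4GB, HTLB_AREA_MASK() the
 * areas above it.
 */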
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;

	if (! in_hugepage_area(mm->context, address))
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, address);
	page = pte_page(*ptep);
	if (page)
		page += (address % HPAGE_SIZE) / PAGE_SIZE;

	return page;
}
int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}
/* Because we have an exclusive hugepage region which lies within the
 * normal user address space, we have to take special measures to make
 * non-huge mmap()s evade the hugepage reserved regions. */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (((TASK_SIZE - len) >= addr)
		    && (!vma || (addr+len) <= vma->vm_start)
		    && !is_hugepage_only_range(mm, addr, len))
			return addr;
	}
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	vma = find_vma(mm, addr);
	while (TASK_SIZE - len >= addr) {
		BUG_ON(vma && (addr >= vma->vm_end));

		if (touches_hugepage_low_range(mm, addr, len)) {
			addr = ALIGN(addr+1, 1<<SID_SHIFT);
			vma = find_vma(mm, addr);
			continue;
		}
		if (touches_hugepage_high_range(mm, addr, len)) {
			addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
			vma = find_vma(mm, addr);
			continue;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
		vma = vma->vm_next;
	}
	/* Make sure we didn't miss any holes */
	if (start_addr != TASK_UNMAPPED_BASE) {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
		goto full_search;
	}
	return -ENOMEM;
}
/*
 * This mmap-allocator allocates new areas top-down from below the
 * stack's low limit (the base):
 *
 * Because we have an exclusive hugepage region which lies within the
 * normal user address space, we have to take special measures to make
 * non-huge mmap()s evade the hugepage reserved regions.
 */
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	unsigned long base = mm->mmap_base, addr = addr0;
	unsigned long largest_hole = mm->cached_hole_size;
	int first_time = 1;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	/* don't allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;
	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr
		    && (!vma || addr + len <= vma->vm_start)
		    && !is_hugepage_only_range(mm, addr, len))
			return addr;
	}

	if (len <= largest_hole) {
		largest_hole = 0;
		mm->free_area_cache = base;
	}
try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or can't fit in requested address hole */
	addr = (mm->free_area_cache - len) & PAGE_MASK;
	do {
hugepage_recheck:
		if (touches_hugepage_low_range(mm, addr, len)) {
			addr = (addr & ((~0) << SID_SHIFT)) - len;
			goto hugepage_recheck;
		} else if (touches_hugepage_high_range(mm, addr, len)) {
			addr = (addr & ((~0UL) << HTLB_AREA_SHIFT)) - len;
			goto hugepage_recheck;
		}
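
		/*
		 * Example of the step-down above (illustrative numbers,
		 * SID_SHIFT = 28): with len = 0x1000000 and
		 * addr = 0x30800000 inside an open hugepage segment
		 * (0x30000000-0x3fffffff), addr becomes
		 * (addr & (~0 << 28)) - len = 0x2f000000, just below
		 * the reserved segment.  The goto rechecks because the
		 * new range may touch yet another hugepage area.
		 */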
		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		if (addr+len <= vma->vm_start &&
		    (!prev_vma || (addr >= prev_vma->vm_end))) {
			/* remember the address as a hint for next time */
			mm->cached_hole_size = largest_hole;
			return (mm->free_area_cache = addr);
		} else {
			/* pull free_area_cache down to the first hole */
			if (mm->free_area_cache == vma->vm_end) {
				mm->free_area_cache = vma->vm_start;
				mm->cached_hole_size = largest_hole;
			}
		}

		/* remember the largest hole we saw so far */
		if (addr + largest_hole < vma->vm_start)
			largest_hole = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
	} while (len <= vma->vm_start);
fail:
	/*
	 * if hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (first_time) {
		mm->free_area_cache = base;
		largest_hole = 0;
		first_time = 0;
		goto try_again;
	}
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);

	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
static int htlb_check_hinted_area(unsigned long addr, unsigned long len)
{
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	if (!vma || ((addr + len) <= vma->vm_start))
		return 0;

	return -ENOMEM;
}
static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
{
	unsigned long addr = 0;
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	while (addr + len <= 0x100000000UL) {
		BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */

		if (! __within_hugepage_low_range(addr, len, segmask)) {
			addr = ALIGN(addr+1, 1<<SID_SHIFT);
			vma = find_vma(current->mm, addr);
			continue;
		}

		if (!vma || (addr + len) <= vma->vm_start)
			return addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
		/* Depending on segmask this might not be a confirmed
		 * hugepage region, so the ALIGN could have skipped
		 * some VMAs */
		vma = find_vma(current->mm, addr);
	}

	return -ENOMEM;
}
static unsigned long htlb_get_high_area(unsigned long len, u16 areamask)
{
	unsigned long addr = 0x100000000UL;
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	while (addr + len <= TASK_SIZE_USER64) {
		BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */

		if (! __within_hugepage_high_range(addr, len, areamask)) {
			addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
			vma = find_vma(current->mm, addr);
			continue;
		}

		if (!vma || (addr + len) <= vma->vm_start)
			return addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
		/* Depending on areamask this might not be a confirmed
		 * hugepage region, so the ALIGN could have skipped
		 * some VMAs */
		vma = find_vma(current->mm, addr);
	}

	return -ENOMEM;
}
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	int lastshift;
	u16 areamask, curareas;

	if (HPAGE_SHIFT == 0)
		return -EINVAL;
	if (len & ~HPAGE_MASK)
		return -EINVAL;

	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
		return -EINVAL;

	/* Paranoia, caller should have dealt with this */
	BUG_ON((addr + len) < addr);

	if (test_thread_flag(TIF_32BIT)) {
		/* Paranoia, caller should have dealt with this */
		BUG_ON((addr + len) > 0x100000000UL);

		curareas = current->mm->context.low_htlb_areas;
		/* First see if we can use the hint address */
		if (addr && (htlb_check_hinted_area(addr, len) == 0)) {
			areamask = LOW_ESID_MASK(addr, len);
			if (open_low_hpage_areas(current->mm, areamask) == 0)
				return addr;
		}

		/* Next see if we can map in the existing low areas */
		addr = htlb_get_low_area(len, curareas);
		if (addr != -ENOMEM)
			return addr;

		/* Finally go looking for areas to open */
		lastshift = 0;
		for (areamask = LOW_ESID_MASK(0x100000000UL-len, len);
		     ! lastshift; areamask >>= 1) {
			if (areamask & 1)
				lastshift = 1;

			addr = htlb_get_low_area(len, curareas | areamask);
			if ((addr != -ENOMEM)
			    && open_low_hpage_areas(current->mm, areamask) == 0)
				return addr;
		}
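
		/*
		 * Note on the loop above (illustrative): it starts from
		 * the mask an allocation at the very top of the 4GB
		 * space would need and shifts right until bit 0 has
		 * been tried (lastshift).  For a one-segment len the
		 * candidates are 0x8000 (segment 15), then 0x4000, and
		 * so on down to 0x0001; already-open areas stay usable
		 * because curareas is OR-ed in for each probe.
		 */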
	} else {
		curareas = current->mm->context.high_htlb_areas;

		/* First see if we can use the hint address */
		/* We discourage 64-bit processes from doing hugepage
		 * mappings below 4GB (must use MAP_FIXED) */
		if ((addr >= 0x100000000UL)
		    && (htlb_check_hinted_area(addr, len) == 0)) {
			areamask = HTLB_AREA_MASK(addr, len);
			if (open_high_hpage_areas(current->mm, areamask) == 0)
				return addr;
		}

		/* Next see if we can map in the existing high areas */
		addr = htlb_get_high_area(len, curareas);
		if (addr != -ENOMEM)
			return addr;

		/* Finally go looking for areas to open */
		lastshift = 0;
		for (areamask = HTLB_AREA_MASK(TASK_SIZE_USER64-len, len);
		     ! lastshift; areamask >>= 1) {
			if (areamask & 1)
				lastshift = 1;

			addr = htlb_get_high_area(len, curareas | areamask);
			if ((addr != -ENOMEM)
			    && open_high_hpage_areas(current->mm, areamask) == 0)
				return addr;
		}
	}
	printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open"
	       " enough areas\n");
	return -ENOMEM;
}
/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
						  pte_t pte, int trap)
{
	struct page *page;
	int i;

	if (!pfn_valid(pte_pfn(pte)))
		return rflags;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			/* instruction fault: flush every base page and
			 * mark the huge page clean */
			for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++)
				__flush_dcache_icache(page_address(page+i));
			set_bit(PG_arch_1, &page->flags);
		} else {
			/* data fault: just deny execute permission */
			rflags |= HPTE_R_N;
		}
	}
	return rflags;
}
int hash_huge_page(struct mm_struct *mm, unsigned long access,
		   unsigned long ea, unsigned long vsid, int local,
		   unsigned long trap)
{
	pte_t *ptep;
	unsigned long old_pte, new_pte;
	unsigned long va, rflags, pa;
	long slot;
	int err = 1;

	ptep = huge_pte_offset(mm, ea);

	/* Search the Linux page table for a match with va */
	va = (vsid << 28) | (ea & 0x0fffffff);

	/*
	 * If no pte found or not present, send the problem up to
	 * do_page_fault
	 */
	if (unlikely(!ptep || pte_none(*ptep)))
		goto out;
	/*
	 * Check the user's access rights to the page.  If access should be
	 * prevented then send the problem up to do_page_fault.
	 */
	if (unlikely(access & ~pte_val(*ptep)))
		goto out;
	/*
	 * At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE. There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is
	 *	the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE. The
	 *	current values of the pp bits in the HPTE prevent access
	 *	because we are doing software DIRTY bit management and the
	 *	page is currently not DIRTY.
	 */
	do {
		old_pte = pte_val(*ptep);
		if (old_pte & _PAGE_BUSY)
			goto out;
		new_pte = old_pte | _PAGE_BUSY |
			_PAGE_ACCESSED | _PAGE_HASHPTE;
	} while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
					  old_pte, new_pte));
	rflags = 0x2 | (!(new_pte & _PAGE_RW));
	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
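	/*
	 * The 0x2 above is the HPTE pp encoding: pp=10 grants user
	 * read/write; OR-ing in 1 when _PAGE_RW is clear gives pp=11,
	 * user read-only.  HPTE_R_N is the hardware no-execute bit,
	 * hence the inverted sense relative to _PAGE_EXEC.
	 */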
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		/* No CPU has hugepages but lacks no execute, so we
		 * don't need to worry about that case */
		rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
						       trap);
	/* Check if pte already has an hpte (case 2) */
	if (unlikely(old_pte & _PAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long hash, slot;

		hash = hpt_hash(va, HPAGE_SHIFT);
		if (old_pte & _PAGE_F_SECOND)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += (old_pte & _PAGE_F_GIX) >> 12;

		if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_huge_psize,
					 local) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}
	if (likely(!(old_pte & _PAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(va, HPAGE_SHIFT);
		unsigned long hpte_group;

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

repeat:
		hpte_group = ((hash & htab_hash_mask) *
			      HPTES_PER_GROUP) & ~0x7UL;
		/* clear HPTE slot information in new PTE */
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;

		/* Add in WIMG bits */
		/* XXX We should store these in the pte */
		/* --BenH: I think they are ... */
		rflags |= _PAGE_COHERENT;
		/* Insert into the hash table, primary slot */
		slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
					  mmu_huge_psize);

		/* Primary is full, try the secondary */
		if (unlikely(slot == -1)) {
			new_pte |= _PAGE_F_SECOND;
			hpte_group = ((~hash & htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL;
			slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
						  HPTE_V_SECONDARY,
						  mmu_huge_psize);
			if (slot == -1) {
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_hash_mask) *
						      HPTES_PER_GROUP) & ~0x7UL;

				ppc_md.hpte_remove(hpte_group);
				goto repeat;
			}
		}

		if (unlikely(slot == -2))
			panic("hash_huge_page: pte_insert failed\n");

		new_pte |= (slot << 12) & _PAGE_F_GIX;
	}
	/*
	 * No need to use ldarx/stdcx here
	 */
	*ptep = __pte(new_pte & ~_PAGE_BUSY);

	err = 0;

 out:
	return err;
}
static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
{
	memset(addr, 0, kmem_cache_size(cache));
}
static int __init hugetlbpage_init(void)
{
	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
		return -ENODEV;

	huge_pgtable_cache = kmem_cache_create("hugepte_cache",
					       HUGEPTE_TABLE_SIZE,
					       HUGEPTE_TABLE_SIZE,
					       SLAB_HWCACHE_ALIGN |
					       SLAB_MUST_HWCACHE_ALIGN,
					       zero_ctor, NULL);
	if (! huge_pgtable_cache)
		panic("hugetlbpage_init(): could not create hugepte cache\n");

	return 0;
}

module_init(hugetlbpage_init);