Merge branch 'for-linus' of git://neil.brown.name/md
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 2b5a399..8d3b58e 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -94,6 +94,9 @@ unsigned long htab_hash_mask;
 int mmu_linear_psize = MMU_PAGE_4K;
 int mmu_virtual_psize = MMU_PAGE_4K;
 int mmu_vmalloc_psize = MMU_PAGE_4K;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+int mmu_vmemmap_psize = MMU_PAGE_4K;
+#endif
 int mmu_io_psize = MMU_PAGE_4K;
 int mmu_kernel_ssize = MMU_SEGSIZE_256M;
 int mmu_highuser_ssize = MMU_SEGSIZE_256M;
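
The mmu_*_psize variables above are indexes into mmu_psize_defs[], not byte
sizes; the size of a mapping class comes from the shift recorded in that
table. A minimal sketch of the relationship (the helper name is made up, the
identifiers it uses are from this file):

	static inline unsigned long mmu_psize_to_bytes(int psize)
	{
		/* e.g. MMU_PAGE_4K has shift 12, so 1UL << 12 == 4096 */
		return 1UL << mmu_psize_defs[psize].shift;
	}
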
@@ -117,7 +120,7 @@ static DEFINE_SPINLOCK(linear_map_hash_lock);
 
 /* Pre-POWER4 CPUs (4k pages only)
  */
-struct mmu_psize_def mmu_psize_defaults_old[] = {
+static struct mmu_psize_def mmu_psize_defaults_old[] = {
        [MMU_PAGE_4K] = {
                .shift  = 12,
                .sllp   = 0,
@@ -131,7 +134,7 @@ struct mmu_psize_def mmu_psize_defaults_old[] = {
  *
  * Support for 16Mb large pages
  */
-struct mmu_psize_def mmu_psize_defaults_gp[] = {
+static struct mmu_psize_def mmu_psize_defaults_gp[] = {
        [MMU_PAGE_4K] = {
                .shift  = 12,
                .sllp   = 0,
@@ -387,11 +390,32 @@ static void __init htab_init_page_sizes(void)
        }
 #endif /* CONFIG_PPC_64K_PAGES */
 
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+       /* We try to use 16M pages for vmemmap if that is supported
+        * and we have at least 1G of RAM at boot
+        */
+       if (mmu_psize_defs[MMU_PAGE_16M].shift &&
+           lmb_phys_mem_size() >= 0x40000000)
+               mmu_vmemmap_psize = MMU_PAGE_16M;
+       else if (mmu_psize_defs[MMU_PAGE_64K].shift)
+               mmu_vmemmap_psize = MMU_PAGE_64K;
+       else
+               mmu_vmemmap_psize = MMU_PAGE_4K;
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
        printk(KERN_DEBUG "Page orders: linear mapping = %d, "
-              "virtual = %d, io = %d\n",
+              "virtual = %d, io = %d"
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+              ", vmemmap = %d"
+#endif
+              "\n",
               mmu_psize_defs[mmu_linear_psize].shift,
               mmu_psize_defs[mmu_virtual_psize].shift,
-              mmu_psize_defs[mmu_io_psize].shift);
+              mmu_psize_defs[mmu_io_psize].shift
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+              ,mmu_psize_defs[mmu_vmemmap_psize].shift
+#endif
+              );
 
 #ifdef CONFIG_HUGETLB_PAGE
        /* Init large page size. Currently, we pick 16M or 1M depending
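
Note how the printk splices both the format string and the argument list:
under CONFIG_SPARSEMEM_VMEMMAP the adjacent string literals concatenate at
compile time, and the extra argument rides in via the leading comma so it can
be #ifdef'd out cleanly. On a machine with 16M linear pages and 64K virtual
pages that passes the 1G (0x40000000 byte) test above, the boot line would
read something like this (shift values illustrative):

	Page orders: linear mapping = 24, virtual = 16, io = 12, vmemmap = 24
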
@@ -509,8 +533,6 @@ void __init htab_initialize(void)
        unsigned long base = 0, size = 0, limit;
        int i;
 
-       extern unsigned long tce_alloc_start, tce_alloc_end;
-
        DBG(" -> htab_initialize()\n");
 
        /* Initialize segment sizes */
@@ -673,6 +695,28 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
        return pp;
 }
 
+#ifdef CONFIG_PPC_MM_SLICES
+unsigned int get_paca_psize(unsigned long addr)
+{
+       unsigned long index, slices;
+
+       if (addr < SLICE_LOW_TOP) {
+               slices = get_paca()->context.low_slices_psize;
+               index = GET_LOW_SLICE_INDEX(addr);
+       } else {
+               slices = get_paca()->context.high_slices_psize;
+               index = GET_HIGH_SLICE_INDEX(addr);
+       }
+       return (slices >> (index * 4)) & 0xF;
+}
+
+#else
+unsigned int get_paca_psize(unsigned long addr)
+{
+       return get_paca()->context.user_psize;
+}
+#endif
+
 /*
  * Demote a segment to using 4k pages.
- * For now this makes the whole process use 4k pages.
+ * This demotes only the slice containing "addr", not the whole process.
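
get_paca_psize() above relies on the slice page-size arrays packing one
page-size index per nibble, so slice N lives in bits 4N..4N+3. A standalone
illustration of the decode, with a made-up packing value:

	unsigned long slices = 0x510UL;			/* made-up example */
	unsigned int s0 = (slices >> (0 * 4)) & 0xF;	/* slice 0 -> psize 0 */
	unsigned int s1 = (slices >> (1 * 4)) & 0xF;	/* slice 1 -> psize 1 */
	unsigned int s2 = (slices >> (2 * 4)) & 0xF;	/* slice 2 -> psize 5 */
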
@@ -680,13 +724,13 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
 #ifdef CONFIG_PPC_64K_PAGES
 void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
 {
-       if (mm->context.user_psize == MMU_PAGE_4K)
+       if (get_slice_psize(mm, addr) == MMU_PAGE_4K)
                return;
-       slice_set_user_psize(mm, MMU_PAGE_4K);
+       slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
 #ifdef CONFIG_SPU_BASE
        spu_flush_all_slbs(mm);
 #endif
-       if (get_paca()->context.user_psize != MMU_PAGE_4K) {
+       if (get_paca_psize(addr) != MMU_PAGE_4K) {
                get_paca()->context = mm->context;
                slb_flush_and_rebolt();
        }
@@ -770,11 +814,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
                        DBG_LOW(" user region with no mm !\n");
                        return 1;
                }
-#ifdef CONFIG_PPC_MM_SLICES
                psize = get_slice_psize(mm, ea);
-#else
-               psize = mm->context.user_psize;
-#endif
                ssize = user_segment_size(ea);
                vsid = get_vsid(mm->context.id, ea, ssize);
                break;
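
Dropping the #ifdef is safe because the !CONFIG_PPC_MM_SLICES configuration
provides a trivial get_slice_psize() of its own, a macro along these lines
(quoted from memory of asm/page_64.h at this vintage, so treat it as an
assumption):

	#define get_slice_psize(mm, addr)	((mm)->context.user_psize)

so the unconditional call compiles down to exactly what the removed #else
branch did.
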
@@ -846,7 +886,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
        /* Do actual hashing */
 #ifdef CONFIG_PPC_64K_PAGES
        /* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
-       if (pte_val(*ptep) & _PAGE_4K_PFN) {
+       if ((pte_val(*ptep) & _PAGE_4K_PFN) && psize == MMU_PAGE_64K) {
                demote_segment_4k(mm, ea);
                psize = MMU_PAGE_4K;
        }
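
The added psize check makes the demotion conditional on the segment actually
being a 64K one: with the per-slice lookup earlier in hash_page(), psize may
already be MMU_PAGE_4K for this address, in which case a _PAGE_4K_PFN pte
needs no demotion and the call would be wasted work.
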
@@ -875,7 +915,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
                }
        }
        if (user_region) {
-               if (psize != get_paca()->context.user_psize) {
+               if (psize != get_paca_psize(ea)) {
                        get_paca()->context = mm->context;
                        slb_flush_and_rebolt();
                }
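
This is the consumer side of per-slice demotion: if the page size the PACA
would use for "ea" has gone stale relative to the mm (say another thread just
demoted this slice), recopy the context and rebolt the SLB so later faults
use the right size. The test could be factored into a helper along these
lines (a sketch, not code from this patch):

	/* Resync the PACA's cached context when its page size for
	 * "ea" no longer matches what the mm_struct says.
	 */
	static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
				     int psize, int user_region)
	{
		if (user_region && psize != get_paca_psize(ea)) {
			get_paca()->context = mm->context;
			slb_flush_and_rebolt();
		}
	}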