x86: enable memory tester support on 32-bit
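The early memory tester (CONFIG_MEMTEST) previously ran only on 64-bit. With init_memory_mapping() reworked below to map low memory in explicit pfn ranges, the 32-bit path gains the same early_memtest(start, end) call on each freshly mapped range, but only while the bootmem allocator is not yet up (note the !after_init_bootmem check in the hunk against init_memory_mapping()). Conceptually the tester writes a bit pattern across the range, reads it back, and reserves any region that fails to verify so it is never handed out later. A minimal sketch of that idea, for orientation only -- not the kernel's implementation; reserve_bad_range() is a hypothetical stand-in for the real early reservation call:

/*
 * Illustration only: one pattern pass over [start, end) through the
 * direct mapping.  Mismatching regions are merged and handed to
 * reserve_bad_range() (hypothetical helper standing in for the
 * kernel's early reservation mechanism).
 */
static void __init memtest_one_pattern(unsigned long pattern,
				       unsigned long start, unsigned long end)
{
	unsigned long *p, *start_va = __va(start), *end_va = __va(end);
	unsigned long bad_start = 0, bad_end = 0;

	for (p = start_va; p < end_va; p++)
		*p = pattern;

	for (p = start_va; p < end_va; p++) {
		if (*p == pattern)
			continue;
		if (bad_end != (unsigned long)p) {
			if (bad_end)
				reserve_bad_range(__pa((void *)bad_start),
						  __pa((void *)bad_end));
			bad_start = (unsigned long)p;
		}
		bad_end = (unsigned long)(p + 1);
	}
	if (bad_end)
		reserve_bad_range(__pa((void *)bad_start), __pa((void *)bad_end));
}

The real tester repeats this for several patterns; on kernels with CONFIG_MEMTEST it is normally enabled with the memtest= boot parameter (number of patterns to run), and with the default of 0 the early_memtest() calls do nothing.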
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index ecccb05..3eeab6d 100644
@@ -50,6 +50,7 @@
 
 unsigned int __VMALLOC_RESERVE = 128 << 20;
 
+unsigned long max_low_pfn_mapped;
 unsigned long max_pfn_mapped;
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
@@ -184,8 +185,9 @@ static inline int is_kernel_text(unsigned long addr)
  * PAGE_OFFSET:
  */
 static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
-                                               unsigned long start,
-                                               unsigned long end)
+                                               unsigned long start_pfn,
+                                               unsigned long end_pfn,
+                                               int use_pse)
 {
        int pgd_idx, pmd_idx, pte_ofs;
        unsigned long pfn;
@@ -193,32 +195,33 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
        pmd_t *pmd;
        pte_t *pte;
        unsigned pages_2m = 0, pages_4k = 0;
-       unsigned limit_pfn = end >> PAGE_SHIFT;
 
-       pgd_idx = pgd_index(PAGE_OFFSET);
-       pgd = pgd_base + pgd_idx;
-       pfn = start >> PAGE_SHIFT;
+       if (!cpu_has_pse)
+               use_pse = 0;
 
+       pfn = start_pfn;
+       pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
+       pgd = pgd_base + pgd_idx;
        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
-               if (pfn >= limit_pfn)
-                       continue;
 
-               for (pmd_idx = 0;
-                    pmd_idx < PTRS_PER_PMD && pfn < limit_pfn;
+               if (pfn >= end_pfn)
+                       continue;
+#ifdef CONFIG_X86_PAE
+               pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
+               pmd += pmd_idx;
+#else
+               pmd_idx = 0;
+#endif
+               for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
                     pmd++, pmd_idx++) {
                        unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
 
                        /*
                         * Map with big pages if possible, otherwise
                         * create normal page tables:
-                        *
-                        * Don't use a large page for the first 2/4MB of memory
-                        * because there are often fixed size MTRRs in there
-                        * and overlapping MTRRs into large pages can cause
-                        * slowdowns.
                         */
-                       if (cpu_has_pse && !(pgd_idx == 0 && pmd_idx == 0)) {
+                       if (use_pse) {
                                unsigned int addr2;
                                pgprot_t prot = PAGE_KERNEL_LARGE;
 
@@ -233,13 +236,13 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
                                set_pmd(pmd, pfn_pmd(pfn, prot));
 
                                pfn += PTRS_PER_PTE;
-                               max_pfn_mapped = pfn;
                                continue;
                        }
                        pte = one_page_table_init(pmd);
 
-                       for (pte_ofs = 0;
-                            pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
+                       pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
+                       pte += pte_ofs;
+                       for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
                             pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
                                pgprot_t prot = PAGE_KERNEL;
 
@@ -249,7 +252,6 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
                                pages_4k++;
                                set_pte(pte, pfn_pte(pfn, prot));
                        }
-                       max_pfn_mapped = pfn;
                }
        }
        update_page_count(PG_LEVEL_2M, pages_2m);
@@ -382,11 +384,6 @@ static void __init set_highmem_pages_init(void)
 # define set_highmem_pages_init()      do { } while (0)
 #endif /* CONFIG_HIGHMEM */
 
-pteval_t __PAGE_KERNEL = _PAGE_KERNEL;
-EXPORT_SYMBOL(__PAGE_KERNEL);
-
-pteval_t __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
-
 void __init native_pagetable_setup_start(pgd_t *base)
 {
        unsigned long pfn, va;
@@ -442,14 +439,10 @@ void __init native_pagetable_setup_done(pgd_t *base)
  * be partially populated, and so it avoids stomping on any existing
  * mappings.
  */
-static void __init pagetable_init(void)
+static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base)
 {
-       pgd_t *pgd_base = swapper_pg_dir;
        unsigned long vaddr, end;
 
-       paravirt_pagetable_setup_start(pgd_base);
-
-       remap_numa_kva();
        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
@@ -459,6 +452,13 @@ static void __init pagetable_init(void)
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);
        early_ioremap_reset();
+}
+
+static void __init pagetable_init(void)
+{
+       pgd_t *pgd_base = swapper_pg_dir;
+
+       paravirt_pagetable_setup_start(pgd_base);
 
        permanent_kmaps_init(pgd_base);
 
@@ -505,7 +505,7 @@ void zap_low_mappings(void)
 
 int nx_enabled;
 
-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_NX;
+pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
 EXPORT_SYMBOL_GPL(__supported_pte_mask);
 
 #ifdef CONFIG_X86_PAE
@@ -656,12 +656,14 @@ void __init initmem_init(unsigned long start_pfn,
        if (max_pfn > max_low_pfn)
                highstart_pfn = max_low_pfn;
        memory_present(0, 0, highend_pfn);
+       e820_register_active_regions(0, 0, highend_pfn);
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
                pages_to_mb(highend_pfn - highstart_pfn));
        num_physpages = highend_pfn;
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
 #else
        memory_present(0, 0, max_low_pfn);
+       e820_register_active_regions(0, 0, max_low_pfn);
        num_physpages = max_low_pfn;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
 #endif
@@ -673,25 +675,21 @@ void __init initmem_init(unsigned long start_pfn,
 
        setup_bootmem_allocator();
 }
+#endif /* !CONFIG_NEED_MULTIPLE_NODES */
 
-void __init zone_sizes_init(void)
+static void __init zone_sizes_init(void)
 {
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] =
                virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
-       remove_all_active_ranges();
 #ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
-       e820_register_active_regions(0, 0, highend_pfn);
-#else
-       e820_register_active_regions(0, 0, max_low_pfn);
 #endif
 
        free_area_init_nodes(max_zone_pfns);
 }
-#endif /* !CONFIG_NEED_MULTIPLE_NODES */
 
 void __init setup_bootmem_allocator(void)
 {
@@ -724,27 +722,9 @@ void __init setup_bootmem_allocator(void)
        after_init_bootmem = 1;
 }
 
-/*
- * The node 0 pgdat is initialized before all of these because
- * it's needed for bootmem.  node>0 pgdats have their virtual
- * space allocated before the pagetables are in place to access
- * them, so they can't be cleared then.
- *
- * This should all compile down to nothing when NUMA is off.
- */
-static void __init remapped_pgdat_init(void)
-{
-       int nid;
-
-       for_each_online_node(nid) {
-               if (nid != 0)
-                       memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
-       }
-}
-
 static void __init find_early_table_space(unsigned long end)
 {
-       unsigned long puds, pmds, tables, start;
+       unsigned long puds, pmds, ptes, tables, start;
 
        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        tables = PAGE_ALIGN(puds * sizeof(pud_t));
@@ -752,6 +732,20 @@ static void __init find_early_table_space(unsigned long end)
        pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
        tables += PAGE_ALIGN(pmds * sizeof(pmd_t));
 
+       if (cpu_has_pse) {
+               unsigned long extra;
+
+               extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
+               extra += PMD_SIZE;
+               ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       } else
+               ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+       tables += PAGE_ALIGN(ptes * sizeof(pte_t));
+
+       /* for fixmap */
+       tables += PAGE_SIZE * 2;
+
        /*
         * RED-PEN putting page tables only on node 0 could
         * cause a hotspot and fill up ZONE_DMA. The page tables
@@ -776,6 +770,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
                                                unsigned long end)
 {
        pgd_t *pgd_base = swapper_pg_dir;
+       unsigned long start_pfn, end_pfn;
+       unsigned long big_page_start;
 
        /*
         * Find space for the kernel direct mapping tables.
@@ -796,11 +792,49 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
        /* Enable PGE if available */
        if (cpu_has_pge) {
                set_in_cr4(X86_CR4_PGE);
-               __PAGE_KERNEL |= _PAGE_GLOBAL;
-               __PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
+               __supported_pte_mask |= _PAGE_GLOBAL;
        }
 
-       kernel_physical_mapping_init(pgd_base, start, end);
+       /*
+        * Don't use a large page for the first 2/4MB of memory
+        * because there are often fixed size MTRRs in there
+        * and overlapping MTRRs into large pages can cause
+        * slowdowns.
+        */
+       big_page_start = PMD_SIZE;
+
+       if (start < big_page_start) {
+               start_pfn = start >> PAGE_SHIFT;
+               end_pfn = min(big_page_start>>PAGE_SHIFT, end>>PAGE_SHIFT);
+       } else {
+               /* head is not big-page aligned? */
+               start_pfn = start >> PAGE_SHIFT;
+               end_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
+                                << (PMD_SHIFT - PAGE_SHIFT);
+       }
+       if (start_pfn < end_pfn)
+               kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn, 0);
+
+       /* big page range */
+       start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
+                        << (PMD_SHIFT - PAGE_SHIFT);
+       if (start_pfn < (big_page_start >> PAGE_SHIFT))
+               start_pfn =  big_page_start >> PAGE_SHIFT;
+       end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
+       if (start_pfn < end_pfn)
+               kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn,
+                                               cpu_has_pse);
+
+       /* tail is not big-page aligned? */
+       start_pfn = end_pfn;
+       if (start_pfn > (big_page_start>>PAGE_SHIFT)) {
+               end_pfn = end >> PAGE_SHIFT;
+               if (start_pfn < end_pfn)
+                       kernel_physical_mapping_init(pgd_base, start_pfn,
+                                                        end_pfn, 0);
+       }
+
+       early_ioremap_page_table_range_init(pgd_base);
 
        load_cr3(swapper_pg_dir);
 
@@ -810,9 +844,13 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
                reserve_early(table_start << PAGE_SHIFT,
                                 table_end << PAGE_SHIFT, "PGTABLE");
 
+       if (!after_init_bootmem)
+               early_memtest(start, end);
+
        return end >> PAGE_SHIFT;
 }
 
+
 /*
  * paging_init() sets up the page tables - note that the first 8MB are
  * already mapped by head.S.
@@ -831,7 +869,6 @@ void __init paging_init(void)
        /*
         * NOTE: at this point the bootmem allocator is fully available.
         */
-       remapped_pgdat_init();
        sparse_init();
        zone_sizes_init();
 
@@ -1001,6 +1038,8 @@ void mark_rodata_ro(void)
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;
 
+#ifndef CONFIG_DYNAMIC_FTRACE
+       /* Dynamic tracing modifies the kernel text section */
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel text: %luk\n",
                size >> 10);
@@ -1013,6 +1052,8 @@ void mark_rodata_ro(void)
        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
 #endif
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
        start += size;
        size = (unsigned long)__end_rodata - start;
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
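
For reference, the three kernel_physical_mapping_init() calls added to init_memory_mapping() above split an arbitrary [start, end) range at large-page boundaries: a 4 KB-mapped head (which also keeps the MTRR-sensitive first 2/4 MB out of large pages), a PSE-mapped body, and a 4 KB-mapped tail. The standalone sketch below reproduces the same pfn arithmetic as ordinary userspace C; the constants assume a non-PAE build (4 KB pages, 4 MB large pages) and the start/end values are arbitrary examples.

/*
 * Standalone sketch of the range split done in init_memory_mapping()
 * above.  Illustration only; PAGE_SHIFT/PMD_SHIFT match a non-PAE
 * 32-bit build and the example range is arbitrary.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	22			/* 4 MB large pages without PAE */
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define MIN(a, b)	((a) < (b) ? (a) : (b))

static void map_range(const char *what, unsigned long start_pfn,
		      unsigned long end_pfn, int use_pse)
{
	if (start_pfn < end_pfn)
		printf("%-4s pfn 0x%06lx - 0x%06lx (use_pse=%d)\n",
		       what, start_pfn, end_pfn, use_pse);
}

int main(void)
{
	unsigned long start = 0x00000000;	/* example: map the first 256 MB */
	unsigned long end   = 0x10000000;
	unsigned long big_page_start = PMD_SIZE;	/* no large pages below 2/4 MB (MTRRs) */
	unsigned long start_pfn, end_pfn;

	/* head below big_page_start, or up to the next large-page boundary: 4 KB pages */
	start_pfn = start >> PAGE_SHIFT;
	if (start < big_page_start)
		end_pfn = MIN(big_page_start >> PAGE_SHIFT, end >> PAGE_SHIFT);
	else
		end_pfn = ((start + PMD_SIZE - 1) >> PMD_SHIFT)
				<< (PMD_SHIFT - PAGE_SHIFT);
	map_range("head", start_pfn, end_pfn, 0);

	/* large-page body: the PMD-aligned part above big_page_start */
	start_pfn = ((start + PMD_SIZE - 1) >> PMD_SHIFT)
			<< (PMD_SHIFT - PAGE_SHIFT);
	if (start_pfn < (big_page_start >> PAGE_SHIFT))
		start_pfn = big_page_start >> PAGE_SHIFT;
	end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	map_range("body", start_pfn, end_pfn, 1);

	/* tail that is not large-page aligned: 4 KB pages again */
	start_pfn = end_pfn;
	if (start_pfn > (big_page_start >> PAGE_SHIFT))
		map_range("tail", start_pfn, end >> PAGE_SHIFT, 0);

	return 0;
}

For 0..256 MB this prints a 4 KB head covering pfns 0x000000-0x000400, a large-page body for 0x000400-0x010000, and no tail, which matches the mappings the patch sets up at boot.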