Merge branches 'release', 'asus', 'sony-laptop' and 'thinkpad' into release
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index a7308b2..9b61c75 100644
 #include <asm/proto.h>
 #include <asm/smp.h>
 #include <asm/sections.h>
+#include <asm/kdebug.h>
+#include <asm/numa.h>
 
-#ifndef Dprintk
-#define Dprintk(x...)
-#endif
-
-const struct dma_mapping_ops* dma_ops;
+const struct dma_mapping_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
 static unsigned long dma_reserve __initdata;
@@ -65,22 +63,26 @@ void show_mem(void)
 {
        long i, total = 0, reserved = 0;
        long shared = 0, cached = 0;
-       pg_data_t *pgdat;
        struct page *page;
+       pg_data_t *pgdat;
 
        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
-       printk(KERN_INFO "Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+       printk(KERN_INFO "Free swap:       %6ldkB\n",
+               nr_swap_pages << (PAGE_SHIFT-10));
 
        for_each_online_pgdat(pgdat) {
-               for (i = 0; i < pgdat->node_spanned_pages; ++i) {
-                       /* this loop can take a while with 256 GB and 4k pages
-                          so update the NMI watchdog */
-                       if (unlikely(i % MAX_ORDER_NR_PAGES == 0)) {
+               for (i = 0; i < pgdat->node_spanned_pages; ++i) {
+                       /*
+                        * This loop can take a while with 256 GB and
+                        * 4k pages so defer the NMI watchdog:
+                        */
+                       if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
                                touch_nmi_watchdog();
-                       }
+
                        if (!pfn_valid(pgdat->node_start_pfn + i))
                                continue;
+
                        page = pfn_to_page(pgdat->node_start_pfn + i);
                        total++;
                        if (PageReserved(page))
@@ -89,51 +91,58 @@ void show_mem(void)
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
-               }
+               }
        }
-       printk(KERN_INFO "%lu pages of RAM\n", total);
-       printk(KERN_INFO "%lu reserved pages\n",reserved);
-       printk(KERN_INFO "%lu pages shared\n",shared);
-       printk(KERN_INFO "%lu pages swap cached\n",cached);
+       printk(KERN_INFO "%ld pages of RAM\n",          total);
+       printk(KERN_INFO "%ld reserved pages\n",        reserved);
+       printk(KERN_INFO "%ld pages shared\n",          shared);
+       printk(KERN_INFO "%ld pages swap cached\n",     cached);
 }
 
 int after_bootmem;
 
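+/*
+ * Allocate a zeroed, page-aligned page for a kernel page table -
+ * from bootmem early during boot, from the page allocator later:
+ */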
 static __init void *spp_getpage(void)
-{ 
+{
        void *ptr;
+
        if (after_bootmem)
-               ptr = (void *) get_zeroed_page(GFP_ATOMIC); 
+               ptr = (void *) get_zeroed_page(GFP_ATOMIC);
        else
                ptr = alloc_bootmem_pages(PAGE_SIZE);
-       if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
-               panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem?"after bootmem":"");
 
-       Dprintk("spp_getpage %p\n", ptr);
+       if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
+               panic("set_pte_phys: cannot allocate page data %s\n",
+                       after_bootmem ? "after bootmem" : "");
+       }
+
+       pr_debug("spp_getpage %p\n", ptr);
+
        return ptr;
-} 
+}
 
-static __init void set_pte_phys(unsigned long vaddr,
-                        unsigned long phys, pgprot_t prot)
+static __init void
+set_pte_phys(unsigned long vaddr, unsigned long phys, pgprot_t prot)
 {
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, new_pte;
 
-       Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
+       pr_debug("set_pte_phys %lx to %lx\n", vaddr, phys);
 
        pgd = pgd_offset_k(vaddr);
        if (pgd_none(*pgd)) {
-               printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
+               printk(KERN_ERR
+                       "PGD FIXMAP MISSING, it should be setup in head.S!\n");
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
-               pmd = (pmd_t *) spp_getpage(); 
+               pmd = (pmd_t *) spp_getpage();
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
                if (pmd != pmd_offset(pud, 0)) {
-                       printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
+                       printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
+                               pmd, pmd_offset(pud, 0));
                        return;
                }
        }
@@ -142,7 +151,7 @@ static __init void set_pte_phys(unsigned long vaddr,
                pte = (pte_t *) spp_getpage();
                set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
                if (pte != pte_offset_kernel(pmd, 0)) {
-                       printk("PAGETABLE BUG #02!\n");
+                       printk(KERN_ERR "PAGETABLE BUG #02!\n");
                        return;
                }
        }
@@ -162,33 +171,35 @@ static __init void set_pte_phys(unsigned long vaddr,
 }
 
 /* NOTE: this is meant to be run only at boot */
-void __init 
-__set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
+void __init
+__set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 {
        unsigned long address = __fix_to_virt(idx);
 
        if (idx >= __end_of_fixed_addresses) {
-               printk("Invalid __set_fixmap\n");
+               printk(KERN_ERR "Invalid __set_fixmap\n");
                return;
        }
        set_pte_phys(address, phys, prot);
 }
 
-unsigned long __meminitdata table_start, table_end;
+static unsigned long __initdata table_start;
+static unsigned long __meminitdata table_end;
 
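+/*
+ * Allocate the next page for the early page tables: before bootmem is
+ * up the page comes from the table_start..table_end window and is
+ * reached through a temporary early_ioremap() mapping:
+ */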
 static __meminit void *alloc_low_page(unsigned long *phys)
-{ 
+{
        unsigned long pfn = table_end++;
        void *adr;
 
        if (after_bootmem) {
                adr = (void *)get_zeroed_page(GFP_ATOMIC);
                *phys = __pa(adr);
+
                return adr;
        }
 
-       if (pfn >= end_pfn) 
-               panic("alloc_low_page: ran out of memory"); 
+       if (pfn >= end_pfn)
+               panic("alloc_low_page: ran out of memory");
 
        adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
        memset(adr, 0, PAGE_SIZE);
@@ -197,44 +208,49 @@ static __meminit void *alloc_low_page(unsigned long *phys)
 }
 
 static __meminit void unmap_low_page(void *adr)
-{ 
-
+{
        if (after_bootmem)
                return;
 
        early_iounmap(adr, PAGE_SIZE);
-} 
+}
 
 /* Must run before zap_low_mappings */
 __meminit void *early_ioremap(unsigned long addr, unsigned long size)
 {
-       unsigned long vaddr;
        pmd_t *pmd, *last_pmd;
+       unsigned long vaddr;
        int i, pmds;
 
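+       /* Number of 2MB PMD entries needed to cover addr .. addr+size: */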
        pmds = ((addr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
        vaddr = __START_KERNEL_map;
        pmd = level2_kernel_pgt;
        last_pmd = level2_kernel_pgt + PTRS_PER_PMD - 1;
+
        for (; pmd <= last_pmd; pmd++, vaddr += PMD_SIZE) {
                for (i = 0; i < pmds; i++) {
                        if (pmd_present(pmd[i]))
-                               goto next;
+                               goto continue_outer_loop;
                }
                vaddr += addr & ~PMD_MASK;
                addr &= PMD_MASK;
+
                for (i = 0; i < pmds; i++, addr += PMD_SIZE)
-                       set_pmd(pmd + i,__pmd(addr | _KERNPG_TABLE | _PAGE_PSE));
-               __flush_tlb();
+                       set_pmd(pmd+i, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
+               __flush_tlb_all();
+
                return (void *)vaddr;
-       next:
+continue_outer_loop:
                ;
        }
-       printk("early_ioremap(0x%lx, %lu) failed\n", addr, size);
+       printk(KERN_ERR "early_ioremap(0x%lx, %lu) failed\n", addr, size);
+
        return NULL;
 }
 
-/* To avoid virtual aliases later */
+/*
+ * To avoid virtual aliases later:
+ */
 __meminit void early_iounmap(void *addr, unsigned long size)
 {
        unsigned long vaddr;
@@ -244,9 +260,11 @@ __meminit void early_iounmap(void *addr, unsigned long size)
        vaddr = (unsigned long)addr;
        pmds = ((vaddr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
        pmd = level2_kernel_pgt + pmd_index(vaddr);
+
        for (i = 0; i < pmds; i++)
                pmd_clear(pmd + i);
-       __flush_tlb();
+
+       __flush_tlb_all();
 }
 
 static void __meminit
@@ -255,41 +273,40 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
        int i = pmd_index(address);
 
        for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
-               unsigned long entry;
                pmd_t *pmd = pmd_page + pmd_index(address);
 
                if (address >= end) {
-                       if (!after_bootmem)
+                       if (!after_bootmem) {
                                for (; i < PTRS_PER_PMD; i++, pmd++)
                                        set_pmd(pmd, __pmd(0));
+                       }
                        break;
                }
 
                if (pmd_val(*pmd))
                        continue;
 
-               entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
-               entry &= __supported_pte_mask;
-               set_pmd(pmd, __pmd(entry));
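+               /* Map this 2MB chunk with a single large (PSE) page: */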
+               set_pte((pte_t *)pmd,
+                       pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
        }
 }
 
 static void __meminit
 phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
 {
-       pmd_t *pmd = pmd_offset(pud,0);
+       pmd_t *pmd = pmd_offset(pud, 0);
        spin_lock(&init_mm.page_table_lock);
        phys_pmd_init(pmd, address, end);
        spin_unlock(&init_mm.page_table_lock);
        __flush_tlb_all();
 }
 
-static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
-{ 
+static void __meminit
+phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
+{
        int i = pud_index(addr);
 
-
-       for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE ) {
+       for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
                unsigned long pmd_phys;
                pud_t *pud = pud_page + pud_index(addr);
                pmd_t *pmd;
@@ -297,10 +314,11 @@ static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
                if (addr >= end)
                        break;
 
-               if (!after_bootmem && !e820_any_mapped(addr,addr+PUD_SIZE,0)) {
-                       set_pud(pud, __pud(0)); 
+               if (!after_bootmem &&
+                               !e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
+                       set_pud(pud, __pud(0));
                        continue;
-               } 
+               }
 
                if (pud_val(*pud)) {
                        phys_pmd_update(pud, addr, end);
@@ -308,14 +326,16 @@ static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
                }
 
                pmd = alloc_low_page(&pmd_phys);
+
                spin_lock(&init_mm.page_table_lock);
                set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
                phys_pmd_init(pmd, addr, end);
                spin_unlock(&init_mm.page_table_lock);
+
                unmap_low_page(pmd);
        }
-       __flush_tlb();
-} 
+       __flush_tlb_all();
+}
 
 static void __init find_early_table_space(unsigned long end)
 {
@@ -326,11 +346,13 @@ static void __init find_early_table_space(unsigned long end)
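+       /* Space for the PUD and PMD pages needed to map memory up to 'end': */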
        tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
                 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
 
-       /* RED-PEN putting page tables only on node 0 could
-          cause a hotspot and fill up ZONE_DMA. The page tables
-          need roughly 0.5KB per GB. */
-       start = 0x8000;
-       table_start = find_e820_area(start, end, tables);
+       /*
+        * RED-PEN putting page tables only on node 0 could
+        * cause a hotspot and fill up ZONE_DMA. The page tables
+        * need roughly 0.5KB per GB.
+        */
+       start = 0x8000;
+       table_start = find_e820_area(start, end, tables, PAGE_SIZE);
        if (table_start == -1UL)
                panic("Cannot find space for the kernel page tables");
 
@@ -342,20 +364,23 @@ static void __init find_early_table_space(unsigned long end)
                (table_start << PAGE_SHIFT) + tables);
 }
 
-/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
-   This runs before bootmem is initialized and gets pages directly from the 
-   physical memory. To access them they are temporarily mapped. */
-void __meminit init_memory_mapping(unsigned long start, unsigned long end)
-{ 
-       unsigned long next; 
+/*
+ * Setup the direct mapping of the physical memory at PAGE_OFFSET.
+ * This runs before bootmem is initialized and gets pages directly from
+ * the physical memory. To access them they are temporarily mapped.
+ */
+void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
+{
+       unsigned long next;
 
-       Dprintk("init_memory_mapping\n");
+       pr_debug("init_memory_mapping\n");
 
-       /* 
+       /*
         * Find space for the kernel direct mapping tables.
-        * Later we should allocate these tables in the local node of the memory
-        * mapped.  Unfortunately this is done currently before the nodes are 
-        * discovered.
+        *
+        * Later we should allocate these tables in the local node of the
+        * memory mapped. Unfortunately this is done currently before the
+        * nodes are discovered.
         */
        if (!after_bootmem)
                find_early_table_space(end);
@@ -364,8 +389,8 @@ void __meminit init_memory_mapping(unsigned long start, unsigned long end)
        end = (unsigned long)__va(end);
 
        for (; start < end; start = next) {
-               unsigned long pud_phys; 
                pgd_t *pgd = pgd_offset_k(start);
+               unsigned long pud_phys;
                pud_t *pud;
 
                if (after_bootmem)
@@ -374,23 +399,28 @@ void __meminit init_memory_mapping(unsigned long start, unsigned long end)
                        pud = alloc_low_page(&pud_phys);
 
                next = start + PGDIR_SIZE;
-               if (next > end) 
-                       next = end; 
+               if (next > end)
+                       next = end;
                phys_pud_init(pud, __pa(start), __pa(next));
                if (!after_bootmem)
                        set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
                unmap_low_page(pud);
-       } 
+       }
 
        if (!after_bootmem)
                mmu_cr4_features = read_cr4();
        __flush_tlb_all();
+
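+       /* Keep the pages used for the early page tables reserved: */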
+       if (!after_bootmem)
+               reserve_early(table_start << PAGE_SHIFT,
+                                table_end << PAGE_SHIFT, "PGTABLE");
 }
 
 #ifndef CONFIG_NUMA
 void __init paging_init(void)
 {
        unsigned long max_zone_pfns[MAX_NR_ZONES];
+
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
@@ -402,40 +432,6 @@ void __init paging_init(void)
 }
 #endif
 
-/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
-   from the CPU leading to inconsistent cache lines. address and size
-   must be aligned to 2MB boundaries. 
-   Does nothing when the mapping doesn't exist. */
-void __init clear_kernel_mapping(unsigned long address, unsigned long size) 
-{
-       unsigned long end = address + size;
-
-       BUG_ON(address & ~LARGE_PAGE_MASK);
-       BUG_ON(size & ~LARGE_PAGE_MASK); 
-       
-       for (; address < end; address += LARGE_PAGE_SIZE) { 
-               pgd_t *pgd = pgd_offset_k(address);
-               pud_t *pud;
-               pmd_t *pmd;
-               if (pgd_none(*pgd))
-                       continue;
-               pud = pud_offset(pgd, address);
-               if (pud_none(*pud))
-                       continue; 
-               pmd = pmd_offset(pud, address);
-               if (!pmd || pmd_none(*pmd))
-                       continue; 
-               if (0 == (pmd_val(*pmd) & _PAGE_PSE)) { 
-                       /* Could handle this, but it should not happen currently. */
-                       printk(KERN_ERR 
-              "clear_kernel_mapping: mapping has been split. will leak memory\n"); 
-                       pmd_ERROR(*pmd); 
-               }
-               set_pmd(pmd, __pmd(0));                 
-       }
-       __flush_tlb_all();
-} 
-
 /*
  * Memory hotplug specific functions
  */
@@ -461,16 +457,12 @@ int arch_add_memory(int nid, u64 start, u64 size)
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;
 
-       init_memory_mapping(start, (start + size -1));
+       init_memory_mapping(start, start + size - 1);
 
        ret = __add_pages(zone, start_pfn, nr_pages);
-       if (ret)
-               goto error;
+       WARN_ON(ret);
 
        return ret;
-error:
-       printk("%s: Problem encountered in __add_pages!\n", __func__);
-       return ret;
 }
 EXPORT_SYMBOL_GPL(arch_add_memory);
 
@@ -484,36 +476,8 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
 
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
-#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
-/*
- * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
- * just online the pages.
- */
-int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
-{
-       int err = -EIO;
-       unsigned long pfn;
-       unsigned long total = 0, mem = 0;
-       for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
-               if (pfn_valid(pfn)) {
-                       online_page(pfn_to_page(pfn));
-                       err = 0;
-                       mem++;
-               }
-               total++;
-       }
-       if (!err) {
-               z->spanned_pages += total;
-               z->present_pages += mem;
-               z->zone_pgdat->node_spanned_pages += total;
-               z->zone_pgdat->node_present_pages += mem;
-       }
-       return err;
-}
-#endif
-
-static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
-                        kcore_vsyscall;
+static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
+                        kcore_modules, kcore_vsyscall;
 
 void __init mem_init(void)
 {
@@ -521,8 +485,15 @@ void __init mem_init(void)
 
        pci_iommu_alloc();
 
-       /* clear the zero-page */
-       memset(empty_zero_page, 0, PAGE_SIZE);
+       /* clear_bss() already cleared the empty_zero_page */
+
+       /* temporary debugging - double check it's true: */
+       {
+               int i;
+
+               for (i = 0; i < ARRAY_SIZE(empty_zero_page); i++)
+                       WARN_ON_ONCE(empty_zero_page[i]);
+       }
 
        reservedpages = 0;
 
@@ -534,7 +505,6 @@ void __init mem_init(void)
 #endif
        reservedpages = end_pfn - totalram_pages -
                                        absent_pages_in_range(0, end_pfn);
-
        after_bootmem = 1;
 
        codesize =  (unsigned long) &_etext - (unsigned long) &_text;
@@ -542,15 +512,16 @@ void __init mem_init(void)
        initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
 
        /* Register memory areas for /proc/kcore */
-       kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); 
-       kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, 
+       kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
+       kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);
        kclist_add(&kcore_kernel, &_stext, _end - _stext);
        kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
-       kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START, 
+       kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
                                 VSYSCALL_END - VSYSCALL_START);
 
-       printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
+       printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
+                               "%ldk reserved, %ldk data, %ldk init)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                end_pfn << (PAGE_SHIFT-10),
                codesize >> 10,
@@ -566,19 +537,27 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
        if (begin >= end)
                return;
 
+       /*
+        * If debugging page accesses then do not free these pages but
+        * mark them not present - any buggy init-section access will
+        * create a kernel page fault:
+        */
+#ifdef CONFIG_DEBUG_PAGEALLOC
+       printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
+               begin, PAGE_ALIGN(end));
+       set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
+#else
        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
+
        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)(addr & ~(PAGE_SIZE-1)),
                        POISON_FREE_INITMEM, PAGE_SIZE);
-               if (addr >= __START_KERNEL_map)
-                       change_page_attr_addr(addr, 1, __pgprot(0));
                free_page(addr);
                totalram_pages++;
        }
-       if (addr > __START_KERNEL_map)
-               global_flush_tlb();
+#endif
 }
 
 void free_initmem(void)
@@ -589,6 +568,8 @@ void free_initmem(void)
 }
 
 #ifdef CONFIG_DEBUG_RODATA
+const int rodata_test_data = 0xC3;
+EXPORT_SYMBOL_GPL(rodata_test_data);
 
 void mark_rodata_ro(void)
 {
@@ -603,25 +584,34 @@ void mark_rodata_ro(void)
 #ifdef CONFIG_KPROBES
        start = (unsigned long)__start_rodata;
 #endif
-       
+
        end = (unsigned long)__end_rodata;
        start = (start + PAGE_SIZE - 1) & PAGE_MASK;
        end &= PAGE_MASK;
        if (end <= start)
                return;
 
-       change_page_attr_addr(start, (end - start) >> PAGE_SHIFT, PAGE_KERNEL_RO);
-
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
               (end - start) >> 10);
+       set_memory_ro(start, (end - start) >> PAGE_SHIFT);
 
        /*
-        * change_page_attr_addr() requires a global_flush_tlb() call after it.
-        * We do this after the printk so that if something went wrong in the
-        * change, the printk gets out at least to give a better debug hint
-        * of who is the culprit.
+        * The rodata section (but not the kernel text!) should also be
+        * not-executable.
         */
-       global_flush_tlb();
+       start = ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
+       set_memory_nx(start, (end - start) >> PAGE_SHIFT);
+
+       rodata_test();
+
+#ifdef CONFIG_CPA_DEBUG
+       printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
+       set_memory_rw(start, (end-start) >> PAGE_SHIFT);
+
+       printk(KERN_INFO "Testing CPA: again\n");
+       set_memory_ro(start, (end-start) >> PAGE_SHIFT);
+#endif
 }
 #endif
 
@@ -632,17 +622,21 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
-void __init reserve_bootmem_generic(unsigned long phys, unsigned len) 
-{ 
+void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
+{
 #ifdef CONFIG_NUMA
        int nid = phys_to_nid(phys);
 #endif
        unsigned long pfn = phys >> PAGE_SHIFT;
+
        if (pfn >= end_pfn) {
-               /* This can happen with kdump kernels when accessing firmware
-                  tables. */
+               /*
+                * This can happen with kdump kernels when accessing
+                * firmware tables:
+                */
                if (pfn < end_pfn_map)
                        return;
+
                printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
                                phys, len);
                return;
@@ -650,9 +644,9 @@ void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
 
        /* Should check here against the e820 map to avoid double free */
 #ifdef CONFIG_NUMA
-       reserve_bootmem_node(NODE_DATA(nid), phys, len);
-#else                  
-       reserve_bootmem(phys, len);    
+       reserve_bootmem_node(NODE_DATA(nid), phys, len);
+#else
+       reserve_bootmem(phys, len);
 #endif
        if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
                dma_reserve += len / PAGE_SIZE;
@@ -660,46 +654,49 @@ void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
        }
 }
 
-int kern_addr_valid(unsigned long addr) 
-{ 
+int kern_addr_valid(unsigned long addr)
+{
        unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *pte;
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
 
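+       /* Reject non-canonical addresses (top bits not a sign extension): */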
        if (above != 0 && above != -1UL)
-               return 0; 
-       
+               return 0;
+
        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;
 
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
-               return 0; 
+               return 0;
 
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;
+
        if (pmd_large(*pmd))
                return pfn_valid(pmd_pfn(*pmd));
 
        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;
+
        return pfn_valid(pte_pfn(*pte));
 }
 
-/* A pseudo VMA to allow ptrace access for the vsyscall page.  This only
-   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
-   not need special handling anymore. */
-
+/*
+ * A pseudo VMA to allow ptrace access for the vsyscall page.  This only
+ * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
+ * not need special handling anymore:
+ */
 static struct vm_area_struct gate_vma = {
-       .vm_start = VSYSCALL_START,
-       .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES << PAGE_SHIFT),
-       .vm_page_prot = PAGE_READONLY_EXEC,
-       .vm_flags = VM_READ | VM_EXEC
+       .vm_start       = VSYSCALL_START,
+       .vm_end         = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
+       .vm_page_prot   = PAGE_READONLY_EXEC,
+       .vm_flags       = VM_READ | VM_EXEC
 };
 
 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
@@ -714,14 +711,17 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
 int in_gate_area(struct task_struct *task, unsigned long addr)
 {
        struct vm_area_struct *vma = get_gate_vma(task);
+
        if (!vma)
                return 0;
+
        return (addr >= vma->vm_start) && (addr < vma->vm_end);
 }
 
-/* Use this when you have no reliable task/vma, typically from interrupt
- * context.  It is less reliable than using the task's vma and may give
- * false positives.
+/*
+ * Use this when you have no reliable task/vma, typically from interrupt
+ * context. It is less reliable than using the task's vma and may give
+ * false positives:
  */
 int in_gate_area_no_task(unsigned long addr)
 {
@@ -741,8 +741,8 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 /*
  * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
  */
-int __meminit vmemmap_populate(struct page *start_page,
-                                               unsigned long size, int node)
+int __meminit
+vmemmap_populate(struct page *start_page, unsigned long size, int node)
 {
        unsigned long addr = (unsigned long)start_page;
        unsigned long end = (unsigned long)(start_page + size);
@@ -757,6 +757,7 @@ int __meminit vmemmap_populate(struct page *start_page,
                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;
+
                pud = vmemmap_pud_populate(pgd, addr, node);
                if (!pud)
                        return -ENOMEM;
@@ -764,20 +765,22 @@ int __meminit vmemmap_populate(struct page *start_page,
                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        pte_t entry;
-                       void *p = vmemmap_alloc_block(PMD_SIZE, node);
+                       void *p;
+
+                       p = vmemmap_alloc_block(PMD_SIZE, node);
                        if (!p)
                                return -ENOMEM;
 
-                       entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
-                       mk_pte_huge(entry);
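+                       /* Back this part of the vmemmap with a 2MB page: */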
+                       entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
+                                                       PAGE_KERNEL_LARGE);
                        set_pmd(pmd, __pmd(pte_val(entry)));
 
                        printk(KERN_DEBUG " [%lx-%lx] PMD ->%p on node %d\n",
                                addr, addr + PMD_SIZE - 1, p, node);
-               } else
+               } else {
                        vmemmap_verify((pte_t *)pmd, node, addr, next);
+               }
        }
-
        return 0;
 }
 #endif