Pull bugzilla-5452 into release branch
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index eca6012..4ba34e9 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -24,6 +24,8 @@
 #include <linux/proc_fs.h>
 #include <linux/pci.h>
 #include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/memory_hotplug.h>
 
 #include <asm/processor.h>
 #include <asm/system.h>
@@ -70,7 +72,7 @@ void show_mem(void)
        show_free_areas();
        printk(KERN_INFO "Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
 
-       for_each_pgdat(pgdat) {
+       for_each_online_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
                        page = pfn_to_page(pgdat->node_start_pfn + i);
                        total++;
@@ -92,7 +94,7 @@ void show_mem(void)
 
 int after_bootmem;
 
-static void *spp_getpage(void)
+static __init void *spp_getpage(void)
 { 
        void *ptr;
        if (after_bootmem)
@@ -106,7 +108,7 @@ static void *spp_getpage(void)
        return ptr;
 } 
 
-static void set_pte_phys(unsigned long vaddr,
+static __init void set_pte_phys(unsigned long vaddr,
                         unsigned long phys, pgprot_t prot)
 {
        pgd_t *pgd;
@@ -155,7 +157,8 @@ static void set_pte_phys(unsigned long vaddr,
 }
 
 /* NOTE: this is meant to be run only at boot */
-void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
+void __init 
+__set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 {
        unsigned long address = __fix_to_virt(idx);
 
@@ -180,13 +183,19 @@ static  struct temp_map {
        {}
 }; 
 
-static __init void *alloc_low_page(int *index, unsigned long *phys) 
+static __meminit void *alloc_low_page(int *index, unsigned long *phys)
 { 
        struct temp_map *ti;
        int i; 
        unsigned long pfn = table_end++, paddr; 
        void *adr;
 
+       if (after_bootmem) {
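+               /* Bootmem is retired: take zeroed page-table pages straight
+                * from the page allocator instead of the temp mappings. */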
+               adr = (void *)get_zeroed_page(GFP_ATOMIC);
+               *phys = __pa(adr);
+               return adr;
+       }
+
        if (pfn >= end_pfn) 
                panic("alloc_low_page: ran out of memory"); 
        for (i = 0; temp_mappings[i].allocated; i++) {
@@ -199,55 +208,113 @@ static __init void *alloc_low_page(int *index, unsigned long *phys)
        ti->allocated = 1; 
        __flush_tlb();         
        adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK); 
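+       /* Match get_zeroed_page() above: page-table pages must start zeroed */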
+       memset(adr, 0, PAGE_SIZE);
        *index = i; 
        *phys  = pfn * PAGE_SIZE;  
        return adr; 
 } 
 
-static __init void unmap_low_page(int i)
+static __meminit void unmap_low_page(int i)
 { 
-       struct temp_map *ti = &temp_mappings[i];
+       struct temp_map *ti;
+
+       if (after_bootmem)
+               return;
+
+       ti = &temp_mappings[i];
        set_pmd(ti->pmd, __pmd(0));
        ti->allocated = 0; 
 } 
 
-static void __init phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
+/* Must run before zap_low_mappings */
+__init void *early_ioremap(unsigned long addr, unsigned long size)
+{
+       unsigned long map = round_down(addr, LARGE_PAGE_SIZE); 
+
+       /* The mapped area is usually somewhat larger than requested */
+       if (size >= LARGE_PAGE_SIZE) { 
+               printk("SMBIOS area too long %lu\n", size);
+               return NULL;
+       }
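+       /* Borrow the two boot-time temporary PMD slots to map two
+        * consecutive 2MB pages covering [addr, addr+size) */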
+       set_pmd(temp_mappings[0].pmd,  __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
+       map += LARGE_PAGE_SIZE;
+       set_pmd(temp_mappings[1].pmd,  __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
+       __flush_tlb();
+       return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
+}
+
+/* To avoid virtual aliases later */
+__init void early_iounmap(void *addr, unsigned long size)
+{
+       if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) != temp_mappings[0].address)
+               printk("early_iounmap: bad address %p\n", addr);
+       set_pmd(temp_mappings[0].pmd, __pmd(0));
+       set_pmd(temp_mappings[1].pmd, __pmd(0));
+       __flush_tlb();
+}
+
+static void __meminit
+phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
+{
+       int i;
+
+       for (i = 0; i < PTRS_PER_PMD; pmd++, i++, address += PMD_SIZE) {
+               unsigned long entry;
+
+               if (address > end) {
+                       for (; i < PTRS_PER_PMD; i++, pmd++)
+                               set_pmd(pmd, __pmd(0));
+                       break;
+               }
+               entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
+               entry &= __supported_pte_mask;
+               set_pmd(pmd, __pmd(entry));
+       }
+}
+
+static void __meminit
+phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
+{
+       pmd_t *pmd = pmd_offset(pud, (unsigned long)__va(address));
+
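+       /* Only populate a PMD that is still empty; a non-empty entry
+        * covers an already mapped range. */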
+       if (pmd_none(*pmd)) {
+               spin_lock(&init_mm.page_table_lock);
+               phys_pmd_init(pmd, address, end);
+               spin_unlock(&init_mm.page_table_lock);
+               __flush_tlb_all();
+       }
+}
+
+static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
 { 
-       long i, j; 
+       long i = pud_index(address);
 
-       i = pud_index(address);
        pud = pud + i;
+
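+       /* On hotadd the PUD entry usually exists already; just extend
+        * the PMDs underneath it. */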
+       if (after_bootmem && pud_val(*pud)) {
+               phys_pmd_update(pud, address, end);
+               return;
+       }
+
        for (; i < PTRS_PER_PUD; pud++, i++) {
                int map; 
                unsigned long paddr, pmd_phys;
                pmd_t *pmd;
 
-               paddr = address + i*PUD_SIZE;
-               if (paddr >= end) { 
-                       for (; i < PTRS_PER_PUD; i++, pud++) 
-                               set_pud(pud, __pud(0)); 
+               paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
+               if (paddr >= end)
                        break;
-               } 
 
-               if (!e820_mapped(paddr, paddr+PUD_SIZE, 0)) { 
+               if (!after_bootmem && !e820_any_mapped(paddr, paddr+PUD_SIZE, 0)) {
                        set_pud(pud, __pud(0)); 
                        continue;
                } 
 
                pmd = alloc_low_page(&map, &pmd_phys);
+               spin_lock(&init_mm.page_table_lock);
                set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
-               for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
-                       unsigned long pe;
-
-                       if (paddr >= end) { 
-                               for (; j < PTRS_PER_PMD; j++, pmd++)
-                                       set_pmd(pmd,  __pmd(0)); 
-                               break;
-               }
-                       pe = _PAGE_NX|_PAGE_PSE | _KERNPG_TABLE | _PAGE_GLOBAL | paddr;
-                       pe &= __supported_pte_mask;
-                       set_pmd(pmd, __pmd(pe));
-               }
+               phys_pmd_init(pmd, paddr, end);
+               spin_unlock(&init_mm.page_table_lock);
                unmap_low_page(map);
        }
        __flush_tlb();
@@ -262,30 +329,25 @@ static void __init find_early_table_space(unsigned long end)
        tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
                 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
 
-       /* Put page tables beyond the DMA zones if possible.
-          RED-PEN might be better to spread them out more over
-          memory to avoid hotspots */
-       if (end > MAX_DMA32_PFN<<PAGE_SHIFT)
-               start = MAX_DMA32_PFN << PAGE_SHIFT;
-       else if (end > MAX_DMA_PFN << PAGE_SHIFT)
-               start = MAX_DMA_PFN << PAGE_SHIFT;
-       else
-               start = 0x8000;
-
-       table_start = find_e820_area(start, end, tables);
-       if (table_start == -1)
-               table_start = find_e820_area(0x8000, end, tables);
+       /* RED-PEN putting page tables only on node 0 could
+          cause a hotspot and fill up ZONE_DMA. The page tables
+          need roughly 0.5KB per GB. */
+       start = 0x8000;
+       table_start = find_e820_area(start, end, tables);
        if (table_start == -1UL)
                panic("Cannot find space for the kernel page tables");
 
        table_start >>= PAGE_SHIFT;
        table_end = table_start;
+
+       early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
+               end, table_start << PAGE_SHIFT, table_end << PAGE_SHIFT);
 }
 
 /* Setup the direct mapping of the physical memory at PAGE_OFFSET.
    This runs before bootmem is initialized and gets pages directly from the 
    physical memory. To access them they are temporarily mapped. */
-void __init init_memory_mapping(unsigned long start, unsigned long end)
+void __meminit init_memory_mapping(unsigned long start, unsigned long end)
 { 
        unsigned long next; 
 
@@ -297,7 +359,8 @@ void __init init_memory_mapping(unsigned long start, unsigned long end)
         * mapped.  Unfortunately this is done currently before the nodes are 
         * discovered.
         */
-       find_early_table_space(end);
+       if (!after_bootmem)
+               find_early_table_space(end);
 
        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);
@@ -305,20 +368,26 @@ void __init init_memory_mapping(unsigned long start, unsigned long end)
        for (; start < end; start = next) {
                int map;
                unsigned long pud_phys; 
-               pud_t *pud = alloc_low_page(&map, &pud_phys);
+               pgd_t *pgd = pgd_offset_k(start);
+               pud_t *pud;
+
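+               /* Hot-added ranges reuse the existing kernel PGD/PUD;
+                * at boot a fresh PUD page is allocated and hooked up. */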
+               if (after_bootmem)
+                       pud = pud_offset_k(pgd, start & PGDIR_MASK);
+               else
+                       pud = alloc_low_page(&map, &pud_phys);
+
                next = start + PGDIR_SIZE;
                if (next > end) 
                        next = end; 
                phys_pud_init(pud, __pa(start), __pa(next));
-               set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
+               if (!after_bootmem)
+                       set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
                unmap_low_page(map);   
        } 
 
-       asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
+       if (!after_bootmem)
+               asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
        __flush_tlb_all();
-       early_printk("kernel direct mapping tables upto %lx @ %lx-%lx\n", end, 
-              table_start<<PAGE_SHIFT, 
-              table_end<<PAGE_SHIFT);
 }
 
 void __cpuinit zap_low_mappings(int cpu)
@@ -393,6 +462,9 @@ size_zones(unsigned long *z, unsigned long *h,
 void __init paging_init(void)
 {
        unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];
+
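+       /* Tell sparsemem which PFNs exist so it can build the mem_map */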
+       memory_present(0, 0, end_pfn);
+       sparse_init();
        size_zones(zones, holes, 0, end_pfn);
        free_area_init_node(0, NODE_DATA(0), zones,
                            __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
@@ -433,6 +505,81 @@ void __init clear_kernel_mapping(unsigned long address, unsigned long size)
        __flush_tlb_all();
 } 
 
+/*
+ * Memory hotplug specific functions
+ */
+#if defined(CONFIG_ACPI_HOTPLUG_MEMORY) || defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)
+
+void online_page(struct page *page)
+{
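+       /* Hand the page to the buddy allocator and update the counters */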
+       ClearPageReserved(page);
+       init_page_count(page);
+       __free_page(page);
+       totalram_pages++;
+       num_physpages++;
+}
+
+#ifndef CONFIG_MEMORY_HOTPLUG
+/*
+ * Memory hotadd without sparsemem. The mem_maps have been allocated in
+ * advance; just online the pages.
+ */
+int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
+{
+       int err = -EIO;
+       unsigned long pfn;
+       unsigned long total = 0, mem = 0;
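+       /* -EIO is returned only if no valid page is found in the range */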
+       for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
+               if (pfn_valid(pfn)) {
+                       online_page(pfn_to_page(pfn));
+                       err = 0;
+                       mem++;
+               }
+               total++;
+       }
+       if (!err) {
+               z->spanned_pages += total;
+               z->present_pages += mem;
+               z->zone_pgdat->node_spanned_pages += total;
+               z->zone_pgdat->node_present_pages += mem;
+       }
+       return err;
+}
+#endif
+
+/*
+ * Memory is always added to the NORMAL zone. This means you will never get
+ * additional DMA/DMA32 memory.
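+ * The expected caller is the ACPI memory hotplug driver, passing the
+ * physical start and length of the newly detected range.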
+ */
+int add_memory(u64 start, u64 size)
+{
+       struct pglist_data *pgdat = NODE_DATA(0);
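+       /* MAX_NR_ZONES-2 selects ZONE_NORMAL; the last zone (HIGHMEM)
+        * is unused on x86_64 */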
+       struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
+       unsigned long start_pfn = start >> PAGE_SHIFT;
+       unsigned long nr_pages = size >> PAGE_SHIFT;
+       int ret;
+
+       ret = __add_pages(zone, start_pfn, nr_pages);
+       if (ret)
+               goto error;
+
+       init_memory_mapping(start, (start + size - 1));
+
+       return ret;
+error:
+       printk("%s: Problem encountered in __add_pages!\n", __func__);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(add_memory);
+
+int remove_memory(u64 start, u64 size)
+{
+       return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(remove_memory);
+
+#endif
+
 static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
                         kcore_vsyscall;
 
@@ -504,7 +651,7 @@ void free_initmem(void)
        addr = (unsigned long)(&__init_begin);
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
-               set_page_count(virt_to_page(addr), 1);
+               init_page_count(virt_to_page(addr));
                memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE); 
                free_page(addr);
                totalram_pages++;
@@ -539,12 +686,12 @@ void mark_rodata_ro(void)
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-       if (start < (unsigned long)&_end)
+       if (start >= end)
                return;
        printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
-               set_page_count(virt_to_page(start), 1);
+               init_page_count(virt_to_page(start));
                free_page(start);
                totalram_pages++;
        }