Merge branch 'core-memblock-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 850bad0..8434620 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -96,6 +96,43 @@ static int __init nonx32_setup(char *str)
 }
 __setup("noexec32=", nonx32_setup);
 
+/*
+ * When memory is added or removed, make sure that all processes' mm
+ * have suitable PGD entries in their local PGD-level page.
+ */
+void sync_global_pgds(unsigned long start, unsigned long end)
+{
+       unsigned long address;
+
+       for (address = start; address <= end; address += PGDIR_SIZE) {
+               const pgd_t *pgd_ref = pgd_offset_k(address);
+               unsigned long flags;
+               struct page *page;
+
+               if (pgd_none(*pgd_ref))
+                       continue;
+
+               spin_lock_irqsave(&pgd_lock, flags);
+               list_for_each_entry(page, &pgd_list, lru) {
+                       pgd_t *pgd;
+                       spinlock_t *pgt_lock;
+
+                       pgd = (pgd_t *)page_address(page) + pgd_index(address);
+                       pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+                       spin_lock(pgt_lock);
+
+                       if (pgd_none(*pgd))
+                               set_pgd(pgd, *pgd_ref);
+                       else
+                               BUG_ON(pgd_page_vaddr(*pgd)
+                                      != pgd_page_vaddr(*pgd_ref));
+
+                       spin_unlock(pgt_lock);
+               }
+               spin_unlock_irqrestore(&pgd_lock, flags);
+       }
+}
+
 /*
  * NOTE: This function is marked __ref because it calls __init function
  * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
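
For context: sync_global_pgds() is needed because every mm owns a private
PGD page whose kernel half is copied from init_mm only once, at PGD
allocation time. A minimal sketch of that one-time copy, modelled on
pgd_ctor() in arch/x86/mm/pgtable.c (the sketch function name is
hypothetical; the helpers and constants are the real ones):

/* Hedged sketch of the one-time kernel-PGD copy done for each new mm. */
static void pgd_copy_kernel_sketch(pgd_t *pgd)
{
        /* Replicate init_mm's kernel-space PGD entries into the new PGD.
         * Any PGD entry that init_mm gains after this point stays
         * invisible to this mm until sync_global_pgds() propagates it. */
        clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
                        swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                        KERNEL_PGD_PTRS);
}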
@@ -292,7 +329,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
                panic("alloc_low_page: ran out of memory");
 
        adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
-       memset(adr, 0, PAGE_SIZE);
+       clear_page(adr);
        *phys  = pfn * PAGE_SIZE;
        return adr;
 }
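
clear_page() is the arch-optimized way to zero exactly one page: on
x86-64 of this era it resolves to hand-written assembly (a rep stosq
variant chosen via CPU alternatives) rather than the generic memset
path. Functionally it matches the memset it replaces; a generic
fallback would look like the sketch below (the function name is
hypothetical):

/* Hedged sketch of a generic fallback; the real x86-64 clear_page()
 * lives in arch/x86/lib/clear_page_64.S. */
static inline void clear_page_sketch(void *page)
{
        memset(page, 0, PAGE_SIZE);
}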
@@ -533,11 +570,13 @@ kernel_physical_mapping_init(unsigned long start,
                             unsigned long end,
                             unsigned long page_size_mask)
 {
-
+       bool pgd_changed = false;
        unsigned long next, last_map_addr = end;
+       unsigned long addr;
 
        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);
+       addr = start;
 
        for (; start < end; start = next) {
                pgd_t *pgd = pgd_offset_k(start);
@@ -562,7 +601,12 @@ kernel_physical_mapping_init(unsigned long start,
                spin_lock(&init_mm.page_table_lock);
                pgd_populate(&init_mm, pgd, __va(pud_phys));
                spin_unlock(&init_mm.page_table_lock);
+               pgd_changed = true;
        }
+
+       if (pgd_changed)
+               sync_global_pgds(addr, end);
+
        __flush_tlb_all();
 
        return last_map_addr;
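
The batching above matters: the loop may populate many PGD entries, but
the cross-mm propagation runs once, after the loop, and only when
pgd_changed was set. A hedged sketch of how the memory hot-add side
reaches this code (the sketch function name is hypothetical;
init_memory_mapping() is this file's real entry point, error handling
elided):

/* Hedged sketch: hot-adding a physical range ends up in the loop above. */
static void memory_hotadd_sketch(unsigned long start, unsigned long size)
{
        /* init_memory_mapping() calls kernel_physical_mapping_init();
         * by the time it returns, every mm's PGD already sees any new
         * direct-map entries thanks to sync_global_pgds(). */
        init_memory_mapping(start, start + size);
}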
@@ -940,6 +984,7 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
                }
 
        }
+       sync_global_pgds((unsigned long)start_page, end);
        return 0;
 }
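
Without these eager sync_global_pgds() calls, a task whose PGD predates
the change would have to rely on fault-time repair, and that lazy path
only exists for the vmalloc range. For contrast, this is roughly what
the x86-64 vmalloc_fault() in arch/x86/mm/fault.c does per fault
(simplified; the sketch function name is hypothetical):

/* Hedged sketch of the lazy, per-fault PGD fixup used for vmalloc
 * addresses; direct-map and vmemmap ranges get no such second chance,
 * hence the eager sync above. */
static int vmalloc_fault_sketch(unsigned long address)
{
        pgd_t *pgd = pgd_offset(current->active_mm, address);
        const pgd_t *pgd_ref = pgd_offset_k(address);

        if (pgd_none(*pgd_ref))
                return -1;      /* not mapped in init_mm either */

        if (pgd_none(*pgd))
                set_pgd(pgd, *pgd_ref);
        else
                BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

        return 0;
}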