Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 470cc47..2362b64 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -51,6 +51,8 @@
 #include <asm/numa.h>
 #include <asm/cacheflush.h>
 #include <asm/init.h>
+#include <asm/uv/uv.h>
+#include <asm/setup.h>
 
 static int __init parse_direct_gbpages_off(char *arg)
 {
@@ -105,18 +107,18 @@ void sync_global_pgds(unsigned long start, unsigned long end)
 
        for (address = start; address <= end; address += PGDIR_SIZE) {
                const pgd_t *pgd_ref = pgd_offset_k(address);
-               unsigned long flags;
                struct page *page;
 
                if (pgd_none(*pgd_ref))
                        continue;
 
-               spin_lock_irqsave(&pgd_lock, flags);
+               spin_lock(&pgd_lock);
                list_for_each_entry(page, &pgd_list, lru) {
                        pgd_t *pgd;
                        spinlock_t *pgt_lock;
 
                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
+                       /* the pgt_lock is only needed for Xen */
                        pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
                        spin_lock(pgt_lock);
 
@@ -128,7 +130,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
 
                        spin_unlock(pgt_lock);
                }
-               spin_unlock_irqrestore(&pgd_lock, flags);
+               spin_unlock(&pgd_lock);
        }
 }
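
A note on this first hunk: dropping the _irqsave variant assumes pgd_lock is
no longer taken from interrupt context, so plain spin_lock()/spin_unlock()
pairs are enough, and the pgd_lock -> page_table_lock nesting seen in the
loop stays consistent. A minimal userspace sketch of that nesting order,
with pthread spinlocks standing in for the kernel locks (all names below are
hypothetical stand-ins, not kernel APIs):

    /* build with: cc -o nesting nesting.c -lpthread */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_spinlock_t pgd_lock;     /* stands in for the global pgd_lock */
    static pthread_spinlock_t pgt_lock[4];  /* stands in for per-mm page_table_lock */

    static void sync_one_address(void)
    {
            pthread_spin_lock(&pgd_lock);            /* outer: global list lock */
            for (int i = 0; i < 4; i++) {
                    pthread_spin_lock(&pgt_lock[i]); /* inner: per-mm lock */
                    /* ... the kernel copies the pgd entry here ... */
                    pthread_spin_unlock(&pgt_lock[i]);
            }
            pthread_spin_unlock(&pgd_lock);
    }

    int main(void)
    {
            pthread_spin_init(&pgd_lock, PTHREAD_PROCESS_PRIVATE);
            for (int i = 0; i < 4; i++)
                    pthread_spin_init(&pgt_lock[i], PTHREAD_PROCESS_PRIVATE);
            sync_one_address();
            puts("lock order held: pgd_lock -> page_table_lock");
            return 0;
    }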
 
@@ -293,18 +295,18 @@ void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
  * to the compile time generated pmds. This results in invalid pmds up
  * to the point where we hit the physaddr 0 mapping.
  *
- * We limit the mappings to the region from _text to _end.  _end is
- * rounded up to the 2MB boundary. This catches the invalid pmds as
+ * We limit the mappings to the region from _text to _brk_end.  _brk_end
+ * is rounded up to the 2MB boundary. This catches the invalid pmds as
  * well, as they are located before _text:
  */
 void __init cleanup_highmap(void)
 {
        unsigned long vaddr = __START_KERNEL_map;
-       unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1;
+       unsigned long vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);
+       unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
        pmd_t *pmd = level2_kernel_pgt;
-       pmd_t *last_pmd = pmd + PTRS_PER_PMD;
 
-       for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) {
+       for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
                if (pmd_none(*pmd))
                        continue;
                if (vaddr < (unsigned long) _text || vaddr > end)
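
For the cleanup_highmap() hunk above: the walk is now bounded both by
_brk_end, rounded up to a 2MB pmd boundary, and by the highest mapped pfn,
instead of a fixed PTRS_PER_PMD count of pmds. A standalone sketch of that
bounds arithmetic, using made-up stand-in values for _brk_end and
max_pfn_mapped:

    #include <stdio.h>

    #define PAGE_SHIFT    12
    #define PMD_SIZE      (1UL << 21)   /* 2MB */
    #define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

    int main(void)
    {
            unsigned long start_map = 0xffffffff80000000UL;     /* __START_KERNEL_map */
            unsigned long brk_end   = start_map + 0x01234567UL; /* stand-in _brk_end */
            unsigned long max_pfn_mapped = 0x80000UL;           /* stand-in: 2GB mapped */

            unsigned long vaddr_end = start_map + (max_pfn_mapped << PAGE_SHIFT);
            unsigned long end = ROUNDUP(brk_end, PMD_SIZE) - 1;
            unsigned long pmds = 0;

            for (unsigned long vaddr = start_map;
                 vaddr + PMD_SIZE - 1 < vaddr_end; vaddr += PMD_SIZE)
                    pmds++;

            printf("last kept address: %#lx\n", end);
            printf("pmds walked:       %lu\n", pmds);   /* 2GB / 2MB = 1024 */
            return 0;
    }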
@@ -606,63 +608,9 @@ kernel_physical_mapping_init(unsigned long start,
 void __init initmem_init(void)
 {
        memblock_x86_register_active_regions(0, 0, max_pfn);
-       init_memory_mapping_high();
 }
 #endif
 
-struct mapping_work_data {
-       unsigned long start;
-       unsigned long end;
-       unsigned long pfn_mapped;
-};
-
-static int __init_refok
-mapping_work_fn(unsigned long start_pfn, unsigned long end_pfn, void *datax)
-{
-       struct mapping_work_data *data = datax;
-       unsigned long pfn_mapped;
-       unsigned long final_start, final_end;
-
-       final_start = max_t(unsigned long, start_pfn<<PAGE_SHIFT, data->start);
-       final_end = min_t(unsigned long, end_pfn<<PAGE_SHIFT, data->end);
-
-       if (final_end <= final_start)
-               return 0;
-
-       pfn_mapped = init_memory_mapping(final_start, final_end);
-
-       if (pfn_mapped > data->pfn_mapped)
-               data->pfn_mapped = pfn_mapped;
-
-       return 0;
-}
-
-static unsigned long __init_refok
-init_memory_mapping_active_regions(unsigned long start, unsigned long end)
-{
-       struct mapping_work_data data;
-
-       data.start = start;
-       data.end = end;
-       data.pfn_mapped = 0;
-
-       work_with_active_regions(MAX_NUMNODES, mapping_work_fn, &data);
-
-       return data.pfn_mapped;
-}
-
-void __init_refok init_memory_mapping_high(void)
-{
-       if (max_pfn > max_low_pfn) {
-               max_pfn_mapped = init_memory_mapping_active_regions(1UL<<32,
-                                                        max_pfn<<PAGE_SHIFT);
-               /* can we preserve max_low_pfn ? */
-               max_low_pfn = max_pfn;
-
-               memblock.current_limit = get_max_mapped();
-       }
-}
-
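
The block deleted above clamped each active pfn region against the requested
byte range before handing it to init_memory_mapping(). A minimal userspace
sketch of that clamping, equivalent to the max_t/min_t pair in the removed
mapping_work_fn() (the sample values are arbitrary):

    #include <stdio.h>

    #define PAGE_SHIFT 12

    static void clamp_region(unsigned long start_pfn, unsigned long end_pfn,
                             unsigned long start, unsigned long end)
    {
            unsigned long final_start = start_pfn << PAGE_SHIFT;
            unsigned long final_end   = end_pfn << PAGE_SHIFT;

            if (final_start < start)
                    final_start = start;  /* max_t(..., start_pfn<<PAGE_SHIFT, start) */
            if (final_end > end)
                    final_end = end;      /* min_t(..., end_pfn<<PAGE_SHIFT, end) */

            if (final_end <= final_start) {
                    puts("region entirely outside the requested range");
                    return;
            }
            printf("would map [%#lx, %#lx)\n", final_start, final_end);
    }

    int main(void)
    {
            /* a region straddling 4GB, clamped to a [4GB, 8GB) request */
            clamp_region(0xf0000UL, 0x180000UL, 1UL << 32, 8UL << 30);
            return 0;
    }
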
 void __init paging_init(void)
 {
        unsigned long max_zone_pfns[MAX_NR_ZONES];
@@ -952,6 +900,19 @@ const char *arch_vma_name(struct vm_area_struct *vma)
        return NULL;
 }
 
+#ifdef CONFIG_X86_UV
+#define MIN_MEMORY_BLOCK_SIZE   (1 << SECTION_SIZE_BITS)
+
+unsigned long memory_block_size_bytes(void)
+{
+       if (is_uv_system()) {
+               printk(KERN_INFO "UV: memory block size 2GB\n");
+               return 2UL * 1024 * 1024 * 1024;
+       }
+       return MIN_MEMORY_BLOCK_SIZE;
+}
+#endif
+
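
For the new memory_block_size_bytes() above: on x86_64 SECTION_SIZE_BITS is
27 (an assumption here, the value is not shown in this diff), so the default
block size works out to 128MB, versus the 2GB blocks reported on UV systems.
A tiny sketch comparing the two values:

    #include <stdio.h>

    #define SECTION_SIZE_BITS     27    /* assumed x86_64 value */
    #define MIN_MEMORY_BLOCK_SIZE (1UL << SECTION_SIZE_BITS)

    int main(void)
    {
            unsigned long uv = 2UL * 1024 * 1024 * 1024;

            printf("default block size: %lu MB\n", MIN_MEMORY_BLOCK_SIZE >> 20);
            printf("UV block size:      %lu MB\n", uv >> 20);
            return 0;
    }
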
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 /*
  * Initialise the sparsemem vmemmap using huge-pages at the PMD level.