x86: adjust vmalloc_sync_all() for Xen (2nd try)
author    Jan Beulich <jbeulich@novell.com>
          Fri, 29 Aug 2008 11:53:45 +0000 (12:53 +0100)
committer Ingo Molnar <mingo@elte.hu>
          Sat, 6 Sep 2008 17:48:01 +0000 (19:48 +0200)
Since the fourth PDPT entry cannot be shared under Xen,
vmalloc_sync_all() must iterate over pmds rather than pgds here.
Luckily, the code isn't used for native PAE (where SHARED_KERNEL_PMD
is 1), and the change is benign to non-PAE.

Also do a little more cleanup in that function.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
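
For orientation, here is a minimal sketch of the CONFIG_X86_32 branch of
vmalloc_sync_all() as it looks with this patch applied. It is a reading
aid, not part of the patch: the per-pagetable loop body (the
vmalloc_sync_one() call and the pgd_list walk) is assumed from the
unchanged context lines that the hunks below elide.

	void vmalloc_sync_all(void)	/* CONFIG_X86_32 branch only, sketched */
	{
		unsigned long address;

		/*
		 * With native PAE the kernel PMD is shared by all page
		 * tables (SHARED_KERNEL_PMD is 1), so there is nothing to
		 * sync.  Under Xen the fourth PDPT entry is per-pagetable,
		 * hence the walk below.
		 */
		if (SHARED_KERNEL_PMD)
			return;

		/*
		 * Step by PMD_SIZE rather than PGDIR_SIZE: every PMD slot
		 * covering the vmalloc area (and staying below FIXADDR_TOP)
		 * may differ between page tables and must be propagated.
		 */
		for (address = VMALLOC_START & PMD_MASK;
		     address >= TASK_SIZE && address < FIXADDR_TOP;
		     address += PMD_SIZE) {
			unsigned long flags;
			struct page *page;

			spin_lock_irqsave(&pgd_lock, flags);
			list_for_each_entry(page, &pgd_list, lru) {
				/* assumed helper in this file: copies the
				 * kernel mapping for 'address' into this
				 * page table */
				if (!vmalloc_sync_one(page_address(page),
						      address))
					break;
			}
			spin_unlock_irqrestore(&pgd_lock, flags);
		}
	}

Note how the PMD-granularity loop bounds the walk explicitly at
FIXADDR_TOP and guards against wrap-around with the TASK_SIZE check, so
it no longer needs the BUILD_BUG_ON on TASK_SIZE alignment that the
PGDIR-based version carried (removed in the first hunk below).
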
arch/x86/mm/fault.c

index 455f3fe..356ed2d 100644
@@ -915,15 +915,15 @@ LIST_HEAD(pgd_list);
 
 void vmalloc_sync_all(void)
 {
-#ifdef CONFIG_X86_32
-       unsigned long start = VMALLOC_START & PGDIR_MASK;
        unsigned long address;
 
+#ifdef CONFIG_X86_32
        if (SHARED_KERNEL_PMD)
                return;
 
-       BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
-       for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
+       for (address = VMALLOC_START & PMD_MASK;
+            address >= TASK_SIZE && address < FIXADDR_TOP;
+            address += PMD_SIZE) {
                unsigned long flags;
                struct page *page;
 
@@ -936,10 +936,8 @@ void vmalloc_sync_all(void)
                spin_unlock_irqrestore(&pgd_lock, flags);
        }
 #else /* CONFIG_X86_64 */
-       unsigned long start = VMALLOC_START & PGDIR_MASK;
-       unsigned long address;
-
-       for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
+       for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
+            address += PGDIR_SIZE) {
                const pgd_t *pgd_ref = pgd_offset_k(address);
                unsigned long flags;
                struct page *page;