Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 19 Sep 2008 23:11:09 +0000 (16:11 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 19 Sep 2008 23:11:09 +0000 (16:11 -0700)
* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: completely disable NOPL on 32 bits
  x86/paravirt: Remove duplicate paravirt_pagetable_setup_{start, done}()
  xen: fix for xen guest with mem > 3.7G
  x86: fix possible x86_64 and EFI regression
  arch/x86/kernel/kdebugfs.c: introduce missing kfree

arch/x86/kernel/cpu/common.c
arch/x86/kernel/kdebugfs.c
arch/x86/kernel/setup.c
arch/x86/mm/init_32.c
arch/x86/xen/setup.c

diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 8aab851..4e456bd 100644
@@ -344,31 +344,15 @@ static void __init early_cpu_detect(void)
 
 /*
  * The NOPL instruction is supposed to exist on all CPUs with
- * family >= 6, unfortunately, that's not true in practice because
+ * family >= 6; unfortunately, that's not true in practice because
  * of early VIA chips and (more importantly) broken virtualizers that
- * are not easy to detect.  Hence, probe for it based on first
- * principles.
+ * are not easy to detect.  In the latter case it doesn't even *fail*
+ * reliably, so probing for it doesn't even work.  Disable it completely
+ * unless we can find a reliable way to detect all the broken cases.
  */
 static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
 {
-       const u32 nopl_signature = 0x888c53b1; /* Random number */
-       u32 has_nopl = nopl_signature;
-
        clear_cpu_cap(c, X86_FEATURE_NOPL);
-       if (c->x86 >= 6) {
-               asm volatile("\n"
-                            "1:      .byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
-                            "2:\n"
-                            "        .section .fixup,\"ax\"\n"
-                            "3:      xor %0,%0\n"
-                            "        jmp 2b\n"
-                            "        .previous\n"
-                            _ASM_EXTABLE(1b,3b)
-                            : "+a" (has_nopl));
-
-               if (has_nopl == nopl_signature)
-                       set_cpu_cap(c, X86_FEATURE_NOPL);
-       }
 }
 
 static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
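
The probe being deleted above executed the NOPL opcode (0f 1f c0) and relied on the exception-table fixup to catch the #UD on CPUs that lack the instruction; as the new comment says, broken virtualizers may neither execute it correctly nor fault, so a first-principles probe is unreliable. For illustration only, a hedged userspace analog of such a probe (hypothetical, not kernel code), using SIGILL in place of the kernel's fixup section:

/* Hypothetical userspace analog of the removed kernel probe: execute NOPL
 * and recover via SIGILL instead of the kernel's exception-table fixup. */
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf nopl_env;

static void on_sigill(int sig)
{
	(void)sig;
	siglongjmp(nopl_env, 1);	/* NOPL trapped -> not supported */
}

int main(void)
{
	signal(SIGILL, on_sigill);

	if (sigsetjmp(nopl_env, 1) == 0) {
		/* nopl %eax -- the same 0f 1f c0 bytes the kernel probed */
		asm volatile(".byte 0x0f, 0x1f, 0xc0");
		puts("NOPL executed without faulting");
	} else {
		puts("NOPL raised SIGILL");
	}
	return 0;
}

On a well-behaved CPU or hypervisor this prints one of the two messages; on the problematic virtualizers described in the comment it may "succeed" while mishandling the instruction, which is why the kernel now simply clears X86_FEATURE_NOPL on 32 bits.
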
diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
index f2d43bc..ff7d3b0 100644
@@ -139,6 +139,7 @@ static int __init create_setup_data_nodes(struct dentry *parent)
                if (PageHighMem(pg)) {
                        data = ioremap_cache(pa_data, sizeof(*data));
                        if (!data) {
+                               kfree(node);
                                error = -ENXIO;
                                goto err_dir;
                        }
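
Context for the kdebugfs fix: earlier in create_setup_data_nodes() a per-entry node structure is allocated, and the error path taken when ioremap_cache() fails jumped to err_dir without releasing it. A minimal userspace sketch of the rule the added kfree(node) restores (names hypothetical, not the kernel's API):

#include <stdlib.h>
#include <string.h>

struct setup_node {
	int type;
	char *data;
};

/* Allocate a node plus a buffer for it; if the second step fails, the
 * first allocation must be released before bailing out -- the same rule
 * the added kfree(node) enforces on the ioremap_cache() failure path. */
static struct setup_node *make_node(const char *blob, size_t len)
{
	struct setup_node *node = calloc(1, sizeof(*node));

	if (!node)
		return NULL;

	node->data = malloc(len);
	if (!node->data) {
		free(node);	/* without this, 'node' leaks */
		return NULL;
	}

	memcpy(node->data, blob, len);
	return node;
}

int main(void)
{
	struct setup_node *n = make_node("boot", 4);

	if (n) {
		free(n->data);
		free(n);
	}
	return 0;
}
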
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 362d4e7..9838f25 100644
@@ -670,6 +670,10 @@ void __init setup_arch(char **cmdline_p)
 
        parse_early_param();
 
+#ifdef CONFIG_X86_64
+       check_efer();
+#endif
+
 #if defined(CONFIG_VMI) && defined(CONFIG_X86_32)
        /*
         * Must be before kernel pagetables are setup
@@ -738,7 +742,6 @@ void __init setup_arch(char **cmdline_p)
 #else
        num_physpages = max_pfn;
 
-       check_efer();
 
        /* How many end-of-memory variables you have, grandma! */
        /* need this before calling reserve_initrd */
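
The commit title ties this reorder to EFI on x86_64: check_efer() now runs right after parse_early_param(), ahead of the EFI and early mapping setup further down in setup_arch(), instead of at the later call site removed in the second hunk. check_efer() reads the EFER MSR to decide whether NX can be used in the kernel's page-table flags. As a hedged illustration of what it inspects (not the kernel's code path), EFER can also be read from userspace through the msr driver (requires root and the msr module loaded):

/* Read EFER (MSR 0xc0000080) for CPU 0 via /dev/cpu/0/msr and report
 * whether the NX-enable bit is set.  Sketch for illustration only. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_EFER	0xc0000080
#define EFER_NX		(1ULL << 11)	/* NXE: no-execute enable */

int main(void)
{
	uint64_t efer;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}
	if (pread(fd, &efer, sizeof(efer), MSR_EFER) != (ssize_t)sizeof(efer)) {
		perror("read EFER");
		close(fd);
		return 1;
	}
	printf("EFER = %#llx, NX %s\n", (unsigned long long)efer,
	       (efer & EFER_NX) ? "enabled" : "disabled");
	close(fd);
	return 0;
}
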
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index d37f293..60ec1d0 100644
@@ -458,11 +458,7 @@ static void __init pagetable_init(void)
 {
        pgd_t *pgd_base = swapper_pg_dir;
 
-       paravirt_pagetable_setup_start(pgd_base);
-
        permanent_kmaps_init(pgd_base);
-
-       paravirt_pagetable_setup_done(pgd_base);
 }
 
 #ifdef CONFIG_ACPI_SLEEP
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index b6acc3a..d679010 100644
@@ -42,7 +42,7 @@ char * __init xen_memory_setup(void)
 
        e820.nr_map = 0;
 
-       e820_add_region(0, PFN_PHYS(max_pfn), E820_RAM);
+       e820_add_region(0, PFN_PHYS((u64)max_pfn), E820_RAM);
 
        /*
         * Even though this is normal, usable memory under Xen, reserve
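
The Xen change guards against 32-bit truncation: PFN_PHYS() in this kernel is essentially "(x) << PAGE_SHIFT" evaluated in the type of its argument, so on a 32-bit guest with enough memory the computed size of the E820_RAM region wraps; casting max_pfn to u64 first forces the shift into 64-bit arithmetic. A small standalone demonstration of the wrap (PAGE_SHIFT assumed to be the usual 12):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed: the usual 4K pages */

int main(void)
{
	uint32_t max_pfn = 0x100000;	/* page frames for 4GB of guest RAM */

	/* shift done in 32-bit arithmetic, as the unpatched line would */
	uint32_t wrapped = max_pfn << PAGE_SHIFT;
	/* the patched form: widen to 64 bits before shifting */
	uint64_t widened = (uint64_t)max_pfn << PAGE_SHIFT;

	printf("wrapped size: %#x\n", wrapped);				/* 0 */
	printf("widened size: %#llx\n", (unsigned long long)widened);	/* 0x100000000 */
	return 0;
}

Run on a 32-bit style calculation, wrapped comes out as 0 while widened is 0x100000000 -- the difference between registering no usable RAM for the guest and registering all of it.
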