Merge branches 'x86/acpi', 'x86/asm', 'x86/cpudetect', 'x86/crashdump', 'x86/debug...
author    Ingo Molnar <mingo@elte.hu>
          Fri, 13 Feb 2009 08:46:36 +0000 (09:46 +0100)
committer Ingo Molnar <mingo@elte.hu>
          Fri, 13 Feb 2009 08:46:36 +0000 (09:46 +0100)
343 files changed:
Documentation/cputopology.txt
Documentation/x86/boot.txt
Makefile
arch/alpha/kernel/irq.c
arch/arm/kernel/irq.c
arch/arm/kernel/vmlinux.lds.S
arch/arm/oprofile/op_model_mpcore.c
arch/blackfin/kernel/irqchip.c
arch/ia64/include/asm/percpu.h
arch/ia64/include/asm/topology.h
arch/ia64/include/asm/uv/uv.h [new file with mode: 0644]
arch/ia64/kernel/acpi.c
arch/ia64/kernel/iosapic.c
arch/ia64/kernel/irq.c
arch/ia64/kernel/irq_ia64.c
arch/ia64/kernel/msi_ia64.c
arch/ia64/kernel/vmlinux.lds.S
arch/ia64/sn/kernel/msi_sn.c
arch/mips/include/asm/irq.h
arch/mips/kernel/irq-gic.c
arch/mips/kernel/smtc.c
arch/mips/mti-malta/malta-smtc.c
arch/mips/sgi-ip22/ip22-int.c
arch/mips/sgi-ip22/ip22-time.c
arch/mips/sibyte/bcm1480/smp.c
arch/mips/sibyte/sb1250/smp.c
arch/mn10300/kernel/mn10300-watchdog.c
arch/parisc/kernel/irq.c
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/vmlinux.lds.S
arch/powerpc/platforms/pseries/xics.c
arch/powerpc/sysdev/mpic.c
arch/sparc/kernel/irq_64.c
arch/sparc/kernel/time_64.c
arch/x86/Kconfig
arch/x86/Kconfig.cpu
arch/x86/Kconfig.debug
arch/x86/Makefile
arch/x86/configs/i386_defconfig
arch/x86/configs/x86_64_defconfig
arch/x86/ia32/ia32_signal.c
arch/x86/ia32/ia32entry.S
arch/x86/include/asm/a.out-core.h
arch/x86/include/asm/acpi.h
arch/x86/include/asm/apic.h
arch/x86/include/asm/apicnum.h [new file with mode: 0644]
arch/x86/include/asm/apm.h [moved from arch/x86/include/asm/mach-default/apm.h with 100% similarity]
arch/x86/include/asm/bigsmp/apic.h [deleted file]
arch/x86/include/asm/bigsmp/apicdef.h [deleted file]
arch/x86/include/asm/bigsmp/ipi.h [deleted file]
arch/x86/include/asm/calling.h
arch/x86/include/asm/cpu.h
arch/x86/include/asm/cpumask.h [new file with mode: 0644]
arch/x86/include/asm/current.h
arch/x86/include/asm/do_timer.h [moved from arch/x86/include/asm/mach-default/do_timer.h with 100% similarity]
arch/x86/include/asm/elf.h
arch/x86/include/asm/entry_arch.h [moved from arch/x86/include/asm/mach-default/entry_arch.h with 57% similarity]
arch/x86/include/asm/es7000/apic.h [deleted file]
arch/x86/include/asm/es7000/apicdef.h [deleted file]
arch/x86/include/asm/es7000/ipi.h [deleted file]
arch/x86/include/asm/es7000/mpparse.h [deleted file]
arch/x86/include/asm/es7000/wakecpu.h [deleted file]
arch/x86/include/asm/fixmap_32.h
arch/x86/include/asm/fixmap_64.h
arch/x86/include/asm/genapic.h
arch/x86/include/asm/genapic_32.h [deleted file]
arch/x86/include/asm/genapic_64.h [deleted file]
arch/x86/include/asm/hardirq.h
arch/x86/include/asm/hardirq_32.h [deleted file]
arch/x86/include/asm/hardirq_64.h [deleted file]
arch/x86/include/asm/hw_irq.h
arch/x86/include/asm/io.h
arch/x86/include/asm/io_32.h
arch/x86/include/asm/io_64.h
arch/x86/include/asm/io_apic.h
arch/x86/include/asm/ipi.h
arch/x86/include/asm/irq.h
arch/x86/include/asm/irq_regs.h
arch/x86/include/asm/irq_regs_32.h [deleted file]
arch/x86/include/asm/irq_regs_64.h [deleted file]
arch/x86/include/asm/irq_vectors.h
arch/x86/include/asm/kexec.h
arch/x86/include/asm/mach-default/mach_apic.h [deleted file]
arch/x86/include/asm/mach-default/mach_apicdef.h [deleted file]
arch/x86/include/asm/mach-default/mach_ipi.h [deleted file]
arch/x86/include/asm/mach-default/mach_mpparse.h [deleted file]
arch/x86/include/asm/mach-default/mach_mpspec.h [deleted file]
arch/x86/include/asm/mach-default/mach_wakecpu.h [deleted file]
arch/x86/include/asm/mach-generic/gpio.h [deleted file]
arch/x86/include/asm/mach-generic/mach_apic.h [deleted file]
arch/x86/include/asm/mach-generic/mach_apicdef.h [deleted file]
arch/x86/include/asm/mach-generic/mach_ipi.h [deleted file]
arch/x86/include/asm/mach-generic/mach_mpparse.h [deleted file]
arch/x86/include/asm/mach-generic/mach_mpspec.h [deleted file]
arch/x86/include/asm/mach-generic/mach_wakecpu.h [deleted file]
arch/x86/include/asm/mach-rdc321x/gpio.h [deleted file]
arch/x86/include/asm/mach_timer.h [moved from arch/x86/include/asm/mach-default/mach_timer.h with 100% similarity]
arch/x86/include/asm/mach_traps.h [moved from arch/x86/include/asm/mach-default/mach_traps.h with 100% similarity]
arch/x86/include/asm/mmu_context.h
arch/x86/include/asm/mmu_context_32.h [deleted file]
arch/x86/include/asm/mmu_context_64.h [deleted file]
arch/x86/include/asm/mpspec.h
arch/x86/include/asm/mpspec_def.h
arch/x86/include/asm/numaq.h
arch/x86/include/asm/numaq/apic.h [deleted file]
arch/x86/include/asm/numaq/apicdef.h [deleted file]
arch/x86/include/asm/numaq/ipi.h [deleted file]
arch/x86/include/asm/numaq/mpparse.h [deleted file]
arch/x86/include/asm/numaq/wakecpu.h [deleted file]
arch/x86/include/asm/page.h
arch/x86/include/asm/page_64.h
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/pat.h
arch/x86/include/asm/pci-functions.h [moved from arch/x86/include/asm/mach-default/pci-functions.h with 100% similarity]
arch/x86/include/asm/pda.h [deleted file]
arch/x86/include/asm/percpu.h
arch/x86/include/asm/pgtable-2level.h
arch/x86/include/asm/pgtable-3level.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_32.h
arch/x86/include/asm/pgtable_64.h
arch/x86/include/asm/prctl.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/proto.h
arch/x86/include/asm/ptrace.h
arch/x86/include/asm/rdc321x_defs.h [moved from arch/x86/include/asm/mach-rdc321x/rdc321x_defs.h with 100% similarity]
arch/x86/include/asm/segment.h
arch/x86/include/asm/setup.h
arch/x86/include/asm/setup_arch.h [moved from arch/x86/include/asm/mach-default/setup_arch.h with 100% similarity]
arch/x86/include/asm/smp.h
arch/x86/include/asm/smpboot_hooks.h [moved from arch/x86/include/asm/mach-default/smpboot_hooks.h with 83% similarity]
arch/x86/include/asm/spinlock.h
arch/x86/include/asm/stackprotector.h [new file with mode: 0644]
arch/x86/include/asm/summit/apic.h [deleted file]
arch/x86/include/asm/summit/apicdef.h [deleted file]
arch/x86/include/asm/summit/ipi.h [deleted file]
arch/x86/include/asm/summit/mpparse.h [deleted file]
arch/x86/include/asm/syscalls.h
arch/x86/include/asm/system.h
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/tlbflush.h
arch/x86/include/asm/topology.h
arch/x86/include/asm/trampoline.h
arch/x86/include/asm/traps.h
arch/x86/include/asm/uaccess.h
arch/x86/include/asm/uv/uv.h [new file with mode: 0644]
arch/x86/include/asm/uv/uv_bau.h
arch/x86/include/asm/voyager.h
arch/x86/include/asm/xen/events.h
arch/x86/kernel/Makefile
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/acpi/sleep.c
arch/x86/kernel/apic.c
arch/x86/kernel/apm_32.c
arch/x86/kernel/asm-offsets_32.c
arch/x86/kernel/asm-offsets_64.c
arch/x86/kernel/bigsmp_32.c [new file with mode: 0644]
arch/x86/kernel/cpu/addon_cpuid_features.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/cpu/mcheck/mce_amd_64.c
arch/x86/kernel/cpu/mcheck/mce_intel_64.c
arch/x86/kernel/crash.c
arch/x86/kernel/dumpstack.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/early_printk.c
arch/x86/kernel/efi.c
arch/x86/kernel/efi_64.c
arch/x86/kernel/entry_32.S
arch/x86/kernel/entry_64.S
arch/x86/kernel/es7000_32.c
arch/x86/kernel/genapic_64.c
arch/x86/kernel/genapic_flat_64.c
arch/x86/kernel/genx2apic_cluster.c
arch/x86/kernel/genx2apic_phys.c
arch/x86/kernel/genx2apic_uv_x.c
arch/x86/kernel/head64.c
arch/x86/kernel/head_32.S
arch/x86/kernel/head_64.S
arch/x86/kernel/io_apic.c
arch/x86/kernel/ioport.c
arch/x86/kernel/ipi.c
arch/x86/kernel/irq.c
arch/x86/kernel/irq_32.c
arch/x86/kernel/irq_64.c
arch/x86/kernel/irqinit_32.c
arch/x86/kernel/kgdb.c
arch/x86/kernel/machine_kexec_64.c
arch/x86/kernel/microcode_intel.c
arch/x86/kernel/module_32.c
arch/x86/kernel/module_64.c
arch/x86/kernel/mpparse.c
arch/x86/kernel/msr.c
arch/x86/kernel/nmi.c
arch/x86/kernel/numaq_32.c
arch/x86/kernel/paravirt-spinlocks.c
arch/x86/kernel/paravirt.c
arch/x86/kernel/paravirt_patch_32.c
arch/x86/kernel/paravirt_patch_64.c
arch/x86/kernel/probe_32.c [new file with mode: 0644]
arch/x86/kernel/probe_roms_32.c
arch/x86/kernel/process.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/ptrace.c
arch/x86/kernel/reboot.c
arch/x86/kernel/relocate_kernel_64.S
arch/x86/kernel/setup.c
arch/x86/kernel/setup_percpu.c
arch/x86/kernel/signal.c
arch/x86/kernel/smp.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/smpcommon.c [deleted file]
arch/x86/kernel/stacktrace.c
arch/x86/kernel/summit_32.c
arch/x86/kernel/syscall_table_32.S
arch/x86/kernel/time_32.c
arch/x86/kernel/tlb_32.c [deleted file]
arch/x86/kernel/tlb_uv.c
arch/x86/kernel/trampoline_64.S
arch/x86/kernel/traps.c
arch/x86/kernel/tsc.c
arch/x86/kernel/visws_quirks.c
arch/x86/kernel/vm86_32.c
arch/x86/kernel/vmi_32.c
arch/x86/kernel/vmiclock_32.c
arch/x86/kernel/vmlinux_32.lds.S
arch/x86/kernel/vmlinux_64.lds.S
arch/x86/kernel/vsmp_64.c
arch/x86/kernel/x8664_ksyms_64.c
arch/x86/lguest/boot.c
arch/x86/mach-default/Makefile [deleted file]
arch/x86/mach-default/setup.c [deleted file]
arch/x86/mach-generic/Makefile [deleted file]
arch/x86/mach-generic/bigsmp.c [deleted file]
arch/x86/mach-generic/default.c [deleted file]
arch/x86/mach-generic/es7000.c [deleted file]
arch/x86/mach-generic/numaq.c [deleted file]
arch/x86/mach-generic/probe.c [deleted file]
arch/x86/mach-generic/summit.c [deleted file]
arch/x86/mach-rdc321x/Makefile [deleted file]
arch/x86/mach-rdc321x/gpio.c [deleted file]
arch/x86/mach-rdc321x/platform.c [deleted file]
arch/x86/mach-voyager/setup.c
arch/x86/mach-voyager/voyager_smp.c
arch/x86/math-emu/get_address.c
arch/x86/mm/Makefile
arch/x86/mm/extable.c
arch/x86/mm/fault.c
arch/x86/mm/init_32.c
arch/x86/mm/ioremap.c
arch/x86/mm/mmap.c
arch/x86/mm/numa_64.c
arch/x86/mm/pat.c
arch/x86/mm/srat_64.c
arch/x86/mm/tlb.c [moved from arch/x86/kernel/tlb_64.c with 67% similarity]
arch/x86/pci/numaq_32.c
arch/x86/pci/pcbios.c
arch/x86/vdso/Makefile
arch/x86/xen/Makefile
arch/x86/xen/enlighten.c
arch/x86/xen/irq.c
arch/x86/xen/mmu.c
arch/x86/xen/mmu.h
arch/x86/xen/multicalls.h
arch/x86/xen/smp.c
arch/x86/xen/suspend.c
arch/x86/xen/xen-asm.S [new file with mode: 0644]
arch/x86/xen/xen-asm.h [new file with mode: 0644]
arch/x86/xen/xen-asm_32.S
arch/x86/xen/xen-asm_64.S
arch/x86/xen/xen-ops.h
drivers/acpi/acpica/tbxface.c
drivers/acpi/osl.c
drivers/acpi/tables.c
drivers/base/cpu.c
drivers/base/topology.c
drivers/clocksource/acpi_pm.c
drivers/clocksource/cyclone.c
drivers/eisa/Kconfig
drivers/firmware/dcdbas.c
drivers/firmware/iscsi_ibft.c
drivers/gpu/drm/drm_proc.c
drivers/input/keyboard/Kconfig
drivers/input/mouse/Kconfig
drivers/misc/Kconfig
drivers/misc/sgi-gru/grufile.c
drivers/misc/sgi-xp/xp.h
drivers/misc/sgi-xp/xpc_main.c
drivers/mtd/nand/Kconfig
drivers/net/ne3210.c
drivers/net/sfc/efx.c
drivers/net/sfc/falcon.c
drivers/net/wireless/arlan-main.c
drivers/oprofile/buffer_sync.c
drivers/oprofile/buffer_sync.h
drivers/oprofile/oprof.c
drivers/pci/dmar.c
drivers/pci/intr_remapping.c
drivers/watchdog/rdc321x_wdt.c
drivers/xen/events.c
drivers/xen/manage.c
include/acpi/acpiosxf.h
include/acpi/acpixf.h
include/asm-generic/percpu.h
include/asm-generic/sections.h
include/asm-generic/vmlinux.lds.h
include/linux/acpi.h
include/linux/elfcore.h
include/linux/interrupt.h
include/linux/irq.h
include/linux/irqnr.h
include/linux/magic.h
include/linux/percpu.h
include/linux/sched.h
include/linux/smp.h
include/linux/stackprotector.h [new file with mode: 0644]
include/linux/topology.h
init/main.c
kernel/exit.c
kernel/fork.c
kernel/irq/chip.c
kernel/irq/handle.c
kernel/irq/internals.h
kernel/irq/manage.c
kernel/irq/migration.c
kernel/irq/numa_migrate.c
kernel/irq/proc.c
kernel/kexec.c
kernel/panic.c
kernel/sched.c
kernel/sched_rt.c
kernel/softirq.c
mm/page-writeback.c
mm/slab.c
mm/slob.c
mm/slub.c
scripts/gcc-x86_32-has-stack-protector.sh [new file with mode: 0644]
scripts/gcc-x86_64-has-stack-protector.sh
scripts/mod/modpost.c
sound/drivers/Kconfig

index 45932ec..b41f3e5 100644 (file)
@@ -18,11 +18,11 @@ For an architecture to support this feature, it must define some of
 these macros in include/asm-XXX/topology.h:
 #define topology_physical_package_id(cpu)
 #define topology_core_id(cpu)
-#define topology_thread_siblings(cpu)
-#define topology_core_siblings(cpu)
+#define topology_thread_cpumask(cpu)
+#define topology_core_cpumask(cpu)
 
 The type of **_id is int.
-The type of siblings is cpumask_t.
+The type of siblings is (const) struct cpumask *.
 
 To be consistent on all architectures, include/linux/topology.h
 provides default definitions for any of the above macros that are
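The Documentation/cputopology.txt hunk above documents the renamed accessors, which now hand back a const struct cpumask * instead of a cpumask_t by value. A minimal sketch of a consumer, assuming only the topology_core_cpumask() helper exactly as documented in the hunk:

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/topology.h>

/* Illustrative only: count how many CPUs share a core with 'cpu'. */
static unsigned int core_sibling_count(int cpu)
{
	const struct cpumask *mask = topology_core_cpumask(cpu);

	return cpumask_weight(mask);	/* bits set in the sibling mask */
}

/* Or walk the siblings individually with the new-style iterator. */
static void print_core_siblings(int cpu)
{
	int sibling;

	for_each_cpu(sibling, topology_core_cpumask(cpu))
		printk(KERN_INFO "CPU%d shares a core with CPU%d\n", cpu, sibling);
}
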
index 7b4596a..1229969 100644 (file)
@@ -158,7 +158,7 @@ Offset      Proto   Name            Meaning
 0202/4 2.00+   header          Magic signature "HdrS"
 0206/2 2.00+   version         Boot protocol version supported
 0208/4 2.00+   realmode_swtch  Boot loader hook (see below)
-020C/2 2.00+   start_sys       The load-low segment (0x1000) (obsolete)
+020C/2 2.00+   start_sys_seg   The load-low segment (0x1000) (obsolete)
 020E/2 2.00+   kernel_version  Pointer to kernel version string
 0210/1 2.00+   type_of_loader  Boot loader identifier
 0211/1 2.00+   loadflags       Boot protocol option flags
@@ -170,10 +170,11 @@ Offset    Proto   Name            Meaning
 0224/2 2.01+   heap_end_ptr    Free memory after setup end
 0226/2 N/A     pad1            Unused
 0228/4 2.02+   cmd_line_ptr    32-bit pointer to the kernel command line
-022C/4 2.03+   initrd_addr_max Highest legal initrd address
+022C/4 2.03+   ramdisk_max     Highest legal initrd address
 0230/4 2.05+   kernel_alignment Physical addr alignment required for kernel
 0234/1 2.05+   relocatable_kernel Whether kernel is relocatable or not
-0235/3 N/A     pad2            Unused
+0235/1 N/A     pad2            Unused
+0236/2 N/A     pad3            Unused
 0238/4 2.06+   cmdline_size    Maximum size of the kernel command line
 023C/4 2.07+   hardware_subarch Hardware subarchitecture
 0240/8 2.07+   hardware_subarch_data Subarchitecture-specific data
@@ -299,14 +300,14 @@ Protocol: 2.00+
   e.g. 0x0204 for version 2.04, and 0x0a11 for a hypothetical version
   10.17.
 
-Field name:    readmode_swtch
+Field name:    realmode_swtch
 Type:          modify (optional)
 Offset/size:   0x208/4
 Protocol:      2.00+
 
   Boot loader hook (see ADVANCED BOOT LOADER HOOKS below.)
 
-Field name:    start_sys
+Field name:    start_sys_seg
 Type:          read
 Offset/size:   0x20c/2
 Protocol:      2.00+
@@ -468,7 +469,7 @@ Protocol:   2.02+
   zero, the kernel will assume that your boot loader does not support
   the 2.02+ protocol.
 
-Field name:    initrd_addr_max
+Field name:    ramdisk_max
 Type:          read
 Offset/size:   0x22c/4
 Protocol:      2.03+
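The Documentation/x86/boot.txt hunks above only rename documentation entries to match the real header field names (start_sys_seg, ramdisk_max); offsets and semantics are unchanged. A hedged sketch, using only offsets from the table above, of how a boot loader might look up the highest legal initrd address; the 0x37FFFFFF fallback for pre-2.03 headers is an assumption drawn from the rest of boot.txt, not from this hunk:

#include <stdint.h>
#include <string.h>

static uint32_t read_le32(const uint8_t *p)
{
	return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

/* 'setup' is a placeholder: the first sectors of the bzImage in memory. */
static uint32_t initrd_limit(const uint8_t *setup)
{
	uint16_t version;

	if (memcmp(setup + 0x202, "HdrS", 4) != 0)	/* magic from offset 0x202 */
		return 0x37ffffff;			/* assumed pre-2.00 default */

	version = setup[0x206] | setup[0x207] << 8;	/* protocol version, 0x206/2 */
	if (version < 0x0203)				/* ramdisk_max exists on 2.03+ only */
		return 0x37ffffff;

	return read_le32(setup + 0x22c);		/* ramdisk_max at 0x22c/4 */
}
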
index 681c1d2..77a006d 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -532,8 +532,9 @@ KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN})
 endif
 
 # Force gcc to behave correct even for buggy distributions
-# Arch Makefiles may override this setting
+ifndef CONFIG_CC_STACKPROTECTOR
 KBUILD_CFLAGS += $(call cc-option, -fno-stack-protector)
+endif
 
 ifdef CONFIG_FRAME_POINTER
 KBUILD_CFLAGS  += -fno-omit-frame-pointer -fno-optimize-sibling-calls
index 703731a..7bc7489 100644 (file)
@@ -55,7 +55,7 @@ int irq_select_affinity(unsigned int irq)
                cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
        last_cpu = cpu;
 
-       irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+       cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
        irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu));
        return 0;
 }
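The hunk above is the first instance of a conversion repeated across most of the architecture hunks in this merge: irq_desc[].affinity becomes a struct cpumask * (so it can live off-stack under CONFIG_CPUMASK_OFFSTACK), and struct assignment, cpu_isset() and cpus_setall() give way to cpumask_copy(), cpumask_test_cpu() and cpumask_setall(). A minimal sketch of the resulting pattern, assuming the irq_to_desc()/set_affinity interfaces already used elsewhere in this diff:

#include <linux/cpumask.h>
#include <linux/irq.h>

/* Illustrative only: pin an IRQ to one CPU with the pointer-based mask. */
static void pin_irq_to_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	/* old style: desc->affinity = cpumask_of_cpu(cpu); */
	cpumask_copy(desc->affinity, cpumask_of(cpu));

	if (desc->chip->set_affinity)
		desc->chip->set_affinity(irq, cpumask_of(cpu));
}
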
index 363db18..45eacb5 100644 (file)
@@ -104,6 +104,11 @@ static struct irq_desc bad_irq_desc = {
        .lock = __SPIN_LOCK_UNLOCKED(bad_irq_desc.lock),
 };
 
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* We are not allocating bad_irq_desc.affinity or .pending_mask */
+#error "ARM architecture does not support CONFIG_CPUMASK_OFFSTACK."
+#endif
+
 /*
  * do_IRQ handles all hardware IRQ's.  Decoded IRQs should not
  * come via this function.  Instead, they should provide their
@@ -161,7 +166,7 @@ void __init init_IRQ(void)
                irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE;
 
 #ifdef CONFIG_SMP
-       bad_irq_desc.affinity = CPU_MASK_ALL;
+       cpumask_setall(bad_irq_desc.affinity);
        bad_irq_desc.cpu = smp_processor_id();
 #endif
        init_arch_irq();
@@ -191,15 +196,16 @@ void migrate_irqs(void)
                struct irq_desc *desc = irq_desc + i;
 
                if (desc->cpu == cpu) {
-                       unsigned int newcpu = any_online_cpu(desc->affinity);
-
-                       if (newcpu == NR_CPUS) {
+                       unsigned int newcpu = cpumask_any_and(desc->affinity,
+                                                             cpu_online_mask);
+                       if (newcpu >= nr_cpu_ids) {
                                if (printk_ratelimit())
                                        printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
                                               i, cpu);
 
-                               cpus_setall(desc->affinity);
-                               newcpu = any_online_cpu(desc->affinity);
+                               cpumask_setall(desc->affinity);
+                               newcpu = cpumask_any_and(desc->affinity,
+                                                        cpu_online_mask);
                        }
 
                        route_irq(desc, i, newcpu);
index 0021607..85598f7 100644 (file)
@@ -65,6 +65,7 @@ SECTIONS
 #endif
                . = ALIGN(4096);
                __per_cpu_start = .;
+                       *(.data.percpu.page_aligned)
                        *(.data.percpu)
                        *(.data.percpu.shared_aligned)
                __per_cpu_end = .;
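This and the matching ia64/powerpc linker-script hunks collect a new .data.percpu.page_aligned input section at the very start of the per-CPU area, so its contents keep page alignment. A hedged sketch of what presumably feeds that section, assuming the DEFINE_PER_CPU_PAGE_ALIGNED() helper from the per-CPU rework carried in this merge:

#include <linux/percpu.h>

/* Illustrative only: a per-CPU object that must start on a page boundary. */
struct pcpu_scratch {
	char buf[4096];
};

/* Assumed to emit the variable into .data.percpu.page_aligned, which the
 * linker-script hunk above places first inside the per-CPU area. */
static DEFINE_PER_CPU_PAGE_ALIGNED(struct pcpu_scratch, pcpu_scratch);
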
index 6d6bd58..853d42b 100644 (file)
@@ -263,7 +263,7 @@ static void em_route_irq(int irq, unsigned int cpu)
        const struct cpumask *mask = cpumask_of(cpu);
 
        spin_lock_irq(&desc->lock);
-       desc->affinity = *mask;
+       cpumask_copy(desc->affinity, mask);
        desc->chip->set_affinity(irq, mask);
        spin_unlock_irq(&desc->lock);
 }
index 75724ee..23e9aa0 100644 (file)
@@ -70,6 +70,11 @@ static struct irq_desc bad_irq_desc = {
 #endif
 };
 
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* We are not allocating a variable-sized bad_irq_desc.affinity */
+#error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK."
+#endif
+
 int show_interrupts(struct seq_file *p, void *v)
 {
        int i = *(loff_t *) v, j;
index 77f30b6..30cf465 100644 (file)
@@ -27,12 +27,12 @@ extern void *per_cpu_init(void);
 
 #else /* ! SMP */
 
-#define PER_CPU_ATTRIBUTES     __attribute__((__section__(".data.percpu")))
-
 #define per_cpu_init()                         (__phys_per_cpu_start)
 
 #endif /* SMP */
 
+#define PER_CPU_BASE_SECTION ".data.percpu"
+
 /*
  * Be extremely careful when taking the address of this variable!  Due to virtual
  * remapping, it is different from the canonical address returned by __get_cpu_var(var)!
index 32f3af1..3193f44 100644 (file)
@@ -84,7 +84,7 @@ void build_cpu_to_node_map(void);
        .child                  = NULL,                 \
        .groups                 = NULL,                 \
        .min_interval           = 8,                    \
-       .max_interval           = 8*(min(num_online_cpus(), 32)), \
+       .max_interval           = 8*(min(num_online_cpus(), 32U)), \
        .busy_factor            = 64,                   \
        .imbalance_pct          = 125,                  \
        .cache_nice_tries       = 2,                    \
diff --git a/arch/ia64/include/asm/uv/uv.h b/arch/ia64/include/asm/uv/uv.h
new file mode 100644 (file)
index 0000000..61b5bdf
--- /dev/null
@@ -0,0 +1,13 @@
+#ifndef _ASM_IA64_UV_UV_H
+#define _ASM_IA64_UV_UV_H
+
+#include <asm/system.h>
+#include <asm/sn/simulator.h>
+
+static inline int is_uv_system(void)
+{
+       /* temporary support for running on hardware simulator */
+       return IS_MEDUSA() || ia64_platform_is("uv");
+}
+
+#endif /* _ASM_IA64_UV_UV_H */
index d541671..2363ed1 100644 (file)
@@ -199,6 +199,10 @@ char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size)
        return __va(phys_addr);
 }
 
+char *__init __acpi_unmap_table(unsigned long virt_addr, unsigned long size)
+{
+}
+
 /* --------------------------------------------------------------------------
                             Boot-time Table Parsing
    -------------------------------------------------------------------------- */
index 5cfd3d9..006ad36 100644 (file)
@@ -880,7 +880,7 @@ iosapic_unregister_intr (unsigned int gsi)
        if (iosapic_intr_info[irq].count == 0) {
 #ifdef CONFIG_SMP
                /* Clear affinity */
-               cpus_setall(idesc->affinity);
+               cpumask_setall(idesc->affinity);
 #endif
                /* Clear the interrupt information */
                iosapic_intr_info[irq].dest = 0;
index a58f64c..226233a 100644 (file)
@@ -103,7 +103,7 @@ static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
 void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 {
        if (irq < NR_IRQS) {
-               cpumask_copy(&irq_desc[irq].affinity,
+               cpumask_copy(irq_desc[irq].affinity,
                             cpumask_of(cpu_logical_id(hwid)));
                irq_redir[irq] = (char) (redir & 0xff);
        }
@@ -148,7 +148,7 @@ static void migrate_irqs(void)
                if (desc->status == IRQ_PER_CPU)
                        continue;
 
-               if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask)
+               if (cpumask_any_and(irq_desc[irq].affinity, cpu_online_mask)
                    >= nr_cpu_ids) {
                        /*
                         * Save it for phase 2 processing
index 28d3d48..927ad02 100644 (file)
@@ -493,11 +493,13 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
        saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
        ia64_srlz_d();
        while (vector != IA64_SPURIOUS_INT_VECTOR) {
+               struct irq_desc *desc = irq_to_desc(vector);
+
                if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
                        smp_local_flush_tlb();
-                       kstat_this_cpu.irqs[vector]++;
+                       kstat_incr_irqs_this_cpu(vector, desc);
                } else if (unlikely(IS_RESCHEDULE(vector)))
-                       kstat_this_cpu.irqs[vector]++;
+                       kstat_incr_irqs_this_cpu(vector, desc);
                else {
                        int irq = local_vector_to_irq(vector);
 
@@ -551,11 +553,13 @@ void ia64_process_pending_intr(void)
          * Perform normal interrupt style processing
          */
        while (vector != IA64_SPURIOUS_INT_VECTOR) {
+               struct irq_desc *desc = irq_to_desc(vector);
+
                if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
                        smp_local_flush_tlb();
-                       kstat_this_cpu.irqs[vector]++;
+                       kstat_incr_irqs_this_cpu(vector, desc);
                } else if (unlikely(IS_RESCHEDULE(vector)))
-                       kstat_this_cpu.irqs[vector]++;
+                       kstat_incr_irqs_this_cpu(vector, desc);
                else {
                        struct pt_regs *old_regs = set_irq_regs(NULL);
                        int irq = local_vector_to_irq(vector);
index 8903393..dcb6b7c 100644 (file)
@@ -75,7 +75,7 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
        msg.data = data;
 
        write_msi_msg(irq, &msg);
-       irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+       cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 }
 #endif /* CONFIG_SMP */
 
@@ -187,7 +187,7 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
        msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
 
        dmar_msi_write(irq, &msg);
-       irq_desc[irq].affinity = *mask;
+       cpumask_copy(irq_desc[irq].affinity, mask);
 }
 #endif /* CONFIG_SMP */
 
index 10a7d47..f45e4e5 100644 (file)
@@ -219,6 +219,7 @@ SECTIONS
   .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
        {
                __per_cpu_start = .;
+               *(.data.percpu.page_aligned)
                *(.data.percpu)
                *(.data.percpu.shared_aligned)
                __per_cpu_end = .;
index ca553b0..81e4289 100644 (file)
@@ -205,7 +205,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
        msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
 
        write_msi_msg(irq, &msg);
-       irq_desc[irq].affinity = *cpu_mask;
+       cpumask_copy(irq_desc[irq].affinity, cpu_mask);
 }
 #endif /* CONFIG_SMP */
 
index abc62aa..3214ade 100644 (file)
@@ -66,7 +66,7 @@ extern void smtc_forward_irq(unsigned int irq);
  */
 #define IRQ_AFFINITY_HOOK(irq)                                         \
 do {                                                                   \
-    if (!cpu_isset(smp_processor_id(), irq_desc[irq].affinity)) {      \
+    if (!cpumask_test_cpu(smp_processor_id(), irq_desc[irq].affinity)) {\
        smtc_forward_irq(irq);                                          \
        irq_exit();                                                     \
        return;                                                         \
index 494a49a..87deb8f 100644 (file)
@@ -187,7 +187,7 @@ static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
                set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
 
        }
-       irq_desc[irq].affinity = *cpumask;
+       cpumask_copy(irq_desc[irq].affinity, cpumask);
        spin_unlock_irqrestore(&gic_lock, flags);
 
 }
index b6cca01..5f5af7d 100644 (file)
@@ -686,7 +686,7 @@ void smtc_forward_irq(unsigned int irq)
         * and efficiency, we just pick the easiest one to find.
         */
 
-       target = first_cpu(irq_desc[irq].affinity);
+       target = cpumask_first(irq_desc[irq].affinity);
 
        /*
         * We depend on the platform code to have correctly processed
@@ -921,11 +921,13 @@ void ipi_decode(struct smtc_ipi *pipi)
        struct clock_event_device *cd;
        void *arg_copy = pipi->arg;
        int type_copy = pipi->type;
+       int irq = MIPS_CPU_IRQ_BASE + 1;
+
        smtc_ipi_nq(&freeIPIq, pipi);
        switch (type_copy) {
        case SMTC_CLOCK_TICK:
                irq_enter();
-               kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
+               kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
                cd = &per_cpu(mips_clockevent_device, cpu);
                cd->event_handler(cd);
                irq_exit();
index aabd727..5ba3188 100644 (file)
@@ -116,7 +116,7 @@ struct plat_smp_ops msmtc_smp_ops = {
 
 void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
 {
-       cpumask_t tmask = *affinity;
+       cpumask_t tmask;
        int cpu = 0;
        void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);
 
@@ -139,11 +139,12 @@ void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
         * be made to forward to an offline "CPU".
         */
 
+       cpumask_copy(&tmask, affinity);
        for_each_cpu(cpu, affinity) {
                if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
                        cpu_clear(cpu, tmask);
        }
-       irq_desc[irq].affinity = tmask;
+       cpumask_copy(irq_desc[irq].affinity, &tmask);
 
        if (cpus_empty(tmask))
                /*
index f8b18af..0ecd5fe 100644 (file)
@@ -155,7 +155,7 @@ static void indy_buserror_irq(void)
        int irq = SGI_BUSERR_IRQ;
 
        irq_enter();
-       kstat_this_cpu.irqs[irq]++;
+       kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
        ip22_be_interrupt(irq);
        irq_exit();
 }
index 3dcb27e..c8f7d23 100644 (file)
@@ -122,7 +122,7 @@ void indy_8254timer_irq(void)
        char c;
 
        irq_enter();
-       kstat_this_cpu.irqs[irq]++;
+       kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
        printk(KERN_ALERT "Oops, got 8254 interrupt.\n");
        ArcRead(0, &c, 1, &cnt);
        ArcEnterInteractiveMode();
index dddfda8..3146916 100644 (file)
@@ -178,9 +178,10 @@ struct plat_smp_ops bcm1480_smp_ops = {
 void bcm1480_mailbox_interrupt(void)
 {
        int cpu = smp_processor_id();
+       int irq = K_BCM1480_INT_MBOX_0_0;
        unsigned int action;
 
-       kstat_this_cpu.irqs[K_BCM1480_INT_MBOX_0_0]++;
+       kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
        /* Load the mailbox register to figure out what we're supposed to do */
        action = (__raw_readq(mailbox_0_regs[cpu]) >> 48) & 0xffff;
 
index 5950a28..cad1400 100644 (file)
@@ -166,9 +166,10 @@ struct plat_smp_ops sb_smp_ops = {
 void sb1250_mailbox_interrupt(void)
 {
        int cpu = smp_processor_id();
+       int irq = K_INT_MBOX_0;
        unsigned int action;
 
-       kstat_this_cpu.irqs[K_INT_MBOX_0]++;
+       kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
        /* Load the mailbox register to figure out what we're supposed to do */
        action = (____raw_readq(mailbox_regs[cpu]) >> 48) & 0xffff;
 
index 10811e9..2e370d8 100644 (file)
@@ -130,6 +130,7 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
         * the stack NMI-atomically, it's safe to use smp_processor_id().
         */
        int sum, cpu = smp_processor_id();
+       int irq = NMIIRQ;
        u8 wdt, tmp;
 
        wdt = WDCTR & ~WDCTR_WDCNE;
@@ -138,7 +139,7 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
        NMICR = NMICR_WDIF;
 
        nmi_count(cpu)++;
-       kstat_this_cpu.irqs[NMIIRQ]++;
+       kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
        sum = irq_stat[cpu].__irq_count;
 
        if (last_irq_sums[cpu] == sum) {
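The mips, mn10300 and sparc hunks in this group all make the same substitution: raw kstat_this_cpu.irqs[] increments become kstat_incr_irqs_this_cpu() calls that go through the IRQ descriptor, which keeps per-IRQ statistics working once SPARSE_IRQ kernels stop laying irq_desc out as a flat array. A minimal sketch of the pattern, with the IRQ number as a placeholder:

#include <linux/irq.h>
#include <linux/kernel_stat.h>

static void account_platform_tick(void)
{
	int irq = 0;	/* placeholder: the platform's timer/NMI IRQ number */

	/* old: kstat_this_cpu.irqs[irq]++; */
	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
}
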
index ac2c822..4948280 100644 (file)
@@ -120,7 +120,7 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
        if (CHECK_IRQ_PER_CPU(irq)) {
                /* Bad linux design decision.  The mask has already
                 * been set; we must reset it */
-               irq_desc[irq].affinity = CPU_MASK_ALL;
+               cpumask_setall(irq_desc[irq].affinity);
                return -EINVAL;
        }
 
@@ -136,7 +136,7 @@ static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
        if (cpu_check_affinity(irq, dest))
                return;
 
-       irq_desc[irq].affinity = *dest;
+       cpumask_copy(irq_desc[irq].affinity, dest);
 }
 #endif
 
@@ -295,7 +295,7 @@ int txn_alloc_irq(unsigned int bits_wide)
 unsigned long txn_affinity_addr(unsigned int irq, int cpu)
 {
 #ifdef CONFIG_SMP
-       irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+       cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 #endif
 
        return per_cpu(cpu_data, cpu).txn_addr;
@@ -352,7 +352,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
        irq = eirr_to_irq(eirr_val);
 
 #ifdef CONFIG_SMP
-       dest = irq_desc[irq].affinity;
+       cpumask_copy(&dest, irq_desc[irq].affinity);
        if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
            !cpu_isset(smp_processor_id(), dest)) {
                int cpu = first_cpu(dest);
index 23b8b5e..ad1e5ac 100644 (file)
@@ -231,7 +231,7 @@ void fixup_irqs(cpumask_t map)
                if (irq_desc[irq].status & IRQ_PER_CPU)
                        continue;
 
-               cpus_and(mask, irq_desc[irq].affinity, map);
+               cpumask_and(&mask, irq_desc[irq].affinity, &map);
                if (any_online_cpu(mask) == NR_CPUS) {
                        printk("Breaking affinity for irq %i\n", irq);
                        mask = map;
index 161b9b9..295ccc5 100644 (file)
@@ -184,6 +184,7 @@ SECTIONS
        . = ALIGN(PAGE_SIZE);
        .data.percpu  : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
                __per_cpu_start = .;
+               *(.data.percpu.page_aligned)
                *(.data.percpu)
                *(.data.percpu.shared_aligned)
                __per_cpu_end = .;
index 84e058f..80b5134 100644 (file)
@@ -153,9 +153,10 @@ static int get_irq_server(unsigned int virq, unsigned int strict_check)
 {
        int server;
        /* For the moment only implement delivery to all cpus or one cpu */
-       cpumask_t cpumask = irq_desc[virq].affinity;
+       cpumask_t cpumask;
        cpumask_t tmp = CPU_MASK_NONE;
 
+       cpumask_copy(&cpumask, irq_desc[virq].affinity);
        if (!distribute_irqs)
                return default_server;
 
@@ -869,7 +870,7 @@ void xics_migrate_irqs_away(void)
                       virq, cpu);
 
                /* Reset affinity to all cpus */
-               irq_desc[virq].affinity = CPU_MASK_ALL;
+               cpumask_setall(irq_desc[virq].affinity);
                desc->chip->set_affinity(virq, cpu_all_mask);
 unlock:
                spin_unlock_irqrestore(&desc->lock, flags);
index a35297d..532e205 100644 (file)
@@ -566,9 +566,10 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
 #ifdef CONFIG_SMP
 static int irq_choose_cpu(unsigned int virt_irq)
 {
-       cpumask_t mask = irq_desc[virt_irq].affinity;
+       cpumask_t mask;
        int cpuid;
 
+       cpumask_copy(&mask, irq_desc[virt_irq].affinity);
        if (cpus_equal(mask, CPU_MASK_ALL)) {
                static int irq_rover;
                static DEFINE_SPINLOCK(irq_rover_lock);
index e289376..3d2c6ba 100644 (file)
@@ -252,9 +252,10 @@ struct irq_handler_data {
 #ifdef CONFIG_SMP
 static int irq_choose_cpu(unsigned int virt_irq)
 {
-       cpumask_t mask = irq_desc[virt_irq].affinity;
+       cpumask_t mask;
        int cpuid;
 
+       cpumask_copy(&mask, irq_desc[virt_irq].affinity);
        if (cpus_equal(mask, CPU_MASK_ALL)) {
                static int irq_rover;
                static DEFINE_SPINLOCK(irq_rover_lock);
@@ -796,7 +797,7 @@ void fixup_irqs(void)
                    !(irq_desc[irq].status & IRQ_PER_CPU)) {
                        if (irq_desc[irq].chip->set_affinity)
                                irq_desc[irq].chip->set_affinity(irq,
-                                       &irq_desc[irq].affinity);
+                                       irq_desc[irq].affinity);
                }
                spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
        }
index 2db3c22..db310aa 100644 (file)
@@ -729,7 +729,7 @@ void timer_interrupt(int irq, struct pt_regs *regs)
 
        irq_enter();
 
-       kstat_this_cpu.irqs[0]++;
+       kstat_incr_irqs_this_cpu(0, irq_to_desc(0));
 
        if (unlikely(!evt->event_handler)) {
                printk(KERN_WARNING
index 9c39095..1042d69 100644 (file)
@@ -5,7 +5,7 @@ mainmenu "Linux Kernel Configuration for x86"
 config 64BIT
        bool "64-bit kernel" if ARCH = "x86"
        default ARCH = "x86_64"
-       help
+       ---help---
          Say yes to build a 64-bit kernel - formerly known as x86_64
          Say no to build a 32-bit kernel - formerly known as i386
 
@@ -34,8 +34,8 @@ config X86
        select HAVE_FUNCTION_TRACER
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_FUNCTION_TRACE_MCOUNT_TEST
-       select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
-       select HAVE_ARCH_KGDB if !X86_VOYAGER
+       select HAVE_KVM
+       select HAVE_ARCH_KGDB
        select HAVE_ARCH_TRACEHOOK
        select HAVE_GENERIC_DMA_COHERENT if X86_32
        select HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -133,18 +133,16 @@ config ARCH_HAS_CACHE_LINE_SIZE
        def_bool y
 
 config HAVE_SETUP_PER_CPU_AREA
-       def_bool X86_64_SMP || (X86_SMP && !X86_VOYAGER)
+       def_bool y
 
 config HAVE_CPUMASK_OF_CPU_MAP
        def_bool X86_64_SMP
 
 config ARCH_HIBERNATION_POSSIBLE
        def_bool y
-       depends on !SMP || !X86_VOYAGER
 
 config ARCH_SUSPEND_POSSIBLE
        def_bool y
-       depends on !X86_VOYAGER
 
 config ZONE_DMA32
        bool
@@ -174,11 +172,6 @@ config GENERIC_PENDING_IRQ
        depends on GENERIC_HARDIRQS && SMP
        default y
 
-config X86_SMP
-       bool
-       depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64)
-       default y
-
 config USE_GENERIC_SMP_HELPERS
        def_bool y
        depends on SMP
@@ -194,19 +187,17 @@ config X86_64_SMP
 config X86_HT
        bool
        depends on SMP
-       depends on (X86_32 && !X86_VOYAGER) || X86_64
-       default y
-
-config X86_BIOS_REBOOT
-       bool
-       depends on !X86_VOYAGER
        default y
 
 config X86_TRAMPOLINE
        bool
-       depends on X86_SMP || (X86_VOYAGER && SMP) || (64BIT && ACPI_SLEEP)
+       depends on SMP || (64BIT && ACPI_SLEEP)
        default y
 
+config X86_32_LAZY_GS
+       def_bool y
+       depends on X86_32 && !CC_STACKPROTECTOR
+
 config KTIME_SCALAR
        def_bool X86_32
 source "init/Kconfig"
@@ -244,14 +235,10 @@ config SMP
 
          If you don't know what to do here, say N.
 
-config X86_HAS_BOOT_CPU_ID
-       def_bool y
-       depends on X86_VOYAGER
-
 config SPARSE_IRQ
        bool "Support sparse irq numbering"
        depends on PCI_MSI || HT_IRQ
-       help
+       ---help---
          This enables support for sparse irqs. This is useful for distro
          kernels that want to define a high CONFIG_NR_CPUS value but still
          want to have low kernel memory footprint on smaller machines.
@@ -265,137 +252,154 @@ config NUMA_MIGRATE_IRQ_DESC
        bool "Move irq desc when changing irq smp_affinity"
        depends on SPARSE_IRQ && NUMA
        default n
-       help
+       ---help---
          This enables moving irq_desc to cpu/node that irq will use handled.
 
          If you don't know what to do here, say N.
 
-config X86_FIND_SMP_CONFIG
-       def_bool y
-       depends on X86_MPPARSE || X86_VOYAGER
-
 config X86_MPPARSE
        bool "Enable MPS table" if ACPI
        default y
        depends on X86_LOCAL_APIC
-       help
+       ---help---
          For old smp systems that do not have proper acpi support. Newer systems
          (esp with 64bit cpus) with acpi support, MADT and DSDT will override it
 
-choice
-       prompt "Subarchitecture Type"
-       default X86_PC
+config X86_BIGSMP
+       bool "Support for big SMP systems with more than 8 CPUs"
+       depends on X86_32 && SMP
+       ---help---
+         This option is needed for the systems that have more than 8 CPUs
 
-config X86_PC
-       bool "PC-compatible"
-       help
-         Choose this option if your computer is a standard PC or compatible.
+config X86_EXTENDED_PLATFORM
+       bool "Support for extended (non-PC) x86 platforms"
+       default y
+       ---help---
+         If you disable this option then the kernel will only support
+         standard PC platforms. (which covers the vast majority of
+         systems out there.)
+
+         If you enable this option then you'll be able to select a number
+         of non-PC x86 platforms.
+
+         If you have one of these systems, or if you want to build a
+         generic distribution kernel, say Y here - otherwise say N.
+
+# This is an alphabetically sorted list of 64 bit extended platforms
+# Please maintain the alphabetic order if and when there are additions
+
+config X86_VSMP
+       bool "ScaleMP vSMP"
+       select PARAVIRT
+       depends on X86_64 && PCI
+       depends on X86_EXTENDED_PLATFORM
+       ---help---
+         Support for ScaleMP vSMP systems.  Say 'Y' here if this kernel is
+         supposed to run on these EM64T-based machines.  Only choose this option
+         if you have one of these machines.
+
+config X86_UV
+       bool "SGI Ultraviolet"
+       depends on X86_64
+       depends on X86_EXTENDED_PLATFORM
+       ---help---
+         This option is needed in order to support SGI Ultraviolet systems.
+         If you don't have one of these, you should say N here.
+
+# Following is an alphabetically sorted list of 32 bit extended platforms
+# Please maintain the alphabetic order if and when there are additions
 
 config X86_ELAN
        bool "AMD Elan"
        depends on X86_32
-       help
+       depends on X86_EXTENDED_PLATFORM
+       ---help---
          Select this for an AMD Elan processor.
 
          Do not use this option for K6/Athlon/Opteron processors!
 
          If unsure, choose "PC-compatible" instead.
 
-config X86_VOYAGER
-       bool "Voyager (NCR)"
-       depends on X86_32 && (SMP || BROKEN) && !PCI
-       help
-         Voyager is an MCA-based 32-way capable SMP architecture proprietary
-         to NCR Corp.  Machine classes 345x/35xx/4100/51xx are Voyager-based.
-
-         *** WARNING ***
-
-         If you do not specifically know you have a Voyager based machine,
-         say N here, otherwise the kernel you build will not be bootable.
-
-config X86_GENERICARCH
-       bool "Generic architecture"
+config X86_RDC321X
+       bool "RDC R-321x SoC"
        depends on X86_32
-       help
-          This option compiles in the NUMAQ, Summit, bigsmp, ES7000, default
+       depends on X86_EXTENDED_PLATFORM
+       select M486
+       select X86_REBOOTFIXUPS
+       ---help---
+         This option is needed for RDC R-321x system-on-chip, also known
+         as R-8610-(G).
+         If you don't have one of these chips, you should say N here.
+
+config X86_32_NON_STANDARD
+       bool "Support non-standard 32-bit SMP architectures"
+       depends on X86_32 && SMP
+       depends on X86_EXTENDED_PLATFORM
+       ---help---
+         This option compiles in the NUMAQ, Summit, bigsmp, ES7000, default
          subarchitectures.  It is intended for a generic binary kernel.
          if you select them all, kernel will probe it one by one. and will
          fallback to default.
 
-if X86_GENERICARCH
+# Alphabetically sorted list of Non standard 32 bit platforms
 
 config X86_NUMAQ
        bool "NUMAQ (IBM/Sequent)"
-       depends on SMP && X86_32 && PCI && X86_MPPARSE
+       depends on X86_32_NON_STANDARD
        select NUMA
-       help
+       select X86_MPPARSE
+       ---help---
          This option is used for getting Linux to run on a NUMAQ (IBM/Sequent)
          NUMA multiquad box. This changes the way that processors are
          bootstrapped, and uses Clustered Logical APIC addressing mode instead
          of Flat Logical.  You will need a new lynxer.elf file to flash your
          firmware with - send email to <Martin.Bligh@us.ibm.com>.
 
+config X86_VISWS
+       bool "SGI 320/540 (Visual Workstation)"
+       depends on X86_32 && PCI && X86_MPPARSE && PCI_GODIRECT
+       depends on X86_32_NON_STANDARD
+       ---help---
+         The SGI Visual Workstation series is an IA32-based workstation
+         based on SGI systems chips with some legacy PC hardware attached.
+
+         Say Y here to create a kernel to run on the SGI 320 or 540.
+
+         A kernel compiled for the Visual Workstation will run on general
+         PCs as well. See <file:Documentation/sgi-visws.txt> for details.
+
 config X86_SUMMIT
        bool "Summit/EXA (IBM x440)"
-       depends on X86_32 && SMP
-       help
+       depends on X86_32_NON_STANDARD
+       ---help---
          This option is needed for IBM systems that use the Summit/EXA chipset.
          In particular, it is needed for the x440.
 
 config X86_ES7000
-       bool "Support for Unisys ES7000 IA32 series"
-       depends on X86_32 && SMP
-       help
+       bool "Unisys ES7000 IA32 series"
+       depends on X86_32_NON_STANDARD && X86_BIGSMP
+       ---help---
          Support for Unisys ES7000 systems.  Say 'Y' here if this kernel is
          supposed to run on an IA32-based Unisys ES7000 system.
 
-config X86_BIGSMP
-       bool "Support for big SMP systems with more than 8 CPUs"
-       depends on X86_32 && SMP
-       help
-         This option is needed for the systems that have more than 8 CPUs
-         and if the system is not of any sub-arch type above.
-
-endif
-
-config X86_VSMP
-       bool "Support for ScaleMP vSMP"
-       select PARAVIRT
-       depends on X86_64 && PCI
-       help
-         Support for ScaleMP vSMP systems.  Say 'Y' here if this kernel is
-         supposed to run on these EM64T-based machines.  Only choose this option
-         if you have one of these machines.
-
-endchoice
-
-config X86_VISWS
-       bool "SGI 320/540 (Visual Workstation)"
-       depends on X86_32 && PCI && !X86_VOYAGER && X86_MPPARSE && PCI_GODIRECT
-       help
-         The SGI Visual Workstation series is an IA32-based workstation
-         based on SGI systems chips with some legacy PC hardware attached.
-
-         Say Y here to create a kernel to run on the SGI 320 or 540.
+config X86_VOYAGER
+       bool "Voyager (NCR)"
+       depends on SMP && !PCI && BROKEN
+       depends on X86_32_NON_STANDARD
+       ---help---
+         Voyager is an MCA-based 32-way capable SMP architecture proprietary
+         to NCR Corp.  Machine classes 345x/35xx/4100/51xx are Voyager-based.
 
-         A kernel compiled for the Visual Workstation will run on general
-         PCs as well. See <file:Documentation/sgi-visws.txt> for details.
+         *** WARNING ***
 
-config X86_RDC321X
-       bool "RDC R-321x SoC"
-       depends on X86_32
-       select M486
-       select X86_REBOOTFIXUPS
-       help
-         This option is needed for RDC R-321x system-on-chip, also known
-         as R-8610-(G).
-         If you don't have one of these chips, you should say N here.
+         If you do not specifically know you have a Voyager based machine,
+         say N here, otherwise the kernel you build will not be bootable.
 
 config SCHED_OMIT_FRAME_POINTER
        def_bool y
        prompt "Single-depth WCHAN output"
        depends on X86
-       help
+       ---help---
          Calculate simpler /proc/<PID>/wchan values. If this option
          is disabled then wchan values will recurse back to the
          caller function. This provides more accurate wchan values,
@@ -405,7 +409,7 @@ config SCHED_OMIT_FRAME_POINTER
 
 menuconfig PARAVIRT_GUEST
        bool "Paravirtualized guest support"
-       help
+       ---help---
          Say Y here to get to see options related to running Linux under
          various hypervisors.  This option alone does not add any kernel code.
 
@@ -419,8 +423,7 @@ config VMI
        bool "VMI Guest support"
        select PARAVIRT
        depends on X86_32
-       depends on !X86_VOYAGER
-       help
+       ---help---
          VMI provides a paravirtualized interface to the VMware ESX server
          (it could be used by other hypervisors in theory too, but is not
          at the moment), by linking the kernel to a GPL-ed ROM module
@@ -430,8 +433,7 @@ config KVM_CLOCK
        bool "KVM paravirtualized clock"
        select PARAVIRT
        select PARAVIRT_CLOCK
-       depends on !X86_VOYAGER
-       help
+       ---help---
          Turning on this option will allow you to run a paravirtualized clock
          when running over the KVM hypervisor. Instead of relying on a PIT
          (or probably other) emulation by the underlying device model, the host
@@ -441,17 +443,15 @@ config KVM_CLOCK
 config KVM_GUEST
        bool "KVM Guest support"
        select PARAVIRT
-       depends on !X86_VOYAGER
-       help
-        This option enables various optimizations for running under the KVM
-        hypervisor.
+       ---help---
+         This option enables various optimizations for running under the KVM
+         hypervisor.
 
 source "arch/x86/lguest/Kconfig"
 
 config PARAVIRT
        bool "Enable paravirtualization code"
-       depends on !X86_VOYAGER
-       help
+       ---help---
          This changes the kernel so it can modify itself when it is run
          under a hypervisor, potentially improving performance significantly
          over full virtualization.  However, when run without a hypervisor
@@ -464,51 +464,51 @@ config PARAVIRT_CLOCK
 endif
 
 config PARAVIRT_DEBUG
-       bool "paravirt-ops debugging"
-       depends on PARAVIRT && DEBUG_KERNEL
-       help
-         Enable to debug paravirt_ops internals.  Specifically, BUG if
-        a paravirt_op is missing when it is called.
+       bool "paravirt-ops debugging"
+       depends on PARAVIRT && DEBUG_KERNEL
+       ---help---
+         Enable to debug paravirt_ops internals.  Specifically, BUG if
+         a paravirt_op is missing when it is called.
 
 config MEMTEST
        bool "Memtest"
-       help
+       ---help---
          This option adds a kernel parameter 'memtest', which allows memtest
          to be set.
-               memtest=0, mean disabled; -- default
-               memtest=1, mean do 1 test pattern;
-               ...
-               memtest=4, mean do 4 test patterns.
+               memtest=0, mean disabled; -- default
+               memtest=1, mean do 1 test pattern;
+               ...
+               memtest=4, mean do 4 test patterns.
          If you are unsure how to answer this question, answer N.
 
 config X86_SUMMIT_NUMA
        def_bool y
-       depends on X86_32 && NUMA && X86_GENERICARCH
+       depends on X86_32 && NUMA && X86_32_NON_STANDARD
 
 config X86_CYCLONE_TIMER
        def_bool y
-       depends on X86_GENERICARCH
+       depends on X86_32_NON_STANDARD
 
 source "arch/x86/Kconfig.cpu"
 
 config HPET_TIMER
        def_bool X86_64
        prompt "HPET Timer Support" if X86_32
-       help
-         Use the IA-PC HPET (High Precision Event Timer) to manage
-         time in preference to the PIT and RTC, if a HPET is
-         present.
-         HPET is the next generation timer replacing legacy 8254s.
-         The HPET provides a stable time base on SMP
-         systems, unlike the TSC, but it is more expensive to access,
-         as it is off-chip.  You can find the HPET spec at
-         <http://www.intel.com/hardwaredesign/hpetspec_1.pdf>.
+       ---help---
+         Use the IA-PC HPET (High Precision Event Timer) to manage
+         time in preference to the PIT and RTC, if a HPET is
+         present.
+         HPET is the next generation timer replacing legacy 8254s.
+         The HPET provides a stable time base on SMP
+         systems, unlike the TSC, but it is more expensive to access,
+         as it is off-chip.  You can find the HPET spec at
+         <http://www.intel.com/hardwaredesign/hpetspec_1.pdf>.
 
-         You can safely choose Y here.  However, HPET will only be
-         activated if the platform and the BIOS support this feature.
-         Otherwise the 8254 will be used for timing services.
+         You can safely choose Y here.  However, HPET will only be
+         activated if the platform and the BIOS support this feature.
+         Otherwise the 8254 will be used for timing services.
 
-         Choose N to continue using the legacy 8254 timer.
+         Choose N to continue using the legacy 8254 timer.
 
 config HPET_EMULATE_RTC
        def_bool y
@@ -519,7 +519,7 @@ config HPET_EMULATE_RTC
 config DMI
        default y
        bool "Enable DMI scanning" if EMBEDDED
-       help
+       ---help---
          Enabled scanning of DMI to identify machine quirks. Say Y
          here unless you have verified that your setup is not
          affected by entries in the DMI blacklist. Required by PNP
@@ -531,7 +531,7 @@ config GART_IOMMU
        select SWIOTLB
        select AGP
        depends on X86_64 && PCI
-       help
+       ---help---
          Support for full DMA access of devices with 32bit memory access only
          on systems with more than 3GB. This is usually needed for USB,
          sound, many IDE/SATA chipsets and some other devices.
@@ -546,7 +546,7 @@ config CALGARY_IOMMU
        bool "IBM Calgary IOMMU support"
        select SWIOTLB
        depends on X86_64 && PCI && EXPERIMENTAL
-       help
+       ---help---
          Support for hardware IOMMUs in IBM's xSeries x366 and x460
          systems. Needed to run systems with more than 3GB of memory
          properly with 32-bit PCI devices that do not support DAC
@@ -564,7 +564,7 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
        def_bool y
        prompt "Should Calgary be enabled by default?"
        depends on CALGARY_IOMMU
-       help
+       ---help---
          Should Calgary be enabled by default? if you choose 'y', Calgary
          will be used (if it exists). If you choose 'n', Calgary will not be
          used even if it exists. If you choose 'n' and would like to use
@@ -576,7 +576,7 @@ config AMD_IOMMU
        select SWIOTLB
        select PCI_MSI
        depends on X86_64 && PCI && ACPI
-       help
+       ---help---
          With this option you can enable support for AMD IOMMU hardware in
          your system. An IOMMU is a hardware component which provides
          remapping of DMA memory accesses from devices. With an AMD IOMMU you
@@ -591,7 +591,7 @@ config AMD_IOMMU_STATS
        bool "Export AMD IOMMU statistics to debugfs"
        depends on AMD_IOMMU
        select DEBUG_FS
-       help
+       ---help---
          This option enables code in the AMD IOMMU driver to collect various
          statistics about whats happening in the driver and exports that
          information to userspace via debugfs.
@@ -600,7 +600,7 @@ config AMD_IOMMU_STATS
 # need this always selected by IOMMU for the VIA workaround
 config SWIOTLB
        def_bool y if X86_64
-       help
+       ---help---
          Support for software bounce buffers used on x86-64 systems
          which don't have a hardware IOMMU (e.g. the current generation
          of Intel's x86-64 CPUs). Using this PCI devices which can only
@@ -618,7 +618,7 @@ config MAXSMP
        depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
        select CPUMASK_OFFSTACK
        default n
-       help
+       ---help---
          Configure maximum number of CPUS and NUMA Nodes for this architecture.
          If unsure, say N.
 
@@ -629,7 +629,7 @@ config NR_CPUS
        default "4096" if MAXSMP
        default "32" if SMP && (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000)
        default "8" if SMP
-       help
+       ---help---
          This allows you to specify the maximum number of CPUs which this
          kernel will support.  The maximum supported value is 512 and the
          minimum value which makes sense is 2.
@@ -640,7 +640,7 @@ config NR_CPUS
 config SCHED_SMT
        bool "SMT (Hyperthreading) scheduler support"
        depends on X86_HT
-       help
+       ---help---
          SMT scheduler support improves the CPU scheduler's decision making
          when dealing with Intel Pentium 4 chips with HyperThreading at a
          cost of slightly increased overhead in some places. If unsure say
@@ -650,7 +650,7 @@ config SCHED_MC
        def_bool y
        prompt "Multi-core scheduler support"
        depends on X86_HT
-       help
+       ---help---
          Multi-core scheduler support improves the CPU scheduler's decision
          making when dealing with multi-core CPU chips at a cost of slightly
          increased overhead in some places. If unsure say N here.
@@ -659,8 +659,8 @@ source "kernel/Kconfig.preempt"
 
 config X86_UP_APIC
        bool "Local APIC support on uniprocessors"
-       depends on X86_32 && !SMP && !(X86_VOYAGER || X86_GENERICARCH)
-       help
+       depends on X86_32 && !SMP && !X86_32_NON_STANDARD
+       ---help---
          A local APIC (Advanced Programmable Interrupt Controller) is an
          integrated interrupt controller in the CPU. If you have a single-CPU
          system which has a processor with a local APIC, you can say Y here to
@@ -673,7 +673,7 @@ config X86_UP_APIC
 config X86_UP_IOAPIC
        bool "IO-APIC support on uniprocessors"
        depends on X86_UP_APIC
-       help
+       ---help---
          An IO-APIC (I/O Advanced Programmable Interrupt Controller) is an
          SMP-capable replacement for PC-style interrupt controllers. Most
          SMP systems and many recent uniprocessor systems have one.
@@ -684,11 +684,11 @@ config X86_UP_IOAPIC
 
 config X86_LOCAL_APIC
        def_bool y
-       depends on X86_64 || (X86_32 && (X86_UP_APIC || (SMP && !X86_VOYAGER) || X86_GENERICARCH))
+       depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC
 
 config X86_IO_APIC
        def_bool y
-       depends on X86_64 || (X86_32 && (X86_UP_IOAPIC || (SMP && !X86_VOYAGER) || X86_GENERICARCH))
+       depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC
 
 config X86_VISWS_APIC
        def_bool y
@@ -698,7 +698,7 @@ config X86_REROUTE_FOR_BROKEN_BOOT_IRQS
        bool "Reroute for broken boot IRQs"
        default n
        depends on X86_IO_APIC
-       help
+       ---help---
          This option enables a workaround that fixes a source of
          spurious interrupts. This is recommended when threaded
          interrupt handling is used on systems where the generation of
@@ -720,7 +720,6 @@ config X86_REROUTE_FOR_BROKEN_BOOT_IRQS
 
 config X86_MCE
        bool "Machine Check Exception"
-       depends on !X86_VOYAGER
        ---help---
          Machine Check Exception support allows the processor to notify the
          kernel if it detects a problem (e.g. overheating, component failure).
@@ -739,7 +738,7 @@ config X86_MCE_INTEL
        def_bool y
        prompt "Intel MCE features"
        depends on X86_64 && X86_MCE && X86_LOCAL_APIC
-       help
+       ---help---
           Additional support for intel specific MCE features such as
           the thermal monitor.
 
@@ -747,14 +746,14 @@ config X86_MCE_AMD
        def_bool y
        prompt "AMD MCE features"
        depends on X86_64 && X86_MCE && X86_LOCAL_APIC
-       help
+       ---help---
           Additional support for AMD specific MCE features such as
           the DRAM Error Threshold.
 
 config X86_MCE_NONFATAL
        tristate "Check for non-fatal errors on AMD Athlon/Duron / Intel Pentium 4"
        depends on X86_32 && X86_MCE
-       help
+       ---help---
          Enabling this feature starts a timer that triggers every 5 seconds which
          will look at the machine check registers to see if anything happened.
          Non-fatal problems automatically get corrected (but still logged).
@@ -767,7 +766,7 @@ config X86_MCE_NONFATAL
 config X86_MCE_P4THERMAL
        bool "check for P4 thermal throttling interrupt."
        depends on X86_32 && X86_MCE && (X86_UP_APIC || SMP)
-       help
+       ---help---
          Enabling this feature will cause a message to be printed when the P4
          enters thermal throttling.
 
@@ -775,11 +774,11 @@ config VM86
        bool "Enable VM86 support" if EMBEDDED
        default y
        depends on X86_32
-       help
-          This option is required by programs like DOSEMU to run 16-bit legacy
+       ---help---
+         This option is required by programs like DOSEMU to run 16-bit legacy
          code on X86 processors. It also may be needed by software like
-          XFree86 to initialize some video cards via BIOS. Disabling this
-          option saves about 6k.
+         XFree86 to initialize some video cards via BIOS. Disabling this
+         option saves about 6k.
 
 config TOSHIBA
        tristate "Toshiba Laptop support"
@@ -853,33 +852,33 @@ config MICROCODE
          module will be called microcode.
 
 config MICROCODE_INTEL
-       bool "Intel microcode patch loading support"
-       depends on MICROCODE
-       default MICROCODE
-       select FW_LOADER
-       --help---
-         This options enables microcode patch loading support for Intel
-         processors.
-
-         For latest news and information on obtaining all the required
-         Intel ingredients for this driver, check:
-         <http://www.urbanmyth.org/microcode/>.
+       bool "Intel microcode patch loading support"
+       depends on MICROCODE
+       default MICROCODE
+       select FW_LOADER
+       ---help---
+         This option enables microcode patch loading support for Intel
+         processors.
+
+         For latest news and information on obtaining all the required
+         Intel ingredients for this driver, check:
+         <http://www.urbanmyth.org/microcode/>.
 
 config MICROCODE_AMD
-       bool "AMD microcode patch loading support"
-       depends on MICROCODE
-       select FW_LOADER
-       --help---
-         If you select this option, microcode patch loading support for AMD
-        processors will be enabled.
+       bool "AMD microcode patch loading support"
+       depends on MICROCODE
+       select FW_LOADER
+       ---help---
+         If you select this option, microcode patch loading support for AMD
+         processors will be enabled.
 
-   config MICROCODE_OLD_INTERFACE
+config MICROCODE_OLD_INTERFACE
        def_bool y
        depends on MICROCODE
 
 config X86_MSR
        tristate "/dev/cpu/*/msr - Model-specific register support"
-       help
+       ---help---
          This device gives privileged processes access to the x86
          Model-Specific Registers (MSRs).  It is a character device with
          major 202 and minors 0 to 31 for /dev/cpu/0/msr to /dev/cpu/31/msr.
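A minimal userspace sketch of this interface (not part of this change): the file
offset passed to pread() selects the MSR index, here 0x10 (the TSC), and the
device path assumes CPU 0.

	/* Hedged example: read MSR 0x10 from CPU 0 via /dev/cpu/0/msr. */
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		uint64_t val;
		int fd = open("/dev/cpu/0/msr", O_RDONLY);

		if (fd < 0 || pread(fd, &val, sizeof(val), 0x10) != sizeof(val))
			return 1;
		printf("MSR 0x10 on cpu0: %#llx\n", (unsigned long long)val);
		close(fd);
		return 0;
	}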
@@ -888,7 +887,7 @@ config X86_MSR
 
 config X86_CPUID
        tristate "/dev/cpu/*/cpuid - CPU information support"
-       help
+       ---help---
          This device gives processes access to the x86 CPUID instruction to
          be executed on a specific processor.  It is a character device
          with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
@@ -940,7 +939,7 @@ config NOHIGHMEM
 config HIGHMEM4G
        bool "4GB"
        depends on !X86_NUMAQ
-       help
+       ---help---
          Select this if you have a 32-bit processor and between 1 and 4
          gigabytes of physical RAM.
 
@@ -948,7 +947,7 @@ config HIGHMEM64G
        bool "64GB"
        depends on !M386 && !M486
        select X86_PAE
-       help
+       ---help---
          Select this if you have a 32-bit processor and more than 4
          gigabytes of physical RAM.
 
@@ -959,7 +958,7 @@ choice
        prompt "Memory split" if EMBEDDED
        default VMSPLIT_3G
        depends on X86_32
-       help
+       ---help---
          Select the desired split between kernel and user memory.
 
          If the address range available to the kernel is less than the
@@ -1005,20 +1004,20 @@ config HIGHMEM
 config X86_PAE
        bool "PAE (Physical Address Extension) Support"
        depends on X86_32 && !HIGHMEM4G
-       help
+       ---help---
          PAE is required for NX support, and furthermore enables
          larger swapspace support for non-overcommit purposes. It
          has the cost of more pagetable lookup overhead, and also
          consumes more pagetable space per process.
 
 config ARCH_PHYS_ADDR_T_64BIT
-       def_bool X86_64 || X86_PAE
+       def_bool X86_64 || X86_PAE
 
 config DIRECT_GBPAGES
        bool "Enable 1GB pages for kernel pagetables" if EMBEDDED
        default y
        depends on X86_64
-       help
+       ---help---
          Allow the kernel linear mapping to use 1GB pages on CPUs that
          support it. This can improve the kernel's performance a tiny bit by
          reducing TLB pressure. If in doubt, say "Y".
@@ -1028,9 +1027,8 @@ config NUMA
        bool "Numa Memory Allocation and Scheduler Support"
        depends on SMP
        depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && EXPERIMENTAL)
-       default n if X86_PC
        default y if (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP)
-       help
+       ---help---
          Enable NUMA (Non Uniform Memory Access) support.
 
          The kernel will try to allocate memory used by a CPU on the
@@ -1053,19 +1051,19 @@ config K8_NUMA
        def_bool y
        prompt "Old style AMD Opteron NUMA detection"
        depends on X86_64 && NUMA && PCI
-       help
-        Enable K8 NUMA node topology detection.  You should say Y here if
-        you have a multi processor AMD K8 system. This uses an old
-        method to read the NUMA configuration directly from the builtin
-        Northbridge of Opteron. It is recommended to use X86_64_ACPI_NUMA
-        instead, which also takes priority if both are compiled in.
+       ---help---
+         Enable K8 NUMA node topology detection.  You should say Y here if
+         you have a multi processor AMD K8 system. This uses an old
+         method to read the NUMA configuration directly from the builtin
+         Northbridge of Opteron. It is recommended to use X86_64_ACPI_NUMA
+         instead, which also takes priority if both are compiled in.
 
 config X86_64_ACPI_NUMA
        def_bool y
        prompt "ACPI NUMA detection"
        depends on X86_64 && NUMA && ACPI && PCI
        select ACPI_NUMA
-       help
+       ---help---
          Enable ACPI SRAT based node topology detection.
 
 # Some NUMA nodes have memory ranges that span
@@ -1080,7 +1078,7 @@ config NODES_SPAN_OTHER_NODES
 config NUMA_EMU
        bool "NUMA emulation"
        depends on X86_64 && NUMA
-       help
+       ---help---
          Enable NUMA emulation. A flat machine will be split
          into virtual nodes when booted with "numa=fake=N", where N is the
          number of nodes. This is only useful for debugging.
@@ -1093,7 +1091,7 @@ config NODES_SHIFT
        default "4" if X86_NUMAQ
        default "3"
        depends on NEED_MULTIPLE_NODES
-       help
+       ---help---
          Specify the maximum number of NUMA Nodes available on the target
          system.  Increases memory reserved to accomodate various tables.
 
@@ -1131,7 +1129,7 @@ config ARCH_SPARSEMEM_DEFAULT
 
 config ARCH_SPARSEMEM_ENABLE
        def_bool y
-       depends on X86_64 || NUMA || (EXPERIMENTAL && X86_PC) || X86_GENERICARCH
+       depends on X86_64 || NUMA || (EXPERIMENTAL && X86_32) || X86_32_NON_STANDARD
        select SPARSEMEM_STATIC if X86_32
        select SPARSEMEM_VMEMMAP_ENABLE if X86_64
 
@@ -1148,61 +1146,61 @@ source "mm/Kconfig"
 config HIGHPTE
        bool "Allocate 3rd-level pagetables from highmem"
        depends on X86_32 && (HIGHMEM4G || HIGHMEM64G)
-       help
+       ---help---
          The VM uses one page table entry for each page of physical memory.
          For systems with a lot of RAM, this can be wasteful of precious
          low memory.  Setting this option will put user-space page table
          entries in high memory.
 
 config X86_CHECK_BIOS_CORRUPTION
-        bool "Check for low memory corruption"
-       help
-        Periodically check for memory corruption in low memory, which
-        is suspected to be caused by BIOS.  Even when enabled in the
-        configuration, it is disabled at runtime.  Enable it by
-        setting "memory_corruption_check=1" on the kernel command
-        line.  By default it scans the low 64k of memory every 60
-        seconds; see the memory_corruption_check_size and
-        memory_corruption_check_period parameters in
-        Documentation/kernel-parameters.txt to adjust this.
-
-        When enabled with the default parameters, this option has
-        almost no overhead, as it reserves a relatively small amount
-        of memory and scans it infrequently.  It both detects corruption
-        and prevents it from affecting the running system.
-
-        It is, however, intended as a diagnostic tool; if repeatable
-        BIOS-originated corruption always affects the same memory,
-        you can use memmap= to prevent the kernel from using that
-        memory.
+       bool "Check for low memory corruption"
+       ---help---
+         Periodically check for memory corruption in low memory, which
+         is suspected to be caused by BIOS.  Even when enabled in the
+         configuration, it is disabled at runtime.  Enable it by
+         setting "memory_corruption_check=1" on the kernel command
+         line.  By default it scans the low 64k of memory every 60
+         seconds; see the memory_corruption_check_size and
+         memory_corruption_check_period parameters in
+         Documentation/kernel-parameters.txt to adjust this.
+
+         When enabled with the default parameters, this option has
+         almost no overhead, as it reserves a relatively small amount
+         of memory and scans it infrequently.  It both detects corruption
+         and prevents it from affecting the running system.
+
+         It is, however, intended as a diagnostic tool; if repeatable
+         BIOS-originated corruption always affects the same memory,
+         you can use memmap= to prevent the kernel from using that
+         memory.
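A rough sketch of the periodic scan described above (heavily simplified, not the
in-tree implementation; the reserved low-memory region is assumed to have been
mapped and zero-filled at boot):

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	#define SCAN_SIZE	(64 * 1024)	/* default: low 64k */
	#define SCAN_PERIOD	(60 * HZ)	/* default: every 60 seconds */

	static unsigned char *low_mem;		/* reserved, zero-filled at boot */

	static void check_corruption(struct work_struct *work);
	static DECLARE_DELAYED_WORK(corruption_work, check_corruption);

	static void check_corruption(struct work_struct *work)
	{
		size_t i;

		for (i = 0; i < SCAN_SIZE; i++)
			if (low_mem[i])		/* somebody (BIOS/SMM?) wrote here */
				printk(KERN_WARNING
				       "low memory corruption at offset %#zx\n", i);

		schedule_delayed_work(&corruption_work, SCAN_PERIOD);
	}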
 
 config X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK
-        bool "Set the default setting of memory_corruption_check"
+       bool "Set the default setting of memory_corruption_check"
        depends on X86_CHECK_BIOS_CORRUPTION
        default y
-       help
-        Set whether the default state of memory_corruption_check is
-        on or off.
+       ---help---
+         Set whether the default state of memory_corruption_check is
+         on or off.
 
 config X86_RESERVE_LOW_64K
-        bool "Reserve low 64K of RAM on AMI/Phoenix BIOSen"
+       bool "Reserve low 64K of RAM on AMI/Phoenix BIOSen"
        default y
-       help
-        Reserve the first 64K of physical RAM on BIOSes that are known
-        to potentially corrupt that memory range. A numbers of BIOSes are
-        known to utilize this area during suspend/resume, so it must not
-        be used by the kernel.
+       ---help---
+         Reserve the first 64K of physical RAM on BIOSes that are known
+         to potentially corrupt that memory range. A number of BIOSes are
+         known to utilize this area during suspend/resume, so it must not
+         be used by the kernel.
 
-        Set this to N if you are absolutely sure that you trust the BIOS
-        to get all its memory reservations and usages right.
+         Set this to N if you are absolutely sure that you trust the BIOS
+         to get all its memory reservations and usages right.
 
-        If you have doubts about the BIOS (e.g. suspend/resume does not
-        work or there's kernel crashes after certain hardware hotplug
-        events) and it's not AMI or Phoenix, then you might want to enable
-        X86_CHECK_BIOS_CORRUPTION=y to allow the kernel to check typical
-        corruption patterns.
+         If you have doubts about the BIOS (e.g. suspend/resume does not
+         work or there's kernel crashes after certain hardware hotplug
+         events) and it's not AMI or Phoenix, then you might want to enable
+         X86_CHECK_BIOS_CORRUPTION=y to allow the kernel to check typical
+         corruption patterns.
 
-        Say Y if unsure.
+         Say Y if unsure.
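For illustration, roughly how such a quirk can be wired up, assuming the
reserve_early()/DMI-match interfaces used by x86 setup code in this era (the
match string below is only an example):

	#include <linux/dmi.h>
	#include <linux/init.h>
	#include <asm/e820.h>

	static int __init reserve_low_64k(const struct dmi_system_id *d)
	{
		reserve_early(0x0, 0x10000, "BIOS quirk: low 64K");
		return 0;
	}

	static struct dmi_system_id bad_bios_dmi_table[] __initdata = {
		{
			.callback = reserve_low_64k,
			.ident = "AMI BIOS",		/* illustrative match */
			.matches = {
				DMI_MATCH(DMI_BIOS_VENDOR,
					  "American Megatrends Inc."),
			},
		},
		{}
	};
	/* dmi_check_system(bad_bios_dmi_table) would run from setup_arch() */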
 
 config MATH_EMULATION
        bool
@@ -1268,7 +1266,7 @@ config MTRR_SANITIZER
        def_bool y
        prompt "MTRR cleanup support"
        depends on MTRR
-       help
+       ---help---
          Convert MTRR layout from continuous to discrete, so X drivers can
          add writeback entries.
 
@@ -1283,7 +1281,7 @@ config MTRR_SANITIZER_ENABLE_DEFAULT
        range 0 1
        default "0"
        depends on MTRR_SANITIZER
-       help
+       ---help---
          Enable mtrr cleanup default value
 
 config MTRR_SANITIZER_SPARE_REG_NR_DEFAULT
@@ -1291,7 +1289,7 @@ config MTRR_SANITIZER_SPARE_REG_NR_DEFAULT
        range 0 7
        default "1"
        depends on MTRR_SANITIZER
-       help
+       ---help---
          mtrr cleanup spare entries default, it can be changed via
          mtrr_spare_reg_nr=N on the kernel command line.
 
@@ -1299,7 +1297,7 @@ config X86_PAT
        bool
        prompt "x86 PAT support"
        depends on MTRR
-       help
+       ---help---
          Use PAT attributes to setup page level cache control.
 
          PATs are the modern equivalents of MTRRs and are much more
@@ -1314,20 +1312,20 @@ config EFI
        bool "EFI runtime service support"
        depends on ACPI
        ---help---
-       This enables the kernel to use EFI runtime services that are
-       available (such as the EFI variable services).
+         This enables the kernel to use EFI runtime services that are
+         available (such as the EFI variable services).
 
-       This option is only useful on systems that have EFI firmware.
-       In addition, you should use the latest ELILO loader available
-       at <http://elilo.sourceforge.net> in order to take advantage
-       of EFI runtime services. However, even with this option, the
-       resultant kernel should continue to boot on existing non-EFI
-       platforms.
+         This option is only useful on systems that have EFI firmware.
+         In addition, you should use the latest ELILO loader available
+         at <http://elilo.sourceforge.net> in order to take advantage
+         of EFI runtime services. However, even with this option, the
+         resultant kernel should continue to boot on existing non-EFI
+         platforms.
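A hedged in-kernel sketch of calling one such runtime service, assuming the
efi.get_variable() hook and EFI_GLOBAL_VARIABLE_GUID exposed by <linux/efi.h>;
the variable name and buffer size are illustrative:

	#include <linux/efi.h>
	#include <linux/init.h>
	#include <linux/kernel.h>

	static int __init show_boot_order(void)
	{
		static efi_char16_t name[] = {
			'B', 'o', 'o', 't', 'O', 'r', 'd', 'e', 'r', 0
		};
		efi_guid_t vendor = EFI_GLOBAL_VARIABLE_GUID;
		u16 data[32];
		unsigned long size = sizeof(data);
		u32 attr;

		if (!efi_enabled)	/* no EFI runtime services this boot */
			return 0;

		if (efi.get_variable(name, &vendor, &attr, &size, data) == EFI_SUCCESS)
			printk(KERN_INFO "EFI BootOrder: %lu bytes\n", size);
		return 0;
	}
	late_initcall(show_boot_order);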
 
 config SECCOMP
        def_bool y
        prompt "Enable seccomp to safely compute untrusted bytecode"
-       help
+       ---help---
          This kernel feature is useful for number crunching applications
          that may need to compute untrusted bytecode during their
          execution. By using pipes or other transports made available to
@@ -1340,13 +1338,16 @@ config SECCOMP
 
          If unsure, say Y. Only embedded should say N here.
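As a userspace usage sketch (not part of this change): a worker that switches to
strict seccomp (mode 1) before processing untrusted input over already-open
descriptors; afterwards only read(), write(), _exit() and sigreturn() are allowed.

	#include <sys/prctl.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		ssize_t n;

		if (prctl(PR_SET_SECCOMP, 1, 0, 0, 0))	/* 1 == strict mode */
			return 1;

		while ((n = read(0, buf, sizeof(buf))) > 0)
			write(1, buf, n);		/* "compute", emit results */
		_exit(0);
	}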
 
+config CC_STACKPROTECTOR_ALL
+       bool
+
 config CC_STACKPROTECTOR
        bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
-       depends on X86_64 && EXPERIMENTAL && BROKEN
-       help
-         This option turns on the -fstack-protector GCC feature. This
-         feature puts, at the beginning of critical functions, a canary
-         value on the stack just before the return address, and validates
+       select CC_STACKPROTECTOR_ALL
+       ---help---
+         This option turns on the -fstack-protector GCC feature. This
+         feature puts, at the beginning of functions, a canary value on
+         the stack just before the return address, and validates
          the value just before actually returning.  Stack based buffer
          overflows (that need to overwrite this return address) now also
          overwrite the canary, which gets detected and the attack is then
@@ -1354,22 +1355,14 @@ config CC_STACKPROTECTOR
 
          This feature requires gcc version 4.2 or above, or a distribution
          gcc with the feature backported. Older versions are automatically
-         detected and for those versions, this configuration option is ignored.
-
-config CC_STACKPROTECTOR_ALL
-       bool "Use stack-protector for all functions"
-       depends on CC_STACKPROTECTOR
-       help
-         Normally, GCC only inserts the canary value protection for
-         functions that use large-ish on-stack buffers. By enabling
-         this option, GCC will be asked to do this for ALL functions.
+         detected, and for those versions this configuration option is
+         ignored (and a warning is printed during bootup).
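Conceptually, the transformation looks roughly like the sketch below
(pseudo-source, not the kernel's actual implementation; on x86 the kernel keeps
the canary in per-CPU data rather than a global):

	#include <string.h>

	extern unsigned long __stack_chk_guard;	/* per-boot random canary */
	extern void __stack_chk_fail(void);	/* called on a smashed stack */

	void copy_name(const char *untrusted)
	{
		unsigned long canary = __stack_chk_guard; /* prologue: stash canary */
		char buf[64];

		strcpy(buf, untrusted);			  /* possible overflow */

		if (canary != __stack_chk_guard)	  /* epilogue: verify */
			__stack_chk_fail();
	}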
 
 source kernel/Kconfig.hz
 
 config KEXEC
        bool "kexec system call"
-       depends on X86_BIOS_REBOOT
-       help
+       ---help---
          kexec is a system call that implements the ability to shutdown your
          current kernel, and to start another kernel.  It is like a reboot
          but it is independent of the system firmware.   And like a reboot
@@ -1386,7 +1379,7 @@ config KEXEC
 config CRASH_DUMP
        bool "kernel crash dumps"
        depends on X86_64 || (X86_32 && HIGHMEM)
-       help
+       ---help---
          Generate crash dump after being started by kexec.
          This should be normally only set in special crash dump kernels
          which are loaded in the main kernel with kexec-tools into
@@ -1401,7 +1394,7 @@ config KEXEC_JUMP
        bool "kexec jump (EXPERIMENTAL)"
        depends on EXPERIMENTAL
        depends on KEXEC && HIBERNATION && X86_32
-       help
+       ---help---
          Jump between original kernel and kexeced kernel and invoke
          code in physical address mode via KEXEC
 
@@ -1410,7 +1403,7 @@ config PHYSICAL_START
        default "0x1000000" if X86_NUMAQ
        default "0x200000" if X86_64
        default "0x100000"
-       help
+       ---help---
          This gives the physical address where the kernel is loaded.
 
          If kernel is a not relocatable (CONFIG_RELOCATABLE=n) then
@@ -1451,7 +1444,7 @@ config PHYSICAL_START
 config RELOCATABLE
        bool "Build a relocatable kernel (EXPERIMENTAL)"
        depends on EXPERIMENTAL
-       help
+       ---help---
          This builds a kernel image that retains relocation information
          so it can be loaded someplace besides the default 1MB.
          The relocations tend to make the kernel binary about 10% larger,
@@ -1471,7 +1464,7 @@ config PHYSICAL_ALIGN
        default "0x100000" if X86_32
        default "0x200000" if X86_64
        range 0x2000 0x400000
-       help
+       ---help---
          This value puts the alignment restrictions on physical address
          where kernel is loaded and run from. Kernel is compiled for an
          address which meets above alignment restriction.
@@ -1492,7 +1485,7 @@ config PHYSICAL_ALIGN
 
 config HOTPLUG_CPU
        bool "Support for hot-pluggable CPUs"
-       depends on SMP && HOTPLUG && !X86_VOYAGER
+       depends on SMP && HOTPLUG
        ---help---
          Say Y here to allow turning CPUs off and on. CPUs can be
          controlled through /sys/devices/system/cpu.
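As a usage note, an onlined CPU can typically be taken down by writing 0 to
/sys/devices/system/cpu/cpuN/online and brought back by writing 1.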
@@ -1504,7 +1497,7 @@ config COMPAT_VDSO
        def_bool y
        prompt "Compat VDSO support"
        depends on X86_32 || IA32_EMULATION
-       help
+       ---help---
          Map the 32-bit VDSO to the predictable old-style address too.
        ---help---
          Say N here if you are running a sufficiently recent glibc
@@ -1516,7 +1509,7 @@ config COMPAT_VDSO
 config CMDLINE_BOOL
        bool "Built-in kernel command line"
        default n
-       help
+       ---help---
          Allow for specifying boot arguments to the kernel at
          build time.  On some systems (e.g. embedded ones), it is
          necessary or convenient to provide some or all of the
@@ -1534,7 +1527,7 @@ config CMDLINE
        string "Built-in kernel command string"
        depends on CMDLINE_BOOL
        default ""
-       help
+       ---help---
          Enter arguments here that should be compiled into the kernel
          image and used at boot time.  If the boot loader provides a
          command line at boot time, it is appended to this string to
@@ -1551,7 +1544,7 @@ config CMDLINE_OVERRIDE
        bool "Built-in command line overrides boot loader arguments"
        default n
        depends on CMDLINE_BOOL
-       help
+       ---help---
          Set this option to 'Y' to have the kernel ignore the boot loader
          command line, and use ONLY the built-in command line.
 
@@ -1573,7 +1566,6 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
        depends on NUMA
 
 menu "Power management and ACPI options"
-       depends on !X86_VOYAGER
 
 config ARCH_HIBERNATION_HEADER
        def_bool y
@@ -1651,7 +1643,7 @@ if APM
 
 config APM_IGNORE_USER_SUSPEND
        bool "Ignore USER SUSPEND"
-       help
+       ---help---
          This option will ignore USER SUSPEND requests. On machines with a
          compliant APM BIOS, you want to say N. However, on the NEC Versa M
          series notebooks, it is necessary to say Y because of a BIOS bug.
@@ -1675,7 +1667,7 @@ config APM_DO_ENABLE
 
 config APM_CPU_IDLE
        bool "Make CPU Idle calls when idle"
-       help
+       ---help---
          Enable calls to APM CPU Idle/CPU Busy inside the kernel's idle loop.
          On some machines, this can activate improved power savings, such as
          a slowed CPU clock rate, when the machine is idle. These idle calls
@@ -1686,7 +1678,7 @@ config APM_CPU_IDLE
 
 config APM_DISPLAY_BLANK
        bool "Enable console blanking using APM"
-       help
+       ---help---
          Enable console blanking using the APM. Some laptops can use this to
          turn off the LCD backlight when the screen blanker of the Linux
          virtual console blanks the screen. Note that this is only used by
@@ -1699,7 +1691,7 @@ config APM_DISPLAY_BLANK
 
 config APM_ALLOW_INTS
        bool "Allow interrupts during APM BIOS calls"
-       help
+       ---help---
          Normally we disable external interrupts while we are making calls to
          the APM BIOS as a measure to lessen the effects of a badly behaving
          BIOS implementation.  The BIOS should reenable interrupts if it
@@ -1724,7 +1716,7 @@ config PCI
        bool "PCI support"
        default y
        select ARCH_SUPPORTS_MSI if (X86_LOCAL_APIC && X86_IO_APIC)
-       help
+       ---help---
          Find out whether you have a PCI motherboard. PCI is the name of a
          bus system, i.e. the way the CPU talks to the other stuff inside
          your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
@@ -1795,7 +1787,7 @@ config PCI_MMCONFIG
 config DMAR
        bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
        depends on X86_64 && PCI_MSI && ACPI && EXPERIMENTAL
-       help
+       ---help---
          DMA remapping (DMAR) devices support enables independent address
          translations for Direct Memory Access (DMA) from devices.
          These DMA remapping devices are reported via ACPI tables
@@ -1817,29 +1809,29 @@ config DMAR_GFX_WA
        def_bool y
        prompt "Support for Graphics workaround"
        depends on DMAR
-       help
-        Current Graphics drivers tend to use physical address
-        for DMA and avoid using DMA APIs. Setting this config
-        option permits the IOMMU driver to set a unity map for
-        all the OS-visible memory. Hence the driver can continue
-        to use physical addresses for DMA.
+       ---help---
+         Current graphics drivers tend to use physical addresses
+         for DMA and avoid using DMA APIs. Setting this config
+         option permits the IOMMU driver to set a unity map for
+         all the OS-visible memory. Hence the driver can continue
+         to use physical addresses for DMA.
 
 config DMAR_FLOPPY_WA
        def_bool y
        depends on DMAR
-       help
-        Floppy disk drivers are know to bypass DMA API calls
-        thereby failing to work when IOMMU is enabled. This
-        workaround will setup a 1:1 mapping for the first
-        16M to make floppy (an ISA device) work.
+       ---help---
+         Floppy disk drivers are known to bypass DMA API calls,
+         thereby failing to work when the IOMMU is enabled. This
+         workaround will set up a 1:1 mapping for the first
+         16M to make the floppy (an ISA device) work.
 
 config INTR_REMAP
        bool "Support for Interrupt Remapping (EXPERIMENTAL)"
        depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL
-       help
-        Supports Interrupt remapping for IO-APIC and MSI devices.
-        To use x2apic mode in the CPU's which support x2APIC enhancements or
-        to support platforms with CPU's having > 8 bit APIC ID, say Y.
+       ---help---
+         Supports interrupt remapping for IO-APIC and MSI devices.
+         To use x2apic mode on CPUs which support x2APIC enhancements, or
+         to support platforms with CPUs having more than 8-bit APIC IDs, say Y.
 
 source "drivers/pci/pcie/Kconfig"
 
@@ -1853,8 +1845,7 @@ if X86_32
 
 config ISA
        bool "ISA support"
-       depends on !X86_VOYAGER
-       help
+       ---help---
          Find out whether you have ISA slots on your motherboard.  ISA is the
          name of a bus system, i.e. the way the CPU talks to the other stuff
          inside your box.  Other bus systems are PCI, EISA, MicroChannel
@@ -1880,9 +1871,8 @@ config EISA
 source "drivers/eisa/Kconfig"
 
 config MCA
-       bool "MCA support" if !X86_VOYAGER
-       default y if X86_VOYAGER
-       help
+       bool "MCA support"
+       ---help---
          MicroChannel Architecture is found in some IBM PS/2 machines and
          laptops.  It is a bus system similar to PCI or ISA. See
          <file:Documentation/mca.txt> (and especially the web page given
@@ -1892,8 +1882,7 @@ source "drivers/mca/Kconfig"
 
 config SCx200
        tristate "NatSemi SCx200 support"
-       depends on !X86_VOYAGER
-       help
+       ---help---
          This provides basic support for National Semiconductor's
          (now AMD's) Geode processors.  The driver probes for the
          PCI-IDs of several on-chip devices, so its a good dependency
@@ -1905,7 +1894,7 @@ config SCx200HR_TIMER
        tristate "NatSemi SCx200 27MHz High-Resolution Timer Support"
        depends on SCx200 && GENERIC_TIME
        default y
-       help
+       ---help---
          This driver provides a clocksource built upon the on-chip
          27MHz high-resolution timer.  Its also a workaround for
          NSC Geode SC-1100's buggy TSC, which loses time when the
@@ -1916,7 +1905,7 @@ config GEODE_MFGPT_TIMER
        def_bool y
        prompt "Geode Multi-Function General Purpose Timer (MFGPT) events"
        depends on MGEODE_LX && GENERIC_TIME && GENERIC_CLOCKEVENTS
-       help
+       ---help---
          This driver provides a clock event source based on the MFGPT
          timer(s) in the CS5535 and CS5536 companion chip for the geode.
          MFGPTs have a better resolution and max interval than the
@@ -1925,7 +1914,7 @@ config GEODE_MFGPT_TIMER
 config OLPC
        bool "One Laptop Per Child support"
        default n
-       help
+       ---help---
          Add support for detecting the unique features of the OLPC
          XO hardware.
 
@@ -1950,16 +1939,16 @@ config IA32_EMULATION
        bool "IA32 Emulation"
        depends on X86_64
        select COMPAT_BINFMT_ELF
-       help
+       ---help---
          Include code to run 32-bit programs under a 64-bit kernel. You should
          likely turn this on, unless you're 100% sure that you don't have any
          32-bit programs left.
 
 config IA32_AOUT
-       tristate "IA32 a.out support"
-       depends on IA32_EMULATION
-       help
-         Support old a.out binaries in the 32bit emulation.
+       tristate "IA32 a.out support"
+       depends on IA32_EMULATION
+       ---help---
+         Support old a.out binaries in the 32bit emulation.
 
 config COMPAT
        def_bool y
index c98d52e..a95eaf0 100644 (file)
@@ -50,7 +50,7 @@ config M386
 config M486
        bool "486"
        depends on X86_32
-       help
+       ---help---
          Select this for a 486 series processor, either Intel or one of the
          compatible processors from AMD, Cyrix, IBM, or Intel.  Includes DX,
          DX2, and DX4 variants; also SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or
@@ -59,7 +59,7 @@ config M486
 config M586
        bool "586/K5/5x86/6x86/6x86MX"
        depends on X86_32
-       help
+       ---help---
          Select this for an 586 or 686 series processor such as the AMD K5,
          the Cyrix 5x86, 6x86 and 6x86MX.  This choice does not
          assume the RDTSC (Read Time Stamp Counter) instruction.
@@ -67,21 +67,21 @@ config M586
 config M586TSC
        bool "Pentium-Classic"
        depends on X86_32
-       help
+       ---help---
          Select this for a Pentium Classic processor with the RDTSC (Read
          Time Stamp Counter) instruction for benchmarking.
 
 config M586MMX
        bool "Pentium-MMX"
        depends on X86_32
-       help
+       ---help---
          Select this for a Pentium with the MMX graphics/multimedia
          extended instructions.
 
 config M686
        bool "Pentium-Pro"
        depends on X86_32
-       help
+       ---help---
          Select this for Intel Pentium Pro chips.  This enables the use of
          Pentium Pro extended instructions, and disables the init-time guard
          against the f00f bug found in earlier Pentiums.
@@ -89,7 +89,7 @@ config M686
 config MPENTIUMII
        bool "Pentium-II/Celeron(pre-Coppermine)"
        depends on X86_32
-       help
+       ---help---
          Select this for Intel chips based on the Pentium-II and
          pre-Coppermine Celeron core.  This option enables an unaligned
          copy optimization, compiles the kernel with optimization flags
@@ -99,7 +99,7 @@ config MPENTIUMII
 config MPENTIUMIII
        bool "Pentium-III/Celeron(Coppermine)/Pentium-III Xeon"
        depends on X86_32
-       help
+       ---help---
          Select this for Intel chips based on the Pentium-III and
          Celeron-Coppermine core.  This option enables use of some
          extended prefetch instructions in addition to the Pentium II
@@ -108,14 +108,14 @@ config MPENTIUMIII
 config MPENTIUMM
        bool "Pentium M"
        depends on X86_32
-       help
+       ---help---
          Select this for Intel Pentium M (not Pentium-4 M)
          notebook chips.
 
 config MPENTIUM4
        bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon"
        depends on X86_32
-       help
+       ---help---
          Select this for Intel Pentium 4 chips.  This includes the
          Pentium 4, Pentium D, P4-based Celeron and Xeon, and
          Pentium-4 M (not Pentium M) chips.  This option enables compile
@@ -151,7 +151,7 @@ config MPENTIUM4
 config MK6
        bool "K6/K6-II/K6-III"
        depends on X86_32
-       help
+       ---help---
          Select this for an AMD K6-family processor.  Enables use of
          some extended instructions, and passes appropriate optimization
          flags to GCC.
@@ -159,14 +159,14 @@ config MK6
 config MK7
        bool "Athlon/Duron/K7"
        depends on X86_32
-       help
+       ---help---
          Select this for an AMD Athlon K7-family processor.  Enables use of
          some extended instructions, and passes appropriate optimization
          flags to GCC.
 
 config MK8
        bool "Opteron/Athlon64/Hammer/K8"
-       help
+       ---help---
          Select this for an AMD Opteron or Athlon64 Hammer-family processor.
          Enables use of some extended instructions, and passes appropriate
          optimization flags to GCC.
@@ -174,7 +174,7 @@ config MK8
 config MCRUSOE
        bool "Crusoe"
        depends on X86_32
-       help
+       ---help---
          Select this for a Transmeta Crusoe processor.  Treats the processor
          like a 586 with TSC, and sets some GCC optimization flags (like a
          Pentium Pro with no alignment requirements).
@@ -182,13 +182,13 @@ config MCRUSOE
 config MEFFICEON
        bool "Efficeon"
        depends on X86_32
-       help
+       ---help---
          Select this for a Transmeta Efficeon processor.
 
 config MWINCHIPC6
        bool "Winchip-C6"
        depends on X86_32
-       help
+       ---help---
          Select this for an IDT Winchip C6 chip.  Linux and GCC
          treat this chip as a 586TSC with some extended instructions
          and alignment requirements.
@@ -196,7 +196,7 @@ config MWINCHIPC6
 config MWINCHIP3D
        bool "Winchip-2/Winchip-2A/Winchip-3"
        depends on X86_32
-       help
+       ---help---
          Select this for an IDT Winchip-2, 2A or 3.  Linux and GCC
          treat this chip as a 586TSC with some extended instructions
          and alignment requirements.  Also enable out of order memory
@@ -206,19 +206,19 @@ config MWINCHIP3D
 config MGEODEGX1
        bool "GeodeGX1"
        depends on X86_32
-       help
+       ---help---
          Select this for a Geode GX1 (Cyrix MediaGX) chip.
 
 config MGEODE_LX
        bool "Geode GX/LX"
        depends on X86_32
-       help
+       ---help---
          Select this for AMD Geode GX and LX processors.
 
 config MCYRIXIII
        bool "CyrixIII/VIA-C3"
        depends on X86_32
-       help
+       ---help---
          Select this for a Cyrix III or C3 chip.  Presently Linux and GCC
          treat this chip as a generic 586. Whilst the CPU is 686 class,
          it lacks the cmov extension which gcc assumes is present when
@@ -230,7 +230,7 @@ config MCYRIXIII
 config MVIAC3_2
        bool "VIA C3-2 (Nehemiah)"
        depends on X86_32
-       help
+       ---help---
          Select this for a VIA C3 "Nehemiah". Selecting this enables usage
          of SSE and tells gcc to treat the CPU as a 686.
          Note, this kernel will not boot on older (pre model 9) C3s.
@@ -238,14 +238,14 @@ config MVIAC3_2
 config MVIAC7
        bool "VIA C7"
        depends on X86_32
-       help
+       ---help---
          Select this for a VIA C7.  Selecting this uses the correct cache
          shift and tells gcc to treat the CPU as a 686.
 
 config MPSC
        bool "Intel P4 / older Netburst based Xeon"
        depends on X86_64
-       help
+       ---help---
          Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey
          Xeon CPUs with Intel 64bit which is compatible with x86-64.
          Note that the latest Xeons (Xeon 51xx and 53xx) are not based on the
@@ -255,7 +255,7 @@ config MPSC
 
 config MCORE2
        bool "Core 2/newer Xeon"
-       help
+       ---help---
 
          Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
          53xx) CPUs. You can distinguish newer from older Xeons by the CPU
@@ -265,7 +265,7 @@ config MCORE2
 config GENERIC_CPU
        bool "Generic-x86-64"
        depends on X86_64
-       help
+       ---help---
          Generic x86-64 CPU.
          Run equally well on all x86-64 CPUs.
 
@@ -274,7 +274,7 @@ endchoice
 config X86_GENERIC
        bool "Generic x86 support"
        depends on X86_32
-       help
+       ---help---
          Instead of just including optimizations for the selected
          x86 variant (e.g. PII, Crusoe or Athlon), include some more
          generic optimizations as well. This will make the kernel
@@ -294,25 +294,23 @@ config X86_CPU
 # Define implied options from the CPU selection here
 config X86_L1_CACHE_BYTES
        int
-       default "128" if GENERIC_CPU || MPSC
-       default "64" if MK8 || MCORE2
-       depends on X86_64
+       default "128" if MPSC
+       default "64" if GENERIC_CPU || MK8 || MCORE2 || X86_32
 
 config X86_INTERNODE_CACHE_BYTES
        int
        default "4096" if X86_VSMP
        default X86_L1_CACHE_BYTES if !X86_VSMP
-       depends on X86_64
 
 config X86_CMPXCHG
        def_bool X86_64 || (X86_32 && !M386)
 
 config X86_L1_CACHE_SHIFT
        int
-       default "7" if MPENTIUM4 || X86_GENERIC || GENERIC_CPU || MPSC
+       default "7" if MPENTIUM4 || MPSC
        default "4" if X86_ELAN || M486 || M386 || MGEODEGX1
        default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
-       default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MVIAC7
+       default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MVIAC7 || X86_GENERIC || GENERIC_CPU
 
 config X86_XADD
        def_bool y
@@ -321,7 +319,7 @@ config X86_XADD
 config X86_PPRO_FENCE
        bool "PentiumPro memory ordering errata workaround"
        depends on M686 || M586MMX || M586TSC || M586 || M486 || M386 || MGEODEGX1
-       help
+       ---help---
          Old PentiumPro multiprocessor systems had errata that could cause
          memory operations to violate the x86 ordering standard in rare cases.
          Enabling this option will attempt to work around some (but not all)
@@ -414,14 +412,14 @@ config X86_DEBUGCTLMSR
 
 menuconfig PROCESSOR_SELECT
        bool "Supported processor vendors" if EMBEDDED
-       help
+       ---help---
          This lets you choose what x86 vendor support code your kernel
          will include.
 
 config CPU_SUP_INTEL
        default y
        bool "Support Intel processors" if PROCESSOR_SELECT
-       help
+       ---help---
          This enables detection, tunings and quirks for Intel processors
 
          You need this enabled if you want your kernel to run on an
@@ -435,7 +433,7 @@ config CPU_SUP_CYRIX_32
        default y
        bool "Support Cyrix processors" if PROCESSOR_SELECT
        depends on !64BIT
-       help
+       ---help---
          This enables detection, tunings and quirks for Cyrix processors
 
          You need this enabled if you want your kernel to run on a
@@ -448,7 +446,7 @@ config CPU_SUP_CYRIX_32
 config CPU_SUP_AMD
        default y
        bool "Support AMD processors" if PROCESSOR_SELECT
-       help
+       ---help---
          This enables detection, tunings and quirks for AMD processors
 
          You need this enabled if you want your kernel to run on an
@@ -462,7 +460,7 @@ config CPU_SUP_CENTAUR_32
        default y
        bool "Support Centaur processors" if PROCESSOR_SELECT
        depends on !64BIT
-       help
+       ---help---
          This enables detection, tunings and quirks for Centaur processors
 
          You need this enabled if you want your kernel to run on a
@@ -476,7 +474,7 @@ config CPU_SUP_CENTAUR_64
        default y
        bool "Support Centaur processors" if PROCESSOR_SELECT
        depends on 64BIT
-       help
+       ---help---
          This enables detection, tunings and quirks for Centaur processors
 
          You need this enabled if you want your kernel to run on a
@@ -490,7 +488,7 @@ config CPU_SUP_TRANSMETA_32
        default y
        bool "Support Transmeta processors" if PROCESSOR_SELECT
        depends on !64BIT
-       help
+       ---help---
          This enables detection, tunings and quirks for Transmeta processors
 
          You need this enabled if you want your kernel to run on a
@@ -504,7 +502,7 @@ config CPU_SUP_UMC_32
        default y
        bool "Support UMC processors" if PROCESSOR_SELECT
        depends on !64BIT
-       help
+       ---help---
          This enables detection, tunings and quirks for UMC processors
 
          You need this enabled if you want your kernel to run on a
@@ -523,7 +521,7 @@ config X86_PTRACE_BTS
        bool "Branch Trace Store"
        default y
        depends on X86_DEBUGCTLMSR
-       help
+       ---help---
          This adds a ptrace interface to the hardware's branch trace store.
 
          Debuggers may use it to collect an execution trace of the debugged
index 10d6cc3..ba4781b 100644 (file)
@@ -7,7 +7,7 @@ source "lib/Kconfig.debug"
 
 config STRICT_DEVMEM
        bool "Filter access to /dev/mem"
-       help
+       ---help---
          If this option is disabled, you allow userspace (root) access to all
          of memory, including kernel and userspace memory. Accidental
          access to this is obviously disastrous, but specific access can
@@ -25,7 +25,7 @@ config STRICT_DEVMEM
 config X86_VERBOSE_BOOTUP
        bool "Enable verbose x86 bootup info messages"
        default y
-       help
+       ---help---
          Enables the informational output from the decompression stage
          (e.g. bzImage) of the boot. If you disable this you will still
          see errors. Disable this if you want silent bootup.
@@ -33,7 +33,7 @@ config X86_VERBOSE_BOOTUP
 config EARLY_PRINTK
        bool "Early printk" if EMBEDDED
        default y
-       help
+       ---help---
          Write kernel log output directly into the VGA buffer or to a serial
          port.
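Typical usage is a boot parameter such as earlyprintk=vga or
earlyprintk=serial,ttyS0,115200.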
 
@@ -47,7 +47,7 @@ config EARLY_PRINTK_DBGP
        bool "Early printk via EHCI debug port"
        default n
        depends on EARLY_PRINTK && PCI
-       help
+       ---help---
          Write kernel log output directly into the EHCI debug port.
 
          This is useful for kernel debugging when your machine crashes very
@@ -59,14 +59,14 @@ config EARLY_PRINTK_DBGP
 config DEBUG_STACKOVERFLOW
        bool "Check for stack overflows"
        depends on DEBUG_KERNEL
-       help
+       ---help---
          This option will cause messages to be printed if free stack space
          drops below a certain limit.
 
 config DEBUG_STACK_USAGE
        bool "Stack utilization instrumentation"
        depends on DEBUG_KERNEL
-       help
+       ---help---
          Enables the display of the minimum amount of free stack which each
          task has ever had available in the sysrq-T and sysrq-P debug output.
 
@@ -75,7 +75,7 @@ config DEBUG_STACK_USAGE
 config DEBUG_PAGEALLOC
        bool "Debug page memory allocations"
        depends on DEBUG_KERNEL
-       help
+       ---help---
          Unmap pages from the kernel linear mapping after free_pages().
          This results in a large slowdown, but helps to find certain types
          of memory corruptions.
@@ -83,9 +83,9 @@ config DEBUG_PAGEALLOC
 config DEBUG_PER_CPU_MAPS
        bool "Debug access to per_cpu maps"
        depends on DEBUG_KERNEL
-       depends on X86_SMP
+       depends on SMP
        default n
-       help
+       ---help---
          Say Y to verify that the per_cpu map being accessed has
          been setup.  Adds a fair amount of code to kernel memory
          and decreases performance.
@@ -96,7 +96,7 @@ config X86_PTDUMP
        bool "Export kernel pagetable layout to userspace via debugfs"
        depends on DEBUG_KERNEL
        select DEBUG_FS
-       help
+       ---help---
          Say Y here if you want to show the kernel pagetable layout in a
          debugfs file. This information is only useful for kernel developers
          who are working in architecture specific areas of the kernel.
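Once debugfs is mounted, the dump is typically available as
/sys/kernel/debug/kernel_page_tables.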
@@ -108,7 +108,7 @@ config DEBUG_RODATA
        bool "Write protect kernel read-only data structures"
        default y
        depends on DEBUG_KERNEL
-       help
+       ---help---
          Mark the kernel read-only data as write-protected in the pagetables,
          in order to catch accidental (and incorrect) writes to such const
          data. This is recommended so that we can catch kernel bugs sooner.
@@ -117,7 +117,8 @@ config DEBUG_RODATA
 config DEBUG_RODATA_TEST
        bool "Testcase for the DEBUG_RODATA feature"
        depends on DEBUG_RODATA
-       help
+       default y
+       ---help---
          This option enables a testcase for the DEBUG_RODATA
          feature as well as for the change_page_attr() infrastructure.
          If in doubt, say "N"
@@ -125,7 +126,7 @@ config DEBUG_RODATA_TEST
 config DEBUG_NX_TEST
        tristate "Testcase for the NX non-executable stack feature"
        depends on DEBUG_KERNEL && m
-       help
+       ---help---
          This option enables a testcase for the CPU NX capability
          and the software setup of this feature.
          If in doubt, say "N"
@@ -133,7 +134,7 @@ config DEBUG_NX_TEST
 config 4KSTACKS
        bool "Use 4Kb for kernel stacks instead of 8Kb"
        depends on X86_32
-       help
+       ---help---
          If you say Y here the kernel will use a 4Kb stacksize for the
          kernel stack attached to each process/thread. This facilitates
          running more threads on a system and also reduces the pressure
@@ -144,7 +145,7 @@ config DOUBLEFAULT
        default y
        bool "Enable doublefault exception handler" if EMBEDDED
        depends on X86_32
-       help
+       ---help---
          This option allows trapping of rare doublefault exceptions that
          would otherwise cause a system to silently reboot. Disabling this
          option saves about 4k and might cause you much additional grey
@@ -154,7 +155,7 @@ config IOMMU_DEBUG
        bool "Enable IOMMU debugging"
        depends on GART_IOMMU && DEBUG_KERNEL
        depends on X86_64
-       help
+       ---help---
          Force the IOMMU to on even when you have less than 4GB of
          memory and add debugging code. On overflow always panic. And
          allow to enable IOMMU leak tracing. Can be disabled at boot
@@ -170,7 +171,7 @@ config IOMMU_LEAK
        bool "IOMMU leak tracing"
        depends on DEBUG_KERNEL
        depends on IOMMU_DEBUG
-       help
+       ---help---
          Add a simple leak tracer to the IOMMU code. This is useful when you
          are debugging a buggy device driver that leaks IOMMU mappings.
 
@@ -223,25 +224,25 @@ choice
 
 config IO_DELAY_0X80
        bool "port 0x80 based port-IO delay [recommended]"
-       help
+       ---help---
          This is the traditional Linux IO delay used for in/out_p.
          It is the most tested hence safest selection here.
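In essence this delay is a dummy byte write to legacy ISA port 0x80, roughly:

	/* Hedged sketch: port 0x80 is unused on PC-compatibles and slow
	 * enough to pace back-to-back port I/O. */
	static inline void port80_io_delay(void)
	{
		asm volatile ("outb %%al, $0x80" : : "a" (0));
	}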
 
 config IO_DELAY_0XED
        bool "port 0xed based port-IO delay"
-       help
+       ---help---
          Use port 0xed as the IO delay. This frees up port 0x80 which is
          often used as a hardware-debug port.
 
 config IO_DELAY_UDELAY
        bool "udelay based port-IO delay"
-       help
+       ---help---
          Use udelay(2) as the IO delay method. This provides the delay
          while not having any side-effect on the IO port space.
 
 config IO_DELAY_NONE
        bool "no port-IO delay"
-       help
+       ---help---
          No port-IO delay. Will break on old boxes that require port-IO
          delay for certain operations. Should work on most new machines.
 
@@ -275,18 +276,18 @@ config DEBUG_BOOT_PARAMS
        bool "Debug boot parameters"
        depends on DEBUG_KERNEL
        depends on DEBUG_FS
-       help
+       ---help---
          This option will cause struct boot_params to be exported via debugfs.
 
 config CPA_DEBUG
        bool "CPA self-test code"
        depends on DEBUG_KERNEL
-       help
+       ---help---
          Do change_page_attr() self-tests every 30 seconds.
 
 config OPTIMIZE_INLINING
        bool "Allow gcc to uninline functions marked 'inline'"
-       help
+       ---help---
          This option determines if the kernel forces gcc to inline the functions
          developers have marked 'inline'. Doing so takes away freedom from gcc to
          do what it thinks is best, which is desirable for the gcc 3.x series of
@@ -299,4 +300,3 @@ config OPTIMIZE_INLINING
          If unsure, say N.
 
 endmenu
-
index d1a47ad..1836191 100644 (file)
@@ -70,14 +70,17 @@ else
         # this works around some issues with generating unwind tables in older gccs
         # newer gccs do it by default
         KBUILD_CFLAGS += -maccumulate-outgoing-args
+endif
 
-        stackp := $(CONFIG_SHELL) $(srctree)/scripts/gcc-x86_64-has-stack-protector.sh
-        stackp-$(CONFIG_CC_STACKPROTECTOR) := $(shell $(stackp) \
-                "$(CC)" -fstack-protector )
-        stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += $(shell $(stackp) \
-                "$(CC)" -fstack-protector-all )
-
-        KBUILD_CFLAGS += $(stackp-y)
+ifdef CONFIG_CC_STACKPROTECTOR
+       cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
+        ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC)),y)
+                stackp-y := -fstack-protector
+                stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += -fstack-protector-all
+                KBUILD_CFLAGS += $(stackp-y)
+        else
+                $(warning stack protector enabled but no compiler support)
+        endif
 endif
 
 # Stackpointer is addressed different for 32 bit and 64 bit x86
@@ -102,29 +105,6 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
 # prevent gcc from generating any FP code by mistake
 KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,)
 
-###
-# Sub architecture support
-# fcore-y is linked before mcore-y files.
-
-# Default subarch .c files
-mcore-y  := arch/x86/mach-default/
-
-# Voyager subarch support
-mflags-$(CONFIG_X86_VOYAGER)   := -Iarch/x86/include/asm/mach-voyager
-mcore-$(CONFIG_X86_VOYAGER)    := arch/x86/mach-voyager/
-
-# generic subarchitecture
-mflags-$(CONFIG_X86_GENERICARCH):= -Iarch/x86/include/asm/mach-generic
-fcore-$(CONFIG_X86_GENERICARCH)        += arch/x86/mach-generic/
-mcore-$(CONFIG_X86_GENERICARCH)        := arch/x86/mach-default/
-
-# default subarch .h files
-mflags-y += -Iarch/x86/include/asm/mach-default
-
-# 64 bit does not support subarch support - clear sub arch variables
-fcore-$(CONFIG_X86_64)  :=
-mcore-$(CONFIG_X86_64)  :=
-
 KBUILD_CFLAGS += $(mflags-y)
 KBUILD_AFLAGS += $(mflags-y)
 
@@ -150,9 +130,6 @@ core-$(CONFIG_LGUEST_GUEST) += arch/x86/lguest/
 core-y += arch/x86/kernel/
 core-y += arch/x86/mm/
 
-# Remaining sub architecture files
-core-y += $(mcore-y)
-
 core-y += arch/x86/crypto/
 core-y += arch/x86/vdso/
 core-$(CONFIG_IA32_EMULATION) += arch/x86/ia32/
index 8f40a80..096dd53 100644 (file)
@@ -196,7 +196,6 @@ CONFIG_SMP=y
 CONFIG_SPARSE_IRQ=y
 CONFIG_X86_FIND_SMP_CONFIG=y
 CONFIG_X86_MPPARSE=y
-CONFIG_X86_PC=y
 # CONFIG_X86_ELAN is not set
 # CONFIG_X86_VOYAGER is not set
 # CONFIG_X86_GENERICARCH is not set
index 10728fd..2efb5d5 100644 (file)
@@ -198,7 +198,6 @@ CONFIG_SPARSE_IRQ=y
 # CONFIG_NUMA_MIGRATE_IRQ_DESC is not set
 CONFIG_X86_FIND_SMP_CONFIG=y
 CONFIG_X86_MPPARSE=y
-CONFIG_X86_PC=y
 # CONFIG_X86_ELAN is not set
 # CONFIG_X86_VOYAGER is not set
 # CONFIG_X86_GENERICARCH is not set
index 9dabd00..dd77ac0 100644 (file)
@@ -46,78 +46,83 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
 
 int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
 {
-       int err;
+       int err = 0;
 
        if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
                return -EFAULT;
 
-       /* If you change siginfo_t structure, please make sure that
-          this code is fixed accordingly.
-          It should never copy any pad contained in the structure
-          to avoid security leaks, but must copy the generic
-          3 ints plus the relevant union member.  */
-       err = __put_user(from->si_signo, &to->si_signo);
-       err |= __put_user(from->si_errno, &to->si_errno);
-       err |= __put_user((short)from->si_code, &to->si_code);
-
-       if (from->si_code < 0) {
-               err |= __put_user(from->si_pid, &to->si_pid);
-               err |= __put_user(from->si_uid, &to->si_uid);
-               err |= __put_user(ptr_to_compat(from->si_ptr), &to->si_ptr);
-       } else {
-               /*
-                * First 32bits of unions are always present:
-                * si_pid === si_band === si_tid === si_addr(LS half)
-                */
-               err |= __put_user(from->_sifields._pad[0],
-                                 &to->_sifields._pad[0]);
-               switch (from->si_code >> 16) {
-               case __SI_FAULT >> 16:
-                       break;
-               case __SI_CHLD >> 16:
-                       err |= __put_user(from->si_utime, &to->si_utime);
-                       err |= __put_user(from->si_stime, &to->si_stime);
-                       err |= __put_user(from->si_status, &to->si_status);
-                       /* FALL THROUGH */
-               default:
-               case __SI_KILL >> 16:
-                       err |= __put_user(from->si_uid, &to->si_uid);
-                       break;
-               case __SI_POLL >> 16:
-                       err |= __put_user(from->si_fd, &to->si_fd);
-                       break;
-               case __SI_TIMER >> 16:
-                       err |= __put_user(from->si_overrun, &to->si_overrun);
-                       err |= __put_user(ptr_to_compat(from->si_ptr),
-                                         &to->si_ptr);
-                       break;
-                        /* This is not generated by the kernel as of now.  */
-               case __SI_RT >> 16:
-               case __SI_MESGQ >> 16:
-                       err |= __put_user(from->si_uid, &to->si_uid);
-                       err |= __put_user(from->si_int, &to->si_int);
-                       break;
+       put_user_try {
+               /* If you change siginfo_t structure, please make sure that
+                  this code is fixed accordingly.
+                  It should never copy any pad contained in the structure
+                  to avoid security leaks, but must copy the generic
+                  3 ints plus the relevant union member.  */
+               put_user_ex(from->si_signo, &to->si_signo);
+               put_user_ex(from->si_errno, &to->si_errno);
+               put_user_ex((short)from->si_code, &to->si_code);
+
+               if (from->si_code < 0) {
+                       put_user_ex(from->si_pid, &to->si_pid);
+                       put_user_ex(from->si_uid, &to->si_uid);
+                       put_user_ex(ptr_to_compat(from->si_ptr), &to->si_ptr);
+               } else {
+                       /*
+                        * First 32bits of unions are always present:
+                        * si_pid === si_band === si_tid === si_addr(LS half)
+                        */
+                       put_user_ex(from->_sifields._pad[0],
+                                         &to->_sifields._pad[0]);
+                       switch (from->si_code >> 16) {
+                       case __SI_FAULT >> 16:
+                               break;
+                       case __SI_CHLD >> 16:
+                               put_user_ex(from->si_utime, &to->si_utime);
+                               put_user_ex(from->si_stime, &to->si_stime);
+                               put_user_ex(from->si_status, &to->si_status);
+                               /* FALL THROUGH */
+                       default:
+                       case __SI_KILL >> 16:
+                               put_user_ex(from->si_uid, &to->si_uid);
+                               break;
+                       case __SI_POLL >> 16:
+                               put_user_ex(from->si_fd, &to->si_fd);
+                               break;
+                       case __SI_TIMER >> 16:
+                               put_user_ex(from->si_overrun, &to->si_overrun);
+                               put_user_ex(ptr_to_compat(from->si_ptr),
+                                           &to->si_ptr);
+                               break;
+                                /* This is not generated by the kernel as of now.  */
+                       case __SI_RT >> 16:
+                       case __SI_MESGQ >> 16:
+                               put_user_ex(from->si_uid, &to->si_uid);
+                               put_user_ex(from->si_int, &to->si_int);
+                               break;
+                       }
                }
-       }
+       } put_user_catch(err);
+
        return err;
 }
 
 int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
 {
-       int err;
+       int err = 0;
        u32 ptr32;
 
        if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
                return -EFAULT;
 
-       err = __get_user(to->si_signo, &from->si_signo);
-       err |= __get_user(to->si_errno, &from->si_errno);
-       err |= __get_user(to->si_code, &from->si_code);
+       get_user_try {
+               get_user_ex(to->si_signo, &from->si_signo);
+               get_user_ex(to->si_errno, &from->si_errno);
+               get_user_ex(to->si_code, &from->si_code);
 
-       err |= __get_user(to->si_pid, &from->si_pid);
-       err |= __get_user(to->si_uid, &from->si_uid);
-       err |= __get_user(ptr32, &from->si_ptr);
-       to->si_ptr = compat_ptr(ptr32);
+               get_user_ex(to->si_pid, &from->si_pid);
+               get_user_ex(to->si_uid, &from->si_uid);
+               get_user_ex(ptr32, &from->si_ptr);
+               to->si_ptr = compat_ptr(ptr32);
+       } get_user_catch(err);
 
        return err;
 }
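
The two conversions above replace open-coded err |= __put_user()/__get_user() chains with the exception-table based put_user_try/put_user_ex/put_user_catch pattern (and its get_user_* counterparts) added elsewhere in this series. A minimal sketch of the resulting pattern, using a made-up structure and field names purely for illustration:

	#include <linux/uaccess.h>

	struct demo_info {			/* hypothetical user-visible layout */
		int a;
		int b;
	};

	static int demo_copy_to_user(struct demo_info __user *to, int a, int b)
	{
		int err = 0;

		if (!access_ok(VERIFY_WRITE, to, sizeof(*to)))
			return -EFAULT;

		put_user_try {
			/* a fault in any put_user_ex() is handled through the
			   exception table and reported as a non-zero err by
			   put_user_catch() */
			put_user_ex(a, &to->a);
			put_user_ex(b, &to->b);
		} put_user_catch(err);

		return err;
	}
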
@@ -142,17 +147,23 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
                                  struct pt_regs *regs)
 {
        stack_t uss, uoss;
-       int ret;
+       int ret, err = 0;
        mm_segment_t seg;
 
        if (uss_ptr) {
                u32 ptr;
 
                memset(&uss, 0, sizeof(stack_t));
-               if (!access_ok(VERIFY_READ, uss_ptr, sizeof(stack_ia32_t)) ||
-                           __get_user(ptr, &uss_ptr->ss_sp) ||
-                           __get_user(uss.ss_flags, &uss_ptr->ss_flags) ||
-                           __get_user(uss.ss_size, &uss_ptr->ss_size))
+               if (!access_ok(VERIFY_READ, uss_ptr, sizeof(stack_ia32_t)))
+                       return -EFAULT;
+
+               get_user_try {
+                       get_user_ex(ptr, &uss_ptr->ss_sp);
+                       get_user_ex(uss.ss_flags, &uss_ptr->ss_flags);
+                       get_user_ex(uss.ss_size, &uss_ptr->ss_size);
+               } get_user_catch(err);
+
+               if (err)
                        return -EFAULT;
                uss.ss_sp = compat_ptr(ptr);
        }
@@ -161,10 +172,16 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
        ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
        set_fs(seg);
        if (ret >= 0 && uoss_ptr)  {
-               if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)) ||
-                   __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
-                   __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
-                   __put_user(uoss.ss_size, &uoss_ptr->ss_size))
+               if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
+                       return -EFAULT;
+
+               put_user_try {
+                       put_user_ex(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp);
+                       put_user_ex(uoss.ss_flags, &uoss_ptr->ss_flags);
+                       put_user_ex(uoss.ss_size, &uoss_ptr->ss_size);
+               } put_user_catch(err);
+
+               if (err)
                        ret = -EFAULT;
        }
        return ret;
@@ -174,18 +191,18 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
  * Do a signal return; undo the signal stack.
  */
 #define COPY(x)                        {               \
-       err |= __get_user(regs->x, &sc->x);     \
+       get_user_ex(regs->x, &sc->x);           \
 }
 
 #define COPY_SEG_CPL3(seg)     {                       \
                unsigned short tmp;                     \
-               err |= __get_user(tmp, &sc->seg);       \
+               get_user_ex(tmp, &sc->seg);             \
                regs->seg = tmp | 3;                    \
 }
 
 #define RELOAD_SEG(seg)                {               \
        unsigned int cur, pre;                  \
-       err |= __get_user(pre, &sc->seg);       \
+       get_user_ex(pre, &sc->seg);             \
        savesegment(seg, cur);                  \
        pre |= 3;                               \
        if (pre != cur)                         \
@@ -209,39 +226,42 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
               sc, sc->err, sc->ip, sc->cs, sc->flags);
 #endif
 
-       /*
-        * Reload fs and gs if they have changed in the signal
-        * handler.  This does not handle long fs/gs base changes in
-        * the handler, but does not clobber them at least in the
-        * normal case.
-        */
-       err |= __get_user(gs, &sc->gs);
-       gs |= 3;
-       savesegment(gs, oldgs);
-       if (gs != oldgs)
-               load_gs_index(gs);
-
-       RELOAD_SEG(fs);
-       RELOAD_SEG(ds);
-       RELOAD_SEG(es);
-
-       COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
-       COPY(dx); COPY(cx); COPY(ip);
-       /* Don't touch extended registers */
-
-       COPY_SEG_CPL3(cs);
-       COPY_SEG_CPL3(ss);
-
-       err |= __get_user(tmpflags, &sc->flags);
-       regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
-       /* disable syscall checks */
-       regs->orig_ax = -1;
-
-       err |= __get_user(tmp, &sc->fpstate);
-       buf = compat_ptr(tmp);
-       err |= restore_i387_xstate_ia32(buf);
-
-       err |= __get_user(*pax, &sc->ax);
+       get_user_try {
+               /*
+                * Reload fs and gs if they have changed in the signal
+                * handler.  This does not handle long fs/gs base changes in
+                * the handler, but does not clobber them at least in the
+                * normal case.
+                */
+               get_user_ex(gs, &sc->gs);
+               gs |= 3;
+               savesegment(gs, oldgs);
+               if (gs != oldgs)
+                       load_gs_index(gs);
+
+               RELOAD_SEG(fs);
+               RELOAD_SEG(ds);
+               RELOAD_SEG(es);
+
+               COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
+               COPY(dx); COPY(cx); COPY(ip);
+               /* Don't touch extended registers */
+
+               COPY_SEG_CPL3(cs);
+               COPY_SEG_CPL3(ss);
+
+               get_user_ex(tmpflags, &sc->flags);
+               regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
+               /* disable syscall checks */
+               regs->orig_ax = -1;
+
+               get_user_ex(tmp, &sc->fpstate);
+               buf = compat_ptr(tmp);
+               err |= restore_i387_xstate_ia32(buf);
+
+               get_user_ex(*pax, &sc->ax);
+       } get_user_catch(err);
+
        return err;
 }
 
@@ -319,36 +339,38 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
 {
        int tmp, err = 0;
 
-       savesegment(gs, tmp);
-       err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
-       savesegment(fs, tmp);
-       err |= __put_user(tmp, (unsigned int __user *)&sc->fs);
-       savesegment(ds, tmp);
-       err |= __put_user(tmp, (unsigned int __user *)&sc->ds);
-       savesegment(es, tmp);
-       err |= __put_user(tmp, (unsigned int __user *)&sc->es);
-
-       err |= __put_user(regs->di, &sc->di);
-       err |= __put_user(regs->si, &sc->si);
-       err |= __put_user(regs->bp, &sc->bp);
-       err |= __put_user(regs->sp, &sc->sp);
-       err |= __put_user(regs->bx, &sc->bx);
-       err |= __put_user(regs->dx, &sc->dx);
-       err |= __put_user(regs->cx, &sc->cx);
-       err |= __put_user(regs->ax, &sc->ax);
-       err |= __put_user(current->thread.trap_no, &sc->trapno);
-       err |= __put_user(current->thread.error_code, &sc->err);
-       err |= __put_user(regs->ip, &sc->ip);
-       err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs);
-       err |= __put_user(regs->flags, &sc->flags);
-       err |= __put_user(regs->sp, &sc->sp_at_signal);
-       err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss);
-
-       err |= __put_user(ptr_to_compat(fpstate), &sc->fpstate);
-
-       /* non-iBCS2 extensions.. */
-       err |= __put_user(mask, &sc->oldmask);
-       err |= __put_user(current->thread.cr2, &sc->cr2);
+       put_user_try {
+               savesegment(gs, tmp);
+               put_user_ex(tmp, (unsigned int __user *)&sc->gs);
+               savesegment(fs, tmp);
+               put_user_ex(tmp, (unsigned int __user *)&sc->fs);
+               savesegment(ds, tmp);
+               put_user_ex(tmp, (unsigned int __user *)&sc->ds);
+               savesegment(es, tmp);
+               put_user_ex(tmp, (unsigned int __user *)&sc->es);
+
+               put_user_ex(regs->di, &sc->di);
+               put_user_ex(regs->si, &sc->si);
+               put_user_ex(regs->bp, &sc->bp);
+               put_user_ex(regs->sp, &sc->sp);
+               put_user_ex(regs->bx, &sc->bx);
+               put_user_ex(regs->dx, &sc->dx);
+               put_user_ex(regs->cx, &sc->cx);
+               put_user_ex(regs->ax, &sc->ax);
+               put_user_ex(current->thread.trap_no, &sc->trapno);
+               put_user_ex(current->thread.error_code, &sc->err);
+               put_user_ex(regs->ip, &sc->ip);
+               put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
+               put_user_ex(regs->flags, &sc->flags);
+               put_user_ex(regs->sp, &sc->sp_at_signal);
+               put_user_ex(regs->ss, (unsigned int __user *)&sc->ss);
+
+               put_user_ex(ptr_to_compat(fpstate), &sc->fpstate);
+
+               /* non-iBCS2 extensions.. */
+               put_user_ex(mask, &sc->oldmask);
+               put_user_ex(current->thread.cr2, &sc->cr2);
+       } put_user_catch(err);
 
        return err;
 }
@@ -437,13 +459,17 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
                else
                        restorer = &frame->retcode;
        }
-       err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);
 
-       /*
-        * These are actually not used anymore, but left because some
-        * gdb versions depend on them as a marker.
-        */
-       err |= __put_user(*((u64 *)&code), (u64 *)frame->retcode);
+       put_user_try {
+               put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
+
+               /*
+                * These are actually not used anymore, but left because some
+                * gdb versions depend on them as a marker.
+                */
+               put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
+       } put_user_catch(err);
+
        if (err)
                return -EFAULT;
 
@@ -496,41 +522,40 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
                return -EFAULT;
 
-       err |= __put_user(sig, &frame->sig);
-       err |= __put_user(ptr_to_compat(&frame->info), &frame->pinfo);
-       err |= __put_user(ptr_to_compat(&frame->uc), &frame->puc);
-       err |= copy_siginfo_to_user32(&frame->info, info);
-       if (err)
-               return -EFAULT;
+       put_user_try {
+               put_user_ex(sig, &frame->sig);
+               put_user_ex(ptr_to_compat(&frame->info), &frame->pinfo);
+               put_user_ex(ptr_to_compat(&frame->uc), &frame->puc);
+               err |= copy_siginfo_to_user32(&frame->info, info);
 
-       /* Create the ucontext.  */
-       if (cpu_has_xsave)
-               err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
-       else
-               err |= __put_user(0, &frame->uc.uc_flags);
-       err |= __put_user(0, &frame->uc.uc_link);
-       err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
-       err |= __put_user(sas_ss_flags(regs->sp),
-                         &frame->uc.uc_stack.ss_flags);
-       err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
-       err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
-                                    regs, set->sig[0]);
-       err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
-       if (err)
-               return -EFAULT;
+               /* Create the ucontext.  */
+               if (cpu_has_xsave)
+                       put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
+               else
+                       put_user_ex(0, &frame->uc.uc_flags);
+               put_user_ex(0, &frame->uc.uc_link);
+               put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+               put_user_ex(sas_ss_flags(regs->sp),
+                           &frame->uc.uc_stack.ss_flags);
+               put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+               err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
+                                            regs, set->sig[0]);
+               err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+               if (ka->sa.sa_flags & SA_RESTORER)
+                       restorer = ka->sa.sa_restorer;
+               else
+                       restorer = VDSO32_SYMBOL(current->mm->context.vdso,
+                                                rt_sigreturn);
+               put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
+
+               /*
+                * Not actually used anymore, but left because some gdb
+                * versions need it.
+                */
+               put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
+       } put_user_catch(err);
 
-       if (ka->sa.sa_flags & SA_RESTORER)
-               restorer = ka->sa.sa_restorer;
-       else
-               restorer = VDSO32_SYMBOL(current->mm->context.vdso,
-                                        rt_sigreturn);
-       err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);
-
-       /*
-        * Not actually used anymore, but left because some gdb
-        * versions need it.
-        */
-       err |= __put_user(*((u64 *)&code), (u64 *)frame->retcode);
        if (err)
                return -EFAULT;
 
index 5a0d76d..097a6b6 100644 (file)
@@ -112,8 +112,8 @@ ENTRY(ia32_sysenter_target)
        CFI_DEF_CFA     rsp,0
        CFI_REGISTER    rsp,rbp
        SWAPGS_UNSAFE_STACK
-       movq    %gs:pda_kernelstack, %rsp
-       addq    $(PDA_STACKOFFSET),%rsp 
+       movq    PER_CPU_VAR(kernel_stack), %rsp
+       addq    $(KERNEL_STACK_OFFSET),%rsp
        /*
         * No need to follow this irqs on/off section: the syscall
         * disabled irqs, here we enable it straight after entry:
@@ -273,13 +273,13 @@ ENDPROC(ia32_sysenter_target)
 ENTRY(ia32_cstar_target)
        CFI_STARTPROC32 simple
        CFI_SIGNAL_FRAME
-       CFI_DEF_CFA     rsp,PDA_STACKOFFSET
+       CFI_DEF_CFA     rsp,KERNEL_STACK_OFFSET
        CFI_REGISTER    rip,rcx
        /*CFI_REGISTER  rflags,r11*/
        SWAPGS_UNSAFE_STACK
        movl    %esp,%r8d
        CFI_REGISTER    rsp,r8
-       movq    %gs:pda_kernelstack,%rsp
+       movq    PER_CPU_VAR(kernel_stack),%rsp
        /*
         * No need to follow this irqs on/off section: the syscall
         * disabled irqs and here we enable it straight after entry:
index 3c601f8..bb70e39 100644 (file)
@@ -55,7 +55,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
        dump->regs.ds = (u16)regs->ds;
        dump->regs.es = (u16)regs->es;
        dump->regs.fs = (u16)regs->fs;
-       savesegment(gs, dump->regs.gs);
+       dump->regs.gs = get_user_gs(regs);
        dump->regs.orig_ax = regs->orig_ax;
        dump->regs.ip = regs->ip;
        dump->regs.cs = (u16)regs->cs;
index 9830681..4518dc5 100644 (file)
@@ -102,9 +102,6 @@ static inline void disable_acpi(void)
        acpi_noirq = 1;
 }
 
-/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */
-#define FIX_ACPI_PAGES 4
-
 extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
 
 static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
index ab1d51a..fba49f6 100644 (file)
        } while (0)
 
 
+#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
 extern void generic_apic_probe(void);
+#else
+static inline void generic_apic_probe(void)
+{
+}
+#endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
 
@@ -41,6 +47,21 @@ extern unsigned int apic_verbosity;
 extern int local_apic_timer_c2_ok;
 
 extern int disable_apic;
+
+#ifdef CONFIG_SMP
+extern void __inquire_remote_apic(int apicid);
+#else /* CONFIG_SMP */
+static inline void __inquire_remote_apic(int apicid)
+{
+}
+#endif /* CONFIG_SMP */
+
+static inline void default_inquire_remote_apic(int apicid)
+{
+       if (apic_verbosity >= APIC_DEBUG)
+               __inquire_remote_apic(apicid);
+}
+
 /*
  * Basic functions accessing APICs.
  */
@@ -124,12 +145,35 @@ struct apic_ops {
 
 extern struct apic_ops *apic_ops;
 
-#define apic_read (apic_ops->read)
-#define apic_write (apic_ops->write)
-#define apic_icr_read (apic_ops->icr_read)
-#define apic_icr_write (apic_ops->icr_write)
-#define apic_wait_icr_idle (apic_ops->wait_icr_idle)
-#define safe_apic_wait_icr_idle (apic_ops->safe_wait_icr_idle)
+static inline u32 apic_read(u32 reg)
+{
+       return apic_ops->read(reg);
+}
+
+static inline void apic_write(u32 reg, u32 val)
+{
+       apic_ops->write(reg, val);
+}
+
+static inline u64 apic_icr_read(void)
+{
+       return apic_ops->icr_read();
+}
+
+static inline void apic_icr_write(u32 low, u32 high)
+{
+       apic_ops->icr_write(low, high);
+}
+
+static inline void apic_wait_icr_idle(void)
+{
+       apic_ops->wait_icr_idle();
+}
+
+static inline u32 safe_apic_wait_icr_idle(void)
+{
+       return apic_ops->safe_wait_icr_idle();
+}
 
 extern int get_physical_broadcast(void);
 
@@ -196,4 +240,22 @@ static inline void disable_local_APIC(void) { }
 
 #endif /* !CONFIG_X86_LOCAL_APIC */
 
+#ifdef CONFIG_X86_64
+#define        SET_APIC_ID(x)          (apic->set_apic_id(x))
+#else
+
+#ifdef CONFIG_X86_LOCAL_APIC
+static inline unsigned default_get_apic_id(unsigned long x)
+{
+       unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
+
+       if (APIC_XAPIC(ver))
+               return (x >> 24) & 0xFF;
+       else
+               return (x >> 24) & 0x0F;
+}
+#endif
+
+#endif
+
 #endif /* _ASM_X86_APIC_H */
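
Turning the apic_ops accessors from object-like macros into static inline wrappers keeps every call site unchanged while giving the compiler real prototypes to check arguments against. The same transformation applied to a made-up ops table, as a self-contained illustration:

	#include <stdint.h>

	struct demo_ops {
		uint32_t (*read)(uint32_t reg);
		void     (*write)(uint32_t reg, uint32_t val);
	};

	extern struct demo_ops *demo_ops;	/* plays the role of apic_ops */

	/* old style: #define demo_read (demo_ops->read) -- no type checking */

	static inline uint32_t demo_read(uint32_t reg)
	{
		return demo_ops->read(reg);
	}

	static inline void demo_write(uint32_t reg, uint32_t val)
	{
		demo_ops->write(reg, val);
	}
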
diff --git a/arch/x86/include/asm/apicnum.h b/arch/x86/include/asm/apicnum.h
new file mode 100644 (file)
index 0000000..82f613c
--- /dev/null
@@ -0,0 +1,12 @@
+#ifndef _ASM_X86_APICNUM_H
+#define _ASM_X86_APICNUM_H
+
+/* define MAX_IO_APICS */
+#ifdef CONFIG_X86_32
+# define MAX_IO_APICS 64
+#else
+# define MAX_IO_APICS 128
+# define MAX_LOCAL_APIC 32768
+#endif
+
+#endif /* _ASM_X86_APICNUM_H */
diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h
deleted file mode 100644 (file)
index d8dd9f5..0000000
+++ /dev/null
@@ -1,155 +0,0 @@
-#ifndef __ASM_MACH_APIC_H
-#define __ASM_MACH_APIC_H
-
-#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu))
-#define esr_disable (1)
-
-static inline int apic_id_registered(void)
-{
-       return (1);
-}
-
-static inline const cpumask_t *target_cpus(void)
-{
-#ifdef CONFIG_SMP
-       return &cpu_online_map;
-#else
-       return &cpumask_of_cpu(0);
-#endif
-}
-
-#undef APIC_DEST_LOGICAL
-#define APIC_DEST_LOGICAL      0
-#define APIC_DFR_VALUE         (APIC_DFR_FLAT)
-#define INT_DELIVERY_MODE      (dest_Fixed)
-#define INT_DEST_MODE          (0)    /* phys delivery to target proc */
-#define NO_BALANCE_IRQ         (0)
-
-static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
-{
-       return (0);
-}
-
-static inline unsigned long check_apicid_present(int bit)
-{
-       return (1);
-}
-
-static inline unsigned long calculate_ldr(int cpu)
-{
-       unsigned long val, id;
-       val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
-       id = xapic_phys_to_log_apicid(cpu);
-       val |= SET_APIC_LOGICAL_ID(id);
-       return val;
-}
-
-/*
- * Set up the logical destination ID.
- *
- * Intel recommends to set DFR, LDR and TPR before enabling
- * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
- * document number 292116).  So here it goes...
- */
-static inline void init_apic_ldr(void)
-{
-       unsigned long val;
-       int cpu = smp_processor_id();
-
-       apic_write(APIC_DFR, APIC_DFR_VALUE);
-       val = calculate_ldr(cpu);
-       apic_write(APIC_LDR, val);
-}
-
-static inline void setup_apic_routing(void)
-{
-       printk("Enabling APIC mode:  %s.  Using %d I/O APICs\n",
-               "Physflat", nr_ioapics);
-}
-
-static inline int multi_timer_check(int apic, int irq)
-{
-       return (0);
-}
-
-static inline int apicid_to_node(int logical_apicid)
-{
-       return apicid_2_node[hard_smp_processor_id()];
-}
-
-static inline int cpu_present_to_apicid(int mps_cpu)
-{
-       if (mps_cpu < nr_cpu_ids)
-               return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
-
-       return BAD_APICID;
-}
-
-static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
-{
-       return physid_mask_of_physid(phys_apicid);
-}
-
-extern u8 cpu_2_logical_apicid[];
-/* Mapping from cpu number to logical apicid */
-static inline int cpu_to_logical_apicid(int cpu)
-{
-       if (cpu >= nr_cpu_ids)
-               return BAD_APICID;
-       return cpu_physical_id(cpu);
-}
-
-static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
-{
-       /* For clustered we don't have a good way to do this yet - hack */
-       return physids_promote(0xFFL);
-}
-
-static inline void setup_portio_remap(void)
-{
-}
-
-static inline void enable_apic_mode(void)
-{
-}
-
-static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
-{
-       return (1);
-}
-
-/* As we are using single CPU as destination, pick only one CPU here */
-static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
-{
-       int cpu;
-       int apicid;     
-
-       cpu = first_cpu(*cpumask);
-       apicid = cpu_to_logical_apicid(cpu);
-       return apicid;
-}
-
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-                                                 const struct cpumask *andmask)
-{
-       int cpu;
-
-       /*
-        * We're using fixed IRQ delivery, can only return one phys APIC ID.
-        * May as well be the first.
-        */
-       for_each_cpu_and(cpu, cpumask, andmask)
-               if (cpumask_test_cpu(cpu, cpu_online_mask))
-                       break;
-       if (cpu < nr_cpu_ids)
-               return cpu_to_logical_apicid(cpu);
-
-       return BAD_APICID;
-}
-
-static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-{
-       return cpuid_apic >> index_msb;
-}
-
-#endif /* __ASM_MACH_APIC_H */
diff --git a/arch/x86/include/asm/bigsmp/apicdef.h b/arch/x86/include/asm/bigsmp/apicdef.h
deleted file mode 100644 (file)
index 392c3f5..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ASM_MACH_APICDEF_H
-#define __ASM_MACH_APICDEF_H
-
-#define                APIC_ID_MASK            (0xFF<<24)
-
-static inline unsigned get_apic_id(unsigned long x)
-{
-       return (((x)>>24)&0xFF);
-}
-
-#define                GET_APIC_ID(x)  get_apic_id(x)
-
-#endif
diff --git a/arch/x86/include/asm/bigsmp/ipi.h b/arch/x86/include/asm/bigsmp/ipi.h
deleted file mode 100644 (file)
index 27fcd01..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef __ASM_MACH_IPI_H
-#define __ASM_MACH_IPI_H
-
-void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
-void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-
-static inline void send_IPI_mask(const struct cpumask *mask, int vector)
-{
-       send_IPI_mask_sequence(mask, vector);
-}
-
-static inline void send_IPI_allbutself(int vector)
-{
-       send_IPI_mask_allbutself(cpu_online_mask, vector);
-}
-
-static inline void send_IPI_all(int vector)
-{
-       send_IPI_mask(cpu_online_mask, vector);
-}
-
-#endif /* __ASM_MACH_IPI_H */
index 2bc162e..0e63c9a 100644 (file)
@@ -1,5 +1,55 @@
 /*
- * Some macros to handle stack frames in assembly.
+
+ x86 function call convention, 64-bit:
+ -------------------------------------
+  arguments           |  callee-saved      | extra caller-saved | return
+ [callee-clobbered]   |                    | [callee-clobbered] |
+ ---------------------------------------------------------------------------
+ rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]
+
+ ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
+   functions when it sees tail-call optimization possibilities) rflags is
+   clobbered. Leftover arguments are passed over the stack frame.)
+
+ [*]  In the frame-pointers case rbp is fixed to the stack frame.
+
+ [**] for struct return values wider than 64 bits the return convention is a
+      bit more complex: up to 128 bits width we return small structures
+      straight in rax, rdx. For structures larger than that (3 words or
+      larger) the caller puts a pointer to an on-stack return struct
+      [allocated in the caller's stack frame] into the first argument - i.e.
+      into rdi. All other arguments shift up by one in this case.
+      Fortunately this case is rare in the kernel.
+
+For 32-bit we have the following conventions - kernel is built with
+-mregparm=3 and -freg-struct-return:
+
+ x86 function calling convention, 32-bit:
+ ----------------------------------------
+  arguments         | callee-saved        | extra caller-saved | return
+ [callee-clobbered] |                     | [callee-clobbered] |
+ -------------------------------------------------------------------------
+ eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]
+
+ ( here too esp is obviously invariant across normal function calls. eflags
+   is clobbered. Leftover arguments are passed over the stack frame. )
+
+ [*]  In the frame-pointers case ebp is fixed to the stack frame.
+
+ [**] We build with -freg-struct-return, which on 32-bit means similar
+      semantics as on 64-bit: edx can be used for a second return value
+      (i.e. covering integer and structure sizes up to 64 bits) - after that
+      it gets more complex and more expensive: 3-word or larger struct returns
+      get done in the caller's frame and the pointer to the return struct goes
+      into regparm0, i.e. eax - the other arguments shift up and the
+      function's register parameters degenerate to regparm=2 in essence.
+
+*/
+
+
+/*
+ * 64-bit system call stack frame layout defines and helpers,
+ * for assembly code:
  */
 
 #define R15              0
@@ -9,7 +59,7 @@
 #define RBP             32
 #define RBX             40
 
-/* arguments: interrupts/non tracing syscalls only save upto here*/
+/* arguments: interrupts/non tracing syscalls only save up to here: */
 #define R11             48
 #define R10             56
 #define R9              64
@@ -22,7 +72,7 @@
 #define ORIG_RAX       120       /* + error_code */
 /* end of arguments */
 
-/* cpu exception frame or undefined in case of fast syscall. */
+/* cpu exception frame or undefined in case of fast syscall: */
 #define RIP            128
 #define CS             136
 #define EFLAGS         144
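
The new comment block in calling.h spells out the register conventions the 64-bit entry code depends on. A small non-kernel example of the 64-bit rules it describes (argument registers, stack spill after the sixth argument, and the hidden return pointer for large struct returns):

	#include <stdint.h>

	struct big { uint64_t a, b, c; };	/* three words: returned via memory */

	/* a1..a6 arrive in rdi, rsi, rdx, rcx, r8, r9; a7 is passed on the stack */
	long demo_args(long a1, long a2, long a3, long a4, long a5, long a6, long a7)
	{
		return a1 + a7;			/* result goes back in rax */
	}

	/* for a struct wider than 128 bits the caller passes a pointer to the
	   result slot in rdi and the named arguments shift up by one register */
	struct big demo_struct_return(long x)
	{
		struct big r = { x, x + 1, x + 2 };
		return r;
	}
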
index bae482d..b185091 100644 (file)
@@ -7,6 +7,20 @@
 #include <linux/nodemask.h>
 #include <linux/percpu.h>
 
+#ifdef CONFIG_SMP
+
+extern void prefill_possible_map(void);
+
+#else /* CONFIG_SMP */
+
+static inline void prefill_possible_map(void) {}
+
+#define cpu_physical_id(cpu)                   boot_cpu_physical_apicid
+#define safe_smp_processor_id()                        0
+#define stack_smp_processor_id()               0
+
+#endif /* CONFIG_SMP */
+
 struct x86_cpu {
        struct cpu cpu;
 };
@@ -17,4 +31,7 @@ extern void arch_unregister_cpu(int);
 #endif
 
 DECLARE_PER_CPU(int, cpu_state);
+
+extern unsigned int boot_cpu_id;
+
 #endif /* _ASM_X86_CPU_H */
diff --git a/arch/x86/include/asm/cpumask.h b/arch/x86/include/asm/cpumask.h
new file mode 100644 (file)
index 0000000..a7f3c75
--- /dev/null
@@ -0,0 +1,32 @@
+#ifndef _ASM_X86_CPUMASK_H
+#define _ASM_X86_CPUMASK_H
+#ifndef __ASSEMBLY__
+#include <linux/cpumask.h>
+
+#ifdef CONFIG_X86_64
+
+extern cpumask_var_t cpu_callin_mask;
+extern cpumask_var_t cpu_callout_mask;
+extern cpumask_var_t cpu_initialized_mask;
+extern cpumask_var_t cpu_sibling_setup_mask;
+
+extern void setup_cpu_local_masks(void);
+
+#else /* CONFIG_X86_32 */
+
+extern cpumask_t cpu_callin_map;
+extern cpumask_t cpu_callout_map;
+extern cpumask_t cpu_initialized;
+extern cpumask_t cpu_sibling_setup_map;
+
+#define cpu_callin_mask                ((struct cpumask *)&cpu_callin_map)
+#define cpu_callout_mask       ((struct cpumask *)&cpu_callout_map)
+#define cpu_initialized_mask   ((struct cpumask *)&cpu_initialized)
+#define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map)
+
+static inline void setup_cpu_local_masks(void) { }
+
+#endif /* CONFIG_X86_32 */
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_X86_CPUMASK_H */
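
The new asm/cpumask.h lets generic code treat the boot-time CPU masks as struct cpumask pointers on both configurations: 64-bit builds use cpumask_var_t objects that setup_cpu_local_masks() must allocate, while 32-bit builds keep the static cpumask_t maps behind the casting defines. A rough usage sketch with an invented helper:

	#include <linux/kernel.h>
	#include <linux/cpumask.h>
	#include <asm/cpumask.h>

	/* hypothetical helper -- not part of this patch */
	static void demo_mark_cpu_callin(unsigned int cpu)
	{
		/* resolves to a struct cpumask pointer on both 32- and 64-bit */
		cpumask_set_cpu(cpu, cpu_callin_mask);

		if (!cpumask_test_cpu(cpu, cpu_callout_mask))
			pr_debug("cpu %u called in before callout\n", cpu);
	}
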
index 0930b4f..c68c361 100644 (file)
@@ -1,39 +1,21 @@
 #ifndef _ASM_X86_CURRENT_H
 #define _ASM_X86_CURRENT_H
 
-#ifdef CONFIG_X86_32
 #include <linux/compiler.h>
 #include <asm/percpu.h>
 
+#ifndef __ASSEMBLY__
 struct task_struct;
 
 DECLARE_PER_CPU(struct task_struct *, current_task);
-static __always_inline struct task_struct *get_current(void)
-{
-       return x86_read_percpu(current_task);
-}
-
-#else /* X86_32 */
-
-#ifndef __ASSEMBLY__
-#include <asm/pda.h>
-
-struct task_struct;
 
 static __always_inline struct task_struct *get_current(void)
 {
-       return read_pda(pcurrent);
+       return percpu_read(current_task);
 }
 
-#else /* __ASSEMBLY__ */
-
-#include <asm/asm-offsets.h>
-#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
+#define current get_current()
 
 #endif /* __ASSEMBLY__ */
 
-#endif /* X86_32 */
-
-#define current get_current()
-
 #endif /* _ASM_X86_CURRENT_H */
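
With the PDA field gone, get_current() reads the same per-CPU variable on 32-bit and 64-bit, and percpu_read() compiles down to one segment-relative load. The same accessors work for any per-CPU scalar; a sketch with an invented variable, assuming the percpu_read/percpu_write helpers from the per-cpu rework merged here:

	#include <linux/percpu.h>
	#include <asm/percpu.h>

	DEFINE_PER_CPU(int, demo_state);	/* hypothetical per-CPU scalar */

	static inline void demo_set_state(int v)
	{
		percpu_write(demo_state, v);	/* single %fs:/%gs:-relative store */
	}

	static inline int demo_get_state(void)
	{
		return percpu_read(demo_state);	/* single segment-relative load */
	}
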
index f51a3dd..83c1bc8 100644 (file)
@@ -112,7 +112,7 @@ extern unsigned int vdso_enabled;
  * now struct_user_regs, they are different)
  */
 
-#define ELF_CORE_COPY_REGS(pr_reg, regs)       \
+#define ELF_CORE_COPY_REGS_COMMON(pr_reg, regs)        \
 do {                                           \
        pr_reg[0] = regs->bx;                   \
        pr_reg[1] = regs->cx;                   \
@@ -124,7 +124,6 @@ do {                                                \
        pr_reg[7] = regs->ds & 0xffff;          \
        pr_reg[8] = regs->es & 0xffff;          \
        pr_reg[9] = regs->fs & 0xffff;          \
-       savesegment(gs, pr_reg[10]);            \
        pr_reg[11] = regs->orig_ax;             \
        pr_reg[12] = regs->ip;                  \
        pr_reg[13] = regs->cs & 0xffff;         \
@@ -133,6 +132,18 @@ do {                                               \
        pr_reg[16] = regs->ss & 0xffff;         \
 } while (0);
 
+#define ELF_CORE_COPY_REGS(pr_reg, regs)       \
+do {                                           \
+       ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\
+       pr_reg[10] = get_user_gs(regs);         \
+} while (0);
+
+#define ELF_CORE_COPY_KERNEL_REGS(pr_reg, regs)        \
+do {                                           \
+       ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\
+       savesegment(gs, pr_reg[10]);            \
+} while (0);
+
 #define ELF_PLATFORM   (utsname()->machine)
 #define set_personality_64bit()        do { } while (0)
 
similarity index 57%
rename from arch/x86/include/asm/mach-default/entry_arch.h
rename to arch/x86/include/asm/entry_arch.h
index 6b1add8..854d538 100644 (file)
@@ -9,12 +9,28 @@
  * is no hardware IRQ pin equivalent for them, they are triggered
  * through the ICC by us (IPIs)
  */
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_SMP
 BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
-BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
 BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
 BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
 BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
+
+BUILD_INTERRUPT3(invalidate_interrupt0,INVALIDATE_TLB_VECTOR_START+0,
+                smp_invalidate_interrupt)
+BUILD_INTERRUPT3(invalidate_interrupt1,INVALIDATE_TLB_VECTOR_START+1,
+                smp_invalidate_interrupt)
+BUILD_INTERRUPT3(invalidate_interrupt2,INVALIDATE_TLB_VECTOR_START+2,
+                smp_invalidate_interrupt)
+BUILD_INTERRUPT3(invalidate_interrupt3,INVALIDATE_TLB_VECTOR_START+3,
+                smp_invalidate_interrupt)
+BUILD_INTERRUPT3(invalidate_interrupt4,INVALIDATE_TLB_VECTOR_START+4,
+                smp_invalidate_interrupt)
+BUILD_INTERRUPT3(invalidate_interrupt5,INVALIDATE_TLB_VECTOR_START+5,
+                smp_invalidate_interrupt)
+BUILD_INTERRUPT3(invalidate_interrupt6,INVALIDATE_TLB_VECTOR_START+6,
+                smp_invalidate_interrupt)
+BUILD_INTERRUPT3(invalidate_interrupt7,INVALIDATE_TLB_VECTOR_START+7,
+                smp_invalidate_interrupt)
 #endif
 
 /*
@@ -25,10 +41,15 @@ BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
  * a much simpler SMP time architecture:
  */
 #ifdef CONFIG_X86_LOCAL_APIC
+
 BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
 BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
 BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
 
+#ifdef CONFIG_PERF_COUNTERS
+BUILD_INTERRUPT(perf_counter_interrupt, LOCAL_PERF_VECTOR)
+#endif
+
 #ifdef CONFIG_X86_MCE_P4THERMAL
 BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
 #endif
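
The single invalidate_interrupt entry is fanned out into eight vectors above, all routed to smp_invalidate_interrupt(), matching the 64-bit TLB-flush scheme in which the sending CPU derives a slot (and therefore a vector) from its CPU number. Roughly, and only as a sketch of that scheme rather than code from this patch:

	#include <linux/smp.h>
	#include <asm/irq_vectors.h>

	/* hypothetical: pick one of the eight per-sender flush vectors */
	static int demo_pick_invalidate_vector(void)
	{
		int sender = smp_processor_id() % 8;	/* 8 slots, one per vector */

		return INVALIDATE_TLB_VECTOR_START + sender;
	}
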
diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h
deleted file mode 100644 (file)
index c58b9cc..0000000
+++ /dev/null
@@ -1,242 +0,0 @@
-#ifndef __ASM_ES7000_APIC_H
-#define __ASM_ES7000_APIC_H
-
-#include <linux/gfp.h>
-
-#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
-#define esr_disable (1)
-
-static inline int apic_id_registered(void)
-{
-               return (1);
-}
-
-static inline const cpumask_t *target_cpus_cluster(void)
-{
-       return &CPU_MASK_ALL;
-}
-
-static inline const cpumask_t *target_cpus(void)
-{
-       return &cpumask_of_cpu(smp_processor_id());
-}
-
-#define APIC_DFR_VALUE_CLUSTER         (APIC_DFR_CLUSTER)
-#define INT_DELIVERY_MODE_CLUSTER      (dest_LowestPrio)
-#define INT_DEST_MODE_CLUSTER          (1) /* logical delivery broadcast to all procs */
-#define NO_BALANCE_IRQ_CLUSTER         (1)
-
-#define APIC_DFR_VALUE         (APIC_DFR_FLAT)
-#define INT_DELIVERY_MODE      (dest_Fixed)
-#define INT_DEST_MODE          (0)    /* phys delivery to target procs */
-#define NO_BALANCE_IRQ         (0)
-#undef  APIC_DEST_LOGICAL
-#define APIC_DEST_LOGICAL      0x0
-
-static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
-{
-       return 0;
-}
-static inline unsigned long check_apicid_present(int bit)
-{
-       return physid_isset(bit, phys_cpu_present_map);
-}
-
-#define apicid_cluster(apicid) (apicid & 0xF0)
-
-static inline unsigned long calculate_ldr(int cpu)
-{
-       unsigned long id;
-       id = xapic_phys_to_log_apicid(cpu);
-       return (SET_APIC_LOGICAL_ID(id));
-}
-
-/*
- * Set up the logical destination ID.
- *
- * Intel recommends to set DFR, LdR and TPR before enabling
- * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
- * document number 292116).  So here it goes...
- */
-static inline void init_apic_ldr_cluster(void)
-{
-       unsigned long val;
-       int cpu = smp_processor_id();
-
-       apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER);
-       val = calculate_ldr(cpu);
-       apic_write(APIC_LDR, val);
-}
-
-static inline void init_apic_ldr(void)
-{
-       unsigned long val;
-       int cpu = smp_processor_id();
-
-       apic_write(APIC_DFR, APIC_DFR_VALUE);
-       val = calculate_ldr(cpu);
-       apic_write(APIC_LDR, val);
-}
-
-extern int apic_version [MAX_APICS];
-static inline void setup_apic_routing(void)
-{
-       int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
-       printk("Enabling APIC mode:  %s. Using %d I/O APICs, target cpus %lx\n",
-               (apic_version[apic] == 0x14) ?
-                       "Physical Cluster" : "Logical Cluster",
-                       nr_ioapics, cpus_addr(*target_cpus())[0]);
-}
-
-static inline int multi_timer_check(int apic, int irq)
-{
-       return 0;
-}
-
-static inline int apicid_to_node(int logical_apicid)
-{
-       return 0;
-}
-
-
-static inline int cpu_present_to_apicid(int mps_cpu)
-{
-       if (!mps_cpu)
-               return boot_cpu_physical_apicid;
-       else if (mps_cpu < nr_cpu_ids)
-               return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
-       else
-               return BAD_APICID;
-}
-
-static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
-{
-       static int id = 0;
-       physid_mask_t mask;
-       mask = physid_mask_of_physid(id);
-       ++id;
-       return mask;
-}
-
-extern u8 cpu_2_logical_apicid[];
-/* Mapping from cpu number to logical apicid */
-static inline int cpu_to_logical_apicid(int cpu)
-{
-#ifdef CONFIG_SMP
-       if (cpu >= nr_cpu_ids)
-               return BAD_APICID;
-       return (int)cpu_2_logical_apicid[cpu];
-#else
-       return logical_smp_processor_id();
-#endif
-}
-
-static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
-{
-       /* For clustered we don't have a good way to do this yet - hack */
-       return physids_promote(0xff);
-}
-
-
-static inline void setup_portio_remap(void)
-{
-}
-
-extern unsigned int boot_cpu_physical_apicid;
-static inline int check_phys_apicid_present(int cpu_physical_apicid)
-{
-       boot_cpu_physical_apicid = read_apic_id();
-       return (1);
-}
-
-static inline unsigned int
-cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
-{
-       int num_bits_set;
-       int cpus_found = 0;
-       int cpu;
-       int apicid;
-
-       num_bits_set = cpumask_weight(cpumask);
-       /* Return id to all */
-       if (num_bits_set == nr_cpu_ids)
-               return 0xFF;
-       /*
-        * The cpus in the mask must all be on the apic cluster.  If are not
-        * on the same apicid cluster return default value of TARGET_CPUS.
-        */
-       cpu = cpumask_first(cpumask);
-       apicid = cpu_to_logical_apicid(cpu);
-       while (cpus_found < num_bits_set) {
-               if (cpumask_test_cpu(cpu, cpumask)) {
-                       int new_apicid = cpu_to_logical_apicid(cpu);
-                       if (apicid_cluster(apicid) !=
-                                       apicid_cluster(new_apicid)){
-                               printk ("%s: Not a valid mask!\n", __func__);
-                               return 0xFF;
-                       }
-                       apicid = new_apicid;
-                       cpus_found++;
-               }
-               cpu++;
-       }
-       return apicid;
-}
-
-static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
-{
-       int num_bits_set;
-       int cpus_found = 0;
-       int cpu;
-       int apicid;
-
-       num_bits_set = cpus_weight(*cpumask);
-       /* Return id to all */
-       if (num_bits_set == nr_cpu_ids)
-               return cpu_to_logical_apicid(0);
-       /*
-        * The cpus in the mask must all be on the apic cluster.  If are not
-        * on the same apicid cluster return default value of TARGET_CPUS.
-        */
-       cpu = first_cpu(*cpumask);
-       apicid = cpu_to_logical_apicid(cpu);
-       while (cpus_found < num_bits_set) {
-               if (cpu_isset(cpu, *cpumask)) {
-                       int new_apicid = cpu_to_logical_apicid(cpu);
-                       if (apicid_cluster(apicid) !=
-                                       apicid_cluster(new_apicid)){
-                               printk ("%s: Not a valid mask!\n", __func__);
-                               return cpu_to_logical_apicid(0);
-                       }
-                       apicid = new_apicid;
-                       cpus_found++;
-               }
-               cpu++;
-       }
-       return apicid;
-}
-
-
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
-                                                 const struct cpumask *andmask)
-{
-       int apicid = cpu_to_logical_apicid(0);
-       cpumask_var_t cpumask;
-
-       if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
-               return apicid;
-
-       cpumask_and(cpumask, inmask, andmask);
-       cpumask_and(cpumask, cpumask, cpu_online_mask);
-       apicid = cpu_mask_to_apicid(cpumask);
-
-       free_cpumask_var(cpumask);
-       return apicid;
-}
-
-static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-{
-       return cpuid_apic >> index_msb;
-}
-
-#endif /* __ASM_ES7000_APIC_H */
diff --git a/arch/x86/include/asm/es7000/apicdef.h b/arch/x86/include/asm/es7000/apicdef.h
deleted file mode 100644 (file)
index 8b234a3..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ASM_ES7000_APICDEF_H
-#define __ASM_ES7000_APICDEF_H
-
-#define                APIC_ID_MASK            (0xFF<<24)
-
-static inline unsigned get_apic_id(unsigned long x)
-{
-       return (((x)>>24)&0xFF);
-}
-
-#define                GET_APIC_ID(x)  get_apic_id(x)
-
-#endif
diff --git a/arch/x86/include/asm/es7000/ipi.h b/arch/x86/include/asm/es7000/ipi.h
deleted file mode 100644 (file)
index 7e8ed24..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef __ASM_ES7000_IPI_H
-#define __ASM_ES7000_IPI_H
-
-void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
-void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-
-static inline void send_IPI_mask(const struct cpumask *mask, int vector)
-{
-       send_IPI_mask_sequence(mask, vector);
-}
-
-static inline void send_IPI_allbutself(int vector)
-{
-       send_IPI_mask_allbutself(cpu_online_mask, vector);
-}
-
-static inline void send_IPI_all(int vector)
-{
-       send_IPI_mask(cpu_online_mask, vector);
-}
-
-#endif /* __ASM_ES7000_IPI_H */
diff --git a/arch/x86/include/asm/es7000/mpparse.h b/arch/x86/include/asm/es7000/mpparse.h
deleted file mode 100644 (file)
index c1629b0..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __ASM_ES7000_MPPARSE_H
-#define __ASM_ES7000_MPPARSE_H
-
-#include <linux/acpi.h>
-
-extern int parse_unisys_oem (char *oemptr);
-extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
-extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr);
-extern void setup_unisys(void);
-
-#ifndef CONFIG_X86_GENERICARCH
-extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
-extern int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid);
-#endif
-
-#ifdef CONFIG_ACPI
-
-static inline int es7000_check_dsdt(void)
-{
-       struct acpi_table_header header;
-
-       if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) &&
-           !strncmp(header.oem_id, "UNISYS", 6))
-               return 1;
-       return 0;
-}
-#endif
-
-#endif /* __ASM_MACH_MPPARSE_H */
diff --git a/arch/x86/include/asm/es7000/wakecpu.h b/arch/x86/include/asm/es7000/wakecpu.h
deleted file mode 100644 (file)
index 78f0daa..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-#ifndef __ASM_ES7000_WAKECPU_H
-#define __ASM_ES7000_WAKECPU_H
-
-#define TRAMPOLINE_PHYS_LOW    0x467
-#define TRAMPOLINE_PHYS_HIGH   0x469
-
-static inline void wait_for_init_deassert(atomic_t *deassert)
-{
-#ifndef CONFIG_ES7000_CLUSTERED_APIC
-       while (!atomic_read(deassert))
-               cpu_relax();
-#endif
-       return;
-}
-
-/* Nothing to do for most platforms, since cleared by the INIT cycle */
-static inline void smp_callin_clear_local_apic(void)
-{
-}
-
-static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
-{
-}
-
-static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
-{
-}
-
-extern void __inquire_remote_apic(int apicid);
-
-static inline void inquire_remote_apic(int apicid)
-{
-       if (apic_verbosity >= APIC_DEBUG)
-               __inquire_remote_apic(apicid);
-}
-
-#endif /* __ASM_MACH_WAKECPU_H */
index c7115c1..047d9ba 100644 (file)
@@ -95,10 +95,6 @@ enum fixed_addresses {
                        (__end_of_permanent_fixed_addresses & 255),
        FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
        FIX_WP_TEST,
-#ifdef CONFIG_ACPI
-       FIX_ACPI_BEGIN,
-       FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
-#endif
 #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
        FIX_OHCI1394_BASE,
 #endif
index 00a30ab..298d9ba 100644 (file)
@@ -50,10 +50,6 @@ enum fixed_addresses {
        FIX_PARAVIRT_BOOTMAP,
 #endif
        __end_of_permanent_fixed_addresses,
-#ifdef CONFIG_ACPI
-       FIX_ACPI_BEGIN,
-       FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
-#endif
 #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
        FIX_OHCI1394_BASE,
 #endif
index d48bee6..273b994 100644 (file)
@@ -1,5 +1,263 @@
+#ifndef _ASM_X86_GENAPIC_H
+#define _ASM_X86_GENAPIC_H
+
+#include <linux/cpumask.h>
+
+#include <asm/mpspec.h>
+#include <asm/atomic.h>
+
+/*
+ * Copyright 2004 James Cleverdon, IBM.
+ * Subject to the GNU Public License, v.2
+ *
+ * Generic APIC sub-arch data struct.
+ *
+ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
+ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
+ * James Cleverdon.
+ */
+struct genapic {
+       char *name;
+
+       int (*probe)(void);
+       int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
+       int (*apic_id_registered)(void);
+
+       u32 irq_delivery_mode;
+       u32 irq_dest_mode;
+
+       const struct cpumask *(*target_cpus)(void);
+
+       int disable_esr;
+
+       int dest_logical;
+       unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
+       unsigned long (*check_apicid_present)(int apicid);
+
+       void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
+       void (*init_apic_ldr)(void);
+
+       physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
+
+       void (*setup_apic_routing)(void);
+       int (*multi_timer_check)(int apic, int irq);
+       int (*apicid_to_node)(int logical_apicid);
+       int (*cpu_to_logical_apicid)(int cpu);
+       int (*cpu_present_to_apicid)(int mps_cpu);
+       physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
+       void (*setup_portio_remap)(void);
+       int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
+       void (*enable_apic_mode)(void);
+       int (*phys_pkg_id)(int cpuid_apic, int index_msb);
+
+       /*
+        * When one of the next two hooks returns 1 the genapic
+        * is switched to this. Essentially they are additional
+        * probe functions:
+        */
+       int (*mps_oem_check)(struct mpc_table *mpc, char *oem, char *productid);
+
+       unsigned int (*get_apic_id)(unsigned long x);
+       unsigned long (*set_apic_id)(unsigned int id);
+       unsigned long apic_id_mask;
+
+       unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
+       unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+                                              const struct cpumask *andmask);
+
+       /* ipi */
+       void (*send_IPI_mask)(const struct cpumask *mask, int vector);
+       void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
+                                        int vector);
+       void (*send_IPI_allbutself)(int vector);
+       void (*send_IPI_all)(int vector);
+       void (*send_IPI_self)(int vector);
+
+       /* wakeup_secondary_cpu */
+       int (*wakeup_cpu)(int apicid, unsigned long start_eip);
+
+       int trampoline_phys_low;
+       int trampoline_phys_high;
+
+       void (*wait_for_init_deassert)(atomic_t *deassert);
+       void (*smp_callin_clear_local_apic)(void);
+       void (*store_NMI_vector)(unsigned short *high, unsigned short *low);
+       void (*inquire_remote_apic)(int apicid);
+};
+
+extern struct genapic *apic;
+
+/*
+ * Warm reset vector default position:
+ */
+#define DEFAULT_TRAMPOLINE_PHYS_LOW            0x467
+#define DEFAULT_TRAMPOLINE_PHYS_HIGH           0x469
+
 #ifdef CONFIG_X86_32
-# include "genapic_32.h"
+extern void es7000_update_genapic_to_cluster(void);
 #else
-# include "genapic_64.h"
+extern struct genapic apic_flat;
+extern struct genapic apic_physflat;
+extern struct genapic apic_x2apic_cluster;
+extern struct genapic apic_x2apic_phys;
+extern int default_acpi_madt_oem_check(char *, char *);
+
+extern void apic_send_IPI_self(int vector);
+
+extern struct genapic apic_x2apic_uv_x;
+DECLARE_PER_CPU(int, x2apic_extra_bits);
+
+extern void default_setup_apic_routing(void);
+
+extern int default_cpu_present_to_apicid(int mps_cpu);
+extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
 #endif
+
+static inline void default_wait_for_init_deassert(atomic_t *deassert)
+{
+       while (!atomic_read(deassert))
+               cpu_relax();
+       return;
+}
+
+extern void generic_bigsmp_probe(void);
+
+
+#ifdef CONFIG_X86_LOCAL_APIC
+
+#include <asm/smp.h>
+
+#define APIC_DFR_VALUE (APIC_DFR_FLAT)
+
+static inline const struct cpumask *default_target_cpus(void)
+{
+#ifdef CONFIG_SMP
+       return cpu_online_mask;
+#else
+       return cpumask_of(0);
+#endif
+}
+
+DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
+
+
+static inline unsigned int read_apic_id(void)
+{
+       unsigned int reg;
+
+       reg = apic_read(APIC_ID);
+
+       return apic->get_apic_id(reg);
+}
+
+#ifdef CONFIG_X86_64
+extern void default_setup_apic_routing(void);
+#else
+
+/*
+ * Set up the logical destination ID.
+ *
+ * Intel recommends to set DFR, LDR and TPR before enabling
+ * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
+ * document number 292116).  So here it goes...
+ */
+extern void default_init_apic_ldr(void);
+
+static inline int default_apic_id_registered(void)
+{
+       return physid_isset(read_apic_id(), phys_cpu_present_map);
+}
+
+static inline unsigned int
+default_cpu_mask_to_apicid(const struct cpumask *cpumask)
+{
+       return cpumask_bits(cpumask)[0];
+}
+
+static inline unsigned int
+default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                              const struct cpumask *andmask)
+{
+       unsigned long mask1 = cpumask_bits(cpumask)[0];
+       unsigned long mask2 = cpumask_bits(andmask)[0];
+       unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
+
+       return (unsigned int)(mask1 & mask2 & mask3);
+}
+
+static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
+{
+       return cpuid_apic >> index_msb;
+}
+
+static inline void default_setup_apic_routing(void)
+{
+#ifdef CONFIG_X86_IO_APIC
+       printk("Enabling APIC mode:  %s.  Using %d I/O APICs\n",
+                                       "Flat", nr_ioapics);
+#endif
+}
+
+extern int default_apicid_to_node(int logical_apicid);
+
+#endif
+
+static inline unsigned long default_check_apicid_used(physid_mask_t bitmap, int apicid)
+{
+       return physid_isset(apicid, bitmap);
+}
+
+static inline unsigned long default_check_apicid_present(int bit)
+{
+       return physid_isset(bit, phys_cpu_present_map);
+}
+
+static inline physid_mask_t default_ioapic_phys_id_map(physid_mask_t phys_map)
+{
+       return phys_map;
+}
+
+/* Mapping from cpu number to logical apicid */
+static inline int default_cpu_to_logical_apicid(int cpu)
+{
+       return 1 << cpu;
+}
+
+static inline int __default_cpu_present_to_apicid(int mps_cpu)
+{
+       if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
+               return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
+       else
+               return BAD_APICID;
+}
+
+static inline int
+__default_check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+       return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
+}
+
+#ifdef CONFIG_X86_32
+static inline int default_cpu_present_to_apicid(int mps_cpu)
+{
+       return __default_cpu_present_to_apicid(mps_cpu);
+}
+
+static inline int
+default_check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+       return __default_check_phys_apicid_present(boot_cpu_physical_apicid);
+}
+#else
+extern int default_cpu_present_to_apicid(int mps_cpu);
+extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
+#endif
+
+static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid)
+{
+       return physid_mask_of_physid(phys_apicid);
+}
+
+#endif /* CONFIG_X86_LOCAL_APIC */
+
+#endif /* _ASM_X86_GENAPIC_64_H */
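
The per-subarch mach_apic.h/mach_ipi.h compile-time hooks being deleted below all collapse into this single struct genapic, selected at runtime through the global apic pointer; generic code then dispatches through it. For example, something along these lines (a sketch, not taken from the patch):

	#include <asm/genapic.h>

	static void demo_kick_cpus(const struct cpumask *mask, int vector)
	{
		/* one indirect call replaces the old per-subarch send_IPI_mask() */
		apic->send_IPI_mask(mask, vector);
	}
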
diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h
deleted file mode 100644 (file)
index 2c05b73..0000000
+++ /dev/null
@@ -1,148 +0,0 @@
-#ifndef _ASM_X86_GENAPIC_32_H
-#define _ASM_X86_GENAPIC_32_H
-
-#include <asm/mpspec.h>
-#include <asm/atomic.h>
-
-/*
- * Generic APIC driver interface.
- *
- * An straight forward mapping of the APIC related parts of the
- * x86 subarchitecture interface to a dynamic object.
- *
- * This is used by the "generic" x86 subarchitecture.
- *
- * Copyright 2003 Andi Kleen, SuSE Labs.
- */
-
-struct mpc_bus;
-struct mpc_table;
-struct mpc_cpu;
-
-struct genapic {
-       char *name;
-       int (*probe)(void);
-
-       int (*apic_id_registered)(void);
-       const struct cpumask *(*target_cpus)(void);
-       int int_delivery_mode;
-       int int_dest_mode;
-       int ESR_DISABLE;
-       int apic_destination_logical;
-       unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
-       unsigned long (*check_apicid_present)(int apicid);
-       int no_balance_irq;
-       int no_ioapic_check;
-       void (*init_apic_ldr)(void);
-       physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
-
-       void (*setup_apic_routing)(void);
-       int (*multi_timer_check)(int apic, int irq);
-       int (*apicid_to_node)(int logical_apicid);
-       int (*cpu_to_logical_apicid)(int cpu);
-       int (*cpu_present_to_apicid)(int mps_cpu);
-       physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
-       void (*setup_portio_remap)(void);
-       int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
-       void (*enable_apic_mode)(void);
-       u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb);
-
-       /* mpparse */
-       /* When one of the next two hooks returns 1 the genapic
-          is switched to this. Essentially they are additional probe
-          functions. */
-       int (*mps_oem_check)(struct mpc_table *mpc, char *oem,
-                            char *productid);
-       int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
-
-       unsigned (*get_apic_id)(unsigned long x);
-       unsigned long apic_id_mask;
-       unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
-       unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
-                                              const struct cpumask *andmask);
-       void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
-
-#ifdef CONFIG_SMP
-       /* ipi */
-       void (*send_IPI_mask)(const struct cpumask *mask, int vector);
-       void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
-                                        int vector);
-       void (*send_IPI_allbutself)(int vector);
-       void (*send_IPI_all)(int vector);
-#endif
-       int (*wakeup_cpu)(int apicid, unsigned long start_eip);
-       int trampoline_phys_low;
-       int trampoline_phys_high;
-       void (*wait_for_init_deassert)(atomic_t *deassert);
-       void (*smp_callin_clear_local_apic)(void);
-       void (*store_NMI_vector)(unsigned short *high, unsigned short *low);
-       void (*restore_NMI_vector)(unsigned short *high, unsigned short *low);
-       void (*inquire_remote_apic)(int apicid);
-};
-
-#define APICFUNC(x) .x = x,
-
-/* More functions could be probably marked IPIFUNC and save some space
-   in UP GENERICARCH kernels, but I don't have the nerve right now
-   to untangle this mess. -AK  */
-#ifdef CONFIG_SMP
-#define IPIFUNC(x) APICFUNC(x)
-#else
-#define IPIFUNC(x)
-#endif
-
-#define APIC_INIT(aname, aprobe)                       \
-{                                                      \
-       .name = aname,                                  \
-       .probe = aprobe,                                \
-       .int_delivery_mode = INT_DELIVERY_MODE,         \
-       .int_dest_mode = INT_DEST_MODE,                 \
-       .no_balance_irq = NO_BALANCE_IRQ,               \
-       .ESR_DISABLE = esr_disable,                     \
-       .apic_destination_logical = APIC_DEST_LOGICAL,  \
-       APICFUNC(apic_id_registered)                    \
-       APICFUNC(target_cpus)                           \
-       APICFUNC(check_apicid_used)                     \
-       APICFUNC(check_apicid_present)                  \
-       APICFUNC(init_apic_ldr)                         \
-       APICFUNC(ioapic_phys_id_map)                    \
-       APICFUNC(setup_apic_routing)                    \
-       APICFUNC(multi_timer_check)                     \
-       APICFUNC(apicid_to_node)                        \
-       APICFUNC(cpu_to_logical_apicid)                 \
-       APICFUNC(cpu_present_to_apicid)                 \
-       APICFUNC(apicid_to_cpu_present)                 \
-       APICFUNC(setup_portio_remap)                    \
-       APICFUNC(check_phys_apicid_present)             \
-       APICFUNC(mps_oem_check)                         \
-       APICFUNC(get_apic_id)                           \
-       .apic_id_mask = APIC_ID_MASK,                   \
-       APICFUNC(cpu_mask_to_apicid)                    \
-       APICFUNC(cpu_mask_to_apicid_and)                \
-       APICFUNC(vector_allocation_domain)              \
-       APICFUNC(acpi_madt_oem_check)                   \
-       IPIFUNC(send_IPI_mask)                          \
-       IPIFUNC(send_IPI_allbutself)                    \
-       IPIFUNC(send_IPI_all)                           \
-       APICFUNC(enable_apic_mode)                      \
-       APICFUNC(phys_pkg_id)                           \
-       .trampoline_phys_low = TRAMPOLINE_PHYS_LOW,             \
-       .trampoline_phys_high = TRAMPOLINE_PHYS_HIGH,           \
-       APICFUNC(wait_for_init_deassert)                \
-       APICFUNC(smp_callin_clear_local_apic)           \
-       APICFUNC(store_NMI_vector)                      \
-       APICFUNC(restore_NMI_vector)                    \
-       APICFUNC(inquire_remote_apic)                   \
-}
-
-extern struct genapic *genapic;
-extern void es7000_update_genapic_to_cluster(void);
-
-enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
-#define get_uv_system_type()           UV_NONE
-#define is_uv_system()                 0
-#define uv_wakeup_secondary(a, b)      1
-#define uv_system_init()               do {} while (0)
-
-
-#endif /* _ASM_X86_GENAPIC_32_H */
diff --git a/arch/x86/include/asm/genapic_64.h b/arch/x86/include/asm/genapic_64.h
deleted file mode 100644 (file)
index adf32fb..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-#ifndef _ASM_X86_GENAPIC_64_H
-#define _ASM_X86_GENAPIC_64_H
-
-#include <linux/cpumask.h>
-
-/*
- * Copyright 2004 James Cleverdon, IBM.
- * Subject to the GNU Public License, v.2
- *
- * Generic APIC sub-arch data struct.
- *
- * Hacked for x86-64 by James Cleverdon from i386 architecture code by
- * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
- * James Cleverdon.
- */
-
-struct genapic {
-       char *name;
-       int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
-       u32 int_delivery_mode;
-       u32 int_dest_mode;
-       int (*apic_id_registered)(void);
-       const struct cpumask *(*target_cpus)(void);
-       void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
-       void (*init_apic_ldr)(void);
-       /* ipi */
-       void (*send_IPI_mask)(const struct cpumask *mask, int vector);
-       void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
-                                        int vector);
-       void (*send_IPI_allbutself)(int vector);
-       void (*send_IPI_all)(int vector);
-       void (*send_IPI_self)(int vector);
-       /* */
-       unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
-       unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
-                                              const struct cpumask *andmask);
-       unsigned int (*phys_pkg_id)(int index_msb);
-       unsigned int (*get_apic_id)(unsigned long x);
-       unsigned long (*set_apic_id)(unsigned int id);
-       unsigned long apic_id_mask;
-       /* wakeup_secondary_cpu */
-       int (*wakeup_cpu)(int apicid, unsigned long start_eip);
-};
-
-extern struct genapic *genapic;
-
-extern struct genapic apic_flat;
-extern struct genapic apic_physflat;
-extern struct genapic apic_x2apic_cluster;
-extern struct genapic apic_x2apic_phys;
-extern int acpi_madt_oem_check(char *, char *);
-
-extern void apic_send_IPI_self(int vector);
-enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
-extern enum uv_system_type get_uv_system_type(void);
-extern int is_uv_system(void);
-
-extern struct genapic apic_x2apic_uv_x;
-DECLARE_PER_CPU(int, x2apic_extra_bits);
-extern void uv_cpu_init(void);
-extern void uv_system_init(void);
-extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
-
-extern void setup_apic_routing(void);
-
-#endif /* _ASM_X86_GENAPIC_64_H */
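
Both headers removed above express the same idea: an APIC driver modelled as a structure of function pointers, selected at boot by calling each candidate's probe() hook. A rough, self-contained illustration of that probing pattern follows; the reduced structure, driver names and probe results are invented for the sketch and are not the kernel's code.

#include <stdio.h>

/* Heavily reduced stand-in for struct genapic: just a name and a probe hook. */
struct fake_genapic {
	const char *name;
	int (*probe)(void);
};

static int probe_flat(void)     { return 1; }  /* pretend the flat driver matches */
static int probe_physflat(void) { return 0; }  /* pretend physflat does not match */

static struct fake_genapic apic_flat_stub     = { "flat",     probe_flat     };
static struct fake_genapic apic_physflat_stub = { "physflat", probe_physflat };

/* NULL-terminated candidate table, walked once at "boot". */
static struct fake_genapic *candidates[] = {
	&apic_physflat_stub, &apic_flat_stub, NULL,
};

int main(void)
{
	for (struct fake_genapic **p = candidates; *p; p++) {
		if ((*p)->probe()) {
			printf("selected APIC driver: %s\n", (*p)->name);
			break;
		}
	}
	return 0;
}
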
index 000787d..176f058 100644 (file)
@@ -1,11 +1,52 @@
-#ifdef CONFIG_X86_32
-# include "hardirq_32.h"
-#else
-# include "hardirq_64.h"
+#ifndef _ASM_X86_HARDIRQ_H
+#define _ASM_X86_HARDIRQ_H
+
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+typedef struct {
+       unsigned int __softirq_pending;
+       unsigned int __nmi_count;       /* arch dependent */
+       unsigned int irq0_irqs;
+#ifdef CONFIG_X86_LOCAL_APIC
+       unsigned int apic_timer_irqs;   /* arch dependent */
+       unsigned int irq_spurious_count;
+#endif
+#ifdef CONFIG_SMP
+       unsigned int irq_resched_count;
+       unsigned int irq_call_count;
+       unsigned int irq_tlb_count;
+#endif
+#ifdef CONFIG_X86_MCE
+       unsigned int irq_thermal_count;
+# ifdef CONFIG_X86_64
+       unsigned int irq_threshold_count;
+# endif
 #endif
+} ____cacheline_aligned irq_cpustat_t;
+
+DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
+
+/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
+#define MAX_HARDIRQS_PER_CPU NR_VECTORS
+
+#define __ARCH_IRQ_STAT
+
+#define inc_irq_stat(member)   percpu_add(irq_stat.member, 1)
+
+#define local_softirq_pending()        percpu_read(irq_stat.__softirq_pending)
+
+#define __ARCH_SET_SOFTIRQ_PENDING
+
+#define set_softirq_pending(x) percpu_write(irq_stat.__softirq_pending, (x))
+#define or_softirq_pending(x)  percpu_or(irq_stat.__softirq_pending, (x))
+
+extern void ack_bad_irq(unsigned int irq);
 
 extern u64 arch_irq_stat_cpu(unsigned int cpu);
 #define arch_irq_stat_cpu      arch_irq_stat_cpu
 
 extern u64 arch_irq_stat(void);
 #define arch_irq_stat          arch_irq_stat
+
+#endif /* _ASM_X86_HARDIRQ_H */
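
The consolidated hardirq.h above keeps one irq_cpustat_t per CPU and bumps its fields through inc_irq_stat(). A user-space sketch of the same bookkeeping pattern is shown below; the fixed CPU count and the plain array stand in for the kernel's per-cpu machinery and are assumptions of the sketch.

#include <stdio.h>

#define FAKE_NR_CPUS 4

/* Cut-down analogue of irq_cpustat_t: a couple of per-CPU event counters. */
struct fake_irq_cpustat {
	unsigned int apic_timer_irqs;
	unsigned int irq_call_count;
};

static struct fake_irq_cpustat irq_stat[FAKE_NR_CPUS];

/* Analogue of inc_irq_stat(member): bump a field on the "current" CPU. */
#define inc_irq_stat(cpu, member) (irq_stat[cpu].member++)

int main(void)
{
	inc_irq_stat(0, apic_timer_irqs);
	inc_irq_stat(0, apic_timer_irqs);
	inc_irq_stat(1, irq_call_count);

	/* Roughly what a /proc/interrupts-style summary does: walk the CPUs. */
	for (int cpu = 0; cpu < FAKE_NR_CPUS; cpu++)
		printf("cpu%d: timer=%u call=%u\n", cpu,
		       irq_stat[cpu].apic_timer_irqs,
		       irq_stat[cpu].irq_call_count);
	return 0;
}
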
diff --git a/arch/x86/include/asm/hardirq_32.h b/arch/x86/include/asm/hardirq_32.h
deleted file mode 100644 (file)
index cf7954d..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-#ifndef _ASM_X86_HARDIRQ_32_H
-#define _ASM_X86_HARDIRQ_32_H
-
-#include <linux/threads.h>
-#include <linux/irq.h>
-
-typedef struct {
-       unsigned int __softirq_pending;
-       unsigned long idle_timestamp;
-       unsigned int __nmi_count;       /* arch dependent */
-       unsigned int apic_timer_irqs;   /* arch dependent */
-       unsigned int irq0_irqs;
-       unsigned int irq_resched_count;
-       unsigned int irq_call_count;
-       unsigned int irq_tlb_count;
-       unsigned int irq_thermal_count;
-       unsigned int irq_spurious_count;
-} ____cacheline_aligned irq_cpustat_t;
-
-DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
-
-#define __ARCH_IRQ_STAT
-#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)
-
-#define inc_irq_stat(member)   (__get_cpu_var(irq_stat).member++)
-
-void ack_bad_irq(unsigned int irq);
-#include <linux/irq_cpustat.h>
-
-#endif /* _ASM_X86_HARDIRQ_32_H */
diff --git a/arch/x86/include/asm/hardirq_64.h b/arch/x86/include/asm/hardirq_64.h
deleted file mode 100644 (file)
index b5a6b5d..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef _ASM_X86_HARDIRQ_64_H
-#define _ASM_X86_HARDIRQ_64_H
-
-#include <linux/threads.h>
-#include <linux/irq.h>
-#include <asm/pda.h>
-#include <asm/apic.h>
-
-/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
-#define MAX_HARDIRQS_PER_CPU NR_VECTORS
-
-#define __ARCH_IRQ_STAT 1
-
-#define inc_irq_stat(member)   add_pda(member, 1)
-
-#define local_softirq_pending() read_pda(__softirq_pending)
-
-#define __ARCH_SET_SOFTIRQ_PENDING 1
-
-#define set_softirq_pending(x) write_pda(__softirq_pending, (x))
-#define or_softirq_pending(x)  or_pda(__softirq_pending, (x))
-
-extern void ack_bad_irq(unsigned int irq);
-
-#endif /* _ASM_X86_HARDIRQ_64_H */
index 8de644b..370e1c8 100644 (file)
@@ -25,8 +25,6 @@
 #include <asm/irq.h>
 #include <asm/sections.h>
 
-#define platform_legacy_irq(irq)       ((irq) < 16)
-
 /* Interrupt handlers registered during init_IRQ */
 extern void apic_timer_interrupt(void);
 extern void error_interrupt(void);
@@ -58,7 +56,7 @@ extern void make_8259A_irq(unsigned int irq);
 extern void init_8259A(int aeoi);
 
 /* IOAPIC */
-#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
+#define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs))
 extern unsigned long io_apic_irqs;
 
 extern void init_VISWS_APIC_irqs(void);
@@ -67,15 +65,7 @@ extern void disable_IO_APIC(void);
 extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
 extern void setup_ioapic_dest(void);
 
-#ifdef CONFIG_X86_64
 extern void enable_IO_APIC(void);
-#endif
-
-/* IPI functions */
-#ifdef CONFIG_X86_32
-extern void send_IPI_self(int vector);
-#endif
-extern void send_IPI(int dest, int vector);
 
 /* Statistics */
 extern atomic_t irq_err_count;
@@ -84,21 +74,11 @@ extern atomic_t irq_mis_count;
 /* EISA */
 extern void eisa_set_level_irq(unsigned int irq);
 
-/* Voyager functions */
-extern asmlinkage void vic_cpi_interrupt(void);
-extern asmlinkage void vic_sys_interrupt(void);
-extern asmlinkage void vic_cmn_interrupt(void);
-extern asmlinkage void qic_timer_interrupt(void);
-extern asmlinkage void qic_invalidate_interrupt(void);
-extern asmlinkage void qic_reschedule_interrupt(void);
-extern asmlinkage void qic_enable_irq_interrupt(void);
-extern asmlinkage void qic_call_function_interrupt(void);
-
 /* SMP */
 extern void smp_apic_timer_interrupt(struct pt_regs *);
 extern void smp_spurious_interrupt(struct pt_regs *);
 extern void smp_error_interrupt(struct pt_regs *);
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_SMP
 extern void smp_reschedule_interrupt(struct pt_regs *);
 extern void smp_call_function_interrupt(struct pt_regs *);
 extern void smp_call_function_single_interrupt(struct pt_regs *);
index 1dbbdf4..e5a2ab4 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <linux/compiler.h>
 #include <asm-generic/int-ll64.h>
+#include <asm/page.h>
 
 #define build_mmio_read(name, size, type, reg, barrier) \
 static inline type name(const volatile void __iomem *addr) \
@@ -80,6 +81,95 @@ static inline void writeq(__u64 val, volatile void __iomem *addr)
 #define readq                  readq
 #define writeq                 writeq
 
+/**
+ *     virt_to_phys    -       map virtual addresses to physical
+ *     @address: address to remap
+ *
+ *     The returned physical address is the physical (CPU) mapping for
+ *     the memory address given. It is only valid to use this function on
+ *     addresses directly mapped or allocated via kmalloc.
+ *
+ *     This function does not give bus mappings for DMA transfers. In
+ *     almost all conceivable cases a device driver should not be using
+ *     this function
+ */
+
+static inline phys_addr_t virt_to_phys(volatile void *address)
+{
+       return __pa(address);
+}
+
+/**
+ *     phys_to_virt    -       map physical address to virtual
+ *     @address: address to remap
+ *
+ *     The returned virtual address is a current CPU mapping for
+ *     the memory address given. It is only valid to use this function on
+ *     addresses that have a kernel mapping
+ *
+ *     This function does not handle bus mappings for DMA transfers. In
+ *     almost all conceivable cases a device driver should not be using
+ *     this function
+ */
+
+static inline void *phys_to_virt(phys_addr_t address)
+{
+       return __va(address);
+}
+
+/*
+ * Change "struct page" to physical address.
+ */
+#define page_to_phys(page)    ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+
+/*
+ * ISA I/O bus memory addresses are 1:1 with the physical address.
+ */
+#define isa_virt_to_bus virt_to_phys
+#define isa_page_to_bus page_to_phys
+#define isa_bus_to_virt phys_to_virt
+
+/*
+ * However PCI ones are not necessarily 1:1 and therefore these interfaces
+ * are forbidden in portable PCI drivers.
+ *
+ * Allow them on x86 for legacy drivers, though.
+ */
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+
+/**
+ * ioremap     -   map bus memory into CPU space
+ * @offset:    bus address of the memory
+ * @size:      size of the resource to map
+ *
+ * ioremap performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * If the area you are trying to map is a PCI BAR you should have a
+ * look at pci_iomap().
+ */
+extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
+                               unsigned long prot_val);
+
+/*
+ * The default ioremap() behavior is non-cached:
+ */
+static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
+{
+       return ioremap_nocache(offset, size);
+}
+
+extern void iounmap(volatile void __iomem *addr);
+
+extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
+
+
 #ifdef CONFIG_X86_32
 # include "io_32.h"
 #else
@@ -91,7 +181,7 @@ extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
 
 extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                                unsigned long prot_val);
-extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);
+extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
 
 /*
  * early_ioremap() and early_iounmap() are for temporary early boot-time
@@ -105,5 +195,6 @@ extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
 extern void early_iounmap(void __iomem *addr, unsigned long size);
 extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
 
+#define IO_SPACE_LIMIT 0xffff
 
 #endif /* _ASM_X86_IO_H */
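
The kernel-doc comments folded into io.h above describe the usual MMIO access sequence; a minimal module-style sketch of that sequence follows. The physical base address, region size and register offset are made up for illustration, and the fragment assumes the normal kernel build environment.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>

#define EXAMPLE_MMIO_BASE	0xfeb00000UL	/* hypothetical device address */
#define EXAMPLE_MMIO_SIZE	0x1000		/* hypothetical region size    */
#define EXAMPLE_STATUS_REG	0x04		/* hypothetical register       */

static void __iomem *example_regs;

static int __init example_init(void)
{
	/* The default ioremap() is uncached, as the comment block above notes. */
	example_regs = ioremap(EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_SIZE);
	if (!example_regs)
		return -ENOMEM;

	pr_info("example status: %#x\n", readl(example_regs + EXAMPLE_STATUS_REG));
	return 0;
}

static void __exit example_exit(void)
{
	iounmap(example_regs);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
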
index d8e242e..a299900 100644 (file)
@@ -37,8 +37,6 @@
   *  - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
   */
 
-#define IO_SPACE_LIMIT 0xffff
-
 #define XQUAD_PORTIO_BASE 0xfe400000
 #define XQUAD_PORTIO_QUAD 0x40000  /* 256k per quad. */
 
  */
 #define xlate_dev_kmem_ptr(p)  p
 
-/**
- *     virt_to_phys    -       map virtual addresses to physical
- *     @address: address to remap
- *
- *     The returned physical address is the physical (CPU) mapping for
- *     the memory address given. It is only valid to use this function on
- *     addresses directly mapped or allocated via kmalloc.
- *
- *     This function does not give bus mappings for DMA transfers. In
- *     almost all conceivable cases a device driver should not be using
- *     this function
- */
-
-static inline unsigned long virt_to_phys(volatile void *address)
-{
-       return __pa(address);
-}
-
-/**
- *     phys_to_virt    -       map physical address to virtual
- *     @address: address to remap
- *
- *     The returned virtual address is a current CPU mapping for
- *     the memory address given. It is only valid to use this function on
- *     addresses that have a kernel mapping
- *
- *     This function does not handle bus mappings for DMA transfers. In
- *     almost all conceivable cases a device driver should not be using
- *     this function
- */
-
-static inline void *phys_to_virt(unsigned long address)
-{
-       return __va(address);
-}
-
-/*
- * Change "struct page" to physical address.
- */
-#define page_to_phys(page)    ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-
-/**
- * ioremap     -   map bus memory into CPU space
- * @offset:    bus address of the memory
- * @size:      size of the resource to map
- *
- * ioremap performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address.
- *
- * If the area you are trying to map is a PCI BAR you should have a
- * look at pci_iomap().
- */
-extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
-extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
-extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
-                               unsigned long prot_val);
-
-/*
- * The default ioremap() behavior is non-cached:
- */
-static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
-{
-       return ioremap_nocache(offset, size);
-}
-
-extern void iounmap(volatile void __iomem *addr);
-
-/*
- * ISA I/O bus memory addresses are 1:1 with the physical address.
- */
-#define isa_virt_to_bus virt_to_phys
-#define isa_page_to_bus page_to_phys
-#define isa_bus_to_virt phys_to_virt
-
-/*
- * However PCI ones are not necessarily 1:1 and therefore these interfaces
- * are forbidden in portable PCI drivers.
- *
- * Allow them on x86 for legacy drivers, though.
- */
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-
 static inline void
 memset_io(volatile void __iomem *addr, unsigned char val, int count)
 {
index 563c162..2440678 100644 (file)
@@ -136,73 +136,12 @@ __OUTS(b)
 __OUTS(w)
 __OUTS(l)
 
-#define IO_SPACE_LIMIT 0xffff
-
 #if defined(__KERNEL__) && defined(__x86_64__)
 
 #include <linux/vmalloc.h>
 
-#ifndef __i386__
-/*
- * Change virtual addresses to physical addresses and vv.
- * These are pretty trivial
- */
-static inline unsigned long virt_to_phys(volatile void *address)
-{
-       return __pa(address);
-}
-
-static inline void *phys_to_virt(unsigned long address)
-{
-       return __va(address);
-}
-#endif
-
-/*
- * Change "struct page" to physical address.
- */
-#define page_to_phys(page)    ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-
 #include <asm-generic/iomap.h>
 
-/*
- * This one maps high address device memory and turns off caching for that area.
- * it's useful if some control registers are in such an area and write combining
- * or read caching is not desirable:
- */
-extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
-extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
-extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
-                               unsigned long prot_val);
-
-/*
- * The default ioremap() behavior is non-cached:
- */
-static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
-{
-       return ioremap_nocache(offset, size);
-}
-
-extern void iounmap(volatile void __iomem *addr);
-
-extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
-
-/*
- * ISA I/O bus memory addresses are 1:1 with the physical address.
- */
-#define isa_virt_to_bus virt_to_phys
-#define isa_page_to_bus page_to_phys
-#define isa_bus_to_virt phys_to_virt
-
-/*
- * However PCI ones are not necessarily 1:1 and therefore these interfaces
- * are forbidden in portable PCI drivers.
- *
- * Allow them on x86 for legacy drivers, though.
- */
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-
 void __memcpy_fromio(void *, unsigned long, unsigned);
 void __memcpy_toio(unsigned long, const void *, unsigned);
 
index 7a1f44a..59cb4a1 100644 (file)
@@ -114,38 +114,16 @@ struct IR_IO_APIC_route_entry {
 extern int nr_ioapics;
 extern int nr_ioapic_registers[MAX_IO_APICS];
 
-/*
- * MP-BIOS irq configuration table structures:
- */
-
 #define MP_MAX_IOAPIC_PIN 127
 
-struct mp_config_ioapic {
-       unsigned long mp_apicaddr;
-       unsigned int mp_apicid;
-       unsigned char mp_type;
-       unsigned char mp_apicver;
-       unsigned char mp_flags;
-};
-
-struct mp_config_intsrc {
-       unsigned int mp_dstapic;
-       unsigned char mp_type;
-       unsigned char mp_irqtype;
-       unsigned short mp_irqflag;
-       unsigned char mp_srcbus;
-       unsigned char mp_srcbusirq;
-       unsigned char mp_dstirq;
-};
-
 /* I/O APIC entries */
-extern struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
+extern struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
 
 /* # of MP IRQ source entries */
 extern int mp_irq_entries;
 
 /* MP IRQ source entries */
-extern struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
+extern struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
 
 /* non-0 if default (table-less) MP configuration */
 extern int mpc_default_type;
@@ -165,15 +143,6 @@ extern int noioapicreroute;
 /* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */
 extern int timer_through_8259;
 
-static inline void disable_ioapic_setup(void)
-{
-#ifdef CONFIG_PCI
-       noioapicquirk = 1;
-       noioapicreroute = -1;
-#endif
-       skip_ioapic_setup = 1;
-}
-
 /*
  * If we use the IO-APIC for IRQ routing, disable automatic
  * assignment of PCI IRQ's.
@@ -200,6 +169,12 @@ extern void reinit_intr_remapped_IO_APIC(int);
 
 extern void probe_nr_irqs_gsi(void);
 
+extern int setup_ioapic_entry(int apic, int irq,
+                             struct IO_APIC_route_entry *entry,
+                             unsigned int destination, int trigger,
+                             int polarity, int vector);
+extern void ioapic_write_entry(int apic, int pin,
+                              struct IO_APIC_route_entry e);
 #else  /* !CONFIG_X86_IO_APIC */
 #define io_apic_assign_pci_irqs 0
 static const int timer_through_8259 = 0;
index c745a30..5f2efc5 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_IPI_H
 #define _ASM_X86_IPI_H
 
+#ifdef CONFIG_X86_LOCAL_APIC
+
 /*
  * Copyright 2004 James Cleverdon, IBM.
  * Subject to the GNU Public License, v.2
@@ -55,8 +57,8 @@ static inline void __xapic_wait_icr_idle(void)
                cpu_relax();
 }
 
-static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
-                                      unsigned int dest)
+static inline void
+__default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
 {
        /*
         * Subtle. In the case of the 'never do double writes' workaround
@@ -87,8 +89,8 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
  * This is used to send an IPI with no shorthand notation (the destination is
  * specified in bits 56 to 63 of the ICR).
  */
-static inline void __send_IPI_dest_field(unsigned int mask, int vector,
-                                      unsigned int dest)
+static inline void
+__default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
 {
        unsigned long cfg;
 
@@ -117,41 +119,46 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector,
        native_apic_mem_write(APIC_ICR, cfg);
 }
 
-static inline void send_IPI_mask_sequence(const struct cpumask *mask,
-                                         int vector)
-{
-       unsigned long flags;
-       unsigned long query_cpu;
+extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
+                                                int vector);
+extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
+                                                        int vector);
+#include <asm/genapic.h>
 
-       /*
-        * Hack. The clustered APIC addressing mode doesn't allow us to send
-        * to an arbitrary mask, so I do a unicast to each CPU instead.
-        * - mbligh
-        */
-       local_irq_save(flags);
-       for_each_cpu(query_cpu, mask) {
-               __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
-                                     vector, APIC_DEST_PHYSICAL);
-       }
-       local_irq_restore(flags);
+extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
+                                                        int vector);
+extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
+                                                        int vector);
+
+/* Avoid include hell */
+#define NMI_VECTOR 0x02
+
+extern int no_broadcast;
+
+static inline void __default_local_send_IPI_allbutself(int vector)
+{
+       if (no_broadcast || vector == NMI_VECTOR)
+               apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
+       else
+               __default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, apic->dest_logical);
 }
 
-static inline void send_IPI_mask_allbutself(const struct cpumask *mask,
-                                           int vector)
+static inline void __default_local_send_IPI_all(int vector)
 {
-       unsigned long flags;
-       unsigned int query_cpu;
-       unsigned int this_cpu = smp_processor_id();
-
-       /* See Hack comment above */
-
-       local_irq_save(flags);
-       for_each_cpu(query_cpu, mask)
-               if (query_cpu != this_cpu)
-                       __send_IPI_dest_field(
-                               per_cpu(x86_cpu_to_apicid, query_cpu),
-                               vector, APIC_DEST_PHYSICAL);
-       local_irq_restore(flags);
+       if (no_broadcast || vector == NMI_VECTOR)
+               apic->send_IPI_mask(cpu_online_mask, vector);
+       else
+               __default_send_IPI_shortcut(APIC_DEST_ALLINC, vector, apic->dest_logical);
 }
 
+#ifdef CONFIG_X86_32
+extern void default_send_IPI_mask_logical(const struct cpumask *mask,
+                                                int vector);
+extern void default_send_IPI_allbutself(int vector);
+extern void default_send_IPI_all(int vector);
+extern void default_send_IPI_self(int vector);
+#endif
+
+#endif
+
 #endif /* _ASM_X86_IPI_H */
index 592688e..107eb21 100644 (file)
@@ -36,9 +36,11 @@ static inline int irq_canonicalize(int irq)
 extern void fixup_irqs(void);
 #endif
 
-extern unsigned int do_IRQ(struct pt_regs *regs);
 extern void init_IRQ(void);
 extern void native_init_IRQ(void);
+extern bool handle_irq(unsigned irq, struct pt_regs *regs);
+
+extern unsigned int do_IRQ(struct pt_regs *regs);
 
 /* Interrupt vector management */
 extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
index 89c898a..7784322 100644 (file)
@@ -1,5 +1,31 @@
-#ifdef CONFIG_X86_32
-# include "irq_regs_32.h"
-#else
-# include "irq_regs_64.h"
-#endif
+/*
+ * Per-cpu current frame pointer - the location of the last exception frame on
+ * the stack, stored in the per-cpu area.
+ *
+ * Jeremy Fitzhardinge <jeremy@goop.org>
+ */
+#ifndef _ASM_X86_IRQ_REGS_H
+#define _ASM_X86_IRQ_REGS_H
+
+#include <asm/percpu.h>
+
+#define ARCH_HAS_OWN_IRQ_REGS
+
+DECLARE_PER_CPU(struct pt_regs *, irq_regs);
+
+static inline struct pt_regs *get_irq_regs(void)
+{
+       return percpu_read(irq_regs);
+}
+
+static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
+{
+       struct pt_regs *old_regs;
+
+       old_regs = get_irq_regs();
+       percpu_write(irq_regs, new_regs);
+
+       return old_regs;
+}
+
+#endif /* _ASM_X86_IRQ_REGS_H */
diff --git a/arch/x86/include/asm/irq_regs_32.h b/arch/x86/include/asm/irq_regs_32.h
deleted file mode 100644 (file)
index 86afd74..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Per-cpu current frame pointer - the location of the last exception frame on
- * the stack, stored in the per-cpu area.
- *
- * Jeremy Fitzhardinge <jeremy@goop.org>
- */
-#ifndef _ASM_X86_IRQ_REGS_32_H
-#define _ASM_X86_IRQ_REGS_32_H
-
-#include <asm/percpu.h>
-
-#define ARCH_HAS_OWN_IRQ_REGS
-
-DECLARE_PER_CPU(struct pt_regs *, irq_regs);
-
-static inline struct pt_regs *get_irq_regs(void)
-{
-       return x86_read_percpu(irq_regs);
-}
-
-static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
-{
-       struct pt_regs *old_regs;
-
-       old_regs = get_irq_regs();
-       x86_write_percpu(irq_regs, new_regs);
-
-       return old_regs;
-}
-
-#endif /* _ASM_X86_IRQ_REGS_32_H */
diff --git a/arch/x86/include/asm/irq_regs_64.h b/arch/x86/include/asm/irq_regs_64.h
deleted file mode 100644 (file)
index 3dd9c0b..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
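
The new irq_regs.h keeps a per-CPU pointer to the interrupted frame and swaps it in set_irq_regs(), returning the previous value so nested entries can restore it. A user-space model of that save/swap/restore discipline is sketched below; the pt_regs stand-in and the single global slot (instead of a per-CPU variable) are simplifications.

#include <stdio.h>

struct fake_pt_regs { unsigned long ip; };

/* Single slot standing in for the per-CPU irq_regs variable. */
static struct fake_pt_regs *current_irq_regs;

static struct fake_pt_regs *fake_set_irq_regs(struct fake_pt_regs *new_regs)
{
	struct fake_pt_regs *old = current_irq_regs;

	current_irq_regs = new_regs;
	return old;
}

static void handle_irq_like(struct fake_pt_regs *regs)
{
	struct fake_pt_regs *old = fake_set_irq_regs(regs);	/* entry: publish frame */

	printf("handling with ip=%#lx\n", current_irq_regs->ip);

	fake_set_irq_regs(old);				/* exit: restore outer frame */
}

int main(void)
{
	struct fake_pt_regs outer = { 0x1000 }, nested = { 0x2000 };

	handle_irq_like(&outer);
	handle_irq_like(&nested);
	return 0;
}
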
index f7ff650..b07278c 100644 (file)
@@ -1,47 +1,69 @@
 #ifndef _ASM_X86_IRQ_VECTORS_H
 #define _ASM_X86_IRQ_VECTORS_H
 
-#include <linux/threads.h>
+/*
+ * Linux IRQ vector layout.
+ *
+ * There are 256 IDT entries (per CPU - each entry is 8 bytes) which can
+ * be defined by Linux. They are used as a jump table by the CPU when a
+ * given vector is triggered - by a CPU-external, CPU-internal or
+ * software-triggered event.
+ *
+ * Linux sets the kernel code address each entry jumps to early during
+ * bootup, and never changes them. This is the general layout of the
+ * IDT entries:
+ *
+ *  Vectors   0 ...  31 : system traps and exceptions - hardcoded events
+ *  Vectors  32 ... 127 : device interrupts
+ *  Vector  128         : legacy int80 syscall interface
+ *  Vectors 129 ... 237 : device interrupts
+ *  Vectors 238 ... 255 : special interrupts
+ *
+ * 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table.
+ *
+ * This file enumerates the exact layout of them:
+ */
 
-#define NMI_VECTOR             0x02
+#define NMI_VECTOR                     0x02
 
 /*
  * IDT vectors usable for external interrupt sources start
  * at 0x20:
  */
-#define FIRST_EXTERNAL_VECTOR  0x20
+#define FIRST_EXTERNAL_VECTOR          0x20
 
 #ifdef CONFIG_X86_32
-# define SYSCALL_VECTOR                0x80
+# define SYSCALL_VECTOR                        0x80
 #else
-# define IA32_SYSCALL_VECTOR   0x80
+# define IA32_SYSCALL_VECTOR           0x80
 #endif
 
 /*
  * Reserve the lowest usable priority level 0x20 - 0x2f for triggering
  * cleanup after irq migration.
  */
-#define IRQ_MOVE_CLEANUP_VECTOR        FIRST_EXTERNAL_VECTOR
+#define IRQ_MOVE_CLEANUP_VECTOR                FIRST_EXTERNAL_VECTOR
 
 /*
  * Vectors 0x30-0x3f are used for ISA interrupts.
  */
-#define IRQ0_VECTOR            (FIRST_EXTERNAL_VECTOR + 0x10)
-#define IRQ1_VECTOR            (IRQ0_VECTOR + 1)
-#define IRQ2_VECTOR            (IRQ0_VECTOR + 2)
-#define IRQ3_VECTOR            (IRQ0_VECTOR + 3)
-#define IRQ4_VECTOR            (IRQ0_VECTOR + 4)
-#define IRQ5_VECTOR            (IRQ0_VECTOR + 5)
-#define IRQ6_VECTOR            (IRQ0_VECTOR + 6)
-#define IRQ7_VECTOR            (IRQ0_VECTOR + 7)
-#define IRQ8_VECTOR            (IRQ0_VECTOR + 8)
-#define IRQ9_VECTOR            (IRQ0_VECTOR + 9)
-#define IRQ10_VECTOR           (IRQ0_VECTOR + 10)
-#define IRQ11_VECTOR           (IRQ0_VECTOR + 11)
-#define IRQ12_VECTOR           (IRQ0_VECTOR + 12)
-#define IRQ13_VECTOR           (IRQ0_VECTOR + 13)
-#define IRQ14_VECTOR           (IRQ0_VECTOR + 14)
-#define IRQ15_VECTOR           (IRQ0_VECTOR + 15)
+#define IRQ0_VECTOR                    (FIRST_EXTERNAL_VECTOR + 0x10)
+
+#define IRQ1_VECTOR                    (IRQ0_VECTOR +  1)
+#define IRQ2_VECTOR                    (IRQ0_VECTOR +  2)
+#define IRQ3_VECTOR                    (IRQ0_VECTOR +  3)
+#define IRQ4_VECTOR                    (IRQ0_VECTOR +  4)
+#define IRQ5_VECTOR                    (IRQ0_VECTOR +  5)
+#define IRQ6_VECTOR                    (IRQ0_VECTOR +  6)
+#define IRQ7_VECTOR                    (IRQ0_VECTOR +  7)
+#define IRQ8_VECTOR                    (IRQ0_VECTOR +  8)
+#define IRQ9_VECTOR                    (IRQ0_VECTOR +  9)
+#define IRQ10_VECTOR                   (IRQ0_VECTOR + 10)
+#define IRQ11_VECTOR                   (IRQ0_VECTOR + 11)
+#define IRQ12_VECTOR                   (IRQ0_VECTOR + 12)
+#define IRQ13_VECTOR                   (IRQ0_VECTOR + 13)
+#define IRQ14_VECTOR                   (IRQ0_VECTOR + 14)
+#define IRQ15_VECTOR                   (IRQ0_VECTOR + 15)
 
 /*
  * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
  *  some of the following vectors are 'rare', they are merged
  *  into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
  *  TLB, reschedule and local APIC vectors are performance-critical.
- *
- *  Vectors 0xf0-0xfa are free (reserved for future Linux use).
  */
-#ifdef CONFIG_X86_32
-
-# define SPURIOUS_APIC_VECTOR          0xff
-# define ERROR_APIC_VECTOR             0xfe
-# define INVALIDATE_TLB_VECTOR         0xfd
-# define RESCHEDULE_VECTOR             0xfc
-# define CALL_FUNCTION_VECTOR          0xfb
-# define CALL_FUNCTION_SINGLE_VECTOR   0xfa
-# define THERMAL_APIC_VECTOR           0xf0
-
-#else
 
 #define SPURIOUS_APIC_VECTOR           0xff
+/*
+ * Sanity check
+ */
+#if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F)
+# error SPURIOUS_APIC_VECTOR definition error
+#endif
+
 #define ERROR_APIC_VECTOR              0xfe
 #define RESCHEDULE_VECTOR              0xfd
 #define CALL_FUNCTION_VECTOR           0xfc
 #define CALL_FUNCTION_SINGLE_VECTOR    0xfb
 #define THERMAL_APIC_VECTOR            0xfa
-#define THRESHOLD_APIC_VECTOR          0xf9
-#define UV_BAU_MESSAGE                 0xf8
-#define INVALIDATE_TLB_VECTOR_END      0xf7
-#define INVALIDATE_TLB_VECTOR_START    0xf0    /* f0-f7 used for TLB flush */
-
-#define NUM_INVALIDATE_TLB_VECTORS     8
 
+#ifdef CONFIG_X86_32
+/* 0xf8 - 0xf9 : free */
+#else
+# define THRESHOLD_APIC_VECTOR         0xf9
+# define UV_BAU_MESSAGE                        0xf8
 #endif
 
+/* f0-f7 used for spreading out TLB flushes: */
+#define INVALIDATE_TLB_VECTOR_END      0xf7
+#define INVALIDATE_TLB_VECTOR_START    0xf0
+#define NUM_INVALIDATE_TLB_VECTORS        8
+
 /*
  * Local APIC timer IRQ vector is on a different priority level,
  * to work around the 'lost local interrupt if more than 2 IRQ
  * sources per level' errata.
  */
-#define LOCAL_TIMER_VECTOR     0xef
+#define LOCAL_TIMER_VECTOR             0xef
+
+/*
+ * Performance monitoring interrupt vector:
+ */
+#define LOCAL_PERF_VECTOR              0xee
 
 /*
  * First APIC vector available to drivers: (vectors 0x30-0xee) we
  * start at 0x31(0x41) to spread out vectors evenly between priority
  * levels. (0x80 is the syscall vector)
  */
-#define FIRST_DEVICE_VECTOR    (IRQ15_VECTOR + 2)
-
-#define NR_VECTORS             256
+#define FIRST_DEVICE_VECTOR            (IRQ15_VECTOR + 2)
 
-#define FPU_IRQ                        13
+#define NR_VECTORS                      256
 
-#define        FIRST_VM86_IRQ          3
-#define LAST_VM86_IRQ          15
-#define invalid_vm86_irq(irq)  ((irq) < 3 || (irq) > 15)
+#define FPU_IRQ                                  13
 
-#define NR_IRQS_LEGACY         16
+#define        FIRST_VM86_IRQ                     3
+#define LAST_VM86_IRQ                    15
 
-#if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_VOYAGER)
-
-#ifndef CONFIG_SPARSE_IRQ
-# if NR_CPUS < MAX_IO_APICS
-#  define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
-# else
-#  define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
-# endif
-#else
-# if (8 * NR_CPUS) > (32 * MAX_IO_APICS)
-#  define NR_IRQS (NR_VECTORS + (8 * NR_CPUS))
-# else
-#  define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
-# endif
+#ifndef __ASSEMBLY__
+static inline int invalid_vm86_irq(int irq)
+{
+       return irq < 3 || irq > 15;
+}
 #endif
 
-#elif defined(CONFIG_X86_VOYAGER)
-
-# define NR_IRQS               224
+/*
+ * Size the maximum number of interrupts.
+ *
+ * If the irq_desc[] array has a sparse layout, we can size things
+ * generously - it scales up linearly with the maximum number of CPUs,
+ * and the maximum number of IO-APICs, whichever is higher.
+ *
+ * In other cases we size more conservatively, to not create too large
+ * static arrays.
+ */
 
-#else /* IO_APIC || VOYAGER */
+#define NR_IRQS_LEGACY                   16
 
-# define NR_IRQS               16
+#define CPU_VECTOR_LIMIT               (  8 * NR_CPUS      )
+#define IO_APIC_VECTOR_LIMIT           ( 32 * MAX_IO_APICS )
 
+#ifdef CONFIG_X86_IO_APIC
+# ifdef CONFIG_SPARSE_IRQ
+#  define NR_IRQS                                      \
+       (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ?      \
+               (NR_VECTORS + CPU_VECTOR_LIMIT)  :      \
+               (NR_VECTORS + IO_APIC_VECTOR_LIMIT))
+# else
+#  if NR_CPUS < MAX_IO_APICS
+#   define NR_IRQS                     (NR_VECTORS + 4*CPU_VECTOR_LIMIT)
+#  else
+#   define NR_IRQS                     (NR_VECTORS + IO_APIC_VECTOR_LIMIT)
+#  endif
+# endif
+#else /* !CONFIG_X86_IO_APIC: */
+# define NR_IRQS                       NR_IRQS_LEGACY
 #endif
 
-/* Voyager specific defines */
-/* These define the CPIs we use in linux */
-#define VIC_CPI_LEVEL0                 0
-#define VIC_CPI_LEVEL1                 1
-/* now the fake CPIs */
-#define VIC_TIMER_CPI                  2
-#define VIC_INVALIDATE_CPI             3
-#define VIC_RESCHEDULE_CPI             4
-#define VIC_ENABLE_IRQ_CPI             5
-#define VIC_CALL_FUNCTION_CPI          6
-#define VIC_CALL_FUNCTION_SINGLE_CPI   7
-
-/* Now the QIC CPIs:  Since we don't need the two initial levels,
- * these are 2 less than the VIC CPIs */
-#define QIC_CPI_OFFSET                 1
-#define QIC_TIMER_CPI                  (VIC_TIMER_CPI - QIC_CPI_OFFSET)
-#define QIC_INVALIDATE_CPI             (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET)
-#define QIC_RESCHEDULE_CPI             (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
-#define QIC_ENABLE_IRQ_CPI             (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
-#define QIC_CALL_FUNCTION_CPI          (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
-#define QIC_CALL_FUNCTION_SINGLE_CPI   (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET)
-
-#define VIC_START_FAKE_CPI             VIC_TIMER_CPI
-#define VIC_END_FAKE_CPI               VIC_CALL_FUNCTION_SINGLE_CPI
-
-/* this is the SYS_INT CPI. */
-#define VIC_SYS_INT                    8
-#define VIC_CMN_INT                    15
-
-/* This is the boot CPI for alternate processors.  It gets overwritten
- * by the above once the system has activated all available processors */
-#define VIC_CPU_BOOT_CPI               VIC_CPI_LEVEL0
-#define VIC_CPU_BOOT_ERRATA_CPI                (VIC_CPI_LEVEL0 + 8)
-
-
 #endif /* _ASM_X86_IRQ_VECTORS_H */
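
The NR_IRQS sizing comment introduced above can be checked with a quick back-of-the-envelope program; the NR_CPUS and MAX_IO_APICS values below are arbitrary example configuration values, not numbers taken from this patch.

#include <stdio.h>

#define NR_VECTORS		256
#define NR_CPUS			64	/* example .config value  */
#define MAX_IO_APICS		128	/* example platform limit */

#define CPU_VECTOR_LIMIT	(  8 * NR_CPUS      )
#define IO_APIC_VECTOR_LIMIT	( 32 * MAX_IO_APICS )

int main(void)
{
	/* Mirrors the CONFIG_SPARSE_IRQ branch: take the larger of the two limits. */
	int sparse_nr_irqs = CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ?
				NR_VECTORS + CPU_VECTOR_LIMIT :
				NR_VECTORS + IO_APIC_VECTOR_LIMIT;

	printf("CPU_VECTOR_LIMIT     = %d\n", CPU_VECTOR_LIMIT);
	printf("IO_APIC_VECTOR_LIMIT = %d\n", IO_APIC_VECTOR_LIMIT);
	printf("NR_IRQS (sparse)     = %d\n", sparse_nr_irqs);
	return 0;
}
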
index c61d8b2..0ceb6d1 100644 (file)
@@ -9,23 +9,8 @@
 # define PAGES_NR              4
 #else
 # define PA_CONTROL_PAGE       0
-# define VA_CONTROL_PAGE       1
-# define PA_PGD                        2
-# define VA_PGD                        3
-# define PA_PUD_0              4
-# define VA_PUD_0              5
-# define PA_PMD_0              6
-# define VA_PMD_0              7
-# define PA_PTE_0              8
-# define VA_PTE_0              9
-# define PA_PUD_1              10
-# define VA_PUD_1              11
-# define PA_PMD_1              12
-# define VA_PMD_1              13
-# define PA_PTE_1              14
-# define VA_PTE_1              15
-# define PA_TABLE_PAGE         16
-# define PAGES_NR              17
+# define PA_TABLE_PAGE         1
+# define PAGES_NR              2
 #endif
 
 #ifdef CONFIG_X86_32
@@ -157,9 +142,9 @@ relocate_kernel(unsigned long indirection_page,
                unsigned long start_address) ATTRIB_NORET;
 #endif
 
-#ifdef CONFIG_X86_32
 #define ARCH_HAS_KIMAGE_ARCH
 
+#ifdef CONFIG_X86_32
 struct kimage_arch {
        pgd_t *pgd;
 #ifdef CONFIG_X86_PAE
@@ -169,6 +154,12 @@ struct kimage_arch {
        pte_t *pte0;
        pte_t *pte1;
 };
+#else
+struct kimage_arch {
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
+};
 #endif
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h
deleted file mode 100644 (file)
index cc09cbb..0000000
+++ /dev/null
@@ -1,168 +0,0 @@
-#ifndef _ASM_X86_MACH_DEFAULT_MACH_APIC_H
-#define _ASM_X86_MACH_DEFAULT_MACH_APIC_H
-
-#ifdef CONFIG_X86_LOCAL_APIC
-
-#include <mach_apicdef.h>
-#include <asm/smp.h>
-
-#define APIC_DFR_VALUE (APIC_DFR_FLAT)
-
-static inline const struct cpumask *target_cpus(void)
-{ 
-#ifdef CONFIG_SMP
-       return cpu_online_mask;
-#else
-       return cpumask_of(0);
-#endif
-} 
-
-#define NO_BALANCE_IRQ (0)
-#define esr_disable (0)
-
-#ifdef CONFIG_X86_64
-#include <asm/genapic.h>
-#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
-#define INT_DEST_MODE (genapic->int_dest_mode)
-#define TARGET_CPUS      (genapic->target_cpus())
-#define apic_id_registered (genapic->apic_id_registered)
-#define init_apic_ldr (genapic->init_apic_ldr)
-#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
-#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
-#define phys_pkg_id    (genapic->phys_pkg_id)
-#define vector_allocation_domain    (genapic->vector_allocation_domain)
-#define read_apic_id()  (GET_APIC_ID(apic_read(APIC_ID)))
-#define send_IPI_self (genapic->send_IPI_self)
-#define wakeup_secondary_cpu (genapic->wakeup_cpu)
-extern void setup_apic_routing(void);
-#else
-#define INT_DELIVERY_MODE dest_LowestPrio
-#define INT_DEST_MODE 1     /* logical delivery broadcast to all procs */
-#define TARGET_CPUS (target_cpus())
-#define wakeup_secondary_cpu wakeup_secondary_cpu_via_init
-/*
- * Set up the logical destination ID.
- *
- * Intel recommends to set DFR, LDR and TPR before enabling
- * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
- * document number 292116).  So here it goes...
- */
-static inline void init_apic_ldr(void)
-{
-       unsigned long val;
-
-       apic_write(APIC_DFR, APIC_DFR_VALUE);
-       val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
-       val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
-       apic_write(APIC_LDR, val);
-}
-
-static inline int apic_id_registered(void)
-{
-       return physid_isset(read_apic_id(), phys_cpu_present_map);
-}
-
-static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
-       return cpumask_bits(cpumask)[0];
-}
-
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-                                                 const struct cpumask *andmask)
-{
-       unsigned long mask1 = cpumask_bits(cpumask)[0];
-       unsigned long mask2 = cpumask_bits(andmask)[0];
-       unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
-
-       return (unsigned int)(mask1 & mask2 & mask3);
-}
-
-static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-{
-       return cpuid_apic >> index_msb;
-}
-
-static inline void setup_apic_routing(void)
-{
-#ifdef CONFIG_X86_IO_APIC
-       printk("Enabling APIC mode:  %s.  Using %d I/O APICs\n",
-                                       "Flat", nr_ioapics);
-#endif
-}
-
-static inline int apicid_to_node(int logical_apicid)
-{
-#ifdef CONFIG_SMP
-       return apicid_2_node[hard_smp_processor_id()];
-#else
-       return 0;
-#endif
-}
-
-static inline void vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-        /* Careful. Some cpus do not strictly honor the set of cpus
-         * specified in the interrupt destination when using lowest
-         * priority interrupt delivery mode.
-         *
-         * In particular there was a hyperthreading cpu observed to
-         * deliver interrupts to the wrong hyperthread when only one
-         * hyperthread was specified in the interrupt desitination.
-         */
-       *retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
-}
-#endif
-
-static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
-{
-       return physid_isset(apicid, bitmap);
-}
-
-static inline unsigned long check_apicid_present(int bit)
-{
-       return physid_isset(bit, phys_cpu_present_map);
-}
-
-static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
-{
-       return phys_map;
-}
-
-static inline int multi_timer_check(int apic, int irq)
-{
-       return 0;
-}
-
-/* Mapping from cpu number to logical apicid */
-static inline int cpu_to_logical_apicid(int cpu)
-{
-       return 1 << cpu;
-}
-
-static inline int cpu_present_to_apicid(int mps_cpu)
-{
-       if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
-               return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
-       else
-               return BAD_APICID;
-}
-
-static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
-{
-       return physid_mask_of_physid(phys_apicid);
-}
-
-static inline void setup_portio_remap(void)
-{
-}
-
-static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
-{
-       return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
-}
-
-static inline void enable_apic_mode(void)
-{
-}
-#endif /* CONFIG_X86_LOCAL_APIC */
-#endif /* _ASM_X86_MACH_DEFAULT_MACH_APIC_H */
diff --git a/arch/x86/include/asm/mach-default/mach_apicdef.h b/arch/x86/include/asm/mach-default/mach_apicdef.h
deleted file mode 100644 (file)
index 5317993..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H
-#define _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H
-
-#include <asm/apic.h>
-
-#ifdef CONFIG_X86_64
-#define        APIC_ID_MASK            (genapic->apic_id_mask)
-#define GET_APIC_ID(x)         (genapic->get_apic_id(x))
-#define        SET_APIC_ID(x)          (genapic->set_apic_id(x))
-#else
-#define                APIC_ID_MASK            (0xF<<24)
-static inline unsigned get_apic_id(unsigned long x) 
-{
-       unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
-       if (APIC_XAPIC(ver))
-               return (((x)>>24)&0xFF);
-       else
-               return (((x)>>24)&0xF);
-} 
-
-#define                GET_APIC_ID(x)  get_apic_id(x)
-#endif
-
-#endif /* _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H */
diff --git a/arch/x86/include/asm/mach-default/mach_ipi.h b/arch/x86/include/asm/mach-default/mach_ipi.h
deleted file mode 100644 (file)
index 191312d..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-#ifndef _ASM_X86_MACH_DEFAULT_MACH_IPI_H
-#define _ASM_X86_MACH_DEFAULT_MACH_IPI_H
-
-/* Avoid include hell */
-#define NMI_VECTOR 0x02
-
-void send_IPI_mask_bitmask(const struct cpumask *mask, int vector);
-void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-void __send_IPI_shortcut(unsigned int shortcut, int vector);
-
-extern int no_broadcast;
-
-#ifdef CONFIG_X86_64
-#include <asm/genapic.h>
-#define send_IPI_mask (genapic->send_IPI_mask)
-#define send_IPI_mask_allbutself (genapic->send_IPI_mask_allbutself)
-#else
-static inline void send_IPI_mask(const struct cpumask *mask, int vector)
-{
-       send_IPI_mask_bitmask(mask, vector);
-}
-void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-#endif
-
-static inline void __local_send_IPI_allbutself(int vector)
-{
-       if (no_broadcast || vector == NMI_VECTOR)
-               send_IPI_mask_allbutself(cpu_online_mask, vector);
-       else
-               __send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
-}
-
-static inline void __local_send_IPI_all(int vector)
-{
-       if (no_broadcast || vector == NMI_VECTOR)
-               send_IPI_mask(cpu_online_mask, vector);
-       else
-               __send_IPI_shortcut(APIC_DEST_ALLINC, vector);
-}
-
-#ifdef CONFIG_X86_64
-#define send_IPI_allbutself (genapic->send_IPI_allbutself)
-#define send_IPI_all (genapic->send_IPI_all)
-#else
-static inline void send_IPI_allbutself(int vector)
-{
-       /*
-        * if there are no other CPUs in the system then we get an APIC send 
-        * error if we try to broadcast, thus avoid sending IPIs in this case.
-        */
-       if (!(num_online_cpus() > 1))
-               return;
-
-       __local_send_IPI_allbutself(vector);
-       return;
-}
-
-static inline void send_IPI_all(int vector)
-{
-       __local_send_IPI_all(vector);
-}
-#endif
-
-#endif /* _ASM_X86_MACH_DEFAULT_MACH_IPI_H */
diff --git a/arch/x86/include/asm/mach-default/mach_mpparse.h b/arch/x86/include/asm/mach-default/mach_mpparse.h
deleted file mode 100644 (file)
index c70a263..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H
-#define _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H
-
-static inline int
-mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
-{
-       return 0;
-}
-
-/* Hook from generic ACPI tables.c */
-static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
-       return 0;
-}
-
-
-#endif /* _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H */
diff --git a/arch/x86/include/asm/mach-default/mach_mpspec.h b/arch/x86/include/asm/mach-default/mach_mpspec.h
deleted file mode 100644 (file)
index e85ede6..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H
-#define _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H
-
-#define MAX_IRQ_SOURCES 256
-
-#if CONFIG_BASE_SMALL == 0
-#define MAX_MP_BUSSES 256
-#else
-#define MAX_MP_BUSSES 32
-#endif
-
-#endif /* _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H */
diff --git a/arch/x86/include/asm/mach-default/mach_wakecpu.h b/arch/x86/include/asm/mach-default/mach_wakecpu.h
deleted file mode 100644 (file)
index 89897a6..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H
-#define _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H
-
-#define TRAMPOLINE_PHYS_LOW (0x467)
-#define TRAMPOLINE_PHYS_HIGH (0x469)
-
-static inline void wait_for_init_deassert(atomic_t *deassert)
-{
-       while (!atomic_read(deassert))
-               cpu_relax();
-       return;
-}
-
-/* Nothing to do for most platforms, since cleared by the INIT cycle */
-static inline void smp_callin_clear_local_apic(void)
-{
-}
-
-static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
-{
-}
-
-static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
-{
-}
-
-#ifdef CONFIG_SMP
-extern void __inquire_remote_apic(int apicid);
-#else /* CONFIG_SMP */
-static inline void __inquire_remote_apic(int apicid)
-{
-}
-#endif /* CONFIG_SMP */
-
-static inline void inquire_remote_apic(int apicid)
-{
-       if (apic_verbosity >= APIC_DEBUG)
-               __inquire_remote_apic(apicid);
-}
-
-#endif /* _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H */
diff --git a/arch/x86/include/asm/mach-generic/gpio.h b/arch/x86/include/asm/mach-generic/gpio.h
deleted file mode 100644 (file)
index 995c45e..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _ASM_X86_MACH_GENERIC_GPIO_H
-#define _ASM_X86_MACH_GENERIC_GPIO_H
-
-int gpio_request(unsigned gpio, const char *label);
-void gpio_free(unsigned gpio);
-int gpio_direction_input(unsigned gpio);
-int gpio_direction_output(unsigned gpio, int value);
-int gpio_get_value(unsigned gpio);
-void gpio_set_value(unsigned gpio, int value);
-int gpio_to_irq(unsigned gpio);
-int irq_to_gpio(unsigned irq);
-
-#include <asm-generic/gpio.h>           /* cansleep wrappers */
-
-#endif /* _ASM_X86_MACH_GENERIC_GPIO_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h
deleted file mode 100644 (file)
index 48553e9..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef _ASM_X86_MACH_GENERIC_MACH_APIC_H
-#define _ASM_X86_MACH_GENERIC_MACH_APIC_H
-
-#include <asm/genapic.h>
-
-#define esr_disable (genapic->ESR_DISABLE)
-#define NO_BALANCE_IRQ (genapic->no_balance_irq)
-#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
-#define INT_DEST_MODE (genapic->int_dest_mode)
-#undef APIC_DEST_LOGICAL
-#define APIC_DEST_LOGICAL (genapic->apic_destination_logical)
-#define TARGET_CPUS      (genapic->target_cpus())
-#define apic_id_registered (genapic->apic_id_registered)
-#define init_apic_ldr (genapic->init_apic_ldr)
-#define ioapic_phys_id_map (genapic->ioapic_phys_id_map)
-#define setup_apic_routing (genapic->setup_apic_routing)
-#define multi_timer_check (genapic->multi_timer_check)
-#define apicid_to_node (genapic->apicid_to_node)
-#define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid) 
-#define cpu_present_to_apicid (genapic->cpu_present_to_apicid)
-#define apicid_to_cpu_present (genapic->apicid_to_cpu_present)
-#define setup_portio_remap (genapic->setup_portio_remap)
-#define check_apicid_present (genapic->check_apicid_present)
-#define check_phys_apicid_present (genapic->check_phys_apicid_present)
-#define check_apicid_used (genapic->check_apicid_used)
-#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
-#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
-#define vector_allocation_domain (genapic->vector_allocation_domain)
-#define enable_apic_mode (genapic->enable_apic_mode)
-#define phys_pkg_id (genapic->phys_pkg_id)
-#define wakeup_secondary_cpu (genapic->wakeup_cpu)
-
-extern void generic_bigsmp_probe(void);
-
-#endif /* _ASM_X86_MACH_GENERIC_MACH_APIC_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_apicdef.h b/arch/x86/include/asm/mach-generic/mach_apicdef.h
deleted file mode 100644 (file)
index 68041f3..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef _ASM_X86_MACH_GENERIC_MACH_APICDEF_H
-#define _ASM_X86_MACH_GENERIC_MACH_APICDEF_H
-
-#ifndef APIC_DEFINITION
-#include <asm/genapic.h>
-
-#define GET_APIC_ID (genapic->get_apic_id)
-#define APIC_ID_MASK (genapic->apic_id_mask)
-#endif
-
-#endif /* _ASM_X86_MACH_GENERIC_MACH_APICDEF_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_ipi.h b/arch/x86/include/asm/mach-generic/mach_ipi.h
deleted file mode 100644 (file)
index ffd637e..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _ASM_X86_MACH_GENERIC_MACH_IPI_H
-#define _ASM_X86_MACH_GENERIC_MACH_IPI_H
-
-#include <asm/genapic.h>
-
-#define send_IPI_mask (genapic->send_IPI_mask)
-#define send_IPI_allbutself (genapic->send_IPI_allbutself)
-#define send_IPI_all (genapic->send_IPI_all)
-
-#endif /* _ASM_X86_MACH_GENERIC_MACH_IPI_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_mpparse.h b/arch/x86/include/asm/mach-generic/mach_mpparse.h
deleted file mode 100644 (file)
index 9444ab8..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H
-#define _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H
-
-
-extern int mps_oem_check(struct mpc_table *, char *, char *);
-
-extern int acpi_madt_oem_check(char *, char *);
-
-#endif /* _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_mpspec.h b/arch/x86/include/asm/mach-generic/mach_mpspec.h
deleted file mode 100644 (file)
index 3bc4072..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H
-#define _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H
-
-#define MAX_IRQ_SOURCES 256
-
-/* Summit or generic (i.e. installer) kernels need lots of bus entries. */
-/* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */
-#define MAX_MP_BUSSES 260
-
-extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
-
-#endif /* _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_wakecpu.h b/arch/x86/include/asm/mach-generic/mach_wakecpu.h
deleted file mode 100644 (file)
index 1ab16b1..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H
-#define _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H
-
-#define TRAMPOLINE_PHYS_LOW (genapic->trampoline_phys_low)
-#define TRAMPOLINE_PHYS_HIGH (genapic->trampoline_phys_high)
-#define wait_for_init_deassert (genapic->wait_for_init_deassert)
-#define smp_callin_clear_local_apic (genapic->smp_callin_clear_local_apic)
-#define store_NMI_vector (genapic->store_NMI_vector)
-#define restore_NMI_vector (genapic->restore_NMI_vector)
-#define inquire_remote_apic (genapic->inquire_remote_apic)
-
-#endif /* _ASM_X86_MACH_GENERIC_MACH_APIC_H */
diff --git a/arch/x86/include/asm/mach-rdc321x/gpio.h b/arch/x86/include/asm/mach-rdc321x/gpio.h
deleted file mode 100644 (file)
index c210ab5..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef _ASM_X86_MACH_RDC321X_GPIO_H
-#define _ASM_X86_MACH_RDC321X_GPIO_H
-
-#include <linux/kernel.h>
-
-extern int rdc_gpio_get_value(unsigned gpio);
-extern void rdc_gpio_set_value(unsigned gpio, int value);
-extern int rdc_gpio_direction_input(unsigned gpio);
-extern int rdc_gpio_direction_output(unsigned gpio, int value);
-extern int rdc_gpio_request(unsigned gpio, const char *label);
-extern void rdc_gpio_free(unsigned gpio);
-extern void __init rdc321x_gpio_setup(void);
-
-/* Wrappers for the arch-neutral GPIO API */
-
-static inline int gpio_request(unsigned gpio, const char *label)
-{
-       return rdc_gpio_request(gpio, label);
-}
-
-static inline void gpio_free(unsigned gpio)
-{
-       might_sleep();
-       rdc_gpio_free(gpio);
-}
-
-static inline int gpio_direction_input(unsigned gpio)
-{
-       return rdc_gpio_direction_input(gpio);
-}
-
-static inline int gpio_direction_output(unsigned gpio, int value)
-{
-       return rdc_gpio_direction_output(gpio, value);
-}
-
-static inline int gpio_get_value(unsigned gpio)
-{
-       return rdc_gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value(unsigned gpio, int value)
-{
-       rdc_gpio_set_value(gpio, value);
-}
-
-static inline int gpio_to_irq(unsigned gpio)
-{
-       return gpio;
-}
-
-static inline int irq_to_gpio(unsigned irq)
-{
-       return irq;
-}
-
-/* For cansleep */
-#include <asm-generic/gpio.h>
-
-#endif /* _ASM_X86_MACH_RDC321X_GPIO_H */
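
The deleted header wrapped the RDC321x GPIO primitives behind the arch-neutral GPIO calls. For orientation, this is the usage pattern those wrappers served, a generic sketch of the standard GPIO API rather than code from this commit (MY_LED_GPIO and led_on are hypothetical):

/* Sketch: typical consumer of the arch-neutral GPIO calls. */
#define MY_LED_GPIO 7                          /* hypothetical pin number */

static int led_on(void)
{
        int err = gpio_request(MY_LED_GPIO, "led");
        if (err)
                return err;
        return gpio_direction_output(MY_LED_GPIO, 1);  /* drive the pin high */
}
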
index 8aeeb3f..f923203 100644 (file)
@@ -21,11 +21,54 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void destroy_context(struct mm_struct *mm);
 
-#ifdef CONFIG_X86_32
-# include "mmu_context_32.h"
-#else
-# include "mmu_context_64.h"
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+#ifdef CONFIG_SMP
+       if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
+               percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
+#endif
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+                            struct task_struct *tsk)
+{
+       unsigned cpu = smp_processor_id();
+
+       if (likely(prev != next)) {
+               /* stop flush ipis for the previous mm */
+               cpu_clear(cpu, prev->cpu_vm_mask);
+#ifdef CONFIG_SMP
+               percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+               percpu_write(cpu_tlbstate.active_mm, next);
 #endif
+               cpu_set(cpu, next->cpu_vm_mask);
+
+               /* Re-load page tables */
+               load_cr3(next->pgd);
+
+               /*
+                * load the LDT, if the LDT is different:
+                */
+               if (unlikely(prev->context.ldt != next->context.ldt))
+                       load_LDT_nolock(&next->context);
+       }
+#ifdef CONFIG_SMP
+       else {
+               percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+               BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
+
+               if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
+                       /* We were in lazy tlb mode and leave_mm disabled
+                        * tlb flush IPI delivery. We must reload CR3
+                        * to make sure to use no freed page tables.
+                        */
+                       load_cr3(next->pgd);
+                       load_LDT_nolock(&next->context);
+               }
+       }
+#endif
+}
 
 #define activate_mm(prev, next)                        \
 do {                                           \
@@ -33,5 +76,17 @@ do {                                         \
        switch_mm((prev), (next), NULL);        \
 } while (0);
 
+#ifdef CONFIG_X86_32
+#define deactivate_mm(tsk, mm)                 \
+do {                                           \
+       lazy_load_gs(0);                        \
+} while (0)
+#else
+#define deactivate_mm(tsk, mm)                 \
+do {                                           \
+       load_gs_index(0);                       \
+       loadsegment(fs, 0);                     \
+} while (0)
+#endif
 
 #endif /* _ASM_X86_MMU_CONTEXT_H */
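
The unified switch_mm() keeps the lazy-TLB protocol: a CPU parked in TLBSTATE_LAZY is skipped by flush IPIs, so when it later switches back to the same mm it must notice, via the cpu_vm_mask test-and-set above, that it may have missed flushes and reload CR3. For orientation, a simplified sketch of the counterpart leave_mm() that puts a CPU into that lazy state (paraphrased from the TLB-flush code of this era, not part of this hunk):

/* Simplified sketch of leave_mm(); the real function lives in the TLB code. */
void leave_mm(int cpu)
{
        if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
                BUG();
        /* stop flush IPIs being sent to this CPU for the lazy mm ... */
        cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask);
        /* ... and run on a page table that is never freed */
        load_cr3(swapper_pg_dir);
}
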
diff --git a/arch/x86/include/asm/mmu_context_32.h b/arch/x86/include/asm/mmu_context_32.h
deleted file mode 100644 (file)
index 7e98ce1..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-#ifndef _ASM_X86_MMU_CONTEXT_32_H
-#define _ASM_X86_MMU_CONTEXT_32_H
-
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-#ifdef CONFIG_SMP
-       if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK)
-               x86_write_percpu(cpu_tlbstate.state, TLBSTATE_LAZY);
-#endif
-}
-
-static inline void switch_mm(struct mm_struct *prev,
-                            struct mm_struct *next,
-                            struct task_struct *tsk)
-{
-       int cpu = smp_processor_id();
-
-       if (likely(prev != next)) {
-               /* stop flush ipis for the previous mm */
-               cpu_clear(cpu, prev->cpu_vm_mask);
-#ifdef CONFIG_SMP
-               x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
-               x86_write_percpu(cpu_tlbstate.active_mm, next);
-#endif
-               cpu_set(cpu, next->cpu_vm_mask);
-
-               /* Re-load page tables */
-               load_cr3(next->pgd);
-
-               /*
-                * load the LDT, if the LDT is different:
-                */
-               if (unlikely(prev->context.ldt != next->context.ldt))
-                       load_LDT_nolock(&next->context);
-       }
-#ifdef CONFIG_SMP
-       else {
-               x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
-               BUG_ON(x86_read_percpu(cpu_tlbstate.active_mm) != next);
-
-               if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
-                       /* We were in lazy tlb mode and leave_mm disabled
-                        * tlb flush IPI delivery. We must reload %cr3.
-                        */
-                       load_cr3(next->pgd);
-                       load_LDT_nolock(&next->context);
-               }
-       }
-#endif
-}
-
-#define deactivate_mm(tsk, mm)                 \
-       asm("movl %0,%%gs": :"r" (0));
-
-#endif /* _ASM_X86_MMU_CONTEXT_32_H */
diff --git a/arch/x86/include/asm/mmu_context_64.h b/arch/x86/include/asm/mmu_context_64.h
deleted file mode 100644 (file)
index 677d36e..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-#ifndef _ASM_X86_MMU_CONTEXT_64_H
-#define _ASM_X86_MMU_CONTEXT_64_H
-
-#include <asm/pda.h>
-
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-#ifdef CONFIG_SMP
-       if (read_pda(mmu_state) == TLBSTATE_OK)
-               write_pda(mmu_state, TLBSTATE_LAZY);
-#endif
-}
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-                            struct task_struct *tsk)
-{
-       unsigned cpu = smp_processor_id();
-       if (likely(prev != next)) {
-               /* stop flush ipis for the previous mm */
-               cpu_clear(cpu, prev->cpu_vm_mask);
-#ifdef CONFIG_SMP
-               write_pda(mmu_state, TLBSTATE_OK);
-               write_pda(active_mm, next);
-#endif
-               cpu_set(cpu, next->cpu_vm_mask);
-               load_cr3(next->pgd);
-
-               if (unlikely(next->context.ldt != prev->context.ldt))
-                       load_LDT_nolock(&next->context);
-       }
-#ifdef CONFIG_SMP
-       else {
-               write_pda(mmu_state, TLBSTATE_OK);
-               if (read_pda(active_mm) != next)
-                       BUG();
-               if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
-                       /* We were in lazy tlb mode and leave_mm disabled
-                        * tlb flush IPI delivery. We must reload CR3
-                        * to make sure to use no freed page tables.
-                        */
-                       load_cr3(next->pgd);
-                       load_LDT_nolock(&next->context);
-               }
-       }
-#endif
-}
-
-#define deactivate_mm(tsk, mm)                 \
-do {                                           \
-       load_gs_index(0);                       \
-       asm volatile("movl %0,%%fs"::"r"(0));   \
-} while (0)
-
-#endif /* _ASM_X86_MMU_CONTEXT_64_H */
index bd22f2a..5916c8d 100644 (file)
@@ -9,7 +9,18 @@ extern int apic_version[MAX_APICS];
 extern int pic_mode;
 
 #ifdef CONFIG_X86_32
-#include <mach_mpspec.h>
+
+/*
+ * Summit or generic (i.e. installer) kernels need lots of bus entries.
+ * Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets.
+ */
+#if CONFIG_BASE_SMALL == 0
+# define MAX_MP_BUSSES         260
+#else
+# define MAX_MP_BUSSES         32
+#endif
+
+#define MAX_IRQ_SOURCES                256
 
 extern unsigned int def_to_bigsmp;
 extern u8 apicid_2_node[];
@@ -20,15 +31,15 @@ extern int mp_bus_id_to_local[MAX_MP_BUSSES];
 extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
 #endif
 
-#define MAX_APICID 256
+#define MAX_APICID             256
 
-#else
+#else /* CONFIG_X86_64: */
 
-#define MAX_MP_BUSSES 256
+#define MAX_MP_BUSSES          256
 /* Each PCI slot may be a combo card with its own bus.  4 IRQ pins per slot. */
-#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4)
+#define MAX_IRQ_SOURCES                (MAX_MP_BUSSES * 4)
 
-#endif
+#endif /* CONFIG_X86_64 */
 
 extern void early_find_smp_config(void);
 extern void early_get_smp_config(void);
@@ -45,11 +56,13 @@ extern int smp_found_config;
 extern int mpc_default_type;
 extern unsigned long mp_lapic_addr;
 
-extern void find_smp_config(void);
 extern void get_smp_config(void);
+
 #ifdef CONFIG_X86_MPPARSE
+extern void find_smp_config(void);
 extern void early_reserve_e820_mpc_new(void);
 #else
+static inline void find_smp_config(void) { }
 static inline void early_reserve_e820_mpc_new(void) { }
 #endif
 
@@ -64,6 +77,8 @@ extern int acpi_probe_gsi(void);
 #ifdef CONFIG_X86_IO_APIC
 extern int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
                                u32 gsi, int triggering, int polarity);
+extern int mp_find_ioapic(int gsi);
+extern int mp_find_ioapic_pin(int ioapic, int gsi);
 #else
 static inline int
 mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
@@ -148,4 +163,10 @@ static inline void physid_set_mask_of_physid(int physid, physid_mask_t *map)
 
 extern physid_mask_t phys_cpu_present_map;
 
+extern int generic_mps_oem_check(struct mpc_table *, char *, char *);
+
+extern int default_acpi_madt_oem_check(char *, char *);
+
+extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
+
 #endif /* _ASM_X86_MPSPEC_H */
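
Moving find_smp_config() under CONFIG_X86_MPPARSE with an empty inline fallback lets callers stay unconditional, the same stub pattern already used for early_reserve_e820_mpc_new(). A minimal sketch of a call site that now needs no #ifdef (probe_platform is hypothetical, not from this commit):

/* Sketch: setup code can call the probe unconditionally. */
void __init probe_platform(void)
{
        find_smp_config();      /* becomes a no-op stub when CONFIG_X86_MPPARSE=n */
        get_smp_config();
}
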
index 59568bc..4a7f96d 100644 (file)
 # endif
 #endif
 
-struct intel_mp_floating {
-       char mpf_signature[4];          /* "_MP_"                       */
-       unsigned int mpf_physptr;       /* Configuration table address  */
-       unsigned char mpf_length;       /* Our length (paragraphs)      */
-       unsigned char mpf_specification;/* Specification version        */
-       unsigned char mpf_checksum;     /* Checksum (makes sum 0)       */
-       unsigned char mpf_feature1;     /* Standard or configuration ?  */
-       unsigned char mpf_feature2;     /* Bit7 set for IMCR|PIC        */
-       unsigned char mpf_feature3;     /* Unused (0)                   */
-       unsigned char mpf_feature4;     /* Unused (0)                   */
-       unsigned char mpf_feature5;     /* Unused (0)                   */
+/* Intel MP Floating Pointer Structure */
+struct mpf_intel {
+       char signature[4];              /* "_MP_"                       */
+       unsigned int physptr;           /* Configuration table address  */
+       unsigned char length;           /* Our length (paragraphs)      */
+       unsigned char specification;    /* Specification version        */
+       unsigned char checksum;         /* Checksum (makes sum 0)       */
+       unsigned char feature1;         /* Standard or configuration ?  */
+       unsigned char feature2;         /* Bit7 set for IMCR|PIC        */
+       unsigned char feature3;         /* Unused (0)                   */
+       unsigned char feature4;         /* Unused (0)                   */
+       unsigned char feature5;         /* Unused (0)                   */
 };
 
 #define MPC_SIGNATURE "PCMP"
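
The rename only drops the mpf_ prefix from the field names; the structure is still the MP 1.4 floating pointer that the MP-table scan looks for. A hedged sketch of how such a scan validates a candidate, using the renamed fields (the helper is illustrative, not the kernel's exact scan code):

/* Illustrative: verify an MP floating pointer candidate found in a BIOS area. */
static int mpf_looks_valid(struct mpf_intel *mpf)
{
        unsigned char sum = 0;
        unsigned char *p = (unsigned char *)mpf;
        int i;

        if (memcmp(mpf->signature, "_MP_", 4) != 0)
                return 0;
        /* length is in 16-byte paragraphs; checksum over the block must be 0 */
        for (i = 0; i < mpf->length * 16; i++)
                sum += p[i];
        return sum == 0;
}
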
index 1e8bd30..9f0a5f5 100644 (file)
@@ -31,6 +31,8 @@
 extern int found_numaq;
 extern int get_memcfg_numaq(void);
 
+extern void *xquad_portio;
+
 /*
  * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the
  */
diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h
deleted file mode 100644 (file)
index bf37bc4..0000000
+++ /dev/null
@@ -1,142 +0,0 @@
-#ifndef __ASM_NUMAQ_APIC_H
-#define __ASM_NUMAQ_APIC_H
-
-#include <asm/io.h>
-#include <linux/mmzone.h>
-#include <linux/nodemask.h>
-
-#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
-
-static inline const cpumask_t *target_cpus(void)
-{
-       return &CPU_MASK_ALL;
-}
-
-#define NO_BALANCE_IRQ (1)
-#define esr_disable (1)
-
-#define INT_DELIVERY_MODE dest_LowestPrio
-#define INT_DEST_MODE 0     /* physical delivery on LOCAL quad */
-static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
-{
-       return physid_isset(apicid, bitmap);
-}
-static inline unsigned long check_apicid_present(int bit)
-{
-       return physid_isset(bit, phys_cpu_present_map);
-}
-#define apicid_cluster(apicid) (apicid & 0xF0)
-
-static inline int apic_id_registered(void)
-{
-       return 1;
-}
-
-static inline void init_apic_ldr(void)
-{
-       /* Already done in NUMA-Q firmware */
-}
-
-static inline void setup_apic_routing(void)
-{
-       printk("Enabling APIC mode:  %s.  Using %d I/O APICs\n",
-               "NUMA-Q", nr_ioapics);
-}
-
-/*
- * Skip adding the timer int on secondary nodes, which causes
- * a small but painful rift in the time-space continuum.
- */
-static inline int multi_timer_check(int apic, int irq)
-{
-       return apic != 0 && irq == 0;
-}
-
-static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
-{
-       /* We don't have a good way to do this yet - hack */
-       return physids_promote(0xFUL);
-}
-
-/* Mapping from cpu number to logical apicid */
-extern u8 cpu_2_logical_apicid[];
-static inline int cpu_to_logical_apicid(int cpu)
-{
-       if (cpu >= nr_cpu_ids)
-               return BAD_APICID;
-       return (int)cpu_2_logical_apicid[cpu];
-}
-
-/*
- * Supporting over 60 cpus on NUMA-Q requires a locality-dependent
- * cpu to APIC ID relation to properly interact with the intelligent
- * mode of the cluster controller.
- */
-static inline int cpu_present_to_apicid(int mps_cpu)
-{
-       if (mps_cpu < 60)
-               return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
-       else
-               return BAD_APICID;
-}
-
-static inline int apicid_to_node(int logical_apicid) 
-{
-       return logical_apicid >> 4;
-}
-
-static inline physid_mask_t apicid_to_cpu_present(int logical_apicid)
-{
-       int node = apicid_to_node(logical_apicid);
-       int cpu = __ffs(logical_apicid & 0xf);
-
-       return physid_mask_of_physid(cpu + 4*node);
-}
-
-extern void *xquad_portio;
-
-static inline void setup_portio_remap(void)
-{
-       int num_quads = num_online_nodes();
-
-       if (num_quads <= 1)
-                       return;
-
-       printk("Remapping cross-quad port I/O for %d quads\n", num_quads);
-       xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD);
-       printk("xquad_portio vaddr 0x%08lx, len %08lx\n",
-               (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
-}
-
-static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
-{
-       return (1);
-}
-
-static inline void enable_apic_mode(void)
-{
-}
-
-/*
- * We use physical apicids here, not logical, so just return the default
- * physical broadcast to stop people from breaking us
- */
-static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
-{
-       return (int) 0xF;
-}
-
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-                                                 const struct cpumask *andmask)
-{
-       return (int) 0xF;
-}
-
-/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */
-static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-{
-       return cpuid_apic >> index_msb;
-}
-
-#endif /* __ASM_NUMAQ_APIC_H */
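
For reference, the locality-aware mapping in the deleted cpu_present_to_apicid() packs the quad number into the high nibble and a one-hot CPU-within-quad bit into the low nibble; a quick worked example, just evaluating the removed code:

/* Worked example of the removed NUMA-Q mapping, for mps_cpu = 5:       */
/*   quad   = 5 >> 2        = 1                                         */
/*   local  = 1 << (5 & 3)  = 1 << 1 = 2                                */
/*   apicid = (1 << 4) | 2  = 0x12                                      */
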
diff --git a/arch/x86/include/asm/numaq/apicdef.h b/arch/x86/include/asm/numaq/apicdef.h
deleted file mode 100644 (file)
index e012a46..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef __ASM_NUMAQ_APICDEF_H
-#define __ASM_NUMAQ_APICDEF_H
-
-
-#define APIC_ID_MASK (0xF<<24)
-
-static inline unsigned get_apic_id(unsigned long x)
-{
-               return (((x)>>24)&0x0F);
-}
-
-#define         GET_APIC_ID(x)  get_apic_id(x)
-
-#endif
diff --git a/arch/x86/include/asm/numaq/ipi.h b/arch/x86/include/asm/numaq/ipi.h
deleted file mode 100644 (file)
index a8374c6..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef __ASM_NUMAQ_IPI_H
-#define __ASM_NUMAQ_IPI_H
-
-void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
-void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-
-static inline void send_IPI_mask(const struct cpumask *mask, int vector)
-{
-       send_IPI_mask_sequence(mask, vector);
-}
-
-static inline void send_IPI_allbutself(int vector)
-{
-       send_IPI_mask_allbutself(cpu_online_mask, vector);
-}
-
-static inline void send_IPI_all(int vector)
-{
-       send_IPI_mask(cpu_online_mask, vector);
-}
-
-#endif /* __ASM_NUMAQ_IPI_H */
diff --git a/arch/x86/include/asm/numaq/mpparse.h b/arch/x86/include/asm/numaq/mpparse.h
deleted file mode 100644 (file)
index a2eeefc..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_NUMAQ_MPPARSE_H
-#define __ASM_NUMAQ_MPPARSE_H
-
-extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
-
-#endif /* __ASM_NUMAQ_MPPARSE_H */
diff --git a/arch/x86/include/asm/numaq/wakecpu.h b/arch/x86/include/asm/numaq/wakecpu.h
deleted file mode 100644 (file)
index 6f499df..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-#ifndef __ASM_NUMAQ_WAKECPU_H
-#define __ASM_NUMAQ_WAKECPU_H
-
-/* This file copes with machines that wakeup secondary CPUs by NMIs */
-
-#define TRAMPOLINE_PHYS_LOW (0x8)
-#define TRAMPOLINE_PHYS_HIGH (0xa)
-
-/* We don't do anything here because we use NMI's to boot instead */
-static inline void wait_for_init_deassert(atomic_t *deassert)
-{
-}
-
-/*
- * Because we use NMIs rather than the INIT-STARTUP sequence to
- * bootstrap the CPUs, the APIC may be in a weird state. Kick it.
- */
-static inline void smp_callin_clear_local_apic(void)
-{
-       clear_local_APIC();
-}
-
-static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
-{
-       printk("Storing NMI vector\n");
-       *high =
-         *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH));
-       *low =
-         *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW));
-}
-
-static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
-{
-       printk("Restoring NMI vector\n");
-       *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
-                                                                *high;
-       *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
-                                                                *low;
-}
-
-static inline void inquire_remote_apic(int apicid)
-{
-}
-
-#endif /* __ASM_NUMAQ_WAKECPU_H */
index e9873a2..4022699 100644 (file)
@@ -95,6 +95,11 @@ static inline pgdval_t native_pgd_val(pgd_t pgd)
        return pgd.pgd;
 }
 
+static inline pgdval_t pgd_flags(pgd_t pgd)
+{
+       return native_pgd_val(pgd) & PTE_FLAGS_MASK;
+}
+
 #if PAGETABLE_LEVELS >= 3
 #if PAGETABLE_LEVELS == 4
 typedef struct { pudval_t pud; } pud_t;
@@ -117,6 +122,11 @@ static inline pudval_t native_pud_val(pud_t pud)
 }
 #endif /* PAGETABLE_LEVELS == 4 */
 
+static inline pudval_t pud_flags(pud_t pud)
+{
+       return native_pud_val(pud) & PTE_FLAGS_MASK;
+}
+
 typedef struct { pmdval_t pmd; } pmd_t;
 
 static inline pmd_t native_make_pmd(pmdval_t val)
@@ -128,6 +138,7 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
 {
        return pmd.pmd;
 }
+
 #else  /* PAGETABLE_LEVELS == 2 */
 #include <asm-generic/pgtable-nopmd.h>
 
@@ -137,6 +148,11 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
 }
 #endif /* PAGETABLE_LEVELS >= 3 */
 
+static inline pmdval_t pmd_flags(pmd_t pmd)
+{
+       return native_pmd_val(pmd) & PTE_FLAGS_MASK;
+}
+
 static inline pte_t native_make_pte(pteval_t val)
 {
        return (pte_t) { .pte = val };
@@ -147,7 +163,7 @@ static inline pteval_t native_pte_val(pte_t pte)
        return pte.pte;
 }
 
-static inline pteval_t native_pte_flags(pte_t pte)
+static inline pteval_t pte_flags(pte_t pte)
 {
        return native_pte_val(pte) & PTE_FLAGS_MASK;
 }
@@ -173,7 +189,6 @@ static inline pteval_t native_pte_flags(pte_t pte)
 #endif
 
 #define pte_val(x)     native_pte_val(x)
-#define pte_flags(x)   native_pte_flags(x)
 #define __pte(x)       native_make_pte(x)
 
 #endif /* CONFIG_PARAVIRT */
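
With pte_flags() now defined directly as native_pte_val(pte) & PTE_FLAGS_MASK (and matching pgd/pud/pmd helpers added), flag tests no longer go through a paravirt hook. A small usage sketch; the helper shown is hypothetical, _PAGE_PRESENT and _PAGE_RW are the usual x86 flag bits:

/* Sketch: flag tests use the new *_flags() accessors directly. */
static inline int pte_writable_present(pte_t pte)
{
        pteval_t f = pte_flags(pte);

        return (f & _PAGE_PRESENT) && (f & _PAGE_RW);
}
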
index 5ebca29..e27fdbe 100644 (file)
@@ -13,8 +13,8 @@
 #define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
 #define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
 
-#define IRQSTACK_ORDER 2
-#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
+#define IRQ_STACK_ORDER 2
+#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
 
 #define STACKFAULT_STACK 1
 #define DOUBLEFAULT_STACK 2
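
Only the spelling changes here (IRQSTACK_* becomes IRQ_STACK_*); the sizes are unchanged. Worked through with 4 KB pages (editorial arithmetic):

/*
 * With PAGE_SIZE = 4096:
 *   IRQ_STACK_SIZE = PAGE_SIZE << IRQ_STACK_ORDER = 4096 << 2 = 16 KB
 *   DEBUG_STKSZ    = PAGE_SIZE << DEBUG_STACK_ORDER
 */
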
index c09a141..1c244b6 100644 (file)
 #define CLBR_EAX  (1 << 0)
 #define CLBR_ECX  (1 << 1)
 #define CLBR_EDX  (1 << 2)
+#define CLBR_EDI  (1 << 3)
 
-#ifdef CONFIG_X86_64
-#define CLBR_RSI  (1 << 3)
-#define CLBR_RDI  (1 << 4)
+#ifdef CONFIG_X86_32
+/* CLBR_ANY should match all regs platform has. For i386, that's just it */
+#define CLBR_ANY  ((1 << 4) - 1)
+
+#define CLBR_ARG_REGS  (CLBR_EAX | CLBR_EDX | CLBR_ECX)
+#define CLBR_RET_REG   (CLBR_EAX | CLBR_EDX)
+#define CLBR_SCRATCH   (0)
+#else
+#define CLBR_RAX  CLBR_EAX
+#define CLBR_RCX  CLBR_ECX
+#define CLBR_RDX  CLBR_EDX
+#define CLBR_RDI  CLBR_EDI
+#define CLBR_RSI  (1 << 4)
 #define CLBR_R8   (1 << 5)
 #define CLBR_R9   (1 << 6)
 #define CLBR_R10  (1 << 7)
 #define CLBR_R11  (1 << 8)
+
 #define CLBR_ANY  ((1 << 9) - 1)
+
+#define CLBR_ARG_REGS  (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
+                        CLBR_RCX | CLBR_R8 | CLBR_R9)
+#define CLBR_RET_REG   (CLBR_RAX)
+#define CLBR_SCRATCH   (CLBR_R10 | CLBR_R11)
+
 #include <asm/desc_defs.h>
-#else
-/* CLBR_ANY should match all regs platform has. For i386, that's just it */
-#define CLBR_ANY  ((1 << 3) - 1)
 #endif /* X86_64 */
 
+#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
+
 #ifndef __ASSEMBLY__
 #include <linux/types.h>
 #include <linux/cpumask.h>
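
Working the new masks through by hand shows what CLBR_CALLEE_SAVE ends up covering on each arch (editorial arithmetic, evaluating the definitions above):

/*
 * 32-bit: ARG_REGS = EAX|EDX|ECX, SCRATCH = 0, RET_REG = EAX|EDX
 *         CLBR_CALLEE_SAVE = (ARG_REGS | SCRATCH) & ~RET_REG = CLBR_ECX
 *
 * 64-bit: ARG_REGS = RDI|RSI|RDX|RCX|R8|R9, SCRATCH = R10|R11, RET_REG = RAX
 *         CLBR_CALLEE_SAVE = RDI|RSI|RDX|RCX|R8|R9|R10|R11
 *
 * i.e. exactly the registers PV_SAVE_ALL_CALLER_REGS pushes further down in
 * this patch: %ecx alone on 32-bit, rcx/rdx/rsi/rdi/r8-r11 on 64-bit.
 */
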
@@ -40,6 +57,14 @@ struct tss_struct;
 struct mm_struct;
 struct desc_struct;
 
+/*
+ * Wrapper type for pointers to code which uses the non-standard
+ * calling convention.  See PV_CALL_SAVE_REGS_THUNK below.
+ */
+struct paravirt_callee_save {
+       void *func;
+};
+
 /* general info */
 struct pv_info {
        unsigned int kernel_rpl;
@@ -189,11 +214,15 @@ struct pv_irq_ops {
         * expected to use X86_EFLAGS_IF; all other bits
         * returned from save_fl are undefined, and may be ignored by
         * restore_fl.
+        *
+        * NOTE: These functions callers expect the callee to preserve
+        * more registers than the standard C calling convention.
         */
-       unsigned long (*save_fl)(void);
-       void (*restore_fl)(unsigned long);
-       void (*irq_disable)(void);
-       void (*irq_enable)(void);
+       struct paravirt_callee_save save_fl;
+       struct paravirt_callee_save restore_fl;
+       struct paravirt_callee_save irq_disable;
+       struct paravirt_callee_save irq_enable;
+
        void (*safe_halt)(void);
        void (*halt)(void);
 
@@ -244,7 +273,8 @@ struct pv_mmu_ops {
        void (*flush_tlb_user)(void);
        void (*flush_tlb_kernel)(void);
        void (*flush_tlb_single)(unsigned long addr);
-       void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
+       void (*flush_tlb_others)(const struct cpumask *cpus,
+                                struct mm_struct *mm,
                                 unsigned long va);
 
        /* Hooks for allocating and freeing a pagetable top-level */
@@ -278,12 +308,11 @@ struct pv_mmu_ops {
        void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte);
 
-       pteval_t (*pte_val)(pte_t);
-       pteval_t (*pte_flags)(pte_t);
-       pte_t (*make_pte)(pteval_t pte);
+       struct paravirt_callee_save pte_val;
+       struct paravirt_callee_save make_pte;
 
-       pgdval_t (*pgd_val)(pgd_t);
-       pgd_t (*make_pgd)(pgdval_t pgd);
+       struct paravirt_callee_save pgd_val;
+       struct paravirt_callee_save make_pgd;
 
 #if PAGETABLE_LEVELS >= 3
 #ifdef CONFIG_X86_PAE
@@ -298,12 +327,12 @@ struct pv_mmu_ops {
 
        void (*set_pud)(pud_t *pudp, pud_t pudval);
 
-       pmdval_t (*pmd_val)(pmd_t);
-       pmd_t (*make_pmd)(pmdval_t pmd);
+       struct paravirt_callee_save pmd_val;
+       struct paravirt_callee_save make_pmd;
 
 #if PAGETABLE_LEVELS == 4
-       pudval_t (*pud_val)(pud_t);
-       pud_t (*make_pud)(pudval_t pud);
+       struct paravirt_callee_save pud_val;
+       struct paravirt_callee_save make_pud;
 
        void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
 #endif /* PAGETABLE_LEVELS == 4 */
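
Converting these slots from raw function pointers to struct paravirt_callee_save means a backend now installs a wrapped pointer. A hedged sketch of what filling one in looks like; xen_pte_val is used purely as an illustrative backend name and example_install_pv_mmu_ops is hypothetical, the wiring is not part of this commit:

static pteval_t xen_pte_val(pte_t pte);      /* illustrative backend implementation */
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);      /* thunk macro added later in this patch */

static void __init example_install_pv_mmu_ops(void)
{
        /* backend with the normal C calling convention: wrap it in a thunk */
        pv_mmu_ops.pte_val = PV_CALLEE_SAVE(xen_pte_val);

        /* identity/native case: promise the convention is already right, e.g. */
        /* pv_mmu_ops.make_pte = __PV_IS_CALLEE_SAVE(_paravirt_ident_64); */
}
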
@@ -388,6 +417,8 @@ extern struct pv_lock_ops pv_lock_ops;
        asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
 
 unsigned paravirt_patch_nop(void);
+unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
+unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
 unsigned paravirt_patch_ignore(unsigned len);
 unsigned paravirt_patch_call(void *insnbuf,
                             const void *target, u16 tgt_clobbers,
@@ -479,25 +510,45 @@ int paravirt_disable_iospace(void);
  * makes sure the incoming and outgoing types are always correct.
  */
 #ifdef CONFIG_X86_32
-#define PVOP_VCALL_ARGS                        unsigned long __eax, __edx, __ecx
+#define PVOP_VCALL_ARGS                                \
+       unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
 #define PVOP_CALL_ARGS                 PVOP_VCALL_ARGS
+
+#define PVOP_CALL_ARG1(x)              "a" ((unsigned long)(x))
+#define PVOP_CALL_ARG2(x)              "d" ((unsigned long)(x))
+#define PVOP_CALL_ARG3(x)              "c" ((unsigned long)(x))
+
 #define PVOP_VCALL_CLOBBERS            "=a" (__eax), "=d" (__edx),     \
                                        "=c" (__ecx)
 #define PVOP_CALL_CLOBBERS             PVOP_VCALL_CLOBBERS
+
+#define PVOP_VCALLEE_CLOBBERS          "=a" (__eax), "=d" (__edx)
+#define PVOP_CALLEE_CLOBBERS           PVOP_VCALLEE_CLOBBERS
+
 #define EXTRA_CLOBBERS
 #define VEXTRA_CLOBBERS
-#else
-#define PVOP_VCALL_ARGS                unsigned long __edi, __esi, __edx, __ecx
+#else  /* CONFIG_X86_64 */
+#define PVOP_VCALL_ARGS                                        \
+       unsigned long __edi = __edi, __esi = __esi,     \
+               __edx = __edx, __ecx = __ecx
 #define PVOP_CALL_ARGS         PVOP_VCALL_ARGS, __eax
+
+#define PVOP_CALL_ARG1(x)              "D" ((unsigned long)(x))
+#define PVOP_CALL_ARG2(x)              "S" ((unsigned long)(x))
+#define PVOP_CALL_ARG3(x)              "d" ((unsigned long)(x))
+#define PVOP_CALL_ARG4(x)              "c" ((unsigned long)(x))
+
 #define PVOP_VCALL_CLOBBERS    "=D" (__edi),                           \
                                "=S" (__esi), "=d" (__edx),             \
                                "=c" (__ecx)
-
 #define PVOP_CALL_CLOBBERS     PVOP_VCALL_CLOBBERS, "=a" (__eax)
 
+#define PVOP_VCALLEE_CLOBBERS  "=a" (__eax)
+#define PVOP_CALLEE_CLOBBERS   PVOP_VCALLEE_CLOBBERS
+
 #define EXTRA_CLOBBERS  , "r8", "r9", "r10", "r11"
 #define VEXTRA_CLOBBERS         , "rax", "r8", "r9", "r10", "r11"
-#endif
+#endif /* CONFIG_X86_32 */
 
 #ifdef CONFIG_PARAVIRT_DEBUG
 #define PVOP_TEST_NULL(op)     BUG_ON(op == NULL)
@@ -505,10 +556,11 @@ int paravirt_disable_iospace(void);
 #define PVOP_TEST_NULL(op)     ((void)op)
 #endif
 
-#define __PVOP_CALL(rettype, op, pre, post, ...)                       \
+#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,                \
+                     pre, post, ...)                                   \
        ({                                                              \
                rettype __ret;                                          \
-               PVOP_CALL_ARGS;                                 \
+               PVOP_CALL_ARGS;                                         \
                PVOP_TEST_NULL(op);                                     \
                /* This is 32-bit specific, but is okay in 64-bit */    \
                /* since this condition will never hold */              \
@@ -516,70 +568,113 @@ int paravirt_disable_iospace(void);
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
-                                    : PVOP_CALL_CLOBBERS               \
+                                    : call_clbr                        \
                                     : paravirt_type(op),               \
-                                      paravirt_clobber(CLBR_ANY),      \
+                                      paravirt_clobber(clbr),          \
                                       ##__VA_ARGS__                    \
-                                    : "memory", "cc" EXTRA_CLOBBERS);  \
+                                    : "memory", "cc" extra_clbr);      \
                        __ret = (rettype)((((u64)__edx) << 32) | __eax); \
                } else {                                                \
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
-                                    : PVOP_CALL_CLOBBERS               \
+                                    : call_clbr                        \
                                     : paravirt_type(op),               \
-                                      paravirt_clobber(CLBR_ANY),      \
+                                      paravirt_clobber(clbr),          \
                                       ##__VA_ARGS__                    \
-                                    : "memory", "cc" EXTRA_CLOBBERS);  \
+                                    : "memory", "cc" extra_clbr);      \
                        __ret = (rettype)__eax;                         \
                }                                                       \
                __ret;                                                  \
        })
-#define __PVOP_VCALL(op, pre, post, ...)                               \
+
+#define __PVOP_CALL(rettype, op, pre, post, ...)                       \
+       ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,        \
+                     EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
+
+#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...)                 \
+       ____PVOP_CALL(rettype, op.func, CLBR_RET_REG,                   \
+                     PVOP_CALLEE_CLOBBERS, ,                           \
+                     pre, post, ##__VA_ARGS__)
+
+
+#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...)        \
        ({                                                              \
                PVOP_VCALL_ARGS;                                        \
                PVOP_TEST_NULL(op);                                     \
                asm volatile(pre                                        \
                             paravirt_alt(PARAVIRT_CALL)                \
                             post                                       \
-                            : PVOP_VCALL_CLOBBERS                      \
+                            : call_clbr                                \
                             : paravirt_type(op),                       \
-                              paravirt_clobber(CLBR_ANY),              \
+                              paravirt_clobber(clbr),                  \
                               ##__VA_ARGS__                            \
-                            : "memory", "cc" VEXTRA_CLOBBERS);         \
+                            : "memory", "cc" extra_clbr);              \
        })
 
+#define __PVOP_VCALL(op, pre, post, ...)                               \
+       ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS,               \
+                      VEXTRA_CLOBBERS,                                 \
+                      pre, post, ##__VA_ARGS__)
+
+#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...)                        \
+       ____PVOP_CALL(rettype, op.func, CLBR_RET_REG,                   \
+                     PVOP_VCALLEE_CLOBBERS, ,                          \
+                     pre, post, ##__VA_ARGS__)
+
+
+
 #define PVOP_CALL0(rettype, op)                                                \
        __PVOP_CALL(rettype, op, "", "")
 #define PVOP_VCALL0(op)                                                        \
        __PVOP_VCALL(op, "", "")
 
+#define PVOP_CALLEE0(rettype, op)                                      \
+       __PVOP_CALLEESAVE(rettype, op, "", "")
+#define PVOP_VCALLEE0(op)                                              \
+       __PVOP_VCALLEESAVE(op, "", "")
+
+
 #define PVOP_CALL1(rettype, op, arg1)                                  \
-       __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)))
+       __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
 #define PVOP_VCALL1(op, arg1)                                          \
-       __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)))
+       __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
+
+#define PVOP_CALLEE1(rettype, op, arg1)                                        \
+       __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
+#define PVOP_VCALLEE1(op, arg1)                                                \
+       __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
+
 
 #define PVOP_CALL2(rettype, op, arg1, arg2)                            \
-       __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
-       "1" ((unsigned long)(arg2)))
+       __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),          \
+                   PVOP_CALL_ARG2(arg2))
 #define PVOP_VCALL2(op, arg1, arg2)                                    \
-       __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
-       "1" ((unsigned long)(arg2)))
+       __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),                  \
+                    PVOP_CALL_ARG2(arg2))
+
+#define PVOP_CALLEE2(rettype, op, arg1, arg2)                          \
+       __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1),    \
+                         PVOP_CALL_ARG2(arg2))
+#define PVOP_VCALLEE2(op, arg1, arg2)                                  \
+       __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1),            \
+                          PVOP_CALL_ARG2(arg2))
+
 
 #define PVOP_CALL3(rettype, op, arg1, arg2, arg3)                      \
-       __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
-       "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
+       __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),          \
+                   PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
 #define PVOP_VCALL3(op, arg1, arg2, arg3)                              \
-       __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
-       "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
+       __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),                  \
+                    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
 
 /* This is the only difference in x86_64. We can make it much simpler */
 #ifdef CONFIG_X86_32
 #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                        \
        __PVOP_CALL(rettype, op,                                        \
                    "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
-                   "0" ((u32)(arg1)), "1" ((u32)(arg2)),               \
-                   "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
+                   PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),         \
+                   PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
 #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                                \
        __PVOP_VCALL(op,                                                \
                    "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
@@ -587,13 +682,13 @@ int paravirt_disable_iospace(void);
                    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
 #else
 #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                        \
-       __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
-       "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)),         \
-       "3"((unsigned long)(arg4)))
+       __PVOP_CALL(rettype, op, "", "",                                \
+                   PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),         \
+                   PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
 #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                                \
-       __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
-       "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)),         \
-       "3"((unsigned long)(arg4)))
+       __PVOP_VCALL(op, "", "",                                        \
+                    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),        \
+                    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
 #endif
 
 static inline int paravirt_enabled(void)
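
The net effect of the ____PVOP_CALL split is that the CALLEE variants emit the same patchable indirect call but tell the compiler only the return register(s) are clobbered. Roughly, in compiler-visible terms (a conceptual sketch, not the literal macro expansion):

/*
 * PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val, x) behaves roughly like:
 *
 *   asm volatile(PARAVIRT_CALL           // patchable indirect call site
 *                : "=a" (ret)            // only the return reg(s) clobbered
 *                : paravirt_type(pv_mmu_ops.pte_val.func),
 *                  paravirt_clobber(CLBR_RET_REG),
 *                  "D" (x)               // arg1 in %rdi on 64-bit, %eax on 32-bit
 *                : "memory", "cc");
 *
 * whereas PVOP_CALL1() also clobbers all argument and scratch registers.
 */
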
@@ -984,10 +1079,11 @@ static inline void __flush_tlb_single(unsigned long addr)
        PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
 }
 
-static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+static inline void flush_tlb_others(const struct cpumask *cpumask,
+                                   struct mm_struct *mm,
                                    unsigned long va)
 {
-       PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
+       PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
 }
 
 static inline int paravirt_pgd_alloc(struct mm_struct *mm)
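
flush_tlb_others() now takes a const struct cpumask * instead of a cpumask_t by value, so the wrapper can forward the pointer straight into the PVOP call. A caller-side sketch of the before/after (example_flush is hypothetical):

/* Sketch: call sites pass a pointer instead of copying the whole mask. */
static void example_flush(struct mm_struct *mm, unsigned long va)
{
        /* old: flush_tlb_others(mm->cpu_vm_mask, mm, va);   -- mask copied */
        flush_tlb_others(&mm->cpu_vm_mask, mm, va);           /* new: pointer */
}
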
@@ -1059,13 +1155,13 @@ static inline pte_t __pte(pteval_t val)
        pteval_t ret;
 
        if (sizeof(pteval_t) > sizeof(long))
-               ret = PVOP_CALL2(pteval_t,
-                                pv_mmu_ops.make_pte,
-                                val, (u64)val >> 32);
+               ret = PVOP_CALLEE2(pteval_t,
+                                  pv_mmu_ops.make_pte,
+                                  val, (u64)val >> 32);
        else
-               ret = PVOP_CALL1(pteval_t,
-                                pv_mmu_ops.make_pte,
-                                val);
+               ret = PVOP_CALLEE1(pteval_t,
+                                  pv_mmu_ops.make_pte,
+                                  val);
 
        return (pte_t) { .pte = ret };
 }
@@ -1075,29 +1171,12 @@ static inline pteval_t pte_val(pte_t pte)
        pteval_t ret;
 
        if (sizeof(pteval_t) > sizeof(long))
-               ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val,
-                                pte.pte, (u64)pte.pte >> 32);
-       else
-               ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val,
-                                pte.pte);
-
-       return ret;
-}
-
-static inline pteval_t pte_flags(pte_t pte)
-{
-       pteval_t ret;
-
-       if (sizeof(pteval_t) > sizeof(long))
-               ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
-                                pte.pte, (u64)pte.pte >> 32);
+               ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
+                                  pte.pte, (u64)pte.pte >> 32);
        else
-               ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags,
-                                pte.pte);
+               ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
+                                  pte.pte);
 
-#ifdef CONFIG_PARAVIRT_DEBUG
-       BUG_ON(ret & PTE_PFN_MASK);
-#endif
        return ret;
 }
 
@@ -1106,11 +1185,11 @@ static inline pgd_t __pgd(pgdval_t val)
        pgdval_t ret;
 
        if (sizeof(pgdval_t) > sizeof(long))
-               ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd,
-                                val, (u64)val >> 32);
+               ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
+                                  val, (u64)val >> 32);
        else
-               ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd,
-                                val);
+               ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
+                                  val);
 
        return (pgd_t) { ret };
 }
@@ -1120,11 +1199,11 @@ static inline pgdval_t pgd_val(pgd_t pgd)
        pgdval_t ret;
 
        if (sizeof(pgdval_t) > sizeof(long))
-               ret =  PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val,
-                                 pgd.pgd, (u64)pgd.pgd >> 32);
+               ret =  PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
+                                   pgd.pgd, (u64)pgd.pgd >> 32);
        else
-               ret =  PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val,
-                                 pgd.pgd);
+               ret =  PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
+                                   pgd.pgd);
 
        return ret;
 }
@@ -1188,11 +1267,11 @@ static inline pmd_t __pmd(pmdval_t val)
        pmdval_t ret;
 
        if (sizeof(pmdval_t) > sizeof(long))
-               ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
-                                val, (u64)val >> 32);
+               ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
+                                  val, (u64)val >> 32);
        else
-               ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
-                                val);
+               ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
+                                  val);
 
        return (pmd_t) { ret };
 }
@@ -1202,11 +1281,11 @@ static inline pmdval_t pmd_val(pmd_t pmd)
        pmdval_t ret;
 
        if (sizeof(pmdval_t) > sizeof(long))
-               ret =  PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
-                                 pmd.pmd, (u64)pmd.pmd >> 32);
+               ret =  PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
+                                   pmd.pmd, (u64)pmd.pmd >> 32);
        else
-               ret =  PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
-                                 pmd.pmd);
+               ret =  PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
+                                   pmd.pmd);
 
        return ret;
 }
@@ -1228,11 +1307,11 @@ static inline pud_t __pud(pudval_t val)
        pudval_t ret;
 
        if (sizeof(pudval_t) > sizeof(long))
-               ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud,
-                                val, (u64)val >> 32);
+               ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
+                                  val, (u64)val >> 32);
        else
-               ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud,
-                                val);
+               ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
+                                  val);
 
        return (pud_t) { ret };
 }
@@ -1242,11 +1321,11 @@ static inline pudval_t pud_val(pud_t pud)
        pudval_t ret;
 
        if (sizeof(pudval_t) > sizeof(long))
-               ret =  PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val,
-                                 pud.pud, (u64)pud.pud >> 32);
+               ret =  PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
+                                   pud.pud, (u64)pud.pud >> 32);
        else
-               ret =  PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val,
-                                 pud.pud);
+               ret =  PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
+                                   pud.pud);
 
        return ret;
 }
@@ -1387,9 +1466,10 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 }
 
 void _paravirt_nop(void);
-#define paravirt_nop   ((void *)_paravirt_nop)
+u32 _paravirt_ident_32(u32);
+u64 _paravirt_ident_64(u64);
 
-void paravirt_use_bytelocks(void);
+#define paravirt_nop   ((void *)_paravirt_nop)
 
 #ifdef CONFIG_SMP
 
@@ -1439,12 +1519,37 @@ extern struct paravirt_patch_site __parainstructions[],
        __parainstructions_end[];
 
 #ifdef CONFIG_X86_32
-#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;"
-#define PV_RESTORE_REGS "popl %%edx; popl %%ecx"
+#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
+#define PV_RESTORE_REGS "popl %edx; popl %ecx;"
+
+/* save and restore all caller-save registers, except return value */
+#define PV_SAVE_ALL_CALLER_REGS                "pushl %ecx;"
+#define PV_RESTORE_ALL_CALLER_REGS     "popl  %ecx;"
+
 #define PV_FLAGS_ARG "0"
 #define PV_EXTRA_CLOBBERS
 #define PV_VEXTRA_CLOBBERS
 #else
+/* save and restore all caller-save registers, except return value */
+#define PV_SAVE_ALL_CALLER_REGS                                                \
+       "push %rcx;"                                                    \
+       "push %rdx;"                                                    \
+       "push %rsi;"                                                    \
+       "push %rdi;"                                                    \
+       "push %r8;"                                                     \
+       "push %r9;"                                                     \
+       "push %r10;"                                                    \
+       "push %r11;"
+#define PV_RESTORE_ALL_CALLER_REGS                                     \
+       "pop %r11;"                                                     \
+       "pop %r10;"                                                     \
+       "pop %r9;"                                                      \
+       "pop %r8;"                                                      \
+       "pop %rdi;"                                                     \
+       "pop %rsi;"                                                     \
+       "pop %rdx;"                                                     \
+       "pop %rcx;"
+
 /* We save some registers, but all of them, that's too much. We clobber all
  * caller saved registers but the argument parameter */
 #define PV_SAVE_REGS "pushq %%rdi;"
@@ -1454,52 +1559,76 @@ extern struct paravirt_patch_site __parainstructions[],
 #define PV_FLAGS_ARG "D"
 #endif
 
+/*
+ * Generate a thunk around a function which saves all caller-save
+ * registers except for the return value.  This allows C functions to
+ * be called from assembler code where fewer than normal registers are
+ * available.  It may also help code generation around calls from C
+ * code if the common case doesn't use many registers.
+ *
+ * When a callee is wrapped in a thunk, the caller can assume that all
+ * arg regs and all scratch registers are preserved across the
+ * call. The return value in rax/eax will not be saved, even for void
+ * functions.
+ */
+#define PV_CALLEE_SAVE_REGS_THUNK(func)                                        \
+       extern typeof(func) __raw_callee_save_##func;                   \
+       static void *__##func##__ __used = func;                        \
+                                                                       \
+       asm(".pushsection .text;"                                       \
+           "__raw_callee_save_" #func ": "                             \
+           PV_SAVE_ALL_CALLER_REGS                                     \
+           "call " #func ";"                                           \
+           PV_RESTORE_ALL_CALLER_REGS                                  \
+           "ret;"                                                      \
+           ".popsection")
+
+/* Get a reference to a callee-save function */
+#define PV_CALLEE_SAVE(func)                                           \
+       ((struct paravirt_callee_save) { __raw_callee_save_##func })
+
+/* Promise that "func" already uses the right calling convention */
+#define __PV_IS_CALLEE_SAVE(func)                      \
+       ((struct paravirt_callee_save) { func })
+
 static inline unsigned long __raw_local_save_flags(void)
 {
        unsigned long f;
 
-       asm volatile(paravirt_alt(PV_SAVE_REGS
-                                 PARAVIRT_CALL
-                                 PV_RESTORE_REGS)
+       asm volatile(paravirt_alt(PARAVIRT_CALL)
                     : "=a"(f)
                     : paravirt_type(pv_irq_ops.save_fl),
                       paravirt_clobber(CLBR_EAX)
-                    : "memory", "cc" PV_VEXTRA_CLOBBERS);
+                    : "memory", "cc");
        return f;
 }
 
 static inline void raw_local_irq_restore(unsigned long f)
 {
-       asm volatile(paravirt_alt(PV_SAVE_REGS
-                                 PARAVIRT_CALL
-                                 PV_RESTORE_REGS)
+       asm volatile(paravirt_alt(PARAVIRT_CALL)
                     : "=a"(f)
                     : PV_FLAGS_ARG(f),
                       paravirt_type(pv_irq_ops.restore_fl),
                       paravirt_clobber(CLBR_EAX)
-                    : "memory", "cc" PV_EXTRA_CLOBBERS);
+                    : "memory", "cc");
 }
 
 static inline void raw_local_irq_disable(void)
 {
-       asm volatile(paravirt_alt(PV_SAVE_REGS
-                                 PARAVIRT_CALL
-                                 PV_RESTORE_REGS)
+       asm volatile(paravirt_alt(PARAVIRT_CALL)
                     :
                     : paravirt_type(pv_irq_ops.irq_disable),
                       paravirt_clobber(CLBR_EAX)
-                    : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
+                    : "memory", "eax", "cc");
 }
 
 static inline void raw_local_irq_enable(void)
 {
-       asm volatile(paravirt_alt(PV_SAVE_REGS
-                                 PARAVIRT_CALL
-                                 PV_RESTORE_REGS)
+       asm volatile(paravirt_alt(PARAVIRT_CALL)
                     :
                     : paravirt_type(pv_irq_ops.irq_enable),
                       paravirt_clobber(CLBR_EAX)
-                    : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
+                    : "memory", "eax", "cc");
 }
 
 static inline unsigned long __raw_local_irq_save(void)
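
The thunk macro is what lets a plain C implementation be called with the stripped-down convention the new pv_irq_ops slots expect. A minimal sketch of wrapping and registering one; my_save_fl and example_register are hypothetical stand-ins for a backend's code:

/* Sketch: wrap a C flags-save routine for the callee-save convention. */
static unsigned long my_save_fl(void)
{
        unsigned long flags;

        asm volatile("pushf ; pop %0" : "=rm" (flags) : : "memory");
        return flags;
}
PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);

static void __init example_register(void)
{
        pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);
}
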
@@ -1542,33 +1671,49 @@ static inline unsigned long __raw_local_irq_save(void)
        .popsection
 
 
+#define COND_PUSH(set, mask, reg)                      \
+       .if ((~(set)) & mask); push %reg; .endif
+#define COND_POP(set, mask, reg)                       \
+       .if ((~(set)) & mask); pop %reg; .endif
+
 #ifdef CONFIG_X86_64
-#define PV_SAVE_REGS                           \
-       push %rax;                              \
-       push %rcx;                              \
-       push %rdx;                              \
-       push %rsi;                              \
-       push %rdi;                              \
-       push %r8;                               \
-       push %r9;                               \
-       push %r10;                              \
-       push %r11
-#define PV_RESTORE_REGS                                \
-       pop %r11;                               \
-       pop %r10;                               \
-       pop %r9;                                \
-       pop %r8;                                \
-       pop %rdi;                               \
-       pop %rsi;                               \
-       pop %rdx;                               \
-       pop %rcx;                               \
-       pop %rax
+
+#define PV_SAVE_REGS(set)                      \
+       COND_PUSH(set, CLBR_RAX, rax);          \
+       COND_PUSH(set, CLBR_RCX, rcx);          \
+       COND_PUSH(set, CLBR_RDX, rdx);          \
+       COND_PUSH(set, CLBR_RSI, rsi);          \
+       COND_PUSH(set, CLBR_RDI, rdi);          \
+       COND_PUSH(set, CLBR_R8, r8);            \
+       COND_PUSH(set, CLBR_R9, r9);            \
+       COND_PUSH(set, CLBR_R10, r10);          \
+       COND_PUSH(set, CLBR_R11, r11)
+#define PV_RESTORE_REGS(set)                   \
+       COND_POP(set, CLBR_R11, r11);           \
+       COND_POP(set, CLBR_R10, r10);           \
+       COND_POP(set, CLBR_R9, r9);             \
+       COND_POP(set, CLBR_R8, r8);             \
+       COND_POP(set, CLBR_RDI, rdi);           \
+       COND_POP(set, CLBR_RSI, rsi);           \
+       COND_POP(set, CLBR_RDX, rdx);           \
+       COND_POP(set, CLBR_RCX, rcx);           \
+       COND_POP(set, CLBR_RAX, rax)
+
 #define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
 #define PARA_INDIRECT(addr)    *addr(%rip)
 #else
-#define PV_SAVE_REGS   pushl %eax; pushl %edi; pushl %ecx; pushl %edx
-#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax
+#define PV_SAVE_REGS(set)                      \
+       COND_PUSH(set, CLBR_EAX, eax);          \
+       COND_PUSH(set, CLBR_EDI, edi);          \
+       COND_PUSH(set, CLBR_ECX, ecx);          \
+       COND_PUSH(set, CLBR_EDX, edx)
+#define PV_RESTORE_REGS(set)                   \
+       COND_POP(set, CLBR_EDX, edx);           \
+       COND_POP(set, CLBR_ECX, ecx);           \
+       COND_POP(set, CLBR_EDI, edi);           \
+       COND_POP(set, CLBR_EAX, eax)
+
 #define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
 #define PARA_INDIRECT(addr)    *%cs:addr
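
COND_PUSH/COND_POP make the assembler-side save/restore conditional on the clobber set: a register is pushed only if its CLBR_* bit is not in the set, i.e. only registers the call site has not already declared clobbered need preserving. Working the DISABLE_INTERRUPTS cases through (editorial, just evaluating the macros):

/*
 * DISABLE_INTERRUPTS(CLBR_ANY):
 *   set = CLBR_ANY | CLBR_CALLEE_SAVE  -> every CLBR_* bit is present,
 *   so no COND_PUSH fires and nothing is saved around the call.
 *
 * DISABLE_INTERRUPTS(CLBR_NONE):
 *   set = CLBR_CALLEE_SAVE             -> the return register is not in the set,
 *   so only the registers outside it are pushed: %rax on 64-bit,
 *   %eax, %edi and %edx on 32-bit.
 */
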
@@ -1580,15 +1725,15 @@ static inline unsigned long __raw_local_irq_save(void)
 
 #define DISABLE_INTERRUPTS(clobbers)                                   \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
-                 PV_SAVE_REGS;                                         \
+                 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);    \
-                 PV_RESTORE_REGS;)                     \
+                 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
 
 #define ENABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
-                 PV_SAVE_REGS;                                         \
+                 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);     \
-                 PV_RESTORE_REGS;)
+                 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
 
 #define USERGS_SYSRET32                                                        \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),       \
@@ -1618,11 +1763,15 @@ static inline unsigned long __raw_local_irq_save(void)
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  swapgs)
 
+/*
+ * Note: swapgs is very special, and in practise is either going to be
+ * implemented with a single "swapgs" instruction or something very
+ * special.  Either way, we don't need to save any registers for
+ * it.
+ */
 #define SWAPGS                                                         \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
-                 PV_SAVE_REGS;                                         \
-                 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs);         \
-                 PV_RESTORE_REGS                                       \
+                 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)          \
                 )
 
 #define GET_CR2_INTO_RCX                               \
index b8493b3..9709fdf 100644 (file)
@@ -5,10 +5,8 @@
 
 #ifdef CONFIG_X86_PAT
 extern int pat_enabled;
-extern void validate_pat_support(struct cpuinfo_x86 *c);
 #else
 static const int pat_enabled;
-static inline void validate_pat_support(struct cpuinfo_x86 *c) { }
 #endif
 
 extern void pat_init(void);
@@ -17,6 +15,4 @@ extern int reserve_memtype(u64 start, u64 end,
                unsigned long req_type, unsigned long *ret_type);
 extern int free_memtype(u64 start, u64 end);
 
-extern void pat_disable(char *reason);
-
 #endif /* _ASM_X86_PAT_H */
diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h
deleted file mode 100644 (file)
index 2fbfff8..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-#ifndef _ASM_X86_PDA_H
-#define _ASM_X86_PDA_H
-
-#ifndef __ASSEMBLY__
-#include <linux/stddef.h>
-#include <linux/types.h>
-#include <linux/cache.h>
-#include <asm/page.h>
-
-/* Per processor datastructure. %gs points to it while the kernel runs */
-struct x8664_pda {
-       struct task_struct *pcurrent;   /* 0  Current process */
-       unsigned long data_offset;      /* 8 Per cpu data offset from linker
-                                          address */
-       unsigned long kernelstack;      /* 16 top of kernel stack for current */
-       unsigned long oldrsp;           /* 24 user rsp for system call */
-       int irqcount;                   /* 32 Irq nesting counter. Starts -1 */
-       unsigned int cpunumber;         /* 36 Logical CPU number */
-#ifdef CONFIG_CC_STACKPROTECTOR
-       unsigned long stack_canary;     /* 40 stack canary value */
-                                       /* gcc-ABI: this canary MUST be at
-                                          offset 40!!! */
-#endif
-       char *irqstackptr;
-       short nodenumber;               /* number of current node (32k max) */
-       short in_bootmem;               /* pda lives in bootmem */
-       unsigned int __softirq_pending;
-       unsigned int __nmi_count;       /* number of NMI on this CPUs */
-       short mmu_state;
-       short isidle;
-       struct mm_struct *active_mm;
-       unsigned apic_timer_irqs;
-       unsigned irq0_irqs;
-       unsigned irq_resched_count;
-       unsigned irq_call_count;
-       unsigned irq_tlb_count;
-       unsigned irq_thermal_count;
-       unsigned irq_threshold_count;
-       unsigned irq_spurious_count;
-} ____cacheline_aligned_in_smp;
-
-extern struct x8664_pda **_cpu_pda;
-extern void pda_init(int);
-
-#define cpu_pda(i) (_cpu_pda[i])
-
-/*
- * There is no fast way to get the base address of the PDA, all the accesses
- * have to mention %fs/%gs.  So it needs to be done this Torvaldian way.
- */
-extern void __bad_pda_field(void) __attribute__((noreturn));
-
-/*
- * proxy_pda doesn't actually exist, but tell gcc it is accessed for
- * all PDA accesses so it gets read/write dependencies right.
- */
-extern struct x8664_pda _proxy_pda;
-
-#define pda_offset(field) offsetof(struct x8664_pda, field)
-
-#define pda_to_op(op, field, val)                                      \
-do {                                                                   \
-       typedef typeof(_proxy_pda.field) T__;                           \
-       if (0) { T__ tmp__; tmp__ = (val); }    /* type checking */     \
-       switch (sizeof(_proxy_pda.field)) {                             \
-       case 2:                                                         \
-               asm(op "w %1,%%gs:%c2" :                                \
-                   "+m" (_proxy_pda.field) :                           \
-                   "ri" ((T__)val),                                    \
-                   "i"(pda_offset(field)));                            \
-               break;                                                  \
-       case 4:                                                         \
-               asm(op "l %1,%%gs:%c2" :                                \
-                   "+m" (_proxy_pda.field) :                           \
-                   "ri" ((T__)val),                                    \
-                   "i" (pda_offset(field)));                           \
-               break;                                                  \
-       case 8:                                                         \
-               asm(op "q %1,%%gs:%c2":                                 \
-                   "+m" (_proxy_pda.field) :                           \
-                   "ri" ((T__)val),                                    \
-                   "i"(pda_offset(field)));                            \
-               break;                                                  \
-       default:                                                        \
-               __bad_pda_field();                                      \
-       }                                                               \
-} while (0)
-
-#define pda_from_op(op, field)                 \
-({                                             \
-       typeof(_proxy_pda.field) ret__;         \
-       switch (sizeof(_proxy_pda.field)) {     \
-       case 2:                                 \
-               asm(op "w %%gs:%c1,%0" :        \
-                   "=r" (ret__) :              \
-                   "i" (pda_offset(field)),    \
-                   "m" (_proxy_pda.field));    \
-               break;                          \
-       case 4:                                 \
-               asm(op "l %%gs:%c1,%0":         \
-                   "=r" (ret__):               \
-                   "i" (pda_offset(field)),    \
-                   "m" (_proxy_pda.field));    \
-               break;                          \
-       case 8:                                 \
-               asm(op "q %%gs:%c1,%0":         \
-                   "=r" (ret__) :              \
-                   "i" (pda_offset(field)),    \
-                   "m" (_proxy_pda.field));    \
-               break;                          \
-       default:                                \
-               __bad_pda_field();              \
-       }                                       \
-       ret__;                                  \
-})
-
-#define read_pda(field)                pda_from_op("mov", field)
-#define write_pda(field, val)  pda_to_op("mov", field, val)
-#define add_pda(field, val)    pda_to_op("add", field, val)
-#define sub_pda(field, val)    pda_to_op("sub", field, val)
-#define or_pda(field, val)     pda_to_op("or", field, val)
-
-/* This is not atomic against other CPUs -- CPU preemption needs to be off */
-#define test_and_clear_bit_pda(bit, field)                             \
-({                                                                     \
-       int old__;                                                      \
-       asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0"                    \
-                    : "=r" (old__), "+m" (_proxy_pda.field)            \
-                    : "dIr" (bit), "i" (pda_offset(field)) : "memory");\
-       old__;                                                          \
-})
-
-#endif
-
-#define PDA_STACKOFFSET (5*8)
-
-#endif /* _ASM_X86_PDA_H */
index ece7205..aee103b 100644 (file)
@@ -2,53 +2,12 @@
 #define _ASM_X86_PERCPU_H
 
 #ifdef CONFIG_X86_64
-#include <linux/compiler.h>
-
-/* Same as asm-generic/percpu.h, except that we store the per cpu offset
-   in the PDA. Longer term the PDA and every per cpu variable
-   should be just put into a single section and referenced directly
-   from %gs */
-
-#ifdef CONFIG_SMP
-#include <asm/pda.h>
-
-#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
-#define __my_cpu_offset read_pda(data_offset)
-
-#define per_cpu_offset(x) (__per_cpu_offset(x))
-
+#define __percpu_seg           gs
+#define __percpu_mov_op                movq
+#else
+#define __percpu_seg           fs
+#define __percpu_mov_op                movl
 #endif
-#include <asm-generic/percpu.h>
-
-DECLARE_PER_CPU(struct x8664_pda, pda);
-
-/*
- * These are supposed to be implemented as a single instruction which
- * operates on the per-cpu data base segment.  x86-64 doesn't have
- * that yet, so this is a fairly inefficient workaround for the
- * meantime.  The single instruction is atomic with respect to
- * preemption and interrupts, so we need to explicitly disable
- * interrupts here to achieve the same effect.  However, because it
- * can be used from within interrupt-disable/enable, we can't actually
- * disable interrupts; disabling preemption is enough.
- */
-#define x86_read_percpu(var)                                           \
-       ({                                                              \
-               typeof(per_cpu_var(var)) __tmp;                         \
-               preempt_disable();                                      \
-               __tmp = __get_cpu_var(var);                             \
-               preempt_enable();                                       \
-               __tmp;                                                  \
-       })
-
-#define x86_write_percpu(var, val)                                     \
-       do {                                                            \
-               preempt_disable();                                      \
-               __get_cpu_var(var) = (val);                             \
-               preempt_enable();                                       \
-       } while(0)
-
-#else /* CONFIG_X86_64 */
 
 #ifdef __ASSEMBLY__
 
@@ -65,47 +24,48 @@ DECLARE_PER_CPU(struct x8664_pda, pda);
  *    PER_CPU(cpu_gdt_descr, %ebx)
  */
 #ifdef CONFIG_SMP
-#define PER_CPU(var, reg)                              \
-       movl %fs:per_cpu__##this_cpu_off, reg;          \
+#define PER_CPU(var, reg)                                              \
+       __percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg;       \
        lea per_cpu__##var(reg), reg
-#define PER_CPU_VAR(var)       %fs:per_cpu__##var
+#define PER_CPU_VAR(var)       %__percpu_seg:per_cpu__##var
 #else /* ! SMP */
-#define PER_CPU(var, reg)                      \
-       movl $per_cpu__##var, reg
+#define PER_CPU(var, reg)                                              \
+       __percpu_mov_op $per_cpu__##var, reg
 #define PER_CPU_VAR(var)       per_cpu__##var
 #endif /* SMP */
 
+#ifdef CONFIG_X86_64_SMP
+#define INIT_PER_CPU_VAR(var)  init_per_cpu__##var
+#else
+#define INIT_PER_CPU_VAR(var)  per_cpu__##var
+#endif
+
 #else /* ...!ASSEMBLY */
 
+#include <linux/stringify.h>
+
+#ifdef CONFIG_SMP
+#define __percpu_arg(x)                "%%"__stringify(__percpu_seg)":%P" #x
+#define __my_cpu_offset                percpu_read(this_cpu_off)
+#else
+#define __percpu_arg(x)                "%" #x
+#endif
+
 /*
- * PER_CPU finds an address of a per-cpu variable.
+ * Initialized pointers to per-cpu variables needed by the boot
+ * processor must use these macros to get the proper address
+ * offset from __per_cpu_load on SMP.
  *
- * Args:
- *    var - variable name
- *    cpu - 32bit register containing the current CPU number
- *
- * The resulting address is stored in the "cpu" argument.
- *
- * Example:
- *    PER_CPU(cpu_gdt_descr, %ebx)
+ * There must also be an entry in vmlinux_64.lds.S.
  */
-#ifdef CONFIG_SMP
-
-#define __my_cpu_offset x86_read_percpu(this_cpu_off)
-
-/* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */
-#define __percpu_seg "%%fs:"
-
-#else  /* !SMP */
-
-#define __percpu_seg ""
-
-#endif /* SMP */
-
-#include <asm-generic/percpu.h>
+#define DECLARE_INIT_PER_CPU(var) \
+       extern typeof(per_cpu_var(var)) init_per_cpu_var(var)
 
-/* We can use this directly for local CPU (faster). */
-DECLARE_PER_CPU(unsigned long, this_cpu_off);
+#ifdef CONFIG_X86_64_SMP
+#define init_per_cpu_var(var)  init_per_cpu__##var
+#else
+#define init_per_cpu_var(var)  per_cpu_var(var)
+#endif
 
 /* For arch-specific code, we can use direct single-insn ops (they
  * don't give an lvalue though). */
@@ -120,20 +80,25 @@ do {                                                       \
        }                                               \
        switch (sizeof(var)) {                          \
        case 1:                                         \
-               asm(op "b %1,"__percpu_seg"%0"          \
+               asm(op "b %1,"__percpu_arg(0)           \
                    : "+m" (var)                        \
                    : "ri" ((T__)val));                 \
                break;                                  \
        case 2:                                         \
-               asm(op "w %1,"__percpu_seg"%0"          \
+               asm(op "w %1,"__percpu_arg(0)           \
                    : "+m" (var)                        \
                    : "ri" ((T__)val));                 \
                break;                                  \
        case 4:                                         \
-               asm(op "l %1,"__percpu_seg"%0"          \
+               asm(op "l %1,"__percpu_arg(0)           \
                    : "+m" (var)                        \
                    : "ri" ((T__)val));                 \
                break;                                  \
+       case 8:                                         \
+               asm(op "q %1,"__percpu_arg(0)           \
+                   : "+m" (var)                        \
+                   : "re" ((T__)val));                 \
+               break;                                  \
        default: __bad_percpu_size();                   \
        }                                               \
 } while (0)
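percpu_to_op() above picks the operand-size suffix (b/w/l, and now q on 64-bit) by switching on sizeof(var), so one macro serves per-cpu variables of every width. A rough user-space illustration of that compile-time size dispatch, without the %gs/%fs-relative asm:

#include <stdio.h>
#include <stdint.h>

/* Pick an operation width from the size of the operand, the way
 * percpu_to_op() selects "movb"/"movw"/"movl"/"movq". */
#define describe_store(var)					\
do {								\
	switch (sizeof(var)) {					\
	case 1: puts("byte store  (movb)"); break;		\
	case 2: puts("word store  (movw)"); break;		\
	case 4: puts("long store  (movl)"); break;		\
	case 8: puts("quad store  (movq)"); break;		\
	default: puts("unsupported size"); break;		\
	}							\
} while (0)

int main(void)
{
	describe_store((uint8_t)0);
	describe_store((uint16_t)0);
	describe_store((uint32_t)0);
	describe_store((uint64_t)0);
	return 0;
}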
@@ -143,17 +108,22 @@ do {                                                      \
        typeof(var) ret__;                              \
        switch (sizeof(var)) {                          \
        case 1:                                         \
-               asm(op "b "__percpu_seg"%1,%0"          \
+               asm(op "b "__percpu_arg(1)",%0"         \
                    : "=r" (ret__)                      \
                    : "m" (var));                       \
                break;                                  \
        case 2:                                         \
-               asm(op "w "__percpu_seg"%1,%0"          \
+               asm(op "w "__percpu_arg(1)",%0"         \
                    : "=r" (ret__)                      \
                    : "m" (var));                       \
                break;                                  \
        case 4:                                         \
-               asm(op "l "__percpu_seg"%1,%0"          \
+               asm(op "l "__percpu_arg(1)",%0"         \
+                   : "=r" (ret__)                      \
+                   : "m" (var));                       \
+               break;                                  \
+       case 8:                                         \
+               asm(op "q "__percpu_arg(1)",%0"         \
                    : "=r" (ret__)                      \
                    : "m" (var));                       \
                break;                                  \
@@ -162,13 +132,30 @@ do {                                                      \
        ret__;                                          \
 })
 
-#define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var)
-#define x86_write_percpu(var, val) percpu_to_op("mov", per_cpu__##var, val)
-#define x86_add_percpu(var, val) percpu_to_op("add", per_cpu__##var, val)
-#define x86_sub_percpu(var, val) percpu_to_op("sub", per_cpu__##var, val)
-#define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val)
+#define percpu_read(var)       percpu_from_op("mov", per_cpu__##var)
+#define percpu_write(var, val) percpu_to_op("mov", per_cpu__##var, val)
+#define percpu_add(var, val)   percpu_to_op("add", per_cpu__##var, val)
+#define percpu_sub(var, val)   percpu_to_op("sub", per_cpu__##var, val)
+#define percpu_and(var, val)   percpu_to_op("and", per_cpu__##var, val)
+#define percpu_or(var, val)    percpu_to_op("or", per_cpu__##var, val)
+#define percpu_xor(var, val)   percpu_to_op("xor", per_cpu__##var, val)
+
+/* This is not atomic against other CPUs -- CPU preemption needs to be off */
+#define x86_test_and_clear_bit_percpu(bit, var)                                \
+({                                                                     \
+       int old__;                                                      \
+       asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0"           \
+                    : "=r" (old__), "+m" (per_cpu__##var)              \
+                    : "dIr" (bit));                                    \
+       old__;                                                          \
+})
+
+#include <asm-generic/percpu.h>
+
+/* We can use this directly for local CPU (faster). */
+DECLARE_PER_CPU(unsigned long, this_cpu_off);
+
 #endif /* !__ASSEMBLY__ */
-#endif /* !CONFIG_X86_64 */
 
 #ifdef CONFIG_SMP
 
@@ -195,9 +182,9 @@ do {                                                        \
 #define        early_per_cpu_ptr(_name) (_name##_early_ptr)
 #define        early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
 #define        early_per_cpu(_name, _cpu)                              \
-       (early_per_cpu_ptr(_name) ?                             \
-               early_per_cpu_ptr(_name)[_cpu] :                \
-               per_cpu(_name, _cpu))
+       *(early_per_cpu_ptr(_name) ?                            \
+               &early_per_cpu_ptr(_name)[_cpu] :               \
+               &per_cpu(_name, _cpu))
 
 #else  /* !CONFIG_SMP */
 #define        DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)          \
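The early_per_cpu() change above fixes a usability wart: "cond ? a[i] : b" is not an lvalue in C, so the old macro could only be read, never assigned to; selecting between the two addresses and dereferencing the result keeps the same logic while producing an assignable expression. A stand-alone sketch of that trick, with invented array names and values:

#include <stdio.h>

static int early_map[4] = { 10, 11, 12, 13 };	/* early boot copy */
static int per_cpu_val[4];			/* final per-cpu storage */
static int *early_ptr = early_map;		/* NULL once early data is gone */

/* Dereferencing a pointer chosen by ?: yields an lvalue, so the macro can
 * appear on the left-hand side of an assignment as well as the right. */
#define early_per_cpu(cpu) \
	(*(early_ptr ? &early_ptr[cpu] : &per_cpu_val[cpu]))

int main(void)
{
	early_per_cpu(2) = 42;			/* writing now works too */
	printf("cpu2 = %d\n", early_per_cpu(2));

	early_ptr = NULL;			/* switch to the final storage */
	early_per_cpu(2) = 7;
	printf("cpu2 = %d\n", early_per_cpu(2));
	return 0;
}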
index e0d199f..c1774ac 100644 (file)
@@ -53,8 +53,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
 #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
 #endif
 
-#define pte_none(x)            (!(x).pte_low)
-
 /*
  * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
  * split up the 29 bits of offset into this range:
index 447da43..3f13cdf 100644 (file)
        printk("%s:%d: bad pgd %p(%016Lx).\n",                          \
               __FILE__, __LINE__, &(e), pgd_val(e))
 
-static inline int pud_none(pud_t pud)
-{
-       return pud_val(pud) == 0;
-}
-
-static inline int pud_bad(pud_t pud)
-{
-       return (pud_val(pud) & ~(PTE_PFN_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
-}
-
-static inline int pud_present(pud_t pud)
-{
-       return pud_val(pud) & _PAGE_PRESENT;
-}
-
 /* Rules for using set_pte: the pte being assigned *must* be
  * either not present or in a state where the hardware will
  * not attempt to update the pte.  In places where this is
@@ -120,15 +105,6 @@ static inline void pud_clear(pud_t *pudp)
                write_cr3(pgd);
 }
 
-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
-
-#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_PFN_MASK))
-
-
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(pud, address) ((pmd_t *)pud_page_vaddr(*(pud)) +    \
-                                 pmd_index(address))
-
 #ifdef CONFIG_SMP
 static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
 {
@@ -145,17 +121,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
 #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
 #endif
 
-#define __HAVE_ARCH_PTE_SAME
-static inline int pte_same(pte_t a, pte_t b)
-{
-       return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
-}
-
-static inline int pte_none(pte_t pte)
-{
-       return !pte.pte_low && !pte.pte_high;
-}
-
 /*
  * Bits 0, 6 and 7 are taken in the low part of the pte,
  * put the 32 bits of offset into the high part.
index 4f5af84..8fef0f6 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_PGTABLE_H
 #define _ASM_X86_PGTABLE_H
 
+#include <asm/page.h>
+
 #define FIRST_USER_ADDRESS     0
 
 #define _PAGE_BIT_PRESENT      0       /* is present */
@@ -236,68 +238,82 @@ static inline unsigned long pte_pfn(pte_t pte)
 
 static inline int pmd_large(pmd_t pte)
 {
-       return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
+       return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
                (_PAGE_PSE | _PAGE_PRESENT);
 }
 
+static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
+{
+       pteval_t v = native_pte_val(pte);
+
+       return native_make_pte(v | set);
+}
+
+static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
+{
+       pteval_t v = native_pte_val(pte);
+
+       return native_make_pte(v & ~clear);
+}
+
 static inline pte_t pte_mkclean(pte_t pte)
 {
-       return __pte(pte_val(pte) & ~_PAGE_DIRTY);
+       return pte_clear_flags(pte, _PAGE_DIRTY);
 }
 
 static inline pte_t pte_mkold(pte_t pte)
 {
-       return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
+       return pte_clear_flags(pte, _PAGE_ACCESSED);
 }
 
 static inline pte_t pte_wrprotect(pte_t pte)
 {
-       return __pte(pte_val(pte) & ~_PAGE_RW);
+       return pte_clear_flags(pte, _PAGE_RW);
 }
 
 static inline pte_t pte_mkexec(pte_t pte)
 {
-       return __pte(pte_val(pte) & ~_PAGE_NX);
+       return pte_clear_flags(pte, _PAGE_NX);
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
-       return __pte(pte_val(pte) | _PAGE_DIRTY);
+       return pte_set_flags(pte, _PAGE_DIRTY);
 }
 
 static inline pte_t pte_mkyoung(pte_t pte)
 {
-       return __pte(pte_val(pte) | _PAGE_ACCESSED);
+       return pte_set_flags(pte, _PAGE_ACCESSED);
 }
 
 static inline pte_t pte_mkwrite(pte_t pte)
 {
-       return __pte(pte_val(pte) | _PAGE_RW);
+       return pte_set_flags(pte, _PAGE_RW);
 }
 
 static inline pte_t pte_mkhuge(pte_t pte)
 {
-       return __pte(pte_val(pte) | _PAGE_PSE);
+       return pte_set_flags(pte, _PAGE_PSE);
 }
 
 static inline pte_t pte_clrhuge(pte_t pte)
 {
-       return __pte(pte_val(pte) & ~_PAGE_PSE);
+       return pte_clear_flags(pte, _PAGE_PSE);
 }
 
 static inline pte_t pte_mkglobal(pte_t pte)
 {
-       return __pte(pte_val(pte) | _PAGE_GLOBAL);
+       return pte_set_flags(pte, _PAGE_GLOBAL);
 }
 
 static inline pte_t pte_clrglobal(pte_t pte)
 {
-       return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
+       return pte_clear_flags(pte, _PAGE_GLOBAL);
 }
 
 static inline pte_t pte_mkspecial(pte_t pte)
 {
-       return __pte(pte_val(pte) | _PAGE_SPECIAL);
+       return pte_set_flags(pte, _PAGE_SPECIAL);
 }
 
 extern pteval_t __supported_pte_mask;
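The pte_mk*()/pte_clr*() rewrite above routes every flag update through pte_set_flags()/pte_clear_flags() instead of repeating the __pte(pte_val(pte) | flag) pattern in each helper. A hedged sketch of the same wrapper structure on a plain 64-bit value, with placeholder flag constants rather than the real _PAGE_* bits:

#include <stdio.h>
#include <stdint.h>

typedef struct { uint64_t pte; } pte_t;

/* Placeholder flag values, not the real _PAGE_* constants. */
#define PAGE_RW		(1ULL << 1)
#define PAGE_DIRTY	(1ULL << 6)

static inline pte_t pte_set_flags(pte_t pte, uint64_t set)
{
	pte.pte |= set;
	return pte;
}

static inline pte_t pte_clear_flags(pte_t pte, uint64_t clear)
{
	pte.pte &= ~clear;
	return pte;
}

/* All the pte_mk*() helpers then collapse to one-liners. */
static inline pte_t pte_mkdirty(pte_t pte)   { return pte_set_flags(pte, PAGE_DIRTY); }
static inline pte_t pte_wrprotect(pte_t pte) { return pte_clear_flags(pte, PAGE_RW); }

int main(void)
{
	pte_t pte = { PAGE_RW };

	pte = pte_mkdirty(pte);
	pte = pte_wrprotect(pte);
	printf("pte flags = %#llx\n", (unsigned long long)pte.pte);
	return 0;
}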
@@ -451,6 +467,190 @@ static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
 # include "pgtable_64.h"
 #endif
 
+#ifndef __ASSEMBLY__
+#include <linux/mm_types.h>
+
+static inline int pte_none(pte_t pte)
+{
+       return !pte.pte;
+}
+
+#define __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t a, pte_t b)
+{
+       return a.pte == b.pte;
+}
+
+static inline int pte_present(pte_t a)
+{
+       return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
+}
+
+static inline int pmd_present(pmd_t pmd)
+{
+       return pmd_flags(pmd) & _PAGE_PRESENT;
+}
+
+static inline int pmd_none(pmd_t pmd)
+{
+       /* Only check low word on 32-bit platforms, since it might be
+          out of sync with upper half. */
+       return (unsigned long)native_pmd_val(pmd) == 0;
+}
+
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+       return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
+}
+
+/*
+ * Currently stuck as a macro due to indirect forward reference to
+ * linux/mmzone.h's __section_mem_map_addr() definition:
+ */
+#define pmd_page(pmd)  pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
+
+/*
+ * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
+ *
+ * this function returns the index of the entry in the pmd page which would
+ * control the given virtual address
+ */
+static inline unsigned pmd_index(unsigned long address)
+{
+       return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
+}
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * (Currently stuck as a macro because of indirect forward reference
+ * to linux/mm.h:page_to_nid())
+ */
+#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))
+
+/*
+ * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
+ *
+ * this function returns the index of the entry in the pte page which would
+ * control the given virtual address
+ */
+static inline unsigned pte_index(unsigned long address)
+{
+       return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+}
+
+static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
+{
+       return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
+}
+
+static inline int pmd_bad(pmd_t pmd)
+{
+       return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
+}
+
+static inline unsigned long pages_to_mb(unsigned long npg)
+{
+       return npg >> (20 - PAGE_SHIFT);
+}
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)        \
+       remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#if PAGETABLE_LEVELS == 2
+static inline int pud_large(pud_t pud)
+{
+       return 0;
+}
+#endif
+
+#if PAGETABLE_LEVELS > 2
+static inline int pud_none(pud_t pud)
+{
+       return native_pud_val(pud) == 0;
+}
+
+static inline int pud_present(pud_t pud)
+{
+       return pud_flags(pud) & _PAGE_PRESENT;
+}
+
+static inline unsigned long pud_page_vaddr(pud_t pud)
+{
+       return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
+}
+
+/*
+ * Currently stuck as a macro due to indirect forward reference to
+ * linux/mmzone.h's __section_mem_map_addr() definition:
+ */
+#define pud_page(pud)          pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
+
+/* Find an entry in the second-level page table.. */
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+{
+       return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
+}
+
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+       return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
+static inline int pud_large(pud_t pud)
+{
+       return (pud_flags(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
+               (_PAGE_PSE | _PAGE_PRESENT);
+}
+
+static inline int pud_bad(pud_t pud)
+{
+       return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
+}
+#endif /* PAGETABLE_LEVELS > 2 */
+
+#if PAGETABLE_LEVELS > 3
+static inline int pgd_present(pgd_t pgd)
+{
+       return pgd_flags(pgd) & _PAGE_PRESENT;
+}
+
+static inline unsigned long pgd_page_vaddr(pgd_t pgd)
+{
+       return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
+}
+
+/*
+ * Currently stuck as a macro due to indirect forward reference to
+ * linux/mmzone.h's __section_mem_map_addr() definition:
+ */
+#define pgd_page(pgd)          pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
+
+/* to find an entry in a page-table-directory. */
+static inline unsigned pud_index(unsigned long address)
+{
+       return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
+}
+
+static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+{
+       return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
+}
+
+static inline int pgd_bad(pgd_t pgd)
+{
+       return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
+}
+
+static inline int pgd_none(pgd_t pgd)
+{
+       return !native_pgd_val(pgd);
+}
+#endif /* PAGETABLE_LEVELS > 3 */
+
+#endif /* __ASSEMBLY__ */
+
 /*
  * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
  *
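pmd_index(), pte_index() and pud_index() above are pure bit arithmetic on the virtual address. The sketch below shows how an address decomposes into per-level indices, assuming the usual 4 KB pages and 512-entry tables; the real shift values come from the pgtable headers, not from here:

#include <stdio.h>
#include <stdint.h>

/* Assumed x86-64 4-level layout: 4 KB pages, 512 entries per table. */
#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PUD_SHIFT	30
#define PGD_SHIFT	39
#define PTRS_PER_TABLE	512

static unsigned index_at(uint64_t addr, int shift)
{
	return (addr >> shift) & (PTRS_PER_TABLE - 1);
}

int main(void)
{
	uint64_t addr = 0x00007f1234567000ULL;	/* arbitrary example address */

	printf("pgd index: %u\n", index_at(addr, PGD_SHIFT));
	printf("pud index: %u\n", index_at(addr, PUD_SHIFT));
	printf("pmd index: %u\n", index_at(addr, PMD_SHIFT));
	printf("pte index: %u\n", index_at(addr, PAGE_SHIFT));
	return 0;
}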
index 72b020d..1952bb7 100644 (file)
@@ -85,55 +85,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
 /* The boot page tables (all created as a single array) */
 extern unsigned long pg0[];
 
-#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
-
-/* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
-#define pmd_none(x)    (!(unsigned long)pmd_val((x)))
-#define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT)
-#define pmd_bad(x) ((pmd_val(x) & (PTE_FLAGS_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
-
-#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-
 #ifdef CONFIG_X86_PAE
 # include <asm/pgtable-3level.h>
 #else
 # include <asm/pgtable-2level.h>
 #endif
 
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))
-
-
-static inline int pud_large(pud_t pud) { return 0; }
-
-/*
- * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
- *
- * this macro returns the index of the entry in the pmd page which would
- * control the given virtual address
- */
-#define pmd_index(address)                             \
-       (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
-
-/*
- * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
- *
- * this macro returns the index of the entry in the pte page which would
- * control the given virtual address
- */
-#define pte_index(address)                                     \
-       (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address)                                \
-       ((pte_t *)pmd_page_vaddr(*(dir)) +  pte_index((address)))
-
-#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
-
-#define pmd_page_vaddr(pmd)                                    \
-       ((unsigned long)__va(pmd_val((pmd)) & PTE_PFN_MASK))
-
 #if defined(CONFIG_HIGHPTE)
 #define pte_offset_map(dir, address)                                   \
        ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) +          \
@@ -176,7 +133,4 @@ do {                                                \
 #define kern_addr_valid(kaddr) (0)
 #endif
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)        \
-       remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 #endif /* _ASM_X86_PGTABLE_32_H */
index ba09289..1c4e247 100644 (file)
@@ -11,7 +11,6 @@
 #include <asm/processor.h>
 #include <linux/bitops.h>
 #include <linux/threads.h>
-#include <asm/pda.h>
 
 extern pud_t level3_kernel_pgt[512];
 extern pud_t level3_ident_pgt[512];
@@ -67,9 +66,6 @@ extern void paging_init(void);
        printk("%s:%d: bad pgd %p(%016lx).\n",          \
               __FILE__, __LINE__, &(e), pgd_val(e))
 
-#define pgd_none(x)    (!pgd_val(x))
-#define pud_none(x)    (!pud_val(x))
-
 struct mm_struct;
 
 void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
@@ -134,8 +130,6 @@ static inline void native_pgd_clear(pgd_t *pgd)
        native_set_pgd(pgd, native_make_pgd(0));
 }
 
-#define pte_same(a, b)         ((a).pte == (b).pte)
-
 #endif /* !__ASSEMBLY__ */
 
 #define PMD_SIZE       (_AC(1, UL) << PMD_SHIFT)
@@ -156,26 +150,6 @@ static inline void native_pgd_clear(pgd_t *pgd)
 
 #ifndef __ASSEMBLY__
 
-static inline int pgd_bad(pgd_t pgd)
-{
-       return (pgd_val(pgd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
-}
-
-static inline int pud_bad(pud_t pud)
-{
-       return (pud_val(pud) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
-}
-
-static inline int pmd_bad(pmd_t pmd)
-{
-       return (pmd_val(pmd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
-}
-
-#define pte_none(x)    (!pte_val((x)))
-#define pte_present(x) (pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE))
-
-#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT))   /* FIXME: is this right? */
-
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
@@ -184,41 +158,12 @@ static inline int pmd_bad(pmd_t pmd)
 /*
  * Level 4 access.
  */
-#define pgd_page_vaddr(pgd)                                            \
-       ((unsigned long)__va((unsigned long)pgd_val((pgd)) & PTE_PFN_MASK))
-#define pgd_page(pgd)          (pfn_to_page(pgd_val((pgd)) >> PAGE_SHIFT))
-#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
 static inline int pgd_large(pgd_t pgd) { return 0; }
 #define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
 
 /* PUD - Level3 access */
-/* to find an entry in a page-table-directory. */
-#define pud_page_vaddr(pud)                                            \
-       ((unsigned long)__va(pud_val((pud)) & PHYSICAL_PAGE_MASK))
-#define pud_page(pud)  (pfn_to_page(pud_val((pud)) >> PAGE_SHIFT))
-#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
-#define pud_offset(pgd, address)                                       \
-       ((pud_t *)pgd_page_vaddr(*(pgd)) + pud_index((address)))
-#define pud_present(pud) (pud_val((pud)) & _PAGE_PRESENT)
-
-static inline int pud_large(pud_t pte)
-{
-       return (pud_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
-               (_PAGE_PSE | _PAGE_PRESENT);
-}
 
 /* PMD  - Level 2 access */
-#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val((pmd)) & PTE_PFN_MASK))
-#define pmd_page(pmd)          (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
-
-#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
-#define pmd_offset(dir, address) ((pmd_t *)pud_page_vaddr(*(dir)) + \
-                                 pmd_index(address))
-#define pmd_none(x)    (!pmd_val((x)))
-#define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT)
-#define pfn_pmd(nr, prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val((prot))))
-#define pmd_pfn(x)  ((pmd_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)
-
 #define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
 #define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) |    \
                                            _PAGE_FILE })
@@ -226,13 +171,6 @@ static inline int pud_large(pud_t pte)
 
 /* PTE - Level 1 access. */
 
-/* page, protection -> pte */
-#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn((page)), (pgprot))
-
-#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \
-                                        pte_index((address)))
-
 /* x86-64 always has all page tables mapped. */
 #define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
 #define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address))
@@ -266,9 +204,6 @@ extern int direct_gbpages;
 extern int kern_addr_valid(unsigned long addr);
 extern void cleanup_highmap(void);
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)        \
-       remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
index a889464..3ac5032 100644 (file)
@@ -6,8 +6,4 @@
 #define ARCH_GET_FS 0x1003
 #define ARCH_GET_GS 0x1004
 
-#ifdef CONFIG_X86_64
-extern long sys_arch_prctl(int, unsigned long);
-#endif /* CONFIG_X86_64 */
-
 #endif /* _ASM_X86_PRCTL_H */
index 3bfd523..a013383 100644 (file)
@@ -73,7 +73,7 @@ struct cpuinfo_x86 {
        char                    pad0;
 #else
        /* Number of 4K pages in DTLB/ITLB combined(in pages): */
-       int                      x86_tlbsize;
+       int                     x86_tlbsize;
        __u8                    x86_virt_bits;
        __u8                    x86_phys_bits;
 #endif
@@ -378,7 +378,29 @@ union thread_xstate {
 
 #ifdef CONFIG_X86_64
 DECLARE_PER_CPU(struct orig_ist, orig_ist);
+
+union irq_stack_union {
+       char irq_stack[IRQ_STACK_SIZE];
+       /*
+        * GCC hardcodes the stack canary as %gs:40.  Since the
+        * irq_stack is the object at %gs:0, we reserve the bottom 48
+        * bytes of the irq stack so that the canary lands at offset 40.
+        */
+       struct {
+               char gs_base[40];
+               unsigned long stack_canary;
+       };
+};
+
+DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
+DECLARE_INIT_PER_CPU(irq_stack_union);
+
+DECLARE_PER_CPU(char *, irq_stack_ptr);
+#else  /* X86_64 */
+#ifdef CONFIG_CC_STACKPROTECTOR
+DECLARE_PER_CPU(unsigned long, stack_canary);
 #endif
+#endif /* X86_64 */
 
 extern void print_cpu_info(struct cpuinfo_x86 *);
 extern unsigned int xstate_size;
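irq_stack_union above exists because gcc hard-codes the stack-protector canary at %gs:40: by overlaying a 40-byte pad plus the canary on the bottom of the per-cpu IRQ stack, the canary ends up at exactly that offset. A stand-alone sketch of the overlay, with an assumed stack size and a runtime check standing in for the kernel's BUILD_BUG_ON():

#include <stdio.h>
#include <stddef.h>
#include <assert.h>

#define IRQ_STACK_SIZE 16384	/* assumed size, for the sketch only */

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	/* gcc expects the canary at %gs:40, so carve it out of the
	 * bottom of the stack area at that exact offset. */
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};

int main(void)
{
	/* The kernel uses BUILD_BUG_ON(); a runtime assert will do here. */
	assert(offsetof(union irq_stack_union, stack_canary) == 40);
	printf("canary offset: %zu\n",
	       offsetof(union irq_stack_union, stack_canary));
	return 0;
}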
@@ -752,9 +774,9 @@ extern int sysenter_setup(void);
 extern struct desc_ptr         early_gdt_descr;
 
 extern void cpu_set_gdt(int);
-extern void switch_to_new_gdt(void);
+extern void switch_to_new_gdt(int);
+extern void load_percpu_segment(int);
 extern void cpu_init(void);
-extern void init_gdt(int cpu);
 
 static inline unsigned long get_debugctlmsr(void)
 {
index d6a22f9..49fb3ec 100644 (file)
@@ -18,11 +18,7 @@ extern void syscall32_cpu_init(void);
 
 extern void check_efer(void);
 
-#ifdef CONFIG_X86_BIOS_REBOOT
 extern int reboot_force;
-#else
-static const int reboot_force = 0;
-#endif
 
 long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
 
index 6d34d95..e304b66 100644 (file)
@@ -28,7 +28,7 @@ struct pt_regs {
        int  xds;
        int  xes;
        int  xfs;
-       /* int  gs; */
+       int  xgs;
        long orig_eax;
        long eip;
        int  xcs;
@@ -50,7 +50,7 @@ struct pt_regs {
        unsigned long ds;
        unsigned long es;
        unsigned long fs;
-       /* int  gs; */
+       unsigned long gs;
        unsigned long orig_ax;
        unsigned long ip;
        unsigned long cs;
index 1dc1b51..14e0ed8 100644 (file)
@@ -61,7 +61,7 @@
  *
  *  26 - ESPFIX small SS
  *  27 - per-cpu                       [ offset to per-cpu data area ]
- *  28 - unused
+ *  28 - stack_canary-20               [ for stack protector ]
  *  29 - unused
  *  30 - unused
  *  31 - TSS for double fault handler
 #define __KERNEL_PERCPU 0
 #endif
 
+#define GDT_ENTRY_STACK_CANARY         (GDT_ENTRY_KERNEL_BASE + 16)
+#ifdef CONFIG_CC_STACKPROTECTOR
+#define __KERNEL_STACK_CANARY          (GDT_ENTRY_STACK_CANARY * 8)
+#else
+#define __KERNEL_STACK_CANARY          0
+#endif
+
 #define GDT_ENTRY_DOUBLEFAULT_TSS      31
 
 /*
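__KERNEL_STACK_CANARY above is just the GDT entry number scaled by the 8-byte descriptor size (the low selector bits carry the RPL and the LDT flag). A quick sketch of that arithmetic; GDT_ENTRY_KERNEL_BASE == 12 is an assumption taken from segment.h and yields the entry number 28 mentioned in the comment above:

#include <stdio.h>

/* Assumed base entry number; the real value is defined in segment.h. */
#define GDT_ENTRY_KERNEL_BASE	12
#define GDT_ENTRY_STACK_CANARY	(GDT_ENTRY_KERNEL_BASE + 16)

int main(void)
{
	/* A selector is entry * 8; bits 0-1 are RPL, bit 2 selects the LDT. */
	unsigned selector = GDT_ENTRY_STACK_CANARY * 8;

	printf("GDT entry %d -> selector %#x\n",
	       GDT_ENTRY_STACK_CANARY, selector);
	return 0;
}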
index ebe858c..45b4027 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_SETUP_H
 #define _ASM_X86_SETUP_H
 
+#ifdef __KERNEL__
+
 #define COMMAND_LINE_SIZE 2048
 
 #ifndef __ASSEMBLY__
@@ -8,10 +10,8 @@
 /* Interrupt control for vSMPowered x86_64 systems */
 void vsmp_init(void);
 
-
 void setup_bios_corruption_check(void);
 
-
 #ifdef CONFIG_X86_VISWS
 extern void visws_early_detect(void);
 extern int is_visws_box(void);
@@ -43,7 +43,7 @@ struct x86_quirks {
        void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
        void (*mpc_oem_pci_bus)(struct mpc_bus *m);
        void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable,
-                                    unsigned short oemsize);
+                               unsigned short oemsize);
        int (*setup_ioapic_ids)(void);
        int (*update_genapic)(void);
 };
@@ -56,8 +56,6 @@ extern unsigned long saved_video_mode;
 #endif
 #endif /* __ASSEMBLY__ */
 
-#ifdef __KERNEL__
-
 #ifdef __i386__
 
 #include <linux/pfn.h>
@@ -100,7 +98,6 @@ extern unsigned long init_pg_tables_start;
 extern unsigned long init_pg_tables_end;
 
 #else
-void __init x86_64_init_pda(void);
 void __init x86_64_start_kernel(char *real_mode);
 void __init x86_64_start_reservations(char *real_mode_data);
 
index 19953df..47d0e21 100644 (file)
 #  include <asm/io_apic.h>
 # endif
 #endif
-#include <asm/pda.h>
 #include <asm/thread_info.h>
-
-#ifdef CONFIG_X86_64
-
-extern cpumask_var_t cpu_callin_mask;
-extern cpumask_var_t cpu_callout_mask;
-extern cpumask_var_t cpu_initialized_mask;
-extern cpumask_var_t cpu_sibling_setup_mask;
-
-#else /* CONFIG_X86_32 */
-
-extern cpumask_t cpu_callin_map;
-extern cpumask_t cpu_callout_map;
-extern cpumask_t cpu_initialized;
-extern cpumask_t cpu_sibling_setup_map;
-
-#define cpu_callin_mask                ((struct cpumask *)&cpu_callin_map)
-#define cpu_callout_mask       ((struct cpumask *)&cpu_callout_map)
-#define cpu_initialized_mask   ((struct cpumask *)&cpu_initialized)
-#define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map)
-
-#endif /* CONFIG_X86_32 */
-
-extern void (*mtrr_hook)(void);
-extern void zap_low_mappings(void);
-
-extern int __cpuinit get_local_pda(int cpu);
+#include <asm/cpumask.h>
 
 extern int smp_num_siblings;
 extern unsigned int num_processors;
@@ -50,9 +24,7 @@ extern unsigned int num_processors;
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 DECLARE_PER_CPU(cpumask_t, cpu_core_map);
 DECLARE_PER_CPU(u16, cpu_llc_id);
-#ifdef CONFIG_X86_32
 DECLARE_PER_CPU(int, cpu_number);
-#endif
 
 static inline struct cpumask *cpu_sibling_mask(int cpu)
 {
@@ -167,8 +139,6 @@ void play_dead_common(void);
 void native_send_call_func_ipi(const struct cpumask *mask);
 void native_send_call_func_single_ipi(int cpu);
 
-extern void prefill_possible_map(void);
-
 void smp_store_cpu_info(int id);
 #define cpu_physical_id(cpu)   per_cpu(x86_cpu_to_apicid, cpu)
 
@@ -177,10 +147,6 @@ static inline int num_booting_cpus(void)
 {
        return cpumask_weight(cpu_callout_mask);
 }
-#else
-static inline void prefill_possible_map(void)
-{
-}
 #endif /* CONFIG_SMP */
 
 extern unsigned disabled_cpus __cpuinitdata;
@@ -191,11 +157,11 @@ extern unsigned disabled_cpus __cpuinitdata;
  * from the initial startup. We map APIC_BASE very early in page_setup(),
  * so this is correct in the x86 case.
  */
-#define raw_smp_processor_id() (x86_read_percpu(cpu_number))
+#define raw_smp_processor_id() (percpu_read(cpu_number))
 extern int safe_smp_processor_id(void);
 
 #elif defined(CONFIG_X86_64_SMP)
-#define raw_smp_processor_id() read_pda(cpunumber)
+#define raw_smp_processor_id() (percpu_read(cpu_number))
 
 #define stack_smp_processor_id()                                       \
 ({                                                             \
@@ -205,10 +171,6 @@ extern int safe_smp_processor_id(void);
 })
 #define safe_smp_processor_id()                smp_processor_id()
 
-#else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */
-#define cpu_physical_id(cpu)           boot_cpu_physical_apicid
-#define safe_smp_processor_id()                0
-#define stack_smp_processor_id()       0
 #endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -220,28 +182,9 @@ static inline int logical_smp_processor_id(void)
        return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
 }
 
-#include <mach_apicdef.h>
-static inline unsigned int read_apic_id(void)
-{
-       unsigned int reg;
-
-       reg = *(u32 *)(APIC_BASE + APIC_ID);
-
-       return GET_APIC_ID(reg);
-}
 #endif
 
-
-# if defined(APIC_DEFINITION) || defined(CONFIG_X86_64)
 extern int hard_smp_processor_id(void);
-# else
-#include <mach_apicdef.h>
-static inline int hard_smp_processor_id(void)
-{
-       /* we don't want to mark this access volatile - bad code generation */
-       return read_apic_id();
-}
-# endif /* APIC_DEFINITION */
 
 #else /* CONFIG_X86_LOCAL_APIC */
 
@@ -251,11 +194,5 @@ static inline int hard_smp_processor_id(void)
 
 #endif /* CONFIG_X86_LOCAL_APIC */
 
-#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
-extern unsigned char boot_cpu_id;
-#else
-#define boot_cpu_id    0
-#endif
-
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_SMP_H */
similarity index 83%
rename from arch/x86/include/asm/mach-default/smpboot_hooks.h
rename to arch/x86/include/asm/smpboot_hooks.h
index 23bf521..1def601 100644 (file)
@@ -13,10 +13,10 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
        CMOS_WRITE(0xa, 0xf);
        local_flush_tlb();
        pr_debug("1.\n");
-       *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
+       *((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_high)) =
                                                                 start_eip >> 4;
        pr_debug("2.\n");
-       *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
+       *((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_low)) =
                                                         start_eip & 0xf;
        pr_debug("3.\n");
 }
@@ -34,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
         */
        CMOS_WRITE(0, 0xf);
 
-       *((volatile long *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
+       *((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0;
 }
 
 static inline void __init smpboot_setup_io_apic(void)
index 8247e94..3a56966 100644 (file)
@@ -172,70 +172,8 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
        return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
 }
 
-#ifdef CONFIG_PARAVIRT
-/*
- * Define virtualization-friendly old-style lock byte lock, for use in
- * pv_lock_ops if desired.
- *
- * This differs from the pre-2.6.24 spinlock by always using xchgb
- * rather than decb to take the lock; this allows it to use a
- * zero-initialized lock structure.  It also maintains a 1-byte
- * contention counter, so that we can implement
- * __byte_spin_is_contended.
- */
-struct __byte_spinlock {
-       s8 lock;
-       s8 spinners;
-};
-
-static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
-{
-       struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-       return bl->lock != 0;
-}
-
-static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
-{
-       struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-       return bl->spinners != 0;
-}
-
-static inline void __byte_spin_lock(raw_spinlock_t *lock)
-{
-       struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-       s8 val = 1;
-
-       asm("1: xchgb %1, %0\n"
-           "   test %1,%1\n"
-           "   jz 3f\n"
-           "   " LOCK_PREFIX "incb %2\n"
-           "2: rep;nop\n"
-           "   cmpb $1, %0\n"
-           "   je 2b\n"
-           "   " LOCK_PREFIX "decb %2\n"
-           "   jmp 1b\n"
-           "3:"
-           : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
-}
-
-static inline int __byte_spin_trylock(raw_spinlock_t *lock)
-{
-       struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-       u8 old = 1;
-
-       asm("xchgb %1,%0"
-           : "+m" (bl->lock), "+q" (old) : : "memory");
+#ifndef CONFIG_PARAVIRT
 
-       return old == 0;
-}
-
-static inline void __byte_spin_unlock(raw_spinlock_t *lock)
-{
-       struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-       smp_wmb();
-       bl->lock = 0;
-}
-#else  /* !CONFIG_PARAVIRT */
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 {
        return __ticket_spin_is_locked(lock);
@@ -268,7 +206,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
        __raw_spin_lock(lock);
 }
 
-#endif /* CONFIG_PARAVIRT */
+#endif
 
 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {
@@ -330,8 +268,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
 {
        atomic_t *count = (atomic_t *)lock;
 
-       atomic_dec(count);
-       if (atomic_read(count) >= 0)
+       if (atomic_dec_return(count) >= 0)
                return 1;
        atomic_inc(count);
        return 0;
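The __raw_read_trylock() change above closes a small race: with separate atomic_dec() and atomic_read() calls, another CPU can change the count in between, so the decision has to be made on the value returned by the decrement itself. A user-space sketch of the corrected pattern using C11 atomics and an assumed read-lock bias:

#include <stdatomic.h>
#include <stdio.h>

#define RW_LOCK_BIAS 0x01000000	/* assumed bias, as on x86 rwlocks */

static atomic_int lock_count = RW_LOCK_BIAS;

static int read_trylock(void)
{
	/* atomic_fetch_sub returns the value before the subtraction, so
	 * (old - 1) is exactly what atomic_dec_return() would hand back;
	 * no second, racy read of the counter is needed. */
	int old = atomic_fetch_sub(&lock_count, 1);

	if (old - 1 >= 0)
		return 1;			/* got the read lock */
	atomic_fetch_add(&lock_count, 1);	/* undo: lock is write-held */
	return 0;
}

int main(void)
{
	printf("first try : %d\n", read_trylock());
	atomic_store(&lock_count, -1);		/* simulate a writer holding it */
	printf("second try: %d\n", read_trylock());
	return 0;
}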
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
new file mode 100644 (file)
index 0000000..c2d742c
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * GCC stack protector support.
+ *
+ * Stack protector works by putting a predefined pattern at the start of
+ * the stack frame and verifying that it hasn't been overwritten when
+ * returning from the function.  The pattern is called the stack canary
+ * and unfortunately gcc requires it to be at a fixed offset from %gs.
+ * On x86_64 the offset is 40 bytes and on x86_32 it is 20 bytes.  x86_64
+ * and x86_32 use segment registers differently and thus handle this
+ * requirement differently.
+ *
+ * On x86_64, %gs is shared by percpu area and stack canary.  All
+ * percpu symbols are zero based and %gs points to the base of percpu
+ * area.  The first occupant of the percpu area is always
+ * irq_stack_union which contains stack_canary at offset 40.  Userland
+ * %gs is always saved and restored on kernel entry and exit using
+ * swapgs, so stack protector doesn't add any complexity there.
+ *
+ * On x86_32, it's slightly more complicated.  As in x86_64, %gs is
+ * used for userland TLS.  Unfortunately, some processors are much
+ * slower at loading segment registers with a different value when
+ * entering and leaving the kernel, so the kernel uses %fs for the
+ * percpu area and manages %gs lazily so that %gs is switched only
+ * when necessary, usually during task switch.
+ *
+ * As gcc requires the stack canary at %gs:20, %gs can't be managed
+ * lazily if stack protector is enabled, so the kernel saves and
+ * restores userland %gs on kernel entry and exit.  This behavior is
+ * controlled by CONFIG_X86_32_LAZY_GS and accessors are defined in
+ * system.h to hide the details.
+ */
+
+#ifndef _ASM_STACKPROTECTOR_H
+#define _ASM_STACKPROTECTOR_H 1
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+
+#include <asm/tsc.h>
+#include <asm/processor.h>
+#include <asm/percpu.h>
+#include <asm/system.h>
+#include <asm/desc.h>
+#include <linux/random.h>
+
+/*
+ * 24-byte read-only segment initializer for the stack canary.  The
+ * linker can't handle the address bit shifting; the address is set in
+ * head_32 for the boot CPU and in setup_per_cpu_areas() for the others.
+ */
+#define GDT_STACK_CANARY_INIT                                          \
+       [GDT_ENTRY_STACK_CANARY] = { { { 0x00000018, 0x00409000 } } },
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+       u64 canary;
+       u64 tsc;
+
+#ifdef CONFIG_X86_64
+       BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
+#endif
+       /*
+        * We use both the random pool and the current TSC as sources
+        * of randomness. The TSC only matters for very early init;
+        * there it already has some randomness on most systems. Later
+        * on during bootup the random pool has true entropy too.
+        */
+       get_random_bytes(&canary, sizeof(canary));
+       tsc = __native_read_tsc();
+       canary += tsc + (tsc << 32UL);
+
+       current->stack_canary = canary;
+#ifdef CONFIG_X86_64
+       percpu_write(irq_stack_union.stack_canary, canary);
+#else
+       percpu_write(stack_canary, canary);
+#endif
+}
+
+static inline void setup_stack_canary_segment(int cpu)
+{
+#ifdef CONFIG_X86_32
+       unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu) - 20;
+       struct desc_struct *gdt_table = get_cpu_gdt_table(cpu);
+       struct desc_struct desc;
+
+       desc = gdt_table[GDT_ENTRY_STACK_CANARY];
+       desc.base0 = canary & 0xffff;
+       desc.base1 = (canary >> 16) & 0xff;
+       desc.base2 = (canary >> 24) & 0xff;
+       write_gdt_entry(gdt_table, GDT_ENTRY_STACK_CANARY, &desc, DESCTYPE_S);
+#endif
+}
+
+static inline void load_stack_canary_segment(void)
+{
+#ifdef CONFIG_X86_32
+       asm("mov %0, %%gs" : : "r" (__KERNEL_STACK_CANARY) : "memory");
+#endif
+}
+
+#else  /* CC_STACKPROTECTOR */
+
+#define GDT_STACK_CANARY_INIT
+
+/* dummy boot_init_stack_canary() is defined in linux/stackprotector.h */
+
+static inline void setup_stack_canary_segment(int cpu)
+{ }
+
+static inline void load_stack_canary_segment(void)
+{
+#ifdef CONFIG_X86_32
+       asm volatile ("mov %0, %%gs" : : "r" (0));
+#endif
+}
+
+#endif /* CC_STACKPROTECTOR */
+#endif /* _ASM_STACKPROTECTOR_H */
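boot_init_stack_canary() above mixes get_random_bytes() output with the TSC because the random pool may not be seeded this early in boot. A rough user-space analogue of that mixing step, with /dev/urandom standing in for the kernel pool and clock_gettime() standing in for __native_read_tsc():

#include <stdio.h>
#include <stdint.h>
#include <time.h>

int main(void)
{
	uint64_t canary = 0;
	uint64_t tsc;
	struct timespec ts;
	FILE *f = fopen("/dev/urandom", "rb");

	if (!f || fread(&canary, sizeof(canary), 1, f) != 1) {
		fprintf(stderr, "could not read /dev/urandom\n");
		return 1;
	}
	fclose(f);

	/* Stand-in for __native_read_tsc(): any fast-moving clock will do
	 * for the sketch. */
	clock_gettime(CLOCK_MONOTONIC, &ts);
	tsc = (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;

	/* Same mixing as the kernel: fold the timer into both halves. */
	canary += tsc + (tsc << 32);

	printf("canary = %#018llx\n", (unsigned long long)canary);
	return 0;
}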
diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h
deleted file mode 100644 (file)
index 93d2c86..0000000
+++ /dev/null
@@ -1,202 +0,0 @@
-#ifndef __ASM_SUMMIT_APIC_H
-#define __ASM_SUMMIT_APIC_H
-
-#include <asm/smp.h>
-#include <linux/gfp.h>
-
-#define esr_disable (1)
-#define NO_BALANCE_IRQ (0)
-
-/* In clustered mode, the high nibble of APIC ID is a cluster number.
- * The low nibble is a 4-bit bitmap. */
-#define XAPIC_DEST_CPUS_SHIFT  4
-#define XAPIC_DEST_CPUS_MASK   ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
-#define XAPIC_DEST_CLUSTER_MASK        (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
-
-#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
-
-static inline const cpumask_t *target_cpus(void)
-{
-       /* CPU_MASK_ALL (0xff) has undefined behaviour with
-        * dest_LowestPrio mode logical clustered apic interrupt routing
-        * Just start on cpu 0.  IRQ balancing will spread load
-        */
-       return &cpumask_of_cpu(0);
-}
-
-#define INT_DELIVERY_MODE (dest_LowestPrio)
-#define INT_DEST_MODE 1     /* logical delivery broadcast to all procs */
-
-static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
-{
-       return 0;
-}
-
-/* we don't use the phys_cpu_present_map to indicate apicid presence */
-static inline unsigned long check_apicid_present(int bit)
-{
-       return 1;
-}
-
-#define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)
-
-extern u8 cpu_2_logical_apicid[];
-
-static inline void init_apic_ldr(void)
-{
-       unsigned long val, id;
-       int count = 0;
-       u8 my_id = (u8)hard_smp_processor_id();
-       u8 my_cluster = (u8)apicid_cluster(my_id);
-#ifdef CONFIG_SMP
-       u8 lid;
-       int i;
-
-       /* Create logical APIC IDs by counting CPUs already in cluster. */
-       for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
-               lid = cpu_2_logical_apicid[i];
-               if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
-                       ++count;
-       }
-#endif
-       /* We only have a 4 wide bitmap in cluster mode.  If a deranged
-        * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
-       BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
-       id = my_cluster | (1UL << count);
-       apic_write(APIC_DFR, APIC_DFR_VALUE);
-       val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
-       val |= SET_APIC_LOGICAL_ID(id);
-       apic_write(APIC_LDR, val);
-}
-
-static inline int multi_timer_check(int apic, int irq)
-{
-       return 0;
-}
-
-static inline int apic_id_registered(void)
-{
-       return 1;
-}
-
-static inline void setup_apic_routing(void)
-{
-       printk("Enabling APIC mode:  Summit.  Using %d I/O APICs\n",
-                                               nr_ioapics);
-}
-
-static inline int apicid_to_node(int logical_apicid)
-{
-#ifdef CONFIG_SMP
-       return apicid_2_node[hard_smp_processor_id()];
-#else
-       return 0;
-#endif
-}
-
-/* Mapping from cpu number to logical apicid */
-static inline int cpu_to_logical_apicid(int cpu)
-{
-#ifdef CONFIG_SMP
-       if (cpu >= nr_cpu_ids)
-               return BAD_APICID;
-       return (int)cpu_2_logical_apicid[cpu];
-#else
-       return logical_smp_processor_id();
-#endif
-}
-
-static inline int cpu_present_to_apicid(int mps_cpu)
-{
-       if (mps_cpu < nr_cpu_ids)
-               return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
-       else
-               return BAD_APICID;
-}
-
-static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map)
-{
-       /* For clustered we don't have a good way to do this yet - hack */
-       return physids_promote(0x0F);
-}
-
-static inline physid_mask_t apicid_to_cpu_present(int apicid)
-{
-       return physid_mask_of_physid(0);
-}
-
-static inline void setup_portio_remap(void)
-{
-}
-
-static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
-{
-       return 1;
-}
-
-static inline void enable_apic_mode(void)
-{
-}
-
-static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
-{
-       int num_bits_set;
-       int cpus_found = 0;
-       int cpu;
-       int apicid;
-
-       num_bits_set = cpus_weight(*cpumask);
-       /* Return id to all */
-       if (num_bits_set >= nr_cpu_ids)
-               return (int) 0xFF;
-       /*
-        * The cpus in the mask must all be on the apic cluster.  If are not
-        * on the same apicid cluster return default value of TARGET_CPUS.
-        */
-       cpu = first_cpu(*cpumask);
-       apicid = cpu_to_logical_apicid(cpu);
-       while (cpus_found < num_bits_set) {
-               if (cpu_isset(cpu, *cpumask)) {
-                       int new_apicid = cpu_to_logical_apicid(cpu);
-                       if (apicid_cluster(apicid) !=
-                                       apicid_cluster(new_apicid)){
-                               printk ("%s: Not a valid mask!\n", __func__);
-                               return 0xFF;
-                       }
-                       apicid = apicid | new_apicid;
-                       cpus_found++;
-               }
-               cpu++;
-       }
-       return apicid;
-}
-
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
-                                                 const struct cpumask *andmask)
-{
-       int apicid = cpu_to_logical_apicid(0);
-       cpumask_var_t cpumask;
-
-       if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
-               return apicid;
-
-       cpumask_and(cpumask, inmask, andmask);
-       cpumask_and(cpumask, cpumask, cpu_online_mask);
-       apicid = cpu_mask_to_apicid(cpumask);
-
-       free_cpumask_var(cpumask);
-       return apicid;
-}
-
-/* cpuid returns the value latched in the HW at reset, not the APIC ID
- * register's value.  For any box whose BIOS changes APIC IDs, like
- * clustered APIC systems, we must use hard_smp_processor_id.
- *
- * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
- */
-static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-{
-       return hard_smp_processor_id() >> index_msb;
-}
-
-#endif /* __ASM_SUMMIT_APIC_H */
diff --git a/arch/x86/include/asm/summit/apicdef.h b/arch/x86/include/asm/summit/apicdef.h
deleted file mode 100644 (file)
index f3fbca1..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ASM_SUMMIT_APICDEF_H
-#define __ASM_SUMMIT_APICDEF_H
-
-#define                APIC_ID_MASK            (0xFF<<24)
-
-static inline unsigned get_apic_id(unsigned long x)
-{
-       return (x>>24)&0xFF;
-}
-
-#define                GET_APIC_ID(x)  get_apic_id(x)
-
-#endif
diff --git a/arch/x86/include/asm/summit/ipi.h b/arch/x86/include/asm/summit/ipi.h
deleted file mode 100644 (file)
index a8a2c24..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef __ASM_SUMMIT_IPI_H
-#define __ASM_SUMMIT_IPI_H
-
-void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
-void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
-
-static inline void send_IPI_mask(const cpumask_t *mask, int vector)
-{
-       send_IPI_mask_sequence(mask, vector);
-}
-
-static inline void send_IPI_allbutself(int vector)
-{
-       cpumask_t mask = cpu_online_map;
-       cpu_clear(smp_processor_id(), mask);
-
-       if (!cpus_empty(mask))
-               send_IPI_mask(&mask, vector);
-}
-
-static inline void send_IPI_all(int vector)
-{
-       send_IPI_mask(&cpu_online_map, vector);
-}
-
-#endif /* __ASM_SUMMIT_IPI_H */
diff --git a/arch/x86/include/asm/summit/mpparse.h b/arch/x86/include/asm/summit/mpparse.h
deleted file mode 100644 (file)
index 380e86c..0000000
+++ /dev/null
@@ -1,109 +0,0 @@
-#ifndef __ASM_SUMMIT_MPPARSE_H
-#define __ASM_SUMMIT_MPPARSE_H
-
-#include <asm/tsc.h>
-
-extern int use_cyclone;
-
-#ifdef CONFIG_X86_SUMMIT_NUMA
-extern void setup_summit(void);
-#else
-#define setup_summit() {}
-#endif
-
-static inline int mps_oem_check(struct mpc_table *mpc, char *oem,
-               char *productid)
-{
-       if (!strncmp(oem, "IBM ENSW", 8) &&
-                       (!strncmp(productid, "VIGIL SMP", 9)
-                        || !strncmp(productid, "EXA", 3)
-                        || !strncmp(productid, "RUTHLESS SMP", 12))){
-               mark_tsc_unstable("Summit based system");
-               use_cyclone = 1; /*enable cyclone-timer*/
-               setup_summit();
-               return 1;
-       }
-       return 0;
-}
-
-/* Hook from generic ACPI tables.c */
-static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
-       if (!strncmp(oem_id, "IBM", 3) &&
-           (!strncmp(oem_table_id, "SERVIGIL", 8)
-            || !strncmp(oem_table_id, "EXA", 3))){
-               mark_tsc_unstable("Summit based system");
-               use_cyclone = 1; /*enable cyclone-timer*/
-               setup_summit();
-               return 1;
-       }
-       return 0;
-}
-
-struct rio_table_hdr {
-       unsigned char version;      /* Version number of this data structure           */
-                                   /* Version 3 adds chassis_num & WP_index           */
-       unsigned char num_scal_dev; /* # of Scalability devices (Twisters for Vigil)   */
-       unsigned char num_rio_dev;  /* # of RIO I/O devices (Cyclones and Winnipegs)   */
-} __attribute__((packed));
-
-struct scal_detail {
-       unsigned char node_id;      /* Scalability Node ID                             */
-       unsigned long CBAR;         /* Address of 1MB register space                   */
-       unsigned char port0node;    /* Node ID port connected to: 0xFF=None            */
-       unsigned char port0port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
-       unsigned char port1node;    /* Node ID port connected to: 0xFF = None          */
-       unsigned char port1port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
-       unsigned char port2node;    /* Node ID port connected to: 0xFF = None          */
-       unsigned char port2port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
-       unsigned char chassis_num;  /* 1 based Chassis number (1 = boot node)          */
-} __attribute__((packed));
-
-struct rio_detail {
-       unsigned char node_id;      /* RIO Node ID                                     */
-       unsigned long BBAR;         /* Address of 1MB register space                   */
-       unsigned char type;         /* Type of device                                  */
-       unsigned char owner_id;     /* For WPEG: Node ID of Cyclone that owns this WPEG*/
-                                   /* For CYC:  Node ID of Twister that owns this CYC */
-       unsigned char port0node;    /* Node ID port connected to: 0xFF=None            */
-       unsigned char port0port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
-       unsigned char port1node;    /* Node ID port connected to: 0xFF=None            */
-       unsigned char port1port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
-       unsigned char first_slot;   /* For WPEG: Lowest slot number below this WPEG    */
-                                   /* For CYC:  0                                     */
-       unsigned char status;       /* For WPEG: Bit 0 = 1 : the XAPIC is used         */
-                                   /*                 = 0 : the XAPIC is not used, ie:*/
-                                   /*                     ints fwded to another XAPIC */
-                                   /*           Bits1:7 Reserved                      */
-                                   /* For CYC:  Bits0:7 Reserved                      */
-       unsigned char WP_index;     /* For WPEG: WPEG instance index - lower ones have */
-                                   /*           lower slot numbers/PCI bus numbers    */
-                                   /* For CYC:  No meaning                            */
-       unsigned char chassis_num;  /* 1 based Chassis number                          */
-                                   /* For LookOut WPEGs this field indicates the      */
-                                   /* Expansion Chassis #, enumerated from Boot       */
-                                   /* Node WPEG external port, then Boot Node CYC     */
-                                   /* external port, then Next Vigil chassis WPEG     */
-                                   /* external port, etc.                             */
-                                   /* Shared Lookouts have only 1 chassis number (the */
-                                   /* first one assigned)                             */
-} __attribute__((packed));
-
-
-typedef enum {
-       CompatTwister = 0,  /* Compatibility Twister               */
-       AltTwister    = 1,  /* Alternate Twister of internal 8-way */
-       CompatCyclone = 2,  /* Compatibility Cyclone               */
-       AltCyclone    = 3,  /* Alternate Cyclone of internal 8-way */
-       CompatWPEG    = 4,  /* Compatibility WPEG                  */
-       AltWPEG       = 5,  /* Second Planar WPEG                  */
-       LookOutAWPEG  = 6,  /* LookOut WPEG                        */
-       LookOutBWPEG  = 7,  /* LookOut WPEG                        */
-} node_type;
-
-static inline int is_WPEG(struct rio_detail *rio){
-       return (rio->type == CompatWPEG || rio->type == AltWPEG ||
-               rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
-}
-
-#endif /* __ASM_SUMMIT_MPPARSE_H */
index c0b0bda..68b1be1 100644 (file)
@@ -29,21 +29,21 @@ asmlinkage int sys_get_thread_area(struct user_desc __user *);
 /* X86_32 only */
 #ifdef CONFIG_X86_32
 /* kernel/process_32.c */
-asmlinkage int sys_fork(struct pt_regs);
-asmlinkage int sys_clone(struct pt_regs);
-asmlinkage int sys_vfork(struct pt_regs);
-asmlinkage int sys_execve(struct pt_regs);
+int sys_fork(struct pt_regs *);
+int sys_clone(struct pt_regs *);
+int sys_vfork(struct pt_regs *);
+int sys_execve(struct pt_regs *);
 
 /* kernel/signal_32.c */
 asmlinkage int sys_sigsuspend(int, int, old_sigset_t);
 asmlinkage int sys_sigaction(int, const struct old_sigaction __user *,
                             struct old_sigaction __user *);
-asmlinkage int sys_sigaltstack(unsigned long);
-asmlinkage unsigned long sys_sigreturn(unsigned long);
-asmlinkage int sys_rt_sigreturn(unsigned long);
+int sys_sigaltstack(struct pt_regs *);
+unsigned long sys_sigreturn(struct pt_regs *);
+long sys_rt_sigreturn(struct pt_regs *);
 
 /* kernel/ioport.c */
-asmlinkage long sys_iopl(unsigned long);
+long sys_iopl(struct pt_regs *);
 
 /* kernel/sys_i386_32.c */
 asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long,
@@ -59,8 +59,8 @@ struct oldold_utsname;
 asmlinkage int sys_olduname(struct oldold_utsname __user *);
 
 /* kernel/vm86_32.c */
-asmlinkage int sys_vm86old(struct pt_regs);
-asmlinkage int sys_vm86(struct pt_regs);
+int sys_vm86old(struct pt_regs *);
+int sys_vm86(struct pt_regs *);
 
 #else /* CONFIG_X86_32 */
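
The 32-bit prototypes above drop asmlinkage and take a pointer to pt_regs instead of the whole structure by value. A minimal sketch of how a call body looks under the pointer convention; do_fork() and its argument order are assumed background here, not something introduced by this merge:

/*
 * Sketch only: illustrates the pt_regs-pointer calling shape, not the
 * exact body introduced by this merge.
 */
int sys_vfork(struct pt_regs *regs)
{
	/* the entry stub passes a pointer into the saved register frame,
	 * so the user stack pointer is read through it */
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp,
		       regs, 0, NULL, NULL);
}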
 
index 8e626ea..c00bfdb 100644 (file)
@@ -23,6 +23,20 @@ struct task_struct *__switch_to(struct task_struct *prev,
 
 #ifdef CONFIG_X86_32
 
+#ifdef CONFIG_CC_STACKPROTECTOR
+#define __switch_canary                                                        \
+       "movl %P[task_canary](%[next]), %%ebx\n\t"                      \
+       "movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
+#define __switch_canary_oparam                                         \
+       , [stack_canary] "=m" (per_cpu_var(stack_canary))
+#define __switch_canary_iparam                                         \
+       , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
+#else  /* CC_STACKPROTECTOR */
+#define __switch_canary
+#define __switch_canary_oparam
+#define __switch_canary_iparam
+#endif /* CC_STACKPROTECTOR */
+
 /*
  * Saving eflags is important. It switches not only IOPL between tasks,
  * it also protects other tasks from NT leaking through sysenter etc.
@@ -44,6 +58,7 @@ do {                                                                  \
                     "movl %[next_sp],%%esp\n\t"        /* restore ESP   */ \
                     "movl $1f,%[prev_ip]\n\t"  /* save    EIP   */     \
                     "pushl %[next_ip]\n\t"     /* restore EIP   */     \
+                    __switch_canary                                    \
                     "jmp __switch_to\n"        /* regparm call  */     \
                     "1:\t"                                             \
                     "popl %%ebp\n\t"           /* restore EBP   */     \
@@ -58,6 +73,8 @@ do {                                                                  \
                       "=b" (ebx), "=c" (ecx), "=d" (edx),              \
                       "=S" (esi), "=D" (edi)                           \
                                                                        \
+                      __switch_canary_oparam                           \
+                                                                       \
                       /* input parameters: */                          \
                     : [next_sp]  "m" (next->thread.sp),                \
                       [next_ip]  "m" (next->thread.ip),                \
@@ -66,6 +83,8 @@ do {                                                                  \
                       [prev]     "a" (prev),                           \
                       [next]     "d" (next)                            \
                                                                        \
+                      __switch_canary_iparam                           \
+                                                                       \
                     : /* reloaded segment registers */                 \
                        "memory");                                      \
 } while (0)
@@ -86,27 +105,44 @@ do {                                                                       \
        , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
          "r12", "r13", "r14", "r15"
 
+#ifdef CONFIG_CC_STACKPROTECTOR
+#define __switch_canary                                                          \
+       "movq %P[task_canary](%%rsi),%%r8\n\t"                            \
+       "movq %%r8,"__percpu_arg([gs_canary])"\n\t"
+#define __switch_canary_oparam                                           \
+       , [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary))
+#define __switch_canary_iparam                                           \
+       , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
+#else  /* CC_STACKPROTECTOR */
+#define __switch_canary
+#define __switch_canary_oparam
+#define __switch_canary_iparam
+#endif /* CC_STACKPROTECTOR */
+
 /* Save restore flags to clear handle leaking NT */
 #define switch_to(prev, next, last) \
-       asm volatile(SAVE_CONTEXT                                                   \
+       asm volatile(SAVE_CONTEXT                                         \
             "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */       \
             "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */    \
             "call __switch_to\n\t"                                       \
             ".globl thread_return\n"                                     \
             "thread_return:\n\t"                                         \
-            "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"                       \
+            "movq "__percpu_arg([current_task])",%%rsi\n\t"              \
+            __switch_canary                                              \
             "movq %P[thread_info](%%rsi),%%r8\n\t"                       \
-            LOCK_PREFIX "btr  %[tif_fork],%P[ti_flags](%%r8)\n\t"        \
             "movq %%rax,%%rdi\n\t"                                       \
-            "jc   ret_from_fork\n\t"                                     \
+            "testl  %[_tif_fork],%P[ti_flags](%%r8)\n\t"         \
+            "jnz   ret_from_fork\n\t"                                    \
             RESTORE_CONTEXT                                              \
             : "=a" (last)                                                \
+              __switch_canary_oparam                                     \
             : [next] "S" (next), [prev] "D" (prev),                      \
               [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
               [ti_flags] "i" (offsetof(struct thread_info, flags)),      \
-              [tif_fork] "i" (TIF_FORK),                                 \
+              [_tif_fork] "i" (_TIF_FORK),                               \
               [thread_info] "i" (offsetof(struct task_struct, stack)),   \
-              [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent))  \
+              [current_task] "m" (per_cpu_var(current_task))             \
+              __switch_canary_iparam                                     \
             : "memory", "cc" __EXTRA_CLOBBER)
 #endif
 
@@ -165,6 +201,25 @@ extern void native_load_gs_index(unsigned);
 #define savesegment(seg, value)                                \
        asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
 
+/*
+ * x86_32 user gs accessors.
+ */
+#ifdef CONFIG_X86_32
+#ifdef CONFIG_X86_32_LAZY_GS
+#define get_user_gs(regs)      (u16)({unsigned long v; savesegment(gs, v); v;})
+#define set_user_gs(regs, v)   loadsegment(gs, (unsigned long)(v))
+#define task_user_gs(tsk)      ((tsk)->thread.gs)
+#define lazy_save_gs(v)                savesegment(gs, (v))
+#define lazy_load_gs(v)                loadsegment(gs, (v))
+#else  /* X86_32_LAZY_GS */
+#define get_user_gs(regs)      (u16)((regs)->gs)
+#define set_user_gs(regs, v)   do { (regs)->gs = (v); } while (0)
+#define task_user_gs(tsk)      (task_pt_regs(tsk)->gs)
+#define lazy_save_gs(v)                do { } while (0)
+#define lazy_load_gs(v)                do { } while (0)
+#endif /* X86_32_LAZY_GS */
+#endif /* X86_32 */
+
 static inline unsigned long get_limit(unsigned long segment)
 {
        unsigned long __limit;
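
The __switch_canary fragments above copy the incoming task's stack_canary into the per-cpu slot that -fstack-protector code reads through the segment register, so the canary checks running in the next task see that task's own value. A small standalone model of that copy; the struct, variable names and values below are invented for illustration:

#include <stdio.h>

struct task { unsigned long stack_canary; };

/* stand-in for the per-cpu irq_stack_union.stack_canary slot */
static unsigned long percpu_stack_canary;

static void switch_canary(struct task *next)
{
	/* models: movq task_canary(next),%r8 ; movq %r8,%gs:stack_canary */
	percpu_stack_canary = next->stack_canary;
}

int main(void)
{
	struct task a = { .stack_canary = 0x1111 }, b = { .stack_canary = 0x2222 };

	switch_canary(&a);
	printf("after switch to a: %#lx\n", percpu_stack_canary);
	switch_canary(&b);
	printf("after switch to b: %#lx\n", percpu_stack_canary);
	return 0;
}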
index 9878964..df9d5f7 100644 (file)
@@ -40,6 +40,7 @@ struct thread_info {
                                                */
        __u8                    supervisor_stack[0];
 #endif
+       int                     uaccess_err;
 };
 
 #define INIT_THREAD_INFO(tsk)                  \
@@ -194,25 +195,21 @@ static inline struct thread_info *current_thread_info(void)
 
 #else /* X86_32 */
 
-#include <asm/pda.h>
+#include <asm/percpu.h>
+#define KERNEL_STACK_OFFSET (5*8)
 
 /*
  * macros/functions for gaining access to the thread information structure
  * preempt_count needs to be 1 initially, until the scheduler is functional.
  */
 #ifndef __ASSEMBLY__
-static inline struct thread_info *current_thread_info(void)
-{
-       struct thread_info *ti;
-       ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE);
-       return ti;
-}
+DECLARE_PER_CPU(unsigned long, kernel_stack);
 
-/* do not use in interrupt context */
-static inline struct thread_info *stack_thread_info(void)
+static inline struct thread_info *current_thread_info(void)
 {
        struct thread_info *ti;
-       asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1)));
+       ti = (void *)(percpu_read(kernel_stack) +
+                     KERNEL_STACK_OFFSET - THREAD_SIZE);
        return ti;
 }
 
@@ -220,8 +217,8 @@ static inline struct thread_info *stack_thread_info(void)
 
 /* how to get the thread information struct from ASM */
 #define GET_THREAD_INFO(reg) \
-       movq %gs:pda_kernelstack,reg ; \
-       subq $(THREAD_SIZE-PDA_STACKOFFSET),reg
+       movq PER_CPU_VAR(kernel_stack),reg ; \
+       subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
 
 #endif
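
current_thread_info() above now starts from a per-cpu kernel_stack value instead of the old PDA field, then subtracts THREAD_SIZE minus KERNEL_STACK_OFFSET to land back at the bottom of the task stack. A standalone model of that arithmetic; the stack base address and THREAD_SIZE value are assumptions for the example:

#include <stdio.h>

#define THREAD_SIZE		8192UL		/* assumed stack size for the example */
#define KERNEL_STACK_OFFSET	(5 * 8)		/* as defined in the hunk above */

int main(void)
{
	unsigned long stack_base   = 0x40000000UL;	/* made-up stack bottom */
	/* the per-cpu kernel_stack points KERNEL_STACK_OFFSET below the stack top */
	unsigned long kernel_stack = stack_base + THREAD_SIZE - KERNEL_STACK_OFFSET;
	/* the header's formula recovers the thread_info at the stack bottom */
	unsigned long thread_info  = kernel_stack + KERNEL_STACK_OFFSET - THREAD_SIZE;

	printf("kernel_stack=%#lx -> thread_info=%#lx (stack base %#lx)\n",
	       kernel_stack, thread_info, stack_base);
	return 0;
}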
 
index 0e7bbb5..d3539f9 100644 (file)
@@ -113,7 +113,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
                __flush_tlb();
 }
 
-static inline void native_flush_tlb_others(const cpumask_t *cpumask,
+static inline void native_flush_tlb_others(const struct cpumask *cpumask,
                                           struct mm_struct *mm,
                                           unsigned long va)
 {
@@ -142,31 +142,28 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
        flush_tlb_mm(vma->vm_mm);
 }
 
-void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm,
-                            unsigned long va);
+void native_flush_tlb_others(const struct cpumask *cpumask,
+                            struct mm_struct *mm, unsigned long va);
 
 #define TLBSTATE_OK    1
 #define TLBSTATE_LAZY  2
 
-#ifdef CONFIG_X86_32
 struct tlb_state {
        struct mm_struct *active_mm;
        int state;
-       char __cacheline_padding[L1_CACHE_BYTES-8];
 };
 DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
 
-void reset_lazy_tlbstate(void);
-#else
 static inline void reset_lazy_tlbstate(void)
 {
+       percpu_write(cpu_tlbstate.state, 0);
+       percpu_write(cpu_tlbstate.active_mm, &init_mm);
 }
-#endif
 
 #endif /* SMP */
 
 #ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(&mask, mm, va)
+#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(mask, mm, va)
 #endif
 
 static inline void flush_tlb_kernel_range(unsigned long start,
@@ -175,4 +172,6 @@ static inline void flush_tlb_kernel_range(unsigned long start,
        flush_tlb_all();
 }
 
+extern void zap_low_mappings(void);
+
 #endif /* _ASM_X86_TLBFLUSH_H */
index 4e2f2e0..77cfb2c 100644 (file)
@@ -74,6 +74,8 @@ static inline const struct cpumask *cpumask_of_node(int node)
        return &node_to_cpumask_map[node];
 }
 
+static inline void setup_node_to_cpumask_map(void) { }
+
 #else /* CONFIG_X86_64 */
 
 /* Mappings between node number and cpus on that node. */
@@ -83,7 +85,8 @@ extern cpumask_t *node_to_cpumask_map;
 DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
 
 /* Returns the number of the current Node. */
-#define numa_node_id()         read_pda(nodenumber)
+DECLARE_PER_CPU(int, node_number);
+#define numa_node_id()         percpu_read(node_number)
 
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
 extern int cpu_to_node(int cpu);
@@ -102,10 +105,7 @@ static inline int cpu_to_node(int cpu)
 /* Same function but used if called before per_cpu areas are setup */
 static inline int early_cpu_to_node(int cpu)
 {
-       if (early_per_cpu_ptr(x86_cpu_to_node_map))
-               return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
-
-       return per_cpu(x86_cpu_to_node_map, cpu);
+       return early_per_cpu(x86_cpu_to_node_map, cpu);
 }
 
 /* Returns a pointer to the cpumask of CPUs on Node 'node'. */
@@ -122,6 +122,8 @@ static inline cpumask_t node_to_cpumask(int node)
 
 #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
 
+extern void setup_node_to_cpumask_map(void);
+
 /*
  * Replace default node_to_cpumask_ptr with optimized version
  * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
@@ -192,9 +194,20 @@ extern int __node_distance(int, int);
 
 #else /* !CONFIG_NUMA */
 
-#define numa_node_id()         0
-#define        cpu_to_node(cpu)        0
-#define        early_cpu_to_node(cpu)  0
+static inline int numa_node_id(void)
+{
+       return 0;
+}
+
+static inline int cpu_to_node(int cpu)
+{
+       return 0;
+}
+
+static inline int early_cpu_to_node(int cpu)
+{
+       return 0;
+}
 
 static inline const cpumask_t *cpumask_of_node(int node)
 {
@@ -209,6 +222,8 @@ static inline int node_to_first_cpu(int node)
        return first_cpu(cpu_online_map);
 }
 
+static inline void setup_node_to_cpumask_map(void) { }
+
 /*
  * Replace default node_to_cpumask_ptr with optimized version
  * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
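
In the !CONFIG_NUMA branch above, the stubs change from object-like macros expanding to 0 into static inline functions. One plausible reading of why, sketched below: the inline form still evaluates and type-checks its argument, so callers do not trip unused-variable warnings or silently pass the wrong type. The helper names here are hypothetical:

#include <stdio.h>

#define cpu_to_node_as_macro(cpu)	0	/* old style: argument disappears */

static inline int cpu_to_node_as_inline(int cpu)
{
	return 0;				/* new style: 'cpu' stays referenced */
}

int main(void)
{
	int cpu = 3;	/* pretend this came from a device binding */

	/* with the macro form, 'cpu' would be unreferenced here and a
	 * wrongly typed argument would not be diagnosed */
	printf("node for cpu %d: %d\n", cpu, cpu_to_node_as_inline(cpu));
	return 0;
}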
index 780ba0a..90f06c2 100644 (file)
@@ -13,6 +13,7 @@ extern unsigned char *trampoline_base;
 
 extern unsigned long init_rsp;
 extern unsigned long initial_code;
+extern unsigned long initial_gs;
 
 #define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
 #define TRAMPOLINE_BASE 0x6000
index cf3bb05..0d53425 100644 (file)
@@ -41,7 +41,7 @@ dotraplinkage void do_int3(struct pt_regs *, long);
 dotraplinkage void do_overflow(struct pt_regs *, long);
 dotraplinkage void do_bounds(struct pt_regs *, long);
 dotraplinkage void do_invalid_op(struct pt_regs *, long);
-dotraplinkage void do_device_not_available(struct pt_regs);
+dotraplinkage void do_device_not_available(struct pt_regs *, long);
 dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *, long);
 dotraplinkage void do_invalid_TSS(struct pt_regs *, long);
 dotraplinkage void do_segment_not_present(struct pt_regs *, long);
index 4340055..b685ece 100644 (file)
@@ -121,7 +121,7 @@ extern int __get_user_bad(void);
 
 #define __get_user_x(size, ret, x, ptr)                      \
        asm volatile("call __get_user_" #size         \
-                    : "=a" (ret),"=d" (x)            \
+                    : "=a" (ret), "=d" (x)           \
                     : "0" (ptr))                     \
 
 /* Careful: we have to cast the result to the type of the pointer
@@ -181,12 +181,12 @@ extern int __get_user_bad(void);
 
 #define __put_user_x(size, x, ptr, __ret_pu)                   \
        asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
-                    :"0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+                    : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
 
 
 
 #ifdef CONFIG_X86_32
-#define __put_user_u64(x, addr, err)                                   \
+#define __put_user_asm_u64(x, addr, err, errret)                       \
        asm volatile("1:        movl %%eax,0(%2)\n"                     \
                     "2:        movl %%edx,4(%2)\n"                     \
                     "3:\n"                                             \
@@ -197,14 +197,24 @@ extern int __get_user_bad(void);
                     _ASM_EXTABLE(1b, 4b)                               \
                     _ASM_EXTABLE(2b, 4b)                               \
                     : "=r" (err)                                       \
-                    : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
+                    : "A" (x), "r" (addr), "i" (errret), "0" (err))
+
+#define __put_user_asm_ex_u64(x, addr)                                 \
+       asm volatile("1:        movl %%eax,0(%1)\n"                     \
+                    "2:        movl %%edx,4(%1)\n"                     \
+                    "3:\n"                                             \
+                    _ASM_EXTABLE(1b, 2b - 1b)                          \
+                    _ASM_EXTABLE(2b, 3b - 2b)                          \
+                    : : "A" (x), "r" (addr))
 
 #define __put_user_x8(x, ptr, __ret_pu)                                \
        asm volatile("call __put_user_8" : "=a" (__ret_pu)      \
                     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
 #else
-#define __put_user_u64(x, ptr, retval) \
-       __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT)
+#define __put_user_asm_u64(x, ptr, retval, errret) \
+       __put_user_asm(x, ptr, retval, "q", "", "Zr", errret)
+#define __put_user_asm_ex_u64(x, addr) \
+       __put_user_asm_ex(x, addr, "q", "", "Zr")
 #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
 #endif
 
@@ -276,10 +286,32 @@ do {                                                                      \
                __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
                break;                                                  \
        case 4:                                                         \
-               __put_user_asm(x, ptr, retval, "l", "k",  "ir", errret);\
+               __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
                break;                                                  \
        case 8:                                                         \
-               __put_user_u64((__typeof__(*ptr))(x), ptr, retval);     \
+               __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,  \
+                                  errret);                             \
+               break;                                                  \
+       default:                                                        \
+               __put_user_bad();                                       \
+       }                                                               \
+} while (0)
+
+#define __put_user_size_ex(x, ptr, size)                               \
+do {                                                                   \
+       __chk_user_ptr(ptr);                                            \
+       switch (size) {                                                 \
+       case 1:                                                         \
+               __put_user_asm_ex(x, ptr, "b", "b", "iq");              \
+               break;                                                  \
+       case 2:                                                         \
+               __put_user_asm_ex(x, ptr, "w", "w", "ir");              \
+               break;                                                  \
+       case 4:                                                         \
+               __put_user_asm_ex(x, ptr, "l", "k", "ir");              \
+               break;                                                  \
+       case 8:                                                         \
+               __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);      \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
@@ -311,9 +343,12 @@ do {                                                                       \
 
 #ifdef CONFIG_X86_32
 #define __get_user_asm_u64(x, ptr, retval, errret)     (x) = __get_user_bad()
+#define __get_user_asm_ex_u64(x, ptr)                  (x) = __get_user_bad()
 #else
 #define __get_user_asm_u64(x, ptr, retval, errret) \
         __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
+#define __get_user_asm_ex_u64(x, ptr) \
+        __get_user_asm_ex(x, ptr, "q", "", "=r")
 #endif
 
 #define __get_user_size(x, ptr, size, retval, errret)                  \
@@ -350,6 +385,33 @@ do {                                                                       \
                     : "=r" (err), ltype(x)                             \
                     : "m" (__m(addr)), "i" (errret), "0" (err))
 
+#define __get_user_size_ex(x, ptr, size)                               \
+do {                                                                   \
+       __chk_user_ptr(ptr);                                            \
+       switch (size) {                                                 \
+       case 1:                                                         \
+               __get_user_asm_ex(x, ptr, "b", "b", "=q");              \
+               break;                                                  \
+       case 2:                                                         \
+               __get_user_asm_ex(x, ptr, "w", "w", "=r");              \
+               break;                                                  \
+       case 4:                                                         \
+               __get_user_asm_ex(x, ptr, "l", "k", "=r");              \
+               break;                                                  \
+       case 8:                                                         \
+               __get_user_asm_ex_u64(x, ptr);                          \
+               break;                                                  \
+       default:                                                        \
+               (x) = __get_user_bad();                                 \
+       }                                                               \
+} while (0)
+
+#define __get_user_asm_ex(x, addr, itype, rtype, ltype)                        \
+       asm volatile("1:        mov"itype" %1,%"rtype"0\n"              \
+                    "2:\n"                                             \
+                    _ASM_EXTABLE(1b, 2b - 1b)                          \
+                    : ltype(x) : "m" (__m(addr)))
+
 #define __put_user_nocheck(x, ptr, size)                       \
 ({                                                             \
        int __pu_err;                                           \
@@ -385,6 +447,26 @@ struct __large_struct { unsigned long buf[100]; };
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r"(err)                                        \
                     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
+
+#define __put_user_asm_ex(x, addr, itype, rtype, ltype)                        \
+       asm volatile("1:        mov"itype" %"rtype"0,%1\n"              \
+                    "2:\n"                                             \
+                    _ASM_EXTABLE(1b, 2b - 1b)                          \
+                    : : ltype(x), "m" (__m(addr)))
+
+/*
+ * uaccess_try and catch
+ */
+#define uaccess_try    do {                                            \
+       int prev_err = current_thread_info()->uaccess_err;              \
+       current_thread_info()->uaccess_err = 0;                         \
+       barrier();
+
+#define uaccess_catch(err)                                             \
+       (err) |= current_thread_info()->uaccess_err;                    \
+       current_thread_info()->uaccess_err = prev_err;                  \
+} while (0)
+
 /**
  * __get_user: - Get a simple variable from user space, with less checking.
  * @x:   Variable to store result.
@@ -408,6 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
 
 #define __get_user(x, ptr)                                             \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+
 /**
  * __put_user: - Write a simple value into user space, with less checking.
  * @x:   Value to copy to user space.
@@ -434,6 +517,45 @@ struct __large_struct { unsigned long buf[100]; };
 #define __get_user_unaligned __get_user
 #define __put_user_unaligned __put_user
 
+/*
+ * {get|put}_user_try and catch
+ *
+ * get_user_try {
+ *     get_user_ex(...);
+ * } get_user_catch(err)
+ */
+#define get_user_try           uaccess_try
+#define get_user_catch(err)    uaccess_catch(err)
+
+#define get_user_ex(x, ptr)    do {                                    \
+       unsigned long __gue_val;                                        \
+       __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));       \
+       (x) = (__force __typeof__(*(ptr)))__gue_val;                    \
+} while (0)
+
+#ifdef CONFIG_X86_WP_WORKS_OK
+
+#define put_user_try           uaccess_try
+#define put_user_catch(err)    uaccess_catch(err)
+
+#define put_user_ex(x, ptr)                                            \
+       __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+#else /* !CONFIG_X86_WP_WORKS_OK */
+
+#define put_user_try           do {            \
+       int __uaccess_err = 0;
+
+#define put_user_catch(err)                    \
+       (err) |= __uaccess_err;                 \
+} while (0)
+
+#define put_user_ex(x, ptr)    do {            \
+       __uaccess_err |= __put_user(x, ptr);    \
+} while (0)
+
+#endif /* CONFIG_X86_WP_WORKS_OK */
+
 /*
  * movsl can be slow when source and dest are not both 8-byte aligned
  */
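
The try/catch helpers above let several user-space accesses share a single error flag instead of checking each __get_user() return value individually. A hedged usage sketch; the function and its arguments are hypothetical, chosen only to show the pattern the comment block describes:

/*
 * Sketch of the intended pattern: faults inside the block accumulate in
 * current_thread_info()->uaccess_err and are folded into 'err' once at
 * get_user_catch().  read_pair_from_user() is not part of this merge.
 */
static int read_pair_from_user(const u32 __user *src, u32 *lo, u32 *hi)
{
	int err = 0;

	get_user_try {
		get_user_ex(*lo, &src[0]);
		get_user_ex(*hi, &src[1]);
	} get_user_catch(err);

	return err;
}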
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
new file mode 100644 (file)
index 0000000..8242bf9
--- /dev/null
@@ -0,0 +1,36 @@
+#ifndef _ASM_X86_UV_UV_H
+#define _ASM_X86_UV_UV_H
+
+enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
+
+struct cpumask;
+struct mm_struct;
+
+#ifdef CONFIG_X86_UV
+
+extern enum uv_system_type get_uv_system_type(void);
+extern int is_uv_system(void);
+extern void uv_cpu_init(void);
+extern void uv_system_init(void);
+extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
+extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
+                                                struct mm_struct *mm,
+                                                unsigned long va,
+                                                unsigned int cpu);
+
+#else  /* X86_UV */
+
+static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
+static inline int is_uv_system(void)   { return 0; }
+static inline void uv_cpu_init(void)   { }
+static inline void uv_system_init(void)        { }
+static inline int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
+{ return 1; }
+static inline const struct cpumask *
+uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm,
+                   unsigned long va, unsigned int cpu)
+{ return cpumask; }
+
+#endif /* X86_UV */
+
+#endif /* _ASM_X86_UV_UV_H */
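
The !CONFIG_X86_UV half of the new header provides inline no-op stubs, so generic code can call the UV hooks unconditionally and let the compiler drop the dead branch. A short caller sketch; example_platform_init() is a hypothetical function, not code from this merge:

/* Sketch only: with the stubs above, no #ifdef CONFIG_X86_UV is needed
 * at the call site; on non-UV builds is_uv_system() is constant 0. */
static void example_platform_init(void)
{
	if (is_uv_system()) {
		uv_system_init();
		uv_cpu_init();
	}
}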
index 50423c7..9b0e61b 100644 (file)
@@ -325,7 +325,6 @@ static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
 #define cpubit_isset(cpu, bau_local_cpumask) \
        test_bit((cpu), (bau_local_cpumask).bits)
 
-extern int uv_flush_tlb_others(cpumask_t *, struct mm_struct *, unsigned long);
 extern void uv_bau_message_intr1(void);
 extern void uv_bau_timeout_intr1(void);
 
index b3e6473..c1635d4 100644 (file)
@@ -527,3 +527,45 @@ extern void voyager_smp_intr_init(void);
 #define VOYAGER_PSI_SUBREAD    2
 #define VOYAGER_PSI_SUBWRITE   3
 extern void voyager_cat_psi(__u8, __u16, __u8 *);
+
+/* These define the CPIs we use in linux */
+#define VIC_CPI_LEVEL0                 0
+#define VIC_CPI_LEVEL1                 1
+/* now the fake CPIs */
+#define VIC_TIMER_CPI                  2
+#define VIC_INVALIDATE_CPI             3
+#define VIC_RESCHEDULE_CPI             4
+#define VIC_ENABLE_IRQ_CPI             5
+#define VIC_CALL_FUNCTION_CPI          6
+#define VIC_CALL_FUNCTION_SINGLE_CPI   7
+
+/* Now the QIC CPIs:  Since we don't need the two initial levels,
+ * these are 2 less than the VIC CPIs */
+#define QIC_CPI_OFFSET                 1
+#define QIC_TIMER_CPI                  (VIC_TIMER_CPI - QIC_CPI_OFFSET)
+#define QIC_INVALIDATE_CPI             (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET)
+#define QIC_RESCHEDULE_CPI             (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
+#define QIC_ENABLE_IRQ_CPI             (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
+#define QIC_CALL_FUNCTION_CPI          (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
+#define QIC_CALL_FUNCTION_SINGLE_CPI   (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET)
+
+#define VIC_START_FAKE_CPI             VIC_TIMER_CPI
+#define VIC_END_FAKE_CPI               VIC_CALL_FUNCTION_SINGLE_CPI
+
+/* this is the SYS_INT CPI. */
+#define VIC_SYS_INT                    8
+#define VIC_CMN_INT                    15
+
+/* This is the boot CPI for alternate processors.  It gets overwritten
+ * by the above once the system has activated all available processors */
+#define VIC_CPU_BOOT_CPI               VIC_CPI_LEVEL0
+#define VIC_CPU_BOOT_ERRATA_CPI                (VIC_CPI_LEVEL0 + 8)
+
+extern asmlinkage void vic_cpi_interrupt(void);
+extern asmlinkage void vic_sys_interrupt(void);
+extern asmlinkage void vic_cmn_interrupt(void);
+extern asmlinkage void qic_timer_interrupt(void);
+extern asmlinkage void qic_invalidate_interrupt(void);
+extern asmlinkage void qic_reschedule_interrupt(void);
+extern asmlinkage void qic_enable_irq_interrupt(void);
+extern asmlinkage void qic_call_function_interrupt(void);
index 1914418..1df3541 100644 (file)
@@ -15,10 +15,4 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
        return raw_irqs_disabled_flags(regs->flags);
 }
 
-static inline void xen_do_IRQ(int irq, struct pt_regs *regs)
-{
-       regs->orig_ax = ~irq;
-       do_IRQ(regs);
-}
-
 #endif /* _ASM_X86_XEN_EVENTS_H */
index d364df0..24f357e 100644 (file)
@@ -23,13 +23,14 @@ nostackp := $(call cc-option, -fno-stack-protector)
 CFLAGS_vsyscall_64.o   := $(PROFILING) -g0 $(nostackp)
 CFLAGS_hpet.o          := $(nostackp)
 CFLAGS_tsc.o           := $(nostackp)
+CFLAGS_paravirt.o      := $(nostackp)
 
 obj-y                  := process_$(BITS).o signal.o entry_$(BITS).o
 obj-y                  += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
 obj-y                  += time_$(BITS).o ioport.o ldt.o dumpstack.o
-obj-y                  += setup.o i8259.o irqinit_$(BITS).o setup_percpu.o
+obj-y                  += setup.o i8259.o irqinit_$(BITS).o
 obj-$(CONFIG_X86_VISWS)        += visws_quirks.o
-obj-$(CONFIG_X86_32)   += probe_roms_32.o
+obj-$(CONFIG_X86_32)   += probe_32.o probe_roms_32.o
 obj-$(CONFIG_X86_32)   += sys_i386_32.o i386_ksyms_32.o
 obj-$(CONFIG_X86_64)   += sys_x86_64.o x8664_ksyms_64.o
 obj-$(CONFIG_X86_64)   += syscall_64.o vsyscall_64.o
@@ -49,20 +50,20 @@ obj-y                               += step.o
 obj-$(CONFIG_STACKTRACE)       += stacktrace.o
 obj-y                          += cpu/
 obj-y                          += acpi/
-obj-$(CONFIG_X86_BIOS_REBOOT)  += reboot.o
+obj-y                          += reboot.o
 obj-$(CONFIG_MCA)              += mca_32.o
 obj-$(CONFIG_X86_MSR)          += msr.o
 obj-$(CONFIG_X86_CPUID)                += cpuid.o
 obj-$(CONFIG_PCI)              += early-quirks.o
 apm-y                          := apm_32.o
 obj-$(CONFIG_APM)              += apm.o
-obj-$(CONFIG_X86_SMP)          += smp.o
-obj-$(CONFIG_X86_SMP)          += smpboot.o tsc_sync.o ipi.o tlb_$(BITS).o
-obj-$(CONFIG_X86_32_SMP)       += smpcommon.o
-obj-$(CONFIG_X86_64_SMP)       += tsc_sync.o smpcommon.o
+obj-$(CONFIG_SMP)              += smp.o
+obj-$(CONFIG_SMP)              += smpboot.o tsc_sync.o ipi.o
+obj-$(CONFIG_SMP)              += setup_percpu.o
+obj-$(CONFIG_X86_64_SMP)       += tsc_sync.o
 obj-$(CONFIG_X86_TRAMPOLINE)   += trampoline_$(BITS).o
 obj-$(CONFIG_X86_MPPARSE)      += mpparse.o
-obj-$(CONFIG_X86_LOCAL_APIC)   += apic.o nmi.o
+obj-$(CONFIG_X86_LOCAL_APIC)   += apic.o nmi.o ipi.o
 obj-$(CONFIG_X86_IO_APIC)      += io_apic.o
 obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
 obj-$(CONFIG_DYNAMIC_FTRACE)   += ftrace.o
@@ -70,9 +71,10 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER)  += ftrace.o
 obj-$(CONFIG_KEXEC)            += machine_kexec_$(BITS).o
 obj-$(CONFIG_KEXEC)            += relocate_kernel_$(BITS).o crash.o
 obj-$(CONFIG_CRASH_DUMP)       += crash_dump_$(BITS).o
+obj-$(CONFIG_X86_BIGSMP)       += bigsmp_32.o
 obj-$(CONFIG_X86_NUMAQ)                += numaq_32.o
 obj-$(CONFIG_X86_ES7000)       += es7000_32.o
-obj-$(CONFIG_X86_SUMMIT_NUMA)  += summit_32.o
+obj-$(CONFIG_X86_SUMMIT)       += summit_32.o
 obj-y                          += vsmp_64.o
 obj-$(CONFIG_KPROBES)          += kprobes.o
 obj-$(CONFIG_MODULES)          += module_$(BITS).o
@@ -114,10 +116,11 @@ obj-$(CONFIG_SWIOTLB)                     += pci-swiotlb_64.o # NB rename without _64
 ###
 # 64 bit specific files
 ifeq ($(CONFIG_X86_64),y)
-        obj-y                          += genapic_64.o genapic_flat_64.o genx2apic_uv_x.o tlb_uv.o
-       obj-y                           += bios_uv.o uv_irq.o uv_sysfs.o
+        obj-y                          += genapic_64.o genapic_flat_64.o
         obj-y                          += genx2apic_cluster.o
         obj-y                          += genx2apic_phys.o
+       obj-$(CONFIG_X86_UV)            += genx2apic_uv_x.o tlb_uv.o
+       obj-$(CONFIG_X86_UV)            += bios_uv.o uv_irq.o uv_sysfs.o
         obj-$(CONFIG_X86_PM_TIMER)     += pmtimer_64.o
         obj-$(CONFIG_AUDIT)            += audit_64.o
 
index 7678f10..956c1de 100644 (file)
 #include <asm/mpspec.h>
 #include <asm/smp.h>
 
-#ifdef CONFIG_X86_LOCAL_APIC
-# include <mach_apic.h>
-#endif
-
 static int __initdata acpi_force = 0;
 u32 acpi_rsdt_forced;
 #ifdef CONFIG_ACPI
@@ -56,16 +52,7 @@ int acpi_disabled = 1;
 EXPORT_SYMBOL(acpi_disabled);
 
 #ifdef CONFIG_X86_64
-
-#include <asm/proto.h>
-
-#else                          /* X86 */
-
-#ifdef CONFIG_X86_LOCAL_APIC
-#include <mach_apic.h>
-#include <mach_mpparse.h>
-#endif                         /* CONFIG_X86_LOCAL_APIC */
-
+# include <asm/proto.h>
 #endif                         /* X86 */
 
 #define BAD_MADT_ENTRY(entry, end) (                                       \
@@ -121,35 +108,18 @@ enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
  */
 char *__init __acpi_map_table(unsigned long phys, unsigned long size)
 {
-       unsigned long base, offset, mapped_size;
-       int idx;
 
        if (!phys || !size)
                return NULL;
 
-       if (phys+size <= (max_low_pfn_mapped << PAGE_SHIFT))
-               return __va(phys);
-
-       offset = phys & (PAGE_SIZE - 1);
-       mapped_size = PAGE_SIZE - offset;
-       clear_fixmap(FIX_ACPI_END);
-       set_fixmap(FIX_ACPI_END, phys);
-       base = fix_to_virt(FIX_ACPI_END);
-
-       /*
-        * Most cases can be covered by the below.
-        */
-       idx = FIX_ACPI_END;
-       while (mapped_size < size) {
-               if (--idx < FIX_ACPI_BEGIN)
-                       return NULL;    /* cannot handle this */
-               phys += PAGE_SIZE;
-               clear_fixmap(idx);
-               set_fixmap(idx, phys);
-               mapped_size += PAGE_SIZE;
-       }
+       return early_ioremap(phys, size);
+}
+void __init __acpi_unmap_table(char *map, unsigned long size)
+{
+       if (!map || !size)
+               return;
 
-       return ((unsigned char *)base + offset);
+       early_iounmap(map, size);
 }
 
 #ifdef CONFIG_PCI_MMCONFIG
@@ -239,7 +209,8 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
                       madt->address);
        }
 
-       acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
+       default_acpi_madt_oem_check(madt->header.oem_id,
+                                   madt->header.oem_table_id);
 
        return 0;
 }
@@ -884,7 +855,7 @@ static struct {
        DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
 } mp_ioapic_routing[MAX_IO_APICS];
 
-static int mp_find_ioapic(int gsi)
+int mp_find_ioapic(int gsi)
 {
        int i = 0;
 
@@ -899,6 +870,16 @@ static int mp_find_ioapic(int gsi)
        return -1;
 }
 
+int mp_find_ioapic_pin(int ioapic, int gsi)
+{
+       if (WARN_ON(ioapic == -1))
+               return -1;
+       if (WARN_ON(gsi > mp_ioapic_routing[ioapic].gsi_end))
+               return -1;
+
+       return gsi - mp_ioapic_routing[ioapic].gsi_base;
+}
+
 static u8 __init uniq_ioapic_id(u8 id)
 {
 #ifdef CONFIG_X86_32
@@ -912,8 +893,8 @@ static u8 __init uniq_ioapic_id(u8 id)
        DECLARE_BITMAP(used, 256);
        bitmap_zero(used, 256);
        for (i = 0; i < nr_ioapics; i++) {
-               struct mp_config_ioapic *ia = &mp_ioapics[i];
-               __set_bit(ia->mp_apicid, used);
+               struct mpc_ioapic *ia = &mp_ioapics[i];
+               __set_bit(ia->apicid, used);
        }
        if (!test_bit(id, used))
                return id;
@@ -945,29 +926,29 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
 
        idx = nr_ioapics;
 
-       mp_ioapics[idx].mp_type = MP_IOAPIC;
-       mp_ioapics[idx].mp_flags = MPC_APIC_USABLE;
-       mp_ioapics[idx].mp_apicaddr = address;
+       mp_ioapics[idx].type = MP_IOAPIC;
+       mp_ioapics[idx].flags = MPC_APIC_USABLE;
+       mp_ioapics[idx].apicaddr = address;
 
        set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
-       mp_ioapics[idx].mp_apicid = uniq_ioapic_id(id);
+       mp_ioapics[idx].apicid = uniq_ioapic_id(id);
 #ifdef CONFIG_X86_32
-       mp_ioapics[idx].mp_apicver = io_apic_get_version(idx);
+       mp_ioapics[idx].apicver = io_apic_get_version(idx);
 #else
-       mp_ioapics[idx].mp_apicver = 0;
+       mp_ioapics[idx].apicver = 0;
 #endif
        /*
         * Build basic GSI lookup table to facilitate gsi->io_apic lookups
         * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
         */
-       mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mp_apicid;
+       mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].apicid;
        mp_ioapic_routing[idx].gsi_base = gsi_base;
        mp_ioapic_routing[idx].gsi_end = gsi_base +
            io_apic_get_redir_entries(idx);
 
-       printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
-              "GSI %d-%d\n", idx, mp_ioapics[idx].mp_apicid,
-              mp_ioapics[idx].mp_apicver, mp_ioapics[idx].mp_apicaddr,
+       printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
+              "GSI %d-%d\n", idx, mp_ioapics[idx].apicid,
+              mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr,
               mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end);
 
        nr_ioapics++;
@@ -996,19 +977,19 @@ int __init acpi_probe_gsi(void)
        return max_gsi + 1;
 }
 
-static void assign_to_mp_irq(struct mp_config_intsrc *m,
-                                   struct mp_config_intsrc *mp_irq)
+static void assign_to_mp_irq(struct mpc_intsrc *m,
+                                   struct mpc_intsrc *mp_irq)
 {
-       memcpy(mp_irq, m, sizeof(struct mp_config_intsrc));
+       memcpy(mp_irq, m, sizeof(struct mpc_intsrc));
 }
 
-static int mp_irq_cmp(struct mp_config_intsrc *mp_irq,
-                               struct mp_config_intsrc *m)
+static int mp_irq_cmp(struct mpc_intsrc *mp_irq,
+                               struct mpc_intsrc *m)
 {
-       return memcmp(mp_irq, m, sizeof(struct mp_config_intsrc));
+       return memcmp(mp_irq, m, sizeof(struct mpc_intsrc));
 }
 
-static void save_mp_irq(struct mp_config_intsrc *m)
+static void save_mp_irq(struct mpc_intsrc *m)
 {
        int i;
 
@@ -1026,7 +1007,7 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
 {
        int ioapic;
        int pin;
-       struct mp_config_intsrc mp_irq;
+       struct mpc_intsrc mp_irq;
 
        /*
         * Convert 'gsi' to 'ioapic.pin'.
@@ -1034,7 +1015,7 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
        ioapic = mp_find_ioapic(gsi);
        if (ioapic < 0)
                return;
-       pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
+       pin = mp_find_ioapic_pin(ioapic, gsi);
 
        /*
         * TBD: This check is for faulty timer entries, where the override
@@ -1044,13 +1025,13 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
        if ((bus_irq == 0) && (trigger == 3))
                trigger = 1;
 
-       mp_irq.mp_type = MP_INTSRC;
-       mp_irq.mp_irqtype = mp_INT;
-       mp_irq.mp_irqflag = (trigger << 2) | polarity;
-       mp_irq.mp_srcbus = MP_ISA_BUS;
-       mp_irq.mp_srcbusirq = bus_irq;  /* IRQ */
-       mp_irq.mp_dstapic = mp_ioapics[ioapic].mp_apicid; /* APIC ID */
-       mp_irq.mp_dstirq = pin; /* INTIN# */
+       mp_irq.type = MP_INTSRC;
+       mp_irq.irqtype = mp_INT;
+       mp_irq.irqflag = (trigger << 2) | polarity;
+       mp_irq.srcbus = MP_ISA_BUS;
+       mp_irq.srcbusirq = bus_irq;     /* IRQ */
+       mp_irq.dstapic = mp_ioapics[ioapic].apicid; /* APIC ID */
+       mp_irq.dstirq = pin;    /* INTIN# */
 
        save_mp_irq(&mp_irq);
 }
@@ -1060,7 +1041,7 @@ void __init mp_config_acpi_legacy_irqs(void)
        int i;
        int ioapic;
        unsigned int dstapic;
-       struct mp_config_intsrc mp_irq;
+       struct mpc_intsrc mp_irq;
 
 #if defined (CONFIG_MCA) || defined (CONFIG_EISA)
        /*
@@ -1085,7 +1066,7 @@ void __init mp_config_acpi_legacy_irqs(void)
        ioapic = mp_find_ioapic(0);
        if (ioapic < 0)
                return;
-       dstapic = mp_ioapics[ioapic].mp_apicid;
+       dstapic = mp_ioapics[ioapic].apicid;
 
        /*
         * Use the default configuration for the IRQs 0-15.  Unless
@@ -1095,16 +1076,14 @@ void __init mp_config_acpi_legacy_irqs(void)
                int idx;
 
                for (idx = 0; idx < mp_irq_entries; idx++) {
-                       struct mp_config_intsrc *irq = mp_irqs + idx;
+                       struct mpc_intsrc *irq = mp_irqs + idx;
 
                        /* Do we already have a mapping for this ISA IRQ? */
-                       if (irq->mp_srcbus == MP_ISA_BUS
-                           && irq->mp_srcbusirq == i)
+                       if (irq->srcbus == MP_ISA_BUS && irq->srcbusirq == i)
                                break;
 
                        /* Do we already have a mapping for this IOAPIC pin */
-                       if (irq->mp_dstapic == dstapic &&
-                           irq->mp_dstirq == i)
+                       if (irq->dstapic == dstapic && irq->dstirq == i)
                                break;
                }
 
@@ -1113,13 +1092,13 @@ void __init mp_config_acpi_legacy_irqs(void)
                        continue;       /* IRQ already used */
                }
 
-               mp_irq.mp_type = MP_INTSRC;
-               mp_irq.mp_irqflag = 0;  /* Conforming */
-               mp_irq.mp_srcbus = MP_ISA_BUS;
-               mp_irq.mp_dstapic = dstapic;
-               mp_irq.mp_irqtype = mp_INT;
-               mp_irq.mp_srcbusirq = i; /* Identity mapped */
-               mp_irq.mp_dstirq = i;
+               mp_irq.type = MP_INTSRC;
+               mp_irq.irqflag = 0;     /* Conforming */
+               mp_irq.srcbus = MP_ISA_BUS;
+               mp_irq.dstapic = dstapic;
+               mp_irq.irqtype = mp_INT;
+               mp_irq.srcbusirq = i; /* Identity mapped */
+               mp_irq.dstirq = i;
 
                save_mp_irq(&mp_irq);
        }
@@ -1156,7 +1135,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
                return gsi;
        }
 
-       ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
+       ioapic_pin = mp_find_ioapic_pin(ioapic, gsi);
 
 #ifdef CONFIG_X86_32
        if (ioapic_renumber_irq)
@@ -1230,22 +1209,22 @@ int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
                        u32 gsi, int triggering, int polarity)
 {
 #ifdef CONFIG_X86_MPPARSE
-       struct mp_config_intsrc mp_irq;
+       struct mpc_intsrc mp_irq;
        int ioapic;
 
        if (!acpi_ioapic)
                return 0;
 
        /* print the entry should happen on mptable identically */
-       mp_irq.mp_type = MP_INTSRC;
-       mp_irq.mp_irqtype = mp_INT;
-       mp_irq.mp_irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
+       mp_irq.type = MP_INTSRC;
+       mp_irq.irqtype = mp_INT;
+       mp_irq.irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
                                (polarity == ACPI_ACTIVE_HIGH ? 1 : 3);
-       mp_irq.mp_srcbus = number;
-       mp_irq.mp_srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
+       mp_irq.srcbus = number;
+       mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
        ioapic = mp_find_ioapic(gsi);
-       mp_irq.mp_dstapic = mp_ioapic_routing[ioapic].apic_id;
-       mp_irq.mp_dstirq = gsi - mp_ioapic_routing[ioapic].gsi_base;
+       mp_irq.dstapic = mp_ioapic_routing[ioapic].apic_id;
+       mp_irq.dstirq = mp_find_ioapic_pin(ioapic, gsi);
 
        save_mp_irq(&mp_irq);
 #endif
@@ -1372,7 +1351,7 @@ static void __init acpi_process_madt(void)
                if (!error) {
                        acpi_lapic = 1;
 
-#ifdef CONFIG_X86_GENERICARCH
+#ifdef CONFIG_X86_BIGSMP
                        generic_bigsmp_probe();
 #endif
                        /*
@@ -1384,9 +1363,8 @@ static void __init acpi_process_madt(void)
                                acpi_ioapic = 1;
 
                                smp_found_config = 1;
-#ifdef CONFIG_X86_32
-                               setup_apic_routing();
-#endif
+                               if (apic->setup_apic_routing)
+                                       apic->setup_apic_routing();
                        }
                }
                if (error == -EINVAL) {
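
__acpi_map_table() above is reduced to early_ioremap(), and the new __acpi_unmap_table() wraps early_iounmap(), so mappings are no longer parked in fixmap slots and have to be released explicitly. A hedged sketch of the resulting map/use/unmap discipline; verify_table_header() is an assumed placeholder, not a function from this merge:

/* Sketch only: shows the pairing the new helpers imply for a caller. */
static int example_peek_acpi_table(unsigned long phys, unsigned long size)
{
	char *map = __acpi_map_table(phys, size);	/* early_ioremap() now */
	int ok;

	if (!map)
		return -1;

	ok = verify_table_header(map, size);		/* assumed placeholder */

	__acpi_unmap_table(map, size);			/* must be paired */
	return ok;
}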
index a60c1f3..7c243a2 100644 (file)
@@ -101,6 +101,7 @@ int acpi_save_state_mem(void)
        stack_start.sp = temp_stack + sizeof(temp_stack);
        early_gdt_descr.address =
                        (unsigned long)get_cpu_gdt_table(smp_processor_id());
+       initial_gs = per_cpu_offset(smp_processor_id());
 #endif
        initial_code = (unsigned long)wakeup_long64;
        saved_magic = 0x123456789abcdef0;
index 115449f..cf2ca19 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *     Local APIC handling, local APIC timers
  *
- *     (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
+ *     (c) 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
  *
  *     Fixes
  *     Maciej W. Rozycki       :       Bits for genuine 82489DX APICs;
  *     Mikael Pettersson       :       PM converted to driver model.
  */
 
-#include <linux/init.h>
-
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/bootmem.h>
-#include <linux/interrupt.h>
-#include <linux/mc146818rtc.h>
 #include <linux/kernel_stat.h>
-#include <linux/sysdev.h>
-#include <linux/ioport.h>
-#include <linux/cpu.h>
-#include <linux/clockchips.h>
+#include <linux/mc146818rtc.h>
 #include <linux/acpi_pmtmr.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/bootmem.h>
+#include <linux/ftrace.h>
+#include <linux/ioport.h>
 #include <linux/module.h>
-#include <linux/dmi.h>
+#include <linux/sysdev.h>
+#include <linux/delay.h>
+#include <linux/timex.h>
 #include <linux/dmar.h>
-#include <linux/ftrace.h>
-#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/dmi.h>
 #include <linux/nmi.h>
-#include <linux/timex.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
 
-#include <asm/atomic.h>
-#include <asm/mtrr.h>
-#include <asm/mpspec.h>
-#include <asm/desc.h>
 #include <asm/arch_hooks.h>
-#include <asm/hpet.h>
 #include <asm/pgalloc.h>
+#include <asm/genapic.h>
+#include <asm/atomic.h>
+#include <asm/mpspec.h>
 #include <asm/i8253.h>
-#include <asm/idle.h>
+#include <asm/i8259.h>
 #include <asm/proto.h>
 #include <asm/apic.h>
-#include <asm/i8259.h>
+#include <asm/desc.h>
+#include <asm/hpet.h>
+#include <asm/idle.h>
+#include <asm/mtrr.h>
 #include <asm/smp.h>
 
-#include <mach_apic.h>
-#include <mach_apicdef.h>
-#include <mach_ipi.h>
+unsigned int num_processors;
+
+unsigned disabled_cpus __cpuinitdata;
+
+/* Processor that is doing the boot up */
+unsigned int boot_cpu_physical_apicid = -1U;
 
 /*
- * Sanity check
+ * The highest APIC ID seen during enumeration.
+ *
+ * This determines the messaging protocol we can use: if all APIC IDs
+ * are in the 0 ... 7 range, then we can use logical addressing which
+ * has some performance advantages (better broadcasting).
+ *
+ * If there's an APIC ID above 8, we use physical addressing.
  */
-#if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F)
-# error SPURIOUS_APIC_VECTOR definition error
-#endif
+unsigned int max_physical_apicid;
+
+/*
+ * Bitmask of physically existing CPUs:
+ */
+physid_mask_t phys_cpu_present_map;
+
+/*
+ * Map cpu index to physical APIC ID
+ */
+DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
+DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
+EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
+EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
 
 #ifdef CONFIG_X86_32
 /*
@@ -457,7 +477,7 @@ static void lapic_timer_setup(enum clock_event_mode mode,
 static void lapic_timer_broadcast(const struct cpumask *mask)
 {
 #ifdef CONFIG_SMP
-       send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
+       apic->send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
 #endif
 }
 
@@ -535,7 +555,8 @@ static void __init lapic_cal_handler(struct clock_event_device *dev)
        }
 }
 
-static int __init calibrate_by_pmtimer(long deltapm, long *delta)
+static int __init
+calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
 {
        const long pm_100ms = PMTMR_TICKS_PER_SEC / 10;
        const long pm_thresh = pm_100ms / 100;
@@ -546,7 +567,7 @@ static int __init calibrate_by_pmtimer(long deltapm, long *delta)
        return -1;
 #endif
 
-       apic_printk(APIC_VERBOSE, "... PM timer delta = %ld\n", deltapm);
+       apic_printk(APIC_VERBOSE, "... PM-Timer delta = %ld\n", deltapm);
 
        /* Check, if the PM timer is available */
        if (!deltapm)
@@ -556,19 +577,30 @@ static int __init calibrate_by_pmtimer(long deltapm, long *delta)
 
        if (deltapm > (pm_100ms - pm_thresh) &&
            deltapm < (pm_100ms + pm_thresh)) {
-               apic_printk(APIC_VERBOSE, "... PM timer result ok\n");
-       } else {
-               res = (((u64)deltapm) *  mult) >> 22;
-               do_div(res, 1000000);
-               pr_warning("APIC calibration not consistent "
-                       "with PM Timer: %ldms instead of 100ms\n",
-                       (long)res);
-               /* Correct the lapic counter value */
-               res = (((u64)(*delta)) * pm_100ms);
+               apic_printk(APIC_VERBOSE, "... PM-Timer result ok\n");
+               return 0;
+       }
+
+       res = (((u64)deltapm) *  mult) >> 22;
+       do_div(res, 1000000);
+       pr_warning("APIC calibration not consistent "
+                  "with PM-Timer: %ldms instead of 100ms\n", (long)res);
+
+       /* Correct the lapic counter value */
+       res = (((u64)(*delta)) * pm_100ms);
+       do_div(res, deltapm);
+       pr_info("APIC delta adjusted to PM-Timer: "
+               "%lu (%ld)\n", (unsigned long)res, *delta);
+       *delta = (long)res;
+
+       /* Correct the tsc counter value */
+       if (cpu_has_tsc) {
+               res = (((u64)(*deltatsc)) * pm_100ms);
                do_div(res, deltapm);
-               pr_info("APIC delta adjusted to PM-Timer: "
-                       "%lu (%ld)\n", (unsigned long)res, *delta);
-               *delta = (long)res;
+               apic_printk(APIC_VERBOSE, "TSC delta adjusted to "
+                                         "PM-Timer: %lu (%ld)\n",
+                                       (unsigned long)res, *deltatsc);
+               *deltatsc = (long)res;
        }
 
        return 0;
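The correction above rescales the measured lapic (and TSC) delta by the ratio of the expected PM-timer ticks (pm_100ms) to the measured ones (deltapm). A standalone example with made-up numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const long pm_100ms = 3579545 / 10;	/* PMTMR_TICKS_PER_SEC / 10 */
	long deltapm = 393750;	/* ~110 ms worth of PM-timer ticks (hypothetical) */
	long delta   = 110000;	/* lapic ticks measured in the same window */

	uint64_t res = (uint64_t)delta * pm_100ms;
	res /= deltapm;		/* 99999, i.e. roughly the true 100 ms count */

	printf("adjusted lapic delta: %llu\n", (unsigned long long)res);
	return 0;
}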
@@ -579,7 +611,7 @@ static int __init calibrate_APIC_clock(void)
        struct clock_event_device *levt = &__get_cpu_var(lapic_events);
        void (*real_handler)(struct clock_event_device *dev);
        unsigned long deltaj;
-       long delta;
+       long delta, deltatsc;
        int pm_referenced = 0;
 
        local_irq_disable();
@@ -609,9 +641,11 @@ static int __init calibrate_APIC_clock(void)
        delta = lapic_cal_t1 - lapic_cal_t2;
        apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);
 
+       deltatsc = (long)(lapic_cal_tsc2 - lapic_cal_tsc1);
+
        /* we trust the PM based calibration if possible */
        pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1,
-                                       &delta);
+                                       &delta, &deltatsc);
 
        /* Calculate the scaled math multiplication factor */
        lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
@@ -629,11 +663,10 @@ static int __init calibrate_APIC_clock(void)
                    calibration_result);
 
        if (cpu_has_tsc) {
-               delta = (long)(lapic_cal_tsc2 - lapic_cal_tsc1);
                apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
                            "%ld.%04ld MHz.\n",
-                           (delta / LAPIC_CAL_LOOPS) / (1000000 / HZ),
-                           (delta / LAPIC_CAL_LOOPS) % (1000000 / HZ));
+                           (deltatsc / LAPIC_CAL_LOOPS) / (1000000 / HZ),
+                           (deltatsc / LAPIC_CAL_LOOPS) % (1000000 / HZ));
        }
 
        apic_printk(APIC_VERBOSE, "..... host bus clock speed is "
@@ -991,11 +1024,11 @@ int __init verify_local_APIC(void)
         */
        reg0 = apic_read(APIC_ID);
        apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
-       apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
+       apic_write(APIC_ID, reg0 ^ apic->apic_id_mask);
        reg1 = apic_read(APIC_ID);
        apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
        apic_write(APIC_ID, reg0);
-       if (reg1 != (reg0 ^ APIC_ID_MASK))
+       if (reg1 != (reg0 ^ apic->apic_id_mask))
                return 0;
 
        /*
@@ -1089,7 +1122,7 @@ static void __cpuinit lapic_setup_esr(void)
                return;
        }
 
-       if (esr_disable) {
+       if (apic->disable_esr) {
                /*
                 * Something untraceable is creating bad interrupts on
                 * secondary quads ... for the moment, just leave the
@@ -1130,9 +1163,14 @@ void __cpuinit setup_local_APIC(void)
        unsigned int value;
        int i, j;
 
+       if (disable_apic) {
+               arch_disable_smp_support();
+               return;
+       }
+
 #ifdef CONFIG_X86_32
        /* Pound the ESR really hard over the head with a big hammer - mbligh */
-       if (lapic_is_integrated() && esr_disable) {
+       if (lapic_is_integrated() && apic->disable_esr) {
                apic_write(APIC_ESR, 0);
                apic_write(APIC_ESR, 0);
                apic_write(APIC_ESR, 0);
@@ -1146,7 +1184,7 @@ void __cpuinit setup_local_APIC(void)
         * Double-check whether this APIC is really registered.
         * This is meaningless in clustered apic mode, so we skip it.
         */
-       if (!apic_id_registered())
+       if (!apic->apic_id_registered())
                BUG();
 
        /*
@@ -1154,7 +1192,7 @@ void __cpuinit setup_local_APIC(void)
         * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
         * document number 292116).  So here it goes...
         */
-       init_apic_ldr();
+       apic->init_apic_ldr();
 
        /*
         * Set Task Priority to 'accept all'. We never change this
@@ -1570,11 +1608,11 @@ int apic_version[MAX_APICS];
 
 int __init APIC_init_uniprocessor(void)
 {
-#ifdef CONFIG_X86_64
        if (disable_apic) {
                pr_info("Apic disabled\n");
                return -1;
        }
+#ifdef CONFIG_X86_64
        if (!cpu_has_apic) {
                disable_apic = 1;
                pr_info("Apic disabled by BIOS\n");
@@ -1600,7 +1638,7 @@ int __init APIC_init_uniprocessor(void)
        enable_IR_x2apic();
 #endif
 #ifdef CONFIG_X86_64
-       setup_apic_routing();
+       default_setup_apic_routing();
 #endif
 
        verify_local_APIC();
@@ -1738,7 +1776,8 @@ void __init connect_bsp_APIC(void)
                outb(0x01, 0x23);
        }
 #endif
-       enable_apic_mode();
+       if (apic->enable_apic_mode)
+               apic->enable_apic_mode();
 }
 
 /**
@@ -1876,29 +1915,39 @@ void __cpuinit generic_processor_info(int apicid, int version)
        }
 #endif
 
-#if defined(CONFIG_X86_SMP) || defined(CONFIG_X86_64)
-       /* are we being called early in kernel startup? */
-       if (early_per_cpu_ptr(x86_cpu_to_apicid)) {
-               u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
-               u16 *bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
-
-               cpu_to_apicid[cpu] = apicid;
-               bios_cpu_apicid[cpu] = apicid;
-       } else {
-               per_cpu(x86_cpu_to_apicid, cpu) = apicid;
-               per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
-       }
+#if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
+       early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
+       early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
 #endif
 
        set_cpu_possible(cpu, true);
        set_cpu_present(cpu, true);
 }
 
-#ifdef CONFIG_X86_64
 int hard_smp_processor_id(void)
 {
        return read_apic_id();
 }
+
+void default_init_apic_ldr(void)
+{
+       unsigned long val;
+
+       apic_write(APIC_DFR, APIC_DFR_VALUE);
+       val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
+       val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
+       apic_write(APIC_LDR, val);
+}
+
+#ifdef CONFIG_X86_32
+int default_apicid_to_node(int logical_apicid)
+{
+#ifdef CONFIG_SMP
+       return apicid_2_node[hard_smp_processor_id()];
+#else
+       return 0;
+#endif
+}
 #endif
 
 /*
index 98807bb..37ba5f8 100644 (file)
@@ -301,7 +301,7 @@ extern int (*console_blank_hook)(int);
  */
 #define APM_ZERO_SEGS
 
-#include "apm.h"
+#include <asm/apm.h>
 
 /*
  * Define to re-initialize the interrupt 0 timer to 100 Hz after a suspend.
index ee4df08..fbf2f33 100644 (file)
@@ -75,6 +75,7 @@ void foo(void)
        OFFSET(PT_DS,  pt_regs, ds);
        OFFSET(PT_ES,  pt_regs, es);
        OFFSET(PT_FS,  pt_regs, fs);
+       OFFSET(PT_GS,  pt_regs, gs);
        OFFSET(PT_ORIG_EAX, pt_regs, orig_ax);
        OFFSET(PT_EIP, pt_regs, ip);
        OFFSET(PT_CS,  pt_regs, cs);
index 1d41d3f..8793ab3 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/hardirq.h>
 #include <linux/suspend.h>
 #include <linux/kbuild.h>
-#include <asm/pda.h>
 #include <asm/processor.h>
 #include <asm/segment.h>
 #include <asm/thread_info.h>
@@ -48,16 +47,6 @@ int main(void)
 #endif
        BLANK();
 #undef ENTRY
-#define ENTRY(entry) DEFINE(pda_ ## entry, offsetof(struct x8664_pda, entry))
-       ENTRY(kernelstack); 
-       ENTRY(oldrsp); 
-       ENTRY(pcurrent); 
-       ENTRY(irqcount);
-       ENTRY(cpunumber);
-       ENTRY(irqstackptr);
-       ENTRY(data_offset);
-       BLANK();
-#undef ENTRY
 #ifdef CONFIG_PARAVIRT
        BLANK();
        OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
diff --git a/arch/x86/kernel/bigsmp_32.c b/arch/x86/kernel/bigsmp_32.c
new file mode 100644 (file)
index 0000000..47a62f4
--- /dev/null
@@ -0,0 +1,266 @@
+/*
+ * APIC driver for "bigsmp" XAPIC machines with more than 8 virtual CPUs.
+ * Drives the local APIC in "clustered mode".
+ */
+#define APIC_DEFINITION 1
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <asm/mpspec.h>
+#include <asm/genapic.h>
+#include <asm/fixmap.h>
+#include <asm/apicdef.h>
+#include <asm/ipi.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/dmi.h>
+#include <linux/smp.h>
+
+
+static inline unsigned bigsmp_get_apic_id(unsigned long x)
+{
+       return (x >> 24) & 0xFF;
+}
+
+#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu))
+
+static inline int bigsmp_apic_id_registered(void)
+{
+       return 1;
+}
+
+static inline const cpumask_t *bigsmp_target_cpus(void)
+{
+#ifdef CONFIG_SMP
+       return &cpu_online_map;
+#else
+       return &cpumask_of_cpu(0);
+#endif
+}
+
+#define APIC_DFR_VALUE         (APIC_DFR_FLAT)
+
+static inline unsigned long
+bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid)
+{
+       return 0;
+}
+
+static inline unsigned long bigsmp_check_apicid_present(int bit)
+{
+       return 1;
+}
+
+static inline unsigned long calculate_ldr(int cpu)
+{
+       unsigned long val, id;
+       val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
+       id = xapic_phys_to_log_apicid(cpu);
+       val |= SET_APIC_LOGICAL_ID(id);
+       return val;
+}
+
+/*
+ * Set up the logical destination ID.
+ *
+ * Intel recommends to set DFR, LDR and TPR before enabling
+ * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
+ * document number 292116).  So here it goes...
+ */
+static inline void bigsmp_init_apic_ldr(void)
+{
+       unsigned long val;
+       int cpu = smp_processor_id();
+
+       apic_write(APIC_DFR, APIC_DFR_VALUE);
+       val = calculate_ldr(cpu);
+       apic_write(APIC_LDR, val);
+}
+
+static inline void bigsmp_setup_apic_routing(void)
+{
+       printk("Enabling APIC mode:  %s.  Using %d I/O APICs\n",
+               "Physflat", nr_ioapics);
+}
+
+static inline int bigsmp_apicid_to_node(int logical_apicid)
+{
+       return apicid_2_node[hard_smp_processor_id()];
+}
+
+static inline int bigsmp_cpu_present_to_apicid(int mps_cpu)
+{
+       if (mps_cpu < nr_cpu_ids)
+               return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
+
+       return BAD_APICID;
+}
+
+static inline physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid)
+{
+       return physid_mask_of_physid(phys_apicid);
+}
+
+extern u8 cpu_2_logical_apicid[];
+/* Mapping from cpu number to logical apicid */
+static inline int bigsmp_cpu_to_logical_apicid(int cpu)
+{
+       if (cpu >= nr_cpu_ids)
+               return BAD_APICID;
+       return cpu_physical_id(cpu);
+}
+
+static inline physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map)
+{
+       /* For clustered we don't have a good way to do this yet - hack */
+       return physids_promote(0xFFL);
+}
+
+static inline void bigsmp_setup_portio_remap(void)
+{
+}
+
+static inline int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+       return 1;
+}
+
+/* As we are using a single CPU as the destination, pick only one CPU here */
+static inline unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask)
+{
+       return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask));
+}
+
+static inline unsigned int
+bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                             const struct cpumask *andmask)
+{
+       int cpu;
+
+       /*
+        * We're using fixed IRQ delivery, can only return one phys APIC ID.
+        * May as well be the first.
+        */
+       for_each_cpu_and(cpu, cpumask, andmask) {
+               if (cpumask_test_cpu(cpu, cpu_online_mask))
+                       break;
+       }
+       if (cpu < nr_cpu_ids)
+               return bigsmp_cpu_to_logical_apicid(cpu);
+
+       return BAD_APICID;
+}
+
+static inline int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
+{
+       return cpuid_apic >> index_msb;
+}
+
+static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector)
+{
+       default_send_IPI_mask_sequence_phys(mask, vector);
+}
+
+static inline void bigsmp_send_IPI_allbutself(int vector)
+{
+       default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
+}
+
+static inline void bigsmp_send_IPI_all(int vector)
+{
+       bigsmp_send_IPI_mask(cpu_online_mask, vector);
+}
+
+static int dmi_bigsmp; /* can be set by dmi scanners */
+
+static int hp_ht_bigsmp(const struct dmi_system_id *d)
+{
+       printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
+       dmi_bigsmp = 1;
+       return 0;
+}
+
+
+static const struct dmi_system_id bigsmp_dmi_table[] = {
+       { hp_ht_bigsmp, "HP ProLiant DL760 G2",
+       { DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
+       DMI_MATCH(DMI_BIOS_VERSION, "P44-"),}
+       },
+
+       { hp_ht_bigsmp, "HP ProLiant DL740",
+       { DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
+       DMI_MATCH(DMI_BIOS_VERSION, "P47-"),}
+       },
+       { }
+};
+
+static void bigsmp_vector_allocation_domain(int cpu, cpumask_t *retmask)
+{
+       cpus_clear(*retmask);
+       cpu_set(cpu, *retmask);
+}
+
+static int probe_bigsmp(void)
+{
+       if (def_to_bigsmp)
+               dmi_bigsmp = 1;
+       else
+               dmi_check_system(bigsmp_dmi_table);
+       return dmi_bigsmp;
+}
+
+struct genapic apic_bigsmp = {
+
+       .name                           = "bigsmp",
+       .probe                          = probe_bigsmp,
+       .acpi_madt_oem_check            = NULL,
+       .apic_id_registered             = bigsmp_apic_id_registered,
+
+       .irq_delivery_mode              = dest_Fixed,
+       /* phys delivery to target CPU: */
+       .irq_dest_mode                  = 0,
+
+       .target_cpus                    = bigsmp_target_cpus,
+       .disable_esr                    = 1,
+       .dest_logical                   = 0,
+       .check_apicid_used              = bigsmp_check_apicid_used,
+       .check_apicid_present           = bigsmp_check_apicid_present,
+
+       .vector_allocation_domain       = bigsmp_vector_allocation_domain,
+       .init_apic_ldr                  = bigsmp_init_apic_ldr,
+
+       .ioapic_phys_id_map             = bigsmp_ioapic_phys_id_map,
+       .setup_apic_routing             = bigsmp_setup_apic_routing,
+       .multi_timer_check              = NULL,
+       .apicid_to_node                 = bigsmp_apicid_to_node,
+       .cpu_to_logical_apicid          = bigsmp_cpu_to_logical_apicid,
+       .cpu_present_to_apicid          = bigsmp_cpu_present_to_apicid,
+       .apicid_to_cpu_present          = bigsmp_apicid_to_cpu_present,
+       .setup_portio_remap             = NULL,
+       .check_phys_apicid_present      = bigsmp_check_phys_apicid_present,
+       .enable_apic_mode               = NULL,
+       .phys_pkg_id                    = bigsmp_phys_pkg_id,
+       .mps_oem_check                  = NULL,
+
+       .get_apic_id                    = bigsmp_get_apic_id,
+       .set_apic_id                    = NULL,
+       .apic_id_mask                   = 0xFF << 24,
+
+       .cpu_mask_to_apicid             = bigsmp_cpu_mask_to_apicid,
+       .cpu_mask_to_apicid_and         = bigsmp_cpu_mask_to_apicid_and,
+
+       .send_IPI_mask                  = bigsmp_send_IPI_mask,
+       .send_IPI_mask_allbutself       = NULL,
+       .send_IPI_allbutself            = bigsmp_send_IPI_allbutself,
+       .send_IPI_all                   = bigsmp_send_IPI_all,
+       .send_IPI_self                  = default_send_IPI_self,
+
+       .wakeup_cpu                     = NULL,
+       .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
+       .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+
+       .wait_for_init_deassert         = default_wait_for_init_deassert,
+
+       .smp_callin_clear_local_apic    = NULL,
+       .store_NMI_vector               = NULL,
+       .inquire_remote_apic            = default_inquire_remote_apic,
+};
index 2cf2363..e48640c 100644 (file)
@@ -7,7 +7,7 @@
 #include <asm/pat.h>
 #include <asm/processor.h>
 
-#include <mach_apic.h>
+#include <asm/genapic.h>
 
 struct cpuid_bit {
        u16 feature;
@@ -69,7 +69,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
  */
 void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_SMP
        unsigned int eax, ebx, ecx, edx, sub_index;
        unsigned int ht_mask_width, core_plus_mask_width;
        unsigned int core_select_mask, core_level_siblings;
@@ -116,22 +116,14 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 
        core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;
 
-#ifdef CONFIG_X86_32
-       c->cpu_core_id = phys_pkg_id(c->initial_apicid, ht_mask_width)
+       c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, ht_mask_width)
                                                 & core_select_mask;
-       c->phys_proc_id = phys_pkg_id(c->initial_apicid, core_plus_mask_width);
+       c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, core_plus_mask_width);
        /*
         * Reinit the apicid, now that we have extended initial_apicid.
         */
-       c->apicid = phys_pkg_id(c->initial_apicid, 0);
-#else
-       c->cpu_core_id = phys_pkg_id(ht_mask_width) & core_select_mask;
-       c->phys_proc_id = phys_pkg_id(core_plus_mask_width);
-       /*
-        * Reinit the apicid, now that we have extended initial_apicid.
-        */
-       c->apicid = phys_pkg_id(0);
-#endif
+       c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
+
        c->x86_max_cores = (core_level_siblings / smp_num_siblings);
 
 
@@ -143,37 +135,3 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
        return;
 #endif
 }
-
-#ifdef CONFIG_X86_PAT
-void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
-{
-       if (!cpu_has_pat)
-               pat_disable("PAT not supported by CPU.");
-
-       switch (c->x86_vendor) {
-       case X86_VENDOR_INTEL:
-               /*
-                * There is a known erratum on Pentium III and Core Solo
-                * and Core Duo CPUs.
-                * " Page with PAT set to WC while associated MTRR is UC
-                *   may consolidate to UC "
-                * Because of this erratum, it is better to stick with
-                * setting WC in MTRR rather than using PAT on these CPUs.
-                *
-                * Enable PAT WC only on P4, Core 2 or later CPUs.
-                */
-               if (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 15))
-                       return;
-
-               pat_disable("PAT WC disabled due to known CPU erratum.");
-               return;
-
-       case X86_VENDOR_AMD:
-       case X86_VENDOR_CENTAUR:
-       case X86_VENDOR_TRANSMETA:
-               return;
-       }
-
-       pat_disable("PAT disabled. Not yet verified on this CPU type.");
-}
-#endif
index 7c878f6..ff4d7b9 100644 (file)
@@ -12,7 +12,7 @@
 # include <asm/cacheflush.h>
 #endif
 
-#include <mach_apic.h>
+#include <asm/genapic.h>
 
 #include "cpu.h"
 
index 83492b1..e8f4a38 100644 (file)
 #include <asm/asm.h>
 #include <asm/numa.h>
 #include <asm/smp.h>
+#include <asm/cpu.h>
+#include <asm/cpumask.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
 #include <asm/apic.h>
-#include <mach_apic.h>
 #include <asm/genapic.h>
+#include <asm/genapic.h>
+#include <asm/uv/uv.h>
 #endif
 
-#include <asm/pda.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/desc.h>
@@ -37,6 +39,7 @@
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/hypervisor.h>
+#include <asm/stackprotector.h>
 
 #include "cpu.h"
 
@@ -50,6 +53,15 @@ cpumask_var_t cpu_initialized_mask;
 /* representing cpus for which sibling maps can be computed */
 cpumask_var_t cpu_sibling_setup_mask;
 
+/* correctly size the local cpu masks */
+void __init setup_cpu_local_masks(void)
+{
+       alloc_bootmem_cpumask_var(&cpu_initialized_mask);
+       alloc_bootmem_cpumask_var(&cpu_callin_mask);
+       alloc_bootmem_cpumask_var(&cpu_callout_mask);
+       alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
+}
+
 #else /* CONFIG_X86_32 */
 
 cpumask_t cpu_callin_map;
@@ -62,23 +74,23 @@ cpumask_t cpu_sibling_setup_map;
 
 static struct cpu_dev *this_cpu __cpuinitdata;
 
+DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 #ifdef CONFIG_X86_64
-/* We need valid kernel segments for data and code in long mode too
- * IRET will check the segment types  kkeil 2000/10/28
- * Also sysret mandates a special GDT layout
- */
-/* The TLS descriptors are currently at a different place compared to i386.
-   Hopefully nobody expects them at a fixed place (Wine?) */
-DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
+       /*
+        * We need valid kernel segments for data and code in long mode too
+        * IRET will check the segment types  kkeil 2000/10/28
+        * Also sysret mandates a special GDT layout
+        *
+        * The TLS descriptors are currently at a different place compared to i386.
+        * Hopefully nobody expects them at a fixed place (Wine?)
+        */
        [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
        [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
        [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
        [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
        [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
        [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
-} };
 #else
-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
        [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
        [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
        [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
@@ -110,9 +122,10 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
        [GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
 
        [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
-       [GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } },
-} };
+       [GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
+       GDT_STACK_CANARY_INIT
 #endif
+} };
 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 
 #ifdef CONFIG_X86_32
@@ -212,6 +225,49 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 }
 #endif
 
+/*
+ * Some CPU features depend on higher CPUID levels, which may not always
+ * be available due to CPUID level capping or broken virtualization
+ * software.  Add those features to this table to auto-disable them.
+ */
+struct cpuid_dependent_feature {
+       u32 feature;
+       u32 level;
+};
+static const struct cpuid_dependent_feature __cpuinitconst
+cpuid_dependent_features[] = {
+       { X86_FEATURE_MWAIT,            0x00000005 },
+       { X86_FEATURE_DCA,              0x00000009 },
+       { X86_FEATURE_XSAVE,            0x0000000d },
+       { 0, 0 }
+};
+
+static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
+{
+       const struct cpuid_dependent_feature *df;
+       for (df = cpuid_dependent_features; df->feature; df++) {
+               /*
+                * Note: cpuid_level is set to -1 if unavailable, but
+                * extended_cpuid_level is set to 0 if unavailable
+                * and the legitimate extended levels are all negative
+                * when signed; hence the weird messing around with
+                * signs here...
+                */
+               if (cpu_has(c, df->feature) &&
+                   ((s32)df->level < 0 ?
+                    (u32)df->level > (u32)c->extended_cpuid_level :
+                    (s32)df->level > (s32)c->cpuid_level)) {
+                       clear_cpu_cap(c, df->feature);
+                       if (warn)
+                               printk(KERN_WARNING
+                                      "CPU: CPU feature %s disabled "
+                                      "due to lack of CPUID level 0x%x\n",
+                                      x86_cap_flags[df->feature],
+                                      df->level);
+               }
+       }
+}
+
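The signed/unsigned comparison above can be hard to parse: basic CPUID levels are small positive numbers compared signed against cpuid_level, while extended levels start at 0x80000000 and are negative as s32, so they are compared unsigned against extended_cpuid_level. A standalone sketch with hypothetical level values exercising both branches:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int32_t  cpuid_level          = 0x0000000a;	/* highest basic leaf */
	uint32_t extended_cpuid_level = 0x80000008;	/* highest extended leaf */
	uint32_t levels[] = { 0x0000000d, 0x8000000a };	/* one of each kind */

	for (int i = 0; i < 2; i++) {
		uint32_t level = levels[i];
		/* Negative as s32 => extended leaf, compare unsigned. */
		int missing = (int32_t)level < 0 ?
			level > extended_cpuid_level :
			(int32_t)level > cpuid_level;
		printf("level 0x%08x -> %s\n", level,
		       missing ? "not available" : "available");
	}
	return 0;
}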
 /*
  * Naming convention should be: <Name> [(<Codename>)]
  * This table only is used unless init_<vendor>() below doesn't set it;
@@ -242,18 +298,29 @@ static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
 
 __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
 
+void load_percpu_segment(int cpu)
+{
+#ifdef CONFIG_X86_32
+       loadsegment(fs, __KERNEL_PERCPU);
+#else
+       loadsegment(gs, 0);
+       wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
+#endif
+       load_stack_canary_segment();
+}
+
 /* Current gdt points %fs at the "master" per-cpu area: after this,
  * it's on the real one. */
-void switch_to_new_gdt(void)
+void switch_to_new_gdt(int cpu)
 {
        struct desc_ptr gdt_descr;
 
-       gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
+       gdt_descr.address = (long)get_cpu_gdt_table(cpu);
        gdt_descr.size = GDT_SIZE - 1;
        load_gdt(&gdt_descr);
-#ifdef CONFIG_X86_32
-       asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
-#endif
+       /* Reload the per-cpu base */
+
+       load_percpu_segment(cpu);
 }
 
 static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
@@ -383,11 +450,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
                }
 
                index_msb = get_count_order(smp_num_siblings);
-#ifdef CONFIG_X86_64
-               c->phys_proc_id = phys_pkg_id(index_msb);
-#else
-               c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
-#endif
+               c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
 
                smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 
@@ -395,13 +458,8 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 
                core_bits = get_count_order(c->x86_max_cores);
 
-#ifdef CONFIG_X86_64
-               c->cpu_core_id = phys_pkg_id(index_msb) &
-                                              ((1 << core_bits) - 1);
-#else
-               c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
+               c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
                                               ((1 << core_bits) - 1);
-#endif
        }
 
 out:
@@ -570,11 +628,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
        if (this_cpu->c_early_init)
                this_cpu->c_early_init(c);
 
-       validate_pat_support(c);
-
 #ifdef CONFIG_SMP
        c->cpu_index = boot_cpu_id;
 #endif
+       filter_cpuid_features(c, false);
 }
 
 void __init early_cpu_init(void)
@@ -637,7 +694,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
                c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
 #ifdef CONFIG_X86_32
 # ifdef CONFIG_X86_HT
-               c->apicid = phys_pkg_id(c->initial_apicid, 0);
+               c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
 # else
                c->apicid = c->initial_apicid;
 # endif
@@ -684,7 +741,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
                this_cpu->c_identify(c);
 
 #ifdef CONFIG_X86_64
-       c->apicid = phys_pkg_id(0);
+       c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
 #endif
 
        /*
@@ -708,6 +765,9 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
         * we do "generic changes."
         */
 
+       /* Filter out anything that depends on CPUID levels we don't have */
+       filter_cpuid_features(c, true);
+
        /* If the model name is still unset, do table lookup. */
        if (!c->x86_model_id[0]) {
                char *p;
@@ -877,54 +937,22 @@ static __init int setup_disablecpuid(char *arg)
 __setup("clearcpuid=", setup_disablecpuid);
 
 #ifdef CONFIG_X86_64
-struct x8664_pda **_cpu_pda __read_mostly;
-EXPORT_SYMBOL(_cpu_pda);
-
 struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
 
-static char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss;
-
-void __cpuinit pda_init(int cpu)
-{
-       struct x8664_pda *pda = cpu_pda(cpu);
+DEFINE_PER_CPU_FIRST(union irq_stack_union,
+                    irq_stack_union) __aligned(PAGE_SIZE);
+DEFINE_PER_CPU(char *, irq_stack_ptr) =
+       init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
 
-       /* Setup up data that may be needed in __get_free_pages early */
-       loadsegment(fs, 0);
-       loadsegment(gs, 0);
-       /* Memory clobbers used to order PDA accessed */
-       mb();
-       wrmsrl(MSR_GS_BASE, pda);
-       mb();
-
-       pda->cpunumber = cpu;
-       pda->irqcount = -1;
-       pda->kernelstack = (unsigned long)stack_thread_info() -
-                                PDA_STACKOFFSET + THREAD_SIZE;
-       pda->active_mm = &init_mm;
-       pda->mmu_state = 0;
-
-       if (cpu == 0) {
-               /* others are initialized in smpboot.c */
-               pda->pcurrent = &init_task;
-               pda->irqstackptr = boot_cpu_stack;
-               pda->irqstackptr += IRQSTACKSIZE - 64;
-       } else {
-               if (!pda->irqstackptr) {
-                       pda->irqstackptr = (char *)
-                               __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
-                       if (!pda->irqstackptr)
-                               panic("cannot allocate irqstack for cpu %d",
-                                     cpu);
-                       pda->irqstackptr += IRQSTACKSIZE - 64;
-               }
+DEFINE_PER_CPU(unsigned long, kernel_stack) =
+       (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
+EXPORT_PER_CPU_SYMBOL(kernel_stack);
 
-               if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
-                       pda->nodenumber = cpu_to_node(cpu);
-       }
-}
+DEFINE_PER_CPU(unsigned int, irq_count) = -1;
 
-static char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
-                                 DEBUG_STKSZ] __page_aligned_bss;
+static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
+       [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ])
+       __aligned(PAGE_SIZE);
 
 extern asmlinkage void ignore_sysret(void);
 
@@ -957,16 +985,21 @@ unsigned long kernel_eflags;
  */
 DEFINE_PER_CPU(struct orig_ist, orig_ist);
 
-#else
+#else  /* x86_64 */
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+DEFINE_PER_CPU(unsigned long, stack_canary);
+#endif
 
-/* Make sure %fs is initialized properly in idle threads */
+/* Make sure %fs and %gs are initialized properly in idle threads */
 struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 {
        memset(regs, 0, sizeof(struct pt_regs));
        regs->fs = __KERNEL_PERCPU;
+       regs->gs = __KERNEL_STACK_CANARY;
        return regs;
 }
-#endif
+#endif /* x86_64 */
 
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
@@ -982,15 +1015,14 @@ void __cpuinit cpu_init(void)
        struct tss_struct *t = &per_cpu(init_tss, cpu);
        struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
        unsigned long v;
-       char *estacks = NULL;
        struct task_struct *me;
        int i;
 
-       /* CPU 0 is initialised in head64.c */
-       if (cpu != 0)
-               pda_init(cpu);
-       else
-               estacks = boot_exception_stacks;
+#ifdef CONFIG_NUMA
+       if (cpu != 0 && percpu_read(node_number) == 0 &&
+           cpu_to_node(cpu) != NUMA_NO_NODE)
+               percpu_write(node_number, cpu_to_node(cpu));
+#endif
 
        me = current;
 
@@ -1006,7 +1038,9 @@ void __cpuinit cpu_init(void)
         * and set up the GDT descriptor:
         */
 
-       switch_to_new_gdt();
+       switch_to_new_gdt(cpu);
+       loadsegment(fs, 0);
+
        load_idt((const struct desc_ptr *)&idt_descr);
 
        memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
@@ -1024,18 +1058,13 @@ void __cpuinit cpu_init(void)
         * set up and load the per-CPU TSS
         */
        if (!orig_ist->ist[0]) {
-               static const unsigned int order[N_EXCEPTION_STACKS] = {
-                 [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
-                 [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
+               static const unsigned int sizes[N_EXCEPTION_STACKS] = {
+                 [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
+                 [DEBUG_STACK - 1] = DEBUG_STKSZ
                };
+               char *estacks = per_cpu(exception_stacks, cpu);
                for (v = 0; v < N_EXCEPTION_STACKS; v++) {
-                       if (cpu) {
-                               estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
-                               if (!estacks)
-                                       panic("Cannot allocate exception "
-                                             "stack %ld %d\n", v, cpu);
-                       }
-                       estacks += PAGE_SIZE << order[v];
+                       estacks += sizes[v];
                        orig_ist->ist[v] = t->x86_tss.ist[v] =
                                        (unsigned long)estacks;
                }
@@ -1069,22 +1098,19 @@ void __cpuinit cpu_init(void)
         */
        if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
                arch_kgdb_ops.correct_hw_break();
-       else {
+       else
 #endif
-       /*
-        * Clear all 6 debug registers:
-        */
-
-       set_debugreg(0UL, 0);
-       set_debugreg(0UL, 1);
-       set_debugreg(0UL, 2);
-       set_debugreg(0UL, 3);
-       set_debugreg(0UL, 6);
-       set_debugreg(0UL, 7);
-#ifdef CONFIG_KGDB
-       /* If the kgdb is connected no debug regs should be altered. */
+       {
+               /*
+                * Clear all 6 debug registers:
+                */
+               set_debugreg(0UL, 0);
+               set_debugreg(0UL, 1);
+               set_debugreg(0UL, 2);
+               set_debugreg(0UL, 3);
+               set_debugreg(0UL, 6);
+               set_debugreg(0UL, 7);
        }
-#endif
 
        fpu_init();
 
@@ -1114,7 +1140,7 @@ void __cpuinit cpu_init(void)
                clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
 
        load_idt(&idt_descr);
-       switch_to_new_gdt();
+       switch_to_new_gdt(cpu);
 
        /*
         * Set up and load the per-CPU TSS and LDT
@@ -1135,9 +1161,6 @@ void __cpuinit cpu_init(void)
        __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
 #endif
 
-       /* Clear %gs. */
-       asm volatile ("mov %0, %%gs" : : "r" (0));
-
        /* Clear all 6 debug registers: */
        set_debugreg(0, 0);
        set_debugreg(0, 1);
index 24ff26a..1f137a8 100644 (file)
@@ -24,7 +24,7 @@
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
 #include <asm/apic.h>
-#include <mach_apic.h>
+#include <asm/genapic.h>
 #endif
 
 static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
@@ -63,6 +63,18 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
        }
 
+       /*
+        * There is a known erratum on Pentium III and Core Solo
+        * and Core Duo CPUs.
+        * " Page with PAT set to WC while associated MTRR is UC
+        *   may consolidate to UC "
+        * Because of this erratum, it is better to stick with
+        * setting WC in MTRR rather than using PAT on these CPUs.
+        *
+        * Enable PAT WC only on P4, Core 2 or later CPUs.
+        */
+       if (c->x86 == 6 && c->x86_model < 15)
+               clear_cpu_cap(c, X86_FEATURE_PAT);
 }
 
 #ifdef CONFIG_X86_32
index da299eb..7293508 100644 (file)
@@ -147,7 +147,16 @@ struct _cpuid4_info {
        union _cpuid4_leaf_ecx ecx;
        unsigned long size;
        unsigned long can_disable;
-       cpumask_t shared_cpu_map;       /* future?: only cpus/node is needed */
+       DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
+};
+
+/* subset of above _cpuid4_info w/o shared_cpu_map */
+struct _cpuid4_info_regs {
+       union _cpuid4_leaf_eax eax;
+       union _cpuid4_leaf_ebx ebx;
+       union _cpuid4_leaf_ecx ecx;
+       unsigned long size;
+       unsigned long can_disable;
 };
 
 #ifdef CONFIG_PCI
@@ -278,7 +287,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 }
 
 static void __cpuinit
-amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
+amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
 {
        if (index < 3)
                return;
@@ -286,7 +295,8 @@ amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
 }
 
 static int
-__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+__cpuinit cpuid4_cache_lookup_regs(int index,
+                                  struct _cpuid4_info_regs *this_leaf)
 {
        union _cpuid4_leaf_eax  eax;
        union _cpuid4_leaf_ebx  ebx;
@@ -314,6 +324,15 @@ __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
        return 0;
 }
 
+static int
+__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+{
+       struct _cpuid4_info_regs *leaf_regs =
+               (struct _cpuid4_info_regs *)this_leaf;
+
+       return cpuid4_cache_lookup_regs(index, leaf_regs);
+}
+
 static int __cpuinit find_num_cache_leaves(void)
 {
        unsigned int            eax, ebx, ecx, edx;
@@ -353,11 +372,10 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
                 * parameters cpuid leaf to find the cache details
                 */
                for (i = 0; i < num_cache_leaves; i++) {
-                       struct _cpuid4_info this_leaf;
-
+                       struct _cpuid4_info_regs this_leaf;
                        int retval;
 
-                       retval = cpuid4_cache_lookup(i, &this_leaf);
+                       retval = cpuid4_cache_lookup_regs(i, &this_leaf);
                        if (retval >= 0) {
                                switch(this_leaf.eax.split.level) {
                                    case 1:
@@ -506,17 +524,20 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
        num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
 
        if (num_threads_sharing == 1)
-               cpu_set(cpu, this_leaf->shared_cpu_map);
+               cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
        else {
                index_msb = get_count_order(num_threads_sharing);
 
                for_each_online_cpu(i) {
                        if (cpu_data(i).apicid >> index_msb ==
                            c->apicid >> index_msb) {
-                               cpu_set(i, this_leaf->shared_cpu_map);
+                               cpumask_set_cpu(i,
+                                       to_cpumask(this_leaf->shared_cpu_map));
                                if (i != cpu && per_cpu(cpuid4_info, i))  {
-                                       sibling_leaf = CPUID4_INFO_IDX(i, index);
-                                       cpu_set(cpu, sibling_leaf->shared_cpu_map);
+                                       sibling_leaf =
+                                               CPUID4_INFO_IDX(i, index);
+                                       cpumask_set_cpu(cpu, to_cpumask(
+                                               sibling_leaf->shared_cpu_map));
                                }
                        }
                }
@@ -528,9 +549,10 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
        int sibling;
 
        this_leaf = CPUID4_INFO_IDX(cpu, index);
-       for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
+       for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
                sibling_leaf = CPUID4_INFO_IDX(sibling, index);
-               cpu_clear(cpu, sibling_leaf->shared_cpu_map);
+               cpumask_clear_cpu(cpu,
+                                 to_cpumask(sibling_leaf->shared_cpu_map));
        }
 }
 #else
@@ -635,8 +657,9 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
        int n = 0;
 
        if (len > 1) {
-               cpumask_t *mask = &this_leaf->shared_cpu_map;
+               const struct cpumask *mask;
 
+               mask = to_cpumask(this_leaf->shared_cpu_map);
                n = type?
                        cpulist_scnprintf(buf, len-2, mask) :
                        cpumask_scnprintf(buf, len-2, mask);
@@ -699,7 +722,8 @@ static struct pci_dev *get_k8_northbridge(int node)
 
 static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
 {
-       int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+       const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
+       int node = cpu_to_node(cpumask_first(mask));
        struct pci_dev *dev = NULL;
        ssize_t ret = 0;
        int i;
@@ -733,7 +757,8 @@ static ssize_t
 store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
                    size_t count)
 {
-       int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+       const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
+       int node = cpu_to_node(cpumask_first(mask));
        struct pci_dev *dev = NULL;
        unsigned int ret, index, val;
 
@@ -878,7 +903,7 @@ err_out:
        return -ENOMEM;
 }
 
-static cpumask_t cache_dev_map = CPU_MASK_NONE;
+static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
 
 /* Add/Remove cache interface for CPU device */
 static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
@@ -918,7 +943,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
                }
                kobject_uevent(&(this_object->kobj), KOBJ_ADD);
        }
-       cpu_set(cpu, cache_dev_map);
+       cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
 
        kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
        return 0;
@@ -931,9 +956,9 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 
        if (per_cpu(cpuid4_info, cpu) == NULL)
                return;
-       if (!cpu_isset(cpu, cache_dev_map))
+       if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
                return;
-       cpu_clear(cpu, cache_dev_map);
+       cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
 
        for (i = 0; i < num_cache_leaves; i++)
                kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
index 8ae8c4f..4772e91 100644 (file)
@@ -67,7 +67,7 @@ static struct threshold_block threshold_defaults = {
 struct threshold_bank {
        struct kobject *kobj;
        struct threshold_block *blocks;
-       cpumask_t cpus;
+       cpumask_var_t cpus;
 };
 static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
 
@@ -481,7 +481,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 #ifdef CONFIG_SMP
        if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {   /* symlink */
-               i = first_cpu(per_cpu(cpu_core_map, cpu));
+               i = cpumask_first(&per_cpu(cpu_core_map, cpu));
 
                /* first core not up yet */
                if (cpu_data(i).cpu_core_id)
@@ -501,7 +501,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
                if (err)
                        goto out;
 
-               b->cpus = per_cpu(cpu_core_map, cpu);
+               cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
                per_cpu(threshold_banks, cpu)[bank] = b;
                goto out;
        }
@@ -512,15 +512,20 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
                err = -ENOMEM;
                goto out;
        }
+       if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
+               kfree(b);
+               err = -ENOMEM;
+               goto out;
+       }
 
        b->kobj = kobject_create_and_add(name, &per_cpu(device_mce, cpu).kobj);
        if (!b->kobj)
                goto out_free;
 
 #ifndef CONFIG_SMP
-       b->cpus = CPU_MASK_ALL;
+       cpumask_setall(b->cpus);
 #else
-       b->cpus = per_cpu(cpu_core_map, cpu);
+       cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
 #endif
 
        per_cpu(threshold_banks, cpu)[bank] = b;
@@ -529,7 +534,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
        if (err)
                goto out_free;
 
-       for_each_cpu_mask_nr(i, b->cpus) {
+       for_each_cpu(i, b->cpus) {
                if (i == cpu)
                        continue;
 
@@ -545,6 +550,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 out_free:
        per_cpu(threshold_banks, cpu)[bank] = NULL;
+       free_cpumask_var(b->cpus);
        kfree(b);
 out:
        return err;
@@ -619,7 +625,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 #endif
 
        /* remove all sibling symlinks before unregistering */
-       for_each_cpu_mask_nr(i, b->cpus) {
+       for_each_cpu(i, b->cpus) {
                if (i == cpu)
                        continue;
 
@@ -632,6 +638,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 free_out:
        kobject_del(b->kobj);
        kobject_put(b->kobj);
+       free_cpumask_var(b->cpus);
        kfree(b);
        per_cpu(threshold_banks, cpu)[bank] = NULL;
 }
index 4b48f25..5e8c79e 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/interrupt.h>
 #include <linux/percpu.h>
 #include <asm/processor.h>
+#include <asm/apic.h>
 #include <asm/msr.h>
 #include <asm/mce.h>
 #include <asm/hw_irq.h>
index c689d19..ad7f2a6 100644 (file)
 #include <asm/apic.h>
 #include <asm/hpet.h>
 #include <linux/kdebug.h>
-#include <asm/smp.h>
+#include <asm/cpu.h>
 #include <asm/reboot.h>
 #include <asm/virtext.h>
 
-#include <mach_ipi.h>
+#include <asm/genapic.h>
 
 
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
index 6b1f6f6..87d103d 100644 (file)
@@ -99,7 +99,7 @@ print_context_stack(struct thread_info *tinfo,
                                frame = frame->next_frame;
                                bp = (unsigned long) frame;
                        } else {
-                               ops->address(data, addr, bp == 0);
+                               ops->address(data, addr, 0);
                        }
                        print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
                }
index c302d07..d35db59 100644 (file)
@@ -106,7 +106,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
                const struct stacktrace_ops *ops, void *data)
 {
        const unsigned cpu = get_cpu();
-       unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
+       unsigned long *irq_stack_end =
+               (unsigned long *)per_cpu(irq_stack_ptr, cpu);
        unsigned used = 0;
        struct thread_info *tinfo;
        int graph = 0;
@@ -160,23 +161,23 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
                        stack = (unsigned long *) estack_end[-2];
                        continue;
                }
-               if (irqstack_end) {
-                       unsigned long *irqstack;
-                       irqstack = irqstack_end -
-                               (IRQSTACKSIZE - 64) / sizeof(*irqstack);
+               if (irq_stack_end) {
+                       unsigned long *irq_stack;
+                       irq_stack = irq_stack_end -
+                               (IRQ_STACK_SIZE - 64) / sizeof(*irq_stack);
 
-                       if (stack >= irqstack && stack < irqstack_end) {
+                       if (stack >= irq_stack && stack < irq_stack_end) {
                                if (ops->stack(data, "IRQ") < 0)
                                        break;
                                bp = print_context_stack(tinfo, stack, bp,
-                                       ops, data, irqstack_end, &graph);
+                                       ops, data, irq_stack_end, &graph);
                                /*
                                 * We link to the next stack (which would be
                                 * the process stack normally) the last
                                 * pointer (index -1 to end) in the IRQ stack:
                                 */
-                               stack = (unsigned long *) (irqstack_end[-1]);
-                               irqstack_end = NULL;
+                               stack = (unsigned long *) (irq_stack_end[-1]);
+                               irq_stack_end = NULL;
                                ops->stack(data, "EOI");
                                continue;
                        }
@@ -199,10 +200,10 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
        unsigned long *stack;
        int i;
        const int cpu = smp_processor_id();
-       unsigned long *irqstack_end =
-               (unsigned long *) (cpu_pda(cpu)->irqstackptr);
-       unsigned long *irqstack =
-               (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
+       unsigned long *irq_stack_end =
+               (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
+       unsigned long *irq_stack =
+               (unsigned long *)(per_cpu(irq_stack_ptr, cpu) - IRQ_STACK_SIZE);
 
        /*
         * debugging aid: "show_stack(NULL, NULL);" prints the
@@ -218,9 +219,9 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
 
        stack = sp;
        for (i = 0; i < kstack_depth_to_print; i++) {
-               if (stack >= irqstack && stack <= irqstack_end) {
-                       if (stack == irqstack_end) {
-                               stack = (unsigned long *) (irqstack_end[-1]);
+               if (stack >= irq_stack && stack <= irq_stack_end) {
+                       if (stack == irq_stack_end) {
+                               stack = (unsigned long *) (irq_stack_end[-1]);
                                printk(" <EOI> ");
                        }
                } else {
@@ -241,7 +242,7 @@ void show_registers(struct pt_regs *regs)
        int i;
        unsigned long sp;
        const int cpu = smp_processor_id();
-       struct task_struct *cur = cpu_pda(cpu)->pcurrent;
+       struct task_struct *cur = current;
 
        sp = regs->sp;
        printk("CPU %d ", cpu);
index 504ad19..639ad98 100644 (file)
@@ -13,8 +13,8 @@
 #include <asm/setup.h>
 #include <xen/hvc-console.h>
 #include <asm/pci-direct.h>
-#include <asm/pgtable.h>
 #include <asm/fixmap.h>
+#include <asm/pgtable.h>
 #include <linux/usb/ehci_def.h>
 
 /* Simple VGA output */
index 1119d24..b205272 100644 (file)
@@ -366,10 +366,12 @@ void __init efi_init(void)
                                        SMBIOS_TABLE_GUID)) {
                        efi.smbios = config_tables[i].table;
                        printk(" SMBIOS=0x%lx ", config_tables[i].table);
+#ifdef CONFIG_X86_UV
                } else if (!efi_guidcmp(config_tables[i].guid,
                                        UV_SYSTEM_TABLE_GUID)) {
                        efi.uv_systab = config_tables[i].table;
                        printk(" UVsystab=0x%lx ", config_tables[i].table);
+#endif
                } else if (!efi_guidcmp(config_tables[i].guid,
                                        HCDP_TABLE_GUID)) {
                        efi.hcdp = config_tables[i].table;
index 652c528..a4ee291 100644 (file)
@@ -36,6 +36,7 @@
 #include <asm/proto.h>
 #include <asm/efi.h>
 #include <asm/cacheflush.h>
+#include <asm/fixmap.h>
 
 static pgd_t save_pgd __initdata;
 static unsigned long efi_flags __initdata;
index 4646902..e992068 100644 (file)
  *     1C(%esp) - %ds
  *     20(%esp) - %es
  *     24(%esp) - %fs
- *     28(%esp) - orig_eax
- *     2C(%esp) - %eip
- *     30(%esp) - %cs
- *     34(%esp) - %eflags
- *     38(%esp) - %oldesp
- *     3C(%esp) - %oldss
+ *     28(%esp) - %gs          saved iff !CONFIG_X86_32_LAZY_GS
+ *     2C(%esp) - orig_eax
+ *     30(%esp) - %eip
+ *     34(%esp) - %cs
+ *     38(%esp) - %eflags
+ *     3C(%esp) - %oldesp
+ *     40(%esp) - %oldss
  *
  * "current" is in register %ebx during any slow entries.
  */
 #define resume_userspace_sig   resume_userspace
 #endif
 
-#define SAVE_ALL \
-       cld; \
-       pushl %fs; \
-       CFI_ADJUST_CFA_OFFSET 4;\
-       /*CFI_REL_OFFSET fs, 0;*/\
-       pushl %es; \
-       CFI_ADJUST_CFA_OFFSET 4;\
-       /*CFI_REL_OFFSET es, 0;*/\
-       pushl %ds; \
-       CFI_ADJUST_CFA_OFFSET 4;\
-       /*CFI_REL_OFFSET ds, 0;*/\
-       pushl %eax; \
-       CFI_ADJUST_CFA_OFFSET 4;\
-       CFI_REL_OFFSET eax, 0;\
-       pushl %ebp; \
-       CFI_ADJUST_CFA_OFFSET 4;\
-       CFI_REL_OFFSET ebp, 0;\
-       pushl %edi; \
-       CFI_ADJUST_CFA_OFFSET 4;\
-       CFI_REL_OFFSET edi, 0;\
-       pushl %esi; \
-       CFI_ADJUST_CFA_OFFSET 4;\
-       CFI_REL_OFFSET esi, 0;\
-       pushl %edx; \
-       CFI_ADJUST_CFA_OFFSET 4;\
-       CFI_REL_OFFSET edx, 0;\
-       pushl %ecx; \
-       CFI_ADJUST_CFA_OFFSET 4;\
-       CFI_REL_OFFSET ecx, 0;\
-       pushl %ebx; \
-       CFI_ADJUST_CFA_OFFSET 4;\
-       CFI_REL_OFFSET ebx, 0;\
-       movl $(__USER_DS), %edx; \
-       movl %edx, %ds; \
-       movl %edx, %es; \
-       movl $(__KERNEL_PERCPU), %edx; \
+/*
+ * User gs save/restore
+ *
+ * %gs is used for userland TLS, while the kernel only uses it for the
+ * stack canary, which gcc requires to be at %gs:20.  Read the comment
+ * at the top of stackprotector.h for more info.
+ *
+ * Local labels 98 and 99 are used.
+ */
+#ifdef CONFIG_X86_32_LAZY_GS
+
+ /* unfortunately push/pop can't be no-op */
+.macro PUSH_GS
+       pushl $0
+       CFI_ADJUST_CFA_OFFSET 4
+.endm
+.macro POP_GS pop=0
+       addl $(4 + \pop), %esp
+       CFI_ADJUST_CFA_OFFSET -(4 + \pop)
+.endm
+.macro POP_GS_EX
+.endm
+
+ /* all the rest are no-op */
+.macro PTGS_TO_GS
+.endm
+.macro PTGS_TO_GS_EX
+.endm
+.macro GS_TO_REG reg
+.endm
+.macro REG_TO_PTGS reg
+.endm
+.macro SET_KERNEL_GS reg
+.endm
+
+#else  /* CONFIG_X86_32_LAZY_GS */
+
+.macro PUSH_GS
+       pushl %gs
+       CFI_ADJUST_CFA_OFFSET 4
+       /*CFI_REL_OFFSET gs, 0*/
+.endm
+
+.macro POP_GS pop=0
+98:    popl %gs
+       CFI_ADJUST_CFA_OFFSET -4
+       /*CFI_RESTORE gs*/
+  .if \pop <> 0
+       add $\pop, %esp
+       CFI_ADJUST_CFA_OFFSET -\pop
+  .endif
+.endm
+.macro POP_GS_EX
+.pushsection .fixup, "ax"
+99:    movl $0, (%esp)
+       jmp 98b
+.section __ex_table, "a"
+       .align 4
+       .long 98b, 99b
+.popsection
+.endm
+
+.macro PTGS_TO_GS
+98:    mov PT_GS(%esp), %gs
+.endm
+.macro PTGS_TO_GS_EX
+.pushsection .fixup, "ax"
+99:    movl $0, PT_GS(%esp)
+       jmp 98b
+.section __ex_table, "a"
+       .align 4
+       .long 98b, 99b
+.popsection
+.endm
+
+.macro GS_TO_REG reg
+       movl %gs, \reg
+       /*CFI_REGISTER gs, \reg*/
+.endm
+.macro REG_TO_PTGS reg
+       movl \reg, PT_GS(%esp)
+       /*CFI_REL_OFFSET gs, PT_GS*/
+.endm
+.macro SET_KERNEL_GS reg
+       movl $(__KERNEL_STACK_CANARY), \reg
+       movl \reg, %gs
+.endm
+
+#endif /* CONFIG_X86_32_LAZY_GS */
+
+.macro SAVE_ALL
+       cld
+       PUSH_GS
+       pushl %fs
+       CFI_ADJUST_CFA_OFFSET 4
+       /*CFI_REL_OFFSET fs, 0;*/
+       pushl %es
+       CFI_ADJUST_CFA_OFFSET 4
+       /*CFI_REL_OFFSET es, 0;*/
+       pushl %ds
+       CFI_ADJUST_CFA_OFFSET 4
+       /*CFI_REL_OFFSET ds, 0;*/
+       pushl %eax
+       CFI_ADJUST_CFA_OFFSET 4
+       CFI_REL_OFFSET eax, 0
+       pushl %ebp
+       CFI_ADJUST_CFA_OFFSET 4
+       CFI_REL_OFFSET ebp, 0
+       pushl %edi
+       CFI_ADJUST_CFA_OFFSET 4
+       CFI_REL_OFFSET edi, 0
+       pushl %esi
+       CFI_ADJUST_CFA_OFFSET 4
+       CFI_REL_OFFSET esi, 0
+       pushl %edx
+       CFI_ADJUST_CFA_OFFSET 4
+       CFI_REL_OFFSET edx, 0
+       pushl %ecx
+       CFI_ADJUST_CFA_OFFSET 4
+       CFI_REL_OFFSET ecx, 0
+       pushl %ebx
+       CFI_ADJUST_CFA_OFFSET 4
+       CFI_REL_OFFSET ebx, 0
+       movl $(__USER_DS), %edx
+       movl %edx, %ds
+       movl %edx, %es
+       movl $(__KERNEL_PERCPU), %edx
        movl %edx, %fs
+       SET_KERNEL_GS %edx
+.endm
 
-#define RESTORE_INT_REGS \
-       popl %ebx;      \
-       CFI_ADJUST_CFA_OFFSET -4;\
-       CFI_RESTORE ebx;\
-       popl %ecx;      \
-       CFI_ADJUST_CFA_OFFSET -4;\
-       CFI_RESTORE ecx;\
-       popl %edx;      \
-       CFI_ADJUST_CFA_OFFSET -4;\
-       CFI_RESTORE edx;\
-       popl %esi;      \
-       CFI_ADJUST_CFA_OFFSET -4;\
-       CFI_RESTORE esi;\
-       popl %edi;      \
-       CFI_ADJUST_CFA_OFFSET -4;\
-       CFI_RESTORE edi;\
-       popl %ebp;      \
-       CFI_ADJUST_CFA_OFFSET -4;\
-       CFI_RESTORE ebp;\
-       popl %eax;      \
-       CFI_ADJUST_CFA_OFFSET -4;\
+.macro RESTORE_INT_REGS
+       popl %ebx
+       CFI_ADJUST_CFA_OFFSET -4
+       CFI_RESTORE ebx
+       popl %ecx
+       CFI_ADJUST_CFA_OFFSET -4
+       CFI_RESTORE ecx
+       popl %edx
+       CFI_ADJUST_CFA_OFFSET -4
+       CFI_RESTORE edx
+       popl %esi
+       CFI_ADJUST_CFA_OFFSET -4
+       CFI_RESTORE esi
+       popl %edi
+       CFI_ADJUST_CFA_OFFSET -4
+       CFI_RESTORE edi
+       popl %ebp
+       CFI_ADJUST_CFA_OFFSET -4
+       CFI_RESTORE ebp
+       popl %eax
+       CFI_ADJUST_CFA_OFFSET -4
        CFI_RESTORE eax
+.endm
 
-#define RESTORE_REGS   \
-       RESTORE_INT_REGS; \
-1:     popl %ds;       \
-       CFI_ADJUST_CFA_OFFSET -4;\
-       /*CFI_RESTORE ds;*/\
-2:     popl %es;       \
-       CFI_ADJUST_CFA_OFFSET -4;\
-       /*CFI_RESTORE es;*/\
-3:     popl %fs;       \
-       CFI_ADJUST_CFA_OFFSET -4;\
-       /*CFI_RESTORE fs;*/\
-.pushsection .fixup,"ax";      \
-4:     movl $0,(%esp); \
-       jmp 1b;         \
-5:     movl $0,(%esp); \
-       jmp 2b;         \
-6:     movl $0,(%esp); \
-       jmp 3b;         \
-.section __ex_table,"a";\
-       .align 4;       \
-       .long 1b,4b;    \
-       .long 2b,5b;    \
-       .long 3b,6b;    \
+.macro RESTORE_REGS pop=0
+       RESTORE_INT_REGS
+1:     popl %ds
+       CFI_ADJUST_CFA_OFFSET -4
+       /*CFI_RESTORE ds;*/
+2:     popl %es
+       CFI_ADJUST_CFA_OFFSET -4
+       /*CFI_RESTORE es;*/
+3:     popl %fs
+       CFI_ADJUST_CFA_OFFSET -4
+       /*CFI_RESTORE fs;*/
+       POP_GS \pop
+.pushsection .fixup, "ax"
+4:     movl $0, (%esp)
+       jmp 1b
+5:     movl $0, (%esp)
+       jmp 2b
+6:     movl $0, (%esp)
+       jmp 3b
+.section __ex_table, "a"
+       .align 4
+       .long 1b, 4b
+       .long 2b, 5b
+       .long 3b, 6b
 .popsection
+       POP_GS_EX
+.endm
 
-#define RING0_INT_FRAME \
-       CFI_STARTPROC simple;\
-       CFI_SIGNAL_FRAME;\
-       CFI_DEF_CFA esp, 3*4;\
-       /*CFI_OFFSET cs, -2*4;*/\
+.macro RING0_INT_FRAME
+       CFI_STARTPROC simple
+       CFI_SIGNAL_FRAME
+       CFI_DEF_CFA esp, 3*4
+       /*CFI_OFFSET cs, -2*4;*/
        CFI_OFFSET eip, -3*4
+.endm
 
-#define RING0_EC_FRAME \
-       CFI_STARTPROC simple;\
-       CFI_SIGNAL_FRAME;\
-       CFI_DEF_CFA esp, 4*4;\
-       /*CFI_OFFSET cs, -2*4;*/\
+.macro RING0_EC_FRAME
+       CFI_STARTPROC simple
+       CFI_SIGNAL_FRAME
+       CFI_DEF_CFA esp, 4*4
+       /*CFI_OFFSET cs, -2*4;*/
        CFI_OFFSET eip, -3*4
+.endm
 
-#define RING0_PTREGS_FRAME \
-       CFI_STARTPROC simple;\
-       CFI_SIGNAL_FRAME;\
-       CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\
-       /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\
-       CFI_OFFSET eip, PT_EIP-PT_OLDESP;\
-       /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\
-       /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\
-       CFI_OFFSET eax, PT_EAX-PT_OLDESP;\
-       CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\
-       CFI_OFFSET edi, PT_EDI-PT_OLDESP;\
-       CFI_OFFSET esi, PT_ESI-PT_OLDESP;\
-       CFI_OFFSET edx, PT_EDX-PT_OLDESP;\
-       CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\
+.macro RING0_PTREGS_FRAME
+       CFI_STARTPROC simple
+       CFI_SIGNAL_FRAME
+       CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
+       /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
+       CFI_OFFSET eip, PT_EIP-PT_OLDESP
+       /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
+       /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
+       CFI_OFFSET eax, PT_EAX-PT_OLDESP
+       CFI_OFFSET ebp, PT_EBP-PT_OLDESP
+       CFI_OFFSET edi, PT_EDI-PT_OLDESP
+       CFI_OFFSET esi, PT_ESI-PT_OLDESP
+       CFI_OFFSET edx, PT_EDX-PT_OLDESP
+       CFI_OFFSET ecx, PT_ECX-PT_OLDESP
        CFI_OFFSET ebx, PT_EBX-PT_OLDESP
+.endm
 
 ENTRY(ret_from_fork)
        CFI_STARTPROC
@@ -362,6 +463,7 @@ sysenter_exit:
        xorl %ebp,%ebp
        TRACE_IRQS_ON
 1:     mov  PT_FS(%esp), %fs
+       PTGS_TO_GS
        ENABLE_INTERRUPTS_SYSEXIT
 
 #ifdef CONFIG_AUDITSYSCALL
@@ -410,6 +512,7 @@ sysexit_audit:
        .align 4
        .long 1b,2b
 .popsection
+       PTGS_TO_GS_EX
 ENDPROC(ia32_sysenter_target)
 
        # system call handler stub
@@ -452,8 +555,7 @@ restore_all:
 restore_nocheck:
        TRACE_IRQS_IRET
 restore_nocheck_notrace:
-       RESTORE_REGS
-       addl $4, %esp                   # skip orig_eax/error_code
+       RESTORE_REGS 4                  # skip orig_eax/error_code
        CFI_ADJUST_CFA_OFFSET -4
 irq_return:
        INTERRUPT_RETURN
@@ -595,28 +697,50 @@ syscall_badsys:
 END(syscall_badsys)
        CFI_ENDPROC
 
-#define FIXUP_ESPFIX_STACK \
-       /* since we are on a wrong stack, we cant make it a C code :( */ \
-       PER_CPU(gdt_page, %ebx); \
-       GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
-       addl %esp, %eax; \
-       pushl $__KERNEL_DS; \
-       CFI_ADJUST_CFA_OFFSET 4; \
-       pushl %eax; \
-       CFI_ADJUST_CFA_OFFSET 4; \
-       lss (%esp), %esp; \
-       CFI_ADJUST_CFA_OFFSET -8;
-#define UNWIND_ESPFIX_STACK \
-       movl %ss, %eax; \
-       /* see if on espfix stack */ \
-       cmpw $__ESPFIX_SS, %ax; \
-       jne 27f; \
-       movl $__KERNEL_DS, %eax; \
-       movl %eax, %ds; \
-       movl %eax, %es; \
-       /* switch to normal stack */ \
-       FIXUP_ESPFIX_STACK; \
-27:;
+/*
+ * System calls that need a pt_regs pointer.
+ */
+#define PTREGSCALL(name) \
+       ALIGN; \
+ptregs_##name: \
+       leal 4(%esp),%eax; \
+       jmp sys_##name;
+
+PTREGSCALL(iopl)
+PTREGSCALL(fork)
+PTREGSCALL(clone)
+PTREGSCALL(vfork)
+PTREGSCALL(execve)
+PTREGSCALL(sigaltstack)
+PTREGSCALL(sigreturn)
+PTREGSCALL(rt_sigreturn)
+PTREGSCALL(vm86)
+PTREGSCALL(vm86old)
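On the C side, a stub defined this way jumps to a sys_* implementation that takes the register frame directly; a minimal sketch of that convention (sys_vfork is shown for illustration, with do_fork flags typical of this era, not quoted from this patch):

    int sys_vfork(struct pt_regs *regs)
    {
            /* regs is the frame whose address the stub loaded into %eax */
            return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp,
                           regs, 0, NULL, NULL);
    }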
+
+.macro FIXUP_ESPFIX_STACK
+       /* since we are on the wrong stack, we can't do this in C :( */
+       PER_CPU(gdt_page, %ebx)
+       GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah)
+       addl %esp, %eax
+       pushl $__KERNEL_DS
+       CFI_ADJUST_CFA_OFFSET 4
+       pushl %eax
+       CFI_ADJUST_CFA_OFFSET 4
+       lss (%esp), %esp
+       CFI_ADJUST_CFA_OFFSET -8
+.endm
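GET_DESC_BASE pieces the espfix segment base together from the three base fields of the GDT entry because no usable stack is available for a C helper here; in C the same computation would look roughly like this (desc_struct field names assumed from the kernel's descriptor definitions):

    static unsigned long desc_base_sketch(const struct desc_struct *d)
    {
            return d->base0 |
                   ((unsigned long)d->base1 << 16) |
                   ((unsigned long)d->base2 << 24);
    }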
+.macro UNWIND_ESPFIX_STACK
+       movl %ss, %eax
+       /* see if on espfix stack */
+       cmpw $__ESPFIX_SS, %ax
+       jne 27f
+       movl $__KERNEL_DS, %eax
+       movl %eax, %ds
+       movl %eax, %es
+       /* switch to normal stack */
+       FIXUP_ESPFIX_STACK
+27:
+.endm
 
 /*
  * Build the entry stubs and pointer table with some assembler magic.
@@ -672,7 +796,7 @@ common_interrupt:
 ENDPROC(common_interrupt)
        CFI_ENDPROC
 
-#define BUILD_INTERRUPT(name, nr)      \
+#define BUILD_INTERRUPT3(name, nr, fn) \
 ENTRY(name)                            \
        RING0_INT_FRAME;                \
        pushl $~(nr);                   \
@@ -680,13 +804,15 @@ ENTRY(name)                               \
        SAVE_ALL;                       \
        TRACE_IRQS_OFF                  \
        movl %esp,%eax;                 \
-       call smp_##name;                \
+       call fn;                        \
        jmp ret_from_intr;              \
        CFI_ENDPROC;                    \
 ENDPROC(name)
 
+#define BUILD_INTERRUPT(name, nr)      BUILD_INTERRUPT3(name, nr, smp_##name)
+
 /* The include is where all of the SMP etc. interrupts come from */
-#include "entry_arch.h"
+#include <asm/entry_arch.h>
 
 ENTRY(coprocessor_error)
        RING0_INT_FRAME
@@ -1068,7 +1194,10 @@ ENTRY(page_fault)
        CFI_ADJUST_CFA_OFFSET 4
        ALIGN
 error_code:
-       /* the function address is in %fs's slot on the stack */
+       /* the function address is in %gs's slot on the stack */
+       pushl %fs
+       CFI_ADJUST_CFA_OFFSET 4
+       /*CFI_REL_OFFSET fs, 0*/
        pushl %es
        CFI_ADJUST_CFA_OFFSET 4
        /*CFI_REL_OFFSET es, 0*/
@@ -1097,20 +1226,15 @@ error_code:
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET ebx, 0
        cld
-       pushl %fs
-       CFI_ADJUST_CFA_OFFSET 4
-       /*CFI_REL_OFFSET fs, 0*/
        movl $(__KERNEL_PERCPU), %ecx
        movl %ecx, %fs
        UNWIND_ESPFIX_STACK
-       popl %ecx
-       CFI_ADJUST_CFA_OFFSET -4
-       /*CFI_REGISTER es, ecx*/
-       movl PT_FS(%esp), %edi          # get the function address
+       GS_TO_REG %ecx
+       movl PT_GS(%esp), %edi          # get the function address
        movl PT_ORIG_EAX(%esp), %edx    # get the error code
        movl $-1, PT_ORIG_EAX(%esp)     # no syscall to restart
-       mov  %ecx, PT_FS(%esp)
-       /*CFI_REL_OFFSET fs, ES*/
+       REG_TO_PTGS %ecx
+       SET_KERNEL_GS %ecx
        movl $(__USER_DS), %ecx
        movl %ecx, %ds
        movl %ecx, %es
@@ -1134,26 +1258,27 @@ END(page_fault)
  * by hand onto the new stack - while updating the return eip past
  * the instruction that would have done it for sysenter.
  */
-#define FIX_STACK(offset, ok, label)           \
-       cmpw $__KERNEL_CS,4(%esp);              \
-       jne ok;                                 \
-label:                                         \
-       movl TSS_sysenter_sp0+offset(%esp),%esp;        \
-       CFI_DEF_CFA esp, 0;                     \
-       CFI_UNDEFINED eip;                      \
-       pushfl;                                 \
-       CFI_ADJUST_CFA_OFFSET 4;                \
-       pushl $__KERNEL_CS;                     \
-       CFI_ADJUST_CFA_OFFSET 4;                \
-       pushl $sysenter_past_esp;               \
-       CFI_ADJUST_CFA_OFFSET 4;                \
+.macro FIX_STACK offset ok label
+       cmpw $__KERNEL_CS, 4(%esp)
+       jne \ok
+\label:
+       movl TSS_sysenter_sp0 + \offset(%esp), %esp
+       CFI_DEF_CFA esp, 0
+       CFI_UNDEFINED eip
+       pushfl
+       CFI_ADJUST_CFA_OFFSET 4
+       pushl $__KERNEL_CS
+       CFI_ADJUST_CFA_OFFSET 4
+       pushl $sysenter_past_esp
+       CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET eip, 0
+.endm
 
 ENTRY(debug)
        RING0_INT_FRAME
        cmpl $ia32_sysenter_target,(%esp)
        jne debug_stack_correct
-       FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
+       FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
 debug_stack_correct:
        pushl $-1                       # mark this as an int
        CFI_ADJUST_CFA_OFFSET 4
@@ -1211,7 +1336,7 @@ nmi_stack_correct:
 
 nmi_stack_fixup:
        RING0_INT_FRAME
-       FIX_STACK(12,nmi_stack_correct, 1)
+       FIX_STACK 12, nmi_stack_correct, 1
        jmp nmi_stack_correct
 
 nmi_debug_stack_check:
@@ -1222,7 +1347,7 @@ nmi_debug_stack_check:
        jb nmi_stack_correct
        cmpl $debug_esp_fix_insn,(%esp)
        ja nmi_stack_correct
-       FIX_STACK(24,nmi_stack_correct, 1)
+       FIX_STACK 24, nmi_stack_correct, 1
        jmp nmi_stack_correct
 
 nmi_espfix_stack:
index a134621..fbcf96b 100644 (file)
@@ -52,6 +52,7 @@
 #include <asm/irqflags.h>
 #include <asm/paravirt.h>
 #include <asm/ftrace.h>
+#include <asm/percpu.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
 #include <linux/elf-em.h>
@@ -209,7 +210,7 @@ ENTRY(native_usergs_sysret64)
 
        /* %rsp:at FRAMEEND */
        .macro FIXUP_TOP_OF_STACK tmp offset=0
-       movq %gs:pda_oldrsp,\tmp
+       movq PER_CPU_VAR(old_rsp),\tmp
        movq \tmp,RSP+\offset(%rsp)
        movq $__USER_DS,SS+\offset(%rsp)
        movq $__USER_CS,CS+\offset(%rsp)
@@ -220,7 +221,7 @@ ENTRY(native_usergs_sysret64)
 
        .macro RESTORE_TOP_OF_STACK tmp offset=0
        movq RSP+\offset(%rsp),\tmp
-       movq \tmp,%gs:pda_oldrsp
+       movq \tmp,PER_CPU_VAR(old_rsp)
        movq EFLAGS+\offset(%rsp),\tmp
        movq \tmp,R11+\offset(%rsp)
        .endm
@@ -336,15 +337,15 @@ ENTRY(save_args)
        je 1f
        SWAPGS
        /*
-        * irqcount is used to check if a CPU is already on an interrupt stack
+        * irq_count is used to check if a CPU is already on an interrupt stack
         * or not. While this is essentially redundant with preempt_count it is
         * a little cheaper to use a separate counter in the PDA (short of
         * moving irq_enter into assembly, which would be too much work)
         */
-1:     incl %gs:pda_irqcount
+1:     incl PER_CPU_VAR(irq_count)
        jne 2f
        popq_cfi %rax                   /* move return address... */
-       mov %gs:pda_irqstackptr,%rsp
+       mov PER_CPU_VAR(irq_stack_ptr),%rsp
        EMPTY_FRAME 0
        pushq_cfi %rbp                  /* backlink for unwinder */
        pushq_cfi %rax                  /* ... to the new stack */
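The %gs:pda_* references above become ordinary per-cpu variables; a hedged sketch of the declarations they resolve to, modeled on the per-cpu conversion in this merge and shown only for orientation:

    DEFINE_PER_CPU(unsigned int, irq_count) = -1;   /* -1: not on the irq stack */
    DEFINE_PER_CPU(char *, irq_stack_ptr);          /* top of this CPU's irq stack */
    DEFINE_PER_CPU(unsigned long, old_rsp);         /* user %rsp saved at syscall entry */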
@@ -409,6 +410,8 @@ END(save_paranoid)
 ENTRY(ret_from_fork)
        DEFAULT_FRAME
 
+       LOCK ; btr $TIF_FORK,TI_flags(%r8)
+
        push kernel_eflags(%rip)
        CFI_ADJUST_CFA_OFFSET 8
        popf                                    # reset kernel eflags
@@ -468,7 +471,7 @@ END(ret_from_fork)
 ENTRY(system_call)
        CFI_STARTPROC   simple
        CFI_SIGNAL_FRAME
-       CFI_DEF_CFA     rsp,PDA_STACKOFFSET
+       CFI_DEF_CFA     rsp,KERNEL_STACK_OFFSET
        CFI_REGISTER    rip,rcx
        /*CFI_REGISTER  rflags,r11*/
        SWAPGS_UNSAFE_STACK
@@ -479,8 +482,8 @@ ENTRY(system_call)
         */
 ENTRY(system_call_after_swapgs)
 
-       movq    %rsp,%gs:pda_oldrsp
-       movq    %gs:pda_kernelstack,%rsp
+       movq    %rsp,PER_CPU_VAR(old_rsp)
+       movq    PER_CPU_VAR(kernel_stack),%rsp
        /*
         * No need to follow this irqs off/on section - it's straight
         * and short:
@@ -523,7 +526,7 @@ sysret_check:
        CFI_REGISTER    rip,rcx
        RESTORE_ARGS 0,-ARG_SKIP,1
        /*CFI_REGISTER  rflags,r11*/
-       movq    %gs:pda_oldrsp, %rsp
+       movq    PER_CPU_VAR(old_rsp), %rsp
        USERGS_SYSRET64
 
        CFI_RESTORE_STATE
@@ -833,11 +836,11 @@ common_interrupt:
        XCPT_FRAME
        addq $-0x80,(%rsp)              /* Adjust vector to [-256,-1] range */
        interrupt do_IRQ
-       /* 0(%rsp): oldrsp-ARGOFFSET */
+       /* 0(%rsp): old_rsp-ARGOFFSET */
 ret_from_intr:
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
-       decl %gs:pda_irqcount
+       decl PER_CPU_VAR(irq_count)
        leaveq
        CFI_DEF_CFA_REGISTER    rsp
        CFI_ADJUST_CFA_OFFSET   -8
@@ -982,8 +985,10 @@ apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \
        irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt
 #endif
 
+#ifdef CONFIG_X86_UV
 apicinterrupt UV_BAU_MESSAGE \
        uv_bau_message_intr1 uv_bau_message_interrupt
+#endif
 apicinterrupt LOCAL_TIMER_VECTOR \
        apic_timer_interrupt smp_apic_timer_interrupt
 
@@ -1073,10 +1078,10 @@ ENTRY(\sym)
        TRACE_IRQS_OFF
        movq %rsp,%rdi          /* pt_regs pointer */
        xorl %esi,%esi          /* no error code */
-       movq %gs:pda_data_offset, %rbp
-       subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
+       PER_CPU(init_tss, %rbp)
+       subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
        call \do_sym
-       addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
+       addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
        jmp paranoid_exit       /* %ebx: no swapgs flag */
        CFI_ENDPROC
 END(\sym)
@@ -1138,7 +1143,7 @@ ENTRY(native_load_gs_index)
        CFI_STARTPROC
        pushf
        CFI_ADJUST_CFA_OFFSET 8
-       DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
+       DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
        SWAPGS
 gs_change:
        movl %edi,%gs
@@ -1260,14 +1265,14 @@ ENTRY(call_softirq)
        CFI_REL_OFFSET rbp,0
        mov  %rsp,%rbp
        CFI_DEF_CFA_REGISTER rbp
-       incl %gs:pda_irqcount
-       cmove %gs:pda_irqstackptr,%rsp
+       incl PER_CPU_VAR(irq_count)
+       cmove PER_CPU_VAR(irq_stack_ptr),%rsp
        push  %rbp                      # backlink for old unwinder
        call __do_softirq
        leaveq
        CFI_DEF_CFA_REGISTER    rsp
        CFI_ADJUST_CFA_OFFSET   -8
-       decl %gs:pda_irqcount
+       decl PER_CPU_VAR(irq_count)
        ret
        CFI_ENDPROC
 END(call_softirq)
@@ -1297,15 +1302,15 @@ ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct *pt_regs)
        movq %rdi, %rsp            # we don't return, adjust the stack frame
        CFI_ENDPROC
        DEFAULT_FRAME
-11:    incl %gs:pda_irqcount
+11:    incl PER_CPU_VAR(irq_count)
        movq %rsp,%rbp
        CFI_DEF_CFA_REGISTER rbp
-       cmovzq %gs:pda_irqstackptr,%rsp
+       cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
        pushq %rbp                      # backlink for old unwinder
        call xen_evtchn_do_upcall
        popq %rsp
        CFI_DEF_CFA_REGISTER rsp
-       decl %gs:pda_irqcount
+       decl PER_CPU_VAR(irq_count)
        jmp  error_exit
        CFI_ENDPROC
 END(do_hypervisor_callback)
index 53699c9..55515d7 100644 (file)
@@ -40,7 +40,6 @@
 #include <asm/smp.h>
 #include <asm/atomic.h>
 #include <asm/apicdef.h>
-#include <mach_mpparse.h>
 #include <asm/genapic.h>
 #include <asm/setup.h>
 
@@ -182,20 +181,16 @@ static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
        return 0;
 }
 
-static void noop_wait_for_deassert(atomic_t *deassert_not_used)
-{
-}
-
 static int __init es7000_update_genapic(void)
 {
-       genapic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
+       apic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
 
        /* MPENTIUMIII */
        if (boot_cpu_data.x86 == 6 &&
            (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11)) {
                es7000_update_genapic_to_cluster();
-               genapic->wait_for_init_deassert = noop_wait_for_deassert;
-               genapic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
+               apic->wait_for_init_deassert = NULL;
+               apic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
        }
 
        return 0;
@@ -292,24 +287,31 @@ int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
 {
        struct acpi_table_header *header = NULL;
        int i = 0;
+       acpi_size tbl_size;
 
-       while (ACPI_SUCCESS(acpi_get_table("OEM1", i++, &header))) {
+       while (ACPI_SUCCESS(acpi_get_table_with_size("OEM1", i++, &header, &tbl_size))) {
                if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) {
                        struct oem_table *t = (struct oem_table *)header;
 
                        oem_addrX = t->OEMTableAddr;
                        oem_size = t->OEMTableSize;
+                       early_acpi_os_unmap_memory(header, tbl_size);
 
                        *oem_addr = (unsigned long)__acpi_map_table(oem_addrX,
                                                                    oem_size);
                        return 0;
                }
+               early_acpi_os_unmap_memory(header, tbl_size);
        }
        return -1;
 }
 
 void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr)
 {
+       if (!oem_addr)
+               return;
+
+       __acpi_unmap_table((char *)oem_addr, oem_size);
 }
 #endif
 
@@ -359,20 +361,449 @@ es7000_mip_write(struct mip_reg *mip_reg)
        return status;
 }
 
-void __init
-es7000_sw_apic(void)
-{
-       if (es7000_plat) {
-               int mip_status;
-               struct mip_reg es7000_mip_reg;
-
-               printk("ES7000: Enabling APIC mode.\n");
-               memset(&es7000_mip_reg, 0, sizeof(struct mip_reg));
-               es7000_mip_reg.off_0 = MIP_SW_APIC;
-               es7000_mip_reg.off_38 = (MIP_VALID);
-               while ((mip_status = es7000_mip_write(&es7000_mip_reg)) != 0)
-                       printk("es7000_sw_apic: command failed, status = %x\n",
-                               mip_status);
+void __init es7000_enable_apic_mode(void)
+{
+       struct mip_reg es7000_mip_reg;
+       int mip_status;
+
+       if (!es7000_plat)
                return;
+
+       printk("ES7000: Enabling APIC mode.\n");
+               memset(&es7000_mip_reg, 0, sizeof(struct mip_reg));
+               es7000_mip_reg.off_0 = MIP_SW_APIC;
+               es7000_mip_reg.off_38 = MIP_VALID;
+
+               while ((mip_status = es7000_mip_write(&es7000_mip_reg)) != 0) {
+               printk("es7000_enable_apic_mode: command failed, status = %x\n",
+                       mip_status);
+       }
+}
+
+/*
+ * APIC driver for the Unisys ES7000 chipset.
+ */
+#define APIC_DEFINITION 1
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <asm/mpspec.h>
+#include <asm/genapic.h>
+#include <asm/fixmap.h>
+#include <asm/apicdef.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/smp.h>
+#include <asm/ipi.h>
+
+#define APIC_DFR_VALUE_CLUSTER         (APIC_DFR_CLUSTER)
+#define INT_DELIVERY_MODE_CLUSTER      (dest_LowestPrio)
+#define INT_DEST_MODE_CLUSTER          (1) /* logical delivery broadcast to all procs */
+
+#define APIC_DFR_VALUE                 (APIC_DFR_FLAT)
+
+extern void es7000_enable_apic_mode(void);
+extern int apic_version [MAX_APICS];
+extern u8 cpu_2_logical_apicid[];
+extern unsigned int boot_cpu_physical_apicid;
+
+extern int parse_unisys_oem (char *oemptr);
+extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
+extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr);
+extern void setup_unisys(void);
+
+#define apicid_cluster(apicid)         (apicid & 0xF0)
+#define xapic_phys_to_log_apicid(cpu)  per_cpu(x86_bios_cpu_apicid, cpu)
+
+static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask)
+{
+       /* Careful. Some cpus do not strictly honor the set of cpus
+        * specified in the interrupt destination when using lowest
+        * priority interrupt delivery mode.
+        *
+        * In particular there was a hyperthreading cpu observed to
+        * deliver interrupts to the wrong hyperthread when only one
+        * hyperthread was specified in the interrupt destination.
+        */
+       *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
+}
+
+
+static void es7000_wait_for_init_deassert(atomic_t *deassert)
+{
+#ifndef CONFIG_ES7000_CLUSTERED_APIC
+       while (!atomic_read(deassert))
+               cpu_relax();
+#endif
+       return;
+}
+
+static unsigned int es7000_get_apic_id(unsigned long x)
+{
+       return (x >> 24) & 0xFF;
+}
+
+#ifdef CONFIG_ACPI
+static int es7000_check_dsdt(void)
+{
+       struct acpi_table_header header;
+
+       if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) &&
+           !strncmp(header.oem_id, "UNISYS", 6))
+               return 1;
+       return 0;
+}
+#endif
+
+static void es7000_send_IPI_mask(const struct cpumask *mask, int vector)
+{
+       default_send_IPI_mask_sequence_phys(mask, vector);
+}
+
+static void es7000_send_IPI_allbutself(int vector)
+{
+       default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
+}
+
+static void es7000_send_IPI_all(int vector)
+{
+       es7000_send_IPI_mask(cpu_online_mask, vector);
+}
+
+static int es7000_apic_id_registered(void)
+{
+       return 1;
+}
+
+static const cpumask_t *target_cpus_cluster(void)
+{
+       return &CPU_MASK_ALL;
+}
+
+static const cpumask_t *es7000_target_cpus(void)
+{
+       return &cpumask_of_cpu(smp_processor_id());
+}
+
+static unsigned long
+es7000_check_apicid_used(physid_mask_t bitmap, int apicid)
+{
+       return 0;
+}
+static unsigned long es7000_check_apicid_present(int bit)
+{
+       return physid_isset(bit, phys_cpu_present_map);
+}
+
+static unsigned long calculate_ldr(int cpu)
+{
+       unsigned long id = xapic_phys_to_log_apicid(cpu);
+
+       return (SET_APIC_LOGICAL_ID(id));
+}
+
+/*
+ * Set up the logical destination ID.
+ *
+ * Intel recommends setting DFR, LDR and TPR before enabling
+ * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
+ * document number 292116).  So here it goes...
+ */
+static void es7000_init_apic_ldr_cluster(void)
+{
+       unsigned long val;
+       int cpu = smp_processor_id();
+
+       apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER);
+       val = calculate_ldr(cpu);
+       apic_write(APIC_LDR, val);
+}
+
+static void es7000_init_apic_ldr(void)
+{
+       unsigned long val;
+       int cpu = smp_processor_id();
+
+       apic_write(APIC_DFR, APIC_DFR_VALUE);
+       val = calculate_ldr(cpu);
+       apic_write(APIC_LDR, val);
+}
+
+static void es7000_setup_apic_routing(void)
+{
+       int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
+       printk("Enabling APIC mode:  %s. Using %d I/O APICs, target cpus %lx\n",
+               (apic_version[apic] == 0x14) ?
+                       "Physical Cluster" : "Logical Cluster",
+                       nr_ioapics, cpus_addr(*es7000_target_cpus())[0]);
+}
+
+static int es7000_apicid_to_node(int logical_apicid)
+{
+       return 0;
+}
+
+
+static int es7000_cpu_present_to_apicid(int mps_cpu)
+{
+       if (!mps_cpu)
+               return boot_cpu_physical_apicid;
+       else if (mps_cpu < nr_cpu_ids)
+               return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
+       else
+               return BAD_APICID;
+}
+
+static physid_mask_t es7000_apicid_to_cpu_present(int phys_apicid)
+{
+       static int id = 0;
+       physid_mask_t mask;
+
+       mask = physid_mask_of_physid(id);
+       ++id;
+
+       return mask;
+}
+
+/* Mapping from cpu number to logical apicid */
+static int es7000_cpu_to_logical_apicid(int cpu)
+{
+#ifdef CONFIG_SMP
+       if (cpu >= nr_cpu_ids)
+               return BAD_APICID;
+       return (int)cpu_2_logical_apicid[cpu];
+#else
+       return logical_smp_processor_id();
+#endif
+}
+
+static physid_mask_t es7000_ioapic_phys_id_map(physid_mask_t phys_map)
+{
+       /* For clustered we don't have a good way to do this yet - hack */
+       return physids_promote(0xff);
+}
+
+static int es7000_check_phys_apicid_present(int cpu_physical_apicid)
+{
+       boot_cpu_physical_apicid = read_apic_id();
+       return (1);
+}
+
+static unsigned int
+es7000_cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
+{
+       int cpus_found = 0;
+       int num_bits_set;
+       int apicid;
+       int cpu;
+
+       num_bits_set = cpumask_weight(cpumask);
+       /* Return id to all */
+       if (num_bits_set == nr_cpu_ids)
+               return 0xFF;
+       /*
+        * The cpus in the mask must all be on the same apic cluster.  If they
+        * are not on the same apicid cluster, return the default value of
+        * target_cpus():
+        */
+       cpu = cpumask_first(cpumask);
+       apicid = es7000_cpu_to_logical_apicid(cpu);
+
+       while (cpus_found < num_bits_set) {
+               if (cpumask_test_cpu(cpu, cpumask)) {
+                       int new_apicid = es7000_cpu_to_logical_apicid(cpu);
+
+                       if (apicid_cluster(apicid) !=
+                                       apicid_cluster(new_apicid)) {
+                               printk ("%s: Not a valid mask!\n", __func__);
+
+                               return 0xFF;
+                       }
+                       apicid = new_apicid;
+                       cpus_found++;
+               }
+               cpu++;
+       }
+       return apicid;
+}
+
+static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask)
+{
+       int cpus_found = 0;
+       int num_bits_set;
+       int apicid;
+       int cpu;
+
+       num_bits_set = cpus_weight(*cpumask);
+       /* Return id to all */
+       if (num_bits_set == nr_cpu_ids)
+               return es7000_cpu_to_logical_apicid(0);
+       /*
+        * The cpus in the mask must all be on the same apic cluster.  If they
+        * are not on the same apicid cluster, return the default value of
+        * target_cpus():
+        */
+       cpu = first_cpu(*cpumask);
+       apicid = es7000_cpu_to_logical_apicid(cpu);
+       while (cpus_found < num_bits_set) {
+               if (cpu_isset(cpu, *cpumask)) {
+                       int new_apicid = es7000_cpu_to_logical_apicid(cpu);
+
+                       if (apicid_cluster(apicid) !=
+                                       apicid_cluster(new_apicid)) {
+                               printk ("%s: Not a valid mask!\n", __func__);
+
+                               return es7000_cpu_to_logical_apicid(0);
+                       }
+                       apicid = new_apicid;
+                       cpus_found++;
+               }
+               cpu++;
+       }
+       return apicid;
+}
+
+static unsigned int
+es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask,
+                             const struct cpumask *andmask)
+{
+       int apicid = es7000_cpu_to_logical_apicid(0);
+       cpumask_var_t cpumask;
+
+       if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
+               return apicid;
+
+       cpumask_and(cpumask, inmask, andmask);
+       cpumask_and(cpumask, cpumask, cpu_online_mask);
+       apicid = es7000_cpu_mask_to_apicid(cpumask);
+
+       free_cpumask_var(cpumask);
+
+       return apicid;
+}
+
+static int es7000_phys_pkg_id(int cpuid_apic, int index_msb)
+{
+       return cpuid_apic >> index_msb;
+}
+
+void __init es7000_update_genapic_to_cluster(void)
+{
+       apic->target_cpus = target_cpus_cluster;
+       apic->irq_delivery_mode = INT_DELIVERY_MODE_CLUSTER;
+       apic->irq_dest_mode = INT_DEST_MODE_CLUSTER;
+
+       apic->init_apic_ldr = es7000_init_apic_ldr_cluster;
+
+       apic->cpu_mask_to_apicid = es7000_cpu_mask_to_apicid_cluster;
+}
+
+static int probe_es7000(void)
+{
+       /* probed later in mptable/ACPI hooks */
+       return 0;
+}
+
+static __init int
+es7000_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
+{
+       if (mpc->oemptr) {
+               struct mpc_oemtable *oem_table =
+                       (struct mpc_oemtable *)mpc->oemptr;
+
+               if (!strncmp(oem, "UNISYS", 6))
+                       return parse_unisys_oem((char *)oem_table);
+       }
+       return 0;
+}
+
+#ifdef CONFIG_ACPI
+/* Hook from generic ACPI tables.c */
+static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+       unsigned long oem_addr = 0;
+       int check_dsdt;
+       int ret = 0;
+
+       /* check the DSDT first to avoid clearing the fixmap for oem_addr */
+       check_dsdt = es7000_check_dsdt();
+
+       if (!find_unisys_acpi_oem_table(&oem_addr)) {
+               if (check_dsdt)
+                       ret = parse_unisys_oem((char *)oem_addr);
+               else {
+                       setup_unisys();
+                       ret = 1;
+               }
+               /*
+                * we need to unmap it
+                */
+               unmap_unisys_acpi_oem_table(oem_addr);
        }
+       return ret;
+}
+#else
+static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+       return 0;
 }
+#endif
+
+
+struct genapic apic_es7000 = {
+
+       .name                           = "es7000",
+       .probe                          = probe_es7000,
+       .acpi_madt_oem_check            = es7000_acpi_madt_oem_check,
+       .apic_id_registered             = es7000_apic_id_registered,
+
+       .irq_delivery_mode              = dest_Fixed,
+       /* phys delivery to target CPUs: */
+       .irq_dest_mode                  = 0,
+
+       .target_cpus                    = es7000_target_cpus,
+       .disable_esr                    = 1,
+       .dest_logical                   = 0,
+       .check_apicid_used              = es7000_check_apicid_used,
+       .check_apicid_present           = es7000_check_apicid_present,
+
+       .vector_allocation_domain       = es7000_vector_allocation_domain,
+       .init_apic_ldr                  = es7000_init_apic_ldr,
+
+       .ioapic_phys_id_map             = es7000_ioapic_phys_id_map,
+       .setup_apic_routing             = es7000_setup_apic_routing,
+       .multi_timer_check              = NULL,
+       .apicid_to_node                 = es7000_apicid_to_node,
+       .cpu_to_logical_apicid          = es7000_cpu_to_logical_apicid,
+       .cpu_present_to_apicid          = es7000_cpu_present_to_apicid,
+       .apicid_to_cpu_present          = es7000_apicid_to_cpu_present,
+       .setup_portio_remap             = NULL,
+       .check_phys_apicid_present      = es7000_check_phys_apicid_present,
+       .enable_apic_mode               = es7000_enable_apic_mode,
+       .phys_pkg_id                    = es7000_phys_pkg_id,
+       .mps_oem_check                  = es7000_mps_oem_check,
+
+       .get_apic_id                    = es7000_get_apic_id,
+       .set_apic_id                    = NULL,
+       .apic_id_mask                   = 0xFF << 24,
+
+       .cpu_mask_to_apicid             = es7000_cpu_mask_to_apicid,
+       .cpu_mask_to_apicid_and         = es7000_cpu_mask_to_apicid_and,
+
+       .send_IPI_mask                  = es7000_send_IPI_mask,
+       .send_IPI_mask_allbutself       = NULL,
+       .send_IPI_allbutself            = es7000_send_IPI_allbutself,
+       .send_IPI_all                   = es7000_send_IPI_all,
+       .send_IPI_self                  = default_send_IPI_self,
+
+       .wakeup_cpu                     = NULL,
+
+       .trampoline_phys_low            = 0x467,
+       .trampoline_phys_high           = 0x469,
+
+       .wait_for_init_deassert         = es7000_wait_for_init_deassert,
+
+       /* Nothing to do for most platforms, since cleared by the INIT cycle: */
+       .smp_callin_clear_local_apic    = NULL,
+       .store_NMI_vector               = NULL,
+       .inquire_remote_apic            = default_inquire_remote_apic,
+};
index 2bced78..820dea5 100644 (file)
@@ -29,10 +29,12 @@ extern struct genapic apic_x2xpic_uv_x;
 extern struct genapic apic_x2apic_phys;
 extern struct genapic apic_x2apic_cluster;
 
-struct genapic __read_mostly *genapic = &apic_flat;
+struct genapic __read_mostly *apic = &apic_flat;
 
 static struct genapic *apic_probe[] __initdata = {
+#ifdef CONFIG_X86_UV
        &apic_x2apic_uv_x,
+#endif
        &apic_x2apic_phys,
        &apic_x2apic_cluster,
        &apic_physflat,
@@ -42,17 +44,17 @@ static struct genapic *apic_probe[] __initdata = {
 /*
  * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
  */
-void __init setup_apic_routing(void)
+void __init default_setup_apic_routing(void)
 {
-       if (genapic == &apic_x2apic_phys || genapic == &apic_x2apic_cluster) {
+       if (apic == &apic_x2apic_phys || apic == &apic_x2apic_cluster) {
                if (!intr_remapping_enabled)
-                       genapic = &apic_flat;
+                       apic = &apic_flat;
        }
 
-       if (genapic == &apic_flat) {
+       if (apic == &apic_flat) {
                if (max_physical_apicid >= 8)
-                       genapic = &apic_physflat;
-               printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name);
+                       apic = &apic_physflat;
+               printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
        }
 
        if (x86_quirks->update_genapic)
@@ -63,18 +65,18 @@ void __init setup_apic_routing(void)
 
 void apic_send_IPI_self(int vector)
 {
-       __send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
+       __default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
 }
 
-int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
        int i;
 
        for (i = 0; apic_probe[i]; ++i) {
                if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) {
-                       genapic = apic_probe[i];
+                       apic = apic_probe[i];
                        printk(KERN_INFO "Setting APIC routing to %s.\n",
-                               genapic->name);
+                               apic->name);
                        return 1;
                }
        }
index 3418548..249d2d3 100644 (file)
@@ -19,7 +19,6 @@
 #include <asm/smp.h>
 #include <asm/ipi.h>
 #include <asm/genapic.h>
-#include <mach_apicdef.h>
 
 #ifdef CONFIG_ACPI
 #include <acpi/acpi_bus.h>
@@ -74,7 +73,7 @@ static inline void _flat_send_IPI_mask(unsigned long mask, int vector)
        unsigned long flags;
 
        local_irq_save(flags);
-       __send_IPI_dest_field(mask, vector, APIC_DEST_LOGICAL);
+       __default_send_IPI_dest_field(mask, vector, apic->dest_logical);
        local_irq_restore(flags);
 }
 
@@ -85,14 +84,15 @@ static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
        _flat_send_IPI_mask(mask, vector);
 }
 
-static void flat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
-                                         int vector)
+static void
+flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector)
 {
        unsigned long mask = cpumask_bits(cpumask)[0];
        int cpu = smp_processor_id();
 
        if (cpu < BITS_PER_LONG)
                clear_bit(cpu, &mask);
+
        _flat_send_IPI_mask(mask, vector);
 }
 
@@ -114,23 +114,27 @@ static void flat_send_IPI_allbutself(int vector)
                        _flat_send_IPI_mask(mask, vector);
                }
        } else if (num_online_cpus() > 1) {
-               __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL);
+               __default_send_IPI_shortcut(APIC_DEST_ALLBUT,
+                                           vector, apic->dest_logical);
        }
 }
 
 static void flat_send_IPI_all(int vector)
 {
-       if (vector == NMI_VECTOR)
+       if (vector == NMI_VECTOR) {
                flat_send_IPI_mask(cpu_online_mask, vector);
-       else
-               __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
+       } else {
+               __default_send_IPI_shortcut(APIC_DEST_ALLINC,
+                                           vector, apic->dest_logical);
+       }
 }
 
-static unsigned int get_apic_id(unsigned long x)
+static unsigned int flat_get_apic_id(unsigned long x)
 {
        unsigned int id;
 
        id = (((x)>>24) & 0xFFu);
+
        return id;
 }
 
@@ -146,7 +150,7 @@ static unsigned int read_xapic_id(void)
 {
        unsigned int id;
 
-       id = get_apic_id(apic_read(APIC_ID));
+       id = flat_get_apic_id(apic_read(APIC_ID));
        return id;
 }
 
@@ -169,31 +173,62 @@ static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
        return mask1 & mask2;
 }
 
-static unsigned int phys_pkg_id(int index_msb)
+static int flat_phys_pkg_id(int initial_apic_id, int index_msb)
 {
        return hard_smp_processor_id() >> index_msb;
 }
 
 struct genapic apic_flat =  {
-       .name = "flat",
-       .acpi_madt_oem_check = flat_acpi_madt_oem_check,
-       .int_delivery_mode = dest_LowestPrio,
-       .int_dest_mode = (APIC_DEST_LOGICAL != 0),
-       .target_cpus = flat_target_cpus,
-       .vector_allocation_domain = flat_vector_allocation_domain,
-       .apic_id_registered = flat_apic_id_registered,
-       .init_apic_ldr = flat_init_apic_ldr,
-       .send_IPI_all = flat_send_IPI_all,
-       .send_IPI_allbutself = flat_send_IPI_allbutself,
-       .send_IPI_mask = flat_send_IPI_mask,
-       .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself,
-       .send_IPI_self = apic_send_IPI_self,
-       .cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
-       .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
-       .phys_pkg_id = phys_pkg_id,
-       .get_apic_id = get_apic_id,
-       .set_apic_id = set_apic_id,
-       .apic_id_mask = (0xFFu<<24),
+       .name                           = "flat",
+       .probe                          = NULL,
+       .acpi_madt_oem_check            = flat_acpi_madt_oem_check,
+       .apic_id_registered             = flat_apic_id_registered,
+
+       .irq_delivery_mode              = dest_LowestPrio,
+       .irq_dest_mode                  = 1, /* logical */
+
+       .target_cpus                    = flat_target_cpus,
+       .disable_esr                    = 0,
+       .dest_logical                   = APIC_DEST_LOGICAL,
+       .check_apicid_used              = NULL,
+       .check_apicid_present           = NULL,
+
+       .vector_allocation_domain       = flat_vector_allocation_domain,
+       .init_apic_ldr                  = flat_init_apic_ldr,
+
+       .ioapic_phys_id_map             = NULL,
+       .setup_apic_routing             = NULL,
+       .multi_timer_check              = NULL,
+       .apicid_to_node                 = NULL,
+       .cpu_to_logical_apicid          = NULL,
+       .cpu_present_to_apicid          = default_cpu_present_to_apicid,
+       .apicid_to_cpu_present          = NULL,
+       .setup_portio_remap             = NULL,
+       .check_phys_apicid_present      = default_check_phys_apicid_present,
+       .enable_apic_mode               = NULL,
+       .phys_pkg_id                    = flat_phys_pkg_id,
+       .mps_oem_check                  = NULL,
+
+       .get_apic_id                    = flat_get_apic_id,
+       .set_apic_id                    = set_apic_id,
+       .apic_id_mask                   = 0xFFu << 24,
+
+       .cpu_mask_to_apicid             = flat_cpu_mask_to_apicid,
+       .cpu_mask_to_apicid_and         = flat_cpu_mask_to_apicid_and,
+
+       .send_IPI_mask                  = flat_send_IPI_mask,
+       .send_IPI_mask_allbutself       = flat_send_IPI_mask_allbutself,
+       .send_IPI_allbutself            = flat_send_IPI_allbutself,
+       .send_IPI_all                   = flat_send_IPI_all,
+       .send_IPI_self                  = apic_send_IPI_self,
+
+       .wakeup_cpu                     = NULL,
+       .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
+       .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+       .wait_for_init_deassert         = NULL,
+       .smp_callin_clear_local_apic    = NULL,
+       .store_NMI_vector               = NULL,
+       .inquire_remote_apic            = NULL,
 };
 
 /*
@@ -232,18 +267,18 @@ static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask)
 
 static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
 {
-       send_IPI_mask_sequence(cpumask, vector);
+       default_send_IPI_mask_sequence_phys(cpumask, vector);
 }
 
 static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
                                              int vector)
 {
-       send_IPI_mask_allbutself(cpumask, vector);
+       default_send_IPI_mask_allbutself_phys(cpumask, vector);
 }
 
 static void physflat_send_IPI_allbutself(int vector)
 {
-       send_IPI_mask_allbutself(cpu_online_mask, vector);
+       default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
 }
 
 static void physflat_send_IPI_all(int vector)
@@ -276,32 +311,67 @@ physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
         * We're using fixed IRQ delivery, can only return one phys APIC ID.
         * May as well be the first.
         */
-       for_each_cpu_and(cpu, cpumask, andmask)
+       for_each_cpu_and(cpu, cpumask, andmask) {
                if (cpumask_test_cpu(cpu, cpu_online_mask))
                        break;
+       }
        if (cpu < nr_cpu_ids)
                return per_cpu(x86_cpu_to_apicid, cpu);
+
        return BAD_APICID;
 }
 
 struct genapic apic_physflat =  {
-       .name = "physical flat",
-       .acpi_madt_oem_check = physflat_acpi_madt_oem_check,
-       .int_delivery_mode = dest_Fixed,
-       .int_dest_mode = (APIC_DEST_PHYSICAL != 0),
-       .target_cpus = physflat_target_cpus,
-       .vector_allocation_domain = physflat_vector_allocation_domain,
-       .apic_id_registered = flat_apic_id_registered,
-       .init_apic_ldr = flat_init_apic_ldr,/*not needed, but shouldn't hurt*/
-       .send_IPI_all = physflat_send_IPI_all,
-       .send_IPI_allbutself = physflat_send_IPI_allbutself,
-       .send_IPI_mask = physflat_send_IPI_mask,
-       .send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself,
-       .send_IPI_self = apic_send_IPI_self,
-       .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid,
-       .cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and,
-       .phys_pkg_id = phys_pkg_id,
-       .get_apic_id = get_apic_id,
-       .set_apic_id = set_apic_id,
-       .apic_id_mask = (0xFFu<<24),
+
+       .name                           = "physical flat",
+       .probe                          = NULL,
+       .acpi_madt_oem_check            = physflat_acpi_madt_oem_check,
+       .apic_id_registered             = flat_apic_id_registered,
+
+       .irq_delivery_mode              = dest_Fixed,
+       .irq_dest_mode                  = 0, /* physical */
+
+       .target_cpus                    = physflat_target_cpus,
+       .disable_esr                    = 0,
+       .dest_logical                   = 0,
+       .check_apicid_used              = NULL,
+       .check_apicid_present           = NULL,
+
+       .vector_allocation_domain       = physflat_vector_allocation_domain,
+       /* not needed, but shouldn't hurt: */
+       .init_apic_ldr                  = flat_init_apic_ldr,
+
+       .ioapic_phys_id_map             = NULL,
+       .setup_apic_routing             = NULL,
+       .multi_timer_check              = NULL,
+       .apicid_to_node                 = NULL,
+       .cpu_to_logical_apicid          = NULL,
+       .cpu_present_to_apicid          = default_cpu_present_to_apicid,
+       .apicid_to_cpu_present          = NULL,
+       .setup_portio_remap             = NULL,
+       .check_phys_apicid_present      = default_check_phys_apicid_present,
+       .enable_apic_mode               = NULL,
+       .phys_pkg_id                    = flat_phys_pkg_id,
+       .mps_oem_check                  = NULL,
+
+       .get_apic_id                    = flat_get_apic_id,
+       .set_apic_id                    = set_apic_id,
+       .apic_id_mask                   = 0xFFu << 24,
+
+       .cpu_mask_to_apicid             = physflat_cpu_mask_to_apicid,
+       .cpu_mask_to_apicid_and         = physflat_cpu_mask_to_apicid_and,
+
+       .send_IPI_mask                  = physflat_send_IPI_mask,
+       .send_IPI_mask_allbutself       = physflat_send_IPI_mask_allbutself,
+       .send_IPI_allbutself            = physflat_send_IPI_allbutself,
+       .send_IPI_all                   = physflat_send_IPI_all,
+       .send_IPI_self                  = apic_send_IPI_self,
+
+       .wakeup_cpu                     = NULL,
+       .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
+       .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+       .wait_for_init_deassert         = NULL,
+       .smp_callin_clear_local_apic    = NULL,
+       .store_NMI_vector               = NULL,
+       .inquire_remote_apic            = NULL,
 };
index 6ce497c..7c87156 100644 (file)
@@ -36,8 +36,8 @@ static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
        cpumask_set_cpu(cpu, retmask);
 }
 
-static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
-                                  unsigned int dest)
+static void
+__x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
 {
        unsigned long cfg;
 
@@ -57,45 +57,50 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
  */
 static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
 {
-       unsigned long flags;
        unsigned long query_cpu;
+       unsigned long flags;
 
        local_irq_save(flags);
-       for_each_cpu(query_cpu, mask)
+       for_each_cpu(query_cpu, mask) {
                __x2apic_send_IPI_dest(
                        per_cpu(x86_cpu_to_logical_apicid, query_cpu),
-                       vector, APIC_DEST_LOGICAL);
+                       vector, apic->dest_logical);
+       }
        local_irq_restore(flags);
 }
 
-static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
-                                           int vector)
+static void
+x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
 {
-       unsigned long flags;
-       unsigned long query_cpu;
        unsigned long this_cpu = smp_processor_id();
+       unsigned long query_cpu;
+       unsigned long flags;
 
        local_irq_save(flags);
-       for_each_cpu(query_cpu, mask)
-               if (query_cpu != this_cpu)
-                       __x2apic_send_IPI_dest(
+       for_each_cpu(query_cpu, mask) {
+               if (query_cpu == this_cpu)
+                       continue;
+               __x2apic_send_IPI_dest(
                                per_cpu(x86_cpu_to_logical_apicid, query_cpu),
-                               vector, APIC_DEST_LOGICAL);
+                               vector, apic->dest_logical);
+       }
        local_irq_restore(flags);
 }
 
 static void x2apic_send_IPI_allbutself(int vector)
 {
-       unsigned long flags;
-       unsigned long query_cpu;
        unsigned long this_cpu = smp_processor_id();
+       unsigned long query_cpu;
+       unsigned long flags;
 
        local_irq_save(flags);
-       for_each_online_cpu(query_cpu)
-               if (query_cpu != this_cpu)
-                       __x2apic_send_IPI_dest(
+       for_each_online_cpu(query_cpu) {
+               if (query_cpu == this_cpu)
+                       continue;
+               __x2apic_send_IPI_dest(
                                per_cpu(x86_cpu_to_logical_apicid, query_cpu),
-                               vector, APIC_DEST_LOGICAL);
+                               vector, apic->dest_logical);
+       }
        local_irq_restore(flags);
 }
 
@@ -111,21 +116,21 @@ static int x2apic_apic_id_registered(void)
 
 static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
-       int cpu;
-
        /*
         * We're using fixed IRQ delivery, can only return one logical APIC ID.
         * May as well be the first.
         */
-       cpu = cpumask_first(cpumask);
+       int cpu = cpumask_first(cpumask);
+
        if ((unsigned)cpu < nr_cpu_ids)
                return per_cpu(x86_cpu_to_logical_apicid, cpu);
        else
                return BAD_APICID;
 }
 
-static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-                                                 const struct cpumask *andmask)
+static unsigned int
+x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                             const struct cpumask *andmask)
 {
        int cpu;
 
@@ -133,15 +138,18 @@ static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
         * We're using fixed IRQ delivery, can only return one logical APIC ID.
         * May as well be the first.
         */
-       for_each_cpu_and(cpu, cpumask, andmask)
+       for_each_cpu_and(cpu, cpumask, andmask) {
                if (cpumask_test_cpu(cpu, cpu_online_mask))
                        break;
+       }
+
        if (cpu < nr_cpu_ids)
                return per_cpu(x86_cpu_to_logical_apicid, cpu);
+
        return BAD_APICID;
 }
 
-static unsigned int get_apic_id(unsigned long x)
+static unsigned int x2apic_cluster_phys_get_apic_id(unsigned long x)
 {
        unsigned int id;
 
@@ -157,7 +165,7 @@ static unsigned long set_apic_id(unsigned int id)
        return x;
 }
 
-static unsigned int phys_pkg_id(int index_msb)
+static int x2apic_cluster_phys_pkg_id(int initial_apicid, int index_msb)
 {
        return current_cpu_data.initial_apicid >> index_msb;
 }
@@ -172,27 +180,58 @@ static void init_x2apic_ldr(void)
        int cpu = smp_processor_id();
 
        per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR);
-       return;
 }
 
 struct genapic apic_x2apic_cluster = {
-       .name = "cluster x2apic",
-       .acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
-       .int_delivery_mode = dest_LowestPrio,
-       .int_dest_mode = (APIC_DEST_LOGICAL != 0),
-       .target_cpus = x2apic_target_cpus,
-       .vector_allocation_domain = x2apic_vector_allocation_domain,
-       .apic_id_registered = x2apic_apic_id_registered,
-       .init_apic_ldr = init_x2apic_ldr,
-       .send_IPI_all = x2apic_send_IPI_all,
-       .send_IPI_allbutself = x2apic_send_IPI_allbutself,
-       .send_IPI_mask = x2apic_send_IPI_mask,
-       .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
-       .send_IPI_self = x2apic_send_IPI_self,
-       .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
-       .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
-       .phys_pkg_id = phys_pkg_id,
-       .get_apic_id = get_apic_id,
-       .set_apic_id = set_apic_id,
-       .apic_id_mask = (0xFFFFFFFFu),
+
+       .name                           = "cluster x2apic",
+       .probe                          = NULL,
+       .acpi_madt_oem_check            = x2apic_acpi_madt_oem_check,
+       .apic_id_registered             = x2apic_apic_id_registered,
+
+       .irq_delivery_mode              = dest_LowestPrio,
+       .irq_dest_mode                  = 1, /* logical */
+
+       .target_cpus                    = x2apic_target_cpus,
+       .disable_esr                    = 0,
+       .dest_logical                   = APIC_DEST_LOGICAL,
+       .check_apicid_used              = NULL,
+       .check_apicid_present           = NULL,
+
+       .vector_allocation_domain       = x2apic_vector_allocation_domain,
+       .init_apic_ldr                  = init_x2apic_ldr,
+
+       .ioapic_phys_id_map             = NULL,
+       .setup_apic_routing             = NULL,
+       .multi_timer_check              = NULL,
+       .apicid_to_node                 = NULL,
+       .cpu_to_logical_apicid          = NULL,
+       .cpu_present_to_apicid          = default_cpu_present_to_apicid,
+       .apicid_to_cpu_present          = NULL,
+       .setup_portio_remap             = NULL,
+       .check_phys_apicid_present      = default_check_phys_apicid_present,
+       .enable_apic_mode               = NULL,
+       .phys_pkg_id                    = x2apic_cluster_phys_pkg_id,
+       .mps_oem_check                  = NULL,
+
+       .get_apic_id                    = x2apic_cluster_phys_get_apic_id,
+       .set_apic_id                    = set_apic_id,
+       .apic_id_mask                   = 0xFFFFFFFFu,
+
+       .cpu_mask_to_apicid             = x2apic_cpu_mask_to_apicid,
+       .cpu_mask_to_apicid_and         = x2apic_cpu_mask_to_apicid_and,
+
+       .send_IPI_mask                  = x2apic_send_IPI_mask,
+       .send_IPI_mask_allbutself       = x2apic_send_IPI_mask_allbutself,
+       .send_IPI_allbutself            = x2apic_send_IPI_allbutself,
+       .send_IPI_all                   = x2apic_send_IPI_all,
+       .send_IPI_self                  = x2apic_send_IPI_self,
+
+       .wakeup_cpu                     = NULL,
+       .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
+       .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+       .wait_for_init_deassert         = NULL,
+       .smp_callin_clear_local_apic    = NULL,
+       .store_NMI_vector               = NULL,
+       .inquire_remote_apic            = NULL,
 };
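
The cluster driver above now spells out every struct genapic callback and leaves
the hooks it does not need as NULL, so generic code has to test an optional hook
before invoking it.  A minimal sketch of that guard pattern (hypothetical helper;
the real caller is the multi_timer_check test in the io_apic.c hunks further below):

	/* Sketch only: invoke an optional genapic hook if the driver set it. */
	static int example_timer_quirk(int apic_id, int irq)
	{
		/* apic->multi_timer_check may legitimately be NULL, as above. */
		if (apic->multi_timer_check && apic->multi_timer_check(apic_id, irq))
			return 1;	/* quirk handled the timer IRQ, skip it */

		return 0;
	}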
index 21bcc0e..5cbae8a 100644 (file)
@@ -55,8 +55,8 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
 
 static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
 {
-       unsigned long flags;
        unsigned long query_cpu;
+       unsigned long flags;
 
        local_irq_save(flags);
        for_each_cpu(query_cpu, mask) {
@@ -66,12 +66,12 @@ static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
        local_irq_restore(flags);
 }
 
-static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
-                                           int vector)
+static void
+x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
 {
-       unsigned long flags;
-       unsigned long query_cpu;
        unsigned long this_cpu = smp_processor_id();
+       unsigned long query_cpu;
+       unsigned long flags;
 
        local_irq_save(flags);
        for_each_cpu(query_cpu, mask) {
@@ -85,16 +85,17 @@ static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
 
 static void x2apic_send_IPI_allbutself(int vector)
 {
-       unsigned long flags;
-       unsigned long query_cpu;
        unsigned long this_cpu = smp_processor_id();
+       unsigned long query_cpu;
+       unsigned long flags;
 
        local_irq_save(flags);
-       for_each_online_cpu(query_cpu)
-               if (query_cpu != this_cpu)
-                       __x2apic_send_IPI_dest(
-                               per_cpu(x86_cpu_to_apicid, query_cpu),
-                               vector, APIC_DEST_PHYSICAL);
+       for_each_online_cpu(query_cpu) {
+               if (query_cpu == this_cpu)
+                       continue;
+               __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
+                                      vector, APIC_DEST_PHYSICAL);
+       }
        local_irq_restore(flags);
 }
 
@@ -110,21 +111,21 @@ static int x2apic_apic_id_registered(void)
 
 static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
-       int cpu;
-
        /*
         * We're using fixed IRQ delivery, can only return one phys APIC ID.
         * May as well be the first.
         */
-       cpu = cpumask_first(cpumask);
+       int cpu = cpumask_first(cpumask);
+
        if ((unsigned)cpu < nr_cpu_ids)
                return per_cpu(x86_cpu_to_apicid, cpu);
        else
                return BAD_APICID;
 }
 
-static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-                                                 const struct cpumask *andmask)
+static unsigned int
+x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                             const struct cpumask *andmask)
 {
        int cpu;
 
@@ -132,31 +133,28 @@ static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
         * We're using fixed IRQ delivery, can only return one phys APIC ID.
         * May as well be the first.
         */
-       for_each_cpu_and(cpu, cpumask, andmask)
+       for_each_cpu_and(cpu, cpumask, andmask) {
                if (cpumask_test_cpu(cpu, cpu_online_mask))
                        break;
+       }
+
        if (cpu < nr_cpu_ids)
                return per_cpu(x86_cpu_to_apicid, cpu);
+
        return BAD_APICID;
 }
 
-static unsigned int get_apic_id(unsigned long x)
+static unsigned int x2apic_phys_get_apic_id(unsigned long x)
 {
-       unsigned int id;
-
-       id = x;
-       return id;
+       return x;
 }
 
 static unsigned long set_apic_id(unsigned int id)
 {
-       unsigned long x;
-
-       x = id;
-       return x;
+       return id;
 }
 
-static unsigned int phys_pkg_id(int index_msb)
+static int x2apic_phys_pkg_id(int initial_apicid, int index_msb)
 {
        return current_cpu_data.initial_apicid >> index_msb;
 }
@@ -168,27 +166,58 @@ static void x2apic_send_IPI_self(int vector)
 
 static void init_x2apic_ldr(void)
 {
-       return;
 }
 
 struct genapic apic_x2apic_phys = {
-       .name = "physical x2apic",
-       .acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
-       .int_delivery_mode = dest_Fixed,
-       .int_dest_mode = (APIC_DEST_PHYSICAL != 0),
-       .target_cpus = x2apic_target_cpus,
-       .vector_allocation_domain = x2apic_vector_allocation_domain,
-       .apic_id_registered = x2apic_apic_id_registered,
-       .init_apic_ldr = init_x2apic_ldr,
-       .send_IPI_all = x2apic_send_IPI_all,
-       .send_IPI_allbutself = x2apic_send_IPI_allbutself,
-       .send_IPI_mask = x2apic_send_IPI_mask,
-       .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
-       .send_IPI_self = x2apic_send_IPI_self,
-       .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
-       .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
-       .phys_pkg_id = phys_pkg_id,
-       .get_apic_id = get_apic_id,
-       .set_apic_id = set_apic_id,
-       .apic_id_mask = (0xFFFFFFFFu),
+
+       .name                           = "physical x2apic",
+       .probe                          = NULL,
+       .acpi_madt_oem_check            = x2apic_acpi_madt_oem_check,
+       .apic_id_registered             = x2apic_apic_id_registered,
+
+       .irq_delivery_mode              = dest_Fixed,
+       .irq_dest_mode                  = 0, /* physical */
+
+       .target_cpus                    = x2apic_target_cpus,
+       .disable_esr                    = 0,
+       .dest_logical                   = 0,
+       .check_apicid_used              = NULL,
+       .check_apicid_present           = NULL,
+
+       .vector_allocation_domain       = x2apic_vector_allocation_domain,
+       .init_apic_ldr                  = init_x2apic_ldr,
+
+       .ioapic_phys_id_map             = NULL,
+       .setup_apic_routing             = NULL,
+       .multi_timer_check              = NULL,
+       .apicid_to_node                 = NULL,
+       .cpu_to_logical_apicid          = NULL,
+       .cpu_present_to_apicid          = default_cpu_present_to_apicid,
+       .apicid_to_cpu_present          = NULL,
+       .setup_portio_remap             = NULL,
+       .check_phys_apicid_present      = default_check_phys_apicid_present,
+       .enable_apic_mode               = NULL,
+       .phys_pkg_id                    = x2apic_phys_pkg_id,
+       .mps_oem_check                  = NULL,
+
+       .get_apic_id                    = x2apic_phys_get_apic_id,
+       .set_apic_id                    = set_apic_id,
+       .apic_id_mask                   = 0xFFFFFFFFu,
+
+       .cpu_mask_to_apicid             = x2apic_cpu_mask_to_apicid,
+       .cpu_mask_to_apicid_and         = x2apic_cpu_mask_to_apicid_and,
+
+       .send_IPI_mask                  = x2apic_send_IPI_mask,
+       .send_IPI_mask_allbutself       = x2apic_send_IPI_mask_allbutself,
+       .send_IPI_allbutself            = x2apic_send_IPI_allbutself,
+       .send_IPI_all                   = x2apic_send_IPI_all,
+       .send_IPI_self                  = x2apic_send_IPI_self,
+
+       .wakeup_cpu                     = NULL,
+       .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
+       .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+       .wait_for_init_deassert         = NULL,
+       .smp_callin_clear_local_apic    = NULL,
+       .store_NMI_vector               = NULL,
+       .inquire_remote_apic            = NULL,
 };
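
Both x2apic drivers publish the same mask-to-APIC-ID helpers through the apic
pointer.  Set-up code computes an IRQ destination roughly as in the following
hypothetical helper (a sketch of the call pattern used by the converted
io_apic.c code later in this series, not a function added by the patch):

	/* Sketch: pick the destination APIC ID for an IRQ via the driver ops. */
	static unsigned int example_irq_destination(int irq, struct irq_cfg *cfg)
	{
		if (assign_irq_vector(irq, cfg, apic->target_cpus()))
			return BAD_APICID;	/* no free vector on the target CPUs */

		return apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
	}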
index b193e08..6adb5e6 100644 (file)
@@ -25,6 +25,7 @@
 #include <asm/ipi.h>
 #include <asm/genapic.h>
 #include <asm/pgtable.h>
+#include <asm/uv/uv.h>
 #include <asm/uv/uv_mmrs.h>
 #include <asm/uv/uv_hub.h>
 #include <asm/uv/bios.h>
@@ -117,12 +118,13 @@ static void uv_send_IPI_one(int cpu, int vector)
        int pnode;
 
        apicid = per_cpu(x86_cpu_to_apicid, cpu);
-       lapicid = apicid & 0x3f;                /* ZZZ macro needed */
+       lapicid = apicid & 0x3f; /* ZZZ macro needed */
        pnode = uv_apicid_to_pnode(apicid);
-       val =
-           (1UL << UVH_IPI_INT_SEND_SHFT) | (lapicid <<
-                                             UVH_IPI_INT_APIC_ID_SHFT) |
-           (vector << UVH_IPI_INT_VECTOR_SHFT);
+
+       val = (     1UL << UVH_IPI_INT_SEND_SHFT    ) |
+             ( lapicid << UVH_IPI_INT_APIC_ID_SHFT ) |
+             (  vector << UVH_IPI_INT_VECTOR_SHFT  );
+
        uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 }
 
@@ -136,22 +138,24 @@ static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
 
 static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
 {
-       unsigned int cpu;
        unsigned int this_cpu = smp_processor_id();
+       unsigned int cpu;
 
-       for_each_cpu(cpu, mask)
+       for_each_cpu(cpu, mask) {
                if (cpu != this_cpu)
                        uv_send_IPI_one(cpu, vector);
+       }
 }
 
 static void uv_send_IPI_allbutself(int vector)
 {
-       unsigned int cpu;
        unsigned int this_cpu = smp_processor_id();
+       unsigned int cpu;
 
-       for_each_online_cpu(cpu)
+       for_each_online_cpu(cpu) {
                if (cpu != this_cpu)
                        uv_send_IPI_one(cpu, vector);
+       }
 }
 
 static void uv_send_IPI_all(int vector)
@@ -170,21 +174,21 @@ static void uv_init_apic_ldr(void)
 
 static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
-       int cpu;
-
        /*
         * We're using fixed IRQ delivery, can only return one phys APIC ID.
         * May as well be the first.
         */
-       cpu = cpumask_first(cpumask);
+       int cpu = cpumask_first(cpumask);
+
        if ((unsigned)cpu < nr_cpu_ids)
                return per_cpu(x86_cpu_to_apicid, cpu);
        else
                return BAD_APICID;
 }
 
-static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-                                             const struct cpumask *andmask)
+static unsigned int
+uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                         const struct cpumask *andmask)
 {
        int cpu;
 
@@ -192,15 +196,17 @@ static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
         * We're using fixed IRQ delivery, can only return one phys APIC ID.
         * May as well be the first.
         */
-       for_each_cpu_and(cpu, cpumask, andmask)
+       for_each_cpu_and(cpu, cpumask, andmask) {
                if (cpumask_test_cpu(cpu, cpu_online_mask))
                        break;
+       }
        if (cpu < nr_cpu_ids)
                return per_cpu(x86_cpu_to_apicid, cpu);
+
        return BAD_APICID;
 }
 
-static unsigned int get_apic_id(unsigned long x)
+static unsigned int x2apic_get_apic_id(unsigned long x)
 {
        unsigned int id;
 
@@ -222,10 +228,10 @@ static unsigned long set_apic_id(unsigned int id)
 static unsigned int uv_read_apic_id(void)
 {
 
-       return get_apic_id(apic_read(APIC_ID));
+       return x2apic_get_apic_id(apic_read(APIC_ID));
 }
 
-static unsigned int phys_pkg_id(int index_msb)
+static int uv_phys_pkg_id(int initial_apicid, int index_msb)
 {
        return uv_read_apic_id() >> index_msb;
 }
@@ -236,25 +242,57 @@ static void uv_send_IPI_self(int vector)
 }
 
 struct genapic apic_x2apic_uv_x = {
-       .name = "UV large system",
-       .acpi_madt_oem_check = uv_acpi_madt_oem_check,
-       .int_delivery_mode = dest_Fixed,
-       .int_dest_mode = (APIC_DEST_PHYSICAL != 0),
-       .target_cpus = uv_target_cpus,
-       .vector_allocation_domain = uv_vector_allocation_domain,
-       .apic_id_registered = uv_apic_id_registered,
-       .init_apic_ldr = uv_init_apic_ldr,
-       .send_IPI_all = uv_send_IPI_all,
-       .send_IPI_allbutself = uv_send_IPI_allbutself,
-       .send_IPI_mask = uv_send_IPI_mask,
-       .send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself,
-       .send_IPI_self = uv_send_IPI_self,
-       .cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
-       .cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and,
-       .phys_pkg_id = phys_pkg_id,
-       .get_apic_id = get_apic_id,
-       .set_apic_id = set_apic_id,
-       .apic_id_mask = (0xFFFFFFFFu),
+
+       .name                           = "UV large system",
+       .probe                          = NULL,
+       .acpi_madt_oem_check            = uv_acpi_madt_oem_check,
+       .apic_id_registered             = uv_apic_id_registered,
+
+       .irq_delivery_mode              = dest_Fixed,
+       .irq_dest_mode                  = 1, /* logical */
+
+       .target_cpus                    = uv_target_cpus,
+       .disable_esr                    = 0,
+       .dest_logical                   = APIC_DEST_LOGICAL,
+       .check_apicid_used              = NULL,
+       .check_apicid_present           = NULL,
+
+       .vector_allocation_domain       = uv_vector_allocation_domain,
+       .init_apic_ldr                  = uv_init_apic_ldr,
+
+       .ioapic_phys_id_map             = NULL,
+       .setup_apic_routing             = NULL,
+       .multi_timer_check              = NULL,
+       .apicid_to_node                 = NULL,
+       .cpu_to_logical_apicid          = NULL,
+       .cpu_present_to_apicid          = default_cpu_present_to_apicid,
+       .apicid_to_cpu_present          = NULL,
+       .setup_portio_remap             = NULL,
+       .check_phys_apicid_present      = default_check_phys_apicid_present,
+       .enable_apic_mode               = NULL,
+       .phys_pkg_id                    = uv_phys_pkg_id,
+       .mps_oem_check                  = NULL,
+
+       .get_apic_id                    = x2apic_get_apic_id,
+       .set_apic_id                    = set_apic_id,
+       .apic_id_mask                   = 0xFFFFFFFFu,
+
+       .cpu_mask_to_apicid             = uv_cpu_mask_to_apicid,
+       .cpu_mask_to_apicid_and         = uv_cpu_mask_to_apicid_and,
+
+       .send_IPI_mask                  = uv_send_IPI_mask,
+       .send_IPI_mask_allbutself       = uv_send_IPI_mask_allbutself,
+       .send_IPI_allbutself            = uv_send_IPI_allbutself,
+       .send_IPI_all                   = uv_send_IPI_all,
+       .send_IPI_self                  = uv_send_IPI_self,
+
+       .wakeup_cpu                     = NULL,
+       .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
+       .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+       .wait_for_init_deassert         = NULL,
+       .smp_callin_clear_local_apic    = NULL,
+       .store_NMI_vector               = NULL,
+       .inquire_remote_apic            = NULL,
 };
 
 static __cpuinit void set_x2apic_extra_bits(int pnode)
index b9a4d8c..f5b2722 100644 (file)
 #include <asm/bios_ebda.h>
 #include <asm/trampoline.h>
 
-/* boot cpu pda */
-static struct x8664_pda _boot_cpu_pda;
-
-#ifdef CONFIG_SMP
-/*
- * We install an empty cpu_pda pointer table to indicate to early users
- * (numa_set_node) that the cpu_pda pointer table for cpus other than
- * the boot cpu is not yet setup.
- */
-static struct x8664_pda *__cpu_pda[NR_CPUS] __initdata;
-#else
-static struct x8664_pda *__cpu_pda[NR_CPUS] __read_mostly;
-#endif
-
-void __init x86_64_init_pda(void)
-{
-       _cpu_pda = __cpu_pda;
-       cpu_pda(0) = &_boot_cpu_pda;
-       pda_init(0);
-}
-
 static void __init zap_identity_mappings(void)
 {
        pgd_t *pgd = pgd_offset_k(0UL);
@@ -112,8 +91,6 @@ void __init x86_64_start_kernel(char * real_mode_data)
        if (console_loglevel == 10)
                early_printk("Kernel alive\n");
 
-       x86_64_init_pda();
-
        x86_64_start_reservations(real_mode_data);
 }
 
index e835b4e..2a0aad7 100644 (file)
@@ -19,6 +19,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/setup.h>
 #include <asm/processor-flags.h>
+#include <asm/percpu.h>
 
 /* Physical address */
 #define pa(X) ((X) - __PAGE_OFFSET)
@@ -429,14 +430,34 @@ is386:    movl $2,%ecx            # set MP
        ljmp $(__KERNEL_CS),$1f
 1:     movl $(__KERNEL_DS),%eax        # reload all the segment registers
        movl %eax,%ss                   # after changing gdt.
-       movl %eax,%fs                   # gets reset once there's real percpu
 
        movl $(__USER_DS),%eax          # DS/ES contains default USER segment
        movl %eax,%ds
        movl %eax,%es
 
-       xorl %eax,%eax                  # Clear GS and LDT
+       movl $(__KERNEL_PERCPU), %eax
+       movl %eax,%fs                   # set this cpu's percpu
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+       /*
+        * The linker can't handle this by relocation.  Manually set
+        * base address in stack canary segment descriptor.
+        */
+       cmpb $0,ready
+       jne 1f
+       movl $per_cpu__gdt_page,%eax
+       movl $per_cpu__stack_canary,%ecx
+       subl $20, %ecx
+       movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
+       shrl $16, %ecx
+       movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
+       movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
+1:
+#endif
+       movl $(__KERNEL_STACK_CANARY),%eax
        movl %eax,%gs
+
+       xorl %eax,%eax                  # Clear LDT
        lldt %ax
 
        cld                     # gcc2 wants the direction flag cleared at all times
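
The stack-protector fixup above packs the 32-bit canary address (minus 20, so the
canary word ends up at the fixed %gs offset the compiler expects) into the base
fields of the GDT_ENTRY_STACK_CANARY descriptor.  The same byte layout in C,
purely as an illustration of the i386 segment-descriptor format:

	/* Sketch: store a 32-bit base into an 8-byte GDT descriptor, using the
	 * same byte offsets as the movw/movb sequence above. */
	static void example_set_desc_base(unsigned char *desc, unsigned int base)
	{
		desc[2] = base & 0xff;			/* base bits  0..7  */
		desc[3] = (base >> 8) & 0xff;		/* base bits  8..15 */
		desc[4] = (base >> 16) & 0xff;		/* base bits 16..23 */
		desc[7] = (base >> 24) & 0xff;		/* base bits 24..31 */
	}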
@@ -446,8 +467,6 @@ is386:      movl $2,%ecx            # set MP
        movb $1, ready
        cmpb $0,%cl             # the first CPU calls start_kernel
        je   1f
-       movl $(__KERNEL_PERCPU), %eax
-       movl %eax,%fs           # set this cpu's percpu
        movl (stack_start), %esp
 1:
 #endif /* CONFIG_SMP */
@@ -548,11 +567,7 @@ early_fault:
        pushl %eax
        pushl %edx              /* trapno */
        pushl $fault_msg
-#ifdef CONFIG_EARLY_PRINTK
-       call early_printk
-#else
        call printk
-#endif
 #endif
        call dump_stack
 hlt_loop:
@@ -580,11 +595,10 @@ ignore_int:
        pushl 32(%esp)
        pushl 40(%esp)
        pushl $int_msg
-#ifdef CONFIG_EARLY_PRINTK
-       call early_printk
-#else
        call printk
-#endif
+
+       call dump_stack
+
        addl $(5*4),%esp
        popl %ds
        popl %es
@@ -660,7 +674,7 @@ early_recursion_flag:
        .long 0
 
 int_msg:
-       .asciz "Unknown interrupt or fault at EIP %p %p %p\n"
+       .asciz "Unknown interrupt or fault at: %p %p %p\n"
 
 fault_msg:
 /* fault info: */
index 0e275d4..2e648e3 100644 (file)
@@ -19,6 +19,7 @@
 #include <asm/msr.h>
 #include <asm/cache.h>
 #include <asm/processor-flags.h>
+#include <asm/percpu.h>
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/asm-offsets.h>
@@ -226,12 +227,15 @@ ENTRY(secondary_startup_64)
        movl %eax,%fs
        movl %eax,%gs
 
-       /* 
-        * Setup up a dummy PDA. this is just for some early bootup code
-        * that does in_interrupt() 
-        */ 
+       /* Set up %gs.
+        *
+        * The base of %gs always points to the bottom of the irqstack
+        * union.  If the stack protector canary is enabled, it is
+        * located at %gs:40.  Note that, on SMP, the boot cpu uses
+        * the init data section until per-cpu areas are set up.
+        */
        movl    $MSR_GS_BASE,%ecx
-       movq    $empty_zero_page,%rax
+       movq    initial_gs(%rip),%rax
        movq    %rax,%rdx
        shrq    $32,%rdx
        wrmsr   
@@ -257,6 +261,8 @@ ENTRY(secondary_startup_64)
        .align  8
        ENTRY(initial_code)
        .quad   x86_64_start_kernel
+       ENTRY(initial_gs)
+       .quad   INIT_PER_CPU_VAR(irq_stack_union)
        __FINITDATA
 
        ENTRY(stack_start)
@@ -401,7 +407,8 @@ NEXT_PAGE(level2_spare_pgt)
        .globl early_gdt_descr
 early_gdt_descr:
        .word   GDT_ENTRIES*8-1
-       .quad   per_cpu__gdt_page
+early_gdt_descr_base:
+       .quad   INIT_PER_CPU_VAR(gdt_page)
 
 ENTRY(phys_base)
        /* This must match the first entry in level2_kernel_pgt */
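
The 64-bit boot path now loads MSR_GS_BASE from the new initial_gs quadword,
which points at the init-section copy of irq_stack_union.  What the
movq/shrq/wrmsr sequence does, written out as a hedged C sketch (wrmsr takes
the low half of the value in EAX and the high half in EDX):

	/* Sketch only: load a 64-bit GS base the way the assembly above does. */
	static void example_load_gs_base(unsigned long gs_base)
	{
		unsigned int lo = (unsigned int)gs_base;
		unsigned int hi = (unsigned int)(gs_base >> 32);

		asm volatile("wrmsr" : : "c" (MSR_GS_BASE), "a" (lo), "d" (hi));
	}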
index bc7ac4d..7248ca1 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *     Intel IO-APIC support for multi-Pentium hosts.
  *
- *     Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
+ *     Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
  *
  *     Many thanks to Stig Venaas for trying out countless experimental
  *     patches and reporting/debugging problems patiently!
@@ -46,6 +46,7 @@
 #include <asm/idle.h>
 #include <asm/io.h>
 #include <asm/smp.h>
+#include <asm/cpu.h>
 #include <asm/desc.h>
 #include <asm/proto.h>
 #include <asm/acpi.h>
@@ -61,9 +62,7 @@
 #include <asm/uv/uv_hub.h>
 #include <asm/uv/uv_irq.h>
 
-#include <mach_ipi.h>
-#include <mach_apic.h>
-#include <mach_apicdef.h>
+#include <asm/genapic.h>
 
 #define __apicdebuginit(type) static type __init
 
@@ -82,11 +81,11 @@ static DEFINE_SPINLOCK(vector_lock);
 int nr_ioapic_registers[MAX_IO_APICS];
 
 /* I/O APIC entries */
-struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
+struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
 int nr_ioapics;
 
 /* MP IRQ source entries */
-struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
+struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
 
 /* # of MP IRQ source entries */
 int mp_irq_entries;
@@ -99,10 +98,19 @@ DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
 
 int skip_ioapic_setup;
 
+void arch_disable_smp_support(void)
+{
+#ifdef CONFIG_PCI
+       noioapicquirk = 1;
+       noioapicreroute = -1;
+#endif
+       skip_ioapic_setup = 1;
+}
+
 static int __init parse_noapic(char *str)
 {
        /* disable IO-APIC */
-       disable_ioapic_setup();
+       arch_disable_smp_support();
        return 0;
 }
 early_param("noapic", parse_noapic);
@@ -356,7 +364,7 @@ set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
 
        if (!cfg->move_in_progress) {
                /* it means that domain is not changed */
-               if (!cpumask_intersects(&desc->affinity, mask))
+               if (!cpumask_intersects(desc->affinity, mask))
                        cfg->move_desc_pending = 1;
        }
 }
@@ -386,7 +394,7 @@ struct io_apic {
 static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
 {
        return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
-               + (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
+               + (mp_ioapics[idx].apicaddr & ~PAGE_MASK);
 }
 
 static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
@@ -478,7 +486,7 @@ __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
        io_apic_write(apic, 0x10 + 2*pin, eu.w1);
 }
 
-static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
+void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
 {
        unsigned long flags;
        spin_lock_irqsave(&ioapic_lock, flags);
@@ -513,11 +521,11 @@ static void send_cleanup_vector(struct irq_cfg *cfg)
                for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
                        cfg->move_cleanup_count++;
                for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
-                       send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
+                       apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
        } else {
                cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
                cfg->move_cleanup_count = cpumask_weight(cleanup_mask);
-               send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+               apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
                free_cpumask_var(cleanup_mask);
        }
        cfg->move_in_progress = 0;
@@ -562,8 +570,9 @@ static int
 assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
 
 /*
- * Either sets desc->affinity to a valid value, and returns cpu_mask_to_apicid
- * of that, or returns BAD_APICID and leaves desc->affinity untouched.
+ * Either sets desc->affinity to a valid value, and returns
+ * ->cpu_mask_to_apicid of that, or returns BAD_APICID and
+ * leaves desc->affinity untouched.
  */
 static unsigned int
 set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
@@ -579,9 +588,10 @@ set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
        if (assign_irq_vector(irq, cfg, mask))
                return BAD_APICID;
 
-       cpumask_and(&desc->affinity, cfg->domain, mask);
+       cpumask_and(desc->affinity, cfg->domain, mask);
        set_extra_move_desc(desc, mask);
-       return cpu_mask_to_apicid_and(&desc->affinity, cpu_online_mask);
+
+       return apic->cpu_mask_to_apicid_and(desc->affinity, cpu_online_mask);
 }
 
 static void
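
The rewritten comment above states set_desc_affinity()'s contract: on success
desc->affinity is updated and the matching APIC ID is returned, otherwise
BAD_APICID is returned and the affinity mask is left untouched.  A hypothetical
chip ->set_affinity handler honouring that contract (sketch only, not part of
this patch):

	static void example_set_affinity(unsigned int irq, const struct cpumask *mask)
	{
		struct irq_desc *desc = irq_to_desc(irq);
		unsigned int dest = set_desc_affinity(desc, mask);

		if (dest == BAD_APICID)
			return;		/* vector assignment failed, keep old routing */

		/* reprogram the routing entry / MSI message with 'dest' here */
	}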
@@ -796,23 +806,6 @@ static void clear_IO_APIC (void)
                        clear_IO_APIC_pin(apic, pin);
 }
 
-#if !defined(CONFIG_SMP) && defined(CONFIG_X86_32)
-void send_IPI_self(int vector)
-{
-       unsigned int cfg;
-
-       /*
-        * Wait for idle.
-        */
-       apic_wait_icr_idle();
-       cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
-       /*
-        * Send the IPI. The write to APIC_ICR fires this off.
-        */
-       apic_write(APIC_ICR, cfg);
-}
-#endif /* !CONFIG_SMP && CONFIG_X86_32*/
-
 #ifdef CONFIG_X86_32
 /*
  * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
@@ -944,10 +937,10 @@ static int find_irq_entry(int apic, int pin, int type)
        int i;
 
        for (i = 0; i < mp_irq_entries; i++)
-               if (mp_irqs[i].mp_irqtype == type &&
-                   (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
-                    mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
-                   mp_irqs[i].mp_dstirq == pin)
+               if (mp_irqs[i].irqtype == type &&
+                   (mp_irqs[i].dstapic == mp_ioapics[apic].apicid ||
+                    mp_irqs[i].dstapic == MP_APIC_ALL) &&
+                   mp_irqs[i].dstirq == pin)
                        return i;
 
        return -1;
@@ -961,13 +954,13 @@ static int __init find_isa_irq_pin(int irq, int type)
        int i;
 
        for (i = 0; i < mp_irq_entries; i++) {
-               int lbus = mp_irqs[i].mp_srcbus;
+               int lbus = mp_irqs[i].srcbus;
 
                if (test_bit(lbus, mp_bus_not_pci) &&
-                   (mp_irqs[i].mp_irqtype == type) &&
-                   (mp_irqs[i].mp_srcbusirq == irq))
+                   (mp_irqs[i].irqtype == type) &&
+                   (mp_irqs[i].srcbusirq == irq))
 
-                       return mp_irqs[i].mp_dstirq;
+                       return mp_irqs[i].dstirq;
        }
        return -1;
 }
@@ -977,17 +970,17 @@ static int __init find_isa_irq_apic(int irq, int type)
        int i;
 
        for (i = 0; i < mp_irq_entries; i++) {
-               int lbus = mp_irqs[i].mp_srcbus;
+               int lbus = mp_irqs[i].srcbus;
 
                if (test_bit(lbus, mp_bus_not_pci) &&
-                   (mp_irqs[i].mp_irqtype == type) &&
-                   (mp_irqs[i].mp_srcbusirq == irq))
+                   (mp_irqs[i].irqtype == type) &&
+                   (mp_irqs[i].srcbusirq == irq))
                        break;
        }
        if (i < mp_irq_entries) {
                int apic;
                for(apic = 0; apic < nr_ioapics; apic++) {
-                       if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
+                       if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic)
                                return apic;
                }
        }
@@ -1012,23 +1005,23 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
                return -1;
        }
        for (i = 0; i < mp_irq_entries; i++) {
-               int lbus = mp_irqs[i].mp_srcbus;
+               int lbus = mp_irqs[i].srcbus;
 
                for (apic = 0; apic < nr_ioapics; apic++)
-                       if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
-                           mp_irqs[i].mp_dstapic == MP_APIC_ALL)
+                       if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic ||
+                           mp_irqs[i].dstapic == MP_APIC_ALL)
                                break;
 
                if (!test_bit(lbus, mp_bus_not_pci) &&
-                   !mp_irqs[i].mp_irqtype &&
+                   !mp_irqs[i].irqtype &&
                    (bus == lbus) &&
-                   (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
-                       int irq = pin_2_irq(i,apic,mp_irqs[i].mp_dstirq);
+                   (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
+                       int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq);
 
                        if (!(apic || IO_APIC_IRQ(irq)))
                                continue;
 
-                       if (pin == (mp_irqs[i].mp_srcbusirq & 3))
+                       if (pin == (mp_irqs[i].srcbusirq & 3))
                                return irq;
                        /*
                         * Use the first all-but-pin matching entry as a
@@ -1071,7 +1064,7 @@ static int EISA_ELCR(unsigned int irq)
  * EISA conforming in the MP table, that means its trigger type must
  * be read in from the ELCR */
 
-#define default_EISA_trigger(idx)      (EISA_ELCR(mp_irqs[idx].mp_srcbusirq))
+#define default_EISA_trigger(idx)      (EISA_ELCR(mp_irqs[idx].srcbusirq))
 #define default_EISA_polarity(idx)     default_ISA_polarity(idx)
 
 /* PCI interrupts are always polarity one level triggered,
@@ -1088,13 +1081,13 @@ static int EISA_ELCR(unsigned int irq)
 
 static int MPBIOS_polarity(int idx)
 {
-       int bus = mp_irqs[idx].mp_srcbus;
+       int bus = mp_irqs[idx].srcbus;
        int polarity;
 
        /*
         * Determine IRQ line polarity (high active or low active):
         */
-       switch (mp_irqs[idx].mp_irqflag & 3)
+       switch (mp_irqs[idx].irqflag & 3)
        {
                case 0: /* conforms, ie. bus-type dependent polarity */
                        if (test_bit(bus, mp_bus_not_pci))
@@ -1130,13 +1123,13 @@ static int MPBIOS_polarity(int idx)
 
 static int MPBIOS_trigger(int idx)
 {
-       int bus = mp_irqs[idx].mp_srcbus;
+       int bus = mp_irqs[idx].srcbus;
        int trigger;
 
        /*
         * Determine IRQ trigger mode (edge or level sensitive):
         */
-       switch ((mp_irqs[idx].mp_irqflag>>2) & 3)
+       switch ((mp_irqs[idx].irqflag>>2) & 3)
        {
                case 0: /* conforms, ie. bus-type dependent */
                        if (test_bit(bus, mp_bus_not_pci))
@@ -1214,16 +1207,16 @@ int (*ioapic_renumber_irq)(int ioapic, int irq);
 static int pin_2_irq(int idx, int apic, int pin)
 {
        int irq, i;
-       int bus = mp_irqs[idx].mp_srcbus;
+       int bus = mp_irqs[idx].srcbus;
 
        /*
         * Debugging check, we are in big trouble if this message pops up!
         */
-       if (mp_irqs[idx].mp_dstirq != pin)
+       if (mp_irqs[idx].dstirq != pin)
                printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
 
        if (test_bit(bus, mp_bus_not_pci)) {
-               irq = mp_irqs[idx].mp_srcbusirq;
+               irq = mp_irqs[idx].srcbusirq;
        } else {
                /*
                 * PCI IRQs are mapped in order
@@ -1315,7 +1308,7 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
                int new_cpu;
                int vector, offset;
 
-               vector_allocation_domain(cpu, tmp_mask);
+               apic->vector_allocation_domain(cpu, tmp_mask);
 
                vector = current_vector;
                offset = current_offset;
@@ -1485,10 +1478,10 @@ static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long t
                                              handle_edge_irq, "edge");
 }
 
-static int setup_ioapic_entry(int apic, int irq,
-                             struct IO_APIC_route_entry *entry,
-                             unsigned int destination, int trigger,
-                             int polarity, int vector)
+int setup_ioapic_entry(int apic_id, int irq,
+                      struct IO_APIC_route_entry *entry,
+                      unsigned int destination, int trigger,
+                      int polarity, int vector)
 {
        /*
         * add it to the IO-APIC irq-routing table:
@@ -1497,25 +1490,25 @@ static int setup_ioapic_entry(int apic, int irq,
 
 #ifdef CONFIG_INTR_REMAP
        if (intr_remapping_enabled) {
-               struct intel_iommu *iommu = map_ioapic_to_ir(apic);
+               struct intel_iommu *iommu = map_ioapic_to_ir(apic_id);
                struct irte irte;
                struct IR_IO_APIC_route_entry *ir_entry =
                        (struct IR_IO_APIC_route_entry *) entry;
                int index;
 
                if (!iommu)
-                       panic("No mapping iommu for ioapic %d\n", apic);
+                       panic("No mapping iommu for ioapic %d\n", apic_id);
 
                index = alloc_irte(iommu, irq, 1);
                if (index < 0)
-                       panic("Failed to allocate IRTE for ioapic %d\n", apic);
+                       panic("Failed to allocate IRTE for ioapic %d\n", apic_id);
 
                memset(&irte, 0, sizeof(irte));
 
                irte.present = 1;
-               irte.dst_mode = INT_DEST_MODE;
+               irte.dst_mode = apic->irq_dest_mode;
                irte.trigger_mode = trigger;
-               irte.dlvry_mode = INT_DELIVERY_MODE;
+               irte.dlvry_mode = apic->irq_delivery_mode;
                irte.vector = vector;
                irte.dest_id = IRTE_DEST(destination);
 
@@ -1528,8 +1521,8 @@ static int setup_ioapic_entry(int apic, int irq,
        } else
 #endif
        {
-               entry->delivery_mode = INT_DELIVERY_MODE;
-               entry->dest_mode = INT_DEST_MODE;
+               entry->delivery_mode = apic->irq_delivery_mode;
+               entry->dest_mode = apic->irq_dest_mode;
                entry->dest = destination;
        }
 
@@ -1546,7 +1539,7 @@ static int setup_ioapic_entry(int apic, int irq,
        return 0;
 }
 
-static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_desc *desc,
+static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq_desc *desc,
                              int trigger, int polarity)
 {
        struct irq_cfg *cfg;
@@ -1558,22 +1551,22 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
 
        cfg = desc->chip_data;
 
-       if (assign_irq_vector(irq, cfg, TARGET_CPUS))
+       if (assign_irq_vector(irq, cfg, apic->target_cpus()))
                return;
 
-       dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
+       dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
 
        apic_printk(APIC_VERBOSE,KERN_DEBUG
                    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
                    "IRQ %d Mode:%i Active:%i)\n",
-                   apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector,
+                   apic_id, mp_ioapics[apic_id].apicid, pin, cfg->vector,
                    irq, trigger, polarity);
 
 
-       if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
+       if (setup_ioapic_entry(mp_ioapics[apic_id].apicid, irq, &entry,
                               dest, trigger, polarity, cfg->vector)) {
                printk("Failed to setup ioapic entry for ioapic  %d, pin %d\n",
-                      mp_ioapics[apic].mp_apicid, pin);
+                      mp_ioapics[apic_id].apicid, pin);
                __clear_irq_vector(irq, cfg);
                return;
        }
@@ -1582,12 +1575,12 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
        if (irq < NR_IRQS_LEGACY)
                disable_8259A_irq(irq);
 
-       ioapic_write_entry(apic, pin, entry);
+       ioapic_write_entry(apic_id, pin, entry);
 }
 
 static void __init setup_IO_APIC_irqs(void)
 {
-       int apic, pin, idx, irq;
+       int apic_id, pin, idx, irq;
        int notcon = 0;
        struct irq_desc *desc;
        struct irq_cfg *cfg;
@@ -1595,21 +1588,19 @@ static void __init setup_IO_APIC_irqs(void)
 
        apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
 
-       for (apic = 0; apic < nr_ioapics; apic++) {
-               for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
+       for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {
+               for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) {
 
-                       idx = find_irq_entry(apic, pin, mp_INT);
+                       idx = find_irq_entry(apic_id, pin, mp_INT);
                        if (idx == -1) {
                                if (!notcon) {
                                        notcon = 1;
                                        apic_printk(APIC_VERBOSE,
                                                KERN_DEBUG " %d-%d",
-                                               mp_ioapics[apic].mp_apicid,
-                                               pin);
+                                               mp_ioapics[apic_id].apicid, pin);
                                } else
                                        apic_printk(APIC_VERBOSE, " %d-%d",
-                                               mp_ioapics[apic].mp_apicid,
-                                               pin);
+                                               mp_ioapics[apic_id].apicid, pin);
                                continue;
                        }
                        if (notcon) {
@@ -1618,20 +1609,25 @@ static void __init setup_IO_APIC_irqs(void)
                                notcon = 0;
                        }
 
-                       irq = pin_2_irq(idx, apic, pin);
-#ifdef CONFIG_X86_32
-                       if (multi_timer_check(apic, irq))
+                       irq = pin_2_irq(idx, apic_id, pin);
+
+                       /*
+                        * Skip the timer IRQ if there's a quirk handler
+                        * installed and if it returns 1:
+                        */
+                       if (apic->multi_timer_check &&
+                                       apic->multi_timer_check(apic_id, irq))
                                continue;
-#endif
+
                        desc = irq_to_desc_alloc_cpu(irq, cpu);
                        if (!desc) {
                                printk(KERN_INFO "can not get irq_desc for %d\n", irq);
                                continue;
                        }
                        cfg = desc->chip_data;
-                       add_pin_to_irq_cpu(cfg, cpu, apic, pin);
+                       add_pin_to_irq_cpu(cfg, cpu, apic_id, pin);
 
-                       setup_IO_APIC_irq(apic, pin, irq, desc,
+                       setup_IO_APIC_irq(apic_id, pin, irq, desc,
                                        irq_trigger(idx), irq_polarity(idx));
                }
        }
@@ -1644,7 +1640,7 @@ static void __init setup_IO_APIC_irqs(void)
 /*
  * Set up the timer pin, possibly with the 8259A-master behind.
  */
-static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
+static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
                                        int vector)
 {
        struct IO_APIC_route_entry entry;
@@ -1660,10 +1656,10 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
         * We use logical delivery to get the timer IRQ
         * to the first CPU.
         */
-       entry.dest_mode = INT_DEST_MODE;
-       entry.mask = 1;                                 /* mask IRQ now */
-       entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
-       entry.delivery_mode = INT_DELIVERY_MODE;
+       entry.dest_mode = apic->irq_dest_mode;
+       entry.mask = 0;                 /* don't mask IRQ for edge */
+       entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus());
+       entry.delivery_mode = apic->irq_delivery_mode;
        entry.polarity = 0;
        entry.trigger = 0;
        entry.vector = vector;
@@ -1677,7 +1673,7 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
        /*
         * Add it to the IO-APIC irq-routing table:
         */
-       ioapic_write_entry(apic, pin, entry);
+       ioapic_write_entry(apic_id, pin, entry);
 }
 
 
@@ -1699,7 +1695,7 @@ __apicdebuginit(void) print_IO_APIC(void)
        printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
        for (i = 0; i < nr_ioapics; i++)
                printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
-                      mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]);
+                      mp_ioapics[i].apicid, nr_ioapic_registers[i]);
 
        /*
         * We are a bit conservative about what we expect.  We have to
@@ -1719,7 +1715,7 @@ __apicdebuginit(void) print_IO_APIC(void)
        spin_unlock_irqrestore(&ioapic_lock, flags);
 
        printk("\n");
-       printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
+       printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
        printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
        printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
        printk(KERN_DEBUG ".......    : Delivery Type: %X\n", reg_00.bits.delivery_type);
@@ -2090,7 +2086,7 @@ static void __init setup_ioapic_ids_from_mpc(void)
 {
        union IO_APIC_reg_00 reg_00;
        physid_mask_t phys_id_present_map;
-       int apic;
+       int apic_id;
        int i;
        unsigned char old_id;
        unsigned long flags;
@@ -2109,26 +2105,26 @@ static void __init setup_ioapic_ids_from_mpc(void)
         * This is broken; anything with a real cpu count has to
         * circumvent this idiocy regardless.
         */
-       phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
+       phys_id_present_map = apic->ioapic_phys_id_map(phys_cpu_present_map);
 
        /*
         * Set the IOAPIC ID to the value stored in the MPC table.
         */
-       for (apic = 0; apic < nr_ioapics; apic++) {
+       for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {
 
                /* Read the register 0 value */
                spin_lock_irqsave(&ioapic_lock, flags);
-               reg_00.raw = io_apic_read(apic, 0);
+               reg_00.raw = io_apic_read(apic_id, 0);
                spin_unlock_irqrestore(&ioapic_lock, flags);
 
-               old_id = mp_ioapics[apic].mp_apicid;
+               old_id = mp_ioapics[apic_id].apicid;
 
-               if (mp_ioapics[apic].mp_apicid >= get_physical_broadcast()) {
+               if (mp_ioapics[apic_id].apicid >= get_physical_broadcast()) {
                        printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
-                               apic, mp_ioapics[apic].mp_apicid);
+                               apic_id, mp_ioapics[apic_id].apicid);
                        printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
                                reg_00.bits.ID);
-                       mp_ioapics[apic].mp_apicid = reg_00.bits.ID;
+                       mp_ioapics[apic_id].apicid = reg_00.bits.ID;
                }
 
                /*
@@ -2136,10 +2132,10 @@ static void __init setup_ioapic_ids_from_mpc(void)
                 * system must have a unique ID or we get lots of nice
                 * 'stuck on smp_invalidate_needed IPI wait' messages.
                 */
-               if (check_apicid_used(phys_id_present_map,
-                                       mp_ioapics[apic].mp_apicid)) {
+               if (apic->check_apicid_used(phys_id_present_map,
+                                       mp_ioapics[apic_id].apicid)) {
                        printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
-                               apic, mp_ioapics[apic].mp_apicid);
+                               apic_id, mp_ioapics[apic_id].apicid);
                        for (i = 0; i < get_physical_broadcast(); i++)
                                if (!physid_isset(i, phys_id_present_map))
                                        break;
@@ -2148,13 +2144,13 @@ static void __init setup_ioapic_ids_from_mpc(void)
                        printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
                                i);
                        physid_set(i, phys_id_present_map);
-                       mp_ioapics[apic].mp_apicid = i;
+                       mp_ioapics[apic_id].apicid = i;
                } else {
                        physid_mask_t tmp;
-                       tmp = apicid_to_cpu_present(mp_ioapics[apic].mp_apicid);
+                       tmp = apic->apicid_to_cpu_present(mp_ioapics[apic_id].apicid);
                        apic_printk(APIC_VERBOSE, "Setting %d in the "
                                        "phys_id_present_map\n",
-                                       mp_ioapics[apic].mp_apicid);
+                                       mp_ioapics[apic_id].apicid);
                        physids_or(phys_id_present_map, phys_id_present_map, tmp);
                }
 
@@ -2163,11 +2159,11 @@ static void __init setup_ioapic_ids_from_mpc(void)
                 * We need to adjust the IRQ routing table
                 * if the ID changed.
                 */
-               if (old_id != mp_ioapics[apic].mp_apicid)
+               if (old_id != mp_ioapics[apic_id].apicid)
                        for (i = 0; i < mp_irq_entries; i++)
-                               if (mp_irqs[i].mp_dstapic == old_id)
-                                       mp_irqs[i].mp_dstapic
-                                               = mp_ioapics[apic].mp_apicid;
+                               if (mp_irqs[i].dstapic == old_id)
+                                       mp_irqs[i].dstapic
+                                               = mp_ioapics[apic_id].apicid;
 
                /*
                 * Read the right value from the MPC table and
@@ -2175,20 +2171,20 @@ static void __init setup_ioapic_ids_from_mpc(void)
                 */
                apic_printk(APIC_VERBOSE, KERN_INFO
                        "...changing IO-APIC physical APIC ID to %d ...",
-                       mp_ioapics[apic].mp_apicid);
+                       mp_ioapics[apic_id].apicid);
 
-               reg_00.bits.ID = mp_ioapics[apic].mp_apicid;
+               reg_00.bits.ID = mp_ioapics[apic_id].apicid;
                spin_lock_irqsave(&ioapic_lock, flags);
-               io_apic_write(apic, 0, reg_00.raw);
+               io_apic_write(apic_id, 0, reg_00.raw);
                spin_unlock_irqrestore(&ioapic_lock, flags);
 
                /*
                 * Sanity check
                 */
                spin_lock_irqsave(&ioapic_lock, flags);
-               reg_00.raw = io_apic_read(apic, 0);
+               reg_00.raw = io_apic_read(apic_id, 0);
                spin_unlock_irqrestore(&ioapic_lock, flags);
-               if (reg_00.bits.ID != mp_ioapics[apic].mp_apicid)
+               if (reg_00.bits.ID != mp_ioapics[apic_id].apicid)
                        printk("could not set ID!\n");
                else
                        apic_printk(APIC_VERBOSE, " ok.\n");
@@ -2291,7 +2287,7 @@ static int ioapic_retrigger_irq(unsigned int irq)
        unsigned long flags;
 
        spin_lock_irqsave(&vector_lock, flags);
-       send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
+       apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
        spin_unlock_irqrestore(&vector_lock, flags);
 
        return 1;
@@ -2299,7 +2295,7 @@ static int ioapic_retrigger_irq(unsigned int irq)
 #else
 static int ioapic_retrigger_irq(unsigned int irq)
 {
-       send_IPI_self(irq_cfg(irq)->vector);
+       apic->send_IPI_self(irq_cfg(irq)->vector);
 
        return 1;
 }
@@ -2363,7 +2359,7 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
 
        set_extra_move_desc(desc, mask);
 
-       dest = cpu_mask_to_apicid_and(cfg->domain, mask);
+       dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
 
        modify_ioapic_rte = desc->status & IRQ_LEVEL;
        if (modify_ioapic_rte) {
@@ -2383,7 +2379,7 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
        if (cfg->move_in_progress)
                send_cleanup_vector(cfg);
 
-       cpumask_copy(&desc->affinity, mask);
+       cpumask_copy(desc->affinity, mask);
 }
 
 static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
@@ -2405,11 +2401,11 @@ static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
        }
 
        /* everthing is clear. we have right of way */
-       migrate_ioapic_irq_desc(desc, &desc->pending_mask);
+       migrate_ioapic_irq_desc(desc, desc->pending_mask);
 
        ret = 0;
        desc->status &= ~IRQ_MOVE_PENDING;
-       cpumask_clear(&desc->pending_mask);
+       cpumask_clear(desc->pending_mask);
 
 unmask:
        unmask_IO_APIC_irq_desc(desc);
@@ -2434,7 +2430,7 @@ static void ir_irq_migration(struct work_struct *work)
                                continue;
                        }
 
-                       desc->chip->set_affinity(irq, &desc->pending_mask);
+                       desc->chip->set_affinity(irq, desc->pending_mask);
                        spin_unlock_irqrestore(&desc->lock, flags);
                }
        }
@@ -2448,7 +2444,7 @@ static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
 {
        if (desc->status & IRQ_LEVEL) {
                desc->status |= IRQ_MOVE_PENDING;
-               cpumask_copy(&desc->pending_mask, mask);
+               cpumask_copy(desc->pending_mask, mask);
                migrate_irq_remapped_level_desc(desc);
                return;
        }
@@ -2516,7 +2512,7 @@ static void irq_complete_move(struct irq_desc **descp)
 
                /* domain has not changed, but affinity did */
                me = smp_processor_id();
-               if (cpu_isset(me, desc->affinity)) {
+               if (cpumask_test_cpu(me, desc->affinity)) {
                        *descp = desc = move_irq_desc(desc, me);
                        /* get the new one */
                        cfg = desc->chip_data;
@@ -2867,19 +2863,15 @@ static inline void __init check_timer(void)
        int cpu = boot_cpu_id;
        int apic1, pin1, apic2, pin2;
        unsigned long flags;
-       unsigned int ver;
        int no_pin1 = 0;
 
        local_irq_save(flags);
 
-       ver = apic_read(APIC_LVR);
-       ver = GET_APIC_VERSION(ver);
-
        /*
         * get/set the timer IRQ vector:
         */
        disable_8259A_irq(0);
-       assign_irq_vector(0, cfg, TARGET_CPUS);
+       assign_irq_vector(0, cfg, apic->target_cpus());
 
        /*
         * As IRQ0 is to be enabled in the 8259A, the virtual
@@ -2893,7 +2885,13 @@ static inline void __init check_timer(void)
        apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
        init_8259A(1);
 #ifdef CONFIG_X86_32
-       timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
+       {
+               unsigned int ver;
+
+               ver = apic_read(APIC_LVR);
+               ver = GET_APIC_VERSION(ver);
+               timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
+       }
 #endif
 
        pin1  = find_isa_irq_pin(0, mp_INT);
@@ -2932,8 +2930,17 @@ static inline void __init check_timer(void)
                if (no_pin1) {
                        add_pin_to_irq_cpu(cfg, cpu, apic1, pin1);
                        setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
+               } else {
+                       /* For an edge-triggered timer, setup_IO_APIC_irq()
+                        * already leaves it unmasked, so we only need to
+                        * unmask here if it is level-triggered.
+                        * (Do we really have a level-triggered timer?)
+                        */
+                       int idx;
+                       idx = find_irq_entry(apic1, pin1, mp_INT);
+                       if (idx != -1 && irq_trigger(idx))
+                               unmask_IO_APIC_irq_desc(desc);
                }
-               unmask_IO_APIC_irq_desc(desc);
                if (timer_irq_works()) {
                        if (nmi_watchdog == NMI_IO_APIC) {
                                setup_nmi();
@@ -2947,6 +2954,7 @@ static inline void __init check_timer(void)
                if (intr_remapping_enabled)
                        panic("timer doesn't work through Interrupt-remapped IO-APIC");
 #endif
+               local_irq_disable();
                clear_IO_APIC_pin(apic1, pin1);
                if (!no_pin1)
                        apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
@@ -2961,7 +2969,6 @@ static inline void __init check_timer(void)
                 */
                replace_pin_at_irq_cpu(cfg, cpu, apic1, pin1, apic2, pin2);
                setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
-               unmask_IO_APIC_irq_desc(desc);
                enable_8259A_irq(0);
                if (timer_irq_works()) {
                        apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
@@ -2976,6 +2983,7 @@ static inline void __init check_timer(void)
                /*
                 * Cleanup, just in case ...
                 */
+               local_irq_disable();
                disable_8259A_irq(0);
                clear_IO_APIC_pin(apic2, pin2);
                apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
@@ -3001,6 +3009,7 @@ static inline void __init check_timer(void)
                apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
                goto out;
        }
+       local_irq_disable();
        disable_8259A_irq(0);
        apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
        apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
@@ -3018,6 +3027,7 @@ static inline void __init check_timer(void)
                apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
                goto out;
        }
+       local_irq_disable();
        apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
        panic("IO-APIC + timer doesn't work!  Boot with apic=debug and send a "
                "report.  Then try booting with the 'noapic' option.\n");
@@ -3118,8 +3128,8 @@ static int ioapic_resume(struct sys_device *dev)
 
        spin_lock_irqsave(&ioapic_lock, flags);
        reg_00.raw = io_apic_read(dev->id, 0);
-       if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
-               reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
+       if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
+               reg_00.bits.ID = mp_ioapics[dev->id].apicid;
                io_apic_write(dev->id, 0, reg_00.raw);
        }
        spin_unlock_irqrestore(&ioapic_lock, flags);
@@ -3169,6 +3179,7 @@ static int __init ioapic_init_sysfs(void)
 
 device_initcall(ioapic_init_sysfs);
 
+static int nr_irqs_gsi = NR_IRQS_LEGACY;
 /*
  * Dynamic irq allocate and deallocation
  */
@@ -3183,11 +3194,11 @@ unsigned int create_irq_nr(unsigned int irq_want)
        struct irq_desc *desc_new = NULL;
 
        irq = 0;
-       spin_lock_irqsave(&vector_lock, flags);
-       for (new = irq_want; new < NR_IRQS; new++) {
-               if (platform_legacy_irq(new))
-                       continue;
+       if (irq_want < nr_irqs_gsi)
+               irq_want = nr_irqs_gsi;
 
+       spin_lock_irqsave(&vector_lock, flags);
+       for (new = irq_want; new < nr_irqs; new++) {
                desc_new = irq_to_desc_alloc_cpu(new, cpu);
                if (!desc_new) {
                        printk(KERN_INFO "can not get irq_desc for %d\n", new);
@@ -3197,7 +3208,7 @@ unsigned int create_irq_nr(unsigned int irq_want)
 
                if (cfg_new->vector != 0)
                        continue;
-               if (__assign_irq_vector(new, cfg_new, TARGET_CPUS) == 0)
+               if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0)
                        irq = new;
                break;
        }
@@ -3212,7 +3223,6 @@ unsigned int create_irq_nr(unsigned int irq_want)
        return irq;
 }
 
-static int nr_irqs_gsi = NR_IRQS_LEGACY;
 int create_irq(void)
 {
        unsigned int irq_want;
@@ -3259,12 +3269,15 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
        int err;
        unsigned dest;
 
+       if (disable_apic)
+               return -ENXIO;
+
        cfg = irq_cfg(irq);
-       err = assign_irq_vector(irq, cfg, TARGET_CPUS);
+       err = assign_irq_vector(irq, cfg, apic->target_cpus());
        if (err)
                return err;
 
-       dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
+       dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
 
 #ifdef CONFIG_INTR_REMAP
        if (irq_remapped(irq)) {
@@ -3278,9 +3291,9 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
                memset (&irte, 0, sizeof(irte));
 
                irte.present = 1;
-               irte.dst_mode = INT_DEST_MODE;
+               irte.dst_mode = apic->irq_dest_mode;
                irte.trigger_mode = 0; /* edge */
-               irte.dlvry_mode = INT_DELIVERY_MODE;
+               irte.dlvry_mode = apic->irq_delivery_mode;
                irte.vector = cfg->vector;
                irte.dest_id = IRTE_DEST(dest);
 
@@ -3298,10 +3311,10 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
                msg->address_hi = MSI_ADDR_BASE_HI;
                msg->address_lo =
                        MSI_ADDR_BASE_LO |
-                       ((INT_DEST_MODE == 0) ?
+                       ((apic->irq_dest_mode == 0) ?
                                MSI_ADDR_DEST_MODE_PHYSICAL:
                                MSI_ADDR_DEST_MODE_LOGICAL) |
-                       ((INT_DELIVERY_MODE != dest_LowestPrio) ?
+                       ((apic->irq_delivery_mode != dest_LowestPrio) ?
                                MSI_ADDR_REDIRECTION_CPU:
                                MSI_ADDR_REDIRECTION_LOWPRI) |
                        MSI_ADDR_DEST_ID(dest);
@@ -3309,7 +3322,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
                msg->data =
                        MSI_DATA_TRIGGER_EDGE |
                        MSI_DATA_LEVEL_ASSERT |
-                       ((INT_DELIVERY_MODE != dest_LowestPrio) ?
+                       ((apic->irq_delivery_mode != dest_LowestPrio) ?
                                MSI_DATA_DELIVERY_FIXED:
                                MSI_DATA_DELIVERY_LOWPRI) |
                        MSI_DATA_VECTOR(cfg->vector);
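
The msi_compose_msg() hunks above follow the pattern applied throughout this patch: compile-time subarchitecture macros (TARGET_CPUS, INT_DEST_MODE, INT_DELIVERY_MODE, cpu_mask_to_apicid_and) become fields and methods of the runtime-selected apic driver. A self-contained sketch of that indirection; the struct layout and values here are simplified assumptions, not the kernel's actual struct genapic:

#include <stdio.h>

/* simplified stand-in for the kernel's per-subarch APIC driver object */
struct apic_driver {
	const char	*name;
	int		irq_dest_mode;		/* 0 = physical, 1 = logical */
	int		irq_delivery_mode;	/* 0 = fixed, 1 = lowest-prio */
	unsigned int	(*cpu_mask_to_apicid)(unsigned long mask);
};

static unsigned int flat_cpu_mask_to_apicid(unsigned long mask)
{
	return (unsigned int)(mask & 0xffUL);	/* toy logical-mode mapping */
}

static struct apic_driver flat_example = {
	.name			= "flat-example",
	.irq_dest_mode		= 1,
	.irq_delivery_mode	= 1,
	.cpu_mask_to_apicid	= flat_cpu_mask_to_apicid,
};

/* one global pointer, chosen once at boot after probing the hardware */
static struct apic_driver *apic = &flat_example;

int main(void)
{
	/* call sites no longer hard-code a subarchitecture: */
	printf("%s: dest %#x, %s destination, %s delivery\n",
	       apic->name,
	       apic->cpu_mask_to_apicid(0x5),
	       apic->irq_dest_mode ? "logical" : "physical",
	       apic->irq_delivery_mode ? "lowest-priority" : "fixed");
	return 0;
}
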
@@ -3464,40 +3477,6 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
        return 0;
 }
 
-int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc)
-{
-       unsigned int irq;
-       int ret;
-       unsigned int irq_want;
-
-       irq_want = nr_irqs_gsi;
-       irq = create_irq_nr(irq_want);
-       if (irq == 0)
-               return -1;
-
-#ifdef CONFIG_INTR_REMAP
-       if (!intr_remapping_enabled)
-               goto no_ir;
-
-       ret = msi_alloc_irte(dev, irq, 1);
-       if (ret < 0)
-               goto error;
-no_ir:
-#endif
-       ret = setup_msi_irq(dev, msidesc, irq);
-       if (ret < 0) {
-               destroy_irq(irq);
-               return ret;
-       }
-       return 0;
-
-#ifdef CONFIG_INTR_REMAP
-error:
-       destroy_irq(irq);
-       return ret;
-#endif
-}
-
 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 {
        unsigned int irq;
@@ -3514,9 +3493,9 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
        sub_handle = 0;
        list_for_each_entry(msidesc, &dev->msi_list, list) {
                irq = create_irq_nr(irq_want);
-               irq_want++;
                if (irq == 0)
                        return -1;
+               irq_want = irq + 1;
 #ifdef CONFIG_INTR_REMAP
                if (!intr_remapping_enabled)
                        goto no_ir;
@@ -3727,13 +3706,17 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
        struct irq_cfg *cfg;
        int err;
 
+       if (disable_apic)
+               return -ENXIO;
+
        cfg = irq_cfg(irq);
-       err = assign_irq_vector(irq, cfg, TARGET_CPUS);
+       err = assign_irq_vector(irq, cfg, apic->target_cpus());
        if (!err) {
                struct ht_irq_msg msg;
                unsigned dest;
 
-               dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
+               dest = apic->cpu_mask_to_apicid_and(cfg->domain,
+                                                   apic->target_cpus());
 
                msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
 
@@ -3741,11 +3724,11 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
                        HT_IRQ_LOW_BASE |
                        HT_IRQ_LOW_DEST_ID(dest) |
                        HT_IRQ_LOW_VECTOR(cfg->vector) |
-                       ((INT_DEST_MODE == 0) ?
+                       ((apic->irq_dest_mode == 0) ?
                                HT_IRQ_LOW_DM_PHYSICAL :
                                HT_IRQ_LOW_DM_LOGICAL) |
                        HT_IRQ_LOW_RQEOI_EDGE |
-                       ((INT_DELIVERY_MODE != dest_LowestPrio) ?
+                       ((apic->irq_delivery_mode != dest_LowestPrio) ?
                                HT_IRQ_LOW_MT_FIXED :
                                HT_IRQ_LOW_MT_ARBITRATED) |
                        HT_IRQ_LOW_IRQ_MASKED;
@@ -3761,7 +3744,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 }
 #endif /* CONFIG_HT_IRQ */
 
-#ifdef CONFIG_X86_64
+#ifdef CONFIG_X86_UV
 /*
  * Re-target the irq to the specified CPU and enable the specified MMR located
  * on the specified blade to allow the sending of MSIs to the specified CPU.
@@ -3793,12 +3776,12 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
        BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
 
        entry->vector = cfg->vector;
-       entry->delivery_mode = INT_DELIVERY_MODE;
-       entry->dest_mode = INT_DEST_MODE;
+       entry->delivery_mode = apic->irq_delivery_mode;
+       entry->dest_mode = apic->irq_dest_mode;
        entry->polarity = 0;
        entry->trigger = 0;
        entry->mask = 0;
-       entry->dest = cpu_mask_to_apicid(eligible_cpu);
+       entry->dest = apic->cpu_mask_to_apicid(eligible_cpu);
 
        mmr_pnode = uv_blade_to_pnode(mmr_blade);
        uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
@@ -3861,6 +3844,28 @@ void __init probe_nr_irqs_gsi(void)
        printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
 }
 
+#ifdef CONFIG_SPARSE_IRQ
+int __init arch_probe_nr_irqs(void)
+{
+       int nr;
+
+       if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
+               nr_irqs = NR_VECTORS * nr_cpu_ids;
+
+       nr = nr_irqs_gsi + 8 * nr_cpu_ids;
+#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
+       /*
+        * for MSI and HT dyn irq
+        */
+       nr += nr_irqs_gsi * 16;
+#endif
+       if (nr < nr_irqs)
+               nr_irqs = nr;
+
+       return 0;
+}
+#endif
+
 /* --------------------------------------------------------------------------
                           ACPI-based IOAPIC Configuration
    -------------------------------------------------------------------------- */
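
The arch_probe_nr_irqs() added above only ever shrinks nr_irqs: it first caps it at NR_VECTORS * nr_cpu_ids and then at a GSI-derived estimate with headroom for MSI/HT. A worked example of that arithmetic as a stand-alone program; nr_cpu_ids and nr_irqs_gsi are assumed inputs chosen for illustration:

#include <stdio.h>

#define NR_VECTORS_EXAMPLE	256	/* x86 has 256 IDT vectors per CPU */

int main(void)
{
	int nr_cpu_ids  = 4;		/* assumed for the example */
	int nr_irqs_gsi = 24;		/* assumed: 16 legacy + 8 further GSIs */
	int nr_irqs     = 1024;		/* whatever the generic code started with */
	int nr;

	if (nr_irqs > NR_VECTORS_EXAMPLE * nr_cpu_ids)	/* absolute upper bound */
		nr_irqs = NR_VECTORS_EXAMPLE * nr_cpu_ids;

	nr  = nr_irqs_gsi + 8 * nr_cpu_ids;	/* GSIs plus some per-CPU slack   */
	nr += nr_irqs_gsi * 16;			/* headroom for MSI/HT interrupts */

	if (nr < nr_irqs)			/* shrink only, never grow        */
		nr_irqs = nr;

	printf("nr_irqs = %d\n", nr_irqs);	/* 24 + 32 + 384 = 440 */
	return 0;
}
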
@@ -3886,7 +3891,7 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
         */
 
        if (physids_empty(apic_id_map))
-               apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
+               apic_id_map = apic->ioapic_phys_id_map(phys_cpu_present_map);
 
        spin_lock_irqsave(&ioapic_lock, flags);
        reg_00.raw = io_apic_read(ioapic, 0);
@@ -3902,10 +3907,10 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
         * Every APIC in a system must have a unique ID or we get lots of nice
         * 'stuck on smp_invalidate_needed IPI wait' messages.
         */
-       if (check_apicid_used(apic_id_map, apic_id)) {
+       if (apic->check_apicid_used(apic_id_map, apic_id)) {
 
                for (i = 0; i < get_physical_broadcast(); i++) {
-                       if (!check_apicid_used(apic_id_map, i))
+                       if (!apic->check_apicid_used(apic_id_map, i))
                                break;
                }
 
@@ -3918,7 +3923,7 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
                apic_id = i;
        }
 
-       tmp = apicid_to_cpu_present(apic_id);
+       tmp = apic->apicid_to_cpu_present(apic_id);
        physids_or(apic_id_map, apic_id_map, tmp);
 
        if (reg_00.bits.ID != apic_id) {
@@ -3995,8 +4000,8 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
                return -1;
 
        for (i = 0; i < mp_irq_entries; i++)
-               if (mp_irqs[i].mp_irqtype == mp_INT &&
-                   mp_irqs[i].mp_srcbusirq == bus_irq)
+               if (mp_irqs[i].irqtype == mp_INT &&
+                   mp_irqs[i].srcbusirq == bus_irq)
                        break;
        if (i >= mp_irq_entries)
                return -1;
@@ -4011,7 +4016,7 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
 /*
  * This function currently is only a helper for the i386 smp boot process where
  * we need to reprogram the ioredtbls to cater for the cpus which have come online
- * so mask in all cases should simply be TARGET_CPUS
+ * so mask in all cases should simply be apic->target_cpus()
  */
 #ifdef CONFIG_SMP
 void __init setup_ioapic_dest(void)
@@ -4050,9 +4055,9 @@ void __init setup_ioapic_dest(void)
                         */
                        if (desc->status &
                            (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
-                               mask = &desc->affinity;
+                               mask = desc->affinity;
                        else
-                               mask = TARGET_CPUS;
+                               mask = apic->target_cpus();
 
 #ifdef CONFIG_INTR_REMAP
                        if (intr_remapping_enabled)
@@ -4111,7 +4116,7 @@ void __init ioapic_init_mappings(void)
        ioapic_res = ioapic_setup_resources();
        for (i = 0; i < nr_ioapics; i++) {
                if (smp_found_config) {
-                       ioapic_phys = mp_ioapics[i].mp_apicaddr;
+                       ioapic_phys = mp_ioapics[i].apicaddr;
 #ifdef CONFIG_X86_32
                        if (!ioapic_phys) {
                                printk(KERN_ERR
index b12208f..e41980a 100644

@@ -131,9 +131,8 @@ static int do_iopl(unsigned int level, struct pt_regs *regs)
 }
 
 #ifdef CONFIG_X86_32
-asmlinkage long sys_iopl(unsigned long regsp)
+long sys_iopl(struct pt_regs *regs)
 {
-       struct pt_regs *regs = (struct pt_regs *)&regsp;
        unsigned int level = regs->bx;
        struct thread_struct *t = &current->thread;
        int rc;
index 285bbf8..dbf5445 100644
 #include <asm/mmu_context.h>
 #include <asm/apic.h>
 #include <asm/proto.h>
+#include <asm/ipi.h>
 
-#ifdef CONFIG_X86_32
-#include <mach_apic.h>
-#include <mach_ipi.h>
-
-/*
- * the following functions deal with sending IPIs between CPUs.
- *
- * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
- */
-
-static inline int __prepare_ICR(unsigned int shortcut, int vector)
+void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
 {
-       unsigned int icr = shortcut | APIC_DEST_LOGICAL;
-
-       switch (vector) {
-       default:
-               icr |= APIC_DM_FIXED | vector;
-               break;
-       case NMI_VECTOR:
-               icr |= APIC_DM_NMI;
-               break;
+       unsigned long query_cpu;
+       unsigned long flags;
+
+       /*
+        * Hack. The clustered APIC addressing mode doesn't allow us to send
+        * to an arbitrary mask, so I do a unicast to each CPU instead.
+        * - mbligh
+        */
+       local_irq_save(flags);
+       for_each_cpu(query_cpu, mask) {
+               __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
+                               query_cpu), vector, APIC_DEST_PHYSICAL);
        }
-       return icr;
+       local_irq_restore(flags);
 }
 
-static inline int __prepare_ICR2(unsigned int mask)
+void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
+                                                int vector)
 {
-       return SET_APIC_DEST_FIELD(mask);
-}
+       unsigned int this_cpu = smp_processor_id();
+       unsigned int query_cpu;
+       unsigned long flags;
 
-void __send_IPI_shortcut(unsigned int shortcut, int vector)
-{
-       /*
-        * Subtle. In the case of the 'never do double writes' workaround
-        * we have to lock out interrupts to be safe.  As we don't care
-        * of the value read we use an atomic rmw access to avoid costly
-        * cli/sti.  Otherwise we use an even cheaper single atomic write
-        * to the APIC.
-        */
-       unsigned int cfg;
+       /* See Hack comment above */
 
-       /*
-        * Wait for idle.
-        */
-       apic_wait_icr_idle();
+       local_irq_save(flags);
+       for_each_cpu(query_cpu, mask) {
+               if (query_cpu == this_cpu)
+                       continue;
+               __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
+                                query_cpu), vector, APIC_DEST_PHYSICAL);
+       }
+       local_irq_restore(flags);
+}
 
-       /*
-        * No need to touch the target chip field
-        */
-       cfg = __prepare_ICR(shortcut, vector);
+void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
+                                                int vector)
+{
+       unsigned long flags;
+       unsigned int query_cpu;
 
        /*
-        * Send the IPI. The write to APIC_ICR fires this off.
+        * Hack. The clustered APIC addressing mode doesn't allow us to send
+        * to an arbitrary mask, so I do a unicasts to each CPU instead. This
+        * should be modified to do 1 message per cluster ID - mbligh
         */
-       apic_write(APIC_ICR, cfg);
-}
 
-void send_IPI_self(int vector)
-{
-       __send_IPI_shortcut(APIC_DEST_SELF, vector);
+       local_irq_save(flags);
+       for_each_cpu(query_cpu, mask)
+               __default_send_IPI_dest_field(
+                       apic->cpu_to_logical_apicid(query_cpu), vector,
+                       apic->dest_logical);
+       local_irq_restore(flags);
 }
 
-/*
- * This is used to send an IPI with no shorthand notation (the destination is
- * specified in bits 56 to 63 of the ICR).
- */
-static inline void __send_IPI_dest_field(unsigned long mask, int vector)
+void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
+                                                int vector)
 {
-       unsigned long cfg;
-
-       /*
-        * Wait for idle.
-        */
-       if (unlikely(vector == NMI_VECTOR))
-               safe_apic_wait_icr_idle();
-       else
-               apic_wait_icr_idle();
-
-       /*
-        * prepare target chip field
-        */
-       cfg = __prepare_ICR2(mask);
-       apic_write(APIC_ICR2, cfg);
+       unsigned long flags;
+       unsigned int query_cpu;
+       unsigned int this_cpu = smp_processor_id();
 
-       /*
-        * program the ICR
-        */
-       cfg = __prepare_ICR(0, vector);
+       /* See Hack comment above */
 
-       /*
-        * Send the IPI. The write to APIC_ICR fires this off.
-        */
-       apic_write(APIC_ICR, cfg);
+       local_irq_save(flags);
+       for_each_cpu(query_cpu, mask) {
+               if (query_cpu == this_cpu)
+                       continue;
+               __default_send_IPI_dest_field(
+                       apic->cpu_to_logical_apicid(query_cpu), vector,
+                       apic->dest_logical);
+               }
+       local_irq_restore(flags);
 }
 
+#ifdef CONFIG_X86_32
+
 /*
  * This is only used on smaller machines.
  */
-void send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector)
+void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
 {
        unsigned long mask = cpumask_bits(cpumask)[0];
        unsigned long flags;
 
        local_irq_save(flags);
        WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
-       __send_IPI_dest_field(mask, vector);
+       __default_send_IPI_dest_field(mask, vector, apic->dest_logical);
        local_irq_restore(flags);
 }
 
-void send_IPI_mask_sequence(const struct cpumask *mask, int vector)
+void default_send_IPI_allbutself(int vector)
 {
-       unsigned long flags;
-       unsigned int query_cpu;
-
        /*
-        * Hack. The clustered APIC addressing mode doesn't allow us to send
-        * to an arbitrary mask, so I do a unicasts to each CPU instead. This
-        * should be modified to do 1 message per cluster ID - mbligh
+        * if there are no other CPUs in the system then we get an APIC send
+        * error if we try to broadcast, thus avoid sending IPIs in this case.
         */
+       if (!(num_online_cpus() > 1))
+               return;
 
-       local_irq_save(flags);
-       for_each_cpu(query_cpu, mask)
-               __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector);
-       local_irq_restore(flags);
+       __default_local_send_IPI_allbutself(vector);
 }
 
-void send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
+void default_send_IPI_all(int vector)
 {
-       unsigned long flags;
-       unsigned int query_cpu;
-       unsigned int this_cpu = smp_processor_id();
-
-       /* See Hack comment above */
+       __default_local_send_IPI_all(vector);
+}
 
-       local_irq_save(flags);
-       for_each_cpu(query_cpu, mask)
-               if (query_cpu != this_cpu)
-                       __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
-                                             vector);
-       local_irq_restore(flags);
+void default_send_IPI_self(int vector)
+{
+       __default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical);
 }
 
 /* must come after the send_IPI functions above for inlining */
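
The rewritten ipi.c above replaces the old 32-bit-only senders with default_send_IPI_mask_sequence_{phys,logical}() and their _allbutself variants; as the retained comment notes, clustered logical addressing cannot target an arbitrary cpumask, so each helper simply unicasts to every CPU in the mask with interrupts disabled. A user-space model of that walk, where the APIC IDs and the send primitive are stand-ins:

#include <stdio.h>

#define NR_CPUS_EXAMPLE	8

static void send_one_ipi(int cpu, int vector)
{
	/* stands in for __default_send_IPI_dest_field() */
	printf("  vector %#x -> cpu %d\n", vector, cpu);
}

static void send_ipi_mask_sequence(unsigned long mask, int vector,
				   int self, int skip_self)
{
	int cpu;

	/* one unicast per set bit; the real code holds interrupts off here */
	for (cpu = 0; cpu < NR_CPUS_EXAMPLE; cpu++) {
		if (!(mask & (1UL << cpu)))
			continue;
		if (skip_self && cpu == self)
			continue;		/* the _allbutself variants */
		send_one_ipi(cpu, vector);
	}
}

int main(void)
{
	printf("mask_sequence 0x2d:\n");
	send_ipi_mask_sequence(0x2d, 0xfd, 0, 0);
	printf("mask_allbutself 0x2d, self=2:\n");
	send_ipi_mask_sequence(0x2d, 0xfd, 2, 1);
	return 0;
}
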
index 3973e2d..f13ca16 100644
@@ -6,10 +6,12 @@
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
 #include <linux/smp.h>
+#include <linux/ftrace.h>
 
 #include <asm/apic.h>
 #include <asm/io_apic.h>
 #include <asm/irq.h>
+#include <asm/idle.h>
 
 atomic_t irq_err_count;
 
@@ -36,11 +38,7 @@ void ack_bad_irq(unsigned int irq)
 #endif
 }
 
-#ifdef CONFIG_X86_32
-# define irq_stats(x)          (&per_cpu(irq_stat, x))
-#else
-# define irq_stats(x)          cpu_pda(x)
-#endif
+#define irq_stats(x)           (&per_cpu(irq_stat, x))
 /*
  * /proc/interrupts printing:
  */
@@ -192,4 +190,40 @@ u64 arch_irq_stat(void)
        return sum;
 }
 
+
+/*
+ * do_IRQ handles all normal device IRQ's (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ */
+unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
+{
+       struct pt_regs *old_regs = set_irq_regs(regs);
+
+       /* high bit used in ret_from_ code  */
+       unsigned vector = ~regs->orig_ax;
+       unsigned irq;
+
+       exit_idle();
+       irq_enter();
+
+       irq = __get_cpu_var(vector_irq)[vector];
+
+       if (!handle_irq(irq, regs)) {
+#ifdef CONFIG_X86_64
+               if (!disable_apic)
+                       ack_APIC_irq();
+#endif
+
+               if (printk_ratelimit())
+                       printk(KERN_EMERG "%s: %d.%d No irq handler for vector (irq %d)\n",
+                              __func__, smp_processor_id(), vector, irq);
+       }
+
+       irq_exit();
+
+       set_irq_regs(old_regs);
+       return 1;
+}
+
 EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
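
The do_IRQ() added above is now shared by 32-bit and 64-bit: it resolves the vector, does the enter/exit bookkeeping, and hands the interrupt to a per-arch handle_irq() (the 32-bit and 64-bit variants follow in the next two files), which returns false when no descriptor exists so the common code can print the single spurious-vector warning. A small model of that split, with a toy vector table standing in for vector_irq[]:

#include <stdbool.h>
#include <stdio.h>

/* toy vector_irq[] lookup: only vector 0x99 is mapped in this example */
static unsigned vector_to_irq(unsigned vector)
{
	return (vector == 0x99) ? 5 : (unsigned)-1;
}

/* per-arch part: find the descriptor and run it, or report "spurious" */
static bool handle_irq_model(unsigned irq)
{
	if (irq == (unsigned)-1)
		return false;			/* no irq_desc -> spurious */
	printf("handling irq %u\n", irq);	/* desc->handle_irq(irq, desc) */
	return true;
}

/* common part: bookkeeping plus one warning for unknown vectors */
static unsigned do_IRQ_model(unsigned vector)
{
	unsigned irq = vector_to_irq(vector);

	/* irq_enter() ... */
	if (!handle_irq_model(irq))
		printf("no irq handler for vector %#x\n", vector);
	/* ... irq_exit() */
	return 1;
}

int main(void)
{
	do_IRQ_model(0x99);	/* mapped vector   */
	do_IRQ_model(0x20);	/* unmapped vector */
	return 0;
}
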
index 74b9ff7..4beb9a1 100644
@@ -191,33 +191,16 @@ static inline int
 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
 #endif
 
-/*
- * do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- */
-unsigned int do_IRQ(struct pt_regs *regs)
+bool handle_irq(unsigned irq, struct pt_regs *regs)
 {
-       struct pt_regs *old_regs;
-       /* high bit used in ret_from_ code */
-       int overflow;
-       unsigned vector = ~regs->orig_ax;
        struct irq_desc *desc;
-       unsigned irq;
-
-
-       old_regs = set_irq_regs(regs);
-       irq_enter();
-       irq = __get_cpu_var(vector_irq)[vector];
+       int overflow;
 
        overflow = check_stack_overflow();
 
        desc = irq_to_desc(irq);
-       if (unlikely(!desc)) {
-               printk(KERN_EMERG "%s: cannot handle IRQ %d vector %#x cpu %d\n",
-                                       __func__, irq, vector, smp_processor_id());
-               BUG();
-       }
+       if (unlikely(!desc))
+               return false;
 
        if (!execute_on_irq_stack(overflow, desc, irq)) {
                if (unlikely(overflow))
@@ -225,13 +208,11 @@ unsigned int do_IRQ(struct pt_regs *regs)
                desc->handle_irq(irq, desc);
        }
 
-       irq_exit();
-       set_irq_regs(old_regs);
-       return 1;
+       return true;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-#include <mach_apic.h>
+#include <asm/genapic.h>
 
 /* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
 void fixup_irqs(void)
@@ -248,7 +229,7 @@ void fixup_irqs(void)
                if (irq == 2)
                        continue;
 
-               affinity = &desc->affinity;
+               affinity = desc->affinity;
                if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                        printk("Breaking affinity for irq %i\n", irq);
                        affinity = cpu_all_mask;
index 63c88e6..977d8b4 100644
 #include <linux/smp.h>
 #include <asm/io_apic.h>
 #include <asm/idle.h>
+#include <asm/apic.h>
+
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+EXPORT_PER_CPU_SYMBOL(irq_stat);
+
+DEFINE_PER_CPU(struct pt_regs *, irq_regs);
+EXPORT_PER_CPU_SYMBOL(irq_regs);
 
 /*
  * Probabilistic stack overflow check:
@@ -41,42 +48,18 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 #endif
 }
 
-/*
- * do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- */
-asmlinkage unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
+bool handle_irq(unsigned irq, struct pt_regs *regs)
 {
-       struct pt_regs *old_regs = set_irq_regs(regs);
        struct irq_desc *desc;
 
-       /* high bit used in ret_from_ code  */
-       unsigned vector = ~regs->orig_ax;
-       unsigned irq;
-
-       exit_idle();
-       irq_enter();
-       irq = __get_cpu_var(vector_irq)[vector];
-
        stack_overflow_check(regs);
 
        desc = irq_to_desc(irq);
-       if (likely(desc))
-               generic_handle_irq_desc(irq, desc);
-       else {
-               if (!disable_apic)
-                       ack_APIC_irq();
-
-               if (printk_ratelimit())
-                       printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n",
-                               __func__, smp_processor_id(), vector);
-       }
-
-       irq_exit();
+       if (unlikely(!desc))
+               return false;
 
-       set_irq_regs(old_regs);
-       return 1;
+       generic_handle_irq_desc(irq, desc);
+       return true;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -100,7 +83,7 @@ void fixup_irqs(void)
                /* interrupt's are disabled at this point */
                spin_lock(&desc->lock);
 
-               affinity = &desc->affinity;
+               affinity = desc->affinity;
                if (!irq_has_action(irq) ||
                    cpumask_equal(affinity, cpu_online_mask)) {
                        spin_unlock(&desc->lock);
index 10a09c2..bf629ca 100644
@@ -78,6 +78,15 @@ void __init init_ISA_irqs(void)
        }
 }
 
+/*
+ * IRQ2 is cascade interrupt to second interrupt controller
+ */
+static struct irqaction irq2 = {
+       .handler = no_action,
+       .mask = CPU_MASK_NONE,
+       .name = "cascade",
+};
+
 DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
        [0 ... IRQ0_VECTOR - 1] = -1,
        [IRQ0_VECTOR] = 0,
@@ -140,8 +149,15 @@ void __init native_init_IRQ(void)
         */
        alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
 
-       /* IPI for invalidation */
-       alloc_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
+       /* IPIs for invalidation */
+       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
+       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
+       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
+       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
+       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
+       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
+       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
+       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);
 
        /* IPI for generic function call */
        alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
@@ -169,6 +185,9 @@ void __init native_init_IRQ(void)
        alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
 #endif
 
+       if (!acpi_ioapic)
+               setup_irq(2, &irq2);
+
        /* setup after call gates are initialised (usually add in
         * the architecture specific gates)
         */
index 10435a1..5c4f554 100644
@@ -46,7 +46,7 @@
 #include <asm/apicdef.h>
 #include <asm/system.h>
 
-#include <mach_ipi.h>
+#include <asm/genapic.h>
 
 /*
  * Put the error code here just in case the user cares:
@@ -347,7 +347,7 @@ void kgdb_post_primary_code(struct pt_regs *regs, int e_vector, int err_code)
  */
 void kgdb_roundup_cpus(unsigned long flags)
 {
-       send_IPI_allbutself(APIC_DM_NMI);
+       apic->send_IPI_allbutself(APIC_DM_NMI);
 }
 #endif
 
index c43caa3..6993d51 100644
 #include <asm/mmu_context.h>
 #include <asm/io.h>
 
-#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
-static u64 kexec_pgd[512] PAGE_ALIGNED;
-static u64 kexec_pud0[512] PAGE_ALIGNED;
-static u64 kexec_pmd0[512] PAGE_ALIGNED;
-static u64 kexec_pte0[512] PAGE_ALIGNED;
-static u64 kexec_pud1[512] PAGE_ALIGNED;
-static u64 kexec_pmd1[512] PAGE_ALIGNED;
-static u64 kexec_pte1[512] PAGE_ALIGNED;
-
 static void init_level2_page(pmd_t *level2p, unsigned long addr)
 {
        unsigned long end_addr;
@@ -107,12 +98,65 @@ out:
        return result;
 }
 
+static void free_transition_pgtable(struct kimage *image)
+{
+       free_page((unsigned long)image->arch.pud);
+       free_page((unsigned long)image->arch.pmd);
+       free_page((unsigned long)image->arch.pte);
+}
+
+static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
+{
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
+       unsigned long vaddr, paddr;
+       int result = -ENOMEM;
+
+       vaddr = (unsigned long)relocate_kernel;
+       paddr = __pa(page_address(image->control_code_page)+PAGE_SIZE);
+       pgd += pgd_index(vaddr);
+       if (!pgd_present(*pgd)) {
+               pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
+               if (!pud)
+                       goto err;
+               image->arch.pud = pud;
+               set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
+       }
+       pud = pud_offset(pgd, vaddr);
+       if (!pud_present(*pud)) {
+               pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
+               if (!pmd)
+                       goto err;
+               image->arch.pmd = pmd;
+               set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
+       }
+       pmd = pmd_offset(pud, vaddr);
+       if (!pmd_present(*pmd)) {
+               pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
+               if (!pte)
+                       goto err;
+               image->arch.pte = pte;
+               set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
+       }
+       pte = pte_offset_kernel(pmd, vaddr);
+       set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
+       return 0;
+err:
+       free_transition_pgtable(image);
+       return result;
+}
+
 
 static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
 {
        pgd_t *level4p;
+       int result;
        level4p = (pgd_t *)__va(start_pgtable);
-       return init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT);
+       result = init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT);
+       if (result)
+               return result;
+       return init_transition_pgtable(image, level4p);
 }
 
 static void set_idt(void *newidt, u16 limit)
@@ -174,7 +218,7 @@ int machine_kexec_prepare(struct kimage *image)
 
 void machine_kexec_cleanup(struct kimage *image)
 {
-       return;
+       free_transition_pgtable(image);
 }
 
 /*
@@ -195,22 +239,6 @@ void machine_kexec(struct kimage *image)
        memcpy(control_page, relocate_kernel, PAGE_SIZE);
 
        page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
-       page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel;
-       page_list[PA_PGD] = virt_to_phys(&kexec_pgd);
-       page_list[VA_PGD] = (unsigned long)kexec_pgd;
-       page_list[PA_PUD_0] = virt_to_phys(&kexec_pud0);
-       page_list[VA_PUD_0] = (unsigned long)kexec_pud0;
-       page_list[PA_PMD_0] = virt_to_phys(&kexec_pmd0);
-       page_list[VA_PMD_0] = (unsigned long)kexec_pmd0;
-       page_list[PA_PTE_0] = virt_to_phys(&kexec_pte0);
-       page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
-       page_list[PA_PUD_1] = virt_to_phys(&kexec_pud1);
-       page_list[VA_PUD_1] = (unsigned long)kexec_pud1;
-       page_list[PA_PMD_1] = virt_to_phys(&kexec_pmd1);
-       page_list[VA_PMD_1] = (unsigned long)kexec_pmd1;
-       page_list[PA_PTE_1] = virt_to_phys(&kexec_pte1);
-       page_list[VA_PTE_1] = (unsigned long)kexec_pte1;
-
        page_list[PA_TABLE_PAGE] =
          (unsigned long)__pa(page_address(image->control_code_page));
 
index b7f4c92..5e9f4fc 100644
@@ -87,9 +87,9 @@
 #include <linux/cpu.h>
 #include <linux/firmware.h>
 #include <linux/platform_device.h>
+#include <linux/uaccess.h>
 
 #include <asm/msr.h>
-#include <asm/uaccess.h>
 #include <asm/processor.h>
 #include <asm/microcode.h>
 
@@ -196,7 +196,7 @@ static inline int update_match_cpu(struct cpu_signature *csig, int sig, int pf)
        return (!sigmatch(sig, csig->sig, pf, csig->pf)) ? 0 : 1;
 }
 
-static inline int 
+static inline int
 update_match_revision(struct microcode_header_intel *mc_header,        int rev)
 {
        return (mc_header->rev <= rev) ? 0 : 1;
@@ -442,8 +442,8 @@ static int request_microcode_fw(int cpu, struct device *device)
                return ret;
        }
 
-       ret = generic_load_microcode(cpu, (void*)firmware->data, firmware->size,
-                       &get_ucode_fw);
+       ret = generic_load_microcode(cpu, (void *)firmware->data,
+                                    firmware->size, &get_ucode_fw);
 
        release_firmware(firmware);
 
@@ -460,7 +460,7 @@ static int request_microcode_user(int cpu, const void __user *buf, size_t size)
        /* We should bind the task to the CPU */
        BUG_ON(cpu != raw_smp_processor_id());
 
-       return generic_load_microcode(cpu, (void*)buf, size, &get_ucode_user);
+       return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
 }
 
 static void microcode_fini_cpu(int cpu)
index 3db0a54..0edd819 100644
@@ -42,7 +42,7 @@ void module_free(struct module *mod, void *module_region)
 {
        vfree(module_region);
        /* FIXME: If module_region == mod->init_region, trim exception
-           table entries. */
+          table entries. */
 }
 
 /* We don't need anything special. */
@@ -113,13 +113,13 @@ int module_finalize(const Elf_Ehdr *hdr,
                *para = NULL;
        char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
 
-       for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { 
+       for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
                if (!strcmp(".text", secstrings + s->sh_name))
                        text = s;
                if (!strcmp(".altinstructions", secstrings + s->sh_name))
                        alt = s;
                if (!strcmp(".smp_locks", secstrings + s->sh_name))
-                       locks= s;
+                       locks = s;
                if (!strcmp(".parainstructions", secstrings + s->sh_name))
                        para = s;
        }
index 6ba8783..c23880b 100644
 #include <asm/page.h>
 #include <asm/pgtable.h>
 
-#define DEBUGP(fmt...) 
+#define DEBUGP(fmt...)
 
 #ifndef CONFIG_UML
 void module_free(struct module *mod, void *module_region)
 {
        vfree(module_region);
        /* FIXME: If module_region == mod->init_region, trim exception
-           table entries. */
+          table entries. */
 }
 
 void *module_alloc(unsigned long size)
@@ -77,7 +77,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
        Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
        Elf64_Sym *sym;
        void *loc;
-       u64 val; 
+       u64 val;
 
        DEBUGP("Applying relocate section %u to %u\n", relsec,
               sechdrs[relsec].sh_info);
@@ -91,11 +91,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
                        + ELF64_R_SYM(rel[i].r_info);
 
-               DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
-                      (int)ELF64_R_TYPE(rel[i].r_info), 
-                      sym->st_value, rel[i].r_addend, (u64)loc);
+               DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
+                       (int)ELF64_R_TYPE(rel[i].r_info),
+                       sym->st_value, rel[i].r_addend, (u64)loc);
 
-               val = sym->st_value + rel[i].r_addend; 
+               val = sym->st_value + rel[i].r_addend;
 
                switch (ELF64_R_TYPE(rel[i].r_info)) {
                case R_X86_64_NONE:
@@ -113,16 +113,16 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                        if ((s64)val != *(s32 *)loc)
                                goto overflow;
                        break;
-               case R_X86_64_PC32: 
+               case R_X86_64_PC32:
                        val -= (u64)loc;
                        *(u32 *)loc = val;
 #if 0
                        if ((s64)val != *(s32 *)loc)
-                               goto overflow; 
+                               goto overflow;
 #endif
                        break;
                default:
-                       printk(KERN_ERR "module %s: Unknown rela relocation: %Lu\n",
+                       printk(KERN_ERR "module %s: Unknown rela relocation: %llu\n",
                               me->name, ELF64_R_TYPE(rel[i].r_info));
                        return -ENOEXEC;
                }
@@ -130,7 +130,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
        return 0;
 
 overflow:
-       printk(KERN_ERR "overflow in relocation type %d val %Lx\n", 
+       printk(KERN_ERR "overflow in relocation type %d val %Lx\n",
               (int)ELF64_R_TYPE(rel[i].r_info), val);
        printk(KERN_ERR "`%s' likely not compiled with -mcmodel=kernel\n",
               me->name);
@@ -143,13 +143,13 @@ int apply_relocate(Elf_Shdr *sechdrs,
                   unsigned int relsec,
                   struct module *me)
 {
-       printk("non add relocation not supported\n");
+       printk(KERN_ERR "non add relocation not supported\n");
        return -ENOSYS;
-} 
+}
 
 int module_finalize(const Elf_Ehdr *hdr,
-                    const Elf_Shdr *sechdrs,
-                    struct module *me)
+                   const Elf_Shdr *sechdrs,
+                   struct module *me)
 {
        const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
                *para = NULL;
@@ -161,7 +161,7 @@ int module_finalize(const Elf_Ehdr *hdr,
                if (!strcmp(".altinstructions", secstrings + s->sh_name))
                        alt = s;
                if (!strcmp(".smp_locks", secstrings + s->sh_name))
-                       locks= s;
+                       locks = s;
                if (!strcmp(".parainstructions", secstrings + s->sh_name))
                        para = s;
        }
index a649a4c..2007644 100644
@@ -3,7 +3,7 @@
  *     compliant MP-table parsing routines.
  *
  *     (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
- *     (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
+ *     (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
  *      (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
  */
 
 #include <asm/setup.h>
 #include <asm/smp.h>
 
-#include <mach_apic.h>
-#ifdef CONFIG_X86_32
-#include <mach_apicdef.h>
-#include <mach_mpparse.h>
-#endif
-
+#include <asm/genapic.h>
 /*
  * Checksum an MP configuration block.
  */
@@ -144,11 +139,11 @@ static void __init MP_ioapic_info(struct mpc_ioapic *m)
        if (bad_ioapic(m->apicaddr))
                return;
 
-       mp_ioapics[nr_ioapics].mp_apicaddr = m->apicaddr;
-       mp_ioapics[nr_ioapics].mp_apicid = m->apicid;
-       mp_ioapics[nr_ioapics].mp_type = m->type;
-       mp_ioapics[nr_ioapics].mp_apicver = m->apicver;
-       mp_ioapics[nr_ioapics].mp_flags = m->flags;
+       mp_ioapics[nr_ioapics].apicaddr = m->apicaddr;
+       mp_ioapics[nr_ioapics].apicid = m->apicid;
+       mp_ioapics[nr_ioapics].type = m->type;
+       mp_ioapics[nr_ioapics].apicver = m->apicver;
+       mp_ioapics[nr_ioapics].flags = m->flags;
        nr_ioapics++;
 }
 
@@ -160,55 +155,55 @@ static void print_MP_intsrc_info(struct mpc_intsrc *m)
                m->srcbusirq, m->dstapic, m->dstirq);
 }
 
-static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq)
+static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
 {
        apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
                " IRQ %02x, APIC ID %x, APIC INT %02x\n",
-               mp_irq->mp_irqtype, mp_irq->mp_irqflag & 3,
-               (mp_irq->mp_irqflag >> 2) & 3, mp_irq->mp_srcbus,
-               mp_irq->mp_srcbusirq, mp_irq->mp_dstapic, mp_irq->mp_dstirq);
+               mp_irq->irqtype, mp_irq->irqflag & 3,
+               (mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
+               mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
 }
 
 static void __init assign_to_mp_irq(struct mpc_intsrc *m,
-                                   struct mp_config_intsrc *mp_irq)
+                                   struct mpc_intsrc *mp_irq)
 {
-       mp_irq->mp_dstapic = m->dstapic;
-       mp_irq->mp_type = m->type;
-       mp_irq->mp_irqtype = m->irqtype;
-       mp_irq->mp_irqflag = m->irqflag;
-       mp_irq->mp_srcbus = m->srcbus;
-       mp_irq->mp_srcbusirq = m->srcbusirq;
-       mp_irq->mp_dstirq = m->dstirq;
+       mp_irq->dstapic = m->dstapic;
+       mp_irq->type = m->type;
+       mp_irq->irqtype = m->irqtype;
+       mp_irq->irqflag = m->irqflag;
+       mp_irq->srcbus = m->srcbus;
+       mp_irq->srcbusirq = m->srcbusirq;
+       mp_irq->dstirq = m->dstirq;
 }
 
-static void __init assign_to_mpc_intsrc(struct mp_config_intsrc *mp_irq,
+static void __init assign_to_mpc_intsrc(struct mpc_intsrc *mp_irq,
                                        struct mpc_intsrc *m)
 {
-       m->dstapic = mp_irq->mp_dstapic;
-       m->type = mp_irq->mp_type;
-       m->irqtype = mp_irq->mp_irqtype;
-       m->irqflag = mp_irq->mp_irqflag;
-       m->srcbus = mp_irq->mp_srcbus;
-       m->srcbusirq = mp_irq->mp_srcbusirq;
-       m->dstirq = mp_irq->mp_dstirq;
+       m->dstapic = mp_irq->dstapic;
+       m->type = mp_irq->type;
+       m->irqtype = mp_irq->irqtype;
+       m->irqflag = mp_irq->irqflag;
+       m->srcbus = mp_irq->srcbus;
+       m->srcbusirq = mp_irq->srcbusirq;
+       m->dstirq = mp_irq->dstirq;
 }
 
-static int __init mp_irq_mpc_intsrc_cmp(struct mp_config_intsrc *mp_irq,
+static int __init mp_irq_mpc_intsrc_cmp(struct mpc_intsrc *mp_irq,
                                        struct mpc_intsrc *m)
 {
-       if (mp_irq->mp_dstapic != m->dstapic)
+       if (mp_irq->dstapic != m->dstapic)
                return 1;
-       if (mp_irq->mp_type != m->type)
+       if (mp_irq->type != m->type)
                return 2;
-       if (mp_irq->mp_irqtype != m->irqtype)
+       if (mp_irq->irqtype != m->irqtype)
                return 3;
-       if (mp_irq->mp_irqflag != m->irqflag)
+       if (mp_irq->irqflag != m->irqflag)
                return 4;
-       if (mp_irq->mp_srcbus != m->srcbus)
+       if (mp_irq->srcbus != m->srcbus)
                return 5;
-       if (mp_irq->mp_srcbusirq != m->srcbusirq)
+       if (mp_irq->srcbusirq != m->srcbusirq)
                return 6;
-       if (mp_irq->mp_dstirq != m->dstirq)
+       if (mp_irq->dstirq != m->dstirq)
                return 7;
 
        return 0;
@@ -292,16 +287,7 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
                return 0;
 
 #ifdef CONFIG_X86_32
-       /*
-        * need to make sure summit and es7000's mps_oem_check is safe to be
-        * called early via genericarch 's mps_oem_check
-        */
-       if (early) {
-#ifdef CONFIG_X86_NUMAQ
-               numaq_mps_oem_check(mpc, oem, str);
-#endif
-       } else
-               mps_oem_check(mpc, oem, str);
+       generic_mps_oem_check(mpc, oem, str);
 #endif
        /* save the local APIC address, it might be non-default */
        if (!acpi_lapic)
@@ -386,13 +372,13 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
                        (*x86_quirks->mpc_record)++;
        }
 
-#ifdef CONFIG_X86_GENERICARCH
-       generic_bigsmp_probe();
+#ifdef CONFIG_X86_BIGSMP
+       generic_bigsmp_probe();
 #endif
 
-#ifdef CONFIG_X86_32
-       setup_apic_routing();
-#endif
+       if (apic->setup_apic_routing)
+               apic->setup_apic_routing();
+
        if (!num_processors)
                printk(KERN_ERR "MPTABLE: no processors registered!\n");
        return num_processors;
@@ -417,7 +403,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type)
        intsrc.type = MP_INTSRC;
        intsrc.irqflag = 0;     /* conforming */
        intsrc.srcbus = 0;
-       intsrc.dstapic = mp_ioapics[0].mp_apicid;
+       intsrc.dstapic = mp_ioapics[0].apicid;
 
        intsrc.irqtype = mp_INT;
 
@@ -570,14 +556,14 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
        }
 }
 
-static struct intel_mp_floating *mpf_found;
+static struct mpf_intel *mpf_found;
 
 /*
  * Scan the memory blocks for an SMP configuration block.
  */
 static void __init __get_smp_config(unsigned int early)
 {
-       struct intel_mp_floating *mpf = mpf_found;
+       struct mpf_intel *mpf = mpf_found;
 
        if (!mpf)
                return;
@@ -598,9 +584,9 @@ static void __init __get_smp_config(unsigned int early)
        }
 
        printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
-              mpf->mpf_specification);
+              mpf->specification);
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
-       if (mpf->mpf_feature2 & (1 << 7)) {
+       if (mpf->feature2 & (1 << 7)) {
                printk(KERN_INFO "    IMCR and PIC compatibility mode.\n");
                pic_mode = 1;
        } else {
@@ -611,7 +597,7 @@ static void __init __get_smp_config(unsigned int early)
        /*
         * Now see if we need to read further.
         */
-       if (mpf->mpf_feature1 != 0) {
+       if (mpf->feature1 != 0) {
                if (early) {
                        /*
                         * local APIC has default address
@@ -621,16 +607,16 @@ static void __init __get_smp_config(unsigned int early)
                }
 
                printk(KERN_INFO "Default MP configuration #%d\n",
-                      mpf->mpf_feature1);
-               construct_default_ISA_mptable(mpf->mpf_feature1);
+                      mpf->feature1);
+               construct_default_ISA_mptable(mpf->feature1);
 
-       } else if (mpf->mpf_physptr) {
+       } else if (mpf->physptr) {
 
                /*
                 * Read the physical hardware table.  Anything here will
                 * override the defaults.
                 */
-               if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr), early)) {
+               if (!smp_read_mpc(phys_to_virt(mpf->physptr), early)) {
 #ifdef CONFIG_X86_LOCAL_APIC
                        smp_found_config = 0;
 #endif
@@ -688,32 +674,32 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
                                  unsigned reserve)
 {
        unsigned int *bp = phys_to_virt(base);
-       struct intel_mp_floating *mpf;
+       struct mpf_intel *mpf;
 
        apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n",
                        bp, length);
        BUILD_BUG_ON(sizeof(*mpf) != 16);
 
        while (length > 0) {
-               mpf = (struct intel_mp_floating *)bp;
+               mpf = (struct mpf_intel *)bp;
                if ((*bp == SMP_MAGIC_IDENT) &&
-                   (mpf->mpf_length == 1) &&
+                   (mpf->length == 1) &&
                    !mpf_checksum((unsigned char *)bp, 16) &&
-                   ((mpf->mpf_specification == 1)
-                    || (mpf->mpf_specification == 4))) {
+                   ((mpf->specification == 1)
+                    || (mpf->specification == 4))) {
 #ifdef CONFIG_X86_LOCAL_APIC
                        smp_found_config = 1;
 #endif
                        mpf_found = mpf;
 
-                       printk(KERN_INFO "found SMP MP-table at [%p] %08lx\n",
-                              mpf, virt_to_phys(mpf));
+                       printk(KERN_INFO "found SMP MP-table at [%p] %llx\n",
+                              mpf, (u64)virt_to_phys(mpf));
 
                        if (!reserve)
                                return 1;
                        reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE,
                                        BOOTMEM_DEFAULT);
-                       if (mpf->mpf_physptr) {
+                       if (mpf->physptr) {
                                unsigned long size = PAGE_SIZE;
 #ifdef CONFIG_X86_32
                                /*
@@ -722,14 +708,14 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
                                 * the bottom is mapped now.
                                 * PC-9800's MPC table places on the very last
                                 * of physical memory; so that simply reserving
-                                * PAGE_SIZE from mpg->mpf_physptr yields BUG()
+                                * PAGE_SIZE from mpf->physptr yields BUG()
                                 * in reserve_bootmem.
                                 */
                                unsigned long end = max_low_pfn * PAGE_SIZE;
-                               if (mpf->mpf_physptr + size > end)
-                                       size = end - mpf->mpf_physptr;
+                               if (mpf->physptr + size > end)
+                                       size = end - mpf->physptr;
 #endif
-                               reserve_bootmem_generic(mpf->mpf_physptr, size,
+                               reserve_bootmem_generic(mpf->physptr, size,
                                                BOOTMEM_DEFAULT);
                        }
 
@@ -809,15 +795,15 @@ static int  __init get_MP_intsrc_index(struct mpc_intsrc *m)
        /* not legacy */
 
        for (i = 0; i < mp_irq_entries; i++) {
-               if (mp_irqs[i].mp_irqtype != mp_INT)
+               if (mp_irqs[i].irqtype != mp_INT)
                        continue;
 
-               if (mp_irqs[i].mp_irqflag != 0x0f)
+               if (mp_irqs[i].irqflag != 0x0f)
                        continue;
 
-               if (mp_irqs[i].mp_srcbus != m->srcbus)
+               if (mp_irqs[i].srcbus != m->srcbus)
                        continue;
-               if (mp_irqs[i].mp_srcbusirq != m->srcbusirq)
+               if (mp_irqs[i].srcbusirq != m->srcbusirq)
                        continue;
                if (irq_used[i]) {
                        /* already claimed */
@@ -922,10 +908,10 @@ static int  __init replace_intsrc_all(struct mpc_table *mpc,
                if (irq_used[i])
                        continue;
 
-               if (mp_irqs[i].mp_irqtype != mp_INT)
+               if (mp_irqs[i].irqtype != mp_INT)
                        continue;
 
-               if (mp_irqs[i].mp_irqflag != 0x0f)
+               if (mp_irqs[i].irqflag != 0x0f)
                        continue;
 
                if (nr_m_spare > 0) {
@@ -1001,7 +987,7 @@ static int __init update_mp_table(void)
 {
        char str[16];
        char oem[10];
-       struct intel_mp_floating *mpf;
+       struct mpf_intel *mpf;
        struct mpc_table *mpc, *mpc_new;
 
        if (!enable_update_mptable)
@@ -1014,19 +1000,19 @@ static int __init update_mp_table(void)
        /*
         * Now see if we need to go further.
         */
-       if (mpf->mpf_feature1 != 0)
+       if (mpf->feature1 != 0)
                return 0;
 
-       if (!mpf->mpf_physptr)
+       if (!mpf->physptr)
                return 0;
 
-       mpc = phys_to_virt(mpf->mpf_physptr);
+       mpc = phys_to_virt(mpf->physptr);
 
        if (!smp_check_mpc(mpc, oem, str))
                return 0;
 
-       printk(KERN_INFO "mpf: %lx\n", virt_to_phys(mpf));
-       printk(KERN_INFO "mpf_physptr: %x\n", mpf->mpf_physptr);
+       printk(KERN_INFO "mpf: %llx\n", (u64)virt_to_phys(mpf));
+       printk(KERN_INFO "physptr: %x\n", mpf->physptr);
 
        if (mpc_new_phys && mpc->length > mpc_new_length) {
                mpc_new_phys = 0;
@@ -1047,23 +1033,23 @@ static int __init update_mp_table(void)
                }
                printk(KERN_INFO "use in-positon replacing\n");
        } else {
-               mpf->mpf_physptr = mpc_new_phys;
+               mpf->physptr = mpc_new_phys;
                mpc_new = phys_to_virt(mpc_new_phys);
                memcpy(mpc_new, mpc, mpc->length);
                mpc = mpc_new;
                /* check if we can modify that */
-               if (mpc_new_phys - mpf->mpf_physptr) {
-                       struct intel_mp_floating *mpf_new;
+               if (mpc_new_phys - mpf->physptr) {
+                       struct mpf_intel *mpf_new;
                        /* steal 16 bytes from [0, 1k) */
                        printk(KERN_INFO "mpf new: %x\n", 0x400 - 16);
                        mpf_new = phys_to_virt(0x400 - 16);
                        memcpy(mpf_new, mpf, 16);
                        mpf = mpf_new;
-                       mpf->mpf_physptr = mpc_new_phys;
+                       mpf->physptr = mpc_new_phys;
                }
-               mpf->mpf_checksum = 0;
-               mpf->mpf_checksum -= mpf_checksum((unsigned char *)mpf, 16);
-               printk(KERN_INFO "mpf_physptr new: %x\n", mpf->mpf_physptr);
+               mpf->checksum = 0;
+               mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
+               printk(KERN_INFO "physptr new: %x\n", mpf->physptr);
        }
 
        /*
index 7262666..3cf3413 100644
 #include <linux/device.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
+#include <linux/uaccess.h>
 
 #include <asm/processor.h>
 #include <asm/msr.h>
-#include <asm/uaccess.h>
 #include <asm/system.h>
 
 static struct class *msr_class;
index 7228979..bdfad80 100644
@@ -34,7 +34,7 @@
 
 #include <asm/mce.h>
 
-#include <mach_traps.h>
+#include <asm/mach_traps.h>
 
 int unknown_nmi_panic;
 int nmi_watchdog_enabled;
@@ -61,11 +61,7 @@ static int endflag __initdata;
 
 static inline unsigned int get_nmi_count(int cpu)
 {
-#ifdef CONFIG_X86_64
-       return cpu_pda(cpu)->__nmi_count;
-#else
-       return nmi_count(cpu);
-#endif
+       return per_cpu(irq_stat, cpu).__nmi_count;
 }
 
 static inline int mce_in_progress(void)
@@ -82,12 +78,8 @@ static inline int mce_in_progress(void)
  */
 static inline unsigned int get_timer_irqs(int cpu)
 {
-#ifdef CONFIG_X86_64
-       return read_pda(apic_timer_irqs) + read_pda(irq0_irqs);
-#else
        return per_cpu(irq_stat, cpu).apic_timer_irqs +
                per_cpu(irq_stat, cpu).irq0_irqs;
-#endif
 }
 
 #ifdef CONFIG_SMP
index f2191d4..0cc41a1 100644
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2002, IBM Corp.
  *
- * All rights reserved.          
+ * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * Send feedback to <gone@us.ibm.com>
  */
 
-#include <linux/mm.h>
+#include <linux/nodemask.h>
 #include <linux/bootmem.h>
 #include <linux/mmzone.h>
 #include <linux/module.h>
-#include <linux/nodemask.h>
-#include <asm/numaq.h>
-#include <asm/topology.h>
+#include <linux/mm.h>
+
 #include <asm/processor.h>
+#include <asm/topology.h>
 #include <asm/genapic.h>
-#include <asm/e820.h>
+#include <asm/numaq.h>
 #include <asm/setup.h>
+#include <asm/e820.h>
 
 #define        MB_TO_PAGES(addr) ((addr) << (20 - PAGE_SHIFT))
 
@@ -91,19 +92,20 @@ static int __init numaq_pre_time_init(void)
 }
 
 int found_numaq;
+
 /*
  * Have to match translation table entries to main table entries by counter
  * hence the mpc_record variable .... can't see a less disgusting way of
  * doing this ....
  */
 struct mpc_config_translation {
-       unsigned char mpc_type;
-       unsigned char trans_len;
-       unsigned char trans_type;
-       unsigned char trans_quad;
-       unsigned char trans_global;
-       unsigned char trans_local;
-       unsigned short trans_reserved;
+       unsigned char           mpc_type;
+       unsigned char           trans_len;
+       unsigned char           trans_type;
+       unsigned char           trans_quad;
+       unsigned char           trans_global;
+       unsigned char           trans_local;
+       unsigned short          trans_reserved;
 };
 
 /* x86_quirks member */
@@ -236,7 +238,7 @@ static int __init numaq_setup_ioapic_ids(void)
 
 static int __init numaq_update_genapic(void)
 {
-       genapic->wakeup_cpu = wakeup_secondary_cpu_via_nmi;
+       apic->wakeup_cpu = wakeup_secondary_cpu_via_nmi;
 
        return 0;
 }
@@ -291,3 +293,280 @@ int __init get_memcfg_numaq(void)
        smp_dump_qct();
        return 1;
 }
+
+/*
+ * APIC driver for the IBM NUMAQ chipset.
+ */
+#define APIC_DEFINITION 1
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <asm/mpspec.h>
+#include <asm/genapic.h>
+#include <asm/fixmap.h>
+#include <asm/apicdef.h>
+#include <asm/ipi.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/numa.h>
+#include <linux/smp.h>
+#include <asm/numaq.h>
+#include <asm/io.h>
+#include <linux/mmzone.h>
+#include <linux/nodemask.h>
+
+#define NUMAQ_APIC_DFR_VALUE   (APIC_DFR_CLUSTER)
+
+static inline unsigned int numaq_get_apic_id(unsigned long x)
+{
+       return (x >> 24) & 0x0F;
+}
+
+static inline void numaq_send_IPI_mask(const struct cpumask *mask, int vector)
+{
+       default_send_IPI_mask_sequence_logical(mask, vector);
+}
+
+static inline void numaq_send_IPI_allbutself(int vector)
+{
+       default_send_IPI_mask_allbutself_logical(cpu_online_mask, vector);
+}
+
+static inline void numaq_send_IPI_all(int vector)
+{
+       numaq_send_IPI_mask(cpu_online_mask, vector);
+}
+
+extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
+
+#define NUMAQ_TRAMPOLINE_PHYS_LOW (0x8)
+#define NUMAQ_TRAMPOLINE_PHYS_HIGH (0xa)
+
+/*
+ * Because we use NMIs rather than the INIT-STARTUP sequence to
+ * bootstrap the CPUs, the APIC may be in a weird state. Kick it:
+ */
+static inline void numaq_smp_callin_clear_local_apic(void)
+{
+       clear_local_APIC();
+}
+
+static inline void
+numaq_store_NMI_vector(unsigned short *high, unsigned short *low)
+{
+       printk("Storing NMI vector\n");
+       *high =
+         *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_HIGH));
+       *low =
+         *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_LOW));
+}
+
+static inline const cpumask_t *numaq_target_cpus(void)
+{
+       return &CPU_MASK_ALL;
+}
+
+static inline unsigned long
+numaq_check_apicid_used(physid_mask_t bitmap, int apicid)
+{
+       return physid_isset(apicid, bitmap);
+}
+
+static inline unsigned long numaq_check_apicid_present(int bit)
+{
+       return physid_isset(bit, phys_cpu_present_map);
+}
+
+#define apicid_cluster(apicid) (apicid & 0xF0)
+
+static inline int numaq_apic_id_registered(void)
+{
+       return 1;
+}
+
+static inline void numaq_init_apic_ldr(void)
+{
+       /* Already done in NUMA-Q firmware */
+}
+
+static inline void numaq_setup_apic_routing(void)
+{
+       printk("Enabling APIC mode:  %s.  Using %d I/O APICs\n",
+               "NUMA-Q", nr_ioapics);
+}
+
+/*
+ * Skip adding the timer int on secondary nodes, which causes
+ * a small but painful rift in the time-space continuum.
+ */
+static inline int numaq_multi_timer_check(int apic, int irq)
+{
+       return apic != 0 && irq == 0;
+}
+
+static inline physid_mask_t numaq_ioapic_phys_id_map(physid_mask_t phys_map)
+{
+       /* We don't have a good way to do this yet - hack */
+       return physids_promote(0xFUL);
+}
+
+/* Mapping from cpu number to logical apicid */
+extern u8 cpu_2_logical_apicid[];
+
+static inline int numaq_cpu_to_logical_apicid(int cpu)
+{
+       if (cpu >= nr_cpu_ids)
+               return BAD_APICID;
+       return (int)cpu_2_logical_apicid[cpu];
+}
+
+/*
+ * Supporting over 60 cpus on NUMA-Q requires a locality-dependent
+ * cpu to APIC ID relation to properly interact with the intelligent
+ * mode of the cluster controller.
+ */
+static inline int numaq_cpu_present_to_apicid(int mps_cpu)
+{
+       if (mps_cpu < 60)
+               return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
+       else
+               return BAD_APICID;
+}
+
+static inline int numaq_apicid_to_node(int logical_apicid) 
+{
+       return logical_apicid >> 4;
+}
+
+static inline physid_mask_t numaq_apicid_to_cpu_present(int logical_apicid)
+{
+       int node = numaq_apicid_to_node(logical_apicid);
+       int cpu = __ffs(logical_apicid & 0xf);
+
+       return physid_mask_of_physid(cpu + 4*node);
+}
+
+/* Where the IO area was mapped on multiquad, always 0 otherwise */
+void *xquad_portio;
+
+static inline int numaq_check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+       return 1;
+}
+
+/*
+ * We use physical apicids here, not logical, so just return the default
+ * physical broadcast to stop people from breaking us
+ */
+static inline unsigned int numaq_cpu_mask_to_apicid(const cpumask_t *cpumask)
+{
+       return 0x0F;
+}
+
+static inline unsigned int
+numaq_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                            const struct cpumask *andmask)
+{
+       return 0x0F;
+}
+
+/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */
+static inline int numaq_phys_pkg_id(int cpuid_apic, int index_msb)
+{
+       return cpuid_apic >> index_msb;
+}
+static int __numaq_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
+{
+       numaq_mps_oem_check(mpc, oem, productid);
+       return found_numaq;
+}
+
+static int probe_numaq(void)
+{
+       /* already know from get_memcfg_numaq() */
+       return found_numaq;
+}
+
+static void numaq_vector_allocation_domain(int cpu, cpumask_t *retmask)
+{
+       /* Careful. Some cpus do not strictly honor the set of cpus
+        * specified in the interrupt destination when using lowest
+        * priority interrupt delivery mode.
+        *
+        * In particular there was a hyperthreading cpu observed to
+        * deliver interrupts to the wrong hyperthread when only one
+        * hyperthread was specified in the interrupt destination.
+        */
+       *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
+}
+
+static void numaq_setup_portio_remap(void)
+{
+       int num_quads = num_online_nodes();
+
+       if (num_quads <= 1)
+               return;
+
+       printk("Remapping cross-quad port I/O for %d quads\n", num_quads);
+       xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD);
+       printk("xquad_portio vaddr 0x%08lx, len %08lx\n",
+               (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
+}
+
+struct genapic apic_numaq = {
+
+       .name                           = "NUMAQ",
+       .probe                          = probe_numaq,
+       .acpi_madt_oem_check            = NULL,
+       .apic_id_registered             = numaq_apic_id_registered,
+
+       .irq_delivery_mode              = dest_LowestPrio,
+       /* physical delivery on LOCAL quad: */
+       .irq_dest_mode                  = 0,
+
+       .target_cpus                    = numaq_target_cpus,
+       .disable_esr                    = 1,
+       .dest_logical                   = APIC_DEST_LOGICAL,
+       .check_apicid_used              = numaq_check_apicid_used,
+       .check_apicid_present           = numaq_check_apicid_present,
+
+       .vector_allocation_domain       = numaq_vector_allocation_domain,
+       .init_apic_ldr                  = numaq_init_apic_ldr,
+
+       .ioapic_phys_id_map             = numaq_ioapic_phys_id_map,
+       .setup_apic_routing             = numaq_setup_apic_routing,
+       .multi_timer_check              = numaq_multi_timer_check,
+       .apicid_to_node                 = numaq_apicid_to_node,
+       .cpu_to_logical_apicid          = numaq_cpu_to_logical_apicid,
+       .cpu_present_to_apicid          = numaq_cpu_present_to_apicid,
+       .apicid_to_cpu_present          = numaq_apicid_to_cpu_present,
+       .setup_portio_remap             = numaq_setup_portio_remap,
+       .check_phys_apicid_present      = numaq_check_phys_apicid_present,
+       .enable_apic_mode               = NULL,
+       .phys_pkg_id                    = numaq_phys_pkg_id,
+       .mps_oem_check                  = __numaq_mps_oem_check,
+
+       .get_apic_id                    = numaq_get_apic_id,
+       .set_apic_id                    = NULL,
+       .apic_id_mask                   = 0x0F << 24,
+
+       .cpu_mask_to_apicid             = numaq_cpu_mask_to_apicid,
+       .cpu_mask_to_apicid_and         = numaq_cpu_mask_to_apicid_and,
+
+       .send_IPI_mask                  = numaq_send_IPI_mask,
+       .send_IPI_mask_allbutself       = NULL,
+       .send_IPI_allbutself            = numaq_send_IPI_allbutself,
+       .send_IPI_all                   = numaq_send_IPI_all,
+       .send_IPI_self                  = default_send_IPI_self,
+
+       .wakeup_cpu                     = NULL,
+       .trampoline_phys_low            = NUMAQ_TRAMPOLINE_PHYS_LOW,
+       .trampoline_phys_high           = NUMAQ_TRAMPOLINE_PHYS_HIGH,
+
+       /* We don't do anything here because we use NMIs to boot instead */
+       .wait_for_init_deassert         = NULL,
+
+       .smp_callin_clear_local_apic    = numaq_smp_callin_clear_local_apic,
+       .store_NMI_vector               = numaq_store_NMI_vector,
+       .inquire_remote_apic            = NULL,
+};
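
The cluster-mode mapping used by this driver packs four CPUs into each quad: bits 7:4 of the logical APIC ID select the quad (node) and a one-hot bit in 3:0 selects the CPU within it, which is exactly what numaq_cpu_present_to_apicid() and numaq_apicid_to_node() above compute. A self-contained sketch of that round trip (hypothetical helper names, user-space C used only to illustrate the bit layout):

/* Sketch: NUMA-Q logical APIC ID packing, four CPUs per quad. */
#include <stdio.h>

static int quad_apicid(int cpu)		/* cf. numaq_cpu_present_to_apicid() */
{
	if (cpu >= 60)
		return -1;		/* the driver returns BAD_APICID here */
	return ((cpu >> 2) << 4) | (1 << (cpu & 0x3));
}

static int apicid_quad(int apicid)	/* cf. numaq_apicid_to_node() */
{
	return apicid >> 4;
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < 8; cpu++)
		printf("cpu %d -> apicid 0x%02x (quad %d)\n",
		       cpu, quad_apicid(cpu), apicid_quad(quad_apicid(cpu)));
	return 0;
}

For example, CPU 5 maps to logical APIC ID 0x12 in quad 1; the "mps_cpu < 60" limit matches 15 quads of 4 CPUs each.
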
index 95777b0..3a7c5a4 100644 (file)
@@ -26,13 +26,3 @@ struct pv_lock_ops pv_lock_ops = {
 };
 EXPORT_SYMBOL(pv_lock_ops);
 
-void __init paravirt_use_bytelocks(void)
-{
-#ifdef CONFIG_SMP
-       pv_lock_ops.spin_is_locked = __byte_spin_is_locked;
-       pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
-       pv_lock_ops.spin_lock = __byte_spin_lock;
-       pv_lock_ops.spin_trylock = __byte_spin_trylock;
-       pv_lock_ops.spin_unlock = __byte_spin_unlock;
-#endif
-}
index e4c8fb6..cea11c8 100644 (file)
@@ -44,6 +44,17 @@ void _paravirt_nop(void)
 {
 }
 
+/* identity function, which can be inlined */
+u32 _paravirt_ident_32(u32 x)
+{
+       return x;
+}
+
+u64 _paravirt_ident_64(u64 x)
+{
+       return x;
+}
+
 static void __init default_banner(void)
 {
        printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
@@ -138,9 +149,16 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
        if (opfunc == NULL)
                /* If there's no function, patch it with a ud2a (BUG) */
                ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
-       else if (opfunc == paravirt_nop)
+       else if (opfunc == _paravirt_nop)
                /* If the operation is a nop, then nop the callsite */
                ret = paravirt_patch_nop();
+
+       /* identity functions just return their single argument */
+       else if (opfunc == _paravirt_ident_32)
+               ret = paravirt_patch_ident_32(insnbuf, len);
+       else if (opfunc == _paravirt_ident_64)
+               ret = paravirt_patch_ident_64(insnbuf, len);
+
        else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
                 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
                 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) ||
@@ -292,10 +310,10 @@ struct pv_time_ops pv_time_ops = {
 
 struct pv_irq_ops pv_irq_ops = {
        .init_IRQ = native_init_IRQ,
-       .save_fl = native_save_fl,
-       .restore_fl = native_restore_fl,
-       .irq_disable = native_irq_disable,
-       .irq_enable = native_irq_enable,
+       .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
+       .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
+       .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
+       .irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
        .safe_halt = native_safe_halt,
        .halt = native_halt,
 #ifdef CONFIG_X86_64
@@ -373,6 +391,14 @@ struct pv_apic_ops pv_apic_ops = {
 #endif
 };
 
+#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
+/* 32-bit pagetable entries */
+#define PTE_IDENT      __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
+#else
+/* 64-bit pagetable entries */
+#define PTE_IDENT      __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
+#endif
+
 struct pv_mmu_ops pv_mmu_ops = {
 #ifndef CONFIG_X86_64
        .pagetable_setup_start = native_pagetable_setup_start,
@@ -424,22 +450,23 @@ struct pv_mmu_ops pv_mmu_ops = {
        .pmd_clear = native_pmd_clear,
 #endif
        .set_pud = native_set_pud,
-       .pmd_val = native_pmd_val,
-       .make_pmd = native_make_pmd,
+
+       .pmd_val = PTE_IDENT,
+       .make_pmd = PTE_IDENT,
 
 #if PAGETABLE_LEVELS == 4
-       .pud_val = native_pud_val,
-       .make_pud = native_make_pud,
+       .pud_val = PTE_IDENT,
+       .make_pud = PTE_IDENT,
+
        .set_pgd = native_set_pgd,
 #endif
 #endif /* PAGETABLE_LEVELS >= 3 */
 
-       .pte_val = native_pte_val,
-       .pte_flags = native_pte_flags,
-       .pgd_val = native_pgd_val,
+       .pte_val = PTE_IDENT,
+       .pgd_val = PTE_IDENT,
 
-       .make_pte = native_make_pte,
-       .make_pgd = native_make_pgd,
+       .make_pte = PTE_IDENT,
+       .make_pgd = PTE_IDENT,
 
        .dup_mmap = paravirt_nop,
        .exit_mmap = paravirt_nop,
index 9fe644f..d9f32e6 100644 (file)
@@ -12,6 +12,18 @@ DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
 DEF_NATIVE(pv_cpu_ops, clts, "clts");
 DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc");
 
+unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
+{
+       /* arg in %eax, return in %eax */
+       return 0;
+}
+
+unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
+{
+       /* arg in %edx:%eax, return in %edx:%eax */
+       return 0;
+}
+
 unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                      unsigned long addr, unsigned len)
 {
index 061d01d..3f08f34 100644 (file)
@@ -19,6 +19,21 @@ DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
 DEF_NATIVE(pv_cpu_ops, usergs_sysret32, "swapgs; sysretl");
 DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");
 
+DEF_NATIVE(, mov32, "mov %edi, %eax");
+DEF_NATIVE(, mov64, "mov %rdi, %rax");
+
+unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
+{
+       return paravirt_patch_insns(insnbuf, len,
+                                   start__mov32, end__mov32);
+}
+
+unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
+{
+       return paravirt_patch_insns(insnbuf, len,
+                                   start__mov64, end__mov64);
+}
+
 unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                      unsigned long addr, unsigned len)
 {
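
The identity callbacks introduced above are what let pv_mmu_ops entries such as pte_val and make_pte disappear on native hardware: paravirt_patch_ident_64() copies the one-instruction "mov %rdi, %rax" sequence declared with DEF_NATIVE() over the indirect call site, while the 32-bit variant patches nothing because the argument already sits in %eax. A rough sketch of the copy step, under the same buffer/length idea as paravirt_patch_insns() (illustrative only, not the kernel's exact implementation):

/*
 * Sketch: splice a short native instruction sequence into the patch
 * buffer when it fits; otherwise patch nothing and keep the call.
 */
static unsigned patch_native_insns(void *insnbuf, unsigned len,
				   const char *start, const char *end)
{
	unsigned insn_len = end - start;
	unsigned i;

	if (insn_len > len)
		return 0;		/* doesn't fit: leave the indirect call alone */

	for (i = 0; i < insn_len; i++)
		((char *)insnbuf)[i] = start[i];

	return insn_len;		/* remaining bytes at the site are padded with NOPs */
}
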
diff --git a/arch/x86/kernel/probe_32.c b/arch/x86/kernel/probe_32.c
new file mode 100644 (file)
index 0000000..22337b7
--- /dev/null
@@ -0,0 +1,411 @@
+/*
+ * Default generic APIC driver. This handles up to 8 CPUs.
+ *
+ * Copyright 2003 Andi Kleen, SuSE Labs.
+ * Subject to the GNU Public License, v.2
+ *
+ * Generic x86 APIC driver probe layer.
+ */
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <asm/fixmap.h>
+#include <asm/mpspec.h>
+#include <asm/apicdef.h>
+#include <asm/genapic.h>
+#include <asm/setup.h>
+
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <asm/mpspec.h>
+#include <asm/genapic.h>
+#include <asm/fixmap.h>
+#include <asm/apicdef.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <asm/genapic.h>
+#include <asm/ipi.h>
+
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <asm/acpi.h>
+#include <asm/arch_hooks.h>
+#include <asm/e820.h>
+#include <asm/setup.h>
+
+#include <asm/genapic.h>
+
+#ifdef CONFIG_HOTPLUG_CPU
+#define DEFAULT_SEND_IPI       (1)
+#else
+#define DEFAULT_SEND_IPI       (0)
+#endif
+
+int no_broadcast = DEFAULT_SEND_IPI;
+
+#ifdef CONFIG_X86_LOCAL_APIC
+
+static void default_vector_allocation_domain(int cpu, struct cpumask *retmask)
+{
+       /*
+        * Careful. Some cpus do not strictly honor the set of cpus
+        * specified in the interrupt destination when using lowest
+        * priority interrupt delivery mode.
+        *
+        * In particular there was a hyperthreading cpu observed to
+        * deliver interrupts to the wrong hyperthread when only one
+        * hyperthread was specified in the interrupt destination.
+        */
+       *retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
+}
+
+/* should be called last. */
+static int probe_default(void)
+{
+       return 1;
+}
+
+struct genapic apic_default = {
+
+       .name                           = "default",
+       .probe                          = probe_default,
+       .acpi_madt_oem_check            = NULL,
+       .apic_id_registered             = default_apic_id_registered,
+
+       .irq_delivery_mode              = dest_LowestPrio,
+       /* logical delivery broadcast to all CPUs: */
+       .irq_dest_mode                  = 1,
+
+       .target_cpus                    = default_target_cpus,
+       .disable_esr                    = 0,
+       .dest_logical                   = APIC_DEST_LOGICAL,
+       .check_apicid_used              = default_check_apicid_used,
+       .check_apicid_present           = default_check_apicid_present,
+
+       .vector_allocation_domain       = default_vector_allocation_domain,
+       .init_apic_ldr                  = default_init_apic_ldr,
+
+       .ioapic_phys_id_map             = default_ioapic_phys_id_map,
+       .setup_apic_routing             = default_setup_apic_routing,
+       .multi_timer_check              = NULL,
+       .apicid_to_node                 = default_apicid_to_node,
+       .cpu_to_logical_apicid          = default_cpu_to_logical_apicid,
+       .cpu_present_to_apicid          = default_cpu_present_to_apicid,
+       .apicid_to_cpu_present          = default_apicid_to_cpu_present,
+       .setup_portio_remap             = NULL,
+       .check_phys_apicid_present      = default_check_phys_apicid_present,
+       .enable_apic_mode               = NULL,
+       .phys_pkg_id                    = default_phys_pkg_id,
+       .mps_oem_check                  = NULL,
+
+       .get_apic_id                    = default_get_apic_id,
+       .set_apic_id                    = NULL,
+       .apic_id_mask                   = 0x0F << 24,
+
+       .cpu_mask_to_apicid             = default_cpu_mask_to_apicid,
+       .cpu_mask_to_apicid_and         = default_cpu_mask_to_apicid_and,
+
+       .send_IPI_mask                  = default_send_IPI_mask_logical,
+       .send_IPI_mask_allbutself       = default_send_IPI_mask_allbutself_logical,
+       .send_IPI_allbutself            = default_send_IPI_allbutself,
+       .send_IPI_all                   = default_send_IPI_all,
+       .send_IPI_self                  = default_send_IPI_self,
+
+       .wakeup_cpu                     = NULL,
+       .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
+       .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+
+       .wait_for_init_deassert         = default_wait_for_init_deassert,
+
+       .smp_callin_clear_local_apic    = NULL,
+       .store_NMI_vector               = NULL,
+       .inquire_remote_apic            = default_inquire_remote_apic,
+};
+
+extern struct genapic apic_numaq;
+extern struct genapic apic_summit;
+extern struct genapic apic_bigsmp;
+extern struct genapic apic_es7000;
+extern struct genapic apic_default;
+
+struct genapic *apic = &apic_default;
+
+static struct genapic *apic_probe[] __initdata = {
+#ifdef CONFIG_X86_NUMAQ
+       &apic_numaq,
+#endif
+#ifdef CONFIG_X86_SUMMIT
+       &apic_summit,
+#endif
+#ifdef CONFIG_X86_BIGSMP
+       &apic_bigsmp,
+#endif
+#ifdef CONFIG_X86_ES7000
+       &apic_es7000,
+#endif
+       &apic_default,  /* must be last */
+       NULL,
+};
+
+static int cmdline_apic __initdata;
+static int __init parse_apic(char *arg)
+{
+       int i;
+
+       if (!arg)
+               return -EINVAL;
+
+       for (i = 0; apic_probe[i]; i++) {
+               if (!strcmp(apic_probe[i]->name, arg)) {
+                       apic = apic_probe[i];
+                       cmdline_apic = 1;
+                       return 0;
+               }
+       }
+
+       if (x86_quirks->update_genapic)
+               x86_quirks->update_genapic();
+
+       /* Parsed again by __setup for debug/verbose */
+       return 0;
+}
+early_param("apic", parse_apic);
+
+void __init generic_bigsmp_probe(void)
+{
+#ifdef CONFIG_X86_BIGSMP
+       /*
+        * This routine is used to switch to bigsmp mode when
+        * - There is no apic= option specified by the user
+        * - generic_apic_probe() has chosen apic_default as the sub_arch
+        * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support
+        */
+
+       if (!cmdline_apic && apic == &apic_default) {
+               if (apic_bigsmp.probe()) {
+                       apic = &apic_bigsmp;
+                       if (x86_quirks->update_genapic)
+                               x86_quirks->update_genapic();
+                       printk(KERN_INFO "Overriding APIC driver with %s\n",
+                              apic->name);
+               }
+       }
+#endif
+}
+
+void __init generic_apic_probe(void)
+{
+       if (!cmdline_apic) {
+               int i;
+               for (i = 0; apic_probe[i]; i++) {
+                       if (apic_probe[i]->probe()) {
+                               apic = apic_probe[i];
+                               break;
+                       }
+               }
+               /* Not visible without early console */
+               if (!apic_probe[i])
+                       panic("Didn't find an APIC driver");
+
+               if (x86_quirks->update_genapic)
+                       x86_quirks->update_genapic();
+       }
+       printk(KERN_INFO "Using APIC driver %s\n", apic->name);
+}
+
+/* These functions can switch the APIC even after the initial ->probe() */
+
+int __init
+generic_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
+{
+       int i;
+
+       for (i = 0; apic_probe[i]; ++i) {
+               if (!apic_probe[i]->mps_oem_check)
+                       continue;
+               if (!apic_probe[i]->mps_oem_check(mpc, oem, productid))
+                       continue;
+
+               if (!cmdline_apic) {
+                       apic = apic_probe[i];
+                       if (x86_quirks->update_genapic)
+                               x86_quirks->update_genapic();
+                       printk(KERN_INFO "Switched to APIC driver `%s'.\n",
+                              apic->name);
+               }
+               return 1;
+       }
+       return 0;
+}
+
+int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+       int i;
+
+       for (i = 0; apic_probe[i]; ++i) {
+               if (!apic_probe[i]->acpi_madt_oem_check)
+                       continue;
+               if (!apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id))
+                       continue;
+
+               if (!cmdline_apic) {
+                       apic = apic_probe[i];
+                       if (x86_quirks->update_genapic)
+                               x86_quirks->update_genapic();
+                       printk(KERN_INFO "Switched to APIC driver `%s'.\n",
+                              apic->name);
+               }
+               return 1;
+       }
+       return 0;
+}
+
+#endif /* CONFIG_X86_LOCAL_APIC */
+
+/**
+ * pre_intr_init_hook - initialisation prior to setting up interrupt vectors
+ *
+ * Description:
+ *     Perform any necessary interrupt initialisation prior to setting up
+ *     the "ordinary" interrupt call gates.  For legacy reasons, the ISA
+ *     interrupts should be initialised here if the machine emulates a PC
+ *     in any way.
+ **/
+void __init pre_intr_init_hook(void)
+{
+       if (x86_quirks->arch_pre_intr_init) {
+               if (x86_quirks->arch_pre_intr_init())
+                       return;
+       }
+       init_ISA_irqs();
+}
+
+/**
+ * intr_init_hook - post gate setup interrupt initialisation
+ *
+ * Description:
+ *     Fill in any interrupts that may have been left out by the general
+ *     init_IRQ() routine.  Interrupts having to do with the machine rather
+ *     than the devices on the I/O bus (like APIC interrupts in Intel MP
+ *     systems) are started here.
+ **/
+void __init intr_init_hook(void)
+{
+       if (x86_quirks->arch_intr_init) {
+               if (x86_quirks->arch_intr_init())
+                       return;
+       }
+}
+
+/**
+ * pre_setup_arch_hook - hook called prior to any setup_arch() execution
+ *
+ * Description:
+ *     Generally used to activate any machine-specific identification
+ *     routines that may be needed before setup_arch() runs.  On Voyager
+ *     this is used to get the board revision and type.
+ **/
+void __init pre_setup_arch_hook(void)
+{
+}
+
+/**
+ * trap_init_hook - initialise system specific traps
+ *
+ * Description:
+ *     Called as the final act of trap_init().  Used in VISWS to initialise
+ *     the various board specific APIC traps.
+ **/
+void __init trap_init_hook(void)
+{
+       if (x86_quirks->arch_trap_init) {
+               if (x86_quirks->arch_trap_init())
+                       return;
+       }
+}
+
+static struct irqaction irq0  = {
+       .handler = timer_interrupt,
+       .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL,
+       .mask = CPU_MASK_NONE,
+       .name = "timer"
+};
+
+/**
+ * pre_time_init_hook - do any specific initialisations before time init.
+ *
+ **/
+void __init pre_time_init_hook(void)
+{
+       if (x86_quirks->arch_pre_time_init)
+               x86_quirks->arch_pre_time_init();
+}
+
+/**
+ * time_init_hook - do any specific initialisations for the system timer.
+ *
+ * Description:
+ *     Must plug the system timer interrupt source at HZ into the IRQ listed
+ *     in irq_vectors.h:TIMER_IRQ
+ **/
+void __init time_init_hook(void)
+{
+       if (x86_quirks->arch_time_init) {
+               /*
+                * A nonzero return code does not mean failure, it means
+                * that the architecture quirk does not want any
+                * generic (timer) setup to be performed after this:
+                */
+               if (x86_quirks->arch_time_init())
+                       return;
+       }
+
+       irq0.mask = cpumask_of_cpu(0);
+       setup_irq(0, &irq0);
+}
+
+#ifdef CONFIG_MCA
+/**
+ * mca_nmi_hook - hook into MCA specific NMI chain
+ *
+ * Description:
+ *     The MCA (Microchannel Architecture) has an NMI chain for NMI sources
+ *     along the MCA bus.  Use this to hook into that chain if you need
+ *     it.
+ **/
+void mca_nmi_hook(void)
+{
+       /*
+        * If I recall correctly, there's a whole bunch of other things that
+        * we can do to check for NMI problems, but that's all I know about
+        * at the moment.
+        */
+       pr_warning("NMI generated from unknown source!\n");
+}
+#endif
+
+static __init int no_ipi_broadcast(char *str)
+{
+       get_option(&str, &no_broadcast);
+       pr_info("Using %s mode\n",
+               no_broadcast ? "No IPI Broadcast" : "IPI Broadcast");
+       return 1;
+}
+__setup("no_ipi_broadcast=", no_ipi_broadcast);
+
+static int __init print_ipi_mode(void)
+{
+       pr_info("Using IPI %s mode\n",
+               no_broadcast ? "No-Shortcut" : "Shortcut");
+       return 0;
+}
+
+late_initcall(print_ipi_mode);
+
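
Both knobs defined above are reachable from the kernel command line: parse_apic() matches the "apic=" argument against each driver's ->name string, and no_ipi_broadcast() chooses between the shortcut and no-shortcut IPI paths. Illustrative boot parameters (names follow the ->name fields in this series; the exact set available depends on which sub-architecture drivers are configured in):

    apic=default         # pin the generic up-to-8-CPU driver instead of probing
    apic=NUMAQ           # select the NUMA-Q driver by name
    no_ipi_broadcast=1   # use the no-shortcut ("No IPI Broadcast") mode
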
index 675a48c..071e7fe 100644 (file)
@@ -18,7 +18,7 @@
 #include <asm/setup.h>
 #include <asm/sections.h>
 #include <asm/io.h>
-#include <setup_arch.h>
+#include <asm/setup_arch.h>
 
 static struct resource system_rom_resource = {
        .name   = "System ROM",
index 6d12f7e..87b69d4 100644 (file)
@@ -350,7 +350,7 @@ static void c1e_idle(void)
 
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_SMP
        if (pm_idle == poll_idle && smp_num_siblings > 1) {
                printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
                        " performance may degrade.\n");
index a546f55..fec79ad 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <stdarg.h>
 
+#include <linux/stackprotector.h>
 #include <linux/cpu.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
@@ -66,9 +67,6 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
 EXPORT_PER_CPU_SYMBOL(current_task);
 
-DEFINE_PER_CPU(int, cpu_number);
-EXPORT_PER_CPU_SYMBOL(cpu_number);
-
 /*
  * Return saved PC of a blocked thread.
  */
@@ -94,6 +92,15 @@ void cpu_idle(void)
 {
        int cpu = smp_processor_id();
 
+       /*
+        * If we're the non-boot CPU, nothing set the stack canary up
+        * for us.  CPU0 already has it initialized but no harm in
+        * doing it again.  This is a good place for updating it, as
+        * we won't ever return from this function (so the invalid
+        * canaries already on the stack won't ever trigger).
+        */
+       boot_init_stack_canary();
+
        current_thread_info()->status |= TS_POLLING;
 
        /* endless idle loop with no priority at all */
@@ -111,7 +118,6 @@ void cpu_idle(void)
                                play_dead();
 
                        local_irq_disable();
-                       __get_cpu_var(irq_stat).idle_timestamp = jiffies;
                        /* Don't trace irqs off for idle */
                        stop_critical_timings();
                        pm_idle();
@@ -135,7 +141,7 @@ void __show_regs(struct pt_regs *regs, int all)
        if (user_mode_vm(regs)) {
                sp = regs->sp;
                ss = regs->ss & 0xffff;
-               savesegment(gs, gs);
+               gs = get_user_gs(regs);
        } else {
                sp = (unsigned long) (&regs->sp);
                savesegment(ss, ss);
@@ -216,6 +222,7 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
        regs.ds = __USER_DS;
        regs.es = __USER_DS;
        regs.fs = __KERNEL_PERCPU;
+       regs.gs = __KERNEL_STACK_CANARY;
        regs.orig_ax = -1;
        regs.ip = (unsigned long) kernel_thread_helper;
        regs.cs = __KERNEL_CS | get_kernel_rpl();
@@ -308,7 +315,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 
        p->thread.ip = (unsigned long) ret_from_fork;
 
-       savesegment(gs, p->thread.gs);
+       task_user_gs(p) = get_user_gs(regs);
 
        tsk = current;
        if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
@@ -346,7 +353,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 void
 start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 {
-       __asm__("movl %0, %%gs" : : "r"(0));
+       set_user_gs(regs, 0);
        regs->fs                = 0;
        set_fs(USER_DS);
        regs->ds                = __USER_DS;
@@ -543,7 +550,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         * used %fs or %gs (it does not today), or if the kernel is
         * running inside of a hypervisor layer.
         */
-       savesegment(gs, prev->gs);
+       lazy_save_gs(prev->gs);
 
        /*
         * Load the per-thread Thread-Local Storage descriptor.
@@ -589,31 +596,31 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         * Restore %gs if needed (which is common)
         */
        if (prev->gs | next->gs)
-               loadsegment(gs, next->gs);
+               lazy_load_gs(next->gs);
 
-       x86_write_percpu(current_task, next_p);
+       percpu_write(current_task, next_p);
 
        return prev_p;
 }
 
-asmlinkage int sys_fork(struct pt_regs regs)
+int sys_fork(struct pt_regs *regs)
 {
-       return do_fork(SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
+       return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
 }
 
-asmlinkage int sys_clone(struct pt_regs regs)
+int sys_clone(struct pt_regs *regs)
 {
        unsigned long clone_flags;
        unsigned long newsp;
        int __user *parent_tidptr, *child_tidptr;
 
-       clone_flags = regs.bx;
-       newsp = regs.cx;
-       parent_tidptr = (int __user *)regs.dx;
-       child_tidptr = (int __user *)regs.di;
+       clone_flags = regs->bx;
+       newsp = regs->cx;
+       parent_tidptr = (int __user *)regs->dx;
+       child_tidptr = (int __user *)regs->di;
        if (!newsp)
-               newsp = regs.sp;
-       return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
+               newsp = regs->sp;
+       return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr);
 }
 
 /*
@@ -626,27 +633,27 @@ asmlinkage int sys_clone(struct pt_regs regs)
  * do not have enough call-clobbered registers to hold all
  * the information you need.
  */
-asmlinkage int sys_vfork(struct pt_regs regs)
+int sys_vfork(struct pt_regs *regs)
 {
-       return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
+       return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0, NULL, NULL);
 }
 
 /*
  * sys_execve() executes a new program.
  */
-asmlinkage int sys_execve(struct pt_regs regs)
+int sys_execve(struct pt_regs *regs)
 {
        int error;
        char *filename;
 
-       filename = getname((char __user *) regs.bx);
+       filename = getname((char __user *) regs->bx);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;
        error = do_execve(filename,
-                       (char __user * __user *) regs.cx,
-                       (char __user * __user *) regs.dx,
-                       &regs);
+                       (char __user * __user *) regs->cx,
+                       (char __user * __user *) regs->dx,
+                       regs);
        if (error == 0) {
                /* Make sure we don't return using sysenter.. */
                set_thread_flag(TIF_IRET);
index 85b4cb5..836ef65 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <stdarg.h>
 
+#include <linux/stackprotector.h>
 #include <linux/cpu.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
@@ -47,7 +48,6 @@
 #include <asm/processor.h>
 #include <asm/i387.h>
 #include <asm/mmu_context.h>
-#include <asm/pda.h>
 #include <asm/prctl.h>
 #include <asm/desc.h>
 #include <asm/proto.h>
 
 asmlinkage extern void ret_from_fork(void);
 
+DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
+EXPORT_PER_CPU_SYMBOL(current_task);
+
+DEFINE_PER_CPU(unsigned long, old_rsp);
+static DEFINE_PER_CPU(unsigned char, is_idle);
+
 unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
 
 static ATOMIC_NOTIFIER_HEAD(idle_notifier);
@@ -76,13 +82,13 @@ EXPORT_SYMBOL_GPL(idle_notifier_unregister);
 
 void enter_idle(void)
 {
-       write_pda(isidle, 1);
+       percpu_write(is_idle, 1);
        atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
 }
 
 static void __exit_idle(void)
 {
-       if (test_and_clear_bit_pda(0, isidle) == 0)
+       if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
                return;
        atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
 }
@@ -112,6 +118,16 @@ static inline void play_dead(void)
 void cpu_idle(void)
 {
        current_thread_info()->status |= TS_POLLING;
+
+       /*
+        * If we're the non-boot CPU, nothing set the stack canary up
+        * for us.  CPU0 already has it initialized but no harm in
+        * doing it again.  This is a good place for updating it, as
+        * we won't ever return from this function (so the invalid
+        * canaries already on the stack won't ever trigger).
+        */
+       boot_init_stack_canary();
+
        /* endless idle loop with no priority at all */
        while (1) {
                tick_nohz_stop_sched_tick(1);
@@ -397,7 +413,7 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
        load_gs_index(0);
        regs->ip                = new_ip;
        regs->sp                = new_sp;
-       write_pda(oldrsp, new_sp);
+       percpu_write(old_rsp, new_sp);
        regs->cs                = __USER_CS;
        regs->ss                = __USER_DS;
        regs->flags             = 0x200;
@@ -618,21 +634,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        /*
         * Switch the PDA and FPU contexts.
         */
-       prev->usersp = read_pda(oldrsp);
-       write_pda(oldrsp, next->usersp);
-       write_pda(pcurrent, next_p);
+       prev->usersp = percpu_read(old_rsp);
+       percpu_write(old_rsp, next->usersp);
+       percpu_write(current_task, next_p);
 
-       write_pda(kernelstack,
+       percpu_write(kernel_stack,
                  (unsigned long)task_stack_page(next_p) +
-                 THREAD_SIZE - PDA_STACKOFFSET);
-#ifdef CONFIG_CC_STACKPROTECTOR
-       write_pda(stack_canary, next_p->stack_canary);
-       /*
-        * Build time only check to make sure the stack_canary is at
-        * offset 40 in the pda; this is a gcc ABI requirement
-        */
-       BUILD_BUG_ON(offsetof(struct x8664_pda, stack_canary) != 40);
-#endif
+                 THREAD_SIZE - KERNEL_STACK_OFFSET);
 
        /*
         * Now maybe reload the debug registers and handle I/O bitmaps
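
The boot_init_stack_canary() call added to cpu_idle() here (and in the 32-bit version above) matters because CONFIG_CC_STACKPROTECTOR makes gcc check every instrumented function's stack against a fixed per-CPU slot (%gs-relative at offset 40 on x86-64, the constraint the removed BUILD_BUG_ON above used to document). A compiler-level sketch of what that instrumentation amounts to, written as plain C rather than the generated code:

/* Sketch: what -fstack-protector conceptually adds to a function. */
unsigned long __canary_slot;			/* stand-in for the per-CPU canary */

int copy_name(const char *src)
{
	unsigned long canary = __canary_slot;	/* prologue: load the canary */
	char buf[64];
	int n = 0;

	while (src[n] && n < 63) {
		buf[n] = src[n];
		n++;
	}
	buf[n] = '\0';

	if (canary != __canary_slot)		/* epilogue: verify it */
		__builtin_trap();		/* the kernel would panic instead */
	return n;
}
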
index 0a5df5f..7ec39ab 100644 (file)
@@ -75,10 +75,7 @@ static inline bool invalid_selector(u16 value)
 static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
 {
        BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
-       regno >>= 2;
-       if (regno > FS)
-               --regno;
-       return &regs->bx + regno;
+       return &regs->bx + (regno >> 2);
 }
 
 static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
@@ -90,9 +87,10 @@ static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
        if (offset != offsetof(struct user_regs_struct, gs))
                retval = *pt_regs_access(task_pt_regs(task), offset);
        else {
-               retval = task->thread.gs;
                if (task == current)
-                       savesegment(gs, retval);
+                       retval = get_user_gs(task_pt_regs(task));
+               else
+                       retval = task_user_gs(task);
        }
        return retval;
 }
@@ -126,13 +124,10 @@ static int set_segment_reg(struct task_struct *task,
                break;
 
        case offsetof(struct user_regs_struct, gs):
-               task->thread.gs = value;
                if (task == current)
-                       /*
-                        * The user-mode %gs is not affected by
-                        * kernel entry, so we must update the CPU.
-                        */
-                       loadsegment(gs, value);
+                       set_user_gs(task_pt_regs(task), value);
+               else
+                       task_user_gs(task) = value;
        }
 
        return 0;
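
The get_user_gs()/set_user_gs()/task_user_gs() helpers used here hide where the user %gs value actually lives on 32-bit: depending on the configuration it is either saved into pt_regs on kernel entry or kept lazily in the thread struct, so ptrace no longer pokes the segment register directly. A simplified sketch of the accessor shape under the "gs is saved in pt_regs" assumption (hypothetical names; the real definitions live in the segment headers):

/* Sketch: accessors assuming the user %gs was saved in pt_regs on entry. */
struct example_pt_regs {
	unsigned long gs;	/* saved user %gs in this configuration */
	/* ... remaining registers ... */
};

static inline unsigned short example_get_user_gs(struct example_pt_regs *regs)
{
	return (unsigned short)regs->gs;
}

static inline void example_set_user_gs(struct example_pt_regs *regs,
				       unsigned short gs)
{
	regs->gs = gs;
}
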
index 2b46eb4..32e8f0a 100644 (file)
@@ -14,6 +14,7 @@
 #include <asm/reboot.h>
 #include <asm/pci_x86.h>
 #include <asm/virtext.h>
+#include <asm/cpu.h>
 
 #ifdef CONFIG_X86_32
 # include <linux/dmi.h>
@@ -23,7 +24,7 @@
 # include <asm/iommu.h>
 #endif
 
-#include <mach_ipi.h>
+#include <asm/genapic.h>
 
 /*
  * Power off function, if any
@@ -650,7 +651,7 @@ static int crash_nmi_callback(struct notifier_block *self,
 
 static void smp_send_nmi_allbutself(void)
 {
-       send_IPI_allbutself(NMI_VECTOR);
+       apic->send_IPI_allbutself(NMI_VECTOR);
 }
 
 static struct notifier_block crash_nmi_nb = {
index f5afe66..b0bbdd4 100644 (file)
@@ -29,122 +29,6 @@ relocate_kernel:
         * %rdx start address
         */
 
-       /* map the control page at its virtual address */
-
-       movq    $0x0000ff8000000000, %r10        /* mask */
-       mov     $(39 - 3), %cl                   /* bits to shift */
-       movq    PTR(VA_CONTROL_PAGE)(%rsi), %r11 /* address to map */
-
-       movq    %r11, %r9
-       andq    %r10, %r9
-       shrq    %cl, %r9
-
-       movq    PTR(VA_PGD)(%rsi), %r8
-       addq    %r8, %r9
-       movq    PTR(PA_PUD_0)(%rsi), %r8
-       orq     $PAGE_ATTR, %r8
-       movq    %r8, (%r9)
-
-       shrq    $9, %r10
-       sub     $9, %cl
-
-       movq    %r11, %r9
-       andq    %r10, %r9
-       shrq    %cl, %r9
-
-       movq    PTR(VA_PUD_0)(%rsi), %r8
-       addq    %r8, %r9
-       movq    PTR(PA_PMD_0)(%rsi), %r8
-       orq     $PAGE_ATTR, %r8
-       movq    %r8, (%r9)
-
-       shrq    $9, %r10
-       sub     $9, %cl
-
-       movq    %r11, %r9
-       andq    %r10, %r9
-       shrq    %cl, %r9
-
-       movq    PTR(VA_PMD_0)(%rsi), %r8
-       addq    %r8, %r9
-       movq    PTR(PA_PTE_0)(%rsi), %r8
-       orq     $PAGE_ATTR, %r8
-       movq    %r8, (%r9)
-
-       shrq    $9, %r10
-       sub     $9, %cl
-
-       movq    %r11, %r9
-       andq    %r10, %r9
-       shrq    %cl, %r9
-
-       movq    PTR(VA_PTE_0)(%rsi), %r8
-       addq    %r8, %r9
-       movq    PTR(PA_CONTROL_PAGE)(%rsi), %r8
-       orq     $PAGE_ATTR, %r8
-       movq    %r8, (%r9)
-
-       /* identity map the control page at its physical address */
-
-       movq    $0x0000ff8000000000, %r10        /* mask */
-       mov     $(39 - 3), %cl                   /* bits to shift */
-       movq    PTR(PA_CONTROL_PAGE)(%rsi), %r11 /* address to map */
-
-       movq    %r11, %r9
-       andq    %r10, %r9
-       shrq    %cl, %r9
-
-       movq    PTR(VA_PGD)(%rsi), %r8
-       addq    %r8, %r9
-       movq    PTR(PA_PUD_1)(%rsi), %r8
-       orq     $PAGE_ATTR, %r8
-       movq    %r8, (%r9)
-
-       shrq    $9, %r10
-       sub     $9, %cl
-
-       movq    %r11, %r9
-       andq    %r10, %r9
-       shrq    %cl, %r9
-
-       movq    PTR(VA_PUD_1)(%rsi), %r8
-       addq    %r8, %r9
-       movq    PTR(PA_PMD_1)(%rsi), %r8
-       orq     $PAGE_ATTR, %r8
-       movq    %r8, (%r9)
-
-       shrq    $9, %r10
-       sub     $9, %cl
-
-       movq    %r11, %r9
-       andq    %r10, %r9
-       shrq    %cl, %r9
-
-       movq    PTR(VA_PMD_1)(%rsi), %r8
-       addq    %r8, %r9
-       movq    PTR(PA_PTE_1)(%rsi), %r8
-       orq     $PAGE_ATTR, %r8
-       movq    %r8, (%r9)
-
-       shrq    $9, %r10
-       sub     $9, %cl
-
-       movq    %r11, %r9
-       andq    %r10, %r9
-       shrq    %cl, %r9
-
-       movq    PTR(VA_PTE_1)(%rsi), %r8
-       addq    %r8, %r9
-       movq    PTR(PA_CONTROL_PAGE)(%rsi), %r8
-       orq     $PAGE_ATTR, %r8
-       movq    %r8, (%r9)
-
-relocate_new_kernel:
-       /* %rdi indirection_page
-        * %rsi page_list
-        * %rdx start address
-        */
-
        /* zero out flags, and disable interrupts */
        pushq $0
        popfq
@@ -156,9 +40,8 @@ relocate_new_kernel:
        /* get physical address of page table now too */
        movq    PTR(PA_TABLE_PAGE)(%rsi), %rcx
 
-       /* switch to new set of page tables */
-       movq    PTR(PA_PGD)(%rsi), %r9
-       movq    %r9, %cr3
+       /* Switch to the identity mapped page tables */
+       movq    %rcx, %cr3
 
        /* setup a new stack at the end of the physical control page */
        lea     PAGE_SIZE(%r8), %rsp
@@ -194,9 +77,7 @@ identity_mapped:
        jmp 1f
 1:
 
-       /* Switch to the identity mapped page tables,
-        * and flush the TLB.
-       */
+       /* Flush the TLB (needed?) */
        movq    %rcx, %cr3
 
        /* Do the copies */
index c461f6d..8fce6c7 100644 (file)
@@ -81,7 +81,7 @@
 #include <asm/io_apic.h>
 #include <asm/ist.h>
 #include <asm/vmi.h>
-#include <setup_arch.h>
+#include <asm/setup_arch.h>
 #include <asm/bios_ebda.h>
 #include <asm/cacheflush.h>
 #include <asm/processor.h>
@@ -89,7 +89,7 @@
 
 #include <asm/system.h>
 #include <asm/vsyscall.h>
-#include <asm/smp.h>
+#include <asm/cpu.h>
 #include <asm/desc.h>
 #include <asm/dma.h>
 #include <asm/iommu.h>
@@ -97,7 +97,7 @@
 #include <asm/mmu_context.h>
 #include <asm/proto.h>
 
-#include <mach_apic.h>
+#include <asm/genapic.h>
 #include <asm/paravirt.h>
 #include <asm/hypervisor.h>
 
 #define ARCH_SETUP
 #endif
 
+unsigned int boot_cpu_id __read_mostly;
+
+#ifdef CONFIG_X86_64
+int default_cpu_present_to_apicid(int mps_cpu)
+{
+       return __default_cpu_present_to_apicid(mps_cpu);
+}
+
+int default_check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+       return __default_check_phys_apicid_present(boot_cpu_physical_apicid);
+}
+#endif
+
 #ifndef CONFIG_DEBUG_BOOT_PARAMS
 struct boot_params __initdata boot_params;
 #else
@@ -588,10 +602,9 @@ early_param("elfcorehdr", setup_elfcorehdr);
 
 static int __init default_update_genapic(void)
 {
-#ifdef CONFIG_X86_SMP
-# if defined(CONFIG_X86_GENERICARCH) || defined(CONFIG_X86_64)
-       genapic->wakeup_cpu = wakeup_secondary_cpu_via_init;
-# endif
+#ifdef CONFIG_SMP
+       if (!apic->wakeup_cpu)
+               apic->wakeup_cpu = wakeup_secondary_cpu_via_init;
 #endif
 
        return 0;
@@ -892,12 +905,11 @@ void __init setup_arch(char **cmdline_p)
         */
        acpi_reserve_bootmem();
 #endif
-#ifdef CONFIG_X86_FIND_SMP_CONFIG
        /*
         * Find and reserve possible boot-time SMP configuration:
         */
        find_smp_config();
-#endif
+
        reserve_crashkernel();
 
 #ifdef CONFIG_X86_64
@@ -924,9 +936,7 @@ void __init setup_arch(char **cmdline_p)
        map_vsyscall();
 #endif
 
-#ifdef CONFIG_X86_GENERICARCH
        generic_apic_probe();
-#endif
 
        early_quirks();
 
index 0116107..d992e6c 100644 (file)
 #include <asm/mpspec.h>
 #include <asm/apicdef.h>
 #include <asm/highmem.h>
+#include <asm/proto.h>
+#include <asm/cpumask.h>
+#include <asm/cpu.h>
+#include <asm/stackprotector.h>
 
-#ifdef CONFIG_X86_LOCAL_APIC
-unsigned int num_processors;
-unsigned disabled_cpus __cpuinitdata;
-/* Processor that is doing the boot up */
-unsigned int boot_cpu_physical_apicid = -1U;
-EXPORT_SYMBOL(boot_cpu_physical_apicid);
-unsigned int max_physical_apicid;
-
-/* Bitmask of physically existing CPUs */
-physid_mask_t phys_cpu_present_map;
-#endif
-
-/* map cpu index to physical APIC ID */
-DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
-DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
-EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
-EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
-
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
-#define        X86_64_NUMA     1
-
-/* map cpu index to node index */
-DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
-EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
-
-/* which logical CPUs are on which nodes */
-cpumask_t *node_to_cpumask_map;
-EXPORT_SYMBOL(node_to_cpumask_map);
-
-/* setup node_to_cpumask_map */
-static void __init setup_node_to_cpumask_map(void);
-
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+# define DBG(x...) printk(KERN_DEBUG x)
 #else
-static inline void setup_node_to_cpumask_map(void) { }
+# define DBG(x...)
 #endif
 
-#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
-/*
- * Copy data used in early init routines from the initial arrays to the
- * per cpu data areas.  These arrays then become expendable and the
- * *_early_ptr's are zeroed indicating that the static arrays are gone.
- */
-static void __init setup_per_cpu_maps(void)
-{
-       int cpu;
+DEFINE_PER_CPU(int, cpu_number);
+EXPORT_PER_CPU_SYMBOL(cpu_number);
 
-       for_each_possible_cpu(cpu) {
-               per_cpu(x86_cpu_to_apicid, cpu) =
-                               early_per_cpu_map(x86_cpu_to_apicid, cpu);
-               per_cpu(x86_bios_cpu_apicid, cpu) =
-                               early_per_cpu_map(x86_bios_cpu_apicid, cpu);
-#ifdef X86_64_NUMA
-               per_cpu(x86_cpu_to_node_map, cpu) =
-                               early_per_cpu_map(x86_cpu_to_node_map, cpu);
+#ifdef CONFIG_X86_64
+#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
+#else
+#define BOOT_PERCPU_OFFSET 0
 #endif
-       }
 
-       /* indicate the early static arrays will soon be gone */
-       early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
-       early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
-#ifdef X86_64_NUMA
-       early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
-#endif
-}
+DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
+EXPORT_PER_CPU_SYMBOL(this_cpu_off);
 
-#ifdef CONFIG_X86_32
-/*
- * Great future not-so-futuristic plan: make i386 and x86_64 do it
- * the same way
- */
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
+       [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
+};
 EXPORT_SYMBOL(__per_cpu_offset);
-static inline void setup_cpu_pda_map(void) { }
-
-#elif !defined(CONFIG_SMP)
-static inline void setup_cpu_pda_map(void) { }
-
-#else /* CONFIG_SMP && CONFIG_X86_64 */
-
-/*
- * Allocate cpu_pda pointer table and array via alloc_bootmem.
- */
-static void __init setup_cpu_pda_map(void)
-{
-       char *pda;
-       struct x8664_pda **new_cpu_pda;
-       unsigned long size;
-       int cpu;
-
-       size = roundup(sizeof(struct x8664_pda), cache_line_size());
-
-       /* allocate cpu_pda array and pointer table */
-       {
-               unsigned long tsize = nr_cpu_ids * sizeof(void *);
-               unsigned long asize = size * (nr_cpu_ids - 1);
 
-               tsize = roundup(tsize, cache_line_size());
-               new_cpu_pda = alloc_bootmem(tsize + asize);
-               pda = (char *)new_cpu_pda + tsize;
-       }
-
-       /* initialize pointer table to static pda's */
-       for_each_possible_cpu(cpu) {
-               if (cpu == 0) {
-                       /* leave boot cpu pda in place */
-                       new_cpu_pda[0] = cpu_pda(0);
-                       continue;
-               }
-               new_cpu_pda[cpu] = (struct x8664_pda *)pda;
-               new_cpu_pda[cpu]->in_bootmem = 1;
-               pda += size;
-       }
-
-       /* point to new pointer table */
-       _cpu_pda = new_cpu_pda;
-}
-
-#endif /* CONFIG_SMP && CONFIG_X86_64 */
-
-#ifdef CONFIG_X86_64
-
-/* correctly size the local cpu masks */
-static void __init setup_cpu_local_masks(void)
+static inline void setup_percpu_segment(int cpu)
 {
-       alloc_bootmem_cpumask_var(&cpu_initialized_mask);
-       alloc_bootmem_cpumask_var(&cpu_callin_mask);
-       alloc_bootmem_cpumask_var(&cpu_callout_mask);
-       alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
-}
-
-#else /* CONFIG_X86_32 */
+#ifdef CONFIG_X86_32
+       struct desc_struct gdt;
 
-static inline void setup_cpu_local_masks(void)
-{
+       pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
+                       0x2 | DESCTYPE_S, 0x8);
+       gdt.s = 1;
+       write_gdt_entry(get_cpu_gdt_table(cpu),
+                       GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
+#endif
 }
 
-#endif /* CONFIG_X86_32 */
-
 /*
  * Great future plan:
  * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
@@ -159,18 +61,12 @@ static inline void setup_cpu_local_masks(void)
  */
 void __init setup_per_cpu_areas(void)
 {
-       ssize_t size, old_size;
+       ssize_t size;
        char *ptr;
        int cpu;
-       unsigned long align = 1;
-
-       /* Setup cpu_pda map */
-       setup_cpu_pda_map();
 
        /* Copy section for each CPU (we discard the original) */
-       old_size = PERCPU_ENOUGH_ROOM;
-       align = max_t(unsigned long, PAGE_SIZE, align);
-       size = roundup(old_size, align);
+       size = roundup(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
 
        pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
                NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
@@ -179,30 +75,68 @@ void __init setup_per_cpu_areas(void)
 
        for_each_possible_cpu(cpu) {
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-               ptr = __alloc_bootmem(size, align,
-                                __pa(MAX_DMA_ADDRESS));
+               ptr = alloc_bootmem_pages(size);
 #else
                int node = early_cpu_to_node(cpu);
                if (!node_online(node) || !NODE_DATA(node)) {
-                       ptr = __alloc_bootmem(size, align,
-                                        __pa(MAX_DMA_ADDRESS));
+                       ptr = alloc_bootmem_pages(size);
                        pr_info("cpu %d has no node %d or node-local memory\n",
                                cpu, node);
                        pr_debug("per cpu data for cpu%d at %016lx\n",
                                 cpu, __pa(ptr));
                } else {
-                       ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
-                                                       __pa(MAX_DMA_ADDRESS));
+                       ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
                        pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
                                cpu, node, __pa(ptr));
                }
 #endif
+
+               memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
                per_cpu_offset(cpu) = ptr - __per_cpu_start;
-               memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+               per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
+               per_cpu(cpu_number, cpu) = cpu;
+               setup_percpu_segment(cpu);
+               setup_stack_canary_segment(cpu);
+               /*
+                * Copy data used in early init routines from the
+                * initial arrays to the per cpu data areas.  These
+                * arrays then become expendable and the *_early_ptr's
+                * are zeroed indicating that the static arrays are
+                * gone.
+                */
+#ifdef CONFIG_X86_LOCAL_APIC
+               per_cpu(x86_cpu_to_apicid, cpu) =
+                       early_per_cpu_map(x86_cpu_to_apicid, cpu);
+               per_cpu(x86_bios_cpu_apicid, cpu) =
+                       early_per_cpu_map(x86_bios_cpu_apicid, cpu);
+#endif
+#ifdef CONFIG_X86_64
+               per_cpu(irq_stack_ptr, cpu) =
+                       per_cpu(irq_stack_union.irq_stack, cpu) +
+                       IRQ_STACK_SIZE - 64;
+#ifdef CONFIG_NUMA
+               per_cpu(x86_cpu_to_node_map, cpu) =
+                       early_per_cpu_map(x86_cpu_to_node_map, cpu);
+#endif
+#endif
+               /*
+                * Up to this point, the boot CPU has been using .data.init
+                * area.  Reload any changed state for the boot CPU.
+                */
+               if (cpu == boot_cpu_id)
+                       switch_to_new_gdt(cpu);
+
+               DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
        }
 
-       /* Setup percpu data maps */
-       setup_per_cpu_maps();
+       /* indicate the early static arrays will soon be gone */
+#ifdef CONFIG_X86_LOCAL_APIC
+       early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
+       early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
+#endif
+#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
+       early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
+#endif
 
        /* Setup node to cpumask map */
        setup_node_to_cpumask_map();
@@ -210,199 +144,3 @@ void __init setup_per_cpu_areas(void)
        /* Setup cpu initialized, callin, callout masks */
        setup_cpu_local_masks();
 }
-
-#endif
-
-#ifdef X86_64_NUMA
-
-/*
- * Allocate node_to_cpumask_map based on number of available nodes
- * Requires node_possible_map to be valid.
- *
- * Note: node_to_cpumask() is not valid until after this is done.
- */
-static void __init setup_node_to_cpumask_map(void)
-{
-       unsigned int node, num = 0;
-       cpumask_t *map;
-
-       /* setup nr_node_ids if not done yet */
-       if (nr_node_ids == MAX_NUMNODES) {
-               for_each_node_mask(node, node_possible_map)
-                       num = node;
-               nr_node_ids = num + 1;
-       }
-
-       /* allocate the map */
-       map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
-
-       pr_debug("Node to cpumask map at %p for %d nodes\n",
-                map, nr_node_ids);
-
-       /* node_to_cpumask() will now work */
-       node_to_cpumask_map = map;
-}
-
-void __cpuinit numa_set_node(int cpu, int node)
-{
-       int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
-
-       if (cpu_pda(cpu) && node != NUMA_NO_NODE)
-               cpu_pda(cpu)->nodenumber = node;
-
-       if (cpu_to_node_map)
-               cpu_to_node_map[cpu] = node;
-
-       else if (per_cpu_offset(cpu))
-               per_cpu(x86_cpu_to_node_map, cpu) = node;
-
-       else
-               pr_debug("Setting node for non-present cpu %d\n", cpu);
-}
-
-void __cpuinit numa_clear_node(int cpu)
-{
-       numa_set_node(cpu, NUMA_NO_NODE);
-}
-
-#ifndef CONFIG_DEBUG_PER_CPU_MAPS
-
-void __cpuinit numa_add_cpu(int cpu)
-{
-       cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
-}
-
-void __cpuinit numa_remove_cpu(int cpu)
-{
-       cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
-}
-
-#else /* CONFIG_DEBUG_PER_CPU_MAPS */
-
-/*
- * --------- debug versions of the numa functions ---------
- */
-static void __cpuinit numa_set_cpumask(int cpu, int enable)
-{
-       int node = cpu_to_node(cpu);
-       cpumask_t *mask;
-       char buf[64];
-
-       if (node_to_cpumask_map == NULL) {
-               printk(KERN_ERR "node_to_cpumask_map NULL\n");
-               dump_stack();
-               return;
-       }
-
-       mask = &node_to_cpumask_map[node];
-       if (enable)
-               cpu_set(cpu, *mask);
-       else
-               cpu_clear(cpu, *mask);
-
-       cpulist_scnprintf(buf, sizeof(buf), mask);
-       printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
-               enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
-}
-
-void __cpuinit numa_add_cpu(int cpu)
-{
-       numa_set_cpumask(cpu, 1);
-}
-
-void __cpuinit numa_remove_cpu(int cpu)
-{
-       numa_set_cpumask(cpu, 0);
-}
-
-int cpu_to_node(int cpu)
-{
-       if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
-               printk(KERN_WARNING
-                       "cpu_to_node(%d): usage too early!\n", cpu);
-               dump_stack();
-               return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
-       }
-       return per_cpu(x86_cpu_to_node_map, cpu);
-}
-EXPORT_SYMBOL(cpu_to_node);
-
-/*
- * Same function as cpu_to_node() but used if called before the
- * per_cpu areas are setup.
- */
-int early_cpu_to_node(int cpu)
-{
-       if (early_per_cpu_ptr(x86_cpu_to_node_map))
-               return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
-
-       if (!per_cpu_offset(cpu)) {
-               printk(KERN_WARNING
-                       "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
-               dump_stack();
-               return NUMA_NO_NODE;
-       }
-       return per_cpu(x86_cpu_to_node_map, cpu);
-}
-
-
-/* empty cpumask */
-static const cpumask_t cpu_mask_none;
-
-/*
- * Returns a pointer to the bitmask of CPUs on Node 'node'.
- */
-const cpumask_t *cpumask_of_node(int node)
-{
-       if (node_to_cpumask_map == NULL) {
-               printk(KERN_WARNING
-                       "cpumask_of_node(%d): no node_to_cpumask_map!\n",
-                       node);
-               dump_stack();
-               return (const cpumask_t *)&cpu_online_map;
-       }
-       if (node >= nr_node_ids) {
-               printk(KERN_WARNING
-                       "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
-                       node, nr_node_ids);
-               dump_stack();
-               return &cpu_mask_none;
-       }
-       return &node_to_cpumask_map[node];
-}
-EXPORT_SYMBOL(cpumask_of_node);
-
-/*
- * Returns a bitmask of CPUs on Node 'node'.
- *
- * Side note: this function creates the returned cpumask on the stack
- * so with a high NR_CPUS count, excessive stack space is used.  The
- * node_to_cpumask_ptr function should be used whenever possible.
- */
-cpumask_t node_to_cpumask(int node)
-{
-       if (node_to_cpumask_map == NULL) {
-               printk(KERN_WARNING
-                       "node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
-               dump_stack();
-               return cpu_online_map;
-       }
-       if (node >= nr_node_ids) {
-               printk(KERN_WARNING
-                       "node_to_cpumask(%d): node > nr_node_ids(%d)\n",
-                       node, nr_node_ids);
-               dump_stack();
-               return cpu_mask_none;
-       }
-       return node_to_cpumask_map[node];
-}
-EXPORT_SYMBOL(node_to_cpumask);
-
-/*
- * --------- end of debug versions of the numa functions ---------
- */
-
-#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
-
-#endif /* X86_64_NUMA */
-
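
The debug-only NUMA helpers removed above all follow one lookup order: consult the early boot-time x86_cpu_to_node_map while the per-cpu areas are not yet set up, switch to the per-cpu copy afterwards, and warn loudly when a caller gets the ordering wrong. A minimal user-space model of that ordering is sketched below; the array names and the NUMA_NO_NODE value are stand-ins for illustration, not the kernel's definitions.

#include <stdio.h>

#define NR_CPUS      4
#define NUMA_NO_NODE (-1)

/* Early boot-time map: present only until the per-cpu areas exist. */
static int early_cpu_to_node_map[NR_CPUS] = { 0, 0, 1, 1 };
static int *early_map = early_cpu_to_node_map;

/* Per-cpu copies, valid once the "per-cpu areas" have been set up. */
static int percpu_node[NR_CPUS];
static int percpu_ready;

static void setup_per_cpu_areas(void)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                percpu_node[cpu] = early_map[cpu];
        percpu_ready = 1;
        early_map = NULL;       /* from now on the early map must not be used */
}

/* Mirrors the shape of the deleted early_cpu_to_node(): early map first. */
static int early_cpu_to_node(int cpu)
{
        if (early_map)
                return early_map[cpu];
        if (!percpu_ready) {
                fprintf(stderr, "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
                return NUMA_NO_NODE;
        }
        return percpu_node[cpu];
}

int main(void)
{
        printf("before setup: cpu 2 -> node %d\n", early_cpu_to_node(2));
        setup_per_cpu_areas();
        printf("after setup:  cpu 2 -> node %d\n", early_cpu_to_node(2));
        return 0;
}
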
index df0587f..7cdcd16 100644 (file)
 # define FIX_EFLAGS    __FIX_EFLAGS
 #endif
 
-#define COPY(x)                        {               \
-       err |= __get_user(regs->x, &sc->x);     \
-}
+#define COPY(x)                        do {                    \
+       get_user_ex(regs->x, &sc->x);                   \
+} while (0)
 
-#define COPY_SEG(seg)          {                       \
-               unsigned short tmp;                     \
-               err |= __get_user(tmp, &sc->seg);       \
-               regs->seg = tmp;                        \
-}
+#define GET_SEG(seg)           ({                      \
+       unsigned short tmp;                             \
+       get_user_ex(tmp, &sc->seg);                     \
+       tmp;                                            \
+})
 
-#define COPY_SEG_CPL3(seg)     {                       \
-               unsigned short tmp;                     \
-               err |= __get_user(tmp, &sc->seg);       \
-               regs->seg = tmp | 3;                    \
-}
+#define COPY_SEG(seg)          do {                    \
+       regs->seg = GET_SEG(seg);                       \
+} while (0)
 
-#define GET_SEG(seg)           {                       \
-               unsigned short tmp;                     \
-               err |= __get_user(tmp, &sc->seg);       \
-               loadsegment(seg, tmp);                  \
-}
+#define COPY_SEG_CPL3(seg)     do {                    \
+       regs->seg = GET_SEG(seg) | 3;                   \
+} while (0)
 
 static int
 restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
@@ -83,45 +79,49 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
        /* Always make any pending restarted system calls return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;
 
+       get_user_try {
+
 #ifdef CONFIG_X86_32
-       GET_SEG(gs);
-       COPY_SEG(fs);
-       COPY_SEG(es);
-       COPY_SEG(ds);
+               set_user_gs(regs, GET_SEG(gs));
+               COPY_SEG(fs);
+               COPY_SEG(es);
+               COPY_SEG(ds);
 #endif /* CONFIG_X86_32 */
 
-       COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
-       COPY(dx); COPY(cx); COPY(ip);
+               COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
+               COPY(dx); COPY(cx); COPY(ip);
 
 #ifdef CONFIG_X86_64
-       COPY(r8);
-       COPY(r9);
-       COPY(r10);
-       COPY(r11);
-       COPY(r12);
-       COPY(r13);
-       COPY(r14);
-       COPY(r15);
+               COPY(r8);
+               COPY(r9);
+               COPY(r10);
+               COPY(r11);
+               COPY(r12);
+               COPY(r13);
+               COPY(r14);
+               COPY(r15);
 #endif /* CONFIG_X86_64 */
 
 #ifdef CONFIG_X86_32
-       COPY_SEG_CPL3(cs);
-       COPY_SEG_CPL3(ss);
+               COPY_SEG_CPL3(cs);
+               COPY_SEG_CPL3(ss);
 #else /* !CONFIG_X86_32 */
-       /* Kernel saves and restores only the CS segment register on signals,
-        * which is the bare minimum needed to allow mixed 32/64-bit code.
-        * App's signal handler can save/restore other segments if needed. */
-       COPY_SEG_CPL3(cs);
+               /* Kernel saves and restores only the CS segment register on signals,
+                * which is the bare minimum needed to allow mixed 32/64-bit code.
+                * App's signal handler can save/restore other segments if needed. */
+               COPY_SEG_CPL3(cs);
 #endif /* CONFIG_X86_32 */
 
-       err |= __get_user(tmpflags, &sc->flags);
-       regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
-       regs->orig_ax = -1;             /* disable syscall checks */
+               get_user_ex(tmpflags, &sc->flags);
+               regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
+               regs->orig_ax = -1;             /* disable syscall checks */
+
+               get_user_ex(buf, &sc->fpstate);
+               err |= restore_i387_xstate(buf);
 
-       err |= __get_user(buf, &sc->fpstate);
-       err |= restore_i387_xstate(buf);
+               get_user_ex(*pax, &sc->ax);
+       } get_user_catch(err);
 
-       err |= __get_user(*pax, &sc->ax);
        return err;
 }
 
@@ -131,57 +131,55 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
 {
        int err = 0;
 
-#ifdef CONFIG_X86_32
-       {
-               unsigned int tmp;
+       put_user_try {
 
-               savesegment(gs, tmp);
-               err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
-       }
-       err |= __put_user(regs->fs, (unsigned int __user *)&sc->fs);
-       err |= __put_user(regs->es, (unsigned int __user *)&sc->es);
-       err |= __put_user(regs->ds, (unsigned int __user *)&sc->ds);
+#ifdef CONFIG_X86_32
+               put_user_ex(get_user_gs(regs), (unsigned int __user *)&sc->gs);
+               put_user_ex(regs->fs, (unsigned int __user *)&sc->fs);
+               put_user_ex(regs->es, (unsigned int __user *)&sc->es);
+               put_user_ex(regs->ds, (unsigned int __user *)&sc->ds);
 #endif /* CONFIG_X86_32 */
 
-       err |= __put_user(regs->di, &sc->di);
-       err |= __put_user(regs->si, &sc->si);
-       err |= __put_user(regs->bp, &sc->bp);
-       err |= __put_user(regs->sp, &sc->sp);
-       err |= __put_user(regs->bx, &sc->bx);
-       err |= __put_user(regs->dx, &sc->dx);
-       err |= __put_user(regs->cx, &sc->cx);
-       err |= __put_user(regs->ax, &sc->ax);
+               put_user_ex(regs->di, &sc->di);
+               put_user_ex(regs->si, &sc->si);
+               put_user_ex(regs->bp, &sc->bp);
+               put_user_ex(regs->sp, &sc->sp);
+               put_user_ex(regs->bx, &sc->bx);
+               put_user_ex(regs->dx, &sc->dx);
+               put_user_ex(regs->cx, &sc->cx);
+               put_user_ex(regs->ax, &sc->ax);
 #ifdef CONFIG_X86_64
-       err |= __put_user(regs->r8, &sc->r8);
-       err |= __put_user(regs->r9, &sc->r9);
-       err |= __put_user(regs->r10, &sc->r10);
-       err |= __put_user(regs->r11, &sc->r11);
-       err |= __put_user(regs->r12, &sc->r12);
-       err |= __put_user(regs->r13, &sc->r13);
-       err |= __put_user(regs->r14, &sc->r14);
-       err |= __put_user(regs->r15, &sc->r15);
+               put_user_ex(regs->r8, &sc->r8);
+               put_user_ex(regs->r9, &sc->r9);
+               put_user_ex(regs->r10, &sc->r10);
+               put_user_ex(regs->r11, &sc->r11);
+               put_user_ex(regs->r12, &sc->r12);
+               put_user_ex(regs->r13, &sc->r13);
+               put_user_ex(regs->r14, &sc->r14);
+               put_user_ex(regs->r15, &sc->r15);
 #endif /* CONFIG_X86_64 */
 
-       err |= __put_user(current->thread.trap_no, &sc->trapno);
-       err |= __put_user(current->thread.error_code, &sc->err);
-       err |= __put_user(regs->ip, &sc->ip);
+               put_user_ex(current->thread.trap_no, &sc->trapno);
+               put_user_ex(current->thread.error_code, &sc->err);
+               put_user_ex(regs->ip, &sc->ip);
 #ifdef CONFIG_X86_32
-       err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs);
-       err |= __put_user(regs->flags, &sc->flags);
-       err |= __put_user(regs->sp, &sc->sp_at_signal);
-       err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss);
+               put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
+               put_user_ex(regs->flags, &sc->flags);
+               put_user_ex(regs->sp, &sc->sp_at_signal);
+               put_user_ex(regs->ss, (unsigned int __user *)&sc->ss);
 #else /* !CONFIG_X86_32 */
-       err |= __put_user(regs->flags, &sc->flags);
-       err |= __put_user(regs->cs, &sc->cs);
-       err |= __put_user(0, &sc->gs);
-       err |= __put_user(0, &sc->fs);
+               put_user_ex(regs->flags, &sc->flags);
+               put_user_ex(regs->cs, &sc->cs);
+               put_user_ex(0, &sc->gs);
+               put_user_ex(0, &sc->fs);
 #endif /* CONFIG_X86_32 */
 
-       err |= __put_user(fpstate, &sc->fpstate);
+               put_user_ex(fpstate, &sc->fpstate);
 
-       /* non-iBCS2 extensions.. */
-       err |= __put_user(mask, &sc->oldmask);
-       err |= __put_user(current->thread.cr2, &sc->cr2);
+               /* non-iBCS2 extensions.. */
+               put_user_ex(mask, &sc->oldmask);
+               put_user_ex(current->thread.cr2, &sc->cr2);
+       } put_user_catch(err);
 
        return err;
 }
@@ -336,43 +334,41 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
                return -EFAULT;
 
-       err |= __put_user(sig, &frame->sig);
-       err |= __put_user(&frame->info, &frame->pinfo);
-       err |= __put_user(&frame->uc, &frame->puc);
-       err |= copy_siginfo_to_user(&frame->info, info);
-       if (err)
-               return -EFAULT;
-
-       /* Create the ucontext.  */
-       if (cpu_has_xsave)
-               err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
-       else
-               err |= __put_user(0, &frame->uc.uc_flags);
-       err |= __put_user(0, &frame->uc.uc_link);
-       err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
-       err |= __put_user(sas_ss_flags(regs->sp),
-                         &frame->uc.uc_stack.ss_flags);
-       err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
-       err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
-                               regs, set->sig[0]);
-       err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
-       if (err)
-               return -EFAULT;
+       put_user_try {
+               put_user_ex(sig, &frame->sig);
+               put_user_ex(&frame->info, &frame->pinfo);
+               put_user_ex(&frame->uc, &frame->puc);
+               err |= copy_siginfo_to_user(&frame->info, info);
 
-       /* Set up to return from userspace.  */
-       restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
-       if (ka->sa.sa_flags & SA_RESTORER)
-               restorer = ka->sa.sa_restorer;
-       err |= __put_user(restorer, &frame->pretcode);
+               /* Create the ucontext.  */
+               if (cpu_has_xsave)
+                       put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
+               else
+                       put_user_ex(0, &frame->uc.uc_flags);
+               put_user_ex(0, &frame->uc.uc_link);
+               put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+               put_user_ex(sas_ss_flags(regs->sp),
+                           &frame->uc.uc_stack.ss_flags);
+               put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+               err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
+                                       regs, set->sig[0]);
+               err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+               /* Set up to return from userspace.  */
+               restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
+               if (ka->sa.sa_flags & SA_RESTORER)
+                       restorer = ka->sa.sa_restorer;
+               put_user_ex(restorer, &frame->pretcode);
 
-       /*
-        * This is movl $__NR_rt_sigreturn, %ax ; int $0x80
-        *
-        * WE DO NOT USE IT ANY MORE! It's only left here for historical
-        * reasons and because gdb uses it as a signature to notice
-        * signal handler stack frames.
-        */
-       err |= __put_user(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
+               /*
+                * This is movl $__NR_rt_sigreturn, %ax ; int $0x80
+                *
+                * WE DO NOT USE IT ANY MORE! It's only left here for historical
+                * reasons and because gdb uses it as a signature to notice
+                * signal handler stack frames.
+                */
+               put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
+       } put_user_catch(err);
 
        if (err)
                return -EFAULT;
@@ -436,28 +432,30 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
                        return -EFAULT;
        }
 
-       /* Create the ucontext.  */
-       if (cpu_has_xsave)
-               err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
-       else
-               err |= __put_user(0, &frame->uc.uc_flags);
-       err |= __put_user(0, &frame->uc.uc_link);
-       err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
-       err |= __put_user(sas_ss_flags(regs->sp),
-                         &frame->uc.uc_stack.ss_flags);
-       err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
-       err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]);
-       err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
-
-       /* Set up to return from userspace.  If provided, use a stub
-          already in userspace.  */
-       /* x86-64 should always use SA_RESTORER. */
-       if (ka->sa.sa_flags & SA_RESTORER) {
-               err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
-       } else {
-               /* could use a vstub here */
-               return -EFAULT;
-       }
+       put_user_try {
+               /* Create the ucontext.  */
+               if (cpu_has_xsave)
+                       put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
+               else
+                       put_user_ex(0, &frame->uc.uc_flags);
+               put_user_ex(0, &frame->uc.uc_link);
+               put_user_ex(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+               put_user_ex(sas_ss_flags(regs->sp),
+                           &frame->uc.uc_stack.ss_flags);
+               put_user_ex(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
+               err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]);
+               err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+               /* Set up to return from userspace.  If provided, use a stub
+                  already in userspace.  */
+               /* x86-64 should always use SA_RESTORER. */
+               if (ka->sa.sa_flags & SA_RESTORER) {
+                       put_user_ex(ka->sa.sa_restorer, &frame->pretcode);
+               } else {
+                       /* could use a vstub here */
+                       err |= -EFAULT;
+               }
+       } put_user_catch(err);
 
        if (err)
                return -EFAULT;
@@ -509,31 +507,41 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
              struct old_sigaction __user *oact)
 {
        struct k_sigaction new_ka, old_ka;
-       int ret;
+       int ret = 0;
 
        if (act) {
                old_sigset_t mask;
 
-               if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
-                   __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-                   __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+               if (!access_ok(VERIFY_READ, act, sizeof(*act)))
                        return -EFAULT;
 
-               __get_user(new_ka.sa.sa_flags, &act->sa_flags);
-               __get_user(mask, &act->sa_mask);
+               get_user_try {
+                       get_user_ex(new_ka.sa.sa_handler, &act->sa_handler);
+                       get_user_ex(new_ka.sa.sa_flags, &act->sa_flags);
+                       get_user_ex(mask, &act->sa_mask);
+                       get_user_ex(new_ka.sa.sa_restorer, &act->sa_restorer);
+               } get_user_catch(ret);
+
+               if (ret)
+                       return -EFAULT;
                siginitset(&new_ka.sa.sa_mask, mask);
        }
 
        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
 
        if (!ret && oact) {
-               if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
-                   __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-                   __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+               if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
                        return -EFAULT;
 
-               __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
-               __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+               put_user_try {
+                       put_user_ex(old_ka.sa.sa_handler, &oact->sa_handler);
+                       put_user_ex(old_ka.sa.sa_flags, &oact->sa_flags);
+                       put_user_ex(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+                       put_user_ex(old_ka.sa.sa_restorer, &oact->sa_restorer);
+               } put_user_catch(ret);
+
+               if (ret)
+                       return -EFAULT;
        }
 
        return ret;
@@ -541,14 +549,9 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
 #endif /* CONFIG_X86_32 */
 
 #ifdef CONFIG_X86_32
-asmlinkage int sys_sigaltstack(unsigned long bx)
+int sys_sigaltstack(struct pt_regs *regs)
 {
-       /*
-        * This is needed to make gcc realize it doesn't own the
-        * "struct pt_regs"
-        */
-       struct pt_regs *regs = (struct pt_regs *)&bx;
-       const stack_t __user *uss = (const stack_t __user *)bx;
+       const stack_t __user *uss = (const stack_t __user *)regs->bx;
        stack_t __user *uoss = (stack_t __user *)regs->cx;
 
        return do_sigaltstack(uss, uoss, regs->sp);
@@ -566,14 +569,12 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
  * Do a signal return; undo the signal stack.
  */
 #ifdef CONFIG_X86_32
-asmlinkage unsigned long sys_sigreturn(unsigned long __unused)
+unsigned long sys_sigreturn(struct pt_regs *regs)
 {
        struct sigframe __user *frame;
-       struct pt_regs *regs;
        unsigned long ax;
        sigset_t set;
 
-       regs = (struct pt_regs *) &__unused;
        frame = (struct sigframe __user *)(regs->sp - 8);
 
        if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
@@ -600,7 +601,7 @@ badframe:
 }
 #endif /* CONFIG_X86_32 */
 
-static long do_rt_sigreturn(struct pt_regs *regs)
+long sys_rt_sigreturn(struct pt_regs *regs)
 {
        struct rt_sigframe __user *frame;
        unsigned long ax;
@@ -631,25 +632,6 @@ badframe:
        return 0;
 }
 
-#ifdef CONFIG_X86_32
-/*
- * Note: do not pass in pt_regs directly as with tail-call optimization
- * GCC will incorrectly stomp on the caller's frame and corrupt user-space
- * register state:
- */
-asmlinkage int sys_rt_sigreturn(unsigned long __unused)
-{
-       struct pt_regs *regs = (struct pt_regs *)&__unused;
-
-       return do_rt_sigreturn(regs);
-}
-#else /* !CONFIG_X86_32 */
-asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
-{
-       return do_rt_sigreturn(regs);
-}
-#endif /* CONFIG_X86_32 */
-
 /*
  * OK, we're invoking a handler:
  */
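
The wrappers deleted above re-derived the register frame from the address of their first stack argument, a trick that is only safe while the compiler leaves the caller's frame untouched (hence the warning about tail-call optimization). With this series the asm entry code supplies ptregs_* stubs that hand C an explicit struct pt_regs pointer instead. The fragment below only compares the two shapes with stand-in types; the real stubs live in entry_32.S and are not reproduced here.

#include <stdio.h>

struct pt_regs { unsigned long bx, cx, sp; };

/*
 * Old shape: the handler received its first argument by value and cast
 * that argument's address back into a struct pt_regs pointer.  This only
 * holds up when the function is entered straight from the asm path with
 * an undisturbed frame.
 */
static long old_style(unsigned long bx)
{
        struct pt_regs *regs = (struct pt_regs *)&bx;   /* fragile aliasing trick */

        return (long)regs->bx;
}

/*
 * New shape: a dedicated stub passes the frame pointer explicitly, so C
 * code never has to guess where the saved registers live.
 */
static long new_style(struct pt_regs *regs)
{
        return (long)regs->bx;
}

int main(void)
{
        struct pt_regs regs = { .bx = 42, .cx = 1, .sp = 2 };

        printf("old_style reads bx as %ld (works here only by accident)\n",
               old_style(regs.bx));
        printf("new_style reads bx as %ld\n", new_style(&regs));
        return 0;
}
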
index e6faa33..eaaffae 100644 (file)
@@ -2,7 +2,7 @@
  *     Intel SMP support routines.
  *
  *     (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
- *     (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
+ *     (c) 1998-99, 2000, 2009 Ingo Molnar <mingo@redhat.com>
  *      (c) 2002,2003 Andi Kleen, SuSE Labs.
  *
  *     i386 and x86_64 integration by Glauber Costa <gcosta@redhat.com>
@@ -26,8 +26,7 @@
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
 #include <asm/proto.h>
-#include <mach_ipi.h>
-#include <mach_apic.h>
+#include <asm/genapic.h>
 /*
  *     Some notes on x86 processor bugs affecting SMP operation:
  *
@@ -118,12 +117,12 @@ static void native_smp_send_reschedule(int cpu)
                WARN_ON(1);
                return;
        }
-       send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
+       apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
 }
 
 void native_send_call_func_single_ipi(int cpu)
 {
-       send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
+       apic->send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
 }
 
 void native_send_call_func_ipi(const struct cpumask *mask)
@@ -131,7 +130,7 @@ void native_send_call_func_ipi(const struct cpumask *mask)
        cpumask_var_t allbutself;
 
        if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
-               send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
+               apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
                return;
        }
 
@@ -140,9 +139,9 @@ void native_send_call_func_ipi(const struct cpumask *mask)
 
        if (cpumask_equal(mask, allbutself) &&
            cpumask_equal(cpu_online_mask, cpu_callout_mask))
-               send_IPI_allbutself(CALL_FUNCTION_VECTOR);
+               apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
        else
-               send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
+               apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
 
        free_cpumask_var(allbutself);
 }
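
In the smp.c hunk above, the compile-time send_IPI_* helpers that used to come from <mach_ipi.h> are replaced by calls through the single runtime-selected apic driver structure. A reduced stand-alone model of that indirection follows; the structure layout and names are illustrative rather than the kernel's struct genapic.

#include <stdio.h>

/* Reduced model of a runtime-selected APIC driver. */
struct apic_driver {
        const char *name;
        void (*send_IPI_mask)(unsigned mask, int vector);
        void (*send_IPI_allbutself)(int vector);        /* may be NULL */
};

static void default_send_IPI_mask(unsigned mask, int vector)
{
        printf("default driver: IPI vector 0x%x to mask 0x%x\n", vector, mask);
}

static struct apic_driver apic_default = {
        .name                   = "default",
        .send_IPI_mask          = default_send_IPI_mask,
        .send_IPI_allbutself    = NULL,
};

/* One global pointer replaces the old compile-time <mach_ipi.h> selection. */
static struct apic_driver *apic = &apic_default;

static void smp_send_reschedule(int cpu)
{
        apic->send_IPI_mask(1u << cpu, 0xfd);   /* vector number is arbitrary here */
}

int main(void)
{
        printf("using '%s' APIC driver\n", apic->name);
        smp_send_reschedule(2);
        return 0;
}
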
index bb1a3b1..af57f88 100644 (file)
@@ -2,7 +2,7 @@
  *     x86 SMP booting functions
  *
  *     (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
- *     (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
+ *     (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
  *     Copyright 2001 Andi Kleen, SuSE Labs.
  *
  *     Much of the core SMP work is based on previous work by Thomas Radke, to
@@ -53,7 +53,6 @@
 #include <asm/nmi.h>
 #include <asm/irq.h>
 #include <asm/idle.h>
-#include <asm/smp.h>
 #include <asm/trampoline.h>
 #include <asm/cpu.h>
 #include <asm/numa.h>
 #include <asm/vmi.h>
 #include <asm/genapic.h>
 #include <asm/setup.h>
+#include <asm/uv/uv.h>
 #include <linux/mc146818rtc.h>
 
-#include <mach_apic.h>
-#include <mach_wakecpu.h>
-#include <smpboot_hooks.h>
+#include <asm/genapic.h>
+#include <asm/smpboot_hooks.h>
 
 #ifdef CONFIG_X86_32
 u8 apicid_2_node[MAX_APICID];
@@ -163,7 +162,7 @@ static void map_cpu_to_logical_apicid(void)
 {
        int cpu = smp_processor_id();
        int apicid = logical_smp_processor_id();
-       int node = apicid_to_node(apicid);
+       int node = apic->apicid_to_node(apicid);
 
        if (!node_online(node))
                node = first_online_node;
@@ -196,7 +195,8 @@ static void __cpuinit smp_callin(void)
         * our local APIC.  We have to wait for the IPI or we'll
         * lock up on an APIC access.
         */
-       wait_for_init_deassert(&init_deasserted);
+       if (apic->wait_for_init_deassert)
+               apic->wait_for_init_deassert(&init_deasserted);
 
        /*
         * (This works even if the APIC is not enabled.)
@@ -243,7 +243,8 @@ static void __cpuinit smp_callin(void)
         */
 
        pr_debug("CALLIN, before setup_local_APIC().\n");
-       smp_callin_clear_local_apic();
+       if (apic->smp_callin_clear_local_apic)
+               apic->smp_callin_clear_local_apic();
        setup_local_APIC();
        end_local_APIC_setup();
        map_cpu_to_logical_apicid();
@@ -583,7 +584,7 @@ wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
        /* Target chip */
        /* Boot on the stack */
        /* Kick the second */
-       apic_icr_write(APIC_DM_NMI | APIC_DEST_LOGICAL, logical_apicid);
+       apic_icr_write(APIC_DM_NMI | apic->dest_logical, logical_apicid);
 
        pr_debug("Waiting for send to finish...\n");
        send_status = safe_apic_wait_icr_idle();
@@ -745,57 +746,11 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
        complete(&c_idle->done);
 }
 
-#ifdef CONFIG_X86_64
-
-/* __ref because it's safe to call free_bootmem when after_bootmem == 0. */
-static void __ref free_bootmem_pda(struct x8664_pda *oldpda)
-{
-       if (!after_bootmem)
-               free_bootmem((unsigned long)oldpda, sizeof(*oldpda));
-}
-
-/*
- * Allocate node local memory for the AP pda.
- *
- * Must be called after the _cpu_pda pointer table is initialized.
- */
-int __cpuinit get_local_pda(int cpu)
-{
-       struct x8664_pda *oldpda, *newpda;
-       unsigned long size = sizeof(struct x8664_pda);
-       int node = cpu_to_node(cpu);
-
-       if (cpu_pda(cpu) && !cpu_pda(cpu)->in_bootmem)
-               return 0;
-
-       oldpda = cpu_pda(cpu);
-       newpda = kmalloc_node(size, GFP_ATOMIC, node);
-       if (!newpda) {
-               printk(KERN_ERR "Could not allocate node local PDA "
-                       "for CPU %d on node %d\n", cpu, node);
-
-               if (oldpda)
-                       return 0;       /* have a usable pda */
-               else
-                       return -1;
-       }
-
-       if (oldpda) {
-               memcpy(newpda, oldpda, size);
-               free_bootmem_pda(oldpda);
-       }
-
-       newpda->in_bootmem = 0;
-       cpu_pda(cpu) = newpda;
-       return 0;
-}
-#endif /* CONFIG_X86_64 */
-
 static int __cpuinit do_boot_cpu(int apicid, int cpu)
 /*
  * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
  * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
- * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
+ * Returns zero if CPU booted OK, else error code from ->wakeup_cpu.
  */
 {
        unsigned long boot_error = 0;
@@ -808,16 +763,6 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
        };
        INIT_WORK(&c_idle.work, do_fork_idle);
 
-#ifdef CONFIG_X86_64
-       /* Allocate node local memory for AP pdas */
-       if (cpu > 0) {
-               boot_error = get_local_pda(cpu);
-               if (boot_error)
-                       goto restore_state;
-                       /* if can't get pda memory, can't start cpu */
-       }
-#endif
-
        alternatives_smp_switch(1);
 
        c_idle.idle = get_idle_for_cpu(cpu);
@@ -847,14 +792,16 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 
        set_idle_for_cpu(cpu, c_idle.idle);
 do_rest:
-#ifdef CONFIG_X86_32
        per_cpu(current_task, cpu) = c_idle.idle;
-       init_gdt(cpu);
+#ifdef CONFIG_X86_32
        /* Stack for startup_32 can be just as for start_secondary onwards */
        irq_ctx_init(cpu);
 #else
-       cpu_pda(cpu)->pcurrent = c_idle.idle;
        clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
+       initial_gs = per_cpu_offset(cpu);
+       per_cpu(kernel_stack, cpu) =
+               (unsigned long)task_stack_page(c_idle.idle) -
+               KERNEL_STACK_OFFSET + THREAD_SIZE;
 #endif
        early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
        initial_code = (unsigned long)start_secondary;
@@ -878,7 +825,8 @@ do_rest:
 
                pr_debug("Setting warm reset code and vector.\n");
 
-               store_NMI_vector(&nmi_high, &nmi_low);
+               if (apic->store_NMI_vector)
+                       apic->store_NMI_vector(&nmi_high, &nmi_low);
 
                smpboot_setup_warm_reset_vector(start_ip);
                /*
@@ -893,7 +841,7 @@ do_rest:
        /*
         * Starting actual IPI sequence...
         */
-       boot_error = wakeup_secondary_cpu(apicid, start_ip);
+       boot_error = apic->wakeup_cpu(apicid, start_ip);
 
        if (!boot_error) {
                /*
@@ -927,13 +875,11 @@ do_rest:
                        else
                                /* trampoline code not run */
                                printk(KERN_ERR "Not responding.\n");
-                       if (get_uv_system_type() != UV_NON_UNIQUE_APIC)
-                               inquire_remote_apic(apicid);
+                       if (apic->inquire_remote_apic)
+                               apic->inquire_remote_apic(apicid);
                }
        }
-#ifdef CONFIG_X86_64
-restore_state:
-#endif
+
        if (boot_error) {
                /* Try to put things back the way they were before ... */
                numa_remove_cpu(cpu); /* was set by numa_add_cpu */
@@ -961,7 +907,7 @@ restore_state:
 
 int __cpuinit native_cpu_up(unsigned int cpu)
 {
-       int apicid = cpu_present_to_apicid(cpu);
+       int apicid = apic->cpu_present_to_apicid(cpu);
        unsigned long flags;
        int err;
 
@@ -1054,14 +1000,14 @@ static int __init smp_sanity_check(unsigned max_cpus)
 {
        preempt_disable();
 
-#if defined(CONFIG_X86_PC) && defined(CONFIG_X86_32)
+#if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32)
        if (def_to_bigsmp && nr_cpu_ids > 8) {
                unsigned int cpu;
                unsigned nr;
 
                printk(KERN_WARNING
                       "More than 8 CPUs detected - skipping them.\n"
-                      "Use CONFIG_X86_GENERICARCH and CONFIG_X86_BIGSMP.\n");
+                      "Use CONFIG_X86_BIGSMP.\n");
 
                nr = 0;
                for_each_present_cpu(cpu) {
@@ -1107,7 +1053,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
         * Should not be necessary because the MP table should list the boot
         * CPU too, but we do it for the sake of robustness anyway.
         */
-       if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
+       if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) {
                printk(KERN_NOTICE
                        "weird, boot CPU (#%d) not listed by the BIOS.\n",
                        boot_cpu_physical_apicid);
@@ -1125,6 +1071,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
                printk(KERN_ERR "... forcing use of dummy APIC emulation."
                                "(tell your hw vendor)\n");
                smpboot_clear_io_apic();
+               arch_disable_smp_support();
                return -1;
        }
 
@@ -1183,7 +1130,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 
 #ifdef CONFIG_X86_64
        enable_IR_x2apic();
-       setup_apic_routing();
+       default_setup_apic_routing();
 #endif
 
        if (smp_sanity_check(max_cpus) < 0) {
@@ -1218,7 +1165,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 
        map_cpu_to_logical_apicid();
 
-       setup_portio_remap();
+       if (apic->setup_portio_remap)
+               apic->setup_portio_remap();
 
        smpboot_setup_io_apic();
        /*
@@ -1240,10 +1188,7 @@ out:
 void __init native_smp_prepare_boot_cpu(void)
 {
        int me = smp_processor_id();
-#ifdef CONFIG_X86_32
-       init_gdt(me);
-#endif
-       switch_to_new_gdt();
+       switch_to_new_gdt(me);
        /* already set me in cpu_online_mask in boot_cpu_init() */
        cpumask_set_cpu(me, cpu_callout_mask);
        per_cpu(cpu_state, me) = CPU_ONLINE;
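
Several of the smpboot.c call sites above switch from unconditional mach_* calls to NULL-checked hooks (wait_for_init_deassert, smp_callin_clear_local_apic, store_NMI_vector, setup_portio_remap, inquire_remote_apic), because many APIC drivers now leave those slots empty instead of supplying do-nothing stubs. A small model of that optional-hook convention, with illustrative names:

#include <stdio.h>

struct boot_hooks {
        /* Any of these may legitimately be NULL for a given APIC driver. */
        void (*wait_for_init_deassert)(void);
        void (*setup_portio_remap)(void);
};

static void noisy_wait(void)
{
        printf("waiting for INIT deassert\n");
}

static struct boot_hooks with_wait    = { .wait_for_init_deassert = noisy_wait };
static struct boot_hooks without_wait = { 0 };

static void bring_up_cpu(const struct boot_hooks *hooks)
{
        /* Call sites test the pointer instead of relying on empty stubs. */
        if (hooks->wait_for_init_deassert)
                hooks->wait_for_init_deassert();
        if (hooks->setup_portio_remap)
                hooks->setup_portio_remap();
        printf("cpu brought up\n");
}

int main(void)
{
        bring_up_cpu(&with_wait);
        bring_up_cpu(&without_wait);
        return 0;
}
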
diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c
deleted file mode 100644 (file)
index 397e309..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * SMP stuff which is common to all sub-architectures.
- */
-#include <linux/module.h>
-#include <asm/smp.h>
-
-#ifdef CONFIG_X86_32
-DEFINE_PER_CPU(unsigned long, this_cpu_off);
-EXPORT_PER_CPU_SYMBOL(this_cpu_off);
-
-/*
- * Initialize the CPU's GDT.  This is either the boot CPU doing itself
- * (still using the master per-cpu area), or a CPU doing it for a
- * secondary which will soon come up.
- */
-__cpuinit void init_gdt(int cpu)
-{
-       struct desc_struct gdt;
-
-       pack_descriptor(&gdt, __per_cpu_offset[cpu], 0xFFFFF,
-                       0x2 | DESCTYPE_S, 0x8);
-       gdt.s = 1;
-
-       write_gdt_entry(get_cpu_gdt_table(cpu),
-                       GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
-
-       per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
-       per_cpu(cpu_number, cpu) = cpu;
-}
-#endif
index 10786af..f7bddc2 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Stack trace management functions
  *
- *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *  Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  */
 #include <linux/sched.h>
 #include <linux/stacktrace.h>
index 7b98785..1e733ef 100644 (file)
 #include <linux/init.h>
 #include <asm/io.h>
 #include <asm/bios_ebda.h>
-#include <asm/summit/mpparse.h>
 
+/*
+ * APIC driver for the IBM "Summit" chipset.
+ */
+#define APIC_DEFINITION 1
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <asm/mpspec.h>
+#include <asm/apic.h>
+#include <asm/smp.h>
+#include <asm/genapic.h>
+#include <asm/fixmap.h>
+#include <asm/apicdef.h>
+#include <asm/ipi.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/gfp.h>
+#include <linux/smp.h>
+
+static inline unsigned summit_get_apic_id(unsigned long x)
+{
+       return (x >> 24) & 0xFF;
+}
+
+static inline void summit_send_IPI_mask(const cpumask_t *mask, int vector)
+{
+       default_send_IPI_mask_sequence_logical(mask, vector);
+}
+
+static inline void summit_send_IPI_allbutself(int vector)
+{
+       cpumask_t mask = cpu_online_map;
+       cpu_clear(smp_processor_id(), mask);
+
+       if (!cpus_empty(mask))
+               summit_send_IPI_mask(&mask, vector);
+}
+
+static inline void summit_send_IPI_all(int vector)
+{
+       summit_send_IPI_mask(&cpu_online_map, vector);
+}
+
+#include <asm/tsc.h>
+
+extern int use_cyclone;
+
+#ifdef CONFIG_X86_SUMMIT_NUMA
+extern void setup_summit(void);
+#else
+#define setup_summit() {}
+#endif
+
+static inline int
+summit_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
+{
+       if (!strncmp(oem, "IBM ENSW", 8) &&
+                       (!strncmp(productid, "VIGIL SMP", 9)
+                        || !strncmp(productid, "EXA", 3)
+                        || !strncmp(productid, "RUTHLESS SMP", 12))){
+               mark_tsc_unstable("Summit based system");
+               use_cyclone = 1; /*enable cyclone-timer*/
+               setup_summit();
+               return 1;
+       }
+       return 0;
+}
+
+/* Hook from generic ACPI tables.c */
+static inline int summit_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+       if (!strncmp(oem_id, "IBM", 3) &&
+           (!strncmp(oem_table_id, "SERVIGIL", 8)
+            || !strncmp(oem_table_id, "EXA", 3))){
+               mark_tsc_unstable("Summit based system");
+               use_cyclone = 1; /*enable cyclone-timer*/
+               setup_summit();
+               return 1;
+       }
+       return 0;
+}
+
+struct rio_table_hdr {
+       unsigned char version;      /* Version number of this data structure           */
+                                   /* Version 3 adds chassis_num & WP_index           */
+       unsigned char num_scal_dev; /* # of Scalability devices (Twisters for Vigil)   */
+       unsigned char num_rio_dev;  /* # of RIO I/O devices (Cyclones and Winnipegs)   */
+} __attribute__((packed));
+
+struct scal_detail {
+       unsigned char node_id;      /* Scalability Node ID                             */
+       unsigned long CBAR;         /* Address of 1MB register space                   */
+       unsigned char port0node;    /* Node ID port connected to: 0xFF=None            */
+       unsigned char port0port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
+       unsigned char port1node;    /* Node ID port connected to: 0xFF = None          */
+       unsigned char port1port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
+       unsigned char port2node;    /* Node ID port connected to: 0xFF = None          */
+       unsigned char port2port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
+       unsigned char chassis_num;  /* 1 based Chassis number (1 = boot node)          */
+} __attribute__((packed));
+
+struct rio_detail {
+       unsigned char node_id;      /* RIO Node ID                                     */
+       unsigned long BBAR;         /* Address of 1MB register space                   */
+       unsigned char type;         /* Type of device                                  */
+       unsigned char owner_id;     /* For WPEG: Node ID of Cyclone that owns this WPEG*/
+                                   /* For CYC:  Node ID of Twister that owns this CYC */
+       unsigned char port0node;    /* Node ID port connected to: 0xFF=None            */
+       unsigned char port0port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
+       unsigned char port1node;    /* Node ID port connected to: 0xFF=None            */
+       unsigned char port1port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
+       unsigned char first_slot;   /* For WPEG: Lowest slot number below this WPEG    */
+                                   /* For CYC:  0                                     */
+       unsigned char status;       /* For WPEG: Bit 0 = 1 : the XAPIC is used         */
+                                   /*                 = 0 : the XAPIC is not used, ie:*/
+                                   /*                     ints fwded to another XAPIC */
+                                   /*           Bits1:7 Reserved                      */
+                                   /* For CYC:  Bits0:7 Reserved                      */
+       unsigned char WP_index;     /* For WPEG: WPEG instance index - lower ones have */
+                                   /*           lower slot numbers/PCI bus numbers    */
+                                   /* For CYC:  No meaning                            */
+       unsigned char chassis_num;  /* 1 based Chassis number                          */
+                                   /* For LookOut WPEGs this field indicates the      */
+                                   /* Expansion Chassis #, enumerated from Boot       */
+                                   /* Node WPEG external port, then Boot Node CYC     */
+                                   /* external port, then Next Vigil chassis WPEG     */
+                                   /* external port, etc.                             */
+                                   /* Shared Lookouts have only 1 chassis number (the */
+                                   /* first one assigned)                             */
+} __attribute__((packed));
+
+
+typedef enum {
+       CompatTwister = 0,  /* Compatibility Twister               */
+       AltTwister    = 1,  /* Alternate Twister of internal 8-way */
+       CompatCyclone = 2,  /* Compatibility Cyclone               */
+       AltCyclone    = 3,  /* Alternate Cyclone of internal 8-way */
+       CompatWPEG    = 4,  /* Compatibility WPEG                  */
+       AltWPEG       = 5,  /* Second Planar WPEG                  */
+       LookOutAWPEG  = 6,  /* LookOut WPEG                        */
+       LookOutBWPEG  = 7,  /* LookOut WPEG                        */
+} node_type;
+
+static inline int is_WPEG(struct rio_detail *rio){
+       return (rio->type == CompatWPEG || rio->type == AltWPEG ||
+               rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
+}
+
+
+/* In clustered mode, the high nibble of APIC ID is a cluster number.
+ * The low nibble is a 4-bit bitmap. */
+#define XAPIC_DEST_CPUS_SHIFT  4
+#define XAPIC_DEST_CPUS_MASK   ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
+#define XAPIC_DEST_CLUSTER_MASK        (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
+
+#define SUMMIT_APIC_DFR_VALUE  (APIC_DFR_CLUSTER)
+
+static inline const cpumask_t *summit_target_cpus(void)
+{
+       /* CPU_MASK_ALL (0xff) has undefined behaviour with
+        * dest_LowestPrio mode logical clustered apic interrupt routing.
+        * Just start on cpu 0.  IRQ balancing will spread the load.
+        */
+       return &cpumask_of_cpu(0);
+}
+
+static inline unsigned long
+summit_check_apicid_used(physid_mask_t bitmap, int apicid)
+{
+       return 0;
+}
+
+/* we don't use the phys_cpu_present_map to indicate apicid presence */
+static inline unsigned long summit_check_apicid_present(int bit)
+{
+       return 1;
+}
+
+#define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)
+
+extern u8 cpu_2_logical_apicid[];
+
+static inline void summit_init_apic_ldr(void)
+{
+       unsigned long val, id;
+       int count = 0;
+       u8 my_id = (u8)hard_smp_processor_id();
+       u8 my_cluster = (u8)apicid_cluster(my_id);
+#ifdef CONFIG_SMP
+       u8 lid;
+       int i;
+
+       /* Create logical APIC IDs by counting CPUs already in cluster. */
+       for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
+               lid = cpu_2_logical_apicid[i];
+               if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
+                       ++count;
+       }
+#endif
+       /* We only have a 4 wide bitmap in cluster mode.  If a deranged
+        * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
+       BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
+       id = my_cluster | (1UL << count);
+       apic_write(APIC_DFR, SUMMIT_APIC_DFR_VALUE);
+       val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
+       val |= SET_APIC_LOGICAL_ID(id);
+       apic_write(APIC_LDR, val);
+}
+
+static inline int summit_apic_id_registered(void)
+{
+       return 1;
+}
+
+static inline void summit_setup_apic_routing(void)
+{
+       printk("Enabling APIC mode:  Summit.  Using %d I/O APICs\n",
+                                               nr_ioapics);
+}
+
+static inline int summit_apicid_to_node(int logical_apicid)
+{
+#ifdef CONFIG_SMP
+       return apicid_2_node[hard_smp_processor_id()];
+#else
+       return 0;
+#endif
+}
+
+/* Mapping from cpu number to logical apicid */
+static inline int summit_cpu_to_logical_apicid(int cpu)
+{
+#ifdef CONFIG_SMP
+       if (cpu >= nr_cpu_ids)
+               return BAD_APICID;
+       return (int)cpu_2_logical_apicid[cpu];
+#else
+       return logical_smp_processor_id();
+#endif
+}
+
+static inline int summit_cpu_present_to_apicid(int mps_cpu)
+{
+       if (mps_cpu < nr_cpu_ids)
+               return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
+       else
+               return BAD_APICID;
+}
+
+static inline physid_mask_t
+summit_ioapic_phys_id_map(physid_mask_t phys_id_map)
+{
+       /* For clustered we don't have a good way to do this yet - hack */
+       return physids_promote(0x0F);
+}
+
+static inline physid_mask_t summit_apicid_to_cpu_present(int apicid)
+{
+       return physid_mask_of_physid(0);
+}
+
+static inline void summit_setup_portio_remap(void)
+{
+}
+
+static inline int summit_check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+       return 1;
+}
+
+static inline unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask)
+{
+       int cpus_found = 0;
+       int num_bits_set;
+       int apicid;
+       int cpu;
+
+       num_bits_set = cpus_weight(*cpumask);
+       /* Return id to all */
+       if (num_bits_set >= nr_cpu_ids)
+               return 0xFF;
+       /*
+        * The cpus in the mask must all be on the same apic cluster.  If they
+        * are not on the same apicid cluster, return the default value of
+        * target_cpus():
+        */
+       cpu = first_cpu(*cpumask);
+       apicid = summit_cpu_to_logical_apicid(cpu);
+
+       while (cpus_found < num_bits_set) {
+               if (cpu_isset(cpu, *cpumask)) {
+                       int new_apicid = summit_cpu_to_logical_apicid(cpu);
+
+                       if (apicid_cluster(apicid) !=
+                                       apicid_cluster(new_apicid)) {
+                               printk ("%s: Not a valid mask!\n", __func__);
+
+                               return 0xFF;
+                       }
+                       apicid = apicid | new_apicid;
+                       cpus_found++;
+               }
+               cpu++;
+       }
+       return apicid;
+}
+
+static inline unsigned int
+summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
+                             const struct cpumask *andmask)
+{
+       int apicid = summit_cpu_to_logical_apicid(0);
+       cpumask_var_t cpumask;
+
+       if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
+               return apicid;
+
+       cpumask_and(cpumask, inmask, andmask);
+       cpumask_and(cpumask, cpumask, cpu_online_mask);
+       apicid = summit_cpu_mask_to_apicid(cpumask);
+
+       free_cpumask_var(cpumask);
+
+       return apicid;
+}
+
+/*
+ * cpuid returns the value latched in the HW at reset, not the APIC ID
+ * register's value.  For any box whose BIOS changes APIC IDs, like
+ * clustered APIC systems, we must use hard_smp_processor_id.
+ *
+ * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
+ */
+static inline int summit_phys_pkg_id(int cpuid_apic, int index_msb)
+{
+       return hard_smp_processor_id() >> index_msb;
+}
+
+static int probe_summit(void)
+{
+       /* probed later in mptable/ACPI hooks */
+       return 0;
+}
+
+static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask)
+{
+       /* Careful. Some cpus do not strictly honor the set of cpus
+        * specified in the interrupt destination when using lowest
+        * priority interrupt delivery mode.
+        *
+        * In particular there was a hyperthreading cpu observed to
+        * deliver interrupts to the wrong hyperthread when only one
+        * hyperthread was specified in the interrupt destination.
+        */
+       *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
+}
+
+#ifdef CONFIG_X86_SUMMIT_NUMA
 static struct rio_table_hdr *rio_table_hdr __initdata;
 static struct scal_detail   *scal_devs[MAX_NUMNODES] __initdata;
 static struct rio_detail    *rio_devs[MAX_NUMNODES*4] __initdata;
@@ -186,3 +542,61 @@ void __init setup_summit(void)
                        next_wpeg = 0;
        } while (next_wpeg != 0);
 }
+#endif
+
+struct genapic apic_summit = {
+
+       .name                           = "summit",
+       .probe                          = probe_summit,
+       .acpi_madt_oem_check            = summit_acpi_madt_oem_check,
+       .apic_id_registered             = summit_apic_id_registered,
+
+       .irq_delivery_mode              = dest_LowestPrio,
+       /* logical delivery broadcast to all CPUs: */
+       .irq_dest_mode                  = 1,
+
+       .target_cpus                    = summit_target_cpus,
+       .disable_esr                    = 1,
+       .dest_logical                   = APIC_DEST_LOGICAL,
+       .check_apicid_used              = summit_check_apicid_used,
+       .check_apicid_present           = summit_check_apicid_present,
+
+       .vector_allocation_domain       = summit_vector_allocation_domain,
+       .init_apic_ldr                  = summit_init_apic_ldr,
+
+       .ioapic_phys_id_map             = summit_ioapic_phys_id_map,
+       .setup_apic_routing             = summit_setup_apic_routing,
+       .multi_timer_check              = NULL,
+       .apicid_to_node                 = summit_apicid_to_node,
+       .cpu_to_logical_apicid          = summit_cpu_to_logical_apicid,
+       .cpu_present_to_apicid          = summit_cpu_present_to_apicid,
+       .apicid_to_cpu_present          = summit_apicid_to_cpu_present,
+       .setup_portio_remap             = NULL,
+       .check_phys_apicid_present      = summit_check_phys_apicid_present,
+       .enable_apic_mode               = NULL,
+       .phys_pkg_id                    = summit_phys_pkg_id,
+       .mps_oem_check                  = summit_mps_oem_check,
+
+       .get_apic_id                    = summit_get_apic_id,
+       .set_apic_id                    = NULL,
+       .apic_id_mask                   = 0xFF << 24,
+
+       .cpu_mask_to_apicid             = summit_cpu_mask_to_apicid,
+       .cpu_mask_to_apicid_and         = summit_cpu_mask_to_apicid_and,
+
+       .send_IPI_mask                  = summit_send_IPI_mask,
+       .send_IPI_mask_allbutself       = NULL,
+       .send_IPI_allbutself            = summit_send_IPI_allbutself,
+       .send_IPI_all                   = summit_send_IPI_all,
+       .send_IPI_self                  = default_send_IPI_self,
+
+       .wakeup_cpu                     = NULL,
+       .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
+       .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+
+       .wait_for_init_deassert         = default_wait_for_init_deassert,
+
+       .smp_callin_clear_local_apic    = NULL,
+       .store_NMI_vector               = NULL,
+       .inquire_remote_apic            = default_inquire_remote_apic,
+};
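
With the sub-arch headers folded in, each 32-bit APIC flavour becomes a self-contained driver object like apic_summit above, picked at runtime through its probe() and MPS/ACPI OEM-check hooks rather than at Kconfig time. A compact stand-alone model of such a selection loop is below; the driver list, field names and the ordering of the checks are illustrative only.

#include <stdio.h>
#include <string.h>

struct genapic_model {
        const char *name;
        int (*probe)(void);
        int (*acpi_madt_oem_check)(const char *oem_id, const char *oem_table_id);
};

static int probe_summit_model(void) { return 0; /* selected via ACPI/MPS instead */ }

static int summit_acpi_check_model(const char *oem_id, const char *oem_table_id)
{
        return !strncmp(oem_id, "IBM", 3) && !strncmp(oem_table_id, "EXA", 3);
}

static int probe_default_model(void) { return 1; }

static struct genapic_model model_summit = {
        .name                   = "summit",
        .probe                  = probe_summit_model,
        .acpi_madt_oem_check    = summit_acpi_check_model,
};

static struct genapic_model model_default = {
        .name                   = "default",
        .probe                  = probe_default_model,
};

static struct genapic_model *drivers[] = { &model_summit, &model_default };

static struct genapic_model *pick_driver(const char *oem_id, const char *oem_table_id)
{
        for (unsigned i = 0; i < sizeof(drivers) / sizeof(drivers[0]); i++) {
                struct genapic_model *d = drivers[i];

                if (d->acpi_madt_oem_check &&
                    d->acpi_madt_oem_check(oem_id, oem_table_id))
                        return d;
                if (d->probe && d->probe())
                        return d;
        }
        return &model_default;
}

int main(void)
{
        printf("picked: %s\n", pick_driver("IBM", "EXA00000")->name);
        printf("picked: %s\n", pick_driver("ACME", "WHATEVER")->name);
        return 0;
}
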
index e2e86a0..3bdb648 100644 (file)
@@ -1,7 +1,7 @@
 ENTRY(sys_call_table)
        .long sys_restart_syscall       /* 0 - old "setup()" system call, used for restarting */
        .long sys_exit
-       .long sys_fork
+       .long ptregs_fork
        .long sys_read
        .long sys_write
        .long sys_open          /* 5 */
@@ -10,7 +10,7 @@ ENTRY(sys_call_table)
        .long sys_creat
        .long sys_link
        .long sys_unlink        /* 10 */
-       .long sys_execve
+       .long ptregs_execve
        .long sys_chdir
        .long sys_time
        .long sys_mknod
@@ -109,17 +109,17 @@ ENTRY(sys_call_table)
        .long sys_newlstat
        .long sys_newfstat
        .long sys_uname
-       .long sys_iopl          /* 110 */
+       .long ptregs_iopl       /* 110 */
        .long sys_vhangup
        .long sys_ni_syscall    /* old "idle" system call */
-       .long sys_vm86old
+       .long ptregs_vm86old
        .long sys_wait4
        .long sys_swapoff       /* 115 */
        .long sys_sysinfo
        .long sys_ipc
        .long sys_fsync
-       .long sys_sigreturn
-       .long sys_clone         /* 120 */
+       .long ptregs_sigreturn
+       .long ptregs_clone      /* 120 */
        .long sys_setdomainname
        .long sys_newuname
        .long sys_modify_ldt
@@ -165,14 +165,14 @@ ENTRY(sys_call_table)
        .long sys_mremap
        .long sys_setresuid16
        .long sys_getresuid16   /* 165 */
-       .long sys_vm86
+       .long ptregs_vm86
        .long sys_ni_syscall    /* Old sys_query_module */
        .long sys_poll
        .long sys_nfsservctl
        .long sys_setresgid16   /* 170 */
        .long sys_getresgid16
        .long sys_prctl
-       .long sys_rt_sigreturn
+       .long ptregs_rt_sigreturn
        .long sys_rt_sigaction
        .long sys_rt_sigprocmask        /* 175 */
        .long sys_rt_sigpending
@@ -185,11 +185,11 @@ ENTRY(sys_call_table)
        .long sys_getcwd
        .long sys_capget
        .long sys_capset        /* 185 */
-       .long sys_sigaltstack
+       .long ptregs_sigaltstack
        .long sys_sendfile
        .long sys_ni_syscall    /* reserved for streams1 */
        .long sys_ni_syscall    /* reserved for streams2 */
-       .long sys_vfork         /* 190 */
+       .long ptregs_vfork      /* 190 */
        .long sys_getrlimit
        .long sys_mmap2
        .long sys_truncate64
index 3985cac..764c74e 100644 (file)
@@ -38,7 +38,7 @@
 #include <asm/time.h>
 #include <asm/timer.h>
 
-#include "do_timer.h"
+#include <asm/do_timer.h>
 
 int timer_ack;
 
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c
deleted file mode 100644 (file)
index ce50546..0000000
+++ /dev/null
@@ -1,256 +0,0 @@
-#include <linux/spinlock.h>
-#include <linux/cpu.h>
-#include <linux/interrupt.h>
-
-#include <asm/tlbflush.h>
-
-DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate)
-                       ____cacheline_aligned = { &init_mm, 0, };
-
-/* must come after the send_IPI functions above for inlining */
-#include <mach_ipi.h>
-
-/*
- *     Smarter SMP flushing macros.
- *             c/o Linus Torvalds.
- *
- *     These mean you can really definitely utterly forget about
- *     writing to user space from interrupts. (Its not allowed anyway).
- *
- *     Optimizations Manfred Spraul <manfred@colorfullife.com>
- */
-
-static cpumask_t flush_cpumask;
-static struct mm_struct *flush_mm;
-static unsigned long flush_va;
-static DEFINE_SPINLOCK(tlbstate_lock);
-
-/*
- * We cannot call mmdrop() because we are in interrupt context,
- * instead update mm->cpu_vm_mask.
- *
- * We need to reload %cr3 since the page tables may be going
- * away from under us..
- */
-void leave_mm(int cpu)
-{
-       BUG_ON(x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK);
-       cpu_clear(cpu, x86_read_percpu(cpu_tlbstate.active_mm)->cpu_vm_mask);
-       load_cr3(swapper_pg_dir);
-}
-EXPORT_SYMBOL_GPL(leave_mm);
-
-/*
- *
- * The flush IPI assumes that a thread switch happens in this order:
- * [cpu0: the cpu that switches]
- * 1) switch_mm() either 1a) or 1b)
- * 1a) thread switch to a different mm
- * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
- *     Stop ipi delivery for the old mm. This is not synchronized with
- *     the other cpus, but smp_invalidate_interrupt ignore flush ipis
- *     for the wrong mm, and in the worst case we perform a superfluous
- *     tlb flush.
- * 1a2) set cpu_tlbstate to TLBSTATE_OK
- *     Now the smp_invalidate_interrupt won't call leave_mm if cpu0
- *     was in lazy tlb mode.
- * 1a3) update cpu_tlbstate[].active_mm
- *     Now cpu0 accepts tlb flushes for the new mm.
- * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
- *     Now the other cpus will send tlb flush ipis.
- * 1a4) change cr3.
- * 1b) thread switch without mm change
- *     cpu_tlbstate[].active_mm is correct, cpu0 already handles
- *     flush ipis.
- * 1b1) set cpu_tlbstate to TLBSTATE_OK
- * 1b2) test_and_set the cpu bit in cpu_vm_mask.
- *     Atomically set the bit [other cpus will start sending flush ipis],
- *     and test the bit.
- * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
- * 2) switch %%esp, ie current
- *
- * The interrupt must handle 2 special cases:
- * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
- * - the cpu performs speculative tlb reads, i.e. even if the cpu only
- *   runs in kernel space, the cpu could load tlb entries for user space
- *   pages.
- *
- * The good news is that cpu_tlbstate is local to each cpu, no
- * write/read ordering problems.
- */
-
-/*
- * TLB flush IPI:
- *
- * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
- * 2) Leave the mm if we are in the lazy tlb mode.
- */
-
-void smp_invalidate_interrupt(struct pt_regs *regs)
-{
-       unsigned long cpu;
-
-       cpu = get_cpu();
-
-       if (!cpu_isset(cpu, flush_cpumask))
-               goto out;
-               /*
-                * This was a BUG() but until someone can quote me the
-                * line from the intel manual that guarantees an IPI to
-                * multiple CPUs is retried _only_ on the erroring CPUs
-                * its staying as a return
-                *
-                * BUG();
-                */
-
-       if (flush_mm == x86_read_percpu(cpu_tlbstate.active_mm)) {
-               if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK) {
-                       if (flush_va == TLB_FLUSH_ALL)
-                               local_flush_tlb();
-                       else
-                               __flush_tlb_one(flush_va);
-               } else
-                       leave_mm(cpu);
-       }
-       ack_APIC_irq();
-       smp_mb__before_clear_bit();
-       cpu_clear(cpu, flush_cpumask);
-       smp_mb__after_clear_bit();
-out:
-       put_cpu_no_resched();
-       inc_irq_stat(irq_tlb_count);
-}
-
-void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
-                            unsigned long va)
-{
-       cpumask_t cpumask = *cpumaskp;
-
-       /*
-        * A couple of (to be removed) sanity checks:
-        *
-        * - current CPU must not be in mask
-        * - mask must exist :)
-        */
-       BUG_ON(cpus_empty(cpumask));
-       BUG_ON(cpu_isset(smp_processor_id(), cpumask));
-       BUG_ON(!mm);
-
-#ifdef CONFIG_HOTPLUG_CPU
-       /* If a CPU which we ran on has gone down, OK. */
-       cpus_and(cpumask, cpumask, cpu_online_map);
-       if (unlikely(cpus_empty(cpumask)))
-               return;
-#endif
-
-       /*
-        * i'm not happy about this global shared spinlock in the
-        * MM hot path, but we'll see how contended it is.
-        * AK: x86-64 has a faster method that could be ported.
-        */
-       spin_lock(&tlbstate_lock);
-
-       flush_mm = mm;
-       flush_va = va;
-       cpus_or(flush_cpumask, cpumask, flush_cpumask);
-
-       /*
-        * Make the above memory operations globally visible before
-        * sending the IPI.
-        */
-       smp_mb();
-       /*
-        * We have to send the IPI only to
-        * CPUs affected.
-        */
-       send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR);
-
-       while (!cpus_empty(flush_cpumask))
-               /* nothing. lockup detection does not belong here */
-               cpu_relax();
-
-       flush_mm = NULL;
-       flush_va = 0;
-       spin_unlock(&tlbstate_lock);
-}
-
-void flush_tlb_current_task(void)
-{
-       struct mm_struct *mm = current->mm;
-       cpumask_t cpu_mask;
-
-       preempt_disable();
-       cpu_mask = mm->cpu_vm_mask;
-       cpu_clear(smp_processor_id(), cpu_mask);
-
-       local_flush_tlb();
-       if (!cpus_empty(cpu_mask))
-               flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
-       preempt_enable();
-}
-
-void flush_tlb_mm(struct mm_struct *mm)
-{
-       cpumask_t cpu_mask;
-
-       preempt_disable();
-       cpu_mask = mm->cpu_vm_mask;
-       cpu_clear(smp_processor_id(), cpu_mask);
-
-       if (current->active_mm == mm) {
-               if (current->mm)
-                       local_flush_tlb();
-               else
-                       leave_mm(smp_processor_id());
-       }
-       if (!cpus_empty(cpu_mask))
-               flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
-
-       preempt_enable();
-}
-
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
-{
-       struct mm_struct *mm = vma->vm_mm;
-       cpumask_t cpu_mask;
-
-       preempt_disable();
-       cpu_mask = mm->cpu_vm_mask;
-       cpu_clear(smp_processor_id(), cpu_mask);
-
-       if (current->active_mm == mm) {
-               if (current->mm)
-                       __flush_tlb_one(va);
-                else
-                       leave_mm(smp_processor_id());
-       }
-
-       if (!cpus_empty(cpu_mask))
-               flush_tlb_others(cpu_mask, mm, va);
-
-       preempt_enable();
-}
-EXPORT_SYMBOL(flush_tlb_page);
-
-static void do_flush_tlb_all(void *info)
-{
-       unsigned long cpu = smp_processor_id();
-
-       __flush_tlb_all();
-       if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_LAZY)
-               leave_mm(cpu);
-}
-
-void flush_tlb_all(void)
-{
-       on_each_cpu(do_flush_tlb_all, NULL, 1);
-}
-
-void reset_lazy_tlbstate(void)
-{
-       int cpu = raw_smp_processor_id();
-
-       per_cpu(cpu_tlbstate, cpu).state = 0;
-       per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
-}
-
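The removed smp_invalidate_interrupt()/native_flush_tlb_others() pair above implements a simple rendezvous: the initiator publishes flush_mm/flush_va and a target mask under tlbstate_lock, sends INVALIDATE_TLB_VECTOR, and spins until every target has cleared its bit; each target flushes (or calls leave_mm() if it is lazy) and then clears itself from flush_cpumask. As a rough illustration only (hypothetical names; C11 atomics and threads stand in for the per-CPU state, the spinlock and the IPI), the handshake boils down to:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

static atomic_ulong flush_mask;                 /* bit i set: cpu i still has work */
static _Atomic(const char *) flush_what;        /* stands in for flush_mm/flush_va */

static void ipi_handler(int cpu)                /* what each target "cpu" does */
{
        if (!(atomic_load(&flush_mask) & (1UL << cpu)))
                return;                         /* not a target: nothing to do */
        printf("cpu%d: flushing %s\n", cpu, atomic_load(&flush_what));
        atomic_fetch_and(&flush_mask, ~(1UL << cpu));   /* signal completion */
}

static void *cpu_thread(void *arg)
{
        ipi_handler((int)(long)arg);
        return NULL;
}

int main(void)
{
        pthread_t t[NCPUS];
        long i;

        /* initiator: publish the work, "send the IPI", then wait on the mask */
        atomic_store(&flush_what, "one page of the current mm");
        atomic_store(&flush_mask, (1UL << 1) | (1UL << 2)); /* targets: cpu1, cpu2 */

        for (i = 0; i < NCPUS; i++)
                pthread_create(&t[i], NULL, cpu_thread, (void *)i);
        while (atomic_load(&flush_mask))        /* initiator spins, as the old code did */
                ;
        for (i = 0; i < NCPUS; i++)
                pthread_join(t[i], NULL);
        printf("initiator: all remote flushes acknowledged\n");
        return 0;
}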
index 6812b82..f396e61 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 
 #include <asm/mmu_context.h>
+#include <asm/uv/uv.h>
 #include <asm/uv/uv_mmrs.h>
 #include <asm/uv/uv_hub.h>
 #include <asm/uv/uv_bau.h>
@@ -19,7 +20,7 @@
 #include <asm/tsc.h>
 #include <asm/irq_vectors.h>
 
-#include <mach_apic.h>
+#include <asm/genapic.h>
 
 static struct bau_control      **uv_bau_table_bases __read_mostly;
 static int                     uv_bau_retry_limit __read_mostly;
@@ -210,14 +211,15 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
  *
  * Send a broadcast and wait for a broadcast message to complete.
  *
- * The cpumaskp mask contains the cpus the broadcast was sent to.
+ * The flush_mask contains the cpus the broadcast was sent to.
  *
- * Returns 1 if all remote flushing was done. The mask is zeroed.
- * Returns 0 if some remote flushing remains to be done. The mask is left
- * unchanged.
+ * Returns NULL if all remote flushing was done. The mask is zeroed.
+ * Returns @flush_mask if some remote flushing remains to be done. The
+ * mask will have some bits still set.
  */
-int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
-                          cpumask_t *cpumaskp)
+const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
+                                            struct bau_desc *bau_desc,
+                                            struct cpumask *flush_mask)
 {
        int completion_status = 0;
        int right_shift;
@@ -257,66 +259,76 @@ int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
                 * the cpu's, all of which are still in the mask.
                 */
                __get_cpu_var(ptcstats).ptc_i++;
-               return 0;
+               return flush_mask;
        }
 
        /*
         * Success, so clear the remote cpu's from the mask so we don't
         * use the IPI method of shootdown on them.
         */
-       for_each_cpu_mask(bit, *cpumaskp) {
+       for_each_cpu(bit, flush_mask) {
                blade = uv_cpu_to_blade_id(bit);
                if (blade == this_blade)
                        continue;
-               cpu_clear(bit, *cpumaskp);
+               cpumask_clear_cpu(bit, flush_mask);
        }
-       if (!cpus_empty(*cpumaskp))
-               return 0;
-       return 1;
+       if (!cpumask_empty(flush_mask))
+               return flush_mask;
+       return NULL;
 }
 
 /**
  * uv_flush_tlb_others - globally purge translation cache of a virtual
  * address or all TLB's
- * @cpumaskp: mask of all cpu's in which the address is to be removed
+ * @cpumask: mask of all cpu's in which the address is to be removed
  * @mm: mm_struct containing virtual address range
  * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
+ * @cpu: the current cpu
  *
  * This is the entry point for initiating any UV global TLB shootdown.
  *
  * Purges the translation caches of all specified processors of the given
  * virtual address, or purges all TLB's on specified processors.
  *
- * The caller has derived the cpumaskp from the mm_struct and has subtracted
- * the local cpu from the mask.  This function is called only if there
- * are bits set in the mask. (e.g. flush_tlb_page())
+ * The caller has derived the cpumask from the mm_struct.  This function
+ * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
  *
- * The cpumaskp is converted into a nodemask of the nodes containing
+ * The cpumask is converted into a nodemask of the nodes containing
  * the cpus.
  *
- * Returns 1 if all remote flushing was done.
- * Returns 0 if some remote flushing remains to be done.
+ * Note that this function should be called with preemption disabled.
+ *
+ * Returns NULL if all remote flushing was done.
+ * Returns pointer to cpumask if some remote flushing remains to be
+ * done.  The returned pointer is valid till preemption is re-enabled.
  */
-int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm,
-                       unsigned long va)
+const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
+                                         struct mm_struct *mm,
+                                         unsigned long va, unsigned int cpu)
 {
+       static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask);
+       struct cpumask *flush_mask = &__get_cpu_var(flush_tlb_mask);
        int i;
        int bit;
        int blade;
-       int cpu;
+       int uv_cpu;
        int this_blade;
        int locals = 0;
        struct bau_desc *bau_desc;
 
-       cpu = uv_blade_processor_id();
+       WARN_ON(!in_atomic());
+
+       cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
+
+       uv_cpu = uv_blade_processor_id();
        this_blade = uv_numa_blade_id();
        bau_desc = __get_cpu_var(bau_control).descriptor_base;
-       bau_desc += UV_ITEMS_PER_DESCRIPTOR * cpu;
+       bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu;
 
        bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
 
        i = 0;
-       for_each_cpu_mask(bit, *cpumaskp) {
+       for_each_cpu(bit, flush_mask) {
                blade = uv_cpu_to_blade_id(bit);
                BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1));
                if (blade == this_blade) {
@@ -331,17 +343,17 @@ int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm,
                 * no off_node flushing; return status for local node
                 */
                if (locals)
-                       return 0;
+                       return flush_mask;
                else
-                       return 1;
+                       return NULL;
        }
        __get_cpu_var(ptcstats).requestor++;
        __get_cpu_var(ptcstats).ntargeted += i;
 
        bau_desc->payload.address = va;
-       bau_desc->payload.sending_cpu = smp_processor_id();
+       bau_desc->payload.sending_cpu = cpu;
 
-       return uv_flush_send_and_wait(cpu, this_blade, bau_desc, cpumaskp);
+       return uv_flush_send_and_wait(uv_cpu, this_blade, bau_desc, flush_mask);
 }
 
 /*
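With the signature change above, uv_flush_tlb_others() no longer returns a 0/1 status: it returns NULL when the BAU handled every remote CPU, or a per-cpu mask of the CPUs that still need a flush. A sketch of the caller pattern this is meant to enable, assuming preemption is disabled and using a hypothetical flush_tlb_others_ipi() as the fallback path:

/* Sketch only: hypothetical caller showing the new return convention. */
static void example_flush_others(const struct cpumask *cpumask,
                                 struct mm_struct *mm, unsigned long va)
{
        unsigned int cpu = smp_processor_id();
        const struct cpumask *remaining;

        remaining = uv_flush_tlb_others(cpumask, mm, va, cpu);
        if (remaining)                                  /* BAU missed some CPUs ... */
                flush_tlb_others_ipi(remaining, mm, va); /* ... fall back to IPIs */
}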
index 894293c..95a012a 100644 (file)
@@ -29,6 +29,7 @@
 #include <asm/page.h>
 #include <asm/msr.h>
 #include <asm/segment.h>
+#include <asm/processor-flags.h>
 
 .section .rodata, "a", @progbits
 
@@ -37,7 +38,7 @@
 ENTRY(trampoline_data)
 r_base = .
        cli                     # We should be safe anyway
-       wbinvd  
+       wbinvd
        mov     %cs, %ax        # Code and data in the same place
        mov     %ax, %ds
        mov     %ax, %es
@@ -73,9 +74,8 @@ r_base = .
        lidtl   tidt - r_base   # load idt with 0, 0
        lgdtl   tgdt - r_base   # load gdt with whatever is appropriate
 
-       xor     %ax, %ax
-       inc     %ax             # protected mode (PE) bit
-       lmsw    %ax             # into protected mode
+       mov     $X86_CR0_PE, %ax        # protected mode (PE) bit
+       lmsw    %ax                     # into protected mode
 
        # flush prefetch and jump to startup_32
        ljmpl   *(startup_32_vector - r_base)
@@ -86,9 +86,8 @@ startup_32:
        movl    $__KERNEL_DS, %eax      # Initialize the %ds segment register
        movl    %eax, %ds
 
-       xorl    %eax, %eax
-       btsl    $5, %eax                # Enable PAE mode
-       movl    %eax, %cr4
+       movl    $X86_CR4_PAE, %eax
+       movl    %eax, %cr4              # Enable PAE mode
 
                                        # Setup trampoline 4 level pagetables
        leal    (trampoline_level4_pgt - r_base)(%esi), %eax
@@ -99,9 +98,9 @@ startup_32:
        xorl    %edx, %edx
        wrmsr
 
-       xorl    %eax, %eax
-       btsl    $31, %eax               # Enable paging and in turn activate Long Mode
-       btsl    $0, %eax                # Enable protected mode
+       # Enable paging and in turn activate Long Mode
+       # Enable protected mode
+       movl    $(X86_CR0_PG | X86_CR0_PE), %eax
        movl    %eax, %cr0
 
        /*
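The trampoline edits above replace hand-rolled xor/inc/btsl sequences with named constants from <asm/processor-flags.h>. The bit values below are reproduced from that header for illustration, and the small check confirms they encode exactly the bits the old instruction sequences set:

#include <assert.h>
#include <stdio.h>

#define X86_CR0_PE  0x00000001UL        /* CR0 bit 0:  protection enable */
#define X86_CR0_PG  0x80000000UL        /* CR0 bit 31: paging */
#define X86_CR4_PAE 0x00000020UL        /* CR4 bit 5:  physical address extension */

int main(void)
{
        assert(X86_CR0_PE  == 1UL);                     /* old: xor %ax,%ax; inc %ax */
        assert(X86_CR4_PAE == (1UL << 5));              /* old: btsl $5, %eax */
        assert((X86_CR0_PG | X86_CR0_PE) ==
               ((1UL << 31) | (1UL << 0)));             /* old: btsl $31; btsl $0 */
        printf("named constants match the removed bit-twiddling\n");
        return 0;
}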
index 7932338..bde57f0 100644 (file)
 #include <asm/desc.h>
 #include <asm/i387.h>
 
-#include <mach_traps.h>
+#include <asm/mach_traps.h>
 
 #ifdef CONFIG_X86_64
 #include <asm/pgalloc.h>
 #include <asm/proto.h>
-#include <asm/pda.h>
 #else
 #include <asm/processor-flags.h>
 #include <asm/arch_hooks.h>
@@ -906,19 +905,20 @@ void math_emulate(struct math_emu_info *info)
 }
 #endif /* CONFIG_MATH_EMULATION */
 
-dotraplinkage void __kprobes do_device_not_available(struct pt_regs regs)
+dotraplinkage void __kprobes
+do_device_not_available(struct pt_regs *regs, long error_code)
 {
 #ifdef CONFIG_X86_32
        if (read_cr0() & X86_CR0_EM) {
                struct math_emu_info info = { };
 
-               conditional_sti(&regs);
+               conditional_sti(regs);
 
-               info.regs = &regs;
+               info.regs = regs;
                math_emulate(&info);
        } else {
                math_state_restore(); /* interrupts still off */
-               conditional_sti(&regs);
+               conditional_sti(regs);
        }
 #else
        math_state_restore();
index 599e581..83d53ce 100644 (file)
@@ -773,7 +773,7 @@ __cpuinit int unsynchronized_tsc(void)
        if (!cpu_has_tsc || tsc_unstable)
                return 1;
 
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_SMP
        if (apic_is_clustered_box())
                return 1;
 #endif
index d801d06..4fd646e 100644 (file)
@@ -32,9 +32,9 @@
 #include <asm/e820.h>
 #include <asm/io.h>
 
-#include <mach_ipi.h>
+#include <asm/genapic.h>
 
-#include "mach_apic.h"
+#include <asm/genapic.h>
 
 #include <linux/kernel_stat.h>
 
@@ -200,7 +200,7 @@ static void __init MP_processor_info(struct mpc_cpu *m)
                return;
        }
 
-       apic_cpus = apicid_to_cpu_present(m->apicid);
+       apic_cpus = apic->apicid_to_cpu_present(m->apicid);
        physids_or(phys_cpu_present_map, phys_cpu_present_map, apic_cpus);
        /*
         * Validate version
index 4eeb5cf..d7ac84e 100644 (file)
@@ -158,7 +158,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
        ret = KVM86->regs32;
 
        ret->fs = current->thread.saved_fs;
-       loadsegment(gs, current->thread.saved_gs);
+       set_user_gs(ret, current->thread.saved_gs);
 
        return ret;
 }
@@ -197,9 +197,9 @@ out:
 static int do_vm86_irq_handling(int subfunction, int irqnumber);
 static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);
 
-asmlinkage int sys_vm86old(struct pt_regs regs)
+int sys_vm86old(struct pt_regs *regs)
 {
-       struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.bx;
+       struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs->bx;
        struct kernel_vm86_struct info; /* declare this _on top_,
                                         * this avoids wasting of stack space.
                                         * This remains on the stack until we
@@ -218,7 +218,7 @@ asmlinkage int sys_vm86old(struct pt_regs regs)
        if (tmp)
                goto out;
        memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
-       info.regs32 = &regs;
+       info.regs32 = regs;
        tsk->thread.vm86_info = v86;
        do_sys_vm86(&info, tsk);
        ret = 0;        /* we never return here */
@@ -227,7 +227,7 @@ out:
 }
 
 
-asmlinkage int sys_vm86(struct pt_regs regs)
+int sys_vm86(struct pt_regs *regs)
 {
        struct kernel_vm86_struct info; /* declare this _on top_,
                                         * this avoids wasting of stack space.
@@ -239,12 +239,12 @@ asmlinkage int sys_vm86(struct pt_regs regs)
        struct vm86plus_struct __user *v86;
 
        tsk = current;
-       switch (regs.bx) {
+       switch (regs->bx) {
        case VM86_REQUEST_IRQ:
        case VM86_FREE_IRQ:
        case VM86_GET_IRQ_BITS:
        case VM86_GET_AND_RESET_IRQ:
-               ret = do_vm86_irq_handling(regs.bx, (int)regs.cx);
+               ret = do_vm86_irq_handling(regs->bx, (int)regs->cx);
                goto out;
        case VM86_PLUS_INSTALL_CHECK:
                /*
@@ -261,14 +261,14 @@ asmlinkage int sys_vm86(struct pt_regs regs)
        ret = -EPERM;
        if (tsk->thread.saved_sp0)
                goto out;
-       v86 = (struct vm86plus_struct __user *)regs.cx;
+       v86 = (struct vm86plus_struct __user *)regs->cx;
        tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
                                       offsetof(struct kernel_vm86_struct, regs32) -
                                       sizeof(info.regs));
        ret = -EFAULT;
        if (tmp)
                goto out;
-       info.regs32 = &regs;
+       info.regs32 = regs;
        info.vm86plus.is_vm86pus = 1;
        tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
        do_sys_vm86(&info, tsk);
@@ -323,7 +323,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
        info->regs32->ax = 0;
        tsk->thread.saved_sp0 = tsk->thread.sp0;
        tsk->thread.saved_fs = info->regs32->fs;
-       savesegment(gs, tsk->thread.saved_gs);
+       tsk->thread.saved_gs = get_user_gs(info->regs32);
 
        tss = &per_cpu(init_tss, get_cpu());
        tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
index bef58b4..f052c84 100644 (file)
@@ -680,10 +680,11 @@ static inline int __init activate_vmi(void)
        para_fill(pv_mmu_ops.write_cr2, SetCR2);
        para_fill(pv_mmu_ops.write_cr3, SetCR3);
        para_fill(pv_cpu_ops.write_cr4, SetCR4);
-       para_fill(pv_irq_ops.save_fl, GetInterruptMask);
-       para_fill(pv_irq_ops.restore_fl, SetInterruptMask);
-       para_fill(pv_irq_ops.irq_disable, DisableInterrupts);
-       para_fill(pv_irq_ops.irq_enable, EnableInterrupts);
+
+       para_fill(pv_irq_ops.save_fl.func, GetInterruptMask);
+       para_fill(pv_irq_ops.restore_fl.func, SetInterruptMask);
+       para_fill(pv_irq_ops.irq_disable.func, DisableInterrupts);
+       para_fill(pv_irq_ops.irq_enable.func, EnableInterrupts);
 
        para_fill(pv_cpu_ops.wbinvd, WBINVD);
        para_fill(pv_cpu_ops.read_tsc, RDTSC);
index c4c1f9e..a4791ef 100644 (file)
@@ -256,7 +256,7 @@ void __devinit vmi_time_bsp_init(void)
         */
        clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
        local_irq_disable();
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_SMP
        /*
         * XXX handle_percpu_irq only defined for SMP; we need to switch over
         * to using it, since this is a local interrupt, which each CPU must
index 82c6755..3eba7f7 100644 (file)
@@ -178,14 +178,7 @@ SECTIONS
        __initramfs_end = .;
   }
 #endif
-  . = ALIGN(PAGE_SIZE);
-  .data.percpu  : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
-       __per_cpu_start = .;
-       *(.data.percpu.page_aligned)
-       *(.data.percpu)
-       *(.data.percpu.shared_aligned)
-       __per_cpu_end = .;
-  }
+  PERCPU(PAGE_SIZE)
   . = ALIGN(PAGE_SIZE);
   /* freed after init ends here */
 
index 1a614c0..087a7f2 100644 (file)
@@ -5,6 +5,7 @@
 #define LOAD_OFFSET __START_KERNEL_map
 
 #include <asm-generic/vmlinux.lds.h>
+#include <asm/asm-offsets.h>
 #include <asm/page.h>
 
 #undef i386    /* in case the preprocessor is a 32bit one */
@@ -13,12 +14,15 @@ OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
 OUTPUT_ARCH(i386:x86-64)
 ENTRY(phys_startup_64)
 jiffies_64 = jiffies;
-_proxy_pda = 1;
 PHDRS {
        text PT_LOAD FLAGS(5);  /* R_E */
        data PT_LOAD FLAGS(7);  /* RWE */
        user PT_LOAD FLAGS(7);  /* RWE */
        data.init PT_LOAD FLAGS(7);     /* RWE */
+#ifdef CONFIG_SMP
+       percpu PT_LOAD FLAGS(7);        /* RWE */
+#endif
+       data.init2 PT_LOAD FLAGS(7);    /* RWE */
        note PT_NOTE FLAGS(0);  /* ___ */
 }
 SECTIONS
@@ -208,14 +212,28 @@ SECTIONS
   __initramfs_end = .;
 #endif
 
+#ifdef CONFIG_SMP
+  /*
+   * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
+   * output PHDR, so the next output section - __data_nosave - should
+   * start another section data.init2.  Also, pda should be at the head of
+   * percpu area.  Preallocate it and define the percpu offset symbol
+   * so that it can be accessed as a percpu variable.
+   */
+  . = ALIGN(PAGE_SIZE);
+  PERCPU_VADDR(0, :percpu)
+#else
   PERCPU(PAGE_SIZE)
+#endif
 
   . = ALIGN(PAGE_SIZE);
   __init_end = .;
 
   . = ALIGN(PAGE_SIZE);
   __nosave_begin = .;
-  .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { *(.data.nosave) }
+  .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
+      *(.data.nosave)
+  } :data.init2 /* use another section data.init2, see PERCPU_VADDR() above */
   . = ALIGN(PAGE_SIZE);
   __nosave_end = .;
 
@@ -239,8 +257,21 @@ SECTIONS
   DWARF_DEBUG
 }
 
+ /*
+  * Per-cpu symbols which need to be offset from __per_cpu_load
+  * for the boot processor.
+  */
+#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
+INIT_PER_CPU(gdt_page);
+INIT_PER_CPU(irq_stack_union);
+
 /*
  * Build-time check on the image size:
  */
 ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
        "kernel image bigger than KERNEL_IMAGE_SIZE")
+
+#ifdef CONFIG_SMP
+ASSERT((per_cpu__irq_stack_union == 0),
+        "irq_stack_union is not at start of per-cpu area");
+#endif
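On 64-bit SMP the linker-script change above makes per-cpu offsets zero-based: PERCPU_VADDR(0, :percpu) links .data.percpu at virtual address 0 in its own program header, so a per-cpu symbol's value is simply its offset from the per-cpu base carried in %gs, and the INIT_PER_CPU() aliases give the boot CPU usable addresses inside the initial __per_cpu_load image. A rough sketch of the kind of access this enables (simplified; the real accessors are the percpu_read()/percpu_write() macros in asm/percpu.h):

DEFINE_PER_CPU(int, example_counter);           /* symbol value == its offset */

static inline int read_example_counter(void)
{
        int val;

        /* a single %gs-relative load: per-cpu base (in %gs) + zero-based offset */
        asm("movl %%gs:%P1, %0" : "=r" (val) : "m" (per_cpu__example_counter));
        return val;
}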
index a688f3b..c609205 100644 (file)
@@ -37,6 +37,7 @@ static unsigned long vsmp_save_fl(void)
                flags &= ~X86_EFLAGS_IF;
        return flags;
 }
+PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);
 
 static void vsmp_restore_fl(unsigned long flags)
 {
@@ -46,6 +47,7 @@ static void vsmp_restore_fl(unsigned long flags)
                flags |= X86_EFLAGS_AC;
        native_restore_fl(flags);
 }
+PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
 
 static void vsmp_irq_disable(void)
 {
@@ -53,6 +55,7 @@ static void vsmp_irq_disable(void)
 
        native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
 }
+PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
 
 static void vsmp_irq_enable(void)
 {
@@ -60,6 +63,7 @@ static void vsmp_irq_enable(void)
 
        native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
 }
+PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable);
 
 static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf,
                                  unsigned long addr, unsigned len)
@@ -90,10 +94,10 @@ static void __init set_vsmp_pv_ops(void)
               cap, ctl);
        if (cap & ctl & (1 << 4)) {
                /* Setup irq ops and turn on vSMP  IRQ fastpath handling */
-               pv_irq_ops.irq_disable = vsmp_irq_disable;
-               pv_irq_ops.irq_enable  = vsmp_irq_enable;
-               pv_irq_ops.save_fl  = vsmp_save_fl;
-               pv_irq_ops.restore_fl  = vsmp_restore_fl;
+               pv_irq_ops.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
+               pv_irq_ops.irq_enable  = PV_CALLEE_SAVE(vsmp_irq_enable);
+               pv_irq_ops.save_fl  = PV_CALLEE_SAVE(vsmp_save_fl);
+               pv_irq_ops.restore_fl  = PV_CALLEE_SAVE(vsmp_restore_fl);
                pv_init_ops.patch = vsmp_patch;
 
                ctl &= ~(1 << 4);
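The pv_irq_ops members assigned above change from plain function pointers to struct paravirt_callee_save wrappers: PV_CALLEE_SAVE_REGS_THUNK() emits an assembly thunk __raw_callee_save_<fn> that preserves the caller-clobbered registers around the C function, and PV_CALLEE_SAVE() stores that thunk in the ops structure so call sites may treat these very hot operations as register-preserving. Roughly, and simplified from the real asm/paravirt.h definitions (32-bit registers shown, details differ):

struct paravirt_callee_save {
        void *func;                     /* the register-preserving thunk */
};

/* wrap a C function in its thunk when filling in the ops structure */
#define PV_CALLEE_SAVE(func)                                            \
        ((struct paravirt_callee_save) { __raw_callee_save_##func })

/* emit the thunk: save the caller-clobbered registers, call, restore */
#define PV_CALLEE_SAVE_REGS_THUNK(func)                                 \
        extern typeof(func) __raw_callee_save_##func;                   \
        asm(".pushsection .text;"                                       \
            "__raw_callee_save_" #func ": "                             \
            "push %ecx; push %edx;"                                     \
            "call " #func ";"                                           \
            "pop %edx; pop %ecx;"                                       \
            "ret;"                                                      \
            ".popsection")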
index 695e426..3909e3b 100644 (file)
@@ -58,5 +58,3 @@ EXPORT_SYMBOL(__memcpy);
 EXPORT_SYMBOL(empty_zero_page);
 EXPORT_SYMBOL(init_level4_pgt);
 EXPORT_SYMBOL(load_gs_index);
-
-EXPORT_SYMBOL(_proxy_pda);
index 92f1c6f..da2e314 100644 (file)
@@ -173,24 +173,29 @@ static unsigned long save_fl(void)
 {
        return lguest_data.irq_enabled;
 }
+PV_CALLEE_SAVE_REGS_THUNK(save_fl);
 
 /* restore_flags() just sets the flags back to the value given. */
 static void restore_fl(unsigned long flags)
 {
        lguest_data.irq_enabled = flags;
 }
+PV_CALLEE_SAVE_REGS_THUNK(restore_fl);
 
 /* Interrupts go off... */
 static void irq_disable(void)
 {
        lguest_data.irq_enabled = 0;
 }
+PV_CALLEE_SAVE_REGS_THUNK(irq_disable);
 
 /* Interrupts go on... */
 static void irq_enable(void)
 {
        lguest_data.irq_enabled = X86_EFLAGS_IF;
 }
+PV_CALLEE_SAVE_REGS_THUNK(irq_enable);
+
 /*:*/
 /*M:003 Note that we don't check for outstanding interrupts when we re-enable
  * them (or when we unmask an interrupt).  This seems to work for the moment,
@@ -278,7 +283,7 @@ static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
        /* There's one problem which normal hardware doesn't have: the Host
         * can't handle us removing entries we're currently using.  So we clear
         * the GS register here: if it's needed it'll be reloaded anyway. */
-       loadsegment(gs, 0);
+       lazy_load_gs(0);
        lazy_hcall(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu, 0);
 }
 
@@ -984,10 +989,10 @@ __init void lguest_init(void)
 
        /* interrupt-related operations */
        pv_irq_ops.init_IRQ = lguest_init_IRQ;
-       pv_irq_ops.save_fl = save_fl;
-       pv_irq_ops.restore_fl = restore_fl;
-       pv_irq_ops.irq_disable = irq_disable;
-       pv_irq_ops.irq_enable = irq_enable;
+       pv_irq_ops.save_fl = PV_CALLEE_SAVE(save_fl);
+       pv_irq_ops.restore_fl = PV_CALLEE_SAVE(restore_fl);
+       pv_irq_ops.irq_disable = PV_CALLEE_SAVE(irq_disable);
+       pv_irq_ops.irq_enable = PV_CALLEE_SAVE(irq_enable);
        pv_irq_ops.safe_halt = lguest_safe_halt;
 
        /* init-time operations */
diff --git a/arch/x86/mach-default/Makefile b/arch/x86/mach-default/Makefile
deleted file mode 100644 (file)
index 012fe34..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-obj-y                          := setup.o
diff --git a/arch/x86/mach-default/setup.c b/arch/x86/mach-default/setup.c
deleted file mode 100644 (file)
index a265a7c..0000000
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- *     Machine specific setup for generic
- */
-
-#include <linux/smp.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <asm/acpi.h>
-#include <asm/arch_hooks.h>
-#include <asm/e820.h>
-#include <asm/setup.h>
-
-#include <mach_ipi.h>
-
-#ifdef CONFIG_HOTPLUG_CPU
-#define DEFAULT_SEND_IPI       (1)
-#else
-#define DEFAULT_SEND_IPI       (0)
-#endif
-
-int no_broadcast = DEFAULT_SEND_IPI;
-
-/**
- * pre_intr_init_hook - initialisation prior to setting up interrupt vectors
- *
- * Description:
- *     Perform any necessary interrupt initialisation prior to setting up
- *     the "ordinary" interrupt call gates.  For legacy reasons, the ISA
- *     interrupts should be initialised here if the machine emulates a PC
- *     in any way.
- **/
-void __init pre_intr_init_hook(void)
-{
-       if (x86_quirks->arch_pre_intr_init) {
-               if (x86_quirks->arch_pre_intr_init())
-                       return;
-       }
-       init_ISA_irqs();
-}
-
-/*
- * IRQ2 is cascade interrupt to second interrupt controller
- */
-static struct irqaction irq2 = {
-       .handler = no_action,
-       .mask = CPU_MASK_NONE,
-       .name = "cascade",
-};
-
-/**
- * intr_init_hook - post gate setup interrupt initialisation
- *
- * Description:
- *     Fill in any interrupts that may have been left out by the general
- *     init_IRQ() routine.  interrupts having to do with the machine rather
- *     than the devices on the I/O bus (like APIC interrupts in intel MP
- *     systems) are started here.
- **/
-void __init intr_init_hook(void)
-{
-       if (x86_quirks->arch_intr_init) {
-               if (x86_quirks->arch_intr_init())
-                       return;
-       }
-       if (!acpi_ioapic)
-               setup_irq(2, &irq2);
-
-}
-
-/**
- * pre_setup_arch_hook - hook called prior to any setup_arch() execution
- *
- * Description:
- *     generally used to activate any machine specific identification
- *     routines that may be needed before setup_arch() runs.  On Voyager
- *     this is used to get the board revision and type.
- **/
-void __init pre_setup_arch_hook(void)
-{
-}
-
-/**
- * trap_init_hook - initialise system specific traps
- *
- * Description:
- *     Called as the final act of trap_init().  Used in VISWS to initialise
- *     the various board specific APIC traps.
- **/
-void __init trap_init_hook(void)
-{
-       if (x86_quirks->arch_trap_init) {
-               if (x86_quirks->arch_trap_init())
-                       return;
-       }
-}
-
-static struct irqaction irq0  = {
-       .handler = timer_interrupt,
-       .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL,
-       .mask = CPU_MASK_NONE,
-       .name = "timer"
-};
-
-/**
- * pre_time_init_hook - do any specific initialisations before.
- *
- **/
-void __init pre_time_init_hook(void)
-{
-       if (x86_quirks->arch_pre_time_init)
-               x86_quirks->arch_pre_time_init();
-}
-
-/**
- * time_init_hook - do any specific initialisations for the system timer.
- *
- * Description:
- *     Must plug the system timer interrupt source at HZ into the IRQ listed
- *     in irq_vectors.h:TIMER_IRQ
- **/
-void __init time_init_hook(void)
-{
-       if (x86_quirks->arch_time_init) {
-               /*
-                * A nonzero return code does not mean failure, it means
-                * that the architecture quirk does not want any
-                * generic (timer) setup to be performed after this:
-                */
-               if (x86_quirks->arch_time_init())
-                       return;
-       }
-
-       irq0.mask = cpumask_of_cpu(0);
-       setup_irq(0, &irq0);
-}
-
-#ifdef CONFIG_MCA
-/**
- * mca_nmi_hook - hook into MCA specific NMI chain
- *
- * Description:
- *     The MCA (Microchannel Architecture) has an NMI chain for NMI sources
- *     along the MCA bus.  Use this to hook into that chain if you will need
- *     it.
- **/
-void mca_nmi_hook(void)
-{
-       /*
-        * If I recall correctly, there's a whole bunch of other things that
-        * we can do to check for NMI problems, but that's all I know about
-        * at the moment.
-        */
-       pr_warning("NMI generated from unknown source!\n");
-}
-#endif
-
-static __init int no_ipi_broadcast(char *str)
-{
-       get_option(&str, &no_broadcast);
-       pr_info("Using %s mode\n",
-               no_broadcast ? "No IPI Broadcast" : "IPI Broadcast");
-       return 1;
-}
-__setup("no_ipi_broadcast=", no_ipi_broadcast);
-
-static int __init print_ipi_mode(void)
-{
-       pr_info("Using IPI %s mode\n",
-               no_broadcast ? "No-Shortcut" : "Shortcut");
-       return 0;
-}
-
-late_initcall(print_ipi_mode);
-
diff --git a/arch/x86/mach-generic/Makefile b/arch/x86/mach-generic/Makefile
deleted file mode 100644 (file)
index 6730f4e..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#
-# Makefile for the generic architecture
-#
-
-EXTRA_CFLAGS                   := -Iarch/x86/kernel
-
-obj-y                          := probe.o default.o
-obj-$(CONFIG_X86_NUMAQ)                += numaq.o
-obj-$(CONFIG_X86_SUMMIT)       += summit.o
-obj-$(CONFIG_X86_BIGSMP)       += bigsmp.o
-obj-$(CONFIG_X86_ES7000)       += es7000.o
diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c
deleted file mode 100644 (file)
index bc4c784..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * APIC driver for "bigsmp" XAPIC machines with more than 8 virtual CPUs.
- * Drives the local APIC in "clustered mode".
- */
-#define APIC_DEFINITION 1
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <asm/mpspec.h>
-#include <asm/genapic.h>
-#include <asm/fixmap.h>
-#include <asm/apicdef.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/dmi.h>
-#include <asm/bigsmp/apicdef.h>
-#include <linux/smp.h>
-#include <asm/bigsmp/apic.h>
-#include <asm/bigsmp/ipi.h>
-#include <asm/mach-default/mach_mpparse.h>
-#include <asm/mach-default/mach_wakecpu.h>
-
-static int dmi_bigsmp; /* can be set by dmi scanners */
-
-static int hp_ht_bigsmp(const struct dmi_system_id *d)
-{
-       printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
-       dmi_bigsmp = 1;
-       return 0;
-}
-
-
-static const struct dmi_system_id bigsmp_dmi_table[] = {
-       { hp_ht_bigsmp, "HP ProLiant DL760 G2",
-       { DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
-       DMI_MATCH(DMI_BIOS_VERSION, "P44-"),}
-       },
-
-       { hp_ht_bigsmp, "HP ProLiant DL740",
-       { DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
-       DMI_MATCH(DMI_BIOS_VERSION, "P47-"),}
-       },
-        { }
-};
-
-static void vector_allocation_domain(int cpu, cpumask_t *retmask)
-{
-       cpus_clear(*retmask);
-       cpu_set(cpu, *retmask);
-}
-
-static int probe_bigsmp(void)
-{
-       if (def_to_bigsmp)
-               dmi_bigsmp = 1;
-       else
-               dmi_check_system(bigsmp_dmi_table);
-       return dmi_bigsmp;
-}
-
-struct genapic apic_bigsmp = APIC_INIT("bigsmp", probe_bigsmp);
diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c
deleted file mode 100644 (file)
index e63a4a7..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Default generic APIC driver. This handles up to 8 CPUs.
- */
-#define APIC_DEFINITION 1
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <asm/mpspec.h>
-#include <asm/mach-default/mach_apicdef.h>
-#include <asm/genapic.h>
-#include <asm/fixmap.h>
-#include <asm/apicdef.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/smp.h>
-#include <linux/init.h>
-#include <asm/mach-default/mach_apic.h>
-#include <asm/mach-default/mach_ipi.h>
-#include <asm/mach-default/mach_mpparse.h>
-#include <asm/mach-default/mach_wakecpu.h>
-
-/* should be called last. */
-static int probe_default(void)
-{
-       return 1;
-}
-
-struct genapic apic_default = APIC_INIT("default", probe_default);
diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c
deleted file mode 100644 (file)
index c2ded14..0000000
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * APIC driver for the Unisys ES7000 chipset.
- */
-#define APIC_DEFINITION 1
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <asm/mpspec.h>
-#include <asm/genapic.h>
-#include <asm/fixmap.h>
-#include <asm/apicdef.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <asm/es7000/apicdef.h>
-#include <linux/smp.h>
-#include <asm/es7000/apic.h>
-#include <asm/es7000/ipi.h>
-#include <asm/es7000/mpparse.h>
-#include <asm/mach-default/mach_wakecpu.h>
-
-void __init es7000_update_genapic_to_cluster(void)
-{
-       genapic->target_cpus = target_cpus_cluster;
-       genapic->int_delivery_mode = INT_DELIVERY_MODE_CLUSTER;
-       genapic->int_dest_mode = INT_DEST_MODE_CLUSTER;
-       genapic->no_balance_irq = NO_BALANCE_IRQ_CLUSTER;
-
-       genapic->init_apic_ldr = init_apic_ldr_cluster;
-
-       genapic->cpu_mask_to_apicid = cpu_mask_to_apicid_cluster;
-}
-
-static int probe_es7000(void)
-{
-       /* probed later in mptable/ACPI hooks */
-       return 0;
-}
-
-extern void es7000_sw_apic(void);
-static void __init enable_apic_mode(void)
-{
-       es7000_sw_apic();
-       return;
-}
-
-static __init int
-mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
-{
-       if (mpc->oemptr) {
-               struct mpc_oemtable *oem_table =
-                       (struct mpc_oemtable *)mpc->oemptr;
-               if (!strncmp(oem, "UNISYS", 6))
-                       return parse_unisys_oem((char *)oem_table);
-       }
-       return 0;
-}
-
-#ifdef CONFIG_ACPI
-/* Hook from generic ACPI tables.c */
-static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
-       unsigned long oem_addr = 0;
-       int check_dsdt;
-       int ret = 0;
-
-       /* check dsdt at first to avoid clear fix_map for oem_addr */
-       check_dsdt = es7000_check_dsdt();
-
-       if (!find_unisys_acpi_oem_table(&oem_addr)) {
-               if (check_dsdt)
-                       ret = parse_unisys_oem((char *)oem_addr);
-               else {
-                       setup_unisys();
-                       ret = 1;
-               }
-               /*
-                * we need to unmap it
-                */
-               unmap_unisys_acpi_oem_table(oem_addr);
-       }
-       return ret;
-}
-#else
-static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
-       return 0;
-}
-#endif
-
-static void vector_allocation_domain(int cpu, cpumask_t *retmask)
-{
-       /* Careful. Some cpus do not strictly honor the set of cpus
-        * specified in the interrupt destination when using lowest
-        * priority interrupt delivery mode.
-        *
-        * In particular there was a hyperthreading cpu observed to
-        * deliver interrupts to the wrong hyperthread when only one
-        * hyperthread was specified in the interrupt destination.
-        */
-       *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
-}
-
-struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000);
diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c
deleted file mode 100644 (file)
index 3679e22..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * APIC driver for the IBM NUMAQ chipset.
- */
-#define APIC_DEFINITION 1
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <asm/mpspec.h>
-#include <asm/genapic.h>
-#include <asm/fixmap.h>
-#include <asm/apicdef.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <asm/numaq/apicdef.h>
-#include <linux/smp.h>
-#include <asm/numaq/apic.h>
-#include <asm/numaq/ipi.h>
-#include <asm/numaq/mpparse.h>
-#include <asm/numaq/wakecpu.h>
-#include <asm/numaq.h>
-
-static int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
-{
-       numaq_mps_oem_check(mpc, oem, productid);
-       return found_numaq;
-}
-
-static int probe_numaq(void)
-{
-       /* already know from get_memcfg_numaq() */
-       return found_numaq;
-}
-
-/* Hook from generic ACPI tables.c */
-static int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
-       return 0;
-}
-
-static void vector_allocation_domain(int cpu, cpumask_t *retmask)
-{
-       /* Careful. Some cpus do not strictly honor the set of cpus
-        * specified in the interrupt destination when using lowest
-        * priority interrupt delivery mode.
-        *
-        * In particular there was a hyperthreading cpu observed to
-        * deliver interrupts to the wrong hyperthread when only one
-        * hyperthread was specified in the interrupt destination.
-        */
-       *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
-}
-
-struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq);
diff --git a/arch/x86/mach-generic/probe.c b/arch/x86/mach-generic/probe.c
deleted file mode 100644 (file)
index 15a38da..0000000
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * Copyright 2003 Andi Kleen, SuSE Labs.
- * Subject to the GNU Public License, v.2
- *
- * Generic x86 APIC driver probe layer.
- */
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/ctype.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <asm/fixmap.h>
-#include <asm/mpspec.h>
-#include <asm/apicdef.h>
-#include <asm/genapic.h>
-#include <asm/setup.h>
-
-extern struct genapic apic_numaq;
-extern struct genapic apic_summit;
-extern struct genapic apic_bigsmp;
-extern struct genapic apic_es7000;
-extern struct genapic apic_default;
-
-struct genapic *genapic = &apic_default;
-
-static struct genapic *apic_probe[] __initdata = {
-#ifdef CONFIG_X86_NUMAQ
-       &apic_numaq,
-#endif
-#ifdef CONFIG_X86_SUMMIT
-       &apic_summit,
-#endif
-#ifdef CONFIG_X86_BIGSMP
-       &apic_bigsmp,
-#endif
-#ifdef CONFIG_X86_ES7000
-       &apic_es7000,
-#endif
-       &apic_default,  /* must be last */
-       NULL,
-};
-
-static int cmdline_apic __initdata;
-static int __init parse_apic(char *arg)
-{
-       int i;
-
-       if (!arg)
-               return -EINVAL;
-
-       for (i = 0; apic_probe[i]; i++) {
-               if (!strcmp(apic_probe[i]->name, arg)) {
-                       genapic = apic_probe[i];
-                       cmdline_apic = 1;
-                       return 0;
-               }
-       }
-
-       if (x86_quirks->update_genapic)
-               x86_quirks->update_genapic();
-
-       /* Parsed again by __setup for debug/verbose */
-       return 0;
-}
-early_param("apic", parse_apic);
-
-void __init generic_bigsmp_probe(void)
-{
-#ifdef CONFIG_X86_BIGSMP
-       /*
-        * This routine is used to switch to bigsmp mode when
-        * - There is no apic= option specified by the user
-        * - generic_apic_probe() has chosen apic_default as the sub_arch
-        * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support
-        */
-
-       if (!cmdline_apic && genapic == &apic_default) {
-               if (apic_bigsmp.probe()) {
-                       genapic = &apic_bigsmp;
-                       if (x86_quirks->update_genapic)
-                               x86_quirks->update_genapic();
-                       printk(KERN_INFO "Overriding APIC driver with %s\n",
-                              genapic->name);
-               }
-       }
-#endif
-}
-
-void __init generic_apic_probe(void)
-{
-       if (!cmdline_apic) {
-               int i;
-               for (i = 0; apic_probe[i]; i++) {
-                       if (apic_probe[i]->probe()) {
-                               genapic = apic_probe[i];
-                               break;
-                       }
-               }
-               /* Not visible without early console */
-               if (!apic_probe[i])
-                       panic("Didn't find an APIC driver");
-
-               if (x86_quirks->update_genapic)
-                       x86_quirks->update_genapic();
-       }
-       printk(KERN_INFO "Using APIC driver %s\n", genapic->name);
-}
-
-/* These functions can switch the APIC even after the initial ->probe() */
-
-int __init mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
-{
-       int i;
-       for (i = 0; apic_probe[i]; ++i) {
-               if (apic_probe[i]->mps_oem_check(mpc, oem, productid)) {
-                       if (!cmdline_apic) {
-                               genapic = apic_probe[i];
-                               if (x86_quirks->update_genapic)
-                                       x86_quirks->update_genapic();
-                               printk(KERN_INFO "Switched to APIC driver `%s'.\n",
-                                      genapic->name);
-                       }
-                       return 1;
-               }
-       }
-       return 0;
-}
-
-int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
-       int i;
-       for (i = 0; apic_probe[i]; ++i) {
-               if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) {
-                       if (!cmdline_apic) {
-                               genapic = apic_probe[i];
-                               if (x86_quirks->update_genapic)
-                                       x86_quirks->update_genapic();
-                               printk(KERN_INFO "Switched to APIC driver `%s'.\n",
-                                      genapic->name);
-                       }
-                       return 1;
-               }
-       }
-       return 0;
-}
-
-int hard_smp_processor_id(void)
-{
-       return genapic->get_apic_id(*(unsigned long *)(APIC_BASE+APIC_ID));
-}
diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c
deleted file mode 100644 (file)
index 2821ffc..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * APIC driver for the IBM "Summit" chipset.
- */
-#define APIC_DEFINITION 1
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <asm/mpspec.h>
-#include <asm/genapic.h>
-#include <asm/fixmap.h>
-#include <asm/apicdef.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <asm/summit/apicdef.h>
-#include <linux/smp.h>
-#include <asm/summit/apic.h>
-#include <asm/summit/ipi.h>
-#include <asm/summit/mpparse.h>
-#include <asm/mach-default/mach_wakecpu.h>
-
-static int probe_summit(void)
-{
-       /* probed later in mptable/ACPI hooks */
-       return 0;
-}
-
-static void vector_allocation_domain(int cpu, cpumask_t *retmask)
-{
-       /* Careful. Some cpus do not strictly honor the set of cpus
-        * specified in the interrupt destination when using lowest
-        * priority interrupt delivery mode.
-        *
-        * In particular there was a hyperthreading cpu observed to
-        * deliver interrupts to the wrong hyperthread when only one
-        * hyperthread was specified in the interrupt destination.
-        */
-       *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
-}
-
-struct genapic apic_summit = APIC_INIT("summit", probe_summit);
diff --git a/arch/x86/mach-rdc321x/Makefile b/arch/x86/mach-rdc321x/Makefile
deleted file mode 100644 (file)
index 8325b4c..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the RDC321x specific parts of the kernel
-#
-obj-$(CONFIG_X86_RDC321X)        := gpio.o platform.o
-
diff --git a/arch/x86/mach-rdc321x/gpio.c b/arch/x86/mach-rdc321x/gpio.c
deleted file mode 100644 (file)
index 247f33d..0000000
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- *  GPIO support for RDC SoC R3210/R8610
- *
- *  Copyright (C) 2007, Florian Fainelli <florian@openwrt.org>
- *  Copyright (C) 2008, Volker Weiss <dev@tintuc.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-
-#include <linux/spinlock.h>
-#include <linux/io.h>
-#include <linux/types.h>
-#include <linux/module.h>
-
-#include <asm/gpio.h>
-#include <asm/mach-rdc321x/rdc321x_defs.h>
-
-
-/* spin lock to protect our private copy of GPIO data register plus
-   the access to PCI conf registers. */
-static DEFINE_SPINLOCK(gpio_lock);
-
-/* copy of GPIO data registers */
-static u32 gpio_data_reg1;
-static u32 gpio_data_reg2;
-
-static u32 gpio_request_data[2];
-
-
-static inline void rdc321x_conf_write(unsigned addr, u32 value)
-{
-       outl((1 << 31) | (7 << 11) | addr, RDC3210_CFGREG_ADDR);
-       outl(value, RDC3210_CFGREG_DATA);
-}
-
-static inline void rdc321x_conf_or(unsigned addr, u32 value)
-{
-       outl((1 << 31) | (7 << 11) | addr, RDC3210_CFGREG_ADDR);
-       value |= inl(RDC3210_CFGREG_DATA);
-       outl(value, RDC3210_CFGREG_DATA);
-}
-
-static inline u32 rdc321x_conf_read(unsigned addr)
-{
-       outl((1 << 31) | (7 << 11) | addr, RDC3210_CFGREG_ADDR);
-
-       return inl(RDC3210_CFGREG_DATA);
-}
-
-/* configure pin as GPIO */
-static void rdc321x_configure_gpio(unsigned gpio)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&gpio_lock, flags);
-       rdc321x_conf_or(gpio < 32
-               ? RDC321X_GPIO_CTRL_REG1 : RDC321X_GPIO_CTRL_REG2,
-               1 << (gpio & 0x1f));
-       spin_unlock_irqrestore(&gpio_lock, flags);
-}
-
-/* initially setup the 2 copies of the gpio data registers.
-   This function must be called by the platform setup code. */
-void __init rdc321x_gpio_setup()
-{
-       /* this might not be, what others (BIOS, bootloader, etc.)
-          wrote to these registers before, but it's a good guess. Still
-          better than just using 0xffffffff. */
-
-       gpio_data_reg1 = rdc321x_conf_read(RDC321X_GPIO_DATA_REG1);
-       gpio_data_reg2 = rdc321x_conf_read(RDC321X_GPIO_DATA_REG2);
-}
-
-/* determine, if gpio number is valid */
-static inline int rdc321x_is_gpio(unsigned gpio)
-{
-       return gpio <= RDC321X_MAX_GPIO;
-}
-
-/* request GPIO */
-int rdc_gpio_request(unsigned gpio, const char *label)
-{
-       unsigned long flags;
-
-       if (!rdc321x_is_gpio(gpio))
-               return -EINVAL;
-
-       spin_lock_irqsave(&gpio_lock, flags);
-       if (gpio_request_data[(gpio & 0x20) ? 1 : 0] & (1 << (gpio & 0x1f)))
-               goto inuse;
-       gpio_request_data[(gpio & 0x20) ? 1 : 0] |= (1 << (gpio & 0x1f));
-       spin_unlock_irqrestore(&gpio_lock, flags);
-
-       return 0;
-inuse:
-       spin_unlock_irqrestore(&gpio_lock, flags);
-       return -EINVAL;
-}
-EXPORT_SYMBOL(rdc_gpio_request);
-
-/* release previously-claimed GPIO */
-void rdc_gpio_free(unsigned gpio)
-{
-       unsigned long flags;
-
-       if (!rdc321x_is_gpio(gpio))
-               return;
-
-       spin_lock_irqsave(&gpio_lock, flags);
-       gpio_request_data[(gpio & 0x20) ? 1 : 0] &= ~(1 << (gpio & 0x1f));
-       spin_unlock_irqrestore(&gpio_lock, flags);
-}
-EXPORT_SYMBOL(rdc_gpio_free);
-
-/* read GPIO pin */
-int rdc_gpio_get_value(unsigned gpio)
-{
-       u32 reg;
-       unsigned long flags;
-
-       spin_lock_irqsave(&gpio_lock, flags);
-       reg = rdc321x_conf_read(gpio < 32
-               ? RDC321X_GPIO_DATA_REG1 : RDC321X_GPIO_DATA_REG2);
-       spin_unlock_irqrestore(&gpio_lock, flags);
-
-       return (1 << (gpio & 0x1f)) & reg ? 1 : 0;
-}
-EXPORT_SYMBOL(rdc_gpio_get_value);
-
-/* set GPIO pin to value */
-void rdc_gpio_set_value(unsigned gpio, int value)
-{
-       unsigned long flags;
-       u32 reg;
-
-       reg = 1 << (gpio & 0x1f);
-       if (gpio < 32) {
-               spin_lock_irqsave(&gpio_lock, flags);
-               if (value)
-                       gpio_data_reg1 |= reg;
-               else
-                       gpio_data_reg1 &= ~reg;
-               rdc321x_conf_write(RDC321X_GPIO_DATA_REG1, gpio_data_reg1);
-               spin_unlock_irqrestore(&gpio_lock, flags);
-       } else {
-               spin_lock_irqsave(&gpio_lock, flags);
-               if (value)
-                       gpio_data_reg2 |= reg;
-               else
-                       gpio_data_reg2 &= ~reg;
-               rdc321x_conf_write(RDC321X_GPIO_DATA_REG2, gpio_data_reg2);
-               spin_unlock_irqrestore(&gpio_lock, flags);
-       }
-}
-EXPORT_SYMBOL(rdc_gpio_set_value);
-
-/* configure GPIO pin as input */
-int rdc_gpio_direction_input(unsigned gpio)
-{
-       if (!rdc321x_is_gpio(gpio))
-               return -EINVAL;
-
-       rdc321x_configure_gpio(gpio);
-
-       return 0;
-}
-EXPORT_SYMBOL(rdc_gpio_direction_input);
-
-/* configure GPIO pin as output and set value */
-int rdc_gpio_direction_output(unsigned gpio, int value)
-{
-       if (!rdc321x_is_gpio(gpio))
-               return -EINVAL;
-
-       gpio_set_value(gpio, value);
-       rdc321x_configure_gpio(gpio);
-
-       return 0;
-}
-EXPORT_SYMBOL(rdc_gpio_direction_output);
diff --git a/arch/x86/mach-rdc321x/platform.c b/arch/x86/mach-rdc321x/platform.c
deleted file mode 100644 (file)
index 4f4e50c..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- *  Generic RDC321x platform devices
- *
- *  Copyright (C) 2007 Florian Fainelli <florian@openwrt.org>
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version 2
- *  of the License, or (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the
- *  Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
- *  Boston, MA  02110-1301, USA.
- *
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/leds.h>
-
-#include <asm/gpio.h>
-
-/* LEDS */
-static struct gpio_led default_leds[] = {
-       { .name = "rdc:dmz", .gpio = 1, },
-};
-
-static struct gpio_led_platform_data rdc321x_led_data = {
-       .num_leds = ARRAY_SIZE(default_leds),
-       .leds = default_leds,
-};
-
-static struct platform_device rdc321x_leds = {
-       .name = "leds-gpio",
-       .id = -1,
-       .dev = {
-               .platform_data = &rdc321x_led_data,
-       }
-};
-
-/* Watchdog */
-static struct platform_device rdc321x_wdt = {
-       .name = "rdc321x-wdt",
-       .id = -1,
-       .num_resources = 0,
-};
-
-static struct platform_device *rdc321x_devs[] = {
-       &rdc321x_leds,
-       &rdc321x_wdt
-};
-
-static int __init rdc_board_setup(void)
-{
-       rdc321x_gpio_setup();
-
-       return platform_add_devices(rdc321x_devs, ARRAY_SIZE(rdc321x_devs));
-}
-
-arch_initcall(rdc_board_setup);
index d914a79..66b7eb5 100644 (file)
@@ -9,6 +9,7 @@
 #include <asm/e820.h>
 #include <asm/io.h>
 #include <asm/setup.h>
+#include <asm/cpu.h>
 
 void __init pre_intr_init_hook(void)
 {
index 7ffcdee..6f5a38c 100644 (file)
@@ -400,7 +400,7 @@ void __init find_smp_config(void)
             VOYAGER_SUS_IN_CONTROL_PORT);
 
        current_thread_info()->cpu = boot_cpu_id;
-       x86_write_percpu(cpu_number, boot_cpu_id);
+       percpu_write(cpu_number, boot_cpu_id);
 }
 
 /*
@@ -528,7 +528,6 @@ static void __init do_boot_cpu(__u8 cpu)
        /* init_tasks (in sched.c) is indexed logically */
        stack_start.sp = (void *)idle->thread.sp;
 
-       init_gdt(cpu);
        per_cpu(current_task, cpu) = idle;
        early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
        irq_ctx_init(cpu);
@@ -1745,14 +1744,13 @@ static void __init voyager_smp_prepare_cpus(unsigned int max_cpus)
 
 static void __cpuinit voyager_smp_prepare_boot_cpu(void)
 {
-       init_gdt(smp_processor_id());
-       switch_to_new_gdt();
+       int cpu = smp_processor_id();
+       switch_to_new_gdt(cpu);
 
        cpu_online_map = cpumask_of_cpu(smp_processor_id());
        cpu_callout_map = cpumask_of_cpu(smp_processor_id());
        cpu_callin_map = CPU_MASK_NONE;
        cpu_present_map = cpumask_of_cpu(smp_processor_id());
-
 }
 
 static int __cpuinit voyager_cpu_up(unsigned int cpu)
@@ -1779,7 +1777,6 @@ static void __init voyager_smp_cpus_done(unsigned int max_cpus)
 void __init smp_setup_processor_id(void)
 {
        current_thread_info()->cpu = hard_smp_processor_id();
-       x86_write_percpu(cpu_number, hard_smp_processor_id());
 }
 
 static void voyager_send_call_func(const struct cpumask *callmask)
index 420b3b6..6ef5e99 100644 (file)
@@ -150,11 +150,9 @@ static long pm_address(u_char FPU_modrm, u_char segment,
 #endif /* PARANOID */
 
        switch (segment) {
-               /* gs isn't used by the kernel, so it still has its
-                  user-space value. */
        case PREFIX_GS_ - 1:
-               /* N.B. - movl %seg, mem is a 2 byte write regardless of prefix */
-               savesegment(gs, addr->selector);
+               /* user gs handling can be lazy, use special accessors */
+               addr->selector = get_user_gs(FPU_info->regs);
                break;
        default:
                addr->selector = PM_REG_(segment);
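get_user_gs()/set_user_gs() here, and lazy_load_gs() in the lguest hunk earlier, come from the lazy-GS work: depending on configuration, the user's %gs selector is either still live in the segment register on kernel entry or has already been saved into pt_regs, and these accessors hide which. A conceptual sketch only; EXAMPLE_GS_SAVED_IN_PT_REGS is a made-up placeholder for that configuration choice, not a real kernel symbol:

static inline unsigned short example_get_user_gs(struct pt_regs *regs)
{
        unsigned short sel;

        if (EXAMPLE_GS_SAVED_IN_PT_REGS)        /* entry code stashed it */
                sel = regs->gs;
        else                                    /* still live in the register */
                savesegment(gs, sel);
        return sel;
}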
index d8cc96a..2b938a3 100644 (file)
@@ -1,6 +1,8 @@
 obj-y  :=  init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
            pat.o pgtable.o gup.o
 
+obj-$(CONFIG_SMP)              += tlb.o
+
 obj-$(CONFIG_X86_32)           += pgtable_32.o iomap_32.o
 
 obj-$(CONFIG_HUGETLB_PAGE)     += hugetlbpage.o
index 7e8db53..61b41ca 100644 (file)
@@ -23,6 +23,12 @@ int fixup_exception(struct pt_regs *regs)
 
        fixup = search_exception_tables(regs->ip);
        if (fixup) {
+               /* If fixup is less than 16, it means uaccess error */
+               if (fixup->fixup < 16) {
+                       current_thread_info()->uaccess_err = -EFAULT;
+                       regs->ip += fixup->fixup;
+                       return 1;
+               }
                regs->ip = fixup->fixup;
                return 1;
        }
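The new branch above implements a second exception-table convention: when an entry's fixup value is a small number (less than 16) it is treated not as a jump target but as the number of bytes to skip past the faulting instruction, with the failure recorded in thread_info->uaccess_err. A hedged sketch of the try/catch style of uaccess helper this supports (put_user_try/put_user_ex/put_user_catch from the x86 uaccess try/catch framework; uframe and the stored values are illustrative):

int err = 0;

put_user_try {
        put_user_ex(val1, &uframe->field1);     /* on fault: ip advances, error noted */
        put_user_ex(val2, &uframe->field2);     /* later stores still execute */
} put_user_catch(err);                          /* err becomes -EFAULT if any store faulted */

if (err)
        return -EFAULT;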
index c76ef1d..2a9ea3a 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
 #include <linux/kdebug.h>
+#include <linux/magic.h>
 
 #include <asm/system.h>
 #include <asm/desc.h>
@@ -91,8 +92,8 @@ static inline int notify_page_fault(struct pt_regs *regs)
  *
  * Opcode checker based on code by Richard Brunner
  */
-static int is_prefetch(struct pt_regs *regs, unsigned long addr,
-                      unsigned long error_code)
+static int is_prefetch(struct pt_regs *regs, unsigned long error_code,
+                       unsigned long addr)
 {
        unsigned char *instr;
        int scan_more = 1;
@@ -409,17 +410,16 @@ static void show_fault_oops(struct pt_regs *regs, unsigned long error_code,
 }
 
 #ifdef CONFIG_X86_64
-static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
-                                unsigned long error_code)
+static noinline void pgtable_bad(struct pt_regs *regs,
+                        unsigned long error_code, unsigned long address)
 {
        unsigned long flags = oops_begin();
        int sig = SIGKILL;
-       struct task_struct *tsk;
+       struct task_struct *tsk = current;
 
        printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
-              current->comm, address);
+              tsk->comm, address);
        dump_pagetable(address);
-       tsk = current;
        tsk->thread.cr2 = address;
        tsk->thread.trap_no = 14;
        tsk->thread.error_code = error_code;
@@ -429,6 +429,196 @@ static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
 }
 #endif
 
+static noinline void no_context(struct pt_regs *regs,
+                       unsigned long error_code, unsigned long address)
+{
+       struct task_struct *tsk = current;
+       unsigned long *stackend;
+
+#ifdef CONFIG_X86_64
+       unsigned long flags;
+       int sig;
+#endif
+
+       /* Are we prepared to handle this kernel fault?  */
+       if (fixup_exception(regs))
+               return;
+
+       /*
+        * X86_32
+        * Valid to do another page fault here, because if this fault
+        * had been triggered by is_prefetch fixup_exception would have
+        * handled it.
+        *
+        * X86_64
+        * Hall of shame of CPU/BIOS bugs.
+        */
+       if (is_prefetch(regs, error_code, address))
+               return;
+
+       if (is_errata93(regs, address))
+               return;
+
+       /*
+        * Oops. The kernel tried to access some bad page. We'll have to
+        * terminate things with extreme prejudice.
+        */
+#ifdef CONFIG_X86_32
+       bust_spinlocks(1);
+#else
+       flags = oops_begin();
+#endif
+
+       show_fault_oops(regs, error_code, address);
+
+       stackend = end_of_stack(tsk);
+       if (*stackend != STACK_END_MAGIC)
+               printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");
+
+       tsk->thread.cr2 = address;
+       tsk->thread.trap_no = 14;
+       tsk->thread.error_code = error_code;
+
+#ifdef CONFIG_X86_32
+       die("Oops", regs, error_code);
+       bust_spinlocks(0);
+       do_exit(SIGKILL);
+#else
+       sig = SIGKILL;
+       if (__die("Oops", regs, error_code))
+               sig = 0;
+       /* Executive summary in case the body of the oops scrolled away */
+       printk(KERN_EMERG "CR2: %016lx\n", address);
+       oops_end(flags, regs, sig);
+#endif
+}
+
+static void __bad_area_nosemaphore(struct pt_regs *regs,
+                       unsigned long error_code, unsigned long address,
+                       int si_code)
+{
+       struct task_struct *tsk = current;
+
+       /* User mode accesses just cause a SIGSEGV */
+       if (error_code & PF_USER) {
+               /*
+                * It's possible to have interrupts off here.
+                */
+               local_irq_enable();
+
+               /*
+                * Valid to do another page fault here because this one came
+                * from user space.
+                */
+               if (is_prefetch(regs, error_code, address))
+                       return;
+
+               if (is_errata100(regs, address))
+                       return;
+
+               if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
+                   printk_ratelimit()) {
+                       printk(
+                       "%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
+                       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
+                       tsk->comm, task_pid_nr(tsk), address,
+                       (void *) regs->ip, (void *) regs->sp, error_code);
+                       print_vma_addr(" in ", regs->ip);
+                       printk("\n");
+               }
+
+               tsk->thread.cr2 = address;
+               /* Kernel addresses are always protection faults */
+               tsk->thread.error_code = error_code | (address >= TASK_SIZE);
+               tsk->thread.trap_no = 14;
+               force_sig_info_fault(SIGSEGV, si_code, address, tsk);
+               return;
+       }
+
+       if (is_f00f_bug(regs, address))
+               return;
+
+       no_context(regs, error_code, address);
+}
+
+static noinline void bad_area_nosemaphore(struct pt_regs *regs,
+                       unsigned long error_code, unsigned long address)
+{
+       __bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
+}
+
+static void __bad_area(struct pt_regs *regs,
+                       unsigned long error_code, unsigned long address,
+                       int si_code)
+{
+       struct mm_struct *mm = current->mm;
+
+       /*
+        * Something tried to access memory that isn't in our memory map..
+        * Fix it, but check if it's kernel or user first..
+        */
+       up_read(&mm->mmap_sem);
+
+       __bad_area_nosemaphore(regs, error_code, address, si_code);
+}
+
+static noinline void bad_area(struct pt_regs *regs,
+                       unsigned long error_code, unsigned long address)
+{
+       __bad_area(regs, error_code, address, SEGV_MAPERR);
+}
+
+static noinline void bad_area_access_error(struct pt_regs *regs,
+                       unsigned long error_code, unsigned long address)
+{
+       __bad_area(regs, error_code, address, SEGV_ACCERR);
+}
+
+/* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */
+static void out_of_memory(struct pt_regs *regs,
+                       unsigned long error_code, unsigned long address)
+{
+       /*
+        * We ran out of memory, call the OOM killer, and return the userspace
+        * (which will retry the fault, or kill us if we got oom-killed).
+        */
+       up_read(&current->mm->mmap_sem);
+       pagefault_out_of_memory();
+}
+
+static void do_sigbus(struct pt_regs *regs,
+                       unsigned long error_code, unsigned long address)
+{
+       struct task_struct *tsk = current;
+       struct mm_struct *mm = tsk->mm;
+
+       up_read(&mm->mmap_sem);
+
+       /* Kernel mode? Handle exceptions or die */
+       if (!(error_code & PF_USER))
+               no_context(regs, error_code, address);
+#ifdef CONFIG_X86_32
+       /* User space => ok to do another page fault */
+       if (is_prefetch(regs, error_code, address))
+               return;
+#endif
+       tsk->thread.cr2 = address;
+       tsk->thread.error_code = error_code;
+       tsk->thread.trap_no = 14;
+       force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
+}
+
+static noinline void mm_fault_error(struct pt_regs *regs,
+               unsigned long error_code, unsigned long address, unsigned int fault)
+{
+       if (fault & VM_FAULT_OOM)
+               out_of_memory(regs, error_code, address);
+       else if (fault & VM_FAULT_SIGBUS)
+               do_sigbus(regs, error_code, address);
+       else
+               BUG();
+}
+
 static int spurious_fault_check(unsigned long error_code, pte_t *pte)
 {
        if ((error_code & PF_WRITE) && !pte_write(*pte))
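
For reading the new helpers: the PF_* flags they test are the x86 hardware page-fault error-code bits, with the macro names matching their use in this file:

/*
 * Page-fault error code bits (architectural meaning):
 *
 *   PF_PROT  (1 << 0)   0: page not present    1: protection violation
 *   PF_WRITE (1 << 1)   0: read access         1: write access
 *   PF_USER  (1 << 2)   0: kernel-mode access  1: user-mode access
 *   PF_RSVD  (1 << 3)   reserved bit was set in a page-table entry
 *   PF_INSTR (1 << 4)   the fault was an instruction fetch
 */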
@@ -448,8 +638,8 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
  * There are no security implications to leaving a stale TLB when
  * increasing the permissions on a page.
  */
-static int spurious_fault(unsigned long address,
-                         unsigned long error_code)
+static noinline int spurious_fault(unsigned long error_code,
+                               unsigned long address)
 {
        pgd_t *pgd;
        pud_t *pud;
@@ -494,7 +684,7 @@ static int spurious_fault(unsigned long address,
  *
  * This assumes no large pages in there.
  */
-static int vmalloc_fault(unsigned long address)
+static noinline int vmalloc_fault(unsigned long address)
 {
 #ifdef CONFIG_X86_32
        unsigned long pgd_paddr;
@@ -573,6 +763,25 @@ static int vmalloc_fault(unsigned long address)
 
 int show_unhandled_signals = 1;
 
+static inline int access_error(unsigned long error_code, int write,
+                               struct vm_area_struct *vma)
+{
+       if (write) {
+               /* write, present and write, not present */
+               if (unlikely(!(vma->vm_flags & VM_WRITE)))
+                       return 1;
+       } else if (unlikely(error_code & PF_PROT)) {
+               /* read, present */
+               return 1;
+       } else {
+               /* read, not present */
+               if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
+                       return 1;
+       }
+
+       return 0;
+}
+
 /*
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
@@ -583,16 +792,12 @@ asmlinkage
 #endif
 void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 {
+       unsigned long address;
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
-       unsigned long address;
-       int write, si_code;
+       int write;
        int fault;
-#ifdef CONFIG_X86_64
-       unsigned long flags;
-       int sig;
-#endif
 
        tsk = current;
        mm = tsk->mm;
@@ -601,8 +806,6 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
        /* get the address */
        address = read_cr2();
 
-       si_code = SEGV_MAPERR;
-
        if (unlikely(kmmio_fault(regs, address)))
                return;
 
@@ -629,7 +832,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
                        return;
 
                /* Can handle a stale RO->RW TLB */
-               if (spurious_fault(address, error_code))
+               if (spurious_fault(error_code, address))
                        return;
 
                /* kprobes don't want to hook the spurious faults. */
@@ -639,13 +842,12 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
                 * Don't take the mm semaphore here. If we fixup a prefetch
                 * fault we could otherwise deadlock.
                 */
-               goto bad_area_nosemaphore;
+               bad_area_nosemaphore(regs, error_code, address);
+               return;
        }
 
-       /* kprobes don't want to hook the spurious faults. */
-       if (notify_page_fault(regs))
+       if (unlikely(notify_page_fault(regs)))
                return;
-
        /*
         * It's safe to allow irq's after cr2 has been saved and the
         * vmalloc fault has been handled.
@@ -661,15 +863,17 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 
 #ifdef CONFIG_X86_64
        if (unlikely(error_code & PF_RSVD))
-               pgtable_bad(address, regs, error_code);
+               pgtable_bad(regs, error_code, address);
 #endif
 
        /*
         * If we're in an interrupt, have no user context or are running in an
         * atomic region then we must not take the fault.
         */
-       if (unlikely(in_atomic() || !mm))
-               goto bad_area_nosemaphore;
+       if (unlikely(in_atomic() || !mm)) {
+               bad_area_nosemaphore(regs, error_code, address);
+               return;
+       }
 
        /*
         * When running in the kernel we expect faults to occur only to
@@ -687,20 +891,26 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
         * source.  If this is invalid we can skip the address space check,
         * thus avoiding the deadlock.
         */
-       if (!down_read_trylock(&mm->mmap_sem)) {
+       if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
                if ((error_code & PF_USER) == 0 &&
-                   !search_exception_tables(regs->ip))
-                       goto bad_area_nosemaphore;
+                   !search_exception_tables(regs->ip)) {
+                       bad_area_nosemaphore(regs, error_code, address);
+                       return;
+               }
                down_read(&mm->mmap_sem);
        }
 
        vma = find_vma(mm, address);
-       if (!vma)
-               goto bad_area;
-       if (vma->vm_start <= address)
+       if (unlikely(!vma)) {
+               bad_area(regs, error_code, address);
+               return;
+       }
+       if (likely(vma->vm_start <= address))
                goto good_area;
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               goto bad_area;
+       if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
+               bad_area(regs, error_code, address);
+               return;
+       }
        if (error_code & PF_USER) {
                /*
                 * Accessing the stack below %sp is always a bug.
@@ -708,31 +918,25 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
                 * and pusha to work.  ("enter $65535,$31" pushes
                 * 32 pointers and then decrements %sp by 65535.)
                 */
-               if (address + 65536 + 32 * sizeof(unsigned long) < regs->sp)
-                       goto bad_area;
+               if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
+                       bad_area(regs, error_code, address);
+                       return;
+               }
        }
-       if (expand_stack(vma, address))
-               goto bad_area;
-/*
- * Ok, we have a good vm_area for this memory access, so
- * we can handle it..
- */
+       if (unlikely(expand_stack(vma, address))) {
+               bad_area(regs, error_code, address);
+               return;
+       }
+
+       /*
+        * Ok, we have a good vm_area for this memory access, so
+        * we can handle it..
+        */
 good_area:
-       si_code = SEGV_ACCERR;
-       write = 0;
-       switch (error_code & (PF_PROT|PF_WRITE)) {
-       default:        /* 3: write, present */
-               /* fall through */
-       case PF_WRITE:          /* write, not present */
-               if (!(vma->vm_flags & VM_WRITE))
-                       goto bad_area;
-               write++;
-               break;
-       case PF_PROT:           /* read, present */
-               goto bad_area;
-       case 0:                 /* read, not present */
-               if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
-                       goto bad_area;
+       write = error_code & PF_WRITE;
+       if (unlikely(access_error(error_code, write, vma))) {
+               bad_area_access_error(regs, error_code, address);
+               return;
        }
 
        /*
@@ -742,11 +946,8 @@ good_area:
         */
        fault = handle_mm_fault(mm, vma, address, write);
        if (unlikely(fault & VM_FAULT_ERROR)) {
-               if (fault & VM_FAULT_OOM)
-                       goto out_of_memory;
-               else if (fault & VM_FAULT_SIGBUS)
-                       goto do_sigbus;
-               BUG();
+               mm_fault_error(regs, error_code, address, fault);
+               return;
        }
        if (fault & VM_FAULT_MAJOR)
                tsk->maj_flt++;
@@ -764,128 +965,6 @@ good_area:
        }
 #endif
        up_read(&mm->mmap_sem);
-       return;
-
-/*
- * Something tried to access memory that isn't in our memory map..
- * Fix it, but check if it's kernel or user first..
- */
-bad_area:
-       up_read(&mm->mmap_sem);
-
-bad_area_nosemaphore:
-       /* User mode accesses just cause a SIGSEGV */
-       if (error_code & PF_USER) {
-               /*
-                * It's possible to have interrupts off here.
-                */
-               local_irq_enable();
-
-               /*
-                * Valid to do another page fault here because this one came
-                * from user space.
-                */
-               if (is_prefetch(regs, address, error_code))
-                       return;
-
-               if (is_errata100(regs, address))
-                       return;
-
-               if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
-                   printk_ratelimit()) {
-                       printk(
-                       "%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
-                       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
-                       tsk->comm, task_pid_nr(tsk), address,
-                       (void *) regs->ip, (void *) regs->sp, error_code);
-                       print_vma_addr(" in ", regs->ip);
-                       printk("\n");
-               }
-
-               tsk->thread.cr2 = address;
-               /* Kernel addresses are always protection faults */
-               tsk->thread.error_code = error_code | (address >= TASK_SIZE);
-               tsk->thread.trap_no = 14;
-               force_sig_info_fault(SIGSEGV, si_code, address, tsk);
-               return;
-       }
-
-       if (is_f00f_bug(regs, address))
-               return;
-
-no_context:
-       /* Are we prepared to handle this kernel fault?  */
-       if (fixup_exception(regs))
-               return;
-
-       /*
-        * X86_32
-        * Valid to do another page fault here, because if this fault
-        * had been triggered by is_prefetch fixup_exception would have
-        * handled it.
-        *
-        * X86_64
-        * Hall of shame of CPU/BIOS bugs.
-        */
-       if (is_prefetch(regs, address, error_code))
-               return;
-
-       if (is_errata93(regs, address))
-               return;
-
-/*
- * Oops. The kernel tried to access some bad page. We'll have to
- * terminate things with extreme prejudice.
- */
-#ifdef CONFIG_X86_32
-       bust_spinlocks(1);
-#else
-       flags = oops_begin();
-#endif
-
-       show_fault_oops(regs, error_code, address);
-
-       tsk->thread.cr2 = address;
-       tsk->thread.trap_no = 14;
-       tsk->thread.error_code = error_code;
-
-#ifdef CONFIG_X86_32
-       die("Oops", regs, error_code);
-       bust_spinlocks(0);
-       do_exit(SIGKILL);
-#else
-       sig = SIGKILL;
-       if (__die("Oops", regs, error_code))
-               sig = 0;
-       /* Executive summary in case the body of the oops scrolled away */
-       printk(KERN_EMERG "CR2: %016lx\n", address);
-       oops_end(flags, regs, sig);
-#endif
-
-out_of_memory:
-       /*
-        * We ran out of memory, call the OOM killer, and return the userspace
-        * (which will retry the fault, or kill us if we got oom-killed).
-        */
-       up_read(&mm->mmap_sem);
-       pagefault_out_of_memory();
-       return;
-
-do_sigbus:
-       up_read(&mm->mmap_sem);
-
-       /* Kernel mode? Handle exceptions or die */
-       if (!(error_code & PF_USER))
-               goto no_context;
-#ifdef CONFIG_X86_32
-       /* User space => ok to do another page fault */
-       if (is_prefetch(regs, address, error_code))
-               return;
-#endif
-       tsk->thread.cr2 = address;
-       tsk->thread.error_code = error_code;
-       tsk->thread.trap_no = 14;
-       force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
 }
 
 DEFINE_SPINLOCK(pgd_lock);
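
The conversion above follows one pattern throughout do_page_fault(): every goto to a shared error label becomes a call to a noinline helper followed by an immediate return, so the hot path stays compact and the cold paths move out of line. A reduced, standalone illustration of that shape (toy code, not kernel code):

#include <stdio.h>

/* Cold path pulled out of line, in the role of bad_area()/no_context(). */
static __attribute__((noinline)) void report_error(const char *what, int code)
{
        fprintf(stderr, "error: %s (code %d)\n", what, code);
}

/* Hot path stays short: each failure calls a helper and returns,
 * instead of jumping to a shared label at the bottom of the function. */
static int process(int value)
{
        if (value < 0) {
                report_error("negative input", value);
                return -1;
        }
        if (value > 100) {
                report_error("input too large", value);
                return -1;
        }
        return value * 2;
}

int main(void)
{
        printf("%d\n", process(21));
        process(-3);
        return 0;
}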
index 2cef050..06708ee 100644 (file)
@@ -49,7 +49,6 @@
 #include <asm/paravirt.h>
 #include <asm/setup.h>
 #include <asm/cacheflush.h>
-#include <asm/smp.h>
 
 unsigned int __VMALLOC_RESERVE = 128 << 20;
 
@@ -675,75 +674,97 @@ static int __init parse_highmem(char *arg)
 }
 early_param("highmem", parse_highmem);
 
+#define MSG_HIGHMEM_TOO_BIG \
+       "highmem size (%luMB) is bigger than pages available (%luMB)!\n"
+
+#define MSG_LOWMEM_TOO_SMALL \
+       "highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
 /*
- * Determine low and high memory ranges:
+ * All of RAM fits into lowmem - but if user wants highmem
+ * artificially via the highmem=x boot parameter then create
+ * it:
  */
-void __init find_low_pfn_range(void)
+void __init lowmem_pfn_init(void)
 {
-       /* it could update max_pfn */
-
        /* max_low_pfn is 0, we already have early_res support */
-
        max_low_pfn = max_pfn;
-       if (max_low_pfn > MAXMEM_PFN) {
-               if (highmem_pages == -1)
-                       highmem_pages = max_pfn - MAXMEM_PFN;
-               if (highmem_pages + MAXMEM_PFN < max_pfn)
-                       max_pfn = MAXMEM_PFN + highmem_pages;
-               if (highmem_pages + MAXMEM_PFN > max_pfn) {
-                       printk(KERN_WARNING "only %luMB highmem pages "
-                               "available, ignoring highmem size of %uMB.\n",
-                               pages_to_mb(max_pfn - MAXMEM_PFN),
+
+       if (highmem_pages == -1)
+               highmem_pages = 0;
+#ifdef CONFIG_HIGHMEM
+       if (highmem_pages >= max_pfn) {
+               printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
+                       pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
+               highmem_pages = 0;
+       }
+       if (highmem_pages) {
+               if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
+                       printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
                                pages_to_mb(highmem_pages));
                        highmem_pages = 0;
                }
-               max_low_pfn = MAXMEM_PFN;
+               max_low_pfn -= highmem_pages;
+       }
+#else
+       if (highmem_pages)
+               printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
+#endif
+}
+
+#define MSG_HIGHMEM_TOO_SMALL \
+       "only %luMB highmem pages available, ignoring highmem size of %luMB!\n"
+
+#define MSG_HIGHMEM_TRIMMED \
+       "Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
+/*
+ * We have more RAM than fits into lowmem - we try to put it into
+ * highmem, also taking the highmem=x boot parameter into account:
+ */
+void __init highmem_pfn_init(void)
+{
+       max_low_pfn = MAXMEM_PFN;
+
+       if (highmem_pages == -1)
+               highmem_pages = max_pfn - MAXMEM_PFN;
+
+       if (highmem_pages + MAXMEM_PFN < max_pfn)
+               max_pfn = MAXMEM_PFN + highmem_pages;
+
+       if (highmem_pages + MAXMEM_PFN > max_pfn) {
+               printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
+                       pages_to_mb(max_pfn - MAXMEM_PFN),
+                       pages_to_mb(highmem_pages));
+               highmem_pages = 0;
+       }
 #ifndef CONFIG_HIGHMEM
-               /* Maximum memory usable is what is directly addressable */
-               printk(KERN_WARNING "Warning only %ldMB will be used.\n",
-                                       MAXMEM>>20);
-               if (max_pfn > MAX_NONPAE_PFN)
-                       printk(KERN_WARNING
-                                "Use a HIGHMEM64G enabled kernel.\n");
-               else
-                       printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
-               max_pfn = MAXMEM_PFN;
+       /* Maximum memory usable is what is directly addressable */
+       printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
+       if (max_pfn > MAX_NONPAE_PFN)
+               printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
+       else
+               printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
+       max_pfn = MAXMEM_PFN;
 #else /* !CONFIG_HIGHMEM */
 #ifndef CONFIG_HIGHMEM64G
-               if (max_pfn > MAX_NONPAE_PFN) {
-                       max_pfn = MAX_NONPAE_PFN;
-                       printk(KERN_WARNING "Warning only 4GB will be used."
-                               "Use a HIGHMEM64G enabled kernel.\n");
-               }
+       if (max_pfn > MAX_NONPAE_PFN) {
+               max_pfn = MAX_NONPAE_PFN;
+               printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
+       }
 #endif /* !CONFIG_HIGHMEM64G */
 #endif /* !CONFIG_HIGHMEM */
-       } else {
-               if (highmem_pages == -1)
-                       highmem_pages = 0;
-#ifdef CONFIG_HIGHMEM
-               if (highmem_pages >= max_pfn) {
-                       printk(KERN_ERR "highmem size specified (%uMB) is "
-                               "bigger than pages available (%luMB)!.\n",
-                               pages_to_mb(highmem_pages),
-                               pages_to_mb(max_pfn));
-                       highmem_pages = 0;
-               }
-               if (highmem_pages) {
-                       if (max_low_pfn - highmem_pages <
-                           64*1024*1024/PAGE_SIZE){
-                               printk(KERN_ERR "highmem size %uMB results in "
-                               "smaller than 64MB lowmem, ignoring it.\n"
-                                       , pages_to_mb(highmem_pages));
-                               highmem_pages = 0;
-                       }
-                       max_low_pfn -= highmem_pages;
-               }
-#else
-               if (highmem_pages)
-                       printk(KERN_ERR "ignoring highmem size on non-highmem"
-                                       " kernel!\n");
-#endif
-       }
+}
+
+/*
+ * Determine low and high memory ranges:
+ */
+void __init find_low_pfn_range(void)
+{
+       /* it could update max_pfn */
+
+       if (max_pfn <= MAXMEM_PFN)
+               lowmem_pfn_init();
+       else
+               highmem_pfn_init();
 }
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
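
A standalone model of the new split: lowmem_pfn_init() handles the case where all RAM fits below the lowmem limit, highmem_pfn_init() the case where it does not. The numbers below assume the common 896 MB lowmem limit on 32-bit, a 2 GB machine and no highmem= override (it prints "lowmem 896 MB, highmem 1152 MB"):

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define MB_TO_PAGES(x)  ((x) * 1024UL * 1024UL / PAGE_SIZE)
#define MAXMEM_PFN      MB_TO_PAGES(896)        /* assumed lowmem limit */

int main(void)
{
        unsigned long max_pfn = MB_TO_PAGES(2048);      /* a 2 GB machine */
        unsigned long max_low_pfn;

        if (max_pfn <= MAXMEM_PFN)      /* the lowmem_pfn_init() path */
                max_low_pfn = max_pfn;
        else                            /* highmem_pfn_init(), no highmem= */
                max_low_pfn = MAXMEM_PFN;

        printf("lowmem %lu MB, highmem %lu MB\n",
               max_low_pfn * PAGE_SIZE >> 20,
               (max_pfn - max_low_pfn) * PAGE_SIZE >> 20);
        return 0;
}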
index af750ab..1448bcb 100644 (file)
@@ -367,7 +367,7 @@ EXPORT_SYMBOL(ioremap_nocache);
  *
  * Must be freed with iounmap.
  */
-void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
 {
        if (pat_enabled)
                return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
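
The prototype change matters on 32-bit PAE kernels, where resource_size_t is 64-bit while unsigned long is not, so physical addresses above 4 GB no longer get truncated on the way in. A hedged caller sketch; map_wc_bar() and its surroundings are invented, only ioremap_wc() comes from the hunk above:

#include <linux/pci.h>
#include <linux/io.h>

static void __iomem *map_wc_bar(struct pci_dev *pdev, int bar)
{
        resource_size_t start = pci_resource_start(pdev, bar);
        resource_size_t len   = pci_resource_len(pdev, bar);

        /* With the old unsigned long parameter, a BAR located above 4 GB
         * would have been truncated here on a 32-bit PAE kernel. */
        return ioremap_wc(start, len);
}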
index 56fe712..1658296 100644 (file)
@@ -4,7 +4,7 @@
  * Based on code by Ingo Molnar and Andi Kleen, copyrighted
  * as follows:
  *
- * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
+ * Copyright 2003-2009 Red Hat Inc.
  * All Rights Reserved.
  * Copyright 2005 Andi Kleen, SUSE Labs.
  * Copyright 2007 Jiri Kosina, SUSE Labs.
index 71a14f8..deb1c1a 100644 (file)
 #include <asm/acpi.h>
 #include <asm/k8.h>
 
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+# define DBG(x...) printk(KERN_DEBUG x)
+#else
+# define DBG(x...)
+#endif
+
 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
 
@@ -33,6 +39,21 @@ int numa_off __initdata;
 static unsigned long __initdata nodemap_addr;
 static unsigned long __initdata nodemap_size;
 
+DEFINE_PER_CPU(int, node_number) = 0;
+EXPORT_PER_CPU_SYMBOL(node_number);
+
+/*
+ * Map cpu index to node index
+ */
+DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
+EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
+
+/*
+ * Which logical CPUs are on which nodes
+ */
+cpumask_t *node_to_cpumask_map;
+EXPORT_SYMBOL(node_to_cpumask_map);
+
 /*
  * Given a shift value, try to populate memnodemap[]
  * Returns :
@@ -640,3 +661,199 @@ void __init init_cpu_to_node(void)
 #endif
 
 
+/*
+ * Allocate node_to_cpumask_map based on number of available nodes
+ * Requires node_possible_map to be valid.
+ *
+ * Note: node_to_cpumask() is not valid until after this is done.
+ * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
+ */
+void __init setup_node_to_cpumask_map(void)
+{
+       unsigned int node, num = 0;
+       cpumask_t *map;
+
+       /* setup nr_node_ids if not done yet */
+       if (nr_node_ids == MAX_NUMNODES) {
+               for_each_node_mask(node, node_possible_map)
+                       num = node;
+               nr_node_ids = num + 1;
+       }
+
+       /* allocate the map */
+       map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
+       DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids);
+
+       pr_debug("Node to cpumask map at %p for %d nodes\n",
+                map, nr_node_ids);
+
+       /* node_to_cpumask() will now work */
+       node_to_cpumask_map = map;
+}
+
+void __cpuinit numa_set_node(int cpu, int node)
+{
+       int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
+
+       /* early setting, no percpu area yet */
+       if (cpu_to_node_map) {
+               cpu_to_node_map[cpu] = node;
+               return;
+       }
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+       if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
+               printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
+               dump_stack();
+               return;
+       }
+#endif
+       per_cpu(x86_cpu_to_node_map, cpu) = node;
+
+       if (node != NUMA_NO_NODE)
+               per_cpu(node_number, cpu) = node;
+}
+
+void __cpuinit numa_clear_node(int cpu)
+{
+       numa_set_node(cpu, NUMA_NO_NODE);
+}
+
+#ifndef CONFIG_DEBUG_PER_CPU_MAPS
+
+void __cpuinit numa_add_cpu(int cpu)
+{
+       cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
+}
+
+void __cpuinit numa_remove_cpu(int cpu)
+{
+       cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
+}
+
+#else /* CONFIG_DEBUG_PER_CPU_MAPS */
+
+/*
+ * --------- debug versions of the numa functions ---------
+ */
+static void __cpuinit numa_set_cpumask(int cpu, int enable)
+{
+       int node = early_cpu_to_node(cpu);
+       cpumask_t *mask;
+       char buf[64];
+
+       if (node_to_cpumask_map == NULL) {
+               printk(KERN_ERR "node_to_cpumask_map NULL\n");
+               dump_stack();
+               return;
+       }
+
+       mask = &node_to_cpumask_map[node];
+       if (enable)
+               cpu_set(cpu, *mask);
+       else
+               cpu_clear(cpu, *mask);
+
+       cpulist_scnprintf(buf, sizeof(buf), mask);
+       printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
+               enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
+}
+
+void __cpuinit numa_add_cpu(int cpu)
+{
+       numa_set_cpumask(cpu, 1);
+}
+
+void __cpuinit numa_remove_cpu(int cpu)
+{
+       numa_set_cpumask(cpu, 0);
+}
+
+int cpu_to_node(int cpu)
+{
+       if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
+               printk(KERN_WARNING
+                       "cpu_to_node(%d): usage too early!\n", cpu);
+               dump_stack();
+               return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
+       }
+       return per_cpu(x86_cpu_to_node_map, cpu);
+}
+EXPORT_SYMBOL(cpu_to_node);
+
+/*
+ * Same function as cpu_to_node() but used if called before the
+ * per_cpu areas are setup.
+ */
+int early_cpu_to_node(int cpu)
+{
+       if (early_per_cpu_ptr(x86_cpu_to_node_map))
+               return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
+
+       if (!cpu_possible(cpu)) {
+               printk(KERN_WARNING
+                       "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
+               dump_stack();
+               return NUMA_NO_NODE;
+       }
+       return per_cpu(x86_cpu_to_node_map, cpu);
+}
+
+
+/* empty cpumask */
+static const cpumask_t cpu_mask_none;
+
+/*
+ * Returns a pointer to the bitmask of CPUs on Node 'node'.
+ */
+const cpumask_t *cpumask_of_node(int node)
+{
+       if (node_to_cpumask_map == NULL) {
+               printk(KERN_WARNING
+                       "cpumask_of_node(%d): no node_to_cpumask_map!\n",
+                       node);
+               dump_stack();
+               return (const cpumask_t *)&cpu_online_map;
+       }
+       if (node >= nr_node_ids) {
+               printk(KERN_WARNING
+                       "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
+                       node, nr_node_ids);
+               dump_stack();
+               return &cpu_mask_none;
+       }
+       return &node_to_cpumask_map[node];
+}
+EXPORT_SYMBOL(cpumask_of_node);
+
+/*
+ * Returns a bitmask of CPUs on Node 'node'.
+ *
+ * Side note: this function creates the returned cpumask on the stack
+ * so with a high NR_CPUS count, excessive stack space is used.  The
+ * node_to_cpumask_ptr function should be used whenever possible.
+ */
+cpumask_t node_to_cpumask(int node)
+{
+       if (node_to_cpumask_map == NULL) {
+               printk(KERN_WARNING
+                       "node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
+               dump_stack();
+               return cpu_online_map;
+       }
+       if (node >= nr_node_ids) {
+               printk(KERN_WARNING
+                       "node_to_cpumask(%d): node > nr_node_ids(%d)\n",
+                       node, nr_node_ids);
+               dump_stack();
+               return cpu_mask_none;
+       }
+       return node_to_cpumask_map[node];
+}
+EXPORT_SYMBOL(node_to_cpumask);
+
+/*
+ * --------- end of debug versions of the numa functions ---------
+ */
+
+#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
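
To put a number on the stack-usage note in node_to_cpumask(): a cpumask_t carries one bit per possible CPU, rounded up to whole longs, and returning it by value copies the entire bitmap onto the stack. A quick standalone calculation (assumes 64-bit longs):

#include <stdio.h>

int main(void)
{
        unsigned int nr_cpus[] = { 64, 512, 4096 };
        unsigned int i;

        for (i = 0; i < sizeof(nr_cpus) / sizeof(nr_cpus[0]); i++) {
                unsigned int bytes = ((nr_cpus[i] + 63) / 64) * 8;

                printf("NR_CPUS=%4u -> a cpumask_t copy costs %3u bytes\n",
                       nr_cpus[i], bytes);
        }
        return 0;
}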
index 7b61036..9127e31 100644 (file)
@@ -30,7 +30,7 @@
 #ifdef CONFIG_X86_PAT
 int __read_mostly pat_enabled = 1;
 
-void __cpuinit pat_disable(char *reason)
+void __cpuinit pat_disable(const char *reason)
 {
        pat_enabled = 0;
        printk(KERN_INFO "%s\n", reason);
@@ -42,6 +42,11 @@ static int __init nopat(char *str)
        return 0;
 }
 early_param("nopat", nopat);
+#else
+static inline void pat_disable(const char *reason)
+{
+       (void)reason;
+}
 #endif
 
 
@@ -78,16 +83,20 @@ void pat_init(void)
        if (!pat_enabled)
                return;
 
-       /* Paranoia check. */
-       if (!cpu_has_pat && boot_pat_state) {
-               /*
-                * If this happens we are on a secondary CPU, but
-                * switched to PAT on the boot CPU. We have no way to
-                * undo PAT.
-                */
-               printk(KERN_ERR "PAT enabled, "
-                      "but not supported by secondary CPU\n");
-               BUG();
+       if (!cpu_has_pat) {
+               if (!boot_pat_state) {
+                       pat_disable("PAT not supported by CPU.");
+                       return;
+               } else {
+                       /*
+                        * If this happens we are on a secondary CPU, but
+                        * switched to PAT on the boot CPU. We have no way to
+                        * undo PAT.
+                        */
+                       printk(KERN_ERR "PAT enabled, "
+                              "but not supported by secondary CPU\n");
+                       BUG();
+               }
        }
 
        /* Set PWT to Write-Combining. All other bits stay the same */
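
The reworked pat_init() branch, summarized as a decision table (informational only, derived from the hunk above):

/*
 *   cpu_has_pat   boot_pat_state   action
 *   -----------   --------------   --------------------------------------
 *   yes           any              program the PAT MSR as before
 *   no            0 (boot CPU)     pat_disable("PAT not supported by CPU.")
 *   no            non-zero         secondary CPU diverged from boot: BUG()
 */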
index 09737c8..15df1ba 100644 (file)
@@ -21,6 +21,7 @@
 #include <asm/numa.h>
 #include <asm/e820.h>
 #include <asm/genapic.h>
+#include <asm/uv/uv.h>
 
 int acpi_numa __initdata;
 
similarity index 67%
rename from arch/x86/kernel/tlb_64.c
rename to arch/x86/mm/tlb.c
index f8be6f1..14c5af4 100644 (file)
@@ -1,24 +1,20 @@
 #include <linux/init.h>
 
 #include <linux/mm.h>
-#include <linux/delay.h>
 #include <linux/spinlock.h>
 #include <linux/smp.h>
-#include <linux/kernel_stat.h>
-#include <linux/mc146818rtc.h>
 #include <linux/interrupt.h>
+#include <linux/module.h>
 
-#include <asm/mtrr.h>
-#include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
-#include <asm/proto.h>
-#include <asm/apicdef.h>
-#include <asm/idle.h>
-#include <asm/uv/uv_hub.h>
-#include <asm/uv/uv_bau.h>
+#include <asm/apic.h>
+#include <asm/uv/uv.h>
 
-#include <mach_ipi.h>
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
+                       = { &init_mm, 0, };
+
+#include <asm/genapic.h>
 /*
  *     Smarter SMP flushing macros.
  *             c/o Linus Torvalds.
@@ -33,7 +29,7 @@
  *     To avoid global state use 8 different call vectors.
  *     Each CPU uses a specific vector to trigger flushes on other
  *     CPUs. Depending on the received vector the target CPUs look into
- *     the right per cpu variable for the flush data.
+ *     the right array slot for the flush data.
  *
  *     With more than 8 CPUs they are hashed to the 8 available
  *     vectors. The limited global vector space forces us to this right now.
 
 union smp_flush_state {
        struct {
-               cpumask_t flush_cpumask;
                struct mm_struct *flush_mm;
                unsigned long flush_va;
                spinlock_t tlbstate_lock;
+               DECLARE_BITMAP(flush_cpumask, NR_CPUS);
        };
-       char pad[SMP_CACHE_BYTES];
-} ____cacheline_aligned;
+       char pad[CONFIG_X86_INTERNODE_CACHE_BYTES];
+} ____cacheline_internodealigned_in_smp;
 
 /* State is put into the per CPU data section, but padded
    to a full cache line because other CPUs can access it and we don't
    want false sharing in the per cpu data segment. */
-static DEFINE_PER_CPU(union smp_flush_state, flush_state);
+static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];
 
 /*
  * We cannot call mmdrop() because we are in interrupt context,
@@ -62,9 +58,9 @@ static DEFINE_PER_CPU(union smp_flush_state, flush_state);
  */
 void leave_mm(int cpu)
 {
-       if (read_pda(mmu_state) == TLBSTATE_OK)
+       if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
                BUG();
-       cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
+       cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask);
        load_cr3(swapper_pg_dir);
 }
 EXPORT_SYMBOL_GPL(leave_mm);
@@ -117,10 +113,20 @@ EXPORT_SYMBOL_GPL(leave_mm);
  * Interrupts are disabled.
  */
 
-asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
+/*
+ * FIXME: use of asmlinkage is not consistent.  On x86_64 it's noop
+ * but still used for documentation purpose but the usage is slightly
+ * inconsistent.  On x86_32, asmlinkage is regparm(0) but interrupt
+ * entry calls in with the first parameter in %eax.  Maybe define
+ * intrlinkage?
+ */
+#ifdef CONFIG_X86_64
+asmlinkage
+#endif
+void smp_invalidate_interrupt(struct pt_regs *regs)
 {
-       int cpu;
-       int sender;
+       unsigned int cpu;
+       unsigned int sender;
        union smp_flush_state *f;
 
        cpu = smp_processor_id();
@@ -129,9 +135,9 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
         * Use that to determine where the sender put the data.
         */
        sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
-       f = &per_cpu(flush_state, sender);
+       f = &flush_state[sender];
 
-       if (!cpu_isset(cpu, f->flush_cpumask))
+       if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
                goto out;
                /*
                 * This was a BUG() but until someone can quote me the
@@ -142,8 +148,8 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
                 * BUG();
                 */
 
-       if (f->flush_mm == read_pda(active_mm)) {
-               if (read_pda(mmu_state) == TLBSTATE_OK) {
+       if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
+               if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
                        if (f->flush_va == TLB_FLUSH_ALL)
                                local_flush_tlb();
                        else
@@ -153,23 +159,21 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
        }
 out:
        ack_APIC_irq();
-       cpu_clear(cpu, f->flush_cpumask);
+       smp_mb__before_clear_bit();
+       cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
+       smp_mb__after_clear_bit();
        inc_irq_stat(irq_tlb_count);
 }
 
-void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
-                            unsigned long va)
+static void flush_tlb_others_ipi(const struct cpumask *cpumask,
+                                struct mm_struct *mm, unsigned long va)
 {
-       int sender;
+       unsigned int sender;
        union smp_flush_state *f;
-       cpumask_t cpumask = *cpumaskp;
-
-       if (is_uv_system() && uv_flush_tlb_others(&cpumask, mm, va))
-               return;
 
        /* Caller has disabled preemption */
        sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
-       f = &per_cpu(flush_state, sender);
+       f = &flush_state[sender];
 
        /*
         * Could avoid this lock when
@@ -180,7 +184,8 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 
        f->flush_mm = mm;
        f->flush_va = va;
-       cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);
+       cpumask_andnot(to_cpumask(f->flush_cpumask),
+                      cpumask, cpumask_of(smp_processor_id()));
 
        /*
         * Make the above memory operations globally visible before
@@ -191,9 +196,10 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
         * We have to send the IPI only to
         * CPUs affected.
         */
-       send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR_START + sender);
+       apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
+                     INVALIDATE_TLB_VECTOR_START + sender);
 
-       while (!cpus_empty(f->flush_cpumask))
+       while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
                cpu_relax();
 
        f->flush_mm = NULL;
@@ -201,12 +207,28 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
        spin_unlock(&f->tlbstate_lock);
 }
 
+void native_flush_tlb_others(const struct cpumask *cpumask,
+                            struct mm_struct *mm, unsigned long va)
+{
+       if (is_uv_system()) {
+               unsigned int cpu;
+
+               cpu = get_cpu();
+               cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu);
+               if (cpumask)
+                       flush_tlb_others_ipi(cpumask, mm, va);
+               put_cpu();
+               return;
+       }
+       flush_tlb_others_ipi(cpumask, mm, va);
+}
+
 static int __cpuinit init_smp_flush(void)
 {
        int i;
 
-       for_each_possible_cpu(i)
-               spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
+       for (i = 0; i < ARRAY_SIZE(flush_state); i++)
+               spin_lock_init(&flush_state[i].tlbstate_lock);
 
        return 0;
 }
@@ -215,25 +237,18 @@ core_initcall(init_smp_flush);
 void flush_tlb_current_task(void)
 {
        struct mm_struct *mm = current->mm;
-       cpumask_t cpu_mask;
 
        preempt_disable();
-       cpu_mask = mm->cpu_vm_mask;
-       cpu_clear(smp_processor_id(), cpu_mask);
 
        local_flush_tlb();
-       if (!cpus_empty(cpu_mask))
-               flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+       if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+               flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
        preempt_enable();
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-       cpumask_t cpu_mask;
-
        preempt_disable();
-       cpu_mask = mm->cpu_vm_mask;
-       cpu_clear(smp_processor_id(), cpu_mask);
 
        if (current->active_mm == mm) {
                if (current->mm)
@@ -241,8 +256,8 @@ void flush_tlb_mm(struct mm_struct *mm)
                else
                        leave_mm(smp_processor_id());
        }
-       if (!cpus_empty(cpu_mask))
-               flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+       if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+               flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
 
        preempt_enable();
 }
@@ -250,11 +265,8 @@ void flush_tlb_mm(struct mm_struct *mm)
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 {
        struct mm_struct *mm = vma->vm_mm;
-       cpumask_t cpu_mask;
 
        preempt_disable();
-       cpu_mask = mm->cpu_vm_mask;
-       cpu_clear(smp_processor_id(), cpu_mask);
 
        if (current->active_mm == mm) {
                if (current->mm)
@@ -263,8 +275,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
                        leave_mm(smp_processor_id());
        }
 
-       if (!cpus_empty(cpu_mask))
-               flush_tlb_others(cpu_mask, mm, va);
+       if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+               flush_tlb_others(&mm->cpu_vm_mask, mm, va);
 
        preempt_enable();
 }
@@ -274,7 +286,7 @@ static void do_flush_tlb_all(void *info)
        unsigned long cpu = smp_processor_id();
 
        __flush_tlb_all();
-       if (read_pda(mmu_state) == TLBSTATE_LAZY)
+       if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
                leave_mm(cpu);
 }
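
Several hunks above replace the copy/clear/cpus_empty() sequence with cpumask_any_but(), which returns nr_cpu_ids when no CPU other than the excluded one is set, so the test needs no on-stack cpumask copy. A toy model of that contract (plain bitmask instead of the kernel cpumask API):

#include <stdio.h>

#define NR_CPUS 16

/* First CPU set in mask other than 'exclude', or NR_CPUS if there is none:
 * the same contract the patch relies on from cpumask_any_but(). */
static int any_but(unsigned int mask, int exclude)
{
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (cpu != exclude && (mask & (1u << cpu)))
                        return cpu;
        return NR_CPUS;
}

int main(void)
{
        unsigned int cpu_vm_mask = (1u << 3) | (1u << 7);  /* mm ran on 3 and 7 */

        if (any_but(cpu_vm_mask, 3) < NR_CPUS)
                printf("CPU 3: another CPU may cache this mm, send the flush IPI\n");
        if (any_but(1u << 3, 3) == NR_CPUS)
                printf("only us in the mask: local flush is enough\n");
        return 0;
}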
 
index 2089354..5601e82 100644 (file)
@@ -5,7 +5,7 @@
 #include <linux/pci.h>
 #include <linux/init.h>
 #include <linux/nodemask.h>
-#include <mach_apic.h>
+#include <asm/genapic.h>
 #include <asm/mpspec.h>
 #include <asm/pci_x86.h>
 
 
 #define QUADLOCAL2BUS(quad,local) (quad_local_to_mp_bus_id[quad][local])
 
-/* Where the IO area was mapped on multiquad, always 0 otherwise */
-void *xquad_portio;
-EXPORT_SYMBOL(xquad_portio);
-
 #define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
 
 #define PCI_CONF1_MQ_ADDRESS(bus, devfn, reg) \
index b82cae9..1c975cc 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/module.h>
 #include <linux/uaccess.h>
 #include <asm/pci_x86.h>
-#include <asm/mach-default/pci-functions.h>
+#include <asm/pci-functions.h>
 
 /* BIOS32 signature: "_32_" */
 #define BIOS32_SIGNATURE       (('_' << 0) + ('3' << 8) + ('2' << 16) + ('_' << 24))
index 4d6ef0a..16a9020 100644 (file)
@@ -38,7 +38,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
        $(call if_changed,objcopy)
 
 CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
-       $(filter -g%,$(KBUILD_CFLAGS))
+       $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector)
 
 $(vobjs): KBUILD_CFLAGS += $(CFL)
 
index 6dcefba..3b767d0 100644 (file)
@@ -6,7 +6,8 @@ CFLAGS_REMOVE_irq.o = -pg
 endif
 
 obj-y          := enlighten.o setup.o multicalls.o mmu.o irq.o \
-                       time.o xen-asm_$(BITS).o grant-table.o suspend.o
+                       time.o xen-asm.o xen-asm_$(BITS).o \
+                       grant-table.o suspend.o
 
 obj-$(CONFIG_SMP)              += smp.o spinlock.o
 obj-$(CONFIG_XEN_DEBUG_FS)     += debugfs.o
\ No newline at end of file
index bea2152..95ff6a0 100644 (file)
@@ -61,40 +61,13 @@ DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
 enum xen_domain_type xen_domain_type = XEN_NATIVE;
 EXPORT_SYMBOL_GPL(xen_domain_type);
 
-/*
- * Identity map, in addition to plain kernel map.  This needs to be
- * large enough to allocate page table pages to allocate the rest.
- * Each page can map 2MB.
- */
-static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;
-
-#ifdef CONFIG_X86_64
-/* l3 pud for userspace vsyscall mapping */
-static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
-#endif /* CONFIG_X86_64 */
-
-/*
- * Note about cr3 (pagetable base) values:
- *
- * xen_cr3 contains the current logical cr3 value; it contains the
- * last set cr3.  This may not be the current effective cr3, because
- * its update may be being lazily deferred.  However, a vcpu looking
- * at its own cr3 can use this value knowing that it everything will
- * be self-consistent.
- *
- * xen_current_cr3 contains the actual vcpu cr3; it is set once the
- * hypercall to set the vcpu cr3 is complete (so it may be a little
- * out of date, but it will never be set early).  If one vcpu is
- * looking at another vcpu's cr3 value, it should use this variable.
- */
-DEFINE_PER_CPU(unsigned long, xen_cr3);         /* cr3 stored as physaddr */
-DEFINE_PER_CPU(unsigned long, xen_current_cr3);         /* actual vcpu cr3 */
-
 struct start_info *xen_start_info;
 EXPORT_SYMBOL_GPL(xen_start_info);
 
 struct shared_info xen_dummy_shared_info;
 
+void *xen_initial_gdt;
+
 /*
  * Point at some empty memory to start with. We map the real shared_info
  * page as soon as fixmap is up and running.
@@ -114,14 +87,7 @@ struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;
  *
  * 0: not available, 1: available
  */
-static int have_vcpu_info_placement =
-#ifdef CONFIG_X86_32
-       1
-#else
-       0
-#endif
-       ;
-
+static int have_vcpu_info_placement = 1;
 
 static void xen_vcpu_setup(int cpu)
 {
@@ -237,7 +203,7 @@ static unsigned long xen_get_debugreg(int reg)
        return HYPERVISOR_get_debugreg(reg);
 }
 
-static void xen_leave_lazy(void)
+void xen_leave_lazy(void)
 {
        paravirt_leave_lazy(paravirt_get_lazy_mode());
        xen_mc_flush();
@@ -357,13 +323,14 @@ static void load_TLS_descriptor(struct thread_struct *t,
 static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
 {
        /*
-        * XXX sleazy hack: If we're being called in a lazy-cpu zone,
-        * it means we're in a context switch, and %gs has just been
-        * saved.  This means we can zero it out to prevent faults on
-        * exit from the hypervisor if the next process has no %gs.
-        * Either way, it has been saved, and the new value will get
-        * loaded properly.  This will go away as soon as Xen has been
-        * modified to not save/restore %gs for normal hypercalls.
+        * XXX sleazy hack: If we're being called in a lazy-cpu zone
+        * and lazy gs handling is enabled, it means we're in a
+        * context switch, and %gs has just been saved.  This means we
+        * can zero it out to prevent faults on exit from the
+        * hypervisor if the next process has no %gs.  Either way, it
+        * has been saved, and the new value will get loaded properly.
+        * This will go away as soon as Xen has been modified to not
+        * save/restore %gs for normal hypercalls.
         *
         * On x86_64, this hack is not used for %gs, because gs points
         * to KERNEL_GS_BASE (and uses it for PDA references), so we
@@ -375,7 +342,7 @@ static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
         */
        if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
 #ifdef CONFIG_X86_32
-               loadsegment(gs, 0);
+               lazy_load_gs(0);
 #else
                loadsegment(fs, 0);
 #endif
@@ -598,83 +565,6 @@ static struct apic_ops xen_basic_apic_ops = {
 
 #endif
 
-static void xen_flush_tlb(void)
-{
-       struct mmuext_op *op;
-       struct multicall_space mcs;
-
-       preempt_disable();
-
-       mcs = xen_mc_entry(sizeof(*op));
-
-       op = mcs.args;
-       op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
-       MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
-
-       xen_mc_issue(PARAVIRT_LAZY_MMU);
-
-       preempt_enable();
-}
-
-static void xen_flush_tlb_single(unsigned long addr)
-{
-       struct mmuext_op *op;
-       struct multicall_space mcs;
-
-       preempt_disable();
-
-       mcs = xen_mc_entry(sizeof(*op));
-       op = mcs.args;
-       op->cmd = MMUEXT_INVLPG_LOCAL;
-       op->arg1.linear_addr = addr & PAGE_MASK;
-       MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
-
-       xen_mc_issue(PARAVIRT_LAZY_MMU);
-
-       preempt_enable();
-}
-
-static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
-                                unsigned long va)
-{
-       struct {
-               struct mmuext_op op;
-               cpumask_t mask;
-       } *args;
-       cpumask_t cpumask = *cpus;
-       struct multicall_space mcs;
-
-       /*
-        * A couple of (to be removed) sanity checks:
-        *
-        * - current CPU must not be in mask
-        * - mask must exist :)
-        */
-       BUG_ON(cpus_empty(cpumask));
-       BUG_ON(cpu_isset(smp_processor_id(), cpumask));
-       BUG_ON(!mm);
-
-       /* If a CPU which we ran on has gone down, OK. */
-       cpus_and(cpumask, cpumask, cpu_online_map);
-       if (cpus_empty(cpumask))
-               return;
-
-       mcs = xen_mc_entry(sizeof(*args));
-       args = mcs.args;
-       args->mask = cpumask;
-       args->op.arg2.vcpumask = &args->mask;
-
-       if (va == TLB_FLUSH_ALL) {
-               args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
-       } else {
-               args->op.cmd = MMUEXT_INVLPG_MULTI;
-               args->op.arg1.linear_addr = va;
-       }
-
-       MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
-
-       xen_mc_issue(PARAVIRT_LAZY_MMU);
-}
 
 static void xen_clts(void)
 {
@@ -700,21 +590,6 @@ static void xen_write_cr0(unsigned long cr0)
        xen_mc_issue(PARAVIRT_LAZY_CPU);
 }
 
-static void xen_write_cr2(unsigned long cr2)
-{
-       x86_read_percpu(xen_vcpu)->arch.cr2 = cr2;
-}
-
-static unsigned long xen_read_cr2(void)
-{
-       return x86_read_percpu(xen_vcpu)->arch.cr2;
-}
-
-static unsigned long xen_read_cr2_direct(void)
-{
-       return x86_read_percpu(xen_vcpu_info.arch.cr2);
-}
-
 static void xen_write_cr4(unsigned long cr4)
 {
        cr4 &= ~X86_CR4_PGE;
@@ -723,71 +598,6 @@ static void xen_write_cr4(unsigned long cr4)
        native_write_cr4(cr4);
 }
 
-static unsigned long xen_read_cr3(void)
-{
-       return x86_read_percpu(xen_cr3);
-}
-
-static void set_current_cr3(void *v)
-{
-       x86_write_percpu(xen_current_cr3, (unsigned long)v);
-}
-
-static void __xen_write_cr3(bool kernel, unsigned long cr3)
-{
-       struct mmuext_op *op;
-       struct multicall_space mcs;
-       unsigned long mfn;
-
-       if (cr3)
-               mfn = pfn_to_mfn(PFN_DOWN(cr3));
-       else
-               mfn = 0;
-
-       WARN_ON(mfn == 0 && kernel);
-
-       mcs = __xen_mc_entry(sizeof(*op));
-
-       op = mcs.args;
-       op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
-       op->arg1.mfn = mfn;
-
-       MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
-
-       if (kernel) {
-               x86_write_percpu(xen_cr3, cr3);
-
-               /* Update xen_current_cr3 once the batch has actually
-                  been submitted. */
-               xen_mc_callback(set_current_cr3, (void *)cr3);
-       }
-}
-
-static void xen_write_cr3(unsigned long cr3)
-{
-       BUG_ON(preemptible());
-
-       xen_mc_batch();  /* disables interrupts */
-
-       /* Update while interrupts are disabled, so its atomic with
-          respect to ipis */
-       x86_write_percpu(xen_cr3, cr3);
-
-       __xen_write_cr3(true, cr3);
-
-#ifdef CONFIG_X86_64
-       {
-               pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
-               if (user_pgd)
-                       __xen_write_cr3(false, __pa(user_pgd));
-               else
-                       __xen_write_cr3(false, 0);
-       }
-#endif
-
-       xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
-}
-
 static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
 {
        int ret;
@@ -829,185 +639,6 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
        return ret;
 }
 
-/* Early in boot, while setting up the initial pagetable, assume
-   everything is pinned. */
-static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
-{
-#ifdef CONFIG_FLATMEM
-       BUG_ON(mem_map);        /* should only be used early */
-#endif
-       make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
-}
-
-/* Early release_pte assumes that all pts are pinned, since there's
-   only init_mm and anything attached to that is pinned. */
-static void xen_release_pte_init(unsigned long pfn)
-{
-       make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
-}
-
-static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
-{
-       struct mmuext_op op;
-       op.cmd = cmd;
-       op.arg1.mfn = pfn_to_mfn(pfn);
-       if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
-               BUG();
-}
-
-/* This needs to make sure the new pte page is pinned iff its being
-   attached to a pinned pagetable. */
-static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
-{
-       struct page *page = pfn_to_page(pfn);
-
-       if (PagePinned(virt_to_page(mm->pgd))) {
-               SetPagePinned(page);
-
-               vm_unmap_aliases();
-               if (!PageHighMem(page)) {
-                       make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
-                       if (level == PT_PTE && USE_SPLIT_PTLOCKS)
-                               pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
-               } else {
-                       /* make sure there are no stray mappings of
-                          this page */
-                       kmap_flush_unused();
-               }
-       }
-}
-
-static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
-{
-       xen_alloc_ptpage(mm, pfn, PT_PTE);
-}
-
-static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
-{
-       xen_alloc_ptpage(mm, pfn, PT_PMD);
-}
-
-static int xen_pgd_alloc(struct mm_struct *mm)
-{
-       pgd_t *pgd = mm->pgd;
-       int ret = 0;
-
-       BUG_ON(PagePinned(virt_to_page(pgd)));
-
-#ifdef CONFIG_X86_64
-       {
-               struct page *page = virt_to_page(pgd);
-               pgd_t *user_pgd;
-
-               BUG_ON(page->private != 0);
-
-               ret = -ENOMEM;
-
-               user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-               page->private = (unsigned long)user_pgd;
-
-               if (user_pgd != NULL) {
-                       user_pgd[pgd_index(VSYSCALL_START)] =
-                               __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
-                       ret = 0;
-               }
-
-               BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
-       }
-#endif
-
-       return ret;
-}
-
-static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
-#ifdef CONFIG_X86_64
-       pgd_t *user_pgd = xen_get_user_pgd(pgd);
-
-       if (user_pgd)
-               free_page((unsigned long)user_pgd);
-#endif
-}
-
-/* This should never happen until we're OK to use struct page */
-static void xen_release_ptpage(unsigned long pfn, unsigned level)
-{
-       struct page *page = pfn_to_page(pfn);
-
-       if (PagePinned(page)) {
-               if (!PageHighMem(page)) {
-                       if (level == PT_PTE && USE_SPLIT_PTLOCKS)
-                               pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
-                       make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
-               }
-               ClearPagePinned(page);
-       }
-}
-
-static void xen_release_pte(unsigned long pfn)
-{
-       xen_release_ptpage(pfn, PT_PTE);
-}
-
-static void xen_release_pmd(unsigned long pfn)
-{
-       xen_release_ptpage(pfn, PT_PMD);
-}
-
-#if PAGETABLE_LEVELS == 4
-static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
-{
-       xen_alloc_ptpage(mm, pfn, PT_PUD);
-}
-
-static void xen_release_pud(unsigned long pfn)
-{
-       xen_release_ptpage(pfn, PT_PUD);
-}
-#endif
-
-#ifdef CONFIG_HIGHPTE
-static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
-{
-       pgprot_t prot = PAGE_KERNEL;
-
-       if (PagePinned(page))
-               prot = PAGE_KERNEL_RO;
-
-       if (0 && PageHighMem(page))
-               printk("mapping highpte %lx type %d prot %s\n",
-                      page_to_pfn(page), type,
-                      (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");
-
-       return kmap_atomic_prot(page, type, prot);
-}
-#endif
-
-#ifdef CONFIG_X86_32
-static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
-{
-       /* If there's an existing pte, then don't allow _PAGE_RW to be set */
-       if (pte_val_ma(*ptep) & _PAGE_PRESENT)
-               pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
-                              pte_val_ma(pte));
-
-       return pte;
-}
-
-/* Init-time set_pte while constructing initial pagetables, which
-   doesn't allow RO pagetable pages to be remapped RW */
-static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
-{
-       pte = mask_rw_pte(ptep, pte);
-
-       xen_set_pte(ptep, pte);
-}
-#endif
-
-static __init void xen_pagetable_setup_start(pgd_t *base)
-{
-}
-
 void xen_setup_shared_info(void)
 {
        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
@@ -1028,37 +659,6 @@ void xen_setup_shared_info(void)
        xen_setup_mfn_list_list();
 }
 
-static __init void xen_pagetable_setup_done(pgd_t *base)
-{
-       xen_setup_shared_info();
-}
-
-static __init void xen_post_allocator_init(void)
-{
-       pv_mmu_ops.set_pte = xen_set_pte;
-       pv_mmu_ops.set_pmd = xen_set_pmd;
-       pv_mmu_ops.set_pud = xen_set_pud;
-#if PAGETABLE_LEVELS == 4
-       pv_mmu_ops.set_pgd = xen_set_pgd;
-#endif
-
-       /* This will work as long as patching hasn't happened yet
-          (which it hasn't) */
-       pv_mmu_ops.alloc_pte = xen_alloc_pte;
-       pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
-       pv_mmu_ops.release_pte = xen_release_pte;
-       pv_mmu_ops.release_pmd = xen_release_pmd;
-#if PAGETABLE_LEVELS == 4
-       pv_mmu_ops.alloc_pud = xen_alloc_pud;
-       pv_mmu_ops.release_pud = xen_release_pud;
-#endif
-
-#ifdef CONFIG_X86_64
-       SetPagePinned(virt_to_page(level3_user_vsyscall));
-#endif
-       xen_mark_init_mm_pinned();
-}
-
 /* This is called once we have the cpu_possible_map */
 void xen_setup_vcpu_info_placement(void)
 {
@@ -1072,10 +672,10 @@ void xen_setup_vcpu_info_placement(void)
        if (have_vcpu_info_placement) {
                printk(KERN_INFO "Xen: using vcpu_info placement\n");
 
-               pv_irq_ops.save_fl = xen_save_fl_direct;
-               pv_irq_ops.restore_fl = xen_restore_fl_direct;
-               pv_irq_ops.irq_disable = xen_irq_disable_direct;
-               pv_irq_ops.irq_enable = xen_irq_enable_direct;
+               pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
+               pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
+               pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
+               pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
                pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
        }
 }
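As an aside for readers of this hunk (illustrative, not part of the patch): the *_direct variants are hand-written assembly that already preserves all registers, so they can be installed with __PV_IS_CALLEE_SAVE() as above, while the plain C implementations get a register-saving thunk generated for them and are installed with PV_CALLEE_SAVE(), as the irq.c hunks further down show. A minimal sketch of the two patterns, using names from this series:

    /* asm implementation, already register-preserving */
    pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);

    /* C implementation: emit a thunk next to the function, then register it */
    PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
    ...
    .save_fl = PV_CALLEE_SAVE(xen_save_fl),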
@@ -1133,49 +733,6 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
        return ret;
 }
 
-static void xen_set_fixmap(unsigned idx, unsigned long phys, pgprot_t prot)
-{
-       pte_t pte;
-
-       phys >>= PAGE_SHIFT;
-
-       switch (idx) {
-       case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
-#ifdef CONFIG_X86_F00F_BUG
-       case FIX_F00F_IDT:
-#endif
-#ifdef CONFIG_X86_32
-       case FIX_WP_TEST:
-       case FIX_VDSO:
-# ifdef CONFIG_HIGHMEM
-       case FIX_KMAP_BEGIN ... FIX_KMAP_END:
-# endif
-#else
-       case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
-#endif
-#ifdef CONFIG_X86_LOCAL_APIC
-       case FIX_APIC_BASE:     /* maps dummy local APIC */
-#endif
-               pte = pfn_pte(phys, prot);
-               break;
-
-       default:
-               pte = mfn_pte(phys, prot);
-               break;
-       }
-
-       __native_set_fixmap(idx, pte);
-
-#ifdef CONFIG_X86_64
-       /* Replicate changes to map the vsyscall page into the user
-          pagetable vsyscall mapping. */
-       if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) {
-               unsigned long vaddr = __fix_to_virt(idx);
-               set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
-       }
-#endif
-}
-
 static const struct pv_info xen_info __initdata = {
        .paravirt_enabled = 1,
        .shared_kernel_pmd = 0,
@@ -1271,87 +828,6 @@ static const struct pv_apic_ops xen_apic_ops __initdata = {
 #endif
 };
 
-static const struct pv_mmu_ops xen_mmu_ops __initdata = {
-       .pagetable_setup_start = xen_pagetable_setup_start,
-       .pagetable_setup_done = xen_pagetable_setup_done,
-
-       .read_cr2 = xen_read_cr2,
-       .write_cr2 = xen_write_cr2,
-
-       .read_cr3 = xen_read_cr3,
-       .write_cr3 = xen_write_cr3,
-
-       .flush_tlb_user = xen_flush_tlb,
-       .flush_tlb_kernel = xen_flush_tlb,
-       .flush_tlb_single = xen_flush_tlb_single,
-       .flush_tlb_others = xen_flush_tlb_others,
-
-       .pte_update = paravirt_nop,
-       .pte_update_defer = paravirt_nop,
-
-       .pgd_alloc = xen_pgd_alloc,
-       .pgd_free = xen_pgd_free,
-
-       .alloc_pte = xen_alloc_pte_init,
-       .release_pte = xen_release_pte_init,
-       .alloc_pmd = xen_alloc_pte_init,
-       .alloc_pmd_clone = paravirt_nop,
-       .release_pmd = xen_release_pte_init,
-
-#ifdef CONFIG_HIGHPTE
-       .kmap_atomic_pte = xen_kmap_atomic_pte,
-#endif
-
-#ifdef CONFIG_X86_64
-       .set_pte = xen_set_pte,
-#else
-       .set_pte = xen_set_pte_init,
-#endif
-       .set_pte_at = xen_set_pte_at,
-       .set_pmd = xen_set_pmd_hyper,
-
-       .ptep_modify_prot_start = __ptep_modify_prot_start,
-       .ptep_modify_prot_commit = __ptep_modify_prot_commit,
-
-       .pte_val = xen_pte_val,
-       .pte_flags = native_pte_flags,
-       .pgd_val = xen_pgd_val,
-
-       .make_pte = xen_make_pte,
-       .make_pgd = xen_make_pgd,
-
-#ifdef CONFIG_X86_PAE
-       .set_pte_atomic = xen_set_pte_atomic,
-       .set_pte_present = xen_set_pte_at,
-       .pte_clear = xen_pte_clear,
-       .pmd_clear = xen_pmd_clear,
-#endif /* CONFIG_X86_PAE */
-       .set_pud = xen_set_pud_hyper,
-
-       .make_pmd = xen_make_pmd,
-       .pmd_val = xen_pmd_val,
-
-#if PAGETABLE_LEVELS == 4
-       .pud_val = xen_pud_val,
-       .make_pud = xen_make_pud,
-       .set_pgd = xen_set_pgd_hyper,
-
-       .alloc_pud = xen_alloc_pte_init,
-       .release_pud = xen_release_pte_init,
-#endif /* PAGETABLE_LEVELS == 4 */
-
-       .activate_mm = xen_activate_mm,
-       .dup_mmap = xen_dup_mmap,
-       .exit_mmap = xen_exit_mmap,
-
-       .lazy_mode = {
-               .enter = paravirt_enter_lazy_mmu,
-               .leave = xen_leave_lazy,
-       },
-
-       .set_fixmap = xen_set_fixmap,
-};
-
 static void xen_reboot(int reason)
 {
        struct sched_shutdown r = { .reason = reason };
@@ -1394,223 +870,6 @@ static const struct machine_ops __initdata xen_machine_ops = {
 };
 
 
-static void __init xen_reserve_top(void)
-{
-#ifdef CONFIG_X86_32
-       unsigned long top = HYPERVISOR_VIRT_START;
-       struct xen_platform_parameters pp;
-
-       if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
-               top = pp.virt_start;
-
-       reserve_top_address(-top);
-#endif /* CONFIG_X86_32 */
-}
-
-/*
- * Like __va(), but returns address in the kernel mapping (which is
- * all we have until the physical memory mapping has been set up.
- */
-static void *__ka(phys_addr_t paddr)
-{
-#ifdef CONFIG_X86_64
-       return (void *)(paddr + __START_KERNEL_map);
-#else
-       return __va(paddr);
-#endif
-}
-
-/* Convert a machine address to physical address */
-static unsigned long m2p(phys_addr_t maddr)
-{
-       phys_addr_t paddr;
-
-       maddr &= PTE_PFN_MASK;
-       paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
-
-       return paddr;
-}
-
-/* Convert a machine address to kernel virtual */
-static void *m2v(phys_addr_t maddr)
-{
-       return __ka(m2p(maddr));
-}
-
-static void set_page_prot(void *addr, pgprot_t prot)
-{
-       unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
-       pte_t pte = pfn_pte(pfn, prot);
-
-       if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
-               BUG();
-}
-
-static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
-{
-       unsigned pmdidx, pteidx;
-       unsigned ident_pte;
-       unsigned long pfn;
-
-       ident_pte = 0;
-       pfn = 0;
-       for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
-               pte_t *pte_page;
-
-               /* Reuse or allocate a page of ptes */
-               if (pmd_present(pmd[pmdidx]))
-                       pte_page = m2v(pmd[pmdidx].pmd);
-               else {
-                       /* Check for free pte pages */
-                       if (ident_pte == ARRAY_SIZE(level1_ident_pgt))
-                               break;
-
-                       pte_page = &level1_ident_pgt[ident_pte];
-                       ident_pte += PTRS_PER_PTE;
-
-                       pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
-               }
-
-               /* Install mappings */
-               for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
-                       pte_t pte;
-
-                       if (pfn > max_pfn_mapped)
-                               max_pfn_mapped = pfn;
-
-                       if (!pte_none(pte_page[pteidx]))
-                               continue;
-
-                       pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
-                       pte_page[pteidx] = pte;
-               }
-       }
-
-       for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
-               set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
-
-       set_page_prot(pmd, PAGE_KERNEL_RO);
-}
-
-#ifdef CONFIG_X86_64
-static void convert_pfn_mfn(void *v)
-{
-       pte_t *pte = v;
-       int i;
-
-       /* All levels are converted the same way, so just treat them
-          as ptes. */
-       for (i = 0; i < PTRS_PER_PTE; i++)
-               pte[i] = xen_make_pte(pte[i].pte);
-}
-
-/*
- * Set up the inital kernel pagetable.
- *
- * We can construct this by grafting the Xen provided pagetable into
- * head_64.S's preconstructed pagetables.  We copy the Xen L2's into
- * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt.  This
- * means that only the kernel has a physical mapping to start with -
- * but that's enough to get __va working.  We need to fill in the rest
- * of the physical mapping once some sort of allocator has been set
- * up.
- */
-static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
-                                               unsigned long max_pfn)
-{
-       pud_t *l3;
-       pmd_t *l2;
-
-       /* Zap identity mapping */
-       init_level4_pgt[0] = __pgd(0);
-
-       /* Pre-constructed entries are in pfn, so convert to mfn */
-       convert_pfn_mfn(init_level4_pgt);
-       convert_pfn_mfn(level3_ident_pgt);
-       convert_pfn_mfn(level3_kernel_pgt);
-
-       l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
-       l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
-
-       memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
-       memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
-
-       l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
-       l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
-       memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
-
-       /* Set up identity map */
-       xen_map_identity_early(level2_ident_pgt, max_pfn);
-
-       /* Make pagetable pieces RO */
-       set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
-       set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
-       set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
-       set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
-       set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
-       set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
-
-       /* Pin down new L4 */
-       pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
-                         PFN_DOWN(__pa_symbol(init_level4_pgt)));
-
-       /* Unpin Xen-provided one */
-       pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
-
-       /* Switch over */
-       pgd = init_level4_pgt;
-
-       /*
-        * At this stage there can be no user pgd, and no page
-        * structure to attach it to, so make sure we just set kernel
-        * pgd.
-        */
-       xen_mc_batch();
-       __xen_write_cr3(true, __pa(pgd));
-       xen_mc_issue(PARAVIRT_LAZY_CPU);
-
-       reserve_early(__pa(xen_start_info->pt_base),
-                     __pa(xen_start_info->pt_base +
-                          xen_start_info->nr_pt_frames * PAGE_SIZE),
-                     "XEN PAGETABLES");
-
-       return pgd;
-}
-#else  /* !CONFIG_X86_64 */
-static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;
-
-static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
-                                               unsigned long max_pfn)
-{
-       pmd_t *kernel_pmd;
-
-       init_pg_tables_start = __pa(pgd);
-       init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE;
-       max_pfn_mapped = PFN_DOWN(init_pg_tables_end + 512*1024);
-
-       kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
-       memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
-
-       xen_map_identity_early(level2_kernel_pgt, max_pfn);
-
-       memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
-       set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY],
-                       __pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT));
-
-       set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
-       set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
-       set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
-
-       pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
-
-       xen_write_cr3(__pa(swapper_pg_dir));
-
-       pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));
-
-       return swapper_pg_dir;
-}
-#endif /* CONFIG_X86_64 */
-
 /* First C function to be called on Xen boot */
 asmlinkage void __init xen_start_kernel(void)
 {
@@ -1650,10 +909,18 @@ asmlinkage void __init xen_start_kernel(void)
        machine_ops = xen_machine_ops;
 
 #ifdef CONFIG_X86_64
-       /* Disable until direct per-cpu data access. */
-       have_vcpu_info_placement = 0;
-       x86_64_init_pda();
+       /*
+        * Set up percpu state.  We only need to do this for 64-bit
+        * because 32-bit already has %fs set properly.
+        */
+       load_percpu_segment(0);
 #endif
+       /*
+        * The only reliable way to retain the initial address of the
+        * percpu gdt_page is to remember it here, so we can go and
+        * mark it RW later, when the initial percpu area is freed.
+        */
+       xen_initial_gdt = &per_cpu(gdt_page, 0);
 
        xen_smp_init();
 
index bb04260..cfd1779 100644 (file)
@@ -19,27 +19,12 @@ void xen_force_evtchn_callback(void)
        (void)HYPERVISOR_xen_version(0, NULL);
 }
 
-static void __init __xen_init_IRQ(void)
-{
-       int i;
-
-       /* Create identity vector->irq map */
-       for(i = 0; i < NR_VECTORS; i++) {
-               int cpu;
-
-               for_each_possible_cpu(cpu)
-                       per_cpu(vector_irq, cpu)[i] = i;
-       }
-
-       xen_init_IRQ();
-}
-
 static unsigned long xen_save_fl(void)
 {
        struct vcpu_info *vcpu;
        unsigned long flags;
 
-       vcpu = x86_read_percpu(xen_vcpu);
+       vcpu = percpu_read(xen_vcpu);
 
        /* flag has opposite sense of mask */
        flags = !vcpu->evtchn_upcall_mask;
@@ -50,6 +35,7 @@ static unsigned long xen_save_fl(void)
        */
        return (-flags) & X86_EFLAGS_IF;
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
 
 static void xen_restore_fl(unsigned long flags)
 {
@@ -62,7 +48,7 @@ static void xen_restore_fl(unsigned long flags)
           make sure we don't switch CPUs between getting the vcpu
           pointer and updating the mask. */
        preempt_disable();
-       vcpu = x86_read_percpu(xen_vcpu);
+       vcpu = percpu_read(xen_vcpu);
        vcpu->evtchn_upcall_mask = flags;
        preempt_enable_no_resched();
 
@@ -76,6 +62,7 @@ static void xen_restore_fl(unsigned long flags)
                        xen_force_evtchn_callback();
        }
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
 
 static void xen_irq_disable(void)
 {
@@ -83,9 +70,10 @@ static void xen_irq_disable(void)
           make sure we don't switch CPUs between getting the vcpu
           pointer and updating the mask. */
        preempt_disable();
-       x86_read_percpu(xen_vcpu)->evtchn_upcall_mask = 1;
+       percpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
        preempt_enable_no_resched();
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
 
 static void xen_irq_enable(void)
 {
@@ -96,7 +84,7 @@ static void xen_irq_enable(void)
           the caller is confused and is trying to re-enable interrupts
           on an indeterminate processor. */
 
-       vcpu = x86_read_percpu(xen_vcpu);
+       vcpu = percpu_read(xen_vcpu);
        vcpu->evtchn_upcall_mask = 0;
 
        /* Doesn't matter if we get preempted here, because any
@@ -106,6 +94,7 @@ static void xen_irq_enable(void)
        if (unlikely(vcpu->evtchn_upcall_pending))
                xen_force_evtchn_callback();
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);
 
 static void xen_safe_halt(void)
 {
@@ -123,11 +112,13 @@ static void xen_halt(void)
 }
 
 static const struct pv_irq_ops xen_irq_ops __initdata = {
-       .init_IRQ = __xen_init_IRQ,
-       .save_fl = xen_save_fl,
-       .restore_fl = xen_restore_fl,
-       .irq_disable = xen_irq_disable,
-       .irq_enable = xen_irq_enable,
+       .init_IRQ = xen_init_IRQ,
+
+       .save_fl = PV_CALLEE_SAVE(xen_save_fl),
+       .restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
+       .irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
+       .irq_enable = PV_CALLEE_SAVE(xen_irq_enable),
+
        .safe_halt = xen_safe_halt,
        .halt = xen_halt,
 #ifdef CONFIG_X86_64
index 503c240..d2e8ed1 100644 (file)
@@ -47,6 +47,7 @@
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
 #include <asm/mmu_context.h>
+#include <asm/setup.h>
 #include <asm/paravirt.h>
 #include <asm/linkage.h>
 
@@ -55,6 +56,8 @@
 
 #include <xen/page.h>
 #include <xen/interface/xen.h>
+#include <xen/interface/version.h>
+#include <xen/hvc-console.h>
 
 #include "multicalls.h"
 #include "mmu.h"
@@ -114,6 +117,37 @@ static inline void check_zero(void)
 
 #endif /* CONFIG_XEN_DEBUG_FS */
 
+
+/*
+ * Identity map, in addition to plain kernel map.  This needs to be
+ * large enough to provide the page table pages used to map the rest.
+ * Each page can map 2MB.
+ */
+static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;
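A quick sizing sketch for the array above (illustrative; assumes 4KB pages and the 512-entry pte pages implied by the 2MB figure in the comment):

    /* one pte page maps PTRS_PER_PTE * PAGE_SIZE = 512 * 4KB = 2MB,
     * so the four pages reserved here can identity-map up to 8MB
     * before a real page allocator is available */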
+
+#ifdef CONFIG_X86_64
+/* l3 pud for userspace vsyscall mapping */
+static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
+#endif /* CONFIG_X86_64 */
+
+/*
+ * Note about cr3 (pagetable base) values:
+ *
+ * xen_cr3 contains the current logical cr3 value; it contains the
+ * last set cr3.  This may not be the current effective cr3, because
+ * its update may be being lazily deferred.  However, a vcpu looking
+ * at its own cr3 can use this value knowing that everything will
+ * be self-consistent.
+ *
+ * xen_current_cr3 contains the actual vcpu cr3; it is set once the
+ * hypercall to set the vcpu cr3 is complete (so it may be a little
+ * out of date, but it will never be set early).  If one vcpu is
+ * looking at another vcpu's cr3 value, it should use this variable.
+ */
+DEFINE_PER_CPU(unsigned long, xen_cr3);         /* cr3 stored as physaddr */
+DEFINE_PER_CPU(unsigned long, xen_current_cr3);         /* actual vcpu cr3 */
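An illustrative sketch of the usage rule described above (not part of the patch; the helper names are invented):

    static unsigned long this_vcpu_cr3(void)
    {
            /* our own logical cr3 is always locally self-consistent */
            return percpu_read(xen_cr3);
    }

    static unsigned long other_vcpu_cr3(int cpu)
    {
            /* someone else's cr3 must be the hypercall-confirmed value */
            return per_cpu(xen_current_cr3, cpu);
    }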
+
+
 /*
  * Just beyond the highest usermode address.  STACK_TOP_MAX has a
  * redzone above it, so round it up to a PGD boundary.
@@ -458,28 +492,33 @@ pteval_t xen_pte_val(pte_t pte)
 {
        return pte_mfn_to_pfn(pte.pte);
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
 
 pgdval_t xen_pgd_val(pgd_t pgd)
 {
        return pte_mfn_to_pfn(pgd.pgd);
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
 
 pte_t xen_make_pte(pteval_t pte)
 {
        pte = pte_pfn_to_mfn(pte);
        return native_make_pte(pte);
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
 
 pgd_t xen_make_pgd(pgdval_t pgd)
 {
        pgd = pte_pfn_to_mfn(pgd);
        return native_make_pgd(pgd);
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
 
 pmdval_t xen_pmd_val(pmd_t pmd)
 {
        return pte_mfn_to_pfn(pmd.pmd);
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
 
 void xen_set_pud_hyper(pud_t *ptr, pud_t val)
 {
@@ -556,12 +595,14 @@ pmd_t xen_make_pmd(pmdval_t pmd)
        pmd = pte_pfn_to_mfn(pmd);
        return native_make_pmd(pmd);
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
 
 #if PAGETABLE_LEVELS == 4
 pudval_t xen_pud_val(pud_t pud)
 {
        return pte_mfn_to_pfn(pud.pud);
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
 
 pud_t xen_make_pud(pudval_t pud)
 {
@@ -569,6 +610,7 @@ pud_t xen_make_pud(pudval_t pud)
 
        return native_make_pud(pud);
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
 
 pgd_t *xen_get_user_pgd(pgd_t *pgd)
 {
@@ -1063,18 +1105,14 @@ static void drop_other_mm_ref(void *info)
        struct mm_struct *mm = info;
        struct mm_struct *active_mm;
 
-#ifdef CONFIG_X86_64
-       active_mm = read_pda(active_mm);
-#else
-       active_mm = __get_cpu_var(cpu_tlbstate).active_mm;
-#endif
+       active_mm = percpu_read(cpu_tlbstate.active_mm);
 
        if (active_mm == mm)
                leave_mm(smp_processor_id());
 
        /* If this cpu still has a stale cr3 reference, then make sure
           it has been flushed. */
-       if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
+       if (percpu_read(xen_current_cr3) == __pa(mm->pgd)) {
                load_cr3(swapper_pg_dir);
                arch_flush_lazy_cpu_mode();
        }
@@ -1156,6 +1194,709 @@ void xen_exit_mmap(struct mm_struct *mm)
        spin_unlock(&mm->page_table_lock);
 }
 
+static __init void xen_pagetable_setup_start(pgd_t *base)
+{
+}
+
+static __init void xen_pagetable_setup_done(pgd_t *base)
+{
+       xen_setup_shared_info();
+}
+
+static void xen_write_cr2(unsigned long cr2)
+{
+       percpu_read(xen_vcpu)->arch.cr2 = cr2;
+}
+
+static unsigned long xen_read_cr2(void)
+{
+       return percpu_read(xen_vcpu)->arch.cr2;
+}
+
+unsigned long xen_read_cr2_direct(void)
+{
+       return percpu_read(xen_vcpu_info.arch.cr2);
+}
+
+static void xen_flush_tlb(void)
+{
+       struct mmuext_op *op;
+       struct multicall_space mcs;
+
+       preempt_disable();
+
+       mcs = xen_mc_entry(sizeof(*op));
+
+       op = mcs.args;
+       op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
+       MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+       xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+       preempt_enable();
+}
+
+static void xen_flush_tlb_single(unsigned long addr)
+{
+       struct mmuext_op *op;
+       struct multicall_space mcs;
+
+       preempt_disable();
+
+       mcs = xen_mc_entry(sizeof(*op));
+       op = mcs.args;
+       op->cmd = MMUEXT_INVLPG_LOCAL;
+       op->arg1.linear_addr = addr & PAGE_MASK;
+       MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+       xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+       preempt_enable();
+}
+
+static void xen_flush_tlb_others(const struct cpumask *cpus,
+                                struct mm_struct *mm, unsigned long va)
+{
+       struct {
+               struct mmuext_op op;
+               DECLARE_BITMAP(mask, NR_CPUS);
+       } *args;
+       struct multicall_space mcs;
+
+       BUG_ON(cpumask_empty(cpus));
+       BUG_ON(!mm);
+
+       mcs = xen_mc_entry(sizeof(*args));
+       args = mcs.args;
+       args->op.arg2.vcpumask = to_cpumask(args->mask);
+
+       /* Remove us, and any offline CPUS. */
+       cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
+       cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
+       if (unlikely(cpumask_empty(to_cpumask(args->mask))))
+               goto issue;
+
+       if (va == TLB_FLUSH_ALL) {
+               args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
+       } else {
+               args->op.cmd = MMUEXT_INVLPG_MULTI;
+               args->op.arg1.linear_addr = va;
+       }
+
+       MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
+
+issue:
+       xen_mc_issue(PARAVIRT_LAZY_MMU);
+}
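The three flush helpers above share the same multicall shape; a condensed sketch (illustrative only, preemption handling omitted):

    struct multicall_space mcs = xen_mc_entry(sizeof(struct mmuext_op));
    struct mmuext_op *op = mcs.args;

    op->cmd = MMUEXT_TLB_FLUSH_LOCAL;                 /* pick the operation */
    MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); /* queue it in the batch */
    xen_mc_issue(PARAVIRT_LAZY_MMU);                  /* flush now, unless lazy
                                                         MMU mode is active */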
+
+static unsigned long xen_read_cr3(void)
+{
+       return percpu_read(xen_cr3);
+}
+
+static void set_current_cr3(void *v)
+{
+       percpu_write(xen_current_cr3, (unsigned long)v);
+}
+
+static void __xen_write_cr3(bool kernel, unsigned long cr3)
+{
+       struct mmuext_op *op;
+       struct multicall_space mcs;
+       unsigned long mfn;
+
+       if (cr3)
+               mfn = pfn_to_mfn(PFN_DOWN(cr3));
+       else
+               mfn = 0;
+
+       WARN_ON(mfn == 0 && kernel);
+
+       mcs = __xen_mc_entry(sizeof(*op));
+
+       op = mcs.args;
+       op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
+       op->arg1.mfn = mfn;
+
+       MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+       if (kernel) {
+               percpu_write(xen_cr3, cr3);
+
+               /* Update xen_current_cr3 once the batch has actually
+                  been submitted. */
+               xen_mc_callback(set_current_cr3, (void *)cr3);
+       }
+}
+
+static void xen_write_cr3(unsigned long cr3)
+{
+       BUG_ON(preemptible());
+
+       xen_mc_batch();  /* disables interrupts */
+
+       /* Update while interrupts are disabled, so it's atomic with
+          respect to ipis */
+       percpu_write(xen_cr3, cr3);
+
+       __xen_write_cr3(true, cr3);
+
+#ifdef CONFIG_X86_64
+       {
+               pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
+               if (user_pgd)
+                       __xen_write_cr3(false, __pa(user_pgd));
+               else
+                       __xen_write_cr3(false, 0);
+       }
+#endif
+
+       xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
+}
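On 64-bit the batch built above ends up carrying two base-pointer updates; a sketch of its contents (illustrative only):

    /* MMUEXT_NEW_BASEPTR       <- kernel pagetable for this vcpu
     * MMUEXT_NEW_USER_BASEPTR  <- user half of the pagetable, or 0 if no
     *                             user pgd has been allocated yet;
     * both are submitted together by xen_mc_issue() unless lazy CPU mode
     * defers the flush. */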
+
+static int xen_pgd_alloc(struct mm_struct *mm)
+{
+       pgd_t *pgd = mm->pgd;
+       int ret = 0;
+
+       BUG_ON(PagePinned(virt_to_page(pgd)));
+
+#ifdef CONFIG_X86_64
+       {
+               struct page *page = virt_to_page(pgd);
+               pgd_t *user_pgd;
+
+               BUG_ON(page->private != 0);
+
+               ret = -ENOMEM;
+
+               user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+               page->private = (unsigned long)user_pgd;
+
+               if (user_pgd != NULL) {
+                       user_pgd[pgd_index(VSYSCALL_START)] =
+                               __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
+                       ret = 0;
+               }
+
+               BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
+       }
+#endif
+
+       return ret;
+}
+
+static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+#ifdef CONFIG_X86_64
+       pgd_t *user_pgd = xen_get_user_pgd(pgd);
+
+       if (user_pgd)
+               free_page((unsigned long)user_pgd);
+#endif
+}
+
+#ifdef CONFIG_HIGHPTE
+static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
+{
+       pgprot_t prot = PAGE_KERNEL;
+
+       if (PagePinned(page))
+               prot = PAGE_KERNEL_RO;
+
+       if (0 && PageHighMem(page))
+               printk("mapping highpte %lx type %d prot %s\n",
+                      page_to_pfn(page), type,
+                      (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");
+
+       return kmap_atomic_prot(page, type, prot);
+}
+#endif
+
+#ifdef CONFIG_X86_32
+static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
+{
+       /* If there's an existing pte, then don't allow _PAGE_RW to be set */
+       if (pte_val_ma(*ptep) & _PAGE_PRESENT)
+               pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
+                              pte_val_ma(pte));
+
+       return pte;
+}
+
+/* Init-time set_pte while constructing initial pagetables, which
+   doesn't allow RO pagetable pages to be remapped RW */
+static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
+{
+       pte = mask_rw_pte(ptep, pte);
+
+       xen_set_pte(ptep, pte);
+}
+#endif
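The masking expression in mask_rw_pte() is easy to misread; a worked truth table (illustrative):

    /* old pte has _PAGE_RW set:   (RW | ~RW) == ~0UL -> new pte kept as-is
     * old pte has _PAGE_RW clear: (0  | ~RW) == ~RW  -> _PAGE_RW stripped
     *
     * i.e. a mapping that is already read-only (a pagetable page) can
     * never be silently re-mapped read-write during early boot. */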
+
+/* Early in boot, while setting up the initial pagetable, assume
+   everything is pinned. */
+static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
+{
+#ifdef CONFIG_FLATMEM
+       BUG_ON(mem_map);        /* should only be used early */
+#endif
+       make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
+}
+
+/* Early release_pte assumes that all pts are pinned, since there's
+   only init_mm and anything attached to that is pinned. */
+static void xen_release_pte_init(unsigned long pfn)
+{
+       make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
+}
+
+static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
+{
+       struct mmuext_op op;
+       op.cmd = cmd;
+       op.arg1.mfn = pfn_to_mfn(pfn);
+       if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
+               BUG();
+}
+
+/* This needs to make sure the new pte page is pinned iff it's being
+   attached to a pinned pagetable. */
+static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
+{
+       struct page *page = pfn_to_page(pfn);
+
+       if (PagePinned(virt_to_page(mm->pgd))) {
+               SetPagePinned(page);
+
+               vm_unmap_aliases();
+               if (!PageHighMem(page)) {
+                       make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
+                       if (level == PT_PTE && USE_SPLIT_PTLOCKS)
+                               pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+               } else {
+                       /* make sure there are no stray mappings of
+                          this page */
+                       kmap_flush_unused();
+               }
+       }
+}
+
+static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
+{
+       xen_alloc_ptpage(mm, pfn, PT_PTE);
+}
+
+static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
+{
+       xen_alloc_ptpage(mm, pfn, PT_PMD);
+}
+
+/* This should never happen until we're OK to use struct page */
+static void xen_release_ptpage(unsigned long pfn, unsigned level)
+{
+       struct page *page = pfn_to_page(pfn);
+
+       if (PagePinned(page)) {
+               if (!PageHighMem(page)) {
+                       if (level == PT_PTE && USE_SPLIT_PTLOCKS)
+                               pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
+                       make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
+               }
+               ClearPagePinned(page);
+       }
+}
+
+static void xen_release_pte(unsigned long pfn)
+{
+       xen_release_ptpage(pfn, PT_PTE);
+}
+
+static void xen_release_pmd(unsigned long pfn)
+{
+       xen_release_ptpage(pfn, PT_PMD);
+}
+
+#if PAGETABLE_LEVELS == 4
+static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
+{
+       xen_alloc_ptpage(mm, pfn, PT_PUD);
+}
+
+static void xen_release_pud(unsigned long pfn)
+{
+       xen_release_ptpage(pfn, PT_PUD);
+}
+#endif
+
+void __init xen_reserve_top(void)
+{
+#ifdef CONFIG_X86_32
+       unsigned long top = HYPERVISOR_VIRT_START;
+       struct xen_platform_parameters pp;
+
+       if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
+               top = pp.virt_start;
+
+       reserve_top_address(-top);
+#endif /* CONFIG_X86_32 */
+}
+
+/*
+ * Like __va(), but returns address in the kernel mapping (which is
+ * all we have until the physical memory mapping has been set up.
+ */
+static void *__ka(phys_addr_t paddr)
+{
+#ifdef CONFIG_X86_64
+       return (void *)(paddr + __START_KERNEL_map);
+#else
+       return __va(paddr);
+#endif
+}
+
+/* Convert a machine address to physical address */
+static unsigned long m2p(phys_addr_t maddr)
+{
+       phys_addr_t paddr;
+
+       maddr &= PTE_PFN_MASK;
+       paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
+
+       return paddr;
+}
+
+/* Convert a machine address to kernel virtual */
+static void *m2v(phys_addr_t maddr)
+{
+       return __ka(m2p(maddr));
+}
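A usage sketch for these helpers, lifted from xen_setup_kernel_pagetable() further down (shown here only for orientation):

    /* walk one level of the Xen-provided pagetable early in boot */
    pud_t *l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
    pmd_t *l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);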
+
+static void set_page_prot(void *addr, pgprot_t prot)
+{
+       unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
+       pte_t pte = pfn_pte(pfn, prot);
+
+       if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
+               BUG();
+}
+
+static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
+{
+       unsigned pmdidx, pteidx;
+       unsigned ident_pte;
+       unsigned long pfn;
+
+       ident_pte = 0;
+       pfn = 0;
+       for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
+               pte_t *pte_page;
+
+               /* Reuse or allocate a page of ptes */
+               if (pmd_present(pmd[pmdidx]))
+                       pte_page = m2v(pmd[pmdidx].pmd);
+               else {
+                       /* Check for free pte pages */
+                       if (ident_pte == ARRAY_SIZE(level1_ident_pgt))
+                               break;
+
+                       pte_page = &level1_ident_pgt[ident_pte];
+                       ident_pte += PTRS_PER_PTE;
+
+                       pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
+               }
+
+               /* Install mappings */
+               for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
+                       pte_t pte;
+
+                       if (pfn > max_pfn_mapped)
+                               max_pfn_mapped = pfn;
+
+                       if (!pte_none(pte_page[pteidx]))
+                               continue;
+
+                       pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
+                       pte_page[pteidx] = pte;
+               }
+       }
+
+       for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
+               set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
+
+       set_page_prot(pmd, PAGE_KERNEL_RO);
+}
+
+#ifdef CONFIG_X86_64
+static void convert_pfn_mfn(void *v)
+{
+       pte_t *pte = v;
+       int i;
+
+       /* All levels are converted the same way, so just treat them
+          as ptes. */
+       for (i = 0; i < PTRS_PER_PTE; i++)
+               pte[i] = xen_make_pte(pte[i].pte);
+}
+
+/*
+ * Set up the initial kernel pagetable.
+ *
+ * We can construct this by grafting the Xen provided pagetable into
+ * head_64.S's preconstructed pagetables.  We copy the Xen L2's into
+ * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt.  This
+ * means that only the kernel has a physical mapping to start with -
+ * but that's enough to get __va working.  We need to fill in the rest
+ * of the physical mapping once some sort of allocator has been set
+ * up.
+ */
+__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
+                                        unsigned long max_pfn)
+{
+       pud_t *l3;
+       pmd_t *l2;
+
+       /* Zap identity mapping */
+       init_level4_pgt[0] = __pgd(0);
+
+       /* Pre-constructed entries are in pfn, so convert to mfn */
+       convert_pfn_mfn(init_level4_pgt);
+       convert_pfn_mfn(level3_ident_pgt);
+       convert_pfn_mfn(level3_kernel_pgt);
+
+       l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
+       l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
+
+       memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
+       memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
+
+       l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
+       l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
+       memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
+
+       /* Set up identity map */
+       xen_map_identity_early(level2_ident_pgt, max_pfn);
+
+       /* Make pagetable pieces RO */
+       set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
+       set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
+       set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
+       set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
+       set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
+       set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
+
+       /* Pin down new L4 */
+       pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
+                         PFN_DOWN(__pa_symbol(init_level4_pgt)));
+
+       /* Unpin Xen-provided one */
+       pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
+
+       /* Switch over */
+       pgd = init_level4_pgt;
+
+       /*
+        * At this stage there can be no user pgd, and no page
+        * structure to attach it to, so make sure we just set kernel
+        * pgd.
+        */
+       xen_mc_batch();
+       __xen_write_cr3(true, __pa(pgd));
+       xen_mc_issue(PARAVIRT_LAZY_CPU);
+
+       reserve_early(__pa(xen_start_info->pt_base),
+                     __pa(xen_start_info->pt_base +
+                          xen_start_info->nr_pt_frames * PAGE_SIZE),
+                     "XEN PAGETABLES");
+
+       return pgd;
+}
+#else  /* !CONFIG_X86_64 */
+static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;
+
+__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
+                                        unsigned long max_pfn)
+{
+       pmd_t *kernel_pmd;
+
+       init_pg_tables_start = __pa(pgd);
+       init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE;
+       max_pfn_mapped = PFN_DOWN(init_pg_tables_end + 512*1024);
+
+       kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
+       memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
+
+       xen_map_identity_early(level2_kernel_pgt, max_pfn);
+
+       memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
+       set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY],
+                       __pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT));
+
+       set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
+       set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
+       set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
+
+       pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
+
+       xen_write_cr3(__pa(swapper_pg_dir));
+
+       pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));
+
+       return swapper_pg_dir;
+}
+#endif /* CONFIG_X86_64 */
+
+static void xen_set_fixmap(unsigned idx, unsigned long phys, pgprot_t prot)
+{
+       pte_t pte;
+
+       phys >>= PAGE_SHIFT;
+
+       switch (idx) {
+       case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
+#ifdef CONFIG_X86_F00F_BUG
+       case FIX_F00F_IDT:
+#endif
+#ifdef CONFIG_X86_32
+       case FIX_WP_TEST:
+       case FIX_VDSO:
+# ifdef CONFIG_HIGHMEM
+       case FIX_KMAP_BEGIN ... FIX_KMAP_END:
+# endif
+#else
+       case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
+#endif
+#ifdef CONFIG_X86_LOCAL_APIC
+       case FIX_APIC_BASE:     /* maps dummy local APIC */
+#endif
+               pte = pfn_pte(phys, prot);
+               break;
+
+       default:
+               pte = mfn_pte(phys, prot);
+               break;
+       }
+
+       __native_set_fixmap(idx, pte);
+
+#ifdef CONFIG_X86_64
+       /* Replicate changes to map the vsyscall page into the user
+          pagetable vsyscall mapping. */
+       if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) {
+               unsigned long vaddr = __fix_to_virt(idx);
+               set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
+       }
+#endif
+}
+
+__init void xen_post_allocator_init(void)
+{
+       pv_mmu_ops.set_pte = xen_set_pte;
+       pv_mmu_ops.set_pmd = xen_set_pmd;
+       pv_mmu_ops.set_pud = xen_set_pud;
+#if PAGETABLE_LEVELS == 4
+       pv_mmu_ops.set_pgd = xen_set_pgd;
+#endif
+
+       /* This will work as long as patching hasn't happened yet
+          (which it hasn't) */
+       pv_mmu_ops.alloc_pte = xen_alloc_pte;
+       pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
+       pv_mmu_ops.release_pte = xen_release_pte;
+       pv_mmu_ops.release_pmd = xen_release_pmd;
+#if PAGETABLE_LEVELS == 4
+       pv_mmu_ops.alloc_pud = xen_alloc_pud;
+       pv_mmu_ops.release_pud = xen_release_pud;
+#endif
+
+#ifdef CONFIG_X86_64
+       SetPagePinned(virt_to_page(level3_user_vsyscall));
+#endif
+       xen_mark_init_mm_pinned();
+}
+
+
+const struct pv_mmu_ops xen_mmu_ops __initdata = {
+       .pagetable_setup_start = xen_pagetable_setup_start,
+       .pagetable_setup_done = xen_pagetable_setup_done,
+
+       .read_cr2 = xen_read_cr2,
+       .write_cr2 = xen_write_cr2,
+
+       .read_cr3 = xen_read_cr3,
+       .write_cr3 = xen_write_cr3,
+
+       .flush_tlb_user = xen_flush_tlb,
+       .flush_tlb_kernel = xen_flush_tlb,
+       .flush_tlb_single = xen_flush_tlb_single,
+       .flush_tlb_others = xen_flush_tlb_others,
+
+       .pte_update = paravirt_nop,
+       .pte_update_defer = paravirt_nop,
+
+       .pgd_alloc = xen_pgd_alloc,
+       .pgd_free = xen_pgd_free,
+
+       .alloc_pte = xen_alloc_pte_init,
+       .release_pte = xen_release_pte_init,
+       .alloc_pmd = xen_alloc_pte_init,
+       .alloc_pmd_clone = paravirt_nop,
+       .release_pmd = xen_release_pte_init,
+
+#ifdef CONFIG_HIGHPTE
+       .kmap_atomic_pte = xen_kmap_atomic_pte,
+#endif
+
+#ifdef CONFIG_X86_64
+       .set_pte = xen_set_pte,
+#else
+       .set_pte = xen_set_pte_init,
+#endif
+       .set_pte_at = xen_set_pte_at,
+       .set_pmd = xen_set_pmd_hyper,
+
+       .ptep_modify_prot_start = __ptep_modify_prot_start,
+       .ptep_modify_prot_commit = __ptep_modify_prot_commit,
+
+       .pte_val = PV_CALLEE_SAVE(xen_pte_val),
+       .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
+
+       .make_pte = PV_CALLEE_SAVE(xen_make_pte),
+       .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
+
+#ifdef CONFIG_X86_PAE
+       .set_pte_atomic = xen_set_pte_atomic,
+       .set_pte_present = xen_set_pte_at,
+       .pte_clear = xen_pte_clear,
+       .pmd_clear = xen_pmd_clear,
+#endif /* CONFIG_X86_PAE */
+       .set_pud = xen_set_pud_hyper,
+
+       .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
+       .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
+
+#if PAGETABLE_LEVELS == 4
+       .pud_val = PV_CALLEE_SAVE(xen_pud_val),
+       .make_pud = PV_CALLEE_SAVE(xen_make_pud),
+       .set_pgd = xen_set_pgd_hyper,
+
+       .alloc_pud = xen_alloc_pte_init,
+       .release_pud = xen_release_pte_init,
+#endif /* PAGETABLE_LEVELS == 4 */
+
+       .activate_mm = xen_activate_mm,
+       .dup_mmap = xen_dup_mmap,
+       .exit_mmap = xen_exit_mmap,
+
+       .lazy_mode = {
+               .enter = paravirt_enter_lazy_mmu,
+               .leave = xen_leave_lazy,
+       },
+
+       .set_fixmap = xen_set_fixmap,
+};
+
+
 #ifdef CONFIG_XEN_DEBUG_FS
 
 static struct dentry *d_mmu_debug;
index 98d7165..24d1b44 100644 (file)
@@ -54,4 +54,7 @@ pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t
 void  xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
                                  pte_t *ptep, pte_t pte);
 
+unsigned long xen_read_cr2_direct(void);
+
+extern const struct pv_mmu_ops xen_mmu_ops;
 #endif /* _XEN_MMU_H */
index fa3e107..9e565da 100644 (file)
@@ -41,7 +41,7 @@ static inline void xen_mc_issue(unsigned mode)
                xen_mc_flush();
 
        /* restore flags saved in xen_mc_batch */
-       local_irq_restore(x86_read_percpu(xen_mc_irq_flags));
+       local_irq_restore(percpu_read(xen_mc_irq_flags));
 }
 
 /* Set up a callback to be called when the current batch is flushed */
index c44e206..035582a 100644 (file)
@@ -50,11 +50,7 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
  */
 static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 {
-#ifdef CONFIG_X86_32
-       __get_cpu_var(irq_stat).irq_resched_count++;
-#else
-       add_pda(irq_resched_count, 1);
-#endif
+       inc_irq_stat(irq_resched_count);
 
        return IRQ_HANDLED;
 }
@@ -78,7 +74,7 @@ static __cpuinit void cpu_bringup(void)
        xen_setup_cpu_clockevents();
 
        cpu_set(cpu, cpu_online_map);
-       x86_write_percpu(cpu_state, CPU_ONLINE);
+       percpu_write(cpu_state, CPU_ONLINE);
        wmb();
 
        /* We can take interrupts now: we're officially "up". */
@@ -174,7 +170,7 @@ static void __init xen_smp_prepare_boot_cpu(void)
 
        /* We've switched to the "real" per-cpu gdt, so make sure the
           old memory can be recycled */
-       make_lowmem_page_readwrite(&per_cpu_var(gdt_page));
+       make_lowmem_page_readwrite(xen_initial_gdt);
 
        xen_setup_vcpu_info_placement();
 }
@@ -239,6 +235,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
        ctxt->user_regs.ss = __KERNEL_DS;
 #ifdef CONFIG_X86_32
        ctxt->user_regs.fs = __KERNEL_PERCPU;
+#else
+       ctxt->gs_base_kernel = per_cpu_offset(cpu);
 #endif
        ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
        ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
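For context (illustrative, not part of the patch): the gs_base_kernel assignment a few lines up points the new vcpu's kernel %gs base at its per-cpu area, which is what lets the percpu_read()/PER_CPU_VAR() accesses used throughout this series resolve correctly once that CPU starts running. For example, after bringup the new vcpu can do:

    vcpu = percpu_read(xen_vcpu);   /* gs-relative load of its own copy */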
@@ -283,23 +281,14 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
        struct task_struct *idle = idle_task(cpu);
        int rc;
 
-#ifdef CONFIG_X86_64
-       /* Allocate node local memory for AP pdas */
-       WARN_ON(cpu == 0);
-       if (cpu > 0) {
-               rc = get_local_pda(cpu);
-               if (rc)
-                       return rc;
-       }
-#endif
-
-#ifdef CONFIG_X86_32
-       init_gdt(cpu);
        per_cpu(current_task, cpu) = idle;
+#ifdef CONFIG_X86_32
        irq_ctx_init(cpu);
 #else
-       cpu_pda(cpu)->pcurrent = idle;
        clear_tsk_thread_flag(idle, TIF_FORK);
+       per_cpu(kernel_stack, cpu) =
+               (unsigned long)task_stack_page(idle) -
+               KERNEL_STACK_OFFSET + THREAD_SIZE;
 #endif
        xen_setup_timer(cpu);
        xen_init_lock_cpu(cpu);
@@ -445,11 +434,7 @@ static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
 {
        irq_enter();
        generic_smp_call_function_interrupt();
-#ifdef CONFIG_X86_32
-       __get_cpu_var(irq_stat).irq_call_count++;
-#else
-       add_pda(irq_call_count, 1);
-#endif
+       inc_irq_stat(irq_call_count);
        irq_exit();
 
        return IRQ_HANDLED;
@@ -459,11 +444,7 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 {
        irq_enter();
        generic_smp_call_function_single_interrupt();
-#ifdef CONFIG_X86_32
-       __get_cpu_var(irq_stat).irq_call_count++;
-#else
-       add_pda(irq_call_count, 1);
-#endif
+       inc_irq_stat(irq_call_count);
        irq_exit();
 
        return IRQ_HANDLED;
index 212ffe0..95be7b4 100644 (file)
@@ -6,6 +6,7 @@
 
 #include <asm/xen/hypercall.h>
 #include <asm/xen/page.h>
+#include <asm/fixmap.h>
 
 #include "xen-ops.h"
 #include "mmu.h"
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
new file mode 100644 (file)
index 0000000..79d7362
--- /dev/null
@@ -0,0 +1,142 @@
+/*
+ * Asm versions of Xen pv-ops, suitable for either direct use or
+ * inlining.  The inline versions are the same as the direct-use
+ * versions, with the pre- and post-amble chopped off.
+ *
+ * This code is encoded for size rather than absolute efficiency, with
+ * a view to being able to inline as much as possible.
+ *
+ * We only bother with direct forms (ie, vcpu in percpu data) of the
+ * operations here; the indirect forms are better handled in C, since
+ * they're generally too large to inline anyway.
+ */
+
+#include <asm/asm-offsets.h>
+#include <asm/percpu.h>
+#include <asm/processor-flags.h>
+
+#include "xen-asm.h"
+
+/*
+ * Enable events.  This clears the event mask and tests the pending
+ * event status with one 'and' operation.  If there are pending events,
+ * then enter the hypervisor to get them handled.
+ */
+ENTRY(xen_irq_enable_direct)
+       /* Unmask events */
+       movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
+
+       /*
+        * Preempt here doesn't matter because that will deal with any
+        * pending interrupts.  The pending check may end up being run
+        * on the wrong CPU, but that doesn't hurt.
+        */
+
+       /* Test for pending */
+       testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
+       jz 1f
+
+2:     call check_events
+1:
+ENDPATCH(xen_irq_enable_direct)
+       ret
+       ENDPROC(xen_irq_enable_direct)
+       RELOC(xen_irq_enable_direct, 2b+1)
+
+
+/*
+ * Disabling events is simply a matter of making the event mask
+ * non-zero.
+ */
+ENTRY(xen_irq_disable_direct)
+       movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
+ENDPATCH(xen_irq_disable_direct)
+       ret
+       ENDPROC(xen_irq_disable_direct)
+       RELOC(xen_irq_disable_direct, 0)
+
+/*
+ * (xen_)save_fl is used to get the current interrupt enable status.
+ * Callers expect the status to be in X86_EFLAGS_IF, and other bits
+ * may be set in the return value.  We take advantage of this by
+ * making sure that X86_EFLAGS_IF has the right value (and other bits
+ * in that byte are 0), but other bits in the return value are
+ * undefined.  We need to toggle the state of the bit, because Xen and
+ * x86 use opposite senses (mask vs enable).
+ */
+ENTRY(xen_save_fl_direct)
+       testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
+       setz %ah
+       addb %ah, %ah
+ENDPATCH(xen_save_fl_direct)
+       ret
+       ENDPROC(xen_save_fl_direct)
+       RELOC(xen_save_fl_direct, 0)
+
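A worked example of the flag trick above (illustrative):

    /* events unmasked: mask byte == 0 -> ZF set -> setz gives %ah = 1,
     *                  addb %ah,%ah   -> %ah = 2 -> bits 8..15 of the
     *                  return value hold 2, i.e. X86_EFLAGS_IF (0x200)
     * events masked:   mask byte != 0 -> ZF clear -> %ah = 0 -> IF clear
     * (all other bits of the return value stay undefined, as the
     *  comment above allows) */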
+
+/*
+ * In principle the caller should be passing us a value returned from
+ * xen_save_fl_direct, but for robustness' sake we test only the
+ * X86_EFLAGS_IF flag rather than the whole byte. After setting the
+ * interrupt mask state, it checks for unmasked pending events and
+ * enters the hypervisor to get them delivered if so.
+ */
+ENTRY(xen_restore_fl_direct)
+#ifdef CONFIG_X86_64
+       testw $X86_EFLAGS_IF, %di
+#else
+       testb $X86_EFLAGS_IF>>8, %ah
+#endif
+       setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
+       /*
+        * Preempt here doesn't matter because that will deal with any
+        * pending interrupts.  The pending check may end up being run
+        * on the wrong CPU, but that doesn't hurt.
+        */
+
+       /* check for unmasked and pending */
+       cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
+       jz 1f
+2:     call check_events
+1:
+ENDPATCH(xen_restore_fl_direct)
+       ret
+       ENDPROC(xen_restore_fl_direct)
+       RELOC(xen_restore_fl_direct, 2b+1)
+
+
+/*
+ * Force an event check by making a hypercall, but preserve regs
+ * before making the call.
+ */
+check_events:
+#ifdef CONFIG_X86_32
+       push %eax
+       push %ecx
+       push %edx
+       call xen_force_evtchn_callback
+       pop %edx
+       pop %ecx
+       pop %eax
+#else
+       push %rax
+       push %rcx
+       push %rdx
+       push %rsi
+       push %rdi
+       push %r8
+       push %r9
+       push %r10
+       push %r11
+       call xen_force_evtchn_callback
+       pop %r11
+       pop %r10
+       pop %r9
+       pop %r8
+       pop %rdi
+       pop %rsi
+       pop %rdx
+       pop %rcx
+       pop %rax
+#endif
+       ret
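For reference, the C-level logic these direct paths encode matches the non-direct implementations in irq.c earlier in this series; a condensed sketch (illustrative only):

    /* unmask, then force an upcall if something was already pending */
    vcpu->evtchn_upcall_mask = 0;
    if (unlikely(vcpu->evtchn_upcall_pending))
            xen_force_evtchn_callback();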
diff --git a/arch/x86/xen/xen-asm.h b/arch/x86/xen/xen-asm.h
new file mode 100644 (file)
index 0000000..4652764
--- /dev/null
@@ -0,0 +1,12 @@
+#ifndef _XEN_XEN_ASM_H
+#define _XEN_XEN_ASM_H
+
+#include <linux/linkage.h>
+
+#define RELOC(x, v)    .globl x##_reloc; x##_reloc=v
+#define ENDPATCH(x)    .globl x##_end; x##_end=.
+
+/* Pseudo-flag used for virtual NMI, which we don't implement yet */
+#define XEN_EFLAGS_NMI 0x80000000
+
+#endif
index 42786f5..88e15de 100644 (file)
 /*
-       Asm versions of Xen pv-ops, suitable for either direct use or inlining.
-       The inline versions are the same as the direct-use versions, with the
-       pre- and post-amble chopped off.
-
-       This code is encoded for size rather than absolute efficiency,
-       with a view to being able to inline as much as possible.
-
-       We only bother with direct forms (ie, vcpu in pda) of the operations
-       here; the indirect forms are better handled in C, since they're
-       generally too large to inline anyway.
+ * Asm versions of Xen pv-ops, suitable for either direct use or
+ * inlining.  The inline versions are the same as the direct-use
+ * versions, with the pre- and post-amble chopped off.
+ *
+ * This code is encoded for size rather than absolute efficiency, with
+ * a view to being able to inline as much as possible.
+ *
+ * We only bother with direct forms (ie, vcpu in pda) of the
+ * operations here; the indirect forms are better handled in C, since
+ * they're generally too large to inline anyway.
  */
 
-#include <linux/linkage.h>
-
-#include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
-#include <asm/percpu.h>
 #include <asm/processor-flags.h>
 #include <asm/segment.h>
 
 #include <xen/interface/xen.h>
 
-#define RELOC(x, v)    .globl x##_reloc; x##_reloc=v
-#define ENDPATCH(x)    .globl x##_end; x##_end=.
-
-/* Pseudo-flag used for virtual NMI, which we don't implement yet */
-#define XEN_EFLAGS_NMI 0x80000000
-
-/*
-       Enable events.  This clears the event mask and tests the pending
-       event status with one and operation.  If there are pending
-       events, then enter the hypervisor to get them handled.
- */
-ENTRY(xen_irq_enable_direct)
-       /* Unmask events */
-       movb $0, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
-
-       /* Preempt here doesn't matter because that will deal with
-          any pending interrupts.  The pending check may end up being
-          run on the wrong CPU, but that doesn't hurt. */
-
-       /* Test for pending */
-       testb $0xff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
-       jz 1f
-
-2:     call check_events
-1:
-ENDPATCH(xen_irq_enable_direct)
-       ret
-       ENDPROC(xen_irq_enable_direct)
-       RELOC(xen_irq_enable_direct, 2b+1)
-
-
-/*
-       Disabling events is simply a matter of making the event mask
-       non-zero.
- */
-ENTRY(xen_irq_disable_direct)
-       movb $1, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
-ENDPATCH(xen_irq_disable_direct)
-       ret
-       ENDPROC(xen_irq_disable_direct)
-       RELOC(xen_irq_disable_direct, 0)
+#include "xen-asm.h"
 
 /*
-       (xen_)save_fl is used to get the current interrupt enable status.
-       Callers expect the status to be in X86_EFLAGS_IF, and other bits
-       may be set in the return value.  We take advantage of this by
-       making sure that X86_EFLAGS_IF has the right value (and other bits
-       in that byte are 0), but other bits in the return value are
-       undefined.  We need to toggle the state of the bit, because
-       Xen and x86 use opposite senses (mask vs enable).
+ * Force an event check by making a hypercall, but preserve regs
+ * before making the call.
  */
-ENTRY(xen_save_fl_direct)
-       testb $0xff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
-       setz %ah
-       addb %ah,%ah
-ENDPATCH(xen_save_fl_direct)
-       ret
-       ENDPROC(xen_save_fl_direct)
-       RELOC(xen_save_fl_direct, 0)
-
-
-/*
-       In principle the caller should be passing us a value return
-       from xen_save_fl_direct, but for robustness sake we test only
-       the X86_EFLAGS_IF flag rather than the whole byte. After
-       setting the interrupt mask state, it checks for unmasked
-       pending events and enters the hypervisor to get them delivered
-       if so.
- */
-ENTRY(xen_restore_fl_direct)
-       testb $X86_EFLAGS_IF>>8, %ah
-       setz PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
-       /* Preempt here doesn't matter because that will deal with
-          any pending interrupts.  The pending check may end up being
-          run on the wrong CPU, but that doesn't hurt. */
-
-       /* check for unmasked and pending */
-       cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
-       jz 1f
-2:     call check_events
-1:
-ENDPATCH(xen_restore_fl_direct)
+check_events:
+       push %eax
+       push %ecx
+       push %edx
+       call xen_force_evtchn_callback
+       pop %edx
+       pop %ecx
+       pop %eax
        ret
-       ENDPROC(xen_restore_fl_direct)
-       RELOC(xen_restore_fl_direct, 2b+1)
 
 /*
-       We can't use sysexit directly, because we're not running in ring0.
-       But we can easily fake it up using iret.  Assuming xen_sysexit
-       is jumped to with a standard stack frame, we can just strip it
-       back to a standard iret frame and use iret.
+ * We can't use sysexit directly, because we're not running in ring0.
+ * But we can easily fake it up using iret.  Assuming xen_sysexit is
+ * jumped to with a standard stack frame, we can just strip it back to
+ * a standard iret frame and use iret.
  */
 ENTRY(xen_sysexit)
        movl PT_EAX(%esp), %eax                 /* Shouldn't be necessary? */
@@ -122,33 +48,31 @@ ENTRY(xen_sysexit)
 ENDPROC(xen_sysexit)
 
 /*
-       This is run where a normal iret would be run, with the same stack setup:
-             8: eflags
-             4: cs
-       esp-> 0: eip
-
-       This attempts to make sure that any pending events are dealt
-       with on return to usermode, but there is a small window in
-       which an event can happen just before entering usermode.  If
-       the nested interrupt ends up setting one of the TIF_WORK_MASK
-       pending work flags, they will not be tested again before
-       returning to usermode. This means that a process can end up
-       with pending work, which will be unprocessed until the process
-       enters and leaves the kernel again, which could be an
-       unbounded amount of time.  This means that a pending signal or
-       reschedule event could be indefinitely delayed.
-
-       The fix is to notice a nested interrupt in the critical
-       window, and if one occurs, then fold the nested interrupt into
-       the current interrupt stack frame, and re-process it
-       iteratively rather than recursively.  This means that it will
-       exit via the normal path, and all pending work will be dealt
-       with appropriately.
-
-       Because the nested interrupt handler needs to deal with the
-       current stack state in whatever form its in, we keep things
-       simple by only using a single register which is pushed/popped
-       on the stack.
+ * This is run where a normal iret would be run, with the same stack setup:
+ *     8: eflags
+ *     4: cs
+ *     esp-> 0: eip
+ *
+ * This attempts to make sure that any pending events are dealt with
+ * on return to usermode, but there is a small window in which an
+ * event can happen just before entering usermode.  If the nested
+ * interrupt ends up setting one of the TIF_WORK_MASK pending work
+ * flags, they will not be tested again before returning to
+ * usermode. This means that a process can end up with pending work,
+ * which will be unprocessed until the process enters and leaves the
+ * kernel again, which could be an unbounded amount of time.  This
+ * means that a pending signal or reschedule event could be
+ * indefinitely delayed.
+ *
+ * The fix is to notice a nested interrupt in the critical window, and
+ * if one occurs, then fold the nested interrupt into the current
+ * interrupt stack frame, and re-process it iteratively rather than
+ * recursively.  This means that it will exit via the normal path, and
+ * all pending work will be dealt with appropriately.
+ *
+ * Because the nested interrupt handler needs to deal with the current
+ * stack state in whatever form it's in, we keep things simple by only
+ * using a single register which is pushed/popped on the stack.
  */
 ENTRY(xen_iret)
        /* test eflags for special cases */
@@ -158,13 +82,15 @@ ENTRY(xen_iret)
        push %eax
        ESP_OFFSET=4    # bytes pushed onto stack
 
-       /* Store vcpu_info pointer for easy access.  Do it this
-          way to avoid having to reload %fs */
+       /*
+        * Store vcpu_info pointer for easy access.  Do it this way to
+        * avoid having to reload %fs
+        */
 #ifdef CONFIG_SMP
        GET_THREAD_INFO(%eax)
-       movl TI_cpu(%eax),%eax
-       movl __per_cpu_offset(,%eax,4),%eax
-       mov per_cpu__xen_vcpu(%eax),%eax
+       movl TI_cpu(%eax), %eax
+       movl __per_cpu_offset(,%eax,4), %eax
+       mov per_cpu__xen_vcpu(%eax), %eax
 #else
        movl per_cpu__xen_vcpu, %eax
 #endif
@@ -172,37 +98,46 @@ ENTRY(xen_iret)
        /* check IF state we're restoring */
        testb $X86_EFLAGS_IF>>8, 8+1+ESP_OFFSET(%esp)
 
-       /* Maybe enable events.  Once this happens we could get a
-          recursive event, so the critical region starts immediately
-          afterwards.  However, if that happens we don't end up
-          resuming the code, so we don't have to be worried about
-          being preempted to another CPU. */
+       /*
+        * Maybe enable events.  Once this happens we could get a
+        * recursive event, so the critical region starts immediately
+        * afterwards.  However, if that happens we don't end up
+        * resuming the code, so we don't have to be worried about
+        * being preempted to another CPU.
+        */
        setz XEN_vcpu_info_mask(%eax)
 xen_iret_start_crit:
 
        /* check for unmasked and pending */
        cmpw $0x0001, XEN_vcpu_info_pending(%eax)
 
-       /* If there's something pending, mask events again so we
-          can jump back into xen_hypervisor_callback */
+       /*
+        * If there's something pending, mask events again so we can
+        * jump back into xen_hypervisor_callback
+        */
        sete XEN_vcpu_info_mask(%eax)
 
        popl %eax
 
-       /* From this point on the registers are restored and the stack
-          updated, so we don't need to worry about it if we're preempted */
+       /*
+        * From this point on the registers are restored and the stack
+        * updated, so we don't need to worry about it if we're
+        * preempted
+        */
 iret_restore_end:
 
-       /* Jump to hypervisor_callback after fixing up the stack.
-          Events are masked, so jumping out of the critical
-          region is OK. */
+       /*
+        * Jump to hypervisor_callback after fixing up the stack.
+        * Events are masked, so jumping out of the critical region is
+        * OK.
+        */
        je xen_hypervisor_callback
 
 1:     iret
 xen_iret_end_crit:
-.section __ex_table,"a"
+.section __ex_table, "a"
        .align 4
-       .long 1b,iret_exc
+       .long 1b, iret_exc
 .previous
 
 hyper_iret:
@@ -212,55 +147,55 @@ hyper_iret:
        .globl xen_iret_start_crit, xen_iret_end_crit
 
 /*
-   This is called by xen_hypervisor_callback in entry.S when it sees
-   that the EIP at the time of interrupt was between xen_iret_start_crit
-   and xen_iret_end_crit.  We're passed the EIP in %eax so we can do
-   a more refined determination of what to do.
-
-   The stack format at this point is:
-       ----------------
-        ss             : (ss/esp may be present if we came from usermode)
-        esp            :
-        eflags         }  outer exception info
-        cs             }
-        eip            }
-       ---------------- <- edi (copy dest)
-        eax            :  outer eax if it hasn't been restored
-       ----------------
-        eflags         }  nested exception info
-        cs             }   (no ss/esp because we're nested
-        eip            }    from the same ring)
-        orig_eax       }<- esi (copy src)
-        - - - - - - - -
-        fs             }
-        es             }
-        ds             }  SAVE_ALL state
-        eax            }
-         :             :
-        ebx            }<- esp
-       ----------------
-
-   In order to deliver the nested exception properly, we need to shift
-   everything from the return addr up to the error code so it
-   sits just under the outer exception info.  This means that when we
-   handle the exception, we do it in the context of the outer exception
-   rather than starting a new one.
-
-   The only caveat is that if the outer eax hasn't been
-   restored yet (ie, it's still on stack), we need to insert
-   its value into the SAVE_ALL state before going on, since
-   it's usermode state which we eventually need to restore.
+ * This is called by xen_hypervisor_callback in entry.S when it sees
+ * that the EIP at the time of interrupt was between
+ * xen_iret_start_crit and xen_iret_end_crit.  We're passed the EIP in
+ * %eax so we can do a more refined determination of what to do.
+ *
+ * The stack format at this point is:
+ *     ----------------
+ *      ss             : (ss/esp may be present if we came from usermode)
+ *      esp            :
+ *      eflags         }  outer exception info
+ *      cs             }
+ *      eip            }
+ *     ---------------- <- edi (copy dest)
+ *      eax            :  outer eax if it hasn't been restored
+ *     ----------------
+ *      eflags         }  nested exception info
+ *      cs             }   (no ss/esp because we're nested
+ *      eip            }    from the same ring)
+ *      orig_eax       }<- esi (copy src)
+ *      - - - - - - - -
+ *      fs             }
+ *      es             }
+ *      ds             }  SAVE_ALL state
+ *      eax            }
+ *       :             :
+ *      ebx            }<- esp
+ *     ----------------
+ *
+ * In order to deliver the nested exception properly, we need to shift
+ * everything from the return addr up to the error code so it sits
+ * just under the outer exception info.  This means that when we
+ * handle the exception, we do it in the context of the outer
+ * exception rather than starting a new one.
+ *
+ * The only caveat is that if the outer eax hasn't been restored yet
+ * (ie, it's still on stack), we need to insert its value into the
+ * SAVE_ALL state before going on, since it's usermode state which we
+ * eventually need to restore.
  */
 ENTRY(xen_iret_crit_fixup)
        /*
-          Paranoia: Make sure we're really coming from kernel space.
-          One could imagine a case where userspace jumps into the
-          critical range address, but just before the CPU delivers a GP,
-          it decides to deliver an interrupt instead.  Unlikely?
-          Definitely.  Easy to avoid?  Yes.  The Intel documents
-          explicitly say that the reported EIP for a bad jump is the
-          jump instruction itself, not the destination, but some virtual
-          environments get this wrong.
+        * Paranoia: Make sure we're really coming from kernel space.
+        * One could imagine a case where userspace jumps into the
+        * critical range address, but just before the CPU delivers a
+        * GP, it decides to deliver an interrupt instead.  Unlikely?
+        * Definitely.  Easy to avoid?  Yes.  The Intel documents
+        * explicitly say that the reported EIP for a bad jump is the
+        * jump instruction itself, not the destination, but some
+        * virtual environments get this wrong.
         */
        movl PT_CS(%esp), %ecx
        andl $SEGMENT_RPL_MASK, %ecx
@@ -270,15 +205,17 @@ ENTRY(xen_iret_crit_fixup)
        lea PT_ORIG_EAX(%esp), %esi
        lea PT_EFLAGS(%esp), %edi
 
-       /* If eip is before iret_restore_end then stack
-          hasn't been restored yet. */
+       /*
+        * If eip is before iret_restore_end then stack
+        * hasn't been restored yet.
+        */
        cmp $iret_restore_end, %eax
        jae 1f
 
-       movl 0+4(%edi),%eax             /* copy EAX (just above top of frame) */
+       movl 0+4(%edi), %eax            /* copy EAX (just above top of frame) */
        movl %eax, PT_EAX(%esp)
 
-       lea ESP_OFFSET(%edi),%edi       /* move dest up over saved regs */
+       lea ESP_OFFSET(%edi), %edi      /* move dest up over saved regs */
 
        /* set up the copy */
 1:     std
@@ -286,20 +223,6 @@ ENTRY(xen_iret_crit_fixup)
        rep movsl
        cld
 
-       lea 4(%edi),%esp                /* point esp to new frame */
+       lea 4(%edi), %esp               /* point esp to new frame */
 2:     jmp xen_do_upcall
 
-
-/*
-       Force an event check by making a hypercall,
-       but preserve regs before making the call.
- */
-check_events:
-       push %eax
-       push %ecx
-       push %edx
-       call xen_force_evtchn_callback
-       pop %edx
-       pop %ecx
-       pop %eax
-       ret
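The deleted comments above describe how xen_save_fl_direct maps Xen's per-vcpu event mask onto X86_EFLAGS_IF; the two have opposite senses, so the bit has to be toggled. A rough C sketch of that conversion, purely for illustration and not part of the patch (needs X86_EFLAGS_IF from <asm/processor-flags.h>):

	/* Illustration only: a zero Xen mask byte means events are enabled. */
	static unsigned long xen_save_fl_sketch(unsigned char vcpu_info_mask)
	{
		return vcpu_info_mask ? 0 : X86_EFLAGS_IF;
	}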
index 05794c5..02f496a 100644 (file)
 /*
-       Asm versions of Xen pv-ops, suitable for either direct use or inlining.
-       The inline versions are the same as the direct-use versions, with the
-       pre- and post-amble chopped off.
-
-       This code is encoded for size rather than absolute efficiency,
-       with a view to being able to inline as much as possible.
-
-       We only bother with direct forms (ie, vcpu in pda) of the operations
-       here; the indirect forms are better handled in C, since they're
-       generally too large to inline anyway.
+ * Asm versions of Xen pv-ops, suitable for either direct use or
+ * inlining.  The inline versions are the same as the direct-use
+ * versions, with the pre- and post-amble chopped off.
+ *
+ * This code is encoded for size rather than absolute efficiency, with
+ * a view to being able to inline as much as possible.
+ *
+ * We only bother with direct forms (ie, vcpu in pda) of the
+ * operations here; the indirect forms are better handled in C, since
+ * they're generally too large to inline anyway.
  */
 
-#include <linux/linkage.h>
-
-#include <asm/asm-offsets.h>
-#include <asm/processor-flags.h>
 #include <asm/errno.h>
+#include <asm/percpu.h>
+#include <asm/processor-flags.h>
 #include <asm/segment.h>
 
 #include <xen/interface/xen.h>
 
-#define RELOC(x, v)    .globl x##_reloc; x##_reloc=v
-#define ENDPATCH(x)    .globl x##_end; x##_end=.
-
-/* Pseudo-flag used for virtual NMI, which we don't implement yet */
-#define XEN_EFLAGS_NMI 0x80000000
-
-#if 1
-/*
-       x86-64 does not yet support direct access to percpu variables
-       via a segment override, so we just need to make sure this code
-       never gets used
- */
-#define BUG                    ud2a
-#define PER_CPU_VAR(var, off)  0xdeadbeef
-#endif
-
-/*
-       Enable events.  This clears the event mask and tests the pending
-       event status with one and operation.  If there are pending
-       events, then enter the hypervisor to get them handled.
- */
-ENTRY(xen_irq_enable_direct)
-       BUG
-
-       /* Unmask events */
-       movb $0, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
-
-       /* Preempt here doesn't matter because that will deal with
-          any pending interrupts.  The pending check may end up being
-          run on the wrong CPU, but that doesn't hurt. */
-
-       /* Test for pending */
-       testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
-       jz 1f
-
-2:     call check_events
-1:
-ENDPATCH(xen_irq_enable_direct)
-       ret
-       ENDPROC(xen_irq_enable_direct)
-       RELOC(xen_irq_enable_direct, 2b+1)
-
-/*
-       Disabling events is simply a matter of making the event mask
-       non-zero.
- */
-ENTRY(xen_irq_disable_direct)
-       BUG
-
-       movb $1, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
-ENDPATCH(xen_irq_disable_direct)
-       ret
-       ENDPROC(xen_irq_disable_direct)
-       RELOC(xen_irq_disable_direct, 0)
-
-/*
-       (xen_)save_fl is used to get the current interrupt enable status.
-       Callers expect the status to be in X86_EFLAGS_IF, and other bits
-       may be set in the return value.  We take advantage of this by
-       making sure that X86_EFLAGS_IF has the right value (and other bits
-       in that byte are 0), but other bits in the return value are
-       undefined.  We need to toggle the state of the bit, because
-       Xen and x86 use opposite senses (mask vs enable).
- */
-ENTRY(xen_save_fl_direct)
-       BUG
-
-       testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
-       setz %ah
-       addb %ah,%ah
-ENDPATCH(xen_save_fl_direct)
-       ret
-       ENDPROC(xen_save_fl_direct)
-       RELOC(xen_save_fl_direct, 0)
-
-/*
-       In principle the caller should be passing us a value return
-       from xen_save_fl_direct, but for robustness sake we test only
-       the X86_EFLAGS_IF flag rather than the whole byte. After
-       setting the interrupt mask state, it checks for unmasked
-       pending events and enters the hypervisor to get them delivered
-       if so.
- */
-ENTRY(xen_restore_fl_direct)
-       BUG
-
-       testb $X86_EFLAGS_IF>>8, %ah
-       setz PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
-       /* Preempt here doesn't matter because that will deal with
-          any pending interrupts.  The pending check may end up being
-          run on the wrong CPU, but that doesn't hurt. */
-
-       /* check for unmasked and pending */
-       cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
-       jz 1f
-2:     call check_events
-1:
-ENDPATCH(xen_restore_fl_direct)
-       ret
-       ENDPROC(xen_restore_fl_direct)
-       RELOC(xen_restore_fl_direct, 2b+1)
-
-
-/*
-       Force an event check by making a hypercall,
-       but preserve regs before making the call.
- */
-check_events:
-       push %rax
-       push %rcx
-       push %rdx
-       push %rsi
-       push %rdi
-       push %r8
-       push %r9
-       push %r10
-       push %r11
-       call xen_force_evtchn_callback
-       pop %r11
-       pop %r10
-       pop %r9
-       pop %r8
-       pop %rdi
-       pop %rsi
-       pop %rdx
-       pop %rcx
-       pop %rax
-       ret
+#include "xen-asm.h"
 
 ENTRY(xen_adjust_exception_frame)
-       mov 8+0(%rsp),%rcx
-       mov 8+8(%rsp),%r11
+       mov 8+0(%rsp), %rcx
+       mov 8+8(%rsp), %r11
        ret $16
 
 hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
 /*
-       Xen64 iret frame:
-
-       ss
-       rsp
-       rflags
-       cs
-       rip             <-- standard iret frame
-
-       flags
-
-       rcx             }
-       r11             }<-- pushed by hypercall page
-rsp -> rax             }
+ * Xen64 iret frame:
+ *
+ *     ss
+ *     rsp
+ *     rflags
+ *     cs
+ *     rip             <-- standard iret frame
+ *
+ *     flags
+ *
+ *     rcx             }
+ *     r11             }<-- pushed by hypercall page
+ * rsp->rax            }
  */
 ENTRY(xen_iret)
        pushq $0
@@ -177,8 +48,8 @@ ENDPATCH(xen_iret)
 RELOC(xen_iret, 1b+1)
 
 /*
-       sysexit is not used for 64-bit processes, so it's
-       only ever used to return to 32-bit compat userspace.
+ * sysexit is not used for 64-bit processes, so it's only ever used to
+ * return to 32-bit compat userspace.
  */
 ENTRY(xen_sysexit)
        pushq $__USER32_DS
@@ -193,13 +64,15 @@ ENDPATCH(xen_sysexit)
 RELOC(xen_sysexit, 1b+1)
 
 ENTRY(xen_sysret64)
-       /* We're already on the usermode stack at this point, but still
-          with the kernel gs, so we can easily switch back */
-       movq %rsp, %gs:pda_oldrsp
-       movq %gs:pda_kernelstack,%rsp
+       /*
+        * We're already on the usermode stack at this point, but
+        * still with the kernel gs, so we can easily switch back
+        */
+       movq %rsp, PER_CPU_VAR(old_rsp)
+       movq PER_CPU_VAR(kernel_stack), %rsp
 
        pushq $__USER_DS
-       pushq %gs:pda_oldrsp
+       pushq PER_CPU_VAR(old_rsp)
        pushq %r11
        pushq $__USER_CS
        pushq %rcx
@@ -210,13 +83,15 @@ ENDPATCH(xen_sysret64)
 RELOC(xen_sysret64, 1b+1)
 
 ENTRY(xen_sysret32)
-       /* We're already on the usermode stack at this point, but still
-          with the kernel gs, so we can easily switch back */
-       movq %rsp, %gs:pda_oldrsp
-       movq %gs:pda_kernelstack, %rsp
+       /*
+        * We're already on the usermode stack at this point, but
+        * still with the kernel gs, so we can easily switch back
+        */
+       movq %rsp, PER_CPU_VAR(old_rsp)
+       movq PER_CPU_VAR(kernel_stack), %rsp
 
        pushq $__USER32_DS
-       pushq %gs:pda_oldrsp
+       pushq PER_CPU_VAR(old_rsp)
        pushq %r11
        pushq $__USER32_CS
        pushq %rcx
@@ -227,28 +102,27 @@ ENDPATCH(xen_sysret32)
 RELOC(xen_sysret32, 1b+1)
 
 /*
-       Xen handles syscall callbacks much like ordinary exceptions,
-       which means we have:
-        - kernel gs
-        - kernel rsp
-        - an iret-like stack frame on the stack (including rcx and r11):
-               ss
-               rsp
-               rflags
-               cs
-               rip
-               r11
-       rsp->   rcx
-
-       In all the entrypoints, we undo all that to make it look
-       like a CPU-generated syscall/sysenter and jump to the normal
-       entrypoint.
+ * Xen handles syscall callbacks much like ordinary exceptions, which
+ * means we have:
+ * - kernel gs
+ * - kernel rsp
+ * - an iret-like stack frame on the stack (including rcx and r11):
+ *     ss
+ *     rsp
+ *     rflags
+ *     cs
+ *     rip
+ *     r11
+ * rsp->rcx
+ *
+ * In all the entrypoints, we undo all that to make it look like a
+ * CPU-generated syscall/sysenter and jump to the normal entrypoint.
  */
 
 .macro undo_xen_syscall
-       mov 0*8(%rsp),%rcx
-       mov 1*8(%rsp),%r11
-       mov 5*8(%rsp),%rsp
+       mov 0*8(%rsp), %rcx
+       mov 1*8(%rsp), %r11
+       mov 5*8(%rsp), %rsp
 .endm
 
 /* Normal 64-bit system call target */
@@ -275,7 +149,7 @@ ENDPROC(xen_sysenter_target)
 
 ENTRY(xen_syscall32_target)
 ENTRY(xen_sysenter_target)
-       lea 16(%rsp), %rsp      /* strip %rcx,%r11 */
+       lea 16(%rsp), %rsp      /* strip %rcx, %r11 */
        mov $-ENOSYS, %rax
        pushq $VGCF_in_syscall
        jmp hypercall_iret
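The undo_xen_syscall macro above peels %rcx, %r11 and the saved %rsp back out of the frame the hypercall page pushes. As a sketch, the frame described in the comment can be pictured as a struct (illustration only; the field names are ours, lowest address, where %rsp points on entry, first):

	struct xen_syscall_frame {
		unsigned long rcx;	/* 0*8(%rsp) */
		unsigned long r11;	/* 1*8(%rsp) */
		unsigned long rip;
		unsigned long cs;
		unsigned long rflags;
		unsigned long rsp;	/* 5*8(%rsp): user stack pointer */
		unsigned long ss;
	};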
index c1f8faf..2f5ef26 100644 (file)
 extern const char xen_hypervisor_callback[];
 extern const char xen_failsafe_callback[];
 
+extern void *xen_initial_gdt;
+
 struct trap_info;
 void xen_copy_trap_info(struct trap_info *traps);
 
+DECLARE_PER_CPU(struct vcpu_info, xen_vcpu_info);
 DECLARE_PER_CPU(unsigned long, xen_cr3);
 DECLARE_PER_CPU(unsigned long, xen_current_cr3);
 
@@ -22,6 +25,13 @@ extern struct shared_info *HYPERVISOR_shared_info;
 
 void xen_setup_mfn_list_list(void);
 void xen_setup_shared_info(void);
+void xen_setup_machphys_mapping(void);
+pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
+void xen_ident_map_ISA(void);
+void xen_reserve_top(void);
+
+void xen_leave_lazy(void);
+void xen_post_allocator_init(void);
 
 char * __init xen_memory_setup(void);
 void __init xen_arch_setup(void);
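DECLARE_PER_CPU() in this header only declares the per-cpu variable; a matching definition is assumed to live in one of the Xen .c files (not shown in this hunk), along the lines of:

	DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);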
index c3e841f..ab0aff3 100644 (file)
@@ -365,7 +365,7 @@ ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_get_table
+ * FUNCTION:    acpi_get_table_with_size
  *
  * PARAMETERS:  Signature           - ACPI signature of needed table
  *              Instance            - Which instance (for SSDTs)
@@ -377,8 +377,9 @@ ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
  *
  *****************************************************************************/
 acpi_status
-acpi_get_table(char *signature,
-              u32 instance, struct acpi_table_header **out_table)
+acpi_get_table_with_size(char *signature,
+              u32 instance, struct acpi_table_header **out_table,
+              acpi_size *tbl_size)
 {
        u32 i;
        u32 j;
@@ -408,6 +409,7 @@ acpi_get_table(char *signature,
                    acpi_tb_verify_table(&acpi_gbl_root_table_list.tables[i]);
                if (ACPI_SUCCESS(status)) {
                        *out_table = acpi_gbl_root_table_list.tables[i].pointer;
+                       *tbl_size = acpi_gbl_root_table_list.tables[i].length;
                }
 
                if (!acpi_gbl_permanent_mmap) {
@@ -420,6 +422,15 @@ acpi_get_table(char *signature,
        return (AE_NOT_FOUND);
 }
 
+acpi_status
+acpi_get_table(char *signature,
+              u32 instance, struct acpi_table_header **out_table)
+{
+       acpi_size tbl_size;
+
+       return acpi_get_table_with_size(signature,
+                      instance, out_table, &tbl_size);
+}
 ACPI_EXPORT_SYMBOL(acpi_get_table)
 
 /*******************************************************************************
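Callers of the new acpi_get_table_with_size() are expected to pair it with early_acpi_os_unmap_memory(), as the tables.c hunks further down do. A minimal sketch of that calling pattern:

	struct acpi_table_header *table = NULL;
	acpi_size tbl_size;

	if (ACPI_SUCCESS(acpi_get_table_with_size(ACPI_SIG_MADT, 0,
						  &table, &tbl_size)) && table) {
		/* ... walk the table while it is mapped ... */
		early_acpi_os_unmap_memory(table, tbl_size);
	}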
index b3193ec..d1dd516 100644 (file)
@@ -274,12 +274,19 @@ EXPORT_SYMBOL_GPL(acpi_os_map_memory);
 
 void acpi_os_unmap_memory(void __iomem * virt, acpi_size size)
 {
-       if (acpi_gbl_permanent_mmap) {
+       if (acpi_gbl_permanent_mmap)
                iounmap(virt);
-       }
+       else
+               __acpi_unmap_table(virt, size);
 }
 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
 
+void early_acpi_os_unmap_memory(void __iomem * virt, acpi_size size)
+{
+       if (!acpi_gbl_permanent_mmap)
+               __acpi_unmap_table(virt, size);
+}
+
 #ifdef ACPI_FUTURE_USAGE
 acpi_status
 acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
index a885295..fec1ae3 100644 (file)
@@ -181,14 +181,15 @@ acpi_table_parse_entries(char *id,
        struct acpi_subtable_header *entry;
        unsigned int count = 0;
        unsigned long table_end;
+       acpi_size tbl_size;
 
        if (!handler)
                return -EINVAL;
 
        if (strncmp(id, ACPI_SIG_MADT, 4) == 0)
-               acpi_get_table(id, acpi_apic_instance, &table_header);
+               acpi_get_table_with_size(id, acpi_apic_instance, &table_header, &tbl_size);
        else
-               acpi_get_table(id, 0, &table_header);
+               acpi_get_table_with_size(id, 0, &table_header, &tbl_size);
 
        if (!table_header) {
                printk(KERN_WARNING PREFIX "%4.4s not present\n", id);
@@ -206,8 +207,10 @@ acpi_table_parse_entries(char *id,
               table_end) {
                if (entry->type == entry_id
                    && (!max_entries || count++ < max_entries))
-                       if (handler(entry, table_end))
+                       if (handler(entry, table_end)) {
+                               early_acpi_os_unmap_memory((char *)table_header, tbl_size);
                                return -EINVAL;
+                       }
 
                entry = (struct acpi_subtable_header *)
                    ((unsigned long)entry + entry->length);
@@ -217,6 +220,7 @@ acpi_table_parse_entries(char *id,
                       "%i found\n", id, entry_id, count - max_entries, count);
        }
 
+       early_acpi_os_unmap_memory((char *)table_header, tbl_size);
        return count;
 }
 
@@ -241,17 +245,19 @@ acpi_table_parse_madt(enum acpi_madt_type id,
 int __init acpi_table_parse(char *id, acpi_table_handler handler)
 {
        struct acpi_table_header *table = NULL;
+       acpi_size tbl_size;
 
        if (!handler)
                return -EINVAL;
 
        if (strncmp(id, ACPI_SIG_MADT, 4) == 0)
-               acpi_get_table(id, acpi_apic_instance, &table);
+               acpi_get_table_with_size(id, acpi_apic_instance, &table, &tbl_size);
        else
-               acpi_get_table(id, 0, &table);
+               acpi_get_table_with_size(id, 0, &table, &tbl_size);
 
        if (table) {
                handler(table);
+               early_acpi_os_unmap_memory(table, tbl_size);
                return 0;
        } else
                return 1;
@@ -265,8 +271,9 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler)
 static void __init check_multiple_madt(void)
 {
        struct acpi_table_header *table = NULL;
+       acpi_size tbl_size;
 
-       acpi_get_table(ACPI_SIG_MADT, 2, &table);
+       acpi_get_table_with_size(ACPI_SIG_MADT, 2, &table, &tbl_size);
        if (table) {
                printk(KERN_WARNING PREFIX
                       "BIOS bug: multiple APIC/MADT found,"
@@ -275,6 +282,7 @@ static void __init check_multiple_madt(void)
                       "If \"acpi_apic_instance=%d\" works better, "
                       "notify linux-acpi@vger.kernel.org\n",
                       acpi_apic_instance ? 0 : 2);
+               early_acpi_os_unmap_memory(table, tbl_size);
 
        } else
                acpi_apic_instance = 0;
index 719ee5c..5b257a5 100644 (file)
@@ -107,7 +107,7 @@ static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL);
 /*
  * Print cpu online, possible, present, and system maps
  */
-static ssize_t print_cpus_map(char *buf, cpumask_t *map)
+static ssize_t print_cpus_map(char *buf, const struct cpumask *map)
 {
        int n = cpulist_scnprintf(buf, PAGE_SIZE-2, map);
 
index a778fb5..bf6b132 100644 (file)
 #include <linux/hardirq.h>
 #include <linux/topology.h>
 
-#define define_one_ro(_name)           \
+#define define_one_ro_named(_name, _func)                              \
+static SYSDEV_ATTR(_name, 0444, _func, NULL)
+
+#define define_one_ro(_name)                           \
 static SYSDEV_ATTR(_name, 0444, show_##_name, NULL)
 
 #define define_id_show_func(name)                              \
@@ -42,8 +45,8 @@ static ssize_t show_##name(struct sys_device *dev,            \
        return sprintf(buf, "%d\n", topology_##name(cpu));      \
 }
 
-#if defined(topology_thread_siblings) || defined(topology_core_siblings)
-static ssize_t show_cpumap(int type, cpumask_t *mask, char *buf)
+#if defined(topology_thread_cpumask) || defined(topology_core_cpumask)
+static ssize_t show_cpumap(int type, const struct cpumask *mask, char *buf)
 {
        ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
        int n = 0;
@@ -65,7 +68,7 @@ static ssize_t show_##name(struct sys_device *dev,                    \
                           struct sysdev_attribute *attr, char *buf)    \
 {                                                                      \
        unsigned int cpu = dev->id;                                     \
-       return show_cpumap(0, &(topology_##name(cpu)), buf);            \
+       return show_cpumap(0, topology_##name(cpu), buf);               \
 }
 
 #define define_siblings_show_list(name)                                        \
@@ -74,7 +77,7 @@ static ssize_t show_##name##_list(struct sys_device *dev,             \
                                  char *buf)                            \
 {                                                                      \
        unsigned int cpu = dev->id;                                     \
-       return show_cpumap(1, &(topology_##name(cpu)), buf);            \
+       return show_cpumap(1, topology_##name(cpu), buf);               \
 }
 
 #else
@@ -82,9 +85,7 @@ static ssize_t show_##name##_list(struct sys_device *dev,             \
 static ssize_t show_##name(struct sys_device *dev,                     \
                           struct sysdev_attribute *attr, char *buf)    \
 {                                                                      \
-       unsigned int cpu = dev->id;                                     \
-       cpumask_t mask = topology_##name(cpu);                          \
-       return show_cpumap(0, &mask, buf);                              \
+       return show_cpumap(0, topology_##name(dev->id), buf);           \
 }
 
 #define define_siblings_show_list(name)                                        \
@@ -92,9 +93,7 @@ static ssize_t show_##name##_list(struct sys_device *dev,             \
                                  struct sysdev_attribute *attr,        \
                                  char *buf)                            \
 {                                                                      \
-       unsigned int cpu = dev->id;                                     \
-       cpumask_t mask = topology_##name(cpu);                          \
-       return show_cpumap(1, &mask, buf);                              \
+       return show_cpumap(1, topology_##name(dev->id), buf);           \
 }
 #endif
 
@@ -107,13 +106,13 @@ define_one_ro(physical_package_id);
 define_id_show_func(core_id);
 define_one_ro(core_id);
 
-define_siblings_show_func(thread_siblings);
-define_one_ro(thread_siblings);
-define_one_ro(thread_siblings_list);
+define_siblings_show_func(thread_cpumask);
+define_one_ro_named(thread_siblings, show_thread_cpumask);
+define_one_ro_named(thread_siblings_list, show_thread_cpumask_list);
 
-define_siblings_show_func(core_siblings);
-define_one_ro(core_siblings);
-define_one_ro(core_siblings_list);
+define_siblings_show_func(core_cpumask);
+define_one_ro_named(core_siblings, show_core_cpumask);
+define_one_ro_named(core_siblings_list, show_core_cpumask_list);
 
 static struct attribute *default_attrs[] = {
        &attr_physical_package_id.attr,
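For reference, the renamed sysfs attributes resolve through the new helper macro; define_one_ro_named(thread_siblings, show_thread_cpumask) expands to:

	static SYSDEV_ATTR(thread_siblings, 0444, show_thread_cpumask, NULL);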
index e1129fa..ee19b6e 100644 (file)
@@ -143,7 +143,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_LE,
 #endif
 
 #ifndef CONFIG_X86_64
-#include "mach_timer.h"
+#include <asm/mach_timer.h>
 #define PMTMR_EXPECTED_RATE \
   ((CALIBRATE_LATCH * (PMTMR_TICKS_PER_SEC >> 10)) / (CLOCK_TICK_RATE>>10))
 /*
index 1bde303..8615059 100644 (file)
@@ -7,7 +7,7 @@
 #include <asm/pgtable.h>
 #include <asm/io.h>
 
-#include "mach_timer.h"
+#include <asm/mach_timer.h>
 
 #define CYCLONE_CBAR_ADDR      0xFEB00CD0      /* base address ptr */
 #define CYCLONE_PMCC_OFFSET    0x51A0          /* offset to control register */
index c064657..2705284 100644 (file)
@@ -3,7 +3,7 @@
 #
 config EISA_VLB_PRIMING
        bool "Vesa Local Bus priming"
-       depends on X86_PC && EISA
+       depends on X86 && EISA
        default n
        ---help---
          Activate this option if your system contains a Vesa Local
@@ -24,11 +24,11 @@ config EISA_PCI_EISA
          When in doubt, say Y.
 
 # Using EISA_VIRTUAL_ROOT on something other than an Alpha or
-# an X86_PC may lead to crashes...
+# an X86 may lead to crashes...
 
 config EISA_VIRTUAL_ROOT
        bool "EISA virtual root device"
-       depends on EISA && (ALPHA || X86_PC)
+       depends on EISA && (ALPHA || X86)
        default y
        ---help---
          Activate this option if your system only have EISA bus
index 777fba4..3009e01 100644 (file)
@@ -244,7 +244,7 @@ static ssize_t host_control_on_shutdown_store(struct device *dev,
  */
 int dcdbas_smi_request(struct smi_cmd *smi_cmd)
 {
-       cpumask_t old_mask;
+       cpumask_var_t old_mask;
        int ret = 0;
 
        if (smi_cmd->magic != SMI_CMD_MAGIC) {
@@ -254,8 +254,11 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd)
        }
 
        /* SMI requires CPU 0 */
-       old_mask = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu(0));
+       if (!alloc_cpumask_var(&old_mask, GFP_KERNEL))
+               return -ENOMEM;
+
+       cpumask_copy(old_mask, &current->cpus_allowed);
+       set_cpus_allowed_ptr(current, cpumask_of(0));
        if (smp_processor_id() != 0) {
                dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
                        __func__);
@@ -275,7 +278,8 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd)
        );
 
 out:
-       set_cpus_allowed_ptr(current, &old_mask);
+       set_cpus_allowed_ptr(current, old_mask);
+       free_cpumask_var(old_mask);
        return ret;
 }
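The conversion above follows the usual cpumask_var_t pattern: allocate an off-stack mask, save the caller's affinity, pin to CPU 0, then restore and free. As a standalone sketch of that pattern:

	cpumask_var_t old_mask;

	if (!alloc_cpumask_var(&old_mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_copy(old_mask, &current->cpus_allowed);
	set_cpus_allowed_ptr(current, cpumask_of(0));
	/* ... work that must run on CPU 0 ... */
	set_cpus_allowed_ptr(current, old_mask);
	free_cpumask_var(old_mask);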
 
index 3ab3e4a..7b7ddc2 100644 (file)
@@ -938,8 +938,8 @@ static int __init ibft_init(void)
                return -ENOMEM;
 
        if (ibft_addr) {
-               printk(KERN_INFO "iBFT detected at 0x%lx.\n",
-                      virt_to_phys((void *)ibft_addr));
+               printk(KERN_INFO "iBFT detected at 0x%llx.\n",
+                      (u64)virt_to_phys((void *)ibft_addr));
 
                rc = ibft_check_device();
                if (rc)
index 8df849f..b756f04 100644 (file)
@@ -678,9 +678,9 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
        *start = &buf[offset];
        *eof = 0;
 
-       DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
+       DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%llx\n",
                       atomic_read(&dev->vma_count),
-                      high_memory, virt_to_phys(high_memory));
+                      high_memory, (u64)virt_to_phys(high_memory));
        list_for_each_entry(pt, &dev->vmalist, head) {
                if (!(vma = pt->vma))
                        continue;
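These printk fixes all use the same idiom: a physical address may be wider than unsigned long (e.g. with PAE), so it is cast to u64 and printed with %llx. Sketch:

	printk(KERN_INFO "high_memory at 0x%llx\n",
	       (u64)virt_to_phys(high_memory));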
index 3556168..ea2638b 100644 (file)
@@ -13,11 +13,11 @@ menuconfig INPUT_KEYBOARD
 if INPUT_KEYBOARD
 
 config KEYBOARD_ATKBD
-       tristate "AT keyboard" if EMBEDDED || !X86_PC
+       tristate "AT keyboard" if EMBEDDED || !X86
        default y
        select SERIO
        select SERIO_LIBPS2
-       select SERIO_I8042 if X86_PC
+       select SERIO_I8042 if X86
        select SERIO_GSCPS2 if GSC
        help
          Say Y here if you want to use a standard AT or PS/2 keyboard. Usually
index 093c8c1..9bef935 100644 (file)
@@ -17,7 +17,7 @@ config MOUSE_PS2
        default y
        select SERIO
        select SERIO_LIBPS2
-       select SERIO_I8042 if X86_PC
+       select SERIO_I8042 if X86
        select SERIO_GSCPS2 if GSC
        help
          Say Y here if you have a PS/2 mouse connected to your system. This
index c64e679..1c48408 100644 (file)
@@ -162,7 +162,7 @@ config ENCLOSURE_SERVICES
 config SGI_XP
        tristate "Support communication between SGI SSIs"
        depends on NET
-       depends on (IA64_GENERIC || IA64_SGI_SN2 || IA64_SGI_UV || X86_64) && SMP
+       depends on (IA64_GENERIC || IA64_SGI_SN2 || IA64_SGI_UV || X86_UV) && SMP
        select IA64_UNCACHED_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
        select GENERIC_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
        select SGI_GRU if (IA64_GENERIC || IA64_SGI_UV || X86_64) && SMP
@@ -189,7 +189,7 @@ config HP_ILO
 
 config SGI_GRU
        tristate "SGI GRU driver"
-       depends on (X86_64 || IA64_SGI_UV || IA64_GENERIC) && SMP
+       depends on (X86_UV || IA64_SGI_UV || IA64_GENERIC) && SMP
        default n
        select MMU_NOTIFIER
        ---help---
index 6509838..c67e4e8 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/proc_fs.h>
 #include <linux/uaccess.h>
+#include <asm/uv/uv.h>
 #include "gru.h"
 #include "grulib.h"
 #include "grutables.h"
 
-#if defined CONFIG_X86_64
-#include <asm/genapic.h>
-#include <asm/irq.h>
-#define IS_UV()                is_uv_system()
-#elif defined CONFIG_IA64
-#include <asm/system.h>
-#include <asm/sn/simulator.h>
-/* temp support for running on hardware simulator */
-#define IS_UV()                IS_MEDUSA() || ia64_platform_is("uv")
-#else
-#define IS_UV()                0
-#endif
-
 #include <asm/uv/uv_hub.h>
 #include <asm/uv/uv_mmrs.h>
 
@@ -381,7 +369,7 @@ static int __init gru_init(void)
        char id[10];
        void *gru_start_vaddr;
 
-       if (!IS_UV())
+       if (!is_uv_system())
                return 0;
 
 #if defined CONFIG_IA64
@@ -451,7 +439,7 @@ static void __exit gru_exit(void)
        int order = get_order(sizeof(struct gru_state) *
                              GRU_CHIPLETS_PER_BLADE);
 
-       if (!IS_UV())
+       if (!is_uv_system())
                return;
 
        for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++)
index 7b4cbd5..2275126 100644 (file)
 
 #include <linux/mutex.h>
 
-#ifdef CONFIG_IA64
+#if defined CONFIG_X86_UV || defined CONFIG_IA64_SGI_UV
+#include <asm/uv/uv.h>
+#define is_uv()                is_uv_system()
+#endif
+
+#ifndef is_uv
+#define is_uv()                0
+#endif
+
+#if defined CONFIG_IA64
 #include <asm/system.h>
 #include <asm/sn/arch.h>       /* defines is_shub1() and is_shub2() */
 #define is_shub()      ia64_platform_is("sn2")
-#ifdef CONFIG_IA64_SGI_UV
-#define is_uv()                ia64_platform_is("uv")
-#else
-#define is_uv()                0
-#endif
-#endif
-#ifdef CONFIG_X86_64
-#include <asm/genapic.h>
-#define is_uv()                is_uv_system()
 #endif
 
 #ifndef is_shub1
 #define is_shub()      0
 #endif
 
-#ifndef is_uv
-#define is_uv()                0
-#endif
-
 #ifdef USE_DBUG_ON
 #define DBUG_ON(condition)     BUG_ON(condition)
 #else
index 89218f7..6576170 100644 (file)
@@ -318,7 +318,7 @@ xpc_hb_checker(void *ignore)
 
        /* this thread was marked active by xpc_hb_init() */
 
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu(XPC_HB_CHECK_CPU));
+       set_cpus_allowed_ptr(current, cpumask_of(XPC_HB_CHECK_CPU));
 
        /* set our heartbeating to other partitions into motion */
        xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
index 8b12e6e..2ff8879 100644 (file)
@@ -273,7 +273,7 @@ config MTD_NAND_CAFE
 
 config MTD_NAND_CS553X
        tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)"
-       depends on X86_32 && (X86_PC || X86_GENERICARCH)
+       depends on X86_32
        help
          The CS553x companion chips for the AMD Geode processor
          include NAND flash controllers with built-in hardware ECC
index fac43fd..6a843f7 100644 (file)
@@ -150,7 +150,8 @@ static int __init ne3210_eisa_probe (struct device *device)
                if (phys_mem < virt_to_phys(high_memory)) {
                        printk(KERN_CRIT "ne3210.c: Card RAM overlaps with normal memory!!!\n");
                        printk(KERN_CRIT "ne3210.c: Use EISA SCU to set card memory below 1MB,\n");
-                       printk(KERN_CRIT "ne3210.c: or to an address above 0x%lx.\n", virt_to_phys(high_memory));
+                       printk(KERN_CRIT "ne3210.c: or to an address above 0x%llx.\n",
+                               (u64)virt_to_phys(high_memory));
                        printk(KERN_CRIT "ne3210.c: Driver NOT installed.\n");
                        retval = -EINVAL;
                        goto out3;
index ab0e09b..847e9bb 100644 (file)
@@ -854,20 +854,27 @@ static void efx_fini_io(struct efx_nic *efx)
  * interrupts across them. */
 static int efx_wanted_rx_queues(void)
 {
-       cpumask_t core_mask;
+       cpumask_var_t core_mask;
        int count;
        int cpu;
 
-       cpus_clear(core_mask);
+       if (!alloc_cpumask_var(&core_mask, GFP_KERNEL)) {
+               printk(KERN_WARNING
+                      "efx.c: allocation failure, irq balancing hobbled\n");
+               return 1;
+       }
+
+       cpumask_clear(core_mask);
        count = 0;
        for_each_online_cpu(cpu) {
-               if (!cpu_isset(cpu, core_mask)) {
+               if (!cpumask_test_cpu(cpu, core_mask)) {
                        ++count;
-                       cpus_or(core_mask, core_mask,
-                               topology_core_siblings(cpu));
+                       cpumask_or(core_mask, core_mask,
+                                  topology_core_cpumask(cpu));
                }
        }
 
+       free_cpumask_var(core_mask);
        return count;
 }
 
index d5378e6..064307c 100644 (file)
@@ -338,10 +338,10 @@ static int falcon_alloc_special_buffer(struct efx_nic *efx,
        nic_data->next_buffer_table += buffer->entries;
 
        EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
-               "(virt %p phys %lx)\n", buffer->index,
+               "(virt %p phys %llx)\n", buffer->index,
                buffer->index + buffer->entries - 1,
-               (unsigned long long)buffer->dma_addr, len,
-               buffer->addr, virt_to_phys(buffer->addr));
+               (u64)buffer->dma_addr, len,
+               buffer->addr, (u64)virt_to_phys(buffer->addr));
 
        return 0;
 }
@@ -353,10 +353,10 @@ static void falcon_free_special_buffer(struct efx_nic *efx,
                return;
 
        EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x "
-               "(virt %p phys %lx)\n", buffer->index,
+               "(virt %p phys %llx)\n", buffer->index,
                buffer->index + buffer->entries - 1,
-               (unsigned long long)buffer->dma_addr, buffer->len,
-               buffer->addr, virt_to_phys(buffer->addr));
+               (u64)buffer->dma_addr, buffer->len,
+               buffer->addr, (u64)virt_to_phys(buffer->addr));
 
        pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
                            buffer->dma_addr);
@@ -2343,10 +2343,10 @@ int falcon_probe_port(struct efx_nic *efx)
                                 FALCON_MAC_STATS_SIZE);
        if (rc)
                return rc;
-       EFX_LOG(efx, "stats buffer at %llx (virt %p phys %lx)\n",
-               (unsigned long long)efx->stats_buffer.dma_addr,
+       EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n",
+               (u64)efx->stats_buffer.dma_addr,
                efx->stats_buffer.addr,
-               virt_to_phys(efx->stats_buffer.addr));
+               (u64)virt_to_phys(efx->stats_buffer.addr));
 
        return 0;
 }
@@ -2921,9 +2921,9 @@ int falcon_probe_nic(struct efx_nic *efx)
                goto fail4;
        BUG_ON(efx->irq_status.dma_addr & 0x0f);
 
-       EFX_LOG(efx, "INT_KER at %llx (virt %p phys %lx)\n",
-               (unsigned long long)efx->irq_status.dma_addr,
-               efx->irq_status.addr, virt_to_phys(efx->irq_status.addr));
+       EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n",
+               (u64)efx->irq_status.dma_addr,
+               efx->irq_status.addr, (u64)virt_to_phys(efx->irq_status.addr));
 
        falcon_probe_spi_devices(efx);
 
index bfca15d..14c1165 100644 (file)
@@ -1082,8 +1082,8 @@ static int __init arlan_probe_here(struct net_device *dev,
        if (arlan_check_fingerprint(memaddr))
                return -ENODEV;
 
-       printk(KERN_NOTICE "%s: Arlan found at %x, \n ", dev->name, 
-              (int) virt_to_phys((void*)memaddr));
+       printk(KERN_NOTICE "%s: Arlan found at %llx, \n ", dev->name, 
+              (u64) virt_to_phys((void*)memaddr));
 
        ap->card = (void *) memaddr;
        dev->mem_start = memaddr;
index 9da5a4b..c3ea5fa 100644 (file)
@@ -38,7 +38,7 @@
 
 static LIST_HEAD(dying_tasks);
 static LIST_HEAD(dead_tasks);
-static cpumask_t marked_cpus = CPU_MASK_NONE;
+static cpumask_var_t marked_cpus;
 static DEFINE_SPINLOCK(task_mortuary);
 static void process_task_mortuary(void);
 
@@ -456,10 +456,10 @@ static void mark_done(int cpu)
 {
        int i;
 
-       cpu_set(cpu, marked_cpus);
+       cpumask_set_cpu(cpu, marked_cpus);
 
        for_each_online_cpu(i) {
-               if (!cpu_isset(i, marked_cpus))
+               if (!cpumask_test_cpu(i, marked_cpus))
                        return;
        }
 
@@ -468,7 +468,7 @@ static void mark_done(int cpu)
         */
        process_task_mortuary();
 
-       cpus_clear(marked_cpus);
+       cpumask_clear(marked_cpus);
 }
 
 
@@ -565,6 +565,20 @@ void sync_buffer(int cpu)
        mutex_unlock(&buffer_mutex);
 }
 
+int __init buffer_sync_init(void)
+{
+       if (!alloc_cpumask_var(&marked_cpus, GFP_KERNEL))
+               return -ENOMEM;
+
+       cpumask_clear(marked_cpus);
+       return 0;
+}
+
+void __exit buffer_sync_cleanup(void)
+{
+       free_cpumask_var(marked_cpus);
+}
+
 /* The function can be used to add a buffer worth of data directly to
  * the kernel buffer. The buffer is assumed to be a circular buffer.
  * Take the entries from index start and end at index end, wrapping
index 3110732..0ebf5db 100644 (file)
@@ -19,4 +19,8 @@ void sync_stop(void);
 /* sync the given CPU's buffer */
 void sync_buffer(int cpu);
 
+/* initialize/destroy the buffer system. */
+int buffer_sync_init(void);
+void buffer_sync_cleanup(void);
+
 #endif /* OPROFILE_BUFFER_SYNC_H */
index 3cffce9..ced39f6 100644 (file)
@@ -183,6 +183,10 @@ static int __init oprofile_init(void)
 {
        int err;
 
+       err = buffer_sync_init();
+       if (err)
+               return err;
+
        err = oprofile_arch_init(&oprofile_ops);
 
        if (err < 0 || timer) {
@@ -191,8 +195,10 @@ static int __init oprofile_init(void)
        }
 
        err = oprofilefs_register();
-       if (err)
+       if (err) {
                oprofile_arch_exit();
+               buffer_sync_cleanup();
+       }
 
        return err;
 }
@@ -202,6 +208,7 @@ static void __exit oprofile_exit(void)
 {
        oprofilefs_unregister();
        oprofile_arch_exit();
+       buffer_sync_cleanup();
 }
 
 
index f5a662a..519f5f9 100644 (file)
@@ -42,6 +42,7 @@
 LIST_HEAD(dmar_drhd_units);
 
 static struct acpi_table_header * __initdata dmar_tbl;
+static acpi_size dmar_tbl_size;
 
 static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
 {
@@ -288,8 +289,9 @@ static int __init dmar_table_detect(void)
        acpi_status status = AE_OK;
 
        /* if we could find DMAR table, then there are DMAR devices */
-       status = acpi_get_table(ACPI_SIG_DMAR, 0,
-                               (struct acpi_table_header **)&dmar_tbl);
+       status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
+                               (struct acpi_table_header **)&dmar_tbl,
+                               &dmar_tbl_size);
 
        if (ACPI_SUCCESS(status) && !dmar_tbl) {
                printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
@@ -481,6 +483,7 @@ void __init detect_intel_iommu(void)
                        iommu_detected = 1;
 #endif
        }
+       early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
        dmar_tbl = NULL;
 }
 
index f78371b..5a57753 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/irq.h>
 #include <asm/io_apic.h>
 #include <asm/smp.h>
+#include <asm/cpu.h>
 #include <linux/intel-iommu.h>
 #include "intr_remapping.h"
 
index bf92802..36e221b 100644 (file)
@@ -37,7 +37,7 @@
 #include <linux/io.h>
 #include <linux/uaccess.h>
 
-#include <asm/mach-rdc321x/rdc321x_defs.h>
+#include <asm/rdc321x_defs.h>
 
 #define RDC_WDT_MASK   0x80000000 /* Mask */
 #define RDC_WDT_EN     0x00800000 /* Enable bit */
index eb0dfde..30963af 100644 (file)
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/string.h>
+#include <linux/bootmem.h>
 
 #include <asm/ptrace.h>
 #include <asm/irq.h>
+#include <asm/idle.h>
 #include <asm/sync_bitops.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
@@ -50,36 +52,55 @@ static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
 /* IRQ <-> IPI mapping */
 static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};
 
-/* Packed IRQ information: binding type, sub-type index, and event channel. */
-struct packed_irq
-{
-       unsigned short evtchn;
-       unsigned char index;
-       unsigned char type;
-};
-
-static struct packed_irq irq_info[NR_IRQS];
-
-/* Binding types. */
-enum {
-       IRQT_UNBOUND,
+/* Interrupt types. */
+enum xen_irq_type {
+       IRQT_UNBOUND = 0,
        IRQT_PIRQ,
        IRQT_VIRQ,
        IRQT_IPI,
        IRQT_EVTCHN
 };
 
-/* Convenient shorthand for packed representation of an unbound IRQ. */
-#define IRQ_UNBOUND    mk_irq_info(IRQT_UNBOUND, 0, 0)
+/*
+ * Packed IRQ information:
+ * type - enum xen_irq_type
+ * event channel - irq->event channel mapping
+ * cpu - cpu this event channel is bound to
+ * index - type-specific information:
+ *    PIRQ - vector, with MSB being "needs EOI"
+ *    VIRQ - virq number
+ *    IPI - IPI vector
+ *    EVTCHN -
+ */
+struct irq_info
+{
+       enum xen_irq_type type; /* type */
+       unsigned short evtchn;  /* event channel */
+       unsigned short cpu;     /* cpu bound */
+
+       union {
+               unsigned short virq;
+               enum ipi_vector ipi;
+               struct {
+                       unsigned short gsi;
+                       unsigned short vector;
+               } pirq;
+       } u;
+};
+
+static struct irq_info irq_info[NR_IRQS];
 
 static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
        [0 ... NR_EVENT_CHANNELS-1] = -1
 };
-static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
-static u8 cpu_evtchn[NR_EVENT_CHANNELS];
-
-/* Reference counts for bindings to IRQs. */
-static int irq_bindcount[NR_IRQS];
+struct cpu_evtchn_s {
+       unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
+};
+static struct cpu_evtchn_s *cpu_evtchn_mask_p;
+static inline unsigned long *cpu_evtchn_mask(int cpu)
+{
+       return cpu_evtchn_mask_p[cpu].bits;
+}
 
 /* Xen will never allocate port zero for any purpose. */
 #define VALID_EVTCHN(chn)      ((chn) != 0)
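With the old packed_irq triple replaced by struct irq_info, bindings store and read their data through typed helpers (defined in the next hunk). Illustration only, mirroring the VIRQ binding path later in this patch:

	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_virq_info(evtchn, virq);
	BUG_ON(virq_from_irq(irq) != virq);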
@@ -87,27 +108,108 @@ static int irq_bindcount[NR_IRQS];
 static struct irq_chip xen_dynamic_chip;
 
 /* Constructor for packed IRQ information. */
-static inline struct packed_irq mk_irq_info(u32 type, u32 index, u32 evtchn)
+static struct irq_info mk_unbound_info(void)
+{
+       return (struct irq_info) { .type = IRQT_UNBOUND };
+}
+
+static struct irq_info mk_evtchn_info(unsigned short evtchn)
+{
+       return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
+                       .cpu = 0 };
+}
+
+static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
 {
-       return (struct packed_irq) { evtchn, index, type };
+       return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
+                       .cpu = 0, .u.ipi = ipi };
+}
+
+static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
+{
+       return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
+                       .cpu = 0, .u.virq = virq };
+}
+
+static struct irq_info mk_pirq_info(unsigned short evtchn,
+                                   unsigned short gsi, unsigned short vector)
+{
+       return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
+                       .cpu = 0, .u.pirq = { .gsi = gsi, .vector = vector } };
 }
 
 /*
  * Accessors for packed IRQ information.
  */
-static inline unsigned int evtchn_from_irq(int irq)
+static struct irq_info *info_for_irq(unsigned irq)
+{
+       return &irq_info[irq];
+}
+
+static unsigned int evtchn_from_irq(unsigned irq)
 {
-       return irq_info[irq].evtchn;
+       return info_for_irq(irq)->evtchn;
 }
 
-static inline unsigned int index_from_irq(int irq)
+static enum ipi_vector ipi_from_irq(unsigned irq)
 {
-       return irq_info[irq].index;
+       struct irq_info *info = info_for_irq(irq);
+
+       BUG_ON(info == NULL);
+       BUG_ON(info->type != IRQT_IPI);
+
+       return info->u.ipi;
 }
 
-static inline unsigned int type_from_irq(int irq)
+static unsigned virq_from_irq(unsigned irq)
 {
-       return irq_info[irq].type;
+       struct irq_info *info = info_for_irq(irq);
+
+       BUG_ON(info == NULL);
+       BUG_ON(info->type != IRQT_VIRQ);
+
+       return info->u.virq;
+}
+
+static unsigned gsi_from_irq(unsigned irq)
+{
+       struct irq_info *info = info_for_irq(irq);
+
+       BUG_ON(info == NULL);
+       BUG_ON(info->type != IRQT_PIRQ);
+
+       return info->u.pirq.gsi;
+}
+
+static unsigned vector_from_irq(unsigned irq)
+{
+       struct irq_info *info = info_for_irq(irq);
+
+       BUG_ON(info == NULL);
+       BUG_ON(info->type != IRQT_PIRQ);
+
+       return info->u.pirq.vector;
+}
+
+static enum xen_irq_type type_from_irq(unsigned irq)
+{
+       return info_for_irq(irq)->type;
+}
+
+static unsigned cpu_from_irq(unsigned irq)
+{
+       return info_for_irq(irq)->cpu;
+}
+
+static unsigned int cpu_from_evtchn(unsigned int evtchn)
+{
+       int irq = evtchn_to_irq[evtchn];
+       unsigned ret = 0;
+
+       if (irq != -1)
+               ret = cpu_from_irq(irq);
+
+       return ret;
 }
 
 static inline unsigned long active_evtchns(unsigned int cpu,
@@ -115,7 +217,7 @@ static inline unsigned long active_evtchns(unsigned int cpu,
                                           unsigned int idx)
 {
        return (sh->evtchn_pending[idx] &
-               cpu_evtchn_mask[cpu][idx] &
+               cpu_evtchn_mask(cpu)[idx] &
                ~sh->evtchn_mask[idx]);
 }
 
@@ -125,13 +227,13 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 
        BUG_ON(irq == -1);
 #ifdef CONFIG_SMP
-       irq_to_desc(irq)->affinity = cpumask_of_cpu(cpu);
+       cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
 #endif
 
-       __clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]);
-       __set_bit(chn, cpu_evtchn_mask[cpu]);
+       __clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
+       __set_bit(chn, cpu_evtchn_mask(cpu));
 
-       cpu_evtchn[chn] = cpu;
+       irq_info[irq].cpu = cpu;
 }
 
 static void init_evtchn_cpu_bindings(void)
@@ -142,17 +244,11 @@ static void init_evtchn_cpu_bindings(void)
 
        /* By default all event channels notify CPU#0. */
        for_each_irq_desc(i, desc) {
-               desc->affinity = cpumask_of_cpu(0);
+               cpumask_copy(desc->affinity, cpumask_of(0));
        }
 #endif
 
-       memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
-       memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
-}
-
-static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
-{
-       return cpu_evtchn[evtchn];
+       memset(cpu_evtchn_mask(0), ~0, sizeof(cpu_evtchn_mask(0)));
 }
 
 static inline void clear_evtchn(int port)
@@ -232,9 +328,8 @@ static int find_unbound_irq(void)
        int irq;
        struct irq_desc *desc;
 
-       /* Only allocate from dynirq range */
        for (irq = 0; irq < nr_irqs; irq++)
-               if (irq_bindcount[irq] == 0)
+               if (irq_info[irq].type == IRQT_UNBOUND)
                        break;
 
        if (irq == nr_irqs)
@@ -244,6 +339,8 @@ static int find_unbound_irq(void)
        if (WARN_ON(desc == NULL))
                return -1;
 
+       dynamic_irq_init(irq);
+
        return irq;
 }
 
@@ -258,16 +355,13 @@ int bind_evtchn_to_irq(unsigned int evtchn)
        if (irq == -1) {
                irq = find_unbound_irq();
 
-               dynamic_irq_init(irq);
                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_level_irq, "event");
 
                evtchn_to_irq[evtchn] = irq;
-               irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
+               irq_info[irq] = mk_evtchn_info(evtchn);
        }
 
-       irq_bindcount[irq]++;
-
        spin_unlock(&irq_mapping_update_lock);
 
        return irq;
@@ -282,12 +376,12 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
        spin_lock(&irq_mapping_update_lock);
 
        irq = per_cpu(ipi_to_irq, cpu)[ipi];
+
        if (irq == -1) {
                irq = find_unbound_irq();
                if (irq < 0)
                        goto out;
 
-               dynamic_irq_init(irq);
                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_level_irq, "ipi");
 
@@ -298,15 +392,12 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
                evtchn = bind_ipi.port;
 
                evtchn_to_irq[evtchn] = irq;
-               irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
-
+               irq_info[irq] = mk_ipi_info(evtchn, ipi);
                per_cpu(ipi_to_irq, cpu)[ipi] = irq;
 
                bind_evtchn_to_cpu(evtchn, cpu);
        }
 
-       irq_bindcount[irq]++;
-
  out:
        spin_unlock(&irq_mapping_update_lock);
        return irq;
@@ -332,20 +423,17 @@ static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 
                irq = find_unbound_irq();
 
-               dynamic_irq_init(irq);
                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_level_irq, "virq");
 
                evtchn_to_irq[evtchn] = irq;
-               irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
+               irq_info[irq] = mk_virq_info(evtchn, virq);
 
                per_cpu(virq_to_irq, cpu)[virq] = irq;
 
                bind_evtchn_to_cpu(evtchn, cpu);
        }
 
-       irq_bindcount[irq]++;
-
        spin_unlock(&irq_mapping_update_lock);
 
        return irq;
@@ -358,7 +446,7 @@ static void unbind_from_irq(unsigned int irq)
 
        spin_lock(&irq_mapping_update_lock);
 
-       if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
+       if (VALID_EVTCHN(evtchn)) {
                close.port = evtchn;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
                        BUG();
@@ -366,11 +454,11 @@ static void unbind_from_irq(unsigned int irq)
                switch (type_from_irq(irq)) {
                case IRQT_VIRQ:
                        per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
-                               [index_from_irq(irq)] = -1;
+                               [virq_from_irq(irq)] = -1;
                        break;
                case IRQT_IPI:
                        per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
-                               [index_from_irq(irq)] = -1;
+                               [ipi_from_irq(irq)] = -1;
                        break;
                default:
                        break;
@@ -380,7 +468,7 @@ static void unbind_from_irq(unsigned int irq)
                bind_evtchn_to_cpu(evtchn, 0);
 
                evtchn_to_irq[evtchn] = -1;
-               irq_info[irq] = IRQ_UNBOUND;
+               irq_info[irq] = mk_unbound_info();
 
                dynamic_irq_cleanup(irq);
        }
@@ -498,8 +586,8 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
        for(i = 0; i < NR_EVENT_CHANNELS; i++) {
                if (sync_test_bit(i, sh->evtchn_pending)) {
                        printk("  %d: event %d -> irq %d\n",
-                               cpu_evtchn[i], i,
-                               evtchn_to_irq[i]);
+                              cpu_from_evtchn(i), i,
+                              evtchn_to_irq[i]);
                }
        }
 
@@ -508,7 +596,6 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-
 /*
  * Search the CPU's pending events bitmasks.  For each one found, map
  * the event number to an irq, and feed it into do_IRQ() for
@@ -521,11 +608,15 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 void xen_evtchn_do_upcall(struct pt_regs *regs)
 {
        int cpu = get_cpu();
+       struct pt_regs *old_regs = set_irq_regs(regs);
        struct shared_info *s = HYPERVISOR_shared_info;
        struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
        static DEFINE_PER_CPU(unsigned, nesting_count);
        unsigned count;
 
+       exit_idle();
+       irq_enter();
+
        do {
                unsigned long pending_words;
 
@@ -550,7 +641,7 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
                                int irq = evtchn_to_irq[port];
 
                                if (irq != -1)
-                                       xen_do_IRQ(irq, regs);
+                                       handle_irq(irq, regs);
                        }
                }
 
@@ -561,12 +652,17 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
        } while(count != 1);
 
 out:
+       irq_exit();
+       set_irq_regs(old_regs);
+
        put_cpu();
 }
 
 /* Rebind a new event channel to an existing irq. */
 void rebind_evtchn_irq(int evtchn, int irq)
 {
+       struct irq_info *info = info_for_irq(irq);
+
        /* Make sure the irq is masked, since the new event channel
           will also be masked. */
        disable_irq(irq);
@@ -576,11 +672,11 @@ void rebind_evtchn_irq(int evtchn, int irq)
        /* After resume the irq<->evtchn mappings are all cleared out */
        BUG_ON(evtchn_to_irq[evtchn] != -1);
        /* Expect irq to have been bound before,
-          so the bindcount should be non-0 */
-       BUG_ON(irq_bindcount[irq] == 0);
+          so there should be a proper type */
+       BUG_ON(info->type == IRQT_UNBOUND);
 
        evtchn_to_irq[evtchn] = irq;
-       irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
+       irq_info[irq] = mk_evtchn_info(evtchn);
 
        spin_unlock(&irq_mapping_update_lock);
 
@@ -690,8 +786,7 @@ static void restore_cpu_virqs(unsigned int cpu)
                if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
                        continue;
 
-               BUG_ON(irq_info[irq].type != IRQT_VIRQ);
-               BUG_ON(irq_info[irq].index != virq);
+               BUG_ON(virq_from_irq(irq) != virq);
 
                /* Get a new binding from Xen. */
                bind_virq.virq = virq;
@@ -703,7 +798,7 @@ static void restore_cpu_virqs(unsigned int cpu)
 
                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
-               irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
+               irq_info[irq] = mk_virq_info(evtchn, virq);
                bind_evtchn_to_cpu(evtchn, cpu);
 
                /* Ready for use. */
@@ -720,8 +815,7 @@ static void restore_cpu_ipis(unsigned int cpu)
                if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
                        continue;
 
-               BUG_ON(irq_info[irq].type != IRQT_IPI);
-               BUG_ON(irq_info[irq].index != ipi);
+               BUG_ON(ipi_from_irq(irq) != ipi);
 
                /* Get a new binding from Xen. */
                bind_ipi.vcpu = cpu;
@@ -732,7 +826,7 @@ static void restore_cpu_ipis(unsigned int cpu)
 
                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
-               irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
+               irq_info[irq] = mk_ipi_info(evtchn, ipi);
                bind_evtchn_to_cpu(evtchn, cpu);
 
                /* Ready for use. */
@@ -812,8 +906,11 @@ void xen_irq_resume(void)
 
 static struct irq_chip xen_dynamic_chip __read_mostly = {
        .name           = "xen-dyn",
+
+       .disable        = disable_dynirq,
        .mask           = disable_dynirq,
        .unmask         = enable_dynirq,
+
        .ack            = ack_dynirq,
        .set_affinity   = set_affinity_irq,
        .retrigger      = retrigger_dynirq,
@@ -822,6 +919,10 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
 void __init xen_init_IRQ(void)
 {
        int i;
+       size_t size = nr_cpu_ids * sizeof(struct cpu_evtchn_s);
+
+       cpu_evtchn_mask_p = alloc_bootmem(size);
+       BUG_ON(cpu_evtchn_mask_p == NULL);
 
        init_evtchn_cpu_bindings();
 
@@ -829,9 +930,5 @@ void __init xen_init_IRQ(void)
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                mask_evtchn(i);
 
-       /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
-       for (i = 0; i < nr_irqs; i++)
-               irq_bindcount[i] = 0;
-
        irq_ctx_init(smp_processor_id());
 }
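
The hunks above drop irq_bindcount and the global cpu_evtchn[] array in favour of a per-irq info record plus accessors (type_from_irq(), cpu_from_irq(), virq_from_irq(), ipi_from_irq(), mk_evtchn_info() and friends) that are introduced earlier in the same patch. A minimal sketch of the shape that bookkeeping takes, assuming a field layout along these lines (the exact struct and enum definitions are not shown in this excerpt):

	/* Hedged sketch only -- the real definitions live earlier in this patch. */
	enum xen_irq_type { IRQT_UNBOUND = 0, IRQT_EVTCHN, IRQT_VIRQ, IRQT_IPI };

	struct irq_info {
		enum xen_irq_type type;		/* how this irq is bound */
		unsigned short evtchn;		/* event channel, 0 if unbound */
		unsigned short cpu;		/* cpu the channel is bound to */
		union {				/* type-specific index */
			unsigned short virq;
			unsigned short ipi;
		} u;
	};

	static struct irq_info irq_info[NR_IRQS];

	static struct irq_info mk_evtchn_info(unsigned short evtchn)
	{
		return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn };
	}

	static unsigned int cpu_from_irq(unsigned int irq)
	{
		return irq_info[irq].cpu;
	}

With this in place, unbind_from_irq() no longer needs a reference count: a bound irq is simply one whose type is not IRQT_UNBOUND.
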
index 9b91617..e7e83b6 100644 (file)
@@ -100,7 +100,7 @@ static void do_suspend(void)
        /* XXX use normal device tree? */
        xenbus_suspend();
 
-       err = stop_machine(xen_suspend, &cancelled, &cpumask_of_cpu(0));
+       err = stop_machine(xen_suspend, &cancelled, cpumask_of(0));
        if (err) {
                printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
                goto out;
index a62720a..ab0b85c 100644 (file)
@@ -144,6 +144,7 @@ void __iomem *acpi_os_map_memory(acpi_physical_address where,
                                acpi_size length);
 
 void acpi_os_unmap_memory(void __iomem * logical_address, acpi_size size);
+void early_acpi_os_unmap_memory(void __iomem * virt, acpi_size size);
 
 #ifdef ACPI_FUTURE_USAGE
 acpi_status
index c8e8cf4..cc40102 100644 (file)
@@ -130,6 +130,10 @@ acpi_get_table_header(acpi_string signature,
                      struct acpi_table_header *out_table_header);
 
 acpi_status
+acpi_get_table_with_size(acpi_string signature,
+              u32 instance, struct acpi_table_header **out_table,
+              acpi_size *tbl_size);
+acpi_status
 acpi_get_table(acpi_string signature,
               u32 instance, struct acpi_table_header **out_table);
 
index b0e63c6..00f45ff 100644 (file)
@@ -80,4 +80,56 @@ extern void setup_per_cpu_areas(void);
 #define DECLARE_PER_CPU(type, name) extern PER_CPU_ATTRIBUTES \
                                        __typeof__(type) per_cpu_var(name)
 
+/*
+ * Optional methods for optimized non-lvalue per-cpu variable access.
+ *
+ * @var can be a percpu variable or a field of it and its size should
+ * equal char, int or long.  percpu_read() evaluates to the value of
+ * @var and all the others to void.
+ *
+ * These operations are guaranteed to be atomic w.r.t. preemption.
+ * The generic versions use plain get/put_cpu_var().  Archs are
+ * encouraged to implement single-instruction alternatives which don't
+ * require preemption protection.
+ */
+#ifndef percpu_read
+# define percpu_read(var)                                              \
+  ({                                                                   \
+       typeof(per_cpu_var(var)) __tmp_var__;                           \
+       __tmp_var__ = get_cpu_var(var);                                 \
+       put_cpu_var(var);                                               \
+       __tmp_var__;                                                    \
+  })
+#endif
+
+#define __percpu_generic_to_op(var, val, op)                           \
+do {                                                                   \
+       get_cpu_var(var) op val;                                        \
+       put_cpu_var(var);                                               \
+} while (0)
+
+#ifndef percpu_write
+# define percpu_write(var, val)                __percpu_generic_to_op(var, (val), =)
+#endif
+
+#ifndef percpu_add
+# define percpu_add(var, val)          __percpu_generic_to_op(var, (val), +=)
+#endif
+
+#ifndef percpu_sub
+# define percpu_sub(var, val)          __percpu_generic_to_op(var, (val), -=)
+#endif
+
+#ifndef percpu_and
+# define percpu_and(var, val)          __percpu_generic_to_op(var, (val), &=)
+#endif
+
+#ifndef percpu_or
+# define percpu_or(var, val)           __percpu_generic_to_op(var, (val), |=)
+#endif
+
+#ifndef percpu_xor
+# define percpu_xor(var, val)          __percpu_generic_to_op(var, (val), ^=)
+#endif
+
 #endif /* _ASM_GENERIC_PERCPU_H_ */
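
A short usage sketch of the new accessors; the per-cpu counter below is hypothetical and only the percpu_read()/percpu_add() calls come from the hunk above:

	#include <linux/percpu.h>

	/* Hypothetical per-cpu statistic. */
	static DEFINE_PER_CPU(unsigned long, pkt_count);

	static void account_packet(void)
	{
		/* Expands to get_cpu_var(pkt_count) += 1; put_cpu_var(pkt_count);
		 * unless the arch supplies a single-instruction variant. */
		percpu_add(pkt_count, 1);
	}

	static unsigned long packets_on_this_cpu(void)
	{
		/* Preemption-safe read of this CPU's copy. */
		return percpu_read(pkt_count);
	}
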
index 79a7ff9..4ce48e8 100644 (file)
@@ -9,7 +9,7 @@ extern char __bss_start[], __bss_stop[];
 extern char __init_begin[], __init_end[];
 extern char _sinittext[], _einittext[];
 extern char _end[];
-extern char __per_cpu_start[], __per_cpu_end[];
+extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
 extern char __kprobes_text_start[], __kprobes_text_end[];
 extern char __initdata_begin[], __initdata_end[];
 extern char __start_rodata[], __end_rodata[];
index c61fab1..5406e70 100644 (file)
        *(.initcall7.init)                                              \
        *(.initcall7s.init)
 
+/**
+ * PERCPU_VADDR - define output section for percpu area
+ * @vaddr: explicit base address (optional)
+ * @phdr: destination PHDR (optional)
+ *
+ * Macro which expands to output section for percpu area.  If @vaddr
+ * is not blank, it specifies explicit base address and all percpu
+ * symbols will be offset from the given address.  If blank, @vaddr
+ * always equals @laddr + LOAD_OFFSET.
+ *
+ * @phdr defines the output PHDR to use if not blank.  Be warned that
+ * output PHDR is sticky.  If @phdr is specified, the next output
+ * section in the linker script will go there too.  @phdr should have
+ * a leading colon.
+ *
+ * Note that this macro defines __per_cpu_load as an absolute symbol.
+ * If there is no need to put the percpu section at a predetermined
+ * address, use PERCPU().
+ */
+#define PERCPU_VADDR(vaddr, phdr)                                      \
+       VMLINUX_SYMBOL(__per_cpu_load) = .;                             \
+       .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)          \
+                               - LOAD_OFFSET) {                        \
+               VMLINUX_SYMBOL(__per_cpu_start) = .;                    \
+               *(.data.percpu.first)                                   \
+               *(.data.percpu.page_aligned)                            \
+               *(.data.percpu)                                         \
+               *(.data.percpu.shared_aligned)                          \
+               VMLINUX_SYMBOL(__per_cpu_end) = .;                      \
+       } phdr                                                          \
+       . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
+
+/**
+ * PERCPU - define output section for percpu area, simple version
+ * @align: required alignment
+ *
+ * Aligns to @align and outputs the output section for the percpu area.  This
+ * macro doesn't manipulate @vaddr or @phdr, and __per_cpu_load and
+ * __per_cpu_start will be identical.
+ *
+ * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
+ * that __per_cpu_load is defined as a relative symbol against
+ * .data.percpu which is required for relocatable x86_32
+ * configuration.
+ */
 #define PERCPU(align)                                                  \
        . = ALIGN(align);                                               \
-       VMLINUX_SYMBOL(__per_cpu_start) = .;                            \
-       .data.percpu  : AT(ADDR(.data.percpu) - LOAD_OFFSET) {          \
+       .data.percpu    : AT(ADDR(.data.percpu) - LOAD_OFFSET) {        \
+               VMLINUX_SYMBOL(__per_cpu_load) = .;                     \
+               VMLINUX_SYMBOL(__per_cpu_start) = .;                    \
+               *(.data.percpu.first)                                   \
                *(.data.percpu.page_aligned)                            \
                *(.data.percpu)                                         \
                *(.data.percpu.shared_aligned)                          \
-       }                                                               \
-       VMLINUX_SYMBOL(__per_cpu_end) = .;
+               VMLINUX_SYMBOL(__per_cpu_end) = .;                      \
+       }
index 6fce2fc..d59f0fa 100644 (file)
@@ -79,6 +79,7 @@ typedef int (*acpi_table_handler) (struct acpi_table_header *table);
 typedef int (*acpi_table_entry_handler) (struct acpi_subtable_header *header, const unsigned long end);
 
 char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
+void __init __acpi_unmap_table(char *map, unsigned long size);
 int early_acpi_boot_init(void);
 int acpi_boot_init (void);
 int acpi_boot_table_init (void);
index 5ca54d7..7605c5e 100644 (file)
@@ -111,6 +111,15 @@ static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *re
 #endif
 }
 
+static inline void elf_core_copy_kernel_regs(elf_gregset_t *elfregs, struct pt_regs *regs)
+{
+#ifdef ELF_CORE_COPY_KERNEL_REGS
+       ELF_CORE_COPY_KERNEL_REGS((*elfregs), regs);
+#else
+       elf_core_copy_regs(elfregs, regs);
+#endif
+}
+
 static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs)
 {
 #ifdef ELF_CORE_COPY_TASK_REGS
index 9127f6b..472f117 100644 (file)
@@ -467,6 +467,7 @@ int show_interrupts(struct seq_file *p, void *v);
 struct irq_desc;
 
 extern int early_irq_init(void);
+extern int arch_probe_nr_irqs(void);
 extern int arch_early_irq_init(void);
 extern int arch_init_chip_data(struct irq_desc *desc, int cpu);
 
index f899b50..27a6753 100644 (file)
@@ -182,11 +182,11 @@ struct irq_desc {
        unsigned int            irqs_unhandled;
        spinlock_t              lock;
 #ifdef CONFIG_SMP
-       cpumask_t               affinity;
+       cpumask_var_t           affinity;
        unsigned int            cpu;
-#endif
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-       cpumask_t               pending_mask;
+       cpumask_var_t           pending_mask;
+#endif
 #endif
 #ifdef CONFIG_PROC_FS
        struct proc_dir_entry   *dir;
@@ -422,4 +422,84 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
 
 #endif /* !CONFIG_S390 */
 
+#ifdef CONFIG_SMP
+/**
+ * init_alloc_desc_masks - allocate cpumasks for irq_desc
+ * @desc:      pointer to irq_desc struct
+ * @cpu:       cpu which will be handling the cpumasks
+ * @boot:      true if need bootmem
+ *
+ * Allocates affinity and pending_mask cpumask if required.
+ * Returns true if successful (or not required).
+ * Side effect: affinity has all bits set, pending_mask has all bits clear.
+ */
+static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
+                                                               bool boot)
+{
+       int node;
+
+       if (boot) {
+               alloc_bootmem_cpumask_var(&desc->affinity);
+               cpumask_setall(desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+               alloc_bootmem_cpumask_var(&desc->pending_mask);
+               cpumask_clear(desc->pending_mask);
+#endif
+               return true;
+       }
+
+       node = cpu_to_node(cpu);
+
+       if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node))
+               return false;
+       cpumask_setall(desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+       if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) {
+               free_cpumask_var(desc->affinity);
+               return false;
+       }
+       cpumask_clear(desc->pending_mask);
+#endif
+       return true;
+}
+
+/**
+ * init_copy_desc_masks - copy cpumasks for irq_desc
+ * @old_desc:  pointer to old irq_desc struct
+ * @new_desc:  pointer to new irq_desc struct
+ *
+ * Ensures affinity and pending_mask are copied to the new irq_desc.
+ * If !CONFIG_CPUMASK_OFFSTACK the cpumasks are embedded in the
+ * irq_desc struct so the copy is redundant.
+ */
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+                                       struct irq_desc *new_desc)
+{
+#ifdef CONFIG_CPUMASK_OFFSTACK
+       cpumask_copy(new_desc->affinity, old_desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+       cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
+#endif
+#endif
+}
+
+#else /* !CONFIG_SMP */
+
+static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
+                                                               bool boot)
+{
+       return true;
+}
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+                                       struct irq_desc *new_desc)
+{
+}
+
+#endif /* CONFIG_SMP */
+
 #endif /* _LINUX_IRQ_H */
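
Why init_copy_desc_masks() only has work to do with off-stack cpumasks follows from the two shapes of cpumask_var_t (shown slightly simplified from <linux/cpumask.h>):

	#ifdef CONFIG_CPUMASK_OFFSTACK
	typedef struct cpumask *cpumask_var_t;	/* points at separately allocated
						 * bits, so memcpy() of irq_desc
						 * copies only the pointer and the
						 * bits must be duplicated */
	#else
	typedef struct cpumask cpumask_var_t[1];	/* embedded array: alloc/free
							 * are no-ops and copying
							 * irq_desc already copies
							 * the bits */
	#endif
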
index 86af92e..887477b 100644 (file)
@@ -20,6 +20,7 @@
 
 # define for_each_irq_desc_reverse(irq, desc)                          \
        for (irq = nr_irqs - 1; irq >= 0; irq--)
+
 #else /* CONFIG_GENERIC_HARDIRQS */
 
 extern int nr_irqs;
index 0b4df7e..5b4e28b 100644 (file)
@@ -49,4 +49,5 @@
 #define FUTEXFS_SUPER_MAGIC    0xBAD1DEA
 #define INOTIFYFS_SUPER_MAGIC  0x2BAD1DEA
 
+#define STACK_END_MAGIC                0x57AC6E9D
 #endif /* __LINUX_MAGIC_H__ */
index 9f2a375..3577ffd 100644 (file)
@@ -8,35 +8,46 @@
 
 #include <asm/percpu.h>
 
+#ifndef PER_CPU_BASE_SECTION
+#ifdef CONFIG_SMP
+#define PER_CPU_BASE_SECTION ".data.percpu"
+#else
+#define PER_CPU_BASE_SECTION ".data"
+#endif
+#endif
+
 #ifdef CONFIG_SMP
-#define DEFINE_PER_CPU(type, name)                                     \
-       __attribute__((__section__(".data.percpu")))                    \
-       PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
 
 #ifdef MODULE
-#define SHARED_ALIGNED_SECTION ".data.percpu"
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
 #else
-#define SHARED_ALIGNED_SECTION ".data.percpu.shared_aligned"
+#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
 #endif
+#define PER_CPU_FIRST_SECTION ".first"
 
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)                      \
-       __attribute__((__section__(SHARED_ALIGNED_SECTION)))            \
-       PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name             \
-       ____cacheline_aligned_in_smp
+#else
 
-#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)                        \
-       __attribute__((__section__(".data.percpu.page_aligned")))       \
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
+#define PER_CPU_FIRST_SECTION ""
+
+#endif
+
+#define DEFINE_PER_CPU_SECTION(type, name, section)                    \
+       __attribute__((__section__(PER_CPU_BASE_SECTION section)))      \
        PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
-#else
+
 #define DEFINE_PER_CPU(type, name)                                     \
-       PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
+       DEFINE_PER_CPU_SECTION(type, name, "")
 
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)                    \
-       DEFINE_PER_CPU(type, name)
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)                      \
+       DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
+       ____cacheline_aligned_in_smp
 
-#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)                      \
-       DEFINE_PER_CPU(type, name)
-#endif
+#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)                                \
+       DEFINE_PER_CPU_SECTION(type, name, ".page_aligned")
+
+#define DEFINE_PER_CPU_FIRST(type, name)                               \
+       DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
 
 #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
 #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
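
The variables below are hypothetical; they only illustrate which section each of the reworked definition macros now selects in an SMP, non-module build:

	#include <linux/percpu.h>

	/* Hypothetical per-cpu variables, one per definition flavour: */
	DEFINE_PER_CPU(int, foo);			/* .data.percpu */
	DEFINE_PER_CPU_SHARED_ALIGNED(long, bar);	/* .data.percpu.shared_aligned */
	DEFINE_PER_CPU_PAGE_ALIGNED(long, baz);		/* .data.percpu.page_aligned */
	DEFINE_PER_CPU_FIRST(long, qux);		/* .data.percpu.first, matched by
							 * the new *(.data.percpu.first)
							 * entries in vmlinux.lds.h */
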
index 8981e52..f0a50b2 100644 (file)
@@ -1178,10 +1178,9 @@ struct task_struct {
        pid_t pid;
        pid_t tgid;
 
-#ifdef CONFIG_CC_STACKPROTECTOR
        /* Canary value for the -fstack-protector gcc feature */
        unsigned long stack_canary;
-#endif
+
        /* 
         * pointers to (original) parent process, youngest child, younger sibling,
         * older sibling, respectively.  (p->father can be replaced with 
@@ -2087,6 +2086,19 @@ static inline int object_is_on_stack(void *obj)
 
 extern void thread_info_cache_init(void);
 
+#ifdef CONFIG_DEBUG_STACK_USAGE
+static inline unsigned long stack_not_used(struct task_struct *p)
+{
+       unsigned long *n = end_of_stack(p);
+
+       do {    /* Skip over canary */
+               n++;
+       } while (!*n);
+
+       return (unsigned long)n - (unsigned long)end_of_stack(p);
+}
+#endif
+
 /* set thread flags in other task's structures
  * - see asm/thread_info.h for TIF_xxxx flags available
  */
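
A hypothetical caller, to show how the helper is meant to be used: stack_not_used() steps over the STACK_END_MAGIC word that dup_task_struct() now plants at end_of_stack() (see the kernel/fork.c hunk below) and then counts the zero words the task has never touched.

	#ifdef CONFIG_DEBUG_STACK_USAGE
	/* Hypothetical debug helper built on stack_not_used(): */
	static void warn_if_stack_tight(struct task_struct *p)
	{
		unsigned long free = stack_not_used(p);

		if (free < 512)
			printk(KERN_WARNING "%s/%d: only %lu bytes of stack never used\n",
			       p->comm, task_pid_nr(p), free);
	}
	#endif
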
index 715196b..bbacb7b 100644 (file)
@@ -176,6 +176,12 @@ static inline void init_call_single_data(void)
 #define put_cpu()              preempt_enable()
 #define put_cpu_no_resched()   preempt_enable_no_resched()
 
+/*
+ * Callback to arch code if there's nosmp or maxcpus=0 on the
+ * boot command line:
+ */
+extern void arch_disable_smp_support(void);
+
 void smp_setup_processor_id(void);
 
 #endif /* __LINUX_SMP_H */
diff --git a/include/linux/stackprotector.h b/include/linux/stackprotector.h
new file mode 100644 (file)
index 0000000..6f3e54c
--- /dev/null
@@ -0,0 +1,16 @@
+#ifndef _LINUX_STACKPROTECTOR_H
+#define _LINUX_STACKPROTECTOR_H 1
+
+#include <linux/compiler.h>
+#include <linux/sched.h>
+#include <linux/random.h>
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+# include <asm/stackprotector.h>
+#else
+static inline void boot_init_stack_canary(void)
+{
+}
+#endif
+
+#endif
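
When CONFIG_CC_STACKPROTECTOR is set, <asm/stackprotector.h> is expected to provide the real boot_init_stack_canary(). A hedged sketch of what such an arch implementation might do; the entropy mixing and where the value is published are assumptions, not the x86 code from this series:

	/* Hedged, arch-neutral sketch; not the actual x86 implementation. */
	static __always_inline void boot_init_stack_canary(void)
	{
		unsigned long canary;

		/* The boot-time RNG may still be weak, so implementations
		 * typically mix in a cycle counter as well (assumption). */
		get_random_bytes(&canary, sizeof(canary));
		canary ^= (unsigned long)sched_clock();

		/* Record the boot task's canary; forked tasks get theirs from
		 * get_random_int() in dup_task_struct() (kernel/fork.c hunk below). */
		current->stack_canary = canary;

		/* An arch would additionally store the value wherever its
		 * -fstack-protector instrumentation reads it from, e.g. a
		 * per-cpu or TLS slot (assumption). */
	}
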
index e632d29..a16b9e0 100644 (file)
@@ -193,5 +193,11 @@ int arch_update_cpu_topology(void);
 #ifndef topology_core_siblings
 #define topology_core_siblings(cpu)            cpumask_of_cpu(cpu)
 #endif
+#ifndef topology_thread_cpumask
+#define topology_thread_cpumask(cpu)           cpumask_of(cpu)
+#endif
+#ifndef topology_core_cpumask
+#define topology_core_cpumask(cpu)             cpumask_of(cpu)
+#endif
 
 #endif /* _LINUX_TOPOLOGY_H */
index 8442094..6441083 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/proc_fs.h>
 #include <linux/kernel.h>
 #include <linux/syscalls.h>
+#include <linux/stackprotector.h>
 #include <linux/string.h>
 #include <linux/ctype.h>
 #include <linux/delay.h>
@@ -135,14 +136,14 @@ unsigned int __initdata setup_max_cpus = NR_CPUS;
  * greater than 0, limits the maximum number of CPUs activated in
  * SMP mode to <NUM>.
  */
-#ifndef CONFIG_X86_IO_APIC
-static inline void disable_ioapic_setup(void) {};
-#endif
+
+void __weak arch_disable_smp_support(void) { }
 
 static int __init nosmp(char *str)
 {
        setup_max_cpus = 0;
-       disable_ioapic_setup();
+       arch_disable_smp_support();
+
        return 0;
 }
 
@@ -152,14 +153,14 @@ static int __init maxcpus(char *str)
 {
        get_option(&str, &setup_max_cpus);
        if (setup_max_cpus == 0)
-               disable_ioapic_setup();
+               arch_disable_smp_support();
 
        return 0;
 }
 
 early_param("maxcpus", maxcpus);
 #else
-#define setup_max_cpus NR_CPUS
+const unsigned int setup_max_cpus = NR_CPUS;
 #endif
 
 /*
@@ -539,6 +540,12 @@ asmlinkage void __init start_kernel(void)
         */
        lockdep_init();
        debug_objects_early_init();
+
+       /*
+        * Set up the initial canary ASAP:
+        */
+       boot_init_stack_canary();
+
        cgroup_init_early();
 
        local_irq_disable();
index efd30cc..167e1e3 100644 (file)
@@ -980,12 +980,9 @@ static void check_stack_usage(void)
 {
        static DEFINE_SPINLOCK(low_water_lock);
        static int lowest_to_date = THREAD_SIZE;
-       unsigned long *n = end_of_stack(current);
        unsigned long free;
 
-       while (*n == 0)
-               n++;
-       free = (unsigned long)n - (unsigned long)end_of_stack(current);
+       free = stack_not_used(current);
 
        if (free >= lowest_to_date)
                return;
index a66fbde..8de303b 100644 (file)
@@ -61,6 +61,7 @@
 #include <linux/proc_fs.h>
 #include <linux/blkdev.h>
 #include <trace/sched.h>
+#include <linux/magic.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -212,6 +213,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 {
        struct task_struct *tsk;
        struct thread_info *ti;
+       unsigned long *stackend;
+
        int err;
 
        prepare_to_copy(orig);
@@ -237,6 +240,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
                goto out;
 
        setup_thread_stack(tsk, orig);
+       stackend = end_of_stack(tsk);
+       *stackend = STACK_END_MAGIC;    /* for overflow detection */
 
 #ifdef CONFIG_CC_STACKPROTECTOR
        tsk->stack_canary = get_random_int();
index 7de11bd..122fef4 100644 (file)
@@ -46,7 +46,10 @@ void dynamic_irq_init(unsigned int irq)
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;
 #ifdef CONFIG_SMP
-       cpumask_setall(&desc->affinity);
+       cpumask_setall(desc->affinity);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+       cpumask_clear(desc->pending_mask);
+#endif
 #endif
        spin_unlock_irqrestore(&desc->lock, flags);
 }
index 3aba8d1..f51eaee 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/rculist.h>
 #include <linux/hash.h>
+#include <linux/bootmem.h>
 
 #include "internals.h"
 
@@ -69,6 +70,7 @@ int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
 #ifdef CONFIG_SPARSE_IRQ
+
 static struct irq_desc irq_desc_init = {
        .irq        = -1,
        .status     = IRQ_DISABLED,
@@ -76,9 +78,6 @@ static struct irq_desc irq_desc_init = {
        .handle_irq = handle_bad_irq,
        .depth      = 1,
        .lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-       .affinity   = CPU_MASK_ALL
-#endif
 };
 
 void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
@@ -113,6 +112,10 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
                printk(KERN_ERR "can not alloc kstat_irqs\n");
                BUG_ON(1);
        }
+       if (!init_alloc_desc_masks(desc, cpu, false)) {
+               printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
+               BUG_ON(1);
+       }
        arch_init_chip_data(desc, cpu);
 }
 
@@ -121,7 +124,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
  */
 DEFINE_SPINLOCK(sparse_irq_lock);
 
-struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
+struct irq_desc **irq_desc_ptrs __read_mostly;
 
 static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS_LEGACY-1] = {
@@ -131,14 +134,10 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm
                .handle_irq = handle_bad_irq,
                .depth      = 1,
                .lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-               .affinity   = CPU_MASK_ALL
-#endif
        }
 };
 
-/* FIXME: use bootmem alloc ...*/
-static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
+static unsigned int *kstat_irqs_legacy;
 
 int __init early_irq_init(void)
 {
@@ -148,18 +147,30 @@ int __init early_irq_init(void)
 
        init_irq_default_affinity();
 
+        /* initialize nr_irqs based on nr_cpu_ids */
+       arch_probe_nr_irqs();
+       printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
+
        desc = irq_desc_legacy;
        legacy_count = ARRAY_SIZE(irq_desc_legacy);
 
+       /* allocate irq_desc_ptrs array based on nr_irqs */
+       irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *));
+
+       /* allocate based on nr_cpu_ids */
+       /* FIXME: invert kstat_irqs, and it'd be a per_cpu_alloc'd thing */
+       kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids *
+                                         sizeof(int));
+
        for (i = 0; i < legacy_count; i++) {
                desc[i].irq = i;
-               desc[i].kstat_irqs = kstat_irqs_legacy[i];
+               desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-
+               init_alloc_desc_masks(&desc[i], 0, true);
                irq_desc_ptrs[i] = desc + i;
        }
 
-       for (i = legacy_count; i < NR_IRQS; i++)
+       for (i = legacy_count; i < nr_irqs; i++)
                irq_desc_ptrs[i] = NULL;
 
        return arch_early_irq_init();
@@ -167,7 +178,10 @@ int __init early_irq_init(void)
 
 struct irq_desc *irq_to_desc(unsigned int irq)
 {
-       return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
+       if (irq_desc_ptrs && irq < nr_irqs)
+               return irq_desc_ptrs[irq];
+
+       return NULL;
 }
 
 struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
@@ -176,10 +190,9 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
        unsigned long flags;
        int node;
 
-       if (irq >= NR_IRQS) {
-               printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
-                               irq, NR_IRQS);
-               WARN_ON(1);
+       if (irq >= nr_irqs) {
+               WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
+                       irq, nr_irqs);
                return NULL;
        }
 
@@ -221,9 +234,6 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
                .handle_irq = handle_bad_irq,
                .depth = 1,
                .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
-#ifdef CONFIG_SMP
-               .affinity = CPU_MASK_ALL
-#endif
        }
 };
 
@@ -235,12 +245,15 @@ int __init early_irq_init(void)
 
        init_irq_default_affinity();
 
+       printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
+
        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);
 
-       for (i = 0; i < count; i++)
+       for (i = 0; i < count; i++) {
                desc[i].irq = i;
-
+               init_alloc_desc_masks(&desc[i], 0, true);
+       }
        return arch_early_irq_init();
 }
 
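
arch_probe_nr_irqs() is the hook that lets an architecture raise nr_irqs from the runtime CPU count before the irq_desc_ptrs array is sized; the weak default added further down just returns 0 and leaves nr_irqs alone. A purely hypothetical override, to show the intended shape (the scaling formula is illustrative only):

	/* Hypothetical arch override -- the factor of 16 is made up. */
	int __init arch_probe_nr_irqs(void)
	{
		nr_irqs = max_t(int, NR_IRQS, nr_cpu_ids * 16);
		return 0;
	}
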
index e6d0a43..40416a8 100644 (file)
@@ -16,7 +16,14 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 extern struct lock_class_key irq_desc_lock_class;
 extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
 extern spinlock_t sparse_irq_lock;
+
+#ifdef CONFIG_SPARSE_IRQ
+/* irq_desc_ptrs allocated at boot time */
+extern struct irq_desc **irq_desc_ptrs;
+#else
+/* irq_desc_ptrs is a fixed size array */
 extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
+#endif
 
 #ifdef CONFIG_PROC_FS
 extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
index 291f036..a3a5dc9 100644 (file)
@@ -90,14 +90,14 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
        if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-               cpumask_copy(&desc->affinity, cpumask);
+               cpumask_copy(desc->affinity, cpumask);
                desc->chip->set_affinity(irq, cpumask);
        } else {
                desc->status |= IRQ_MOVE_PENDING;
-               cpumask_copy(&desc->pending_mask, cpumask);
+               cpumask_copy(desc->pending_mask, cpumask);
        }
 #else
-       cpumask_copy(&desc->affinity, cpumask);
+       cpumask_copy(desc->affinity, cpumask);
        desc->chip->set_affinity(irq, cpumask);
 #endif
        desc->status |= IRQ_AFFINITY_SET;
@@ -119,16 +119,16 @@ int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
         * one of the targets is online.
         */
        if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
-               if (cpumask_any_and(&desc->affinity, cpu_online_mask)
+               if (cpumask_any_and(desc->affinity, cpu_online_mask)
                    < nr_cpu_ids)
                        goto set_affinity;
                else
                        desc->status &= ~IRQ_AFFINITY_SET;
        }
 
-       cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
+       cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
 set_affinity:
-       desc->chip->set_affinity(irq, &desc->affinity);
+       desc->chip->set_affinity(irq, desc->affinity);
 
        return 0;
 }
index bd72329..e05ad9b 100644 (file)
@@ -18,7 +18,7 @@ void move_masked_irq(int irq)
 
        desc->status &= ~IRQ_MOVE_PENDING;
 
-       if (unlikely(cpumask_empty(&desc->pending_mask)))
+       if (unlikely(cpumask_empty(desc->pending_mask)))
                return;
 
        if (!desc->chip->set_affinity)
@@ -38,13 +38,13 @@ void move_masked_irq(int irq)
         * For correct operation this depends on the caller
         * masking the irqs.
         */
-       if (likely(cpumask_any_and(&desc->pending_mask, cpu_online_mask)
+       if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
                   < nr_cpu_ids)) {
-               cpumask_and(&desc->affinity,
-                           &desc->pending_mask, cpu_online_mask);
-               desc->chip->set_affinity(irq, &desc->affinity);
+               cpumask_and(desc->affinity,
+                           desc->pending_mask, cpu_online_mask);
+               desc->chip->set_affinity(irq, desc->affinity);
        }
-       cpumask_clear(&desc->pending_mask);
+       cpumask_clear(desc->pending_mask);
 }
 
 void move_native_irq(int irq)
index acd8835..7f9b804 100644 (file)
@@ -38,15 +38,22 @@ static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
        old_desc->kstat_irqs = NULL;
 }
 
-static void init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
+static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
                 struct irq_desc *desc, int cpu)
 {
        memcpy(desc, old_desc, sizeof(struct irq_desc));
+       if (!init_alloc_desc_masks(desc, cpu, false)) {
+               printk(KERN_ERR "irq %d: can not get new irq_desc cpumask "
+                               "for migration.\n", irq);
+               return false;
+       }
        spin_lock_init(&desc->lock);
        desc->cpu = cpu;
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids);
+       init_copy_desc_masks(old_desc, desc);
        arch_init_copy_chip_data(old_desc, desc, cpu);
+       return true;
 }
 
 static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
@@ -76,12 +83,18 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
        node = cpu_to_node(cpu);
        desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
        if (!desc) {
-               printk(KERN_ERR "irq %d: can not get new irq_desc for migration.\n", irq);
+               printk(KERN_ERR "irq %d: can not get new irq_desc "
+                               "for migration.\n", irq);
+               /* still use old one */
+               desc = old_desc;
+               goto out_unlock;
+       }
+       if (!init_copy_one_irq_desc(irq, old_desc, desc, cpu)) {
                /* still use old one */
+               kfree(desc);
                desc = old_desc;
                goto out_unlock;
        }
-       init_copy_one_irq_desc(irq, old_desc, desc, cpu);
 
        irq_desc_ptrs[irq] = desc;
        spin_unlock_irqrestore(&sparse_irq_lock, flags);
index aae3f74..692363d 100644 (file)
@@ -20,11 +20,11 @@ static struct proc_dir_entry *root_irq_dir;
 static int irq_affinity_proc_show(struct seq_file *m, void *v)
 {
        struct irq_desc *desc = irq_to_desc((long)m->private);
-       const struct cpumask *mask = &desc->affinity;
+       const struct cpumask *mask = desc->affinity;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
        if (desc->status & IRQ_MOVE_PENDING)
-               mask = &desc->pending_mask;
+               mask = desc->pending_mask;
 #endif
        seq_cpumask(m, mask);
        seq_putc(m, '\n');
index 8a6d7b0..795e7b6 100644 (file)
@@ -1130,7 +1130,7 @@ void crash_save_cpu(struct pt_regs *regs, int cpu)
                return;
        memset(&prstatus, 0, sizeof(prstatus));
        prstatus.pr_pid = current->pid;
-       elf_core_copy_regs(&prstatus.pr_reg, regs);
+       elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
        buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
                              &prstatus, sizeof(prstatus));
        final_note(buf);
index 2a2ff36..32fe4ef 100644 (file)
@@ -74,6 +74,9 @@ NORET_TYPE void panic(const char * fmt, ...)
        vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);
        printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+       dump_stack();
+#endif
        bust_spinlocks(0);
 
        /*
@@ -355,15 +358,18 @@ EXPORT_SYMBOL(warn_slowpath);
 #endif
 
 #ifdef CONFIG_CC_STACKPROTECTOR
+
 /*
  * Called when gcc's -fstack-protector feature is used, and
  * gcc detects corruption of the on-stack canary value
  */
 void __stack_chk_fail(void)
 {
-       panic("stack-protector: Kernel stack is corrupted");
+       panic("stack-protector: Kernel stack is corrupted in: %p\n",
+               __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__stack_chk_fail);
+
 #endif
 
 core_param(panic, panic_timeout, int, 0644);
index c1d0ed3..61245b8 100644 (file)
@@ -5944,12 +5944,7 @@ void sched_show_task(struct task_struct *p)
                printk(KERN_CONT " %016lx ", thread_saved_pc(p));
 #endif
 #ifdef CONFIG_DEBUG_STACK_USAGE
-       {
-               unsigned long *n = end_of_stack(p);
-               while (!*n)
-                       n++;
-               free = (unsigned long)n - (unsigned long)end_of_stack(p);
-       }
+       free = stack_not_used(p);
 #endif
        printk(KERN_CONT "%5lu %5d %6d\n", free,
                task_pid_nr(p), task_pid_nr(p->real_parent));
index bac1061..da932f4 100644 (file)
@@ -960,12 +960,13 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 
 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 
-static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
+static inline int pick_optimal_cpu(int this_cpu,
+                                  const struct cpumask *mask)
 {
        int first;
 
        /* "this_cpu" is cheaper to preempt than a remote processor */
-       if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
+       if ((this_cpu != -1) && cpumask_test_cpu(this_cpu, mask))
                return this_cpu;
 
        first = cpumask_first(mask);
@@ -981,6 +982,7 @@ static int find_lowest_rq(struct task_struct *task)
        struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
        int this_cpu = smp_processor_id();
        int cpu      = task_cpu(task);
+       cpumask_var_t domain_mask;
 
        if (task->rt.nr_cpus_allowed == 1)
                return -1; /* No other targets possible */
@@ -1013,19 +1015,25 @@ static int find_lowest_rq(struct task_struct *task)
        if (this_cpu == cpu)
                this_cpu = -1; /* Skip this_cpu opt if the same */
 
-       for_each_domain(cpu, sd) {
-               if (sd->flags & SD_WAKE_AFFINE) {
-                       cpumask_t domain_mask;
-                       int       best_cpu;
+       if (alloc_cpumask_var(&domain_mask, GFP_ATOMIC)) {
+               for_each_domain(cpu, sd) {
+                       if (sd->flags & SD_WAKE_AFFINE) {
+                               int best_cpu;
 
-                       cpumask_and(&domain_mask, sched_domain_span(sd),
-                                   lowest_mask);
+                               cpumask_and(domain_mask,
+                                           sched_domain_span(sd),
+                                           lowest_mask);
 
-                       best_cpu = pick_optimal_cpu(this_cpu,
-                                                   &domain_mask);
-                       if (best_cpu != -1)
-                               return best_cpu;
+                               best_cpu = pick_optimal_cpu(this_cpu,
+                                                           domain_mask);
+
+                               if (best_cpu != -1) {
+                                       free_cpumask_var(domain_mask);
+                                       return best_cpu;
+                               }
+                       }
                }
+               free_cpumask_var(domain_mask);
        }
 
        /*
index bdbe9de..0365b48 100644 (file)
@@ -795,6 +795,11 @@ int __init __weak early_irq_init(void)
        return 0;
 }
 
+int __init __weak arch_probe_nr_irqs(void)
+{
+       return 0;
+}
+
 int __init __weak arch_early_irq_init(void)
 {
        return 0;
index 6106a5c..3c84128 100644 (file)
@@ -1079,7 +1079,7 @@ continue_unlock:
                pagevec_release(&pvec);
                cond_resched();
        }
-       if (!cycled) {
+       if (!cycled && !done) {
                /*
                 * range_cyclic:
                 * We hit the last page and there is more work to be done: wrap
index ddc41f3..4d00855 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4457,3 +4457,4 @@ size_t ksize(const void *objp)
 
        return obj_size(virt_to_cache(objp));
 }
+EXPORT_SYMBOL(ksize);
index bf7e8fc..52bc8a2 100644 (file)
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -521,6 +521,7 @@ size_t ksize(const void *block)
        } else
                return sp->page.private;
 }
+EXPORT_SYMBOL(ksize);
 
 struct kmem_cache {
        unsigned int size, align;
index bdc9abb..0280eee 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2736,6 +2736,7 @@ size_t ksize(const void *object)
         */
        return s->size;
 }
+EXPORT_SYMBOL(ksize);
 
 void kfree(const void *x)
 {
diff --git a/scripts/gcc-x86_32-has-stack-protector.sh b/scripts/gcc-x86_32-has-stack-protector.sh
new file mode 100644 (file)
index 0000000..29493dc
--- /dev/null
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+echo "int foo(void) { char X[200]; return 3; }" | $* -S -xc -c -O0 -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+if [ "$?" -eq "0" ] ; then
+       echo y
+else
+       echo n
+fi
index 325c0a1..afaec61 100644 (file)
@@ -1,6 +1,8 @@
 #!/bin/sh
 
-echo "int foo(void) { char X[200]; return 3; }" | $1 -S -xc -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+echo "int foo(void) { char X[200]; return 3; }" | $* -S -xc -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
 if [ "$?" -eq "0" ] ; then
-       echo $2
+       echo y
+else
+       echo n
 fi
index 8892161..7e62303 100644 (file)
@@ -415,8 +415,9 @@ static int parse_elf(struct elf_info *info, const char *filename)
                const char *secstrings
                        = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
                const char *secname;
+               int nobits = sechdrs[i].sh_type == SHT_NOBITS;
 
-               if (sechdrs[i].sh_offset > info->size) {
+               if (!nobits && sechdrs[i].sh_offset > info->size) {
                        fatal("%s is truncated. sechdrs[i].sh_offset=%lu > "
                              "sizeof(*hrd)=%zu\n", filename,
                              (unsigned long)sechdrs[i].sh_offset,
@@ -425,6 +426,8 @@ static int parse_elf(struct elf_info *info, const char *filename)
                }
                secname = secstrings + sechdrs[i].sh_name;
                if (strcmp(secname, ".modinfo") == 0) {
+                       if (nobits)
+                               fatal("%s has NOBITS .modinfo\n", filename);
                        info->modinfo = (void *)hdr + sechdrs[i].sh_offset;
                        info->modinfo_len = sechdrs[i].sh_size;
                } else if (strcmp(secname, "__ksymtab") == 0)
index 0bcf146..84714a6 100644 (file)
@@ -33,7 +33,7 @@ if SND_DRIVERS
 
 config SND_PCSP
        tristate "PC-Speaker support (READ HELP!)"
-       depends on PCSPKR_PLATFORM && X86_PC && HIGH_RES_TIMERS
+       depends on PCSPKR_PLATFORM && X86 && HIGH_RES_TIMERS
        depends on INPUT
        depends on EXPERIMENTAL
        select SND_PCM