Merge branch 'linus' into x86/irq
author Ingo Molnar <mingo@elte.hu>
Mon, 16 Jun 2008 09:27:45 +0000 (11:27 +0200)
committer Ingo Molnar <mingo@elte.hu>
Mon, 16 Jun 2008 09:27:45 +0000 (11:27 +0200)
31 files changed:
arch/x86/kernel/Makefile
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/apic_32.c
arch/x86/kernel/entry_32.S
arch/x86/kernel/genx2apic_uv_x.c
arch/x86/kernel/i8259.c [moved from arch/x86/kernel/i8259_32.c with 70% similarity]
arch/x86/kernel/i8259_64.c [deleted file]
arch/x86/kernel/io_apic_32.c
arch/x86/kernel/io_apic_64.c
arch/x86/kernel/irq_32.c
arch/x86/kernel/irqinit_32.c [new file with mode: 0644]
arch/x86/kernel/irqinit_64.c [new file with mode: 0644]
arch/x86/kernel/vmiclock_32.c
arch/x86/mach-visws/visws_apic.c
include/asm-x86/desc.h
include/asm-x86/genapic_64.h
include/asm-x86/hw_irq.h
include/asm-x86/hw_irq_32.h [deleted file]
include/asm-x86/hw_irq_64.h [deleted file]
include/asm-x86/i8259.h
include/asm-x86/irq.h
include/asm-x86/irq_32.h [deleted file]
include/asm-x86/irq_64.h [deleted file]
include/asm-x86/irq_vectors.h [new file with mode: 0644]
include/asm-x86/mach-default/irq_vectors.h [deleted file]
include/asm-x86/mach-default/irq_vectors_limits.h [deleted file]
include/asm-x86/mach-visws/irq_vectors.h [deleted file]
include/asm-x86/mach-voyager/irq_vectors.h [deleted file]
include/asm-x86/uv/uv_hub.h
include/asm-x86/uv/uv_mmrs.h
include/linux/kernel_stat.h

index 5e618c3..2a53ad2 100644
@@ -18,7 +18,7 @@ CFLAGS_tsc_64.o               := $(nostackp)
 obj-y                  := process_$(BITS).o signal_$(BITS).o entry_$(BITS).o
 obj-y                  += traps_$(BITS).o irq_$(BITS).o
 obj-y                  += time_$(BITS).o ioport.o ldt.o
-obj-y                  += setup_$(BITS).o i8259_$(BITS).o setup.o
+obj-y                  += setup_$(BITS).o i8259.o irqinit_$(BITS).o setup.o
 obj-$(CONFIG_X86_32)   += sys_i386_32.o i386_ksyms_32.o
 obj-$(CONFIG_X86_64)   += sys_x86_64.o x8664_ksyms_64.o
 obj-$(CONFIG_X86_64)   += syscall_64.o vsyscall_64.o setup64.o
index 33c5216..ff1a7b4 100644
@@ -514,8 +514,6 @@ int acpi_register_gsi(u32 gsi, int triggering, int polarity)
         * Make sure all (legacy) PCI IRQs are set as level-triggered.
         */
        if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
-               extern void eisa_set_level_irq(unsigned int irq);
-
                if (triggering == ACPI_LEVEL_SENSITIVE)
                        eisa_set_level_irq(gsi);
        }
index 4b99b1b..d5767cb 100644
@@ -71,6 +71,10 @@ int local_apic_timer_disabled;
 int local_apic_timer_c2_ok;
 EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
 
+int first_system_vector = 0xfe;
+
+char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE};
+
 /*
  * Debug level, exported for io_apic.c
  */
@@ -1351,13 +1355,13 @@ void __init smp_intr_init(void)
         * The reschedule interrupt is a CPU-to-CPU reschedule-helper
         * IPI, driven by wakeup.
         */
-       set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
+       alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
 
        /* IPI for invalidation */
-       set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
+       alloc_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
 
        /* IPI for generic function call */
-       set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
+       alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
 }
 #endif
 
@@ -1370,15 +1374,15 @@ void __init apic_intr_init(void)
        smp_intr_init();
 #endif
        /* self generated IPI for local APIC timer */
-       set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
+       alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
 
        /* IPI vectors for APIC spurious and error interrupts */
-       set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
-       set_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
+       alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
+       alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
 
        /* thermal monitor LVT interrupt */
 #ifdef CONFIG_X86_MCE_P4THERMAL
-       set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
+       alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
 #endif
 }
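
The apic_32.c hunks above switch the IPI and APIC vectors from set_intr_gate() to alloc_intr_gate() and introduce first_system_vector plus the system_vectors[] table, which __assign_irq_vector later uses as the ceiling for device vectors. The helper itself is defined in a header not quoted in this merge, so the following is only a sketch of what it presumably does given how the table is used; the SYS_VECTOR_ALLOCED marker name is an assumption:

    /*
     * Sketch only -- not taken from the hunks above. Assumed behaviour:
     * book the vector in system_vectors[] (SYS_VECTOR_ALLOCED is a guessed
     * name for the non-free state) and keep first_system_vector at the
     * lowest vector claimed by the system, then install the gate as before.
     */
    static inline void alloc_system_vector(int vector)
    {
            if (system_vectors[vector] != SYS_VECTOR_FREE)
                    BUG();                          /* vector already claimed */
            system_vectors[vector] = SYS_VECTOR_ALLOCED;
            if (first_system_vector > vector)
                    first_system_vector = vector;   /* shrink the device range */
    }

    static inline void alloc_intr_gate(unsigned int n, void *addr)
    {
            alloc_system_vector(n);
            set_intr_gate(n, addr);                 /* same IDT setup as before */
    }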
 
index c778e4f..159a1c7 100644
@@ -51,7 +51,7 @@
 #include <asm/percpu.h>
 #include <asm/dwarf2.h>
 #include <asm/processor-flags.h>
-#include "irq_vectors.h"
+#include <asm/irq_vectors.h>
 
 /*
  * We use macros for low-level operations which need to be overridden
index ebf1390..45e84ac 100644
@@ -5,7 +5,7 @@
  *
  * SGI UV APIC functions (note: not an Intel compatible APIC)
  *
- * Copyright (C) 2007 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
  */
 
 #include <linux/threads.h>
@@ -55,37 +55,37 @@ static cpumask_t uv_vector_allocation_domain(int cpu)
 int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
 {
        unsigned long val;
-       int nasid;
+       int pnode;
 
-       nasid = uv_apicid_to_nasid(phys_apicid);
+       pnode = uv_apicid_to_pnode(phys_apicid);
        val = (1UL << UVH_IPI_INT_SEND_SHFT) |
            (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
            (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
            APIC_DM_INIT;
-       uv_write_global_mmr64(nasid, UVH_IPI_INT, val);
+       uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
        mdelay(10);
 
        val = (1UL << UVH_IPI_INT_SEND_SHFT) |
            (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
            (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
            APIC_DM_STARTUP;
-       uv_write_global_mmr64(nasid, UVH_IPI_INT, val);
+       uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
        return 0;
 }
 
 static void uv_send_IPI_one(int cpu, int vector)
 {
        unsigned long val, apicid, lapicid;
-       int nasid;
+       int pnode;
 
        apicid = per_cpu(x86_cpu_to_apicid, cpu); /* ZZZ - cache node-local ? */
        lapicid = apicid & 0x3f;                /* ZZZ macro needed */
-       nasid = uv_apicid_to_nasid(apicid);
+       pnode = uv_apicid_to_pnode(apicid);
        val =
            (1UL << UVH_IPI_INT_SEND_SHFT) | (lapicid <<
                                              UVH_IPI_INT_APIC_ID_SHFT) |
            (vector << UVH_IPI_INT_VECTOR_SHFT);
-       uv_write_global_mmr64(nasid, UVH_IPI_INT, val);
+       uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 }
 
 static void uv_send_IPI_mask(cpumask_t mask, int vector)
@@ -159,39 +159,81 @@ struct genapic apic_x2apic_uv_x = {
        .phys_pkg_id = phys_pkg_id,     /* Fixme ZZZ */
 };
 
-static __cpuinit void set_x2apic_extra_bits(int nasid)
+static __cpuinit void set_x2apic_extra_bits(int pnode)
 {
-       __get_cpu_var(x2apic_extra_bits) = ((nasid >> 1) << 6);
+       __get_cpu_var(x2apic_extra_bits) = (pnode << 6);
 }
 
 /*
  * Called on boot cpu.
  */
+static __init int boot_pnode_to_blade(int pnode)
+{
+       int blade;
+
+       for (blade = 0; blade < uv_num_possible_blades(); blade++)
+               if (pnode == uv_blade_info[blade].pnode)
+                       return blade;
+       BUG();
+}
+
+struct redir_addr {
+       unsigned long redirect;
+       unsigned long alias;
+};
+
+#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT
+
+static __initdata struct redir_addr redir_addrs[] = {
+       {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG},
+       {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG},
+       {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG},
+};
+
+static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
+{
+       union uvh_si_alias0_overlay_config_u alias;
+       union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) {
+               alias.v = uv_read_local_mmr(redir_addrs[i].alias);
+               if (alias.s.base == 0) {
+                       *size = (1UL << alias.s.m_alias);
+                       redirect.v = uv_read_local_mmr(redir_addrs[i].redirect);
+                       *base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
+                       return;
+               }
+       }
+       BUG();
+}
+
 static __init void uv_system_init(void)
 {
        union uvh_si_addr_map_config_u m_n_config;
-       int bytes, nid, cpu, lcpu, nasid, last_nasid, blade;
-       unsigned long mmr_base;
+       union uvh_node_id_u node_id;
+       unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
+       int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
+       unsigned long mmr_base, present;
 
        m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
+       m_val = m_n_config.s.m_skt;
+       n_val = m_n_config.s.n_skt;
        mmr_base =
            uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
            ~UV_MMR_ENABLE;
        printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);
 
-       last_nasid = -1;
-       for_each_possible_cpu(cpu) {
-               nid = cpu_to_node(cpu);
-               nasid = uv_apicid_to_nasid(per_cpu(x86_cpu_to_apicid, cpu));
-               if (nasid != last_nasid)
-                       uv_possible_blades++;
-               last_nasid = nasid;
-       }
+       for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
+               uv_possible_blades +=
+                 hweight64(uv_read_local_mmr( UVH_NODE_PRESENT_TABLE + i * 8));
        printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
 
        bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
        uv_blade_info = alloc_bootmem_pages(bytes);
 
+       get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);
+
        bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
        uv_node_to_blade = alloc_bootmem_pages(bytes);
        memset(uv_node_to_blade, 255, bytes);
@@ -200,43 +242,56 @@ static __init void uv_system_init(void)
        uv_cpu_to_blade = alloc_bootmem_pages(bytes);
        memset(uv_cpu_to_blade, 255, bytes);
 
-       last_nasid = -1;
-       blade = -1;
-       lcpu = -1;
-       for_each_possible_cpu(cpu) {
-               nid = cpu_to_node(cpu);
-               nasid = uv_apicid_to_nasid(per_cpu(x86_cpu_to_apicid, cpu));
-               if (nasid != last_nasid) {
-                       blade++;
-                       lcpu = -1;
-                       uv_blade_info[blade].nr_posible_cpus = 0;
+       blade = 0;
+       for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
+               present = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
+               for (j = 0; j < 64; j++) {
+                       if (!test_bit(j, &present))
+                               continue;
+                       uv_blade_info[blade].pnode = (i * 64 + j);
+                       uv_blade_info[blade].nr_possible_cpus = 0;
                        uv_blade_info[blade].nr_online_cpus = 0;
+                       blade++;
                }
-               last_nasid = nasid;
-               lcpu++;
+       }
 
-               uv_cpu_hub_info(cpu)->m_val = m_n_config.s.m_skt;
-               uv_cpu_hub_info(cpu)->n_val = m_n_config.s.n_skt;
+       node_id.v = uv_read_local_mmr(UVH_NODE_ID);
+       gnode_upper = (((unsigned long)node_id.s.node_id) &
+                      ~((1 << n_val) - 1)) << m_val;
+
+       for_each_present_cpu(cpu) {
+               nid = cpu_to_node(cpu);
+               pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu));
+               blade = boot_pnode_to_blade(pnode);
+               lcpu = uv_blade_info[blade].nr_possible_cpus;
+               uv_blade_info[blade].nr_possible_cpus++;
+
+               uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
+               uv_cpu_hub_info(cpu)->lowmem_remap_top =
+                                       lowmem_redir_base + lowmem_redir_size;
+               uv_cpu_hub_info(cpu)->m_val = m_val;
+               uv_cpu_hub_info(cpu)->n_val = m_val;
                uv_cpu_hub_info(cpu)->numa_blade_id = blade;
                uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
-               uv_cpu_hub_info(cpu)->local_nasid = nasid;
-               uv_cpu_hub_info(cpu)->gnode_upper =
-                   nasid & ~((1 << uv_hub_info->n_val) - 1);
+               uv_cpu_hub_info(cpu)->pnode = pnode;
+               uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) - 1;
+               uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
+               uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
                uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
                uv_cpu_hub_info(cpu)->coherency_domain_number = 0;/* ZZZ */
-               uv_blade_info[blade].nasid = nasid;
-               uv_blade_info[blade].nr_posible_cpus++;
                uv_node_to_blade[nid] = blade;
                uv_cpu_to_blade[cpu] = blade;
 
-               printk(KERN_DEBUG "UV cpu %d, apicid 0x%x, nasid %d, nid %d\n",
-                      cpu, per_cpu(x86_cpu_to_apicid, cpu), nasid, nid);
-               printk(KERN_DEBUG "UV   lcpu %d, blade %d\n", lcpu, blade);
+               printk(KERN_DEBUG "UV cpu %d, apicid 0x%x, pnode %d, nid %d, "
+                       "lcpu %d, blade %d\n",
+                       cpu, per_cpu(x86_cpu_to_apicid, cpu), pnode, nid,
+                       lcpu, blade);
        }
 }
 
 /*
  * Called on each cpu to initialize the per_cpu UV data area.
+ *     ZZZ hotplug not supported yet
  */
 void __cpuinit uv_cpu_init(void)
 {
@@ -246,5 +301,5 @@ void __cpuinit uv_cpu_init(void)
        uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;
 
        if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
-               set_x2apic_extra_bits(uv_hub_info->local_nasid);
+               set_x2apic_extra_bits(uv_hub_info->pnode);
 }
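
Throughout the genx2apic_uv_x.c changes, NASID-based addressing is replaced by "pnode". The relation between the two can be read off the set_x2apic_extra_bits() hunk: the old code wrote (nasid >> 1) << 6 and the new code writes pnode << 6, so a pnode is apparently the NASID shifted right by one. A standalone illustration with made-up numbers (nothing below appears in the patch):

    /* Illustration only -- the values are invented. */
    int nasid = 6;                          /* old-style node id             */
    int pnode = nasid >> 1;                 /* -> 3, matching the hunk above */
    unsigned int extra_bits = pnode << 6;   /* -> 0xc0, the per-cpu          */
                                            /* x2apic_extra_bits value       */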
similarity index 70%
rename from arch/x86/kernel/i8259_32.c
rename to arch/x86/kernel/i8259.c
index fe63196..dc92b49 100644
@@ -1,8 +1,10 @@
+#include <linux/linkage.h>
 #include <linux/errno.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
+#include <linux/timex.h>
 #include <linux/slab.h>
 #include <linux/random.h>
 #include <linux/init.h>
 #include <linux/sysdev.h>
 #include <linux/bitops.h>
 
+#include <asm/acpi.h>
 #include <asm/atomic.h>
 #include <asm/system.h>
 #include <asm/io.h>
 #include <asm/timer.h>
+#include <asm/hw_irq.h>
 #include <asm/pgtable.h>
 #include <asm/delay.h>
 #include <asm/desc.h>
@@ -32,7 +36,7 @@ static int i8259A_auto_eoi;
 DEFINE_SPINLOCK(i8259A_lock);
 static void mask_and_ack_8259A(unsigned int);
 
-static struct irq_chip i8259A_chip = {
+struct irq_chip i8259A_chip = {
        .name           = "XT-PIC",
        .mask           = disable_8259A_irq,
        .disable        = disable_8259A_irq,
@@ -125,14 +129,14 @@ static inline int i8259A_irq_real(unsigned int irq)
        int irqmask = 1<<irq;
 
        if (irq < 8) {
-               outb(0x0B,PIC_MASTER_CMD);      /* ISR register */
+               outb(0x0B, PIC_MASTER_CMD);     /* ISR register */
                value = inb(PIC_MASTER_CMD) & irqmask;
-               outb(0x0A,PIC_MASTER_CMD);      /* back to the IRR register */
+               outb(0x0A, PIC_MASTER_CMD);     /* back to the IRR register */
                return value;
        }
-       outb(0x0B,PIC_SLAVE_CMD);       /* ISR register */
+       outb(0x0B, PIC_SLAVE_CMD);      /* ISR register */
        value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
-       outb(0x0A,PIC_SLAVE_CMD);       /* back to the IRR register */
+       outb(0x0A, PIC_SLAVE_CMD);      /* back to the IRR register */
        return value;
 }
 
@@ -171,12 +175,14 @@ handle_real_irq:
        if (irq & 8) {
                inb(PIC_SLAVE_IMR);     /* DUMMY - (do we need this?) */
                outb(cached_slave_mask, PIC_SLAVE_IMR);
-               outb(0x60+(irq&7),PIC_SLAVE_CMD);/* 'Specific EOI' to slave */
-               outb(0x60+PIC_CASCADE_IR,PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
+               /* 'Specific EOI' to slave */
+               outb(0x60+(irq&7), PIC_SLAVE_CMD);
+                /* 'Specific EOI' to master-IRQ2 */
+               outb(0x60+PIC_CASCADE_IR, PIC_MASTER_CMD);
        } else {
                inb(PIC_MASTER_IMR);    /* DUMMY - (do we need this?) */
                outb(cached_master_mask, PIC_MASTER_IMR);
-               outb(0x60+irq,PIC_MASTER_CMD);  /* 'Specific EOI to master */
+               outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */
        }
        spin_unlock_irqrestore(&i8259A_lock, flags);
        return;
@@ -199,7 +205,8 @@ spurious_8259A_irq:
                 * lets ACK and report it. [once per IRQ]
                 */
                if (!(spurious_irq_mask & irqmask)) {
-                       printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
+                       printk(KERN_DEBUG
+                              "spurious 8259A interrupt: IRQ%d.\n", irq);
                        spurious_irq_mask |= irqmask;
                }
                atomic_inc(&irq_err_count);
@@ -290,17 +297,28 @@ void init_8259A(int auto_eoi)
         * outb_pic - this has to work on a wide range of PC hardware.
         */
        outb_pic(0x11, PIC_MASTER_CMD); /* ICW1: select 8259A-1 init */
-       outb_pic(0x20 + 0, PIC_MASTER_IMR);     /* ICW2: 8259A-1 IR0-7 mapped to 0x20-0x27 */
-       outb_pic(1U << PIC_CASCADE_IR, PIC_MASTER_IMR); /* 8259A-1 (the master) has a slave on IR2 */
+
+       /* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 on x86-64,
+                              to 0x20-0x27 on i386 */
+       outb_pic(IRQ0_VECTOR, PIC_MASTER_IMR);
+
+       /* 8259A-1 (the master) has a slave on IR2 */
+       outb_pic(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);
+
        if (auto_eoi)   /* master does Auto EOI */
                outb_pic(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
        else            /* master expects normal EOI */
                outb_pic(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);
 
        outb_pic(0x11, PIC_SLAVE_CMD);  /* ICW1: select 8259A-2 init */
-       outb_pic(0x20 + 8, PIC_SLAVE_IMR);      /* ICW2: 8259A-2 IR0-7 mapped to 0x28-0x2f */
-       outb_pic(PIC_CASCADE_IR, PIC_SLAVE_IMR);        /* 8259A-2 is a slave on master's IR2 */
-       outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
+
+       /* ICW2: 8259A-2 IR0-7 mapped to IRQ8_VECTOR */
+       outb_pic(IRQ8_VECTOR, PIC_SLAVE_IMR);
+       /* 8259A-2 is a slave on master's IR2 */
+       outb_pic(PIC_CASCADE_IR, PIC_SLAVE_IMR);
+       /* (slave's support for AEOI in flat mode is to be investigated) */
+       outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
+
        if (auto_eoi)
                /*
                 * In AEOI mode we just have to mask the interrupt
@@ -317,93 +335,3 @@ void init_8259A(int auto_eoi)
 
        spin_unlock_irqrestore(&i8259A_lock, flags);
 }
-
-/*
- * Note that on a 486, we don't want to do a SIGFPE on an irq13
- * as the irq is unreliable, and exception 16 works correctly
- * (ie as explained in the intel literature). On a 386, you
- * can't use exception 16 due to bad IBM design, so we have to
- * rely on the less exact irq13.
- *
- * Careful.. Not only is IRQ13 unreliable, but it is also
- * leads to races. IBM designers who came up with it should
- * be shot.
- */
-
-static irqreturn_t math_error_irq(int cpl, void *dev_id)
-{
-       extern void math_error(void __user *);
-       outb(0,0xF0);
-       if (ignore_fpu_irq || !boot_cpu_data.hard_math)
-               return IRQ_NONE;
-       math_error((void __user *)get_irq_regs()->ip);
-       return IRQ_HANDLED;
-}
-
-/*
- * New motherboards sometimes make IRQ 13 be a PCI interrupt,
- * so allow interrupt sharing.
- */
-static struct irqaction fpu_irq = {
-       .handler = math_error_irq,
-       .mask = CPU_MASK_NONE,
-       .name = "fpu",
-};
-
-void __init init_ISA_irqs (void)
-{
-       int i;
-
-#ifdef CONFIG_X86_LOCAL_APIC
-       init_bsp_APIC();
-#endif
-       init_8259A(0);
-
-       /*
-        * 16 old-style INTA-cycle interrupts:
-        */
-       for (i = 0; i < 16; i++) {
-               set_irq_chip_and_handler_name(i, &i8259A_chip,
-                                             handle_level_irq, "XT");
-       }
-}
-
-/* Overridden in paravirt.c */
-void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
-
-void __init native_init_IRQ(void)
-{
-       int i;
-
-       /* all the set up before the call gates are initialised */
-       pre_intr_init_hook();
-
-       /*
-        * Cover the whole vector space, no vector can escape
-        * us. (some of these will be overridden and become
-        * 'special' SMP interrupts)
-        */
-       for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) {
-               int vector = FIRST_EXTERNAL_VECTOR + i;
-               if (i >= NR_IRQS)
-                       break;
-               /* SYSCALL_VECTOR was reserved in trap_init. */
-               if (!test_bit(vector, used_vectors))
-                       set_intr_gate(vector, interrupt[i]);
-       }
-
-       /* setup after call gates are initialised (usually add in
-        * the architecture specific gates)
-        */
-       intr_init_hook();
-
-       /*
-        * External FPU? Set up irq13 if so, for
-        * original braindamaged IBM FERR coupling.
-        */
-       if (boot_cpu_data.hard_math && !cpu_has_fpu)
-               setup_irq(FPU_IRQ, &fpu_irq);
-
-       irq_ctx_init(smp_processor_id());
-}
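
The ICW2 programming in init_8259A() now uses IRQ0_VECTOR/IRQ8_VECTOR instead of the hard-coded 0x20/0x28, so the single shared i8259.c produces the 0x20-0x27/0x28-0x2f layout on i386 and the 0x30-0x37/0x38-0x3f layout on x86-64, as the new comment in the hunk notes. The actual definitions live in the new include/asm-x86/irq_vectors.h, which this section does not quote; the sketch below is therefore a reconstruction from those comments, not the real header:

    /* Hypothetical reconstruction -- the real header is not shown above.   */
    #define FIRST_EXTERNAL_VECTOR   0x20
    #ifdef CONFIG_X86_32
    # define IRQ0_VECTOR            FIRST_EXTERNAL_VECTOR           /* 0x20 */
    #else
    # define IRQ0_VECTOR            (FIRST_EXTERNAL_VECTOR + 0x10)  /* 0x30 */
    #endif
    #define IRQ8_VECTOR             (IRQ0_VECTOR + 8)        /* 0x28 / 0x38 */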
diff --git a/arch/x86/kernel/i8259_64.c b/arch/x86/kernel/i8259_64.c
deleted file mode 100644
index fa57a15..0000000
+++ /dev/null
@@ -1,512 +0,0 @@
-#include <linux/linkage.h>
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/timex.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/init.h>
-#include <linux/kernel_stat.h>
-#include <linux/sysdev.h>
-#include <linux/bitops.h>
-
-#include <asm/acpi.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/hw_irq.h>
-#include <asm/pgtable.h>
-#include <asm/delay.h>
-#include <asm/desc.h>
-#include <asm/apic.h>
-#include <asm/i8259.h>
-
-/*
- * Common place to define all x86 IRQ vectors
- *
- * This builds up the IRQ handler stubs using some ugly macros in irq.h
- *
- * These macros create the low-level assembly IRQ routines that save
- * register context and call do_IRQ(). do_IRQ() then does all the
- * operations that are needed to keep the AT (or SMP IOAPIC)
- * interrupt-controller happy.
- */
-
-#define BI(x,y) \
-       BUILD_IRQ(x##y)
-
-#define BUILD_16_IRQS(x) \
-       BI(x,0) BI(x,1) BI(x,2) BI(x,3) \
-       BI(x,4) BI(x,5) BI(x,6) BI(x,7) \
-       BI(x,8) BI(x,9) BI(x,a) BI(x,b) \
-       BI(x,c) BI(x,d) BI(x,e) BI(x,f)
-
-/*
- * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
- * (these are usually mapped to vectors 0x30-0x3f)
- */
-
-/*
- * The IO-APIC gives us many more interrupt sources. Most of these
- * are unused but an SMP system is supposed to have enough memory ...
- * sometimes (mostly wrt. hw bugs) we get corrupted vectors all
- * across the spectrum, so we really want to be prepared to get all
- * of these. Plus, more powerful systems might have more than 64
- * IO-APIC registers.
- *
- * (these are usually mapped into the 0x30-0xff vector range)
- */
-                                     BUILD_16_IRQS(0x2) BUILD_16_IRQS(0x3)
-BUILD_16_IRQS(0x4) BUILD_16_IRQS(0x5) BUILD_16_IRQS(0x6) BUILD_16_IRQS(0x7)
-BUILD_16_IRQS(0x8) BUILD_16_IRQS(0x9) BUILD_16_IRQS(0xa) BUILD_16_IRQS(0xb)
-BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd) BUILD_16_IRQS(0xe) BUILD_16_IRQS(0xf)
-
-#undef BUILD_16_IRQS
-#undef BI
-
-
-#define IRQ(x,y) \
-       IRQ##x##y##_interrupt
-
-#define IRQLIST_16(x) \
-       IRQ(x,0), IRQ(x,1), IRQ(x,2), IRQ(x,3), \
-       IRQ(x,4), IRQ(x,5), IRQ(x,6), IRQ(x,7), \
-       IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \
-       IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f)
-
-/* for the irq vectors */
-static void (*__initdata interrupt[NR_VECTORS - FIRST_EXTERNAL_VECTOR])(void) = {
-                                         IRQLIST_16(0x2), IRQLIST_16(0x3),
-       IRQLIST_16(0x4), IRQLIST_16(0x5), IRQLIST_16(0x6), IRQLIST_16(0x7),
-       IRQLIST_16(0x8), IRQLIST_16(0x9), IRQLIST_16(0xa), IRQLIST_16(0xb),
-       IRQLIST_16(0xc), IRQLIST_16(0xd), IRQLIST_16(0xe), IRQLIST_16(0xf)
-};
-
-#undef IRQ
-#undef IRQLIST_16
-
-/*
- * This is the 'legacy' 8259A Programmable Interrupt Controller,
- * present in the majority of PC/AT boxes.
- * plus some generic x86 specific things if generic specifics makes
- * any sense at all.
- * this file should become arch/i386/kernel/irq.c when the old irq.c
- * moves to arch independent land
- */
-
-static int i8259A_auto_eoi;
-DEFINE_SPINLOCK(i8259A_lock);
-static void mask_and_ack_8259A(unsigned int);
-
-static struct irq_chip i8259A_chip = {
-       .name           = "XT-PIC",
-       .mask           = disable_8259A_irq,
-       .disable        = disable_8259A_irq,
-       .unmask         = enable_8259A_irq,
-       .mask_ack       = mask_and_ack_8259A,
-};
-
-/*
- * 8259A PIC functions to handle ISA devices:
- */
-
-/*
- * This contains the irq mask for both 8259A irq controllers,
- */
-unsigned int cached_irq_mask = 0xffff;
-
-/*
- * Not all IRQs can be routed through the IO-APIC, eg. on certain (older)
- * boards the timer interrupt is not really connected to any IO-APIC pin,
- * it's fed to the master 8259A's IR0 line only.
- *
- * Any '1' bit in this mask means the IRQ is routed through the IO-APIC.
- * this 'mixed mode' IRQ handling costs nothing because it's only used
- * at IRQ setup time.
- */
-unsigned long io_apic_irqs;
-
-void disable_8259A_irq(unsigned int irq)
-{
-       unsigned int mask = 1 << irq;
-       unsigned long flags;
-
-       spin_lock_irqsave(&i8259A_lock, flags);
-       cached_irq_mask |= mask;
-       if (irq & 8)
-               outb(cached_slave_mask, PIC_SLAVE_IMR);
-       else
-               outb(cached_master_mask, PIC_MASTER_IMR);
-       spin_unlock_irqrestore(&i8259A_lock, flags);
-}
-
-void enable_8259A_irq(unsigned int irq)
-{
-       unsigned int mask = ~(1 << irq);
-       unsigned long flags;
-
-       spin_lock_irqsave(&i8259A_lock, flags);
-       cached_irq_mask &= mask;
-       if (irq & 8)
-               outb(cached_slave_mask, PIC_SLAVE_IMR);
-       else
-               outb(cached_master_mask, PIC_MASTER_IMR);
-       spin_unlock_irqrestore(&i8259A_lock, flags);
-}
-
-int i8259A_irq_pending(unsigned int irq)
-{
-       unsigned int mask = 1<<irq;
-       unsigned long flags;
-       int ret;
-
-       spin_lock_irqsave(&i8259A_lock, flags);
-       if (irq < 8)
-               ret = inb(PIC_MASTER_CMD) & mask;
-       else
-               ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
-       spin_unlock_irqrestore(&i8259A_lock, flags);
-
-       return ret;
-}
-
-void make_8259A_irq(unsigned int irq)
-{
-       disable_irq_nosync(irq);
-       io_apic_irqs &= ~(1<<irq);
-       set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
-                                     "XT");
-       enable_irq(irq);
-}
-
-/*
- * This function assumes to be called rarely. Switching between
- * 8259A registers is slow.
- * This has to be protected by the irq controller spinlock
- * before being called.
- */
-static inline int i8259A_irq_real(unsigned int irq)
-{
-       int value;
-       int irqmask = 1<<irq;
-
-       if (irq < 8) {
-               outb(0x0B,PIC_MASTER_CMD);      /* ISR register */
-               value = inb(PIC_MASTER_CMD) & irqmask;
-               outb(0x0A,PIC_MASTER_CMD);      /* back to the IRR register */
-               return value;
-       }
-       outb(0x0B,PIC_SLAVE_CMD);       /* ISR register */
-       value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
-       outb(0x0A,PIC_SLAVE_CMD);       /* back to the IRR register */
-       return value;
-}
-
-/*
- * Careful! The 8259A is a fragile beast, it pretty
- * much _has_ to be done exactly like this (mask it
- * first, _then_ send the EOI, and the order of EOI
- * to the two 8259s is important!
- */
-static void mask_and_ack_8259A(unsigned int irq)
-{
-       unsigned int irqmask = 1 << irq;
-       unsigned long flags;
-
-       spin_lock_irqsave(&i8259A_lock, flags);
-       /*
-        * Lightweight spurious IRQ detection. We do not want
-        * to overdo spurious IRQ handling - it's usually a sign
-        * of hardware problems, so we only do the checks we can
-        * do without slowing down good hardware unnecessarily.
-        *
-        * Note that IRQ7 and IRQ15 (the two spurious IRQs
-        * usually resulting from the 8259A-1|2 PICs) occur
-        * even if the IRQ is masked in the 8259A. Thus we
-        * can check spurious 8259A IRQs without doing the
-        * quite slow i8259A_irq_real() call for every IRQ.
-        * This does not cover 100% of spurious interrupts,
-        * but should be enough to warn the user that there
-        * is something bad going on ...
-        */
-       if (cached_irq_mask & irqmask)
-               goto spurious_8259A_irq;
-       cached_irq_mask |= irqmask;
-
-handle_real_irq:
-       if (irq & 8) {
-               inb(PIC_SLAVE_IMR);     /* DUMMY - (do we need this?) */
-               outb(cached_slave_mask, PIC_SLAVE_IMR);
-               /* 'Specific EOI' to slave */
-               outb(0x60+(irq&7),PIC_SLAVE_CMD);
-                /* 'Specific EOI' to master-IRQ2 */
-               outb(0x60+PIC_CASCADE_IR,PIC_MASTER_CMD);
-       } else {
-               inb(PIC_MASTER_IMR);    /* DUMMY - (do we need this?) */
-               outb(cached_master_mask, PIC_MASTER_IMR);
-               /* 'Specific EOI' to master */
-               outb(0x60+irq,PIC_MASTER_CMD);
-       }
-       spin_unlock_irqrestore(&i8259A_lock, flags);
-       return;
-
-spurious_8259A_irq:
-       /*
-        * this is the slow path - should happen rarely.
-        */
-       if (i8259A_irq_real(irq))
-               /*
-                * oops, the IRQ _is_ in service according to the
-                * 8259A - not spurious, go handle it.
-                */
-               goto handle_real_irq;
-
-       {
-               static int spurious_irq_mask;
-               /*
-                * At this point we can be sure the IRQ is spurious,
-                * lets ACK and report it. [once per IRQ]
-                */
-               if (!(spurious_irq_mask & irqmask)) {
-                       printk(KERN_DEBUG
-                              "spurious 8259A interrupt: IRQ%d.\n", irq);
-                       spurious_irq_mask |= irqmask;
-               }
-               atomic_inc(&irq_err_count);
-               /*
-                * Theoretically we do not have to handle this IRQ,
-                * but in Linux this does not cause problems and is
-                * simpler for us.
-                */
-               goto handle_real_irq;
-       }
-}
-
-static char irq_trigger[2];
-/**
- * ELCR registers (0x4d0, 0x4d1) control edge/level of IRQ
- */
-static void restore_ELCR(char *trigger)
-{
-       outb(trigger[0], 0x4d0);
-       outb(trigger[1], 0x4d1);
-}
-
-static void save_ELCR(char *trigger)
-{
-       /* IRQ 0,1,2,8,13 are marked as reserved */
-       trigger[0] = inb(0x4d0) & 0xF8;
-       trigger[1] = inb(0x4d1) & 0xDE;
-}
-
-static int i8259A_resume(struct sys_device *dev)
-{
-       init_8259A(i8259A_auto_eoi);
-       restore_ELCR(irq_trigger);
-       return 0;
-}
-
-static int i8259A_suspend(struct sys_device *dev, pm_message_t state)
-{
-       save_ELCR(irq_trigger);
-       return 0;
-}
-
-static int i8259A_shutdown(struct sys_device *dev)
-{
-       /* Put the i8259A into a quiescent state that
-        * the kernel initialization code can get it
-        * out of.
-        */
-       outb(0xff, PIC_MASTER_IMR);     /* mask all of 8259A-1 */
-       outb(0xff, PIC_SLAVE_IMR);      /* mask all of 8259A-1 */
-       return 0;
-}
-
-static struct sysdev_class i8259_sysdev_class = {
-       .name = "i8259",
-       .suspend = i8259A_suspend,
-       .resume = i8259A_resume,
-       .shutdown = i8259A_shutdown,
-};
-
-static struct sys_device device_i8259A = {
-       .id     = 0,
-       .cls    = &i8259_sysdev_class,
-};
-
-static int __init i8259A_init_sysfs(void)
-{
-       int error = sysdev_class_register(&i8259_sysdev_class);
-       if (!error)
-               error = sysdev_register(&device_i8259A);
-       return error;
-}
-
-device_initcall(i8259A_init_sysfs);
-
-void init_8259A(int auto_eoi)
-{
-       unsigned long flags;
-
-       i8259A_auto_eoi = auto_eoi;
-
-       spin_lock_irqsave(&i8259A_lock, flags);
-
-       outb(0xff, PIC_MASTER_IMR);     /* mask all of 8259A-1 */
-       outb(0xff, PIC_SLAVE_IMR);      /* mask all of 8259A-2 */
-
-       /*
-        * outb_pic - this has to work on a wide range of PC hardware.
-        */
-       outb_pic(0x11, PIC_MASTER_CMD); /* ICW1: select 8259A-1 init */
-       /* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 */
-       outb_pic(IRQ0_VECTOR, PIC_MASTER_IMR);
-       /* 8259A-1 (the master) has a slave on IR2 */
-       outb_pic(0x04, PIC_MASTER_IMR);
-       if (auto_eoi)   /* master does Auto EOI */
-               outb_pic(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
-       else            /* master expects normal EOI */
-               outb_pic(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);
-
-       outb_pic(0x11, PIC_SLAVE_CMD);  /* ICW1: select 8259A-2 init */
-       /* ICW2: 8259A-2 IR0-7 mapped to 0x38-0x3f */
-       outb_pic(IRQ8_VECTOR, PIC_SLAVE_IMR);
-       /* 8259A-2 is a slave on master's IR2 */
-       outb_pic(PIC_CASCADE_IR, PIC_SLAVE_IMR);
-       /* (slave's support for AEOI in flat mode is to be investigated) */
-       outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
-
-       if (auto_eoi)
-               /*
-                * In AEOI mode we just have to mask the interrupt
-                * when acking.
-                */
-               i8259A_chip.mask_ack = disable_8259A_irq;
-       else
-               i8259A_chip.mask_ack = mask_and_ack_8259A;
-
-       udelay(100);            /* wait for 8259A to initialize */
-
-       outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
-       outb(cached_slave_mask, PIC_SLAVE_IMR);   /* restore slave IRQ mask */
-
-       spin_unlock_irqrestore(&i8259A_lock, flags);
-}
-
-
-
-
-/*
- * IRQ2 is cascade interrupt to second interrupt controller
- */
-
-static struct irqaction irq2 = {
-       .handler = no_action,
-       .mask = CPU_MASK_NONE,
-       .name = "cascade",
-};
-DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
-       [0 ... IRQ0_VECTOR - 1] = -1,
-       [IRQ0_VECTOR] = 0,
-       [IRQ1_VECTOR] = 1,
-       [IRQ2_VECTOR] = 2,
-       [IRQ3_VECTOR] = 3,
-       [IRQ4_VECTOR] = 4,
-       [IRQ5_VECTOR] = 5,
-       [IRQ6_VECTOR] = 6,
-       [IRQ7_VECTOR] = 7,
-       [IRQ8_VECTOR] = 8,
-       [IRQ9_VECTOR] = 9,
-       [IRQ10_VECTOR] = 10,
-       [IRQ11_VECTOR] = 11,
-       [IRQ12_VECTOR] = 12,
-       [IRQ13_VECTOR] = 13,
-       [IRQ14_VECTOR] = 14,
-       [IRQ15_VECTOR] = 15,
-       [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
-};
-
-void __init init_ISA_irqs (void)
-{
-       int i;
-
-       init_bsp_APIC();
-       init_8259A(0);
-
-       for (i = 0; i < NR_IRQS; i++) {
-               irq_desc[i].status = IRQ_DISABLED;
-               irq_desc[i].action = NULL;
-               irq_desc[i].depth = 1;
-
-               if (i < 16) {
-                       /*
-                        * 16 old-style INTA-cycle interrupts:
-                        */
-                       set_irq_chip_and_handler_name(i, &i8259A_chip,
-                                                     handle_level_irq, "XT");
-               } else {
-                       /*
-                        * 'high' PCI IRQs filled in on demand
-                        */
-                       irq_desc[i].chip = &no_irq_chip;
-               }
-       }
-}
-
-void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
-
-void __init native_init_IRQ(void)
-{
-       int i;
-
-       init_ISA_irqs();
-       /*
-        * Cover the whole vector space, no vector can escape
-        * us. (some of these will be overridden and become
-        * 'special' SMP interrupts)
-        */
-       for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) {
-               int vector = FIRST_EXTERNAL_VECTOR + i;
-               if (vector != IA32_SYSCALL_VECTOR)
-                       set_intr_gate(vector, interrupt[i]);
-       }
-
-#ifdef CONFIG_SMP
-       /*
-        * The reschedule interrupt is a CPU-to-CPU reschedule-helper
-        * IPI, driven by wakeup.
-        */
-       set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
-
-       /* IPIs for invalidation */
-       set_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
-       set_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
-       set_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
-       set_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
-       set_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
-       set_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
-       set_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
-       set_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);
-
-       /* IPI for generic function call */
-       set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
-
-       /* Low priority IPI to cleanup after moving an irq */
-       set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
-#endif
-       set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
-       set_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
-
-       /* self generated IPI for local APIC timer */
-       set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
-
-       /* IPI vectors for APIC spurious and error interrupts */
-       set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
-       set_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
-
-       if (!acpi_ioapic)
-               setup_irq(2, &irq2);
-}
index 4dc8600..0774b23 100644
@@ -1176,7 +1176,7 @@ static int __assign_irq_vector(int irq)
        offset = current_offset;
 next:
        vector += 8;
-       if (vector >= FIRST_SYSTEM_VECTOR) {
+       if (vector >= first_system_vector) {
                offset = (offset + 1) % 8;
                vector = FIRST_DEVICE_VECTOR + offset;
        }
@@ -2261,7 +2261,7 @@ void __init setup_IO_APIC(void)
        int i;
 
        /* Reserve all the system vectors. */
-       for (i = FIRST_SYSTEM_VECTOR; i < NR_VECTORS; i++)
+       for (i = first_system_vector; i < NR_VECTORS; i++)
                set_bit(i, used_vectors);
 
        enable_IO_APIC();
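
In both io_apic files the device-vector allocator now compares against the runtime first_system_vector instead of the compile-time FIRST_SYSTEM_VECTOR, so any vector that no system interrupt actually claimed stays available to devices. The search order is unchanged: step the candidate vector by 8, and when the ceiling is reached restart from FIRST_DEVICE_VECTOR with the next offset. A standalone sketch of that walk, with an assumed starting value (FIRST_DEVICE_VECTOR is not given in the hunks):

    /* Sketch of the allocation walk; 0x31 is an assumed FIRST_DEVICE_VECTOR. */
    static int next_candidate(int vector, int *offset, int first_system_vector)
    {
            vector += 8;                            /* next slot, 8 apart      */
            if (vector >= first_system_vector) {    /* hit the runtime ceiling */
                    *offset = (*offset + 1) % 8;
                    vector = 0x31 + *offset;        /* restart, next offset    */
            }
            return vector;
    }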
index ef1a8df..f1e1ae3 100644
@@ -82,6 +82,10 @@ struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
 
 static int assign_irq_vector(int irq, cpumask_t mask);
 
+int first_system_vector = 0xfe;
+
+char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE};
+
 #define __apicdebuginit  __init
 
 int sis_apic_bug; /* not actually supported, dummy for compile */
@@ -730,7 +734,7 @@ static int __assign_irq_vector(int irq, cpumask_t mask)
                offset = current_offset;
 next:
                vector += 8;
-               if (vector >= FIRST_SYSTEM_VECTOR) {
+               if (vector >= first_system_vector) {
                        /* If we run out of vectors on large boxen, must share them. */
                        offset = (offset + 1) % 8;
                        vector = FIRST_DEVICE_VECTOR + offset;
index 147352d..4e3e8ec 100644
@@ -48,6 +48,29 @@ void ack_bad_irq(unsigned int irq)
 #endif
 }
 
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+/* Debugging check for stack overflow: is there less than 1KB free? */
+static int check_stack_overflow(void)
+{
+       long sp;
+
+       __asm__ __volatile__("andl %%esp,%0" :
+                            "=r" (sp) : "0" (THREAD_SIZE - 1));
+
+       return sp < (sizeof(struct thread_info) + STACK_WARN);
+}
+
+static void print_stack_overflow(void)
+{
+       printk(KERN_WARNING "low stack detected by irq handler\n");
+       dump_stack();
+}
+
+#else
+static inline int check_stack_overflow(void) { return 0; }
+static inline void print_stack_overflow(void) { }
+#endif
+
 #ifdef CONFIG_4KSTACKS
 /*
  * per-CPU IRQ handling contexts (thread information and stack)
@@ -59,48 +82,29 @@ union irq_ctx {
 
 static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
 static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
-#endif
 
-/*
- * do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- */
-unsigned int do_IRQ(struct pt_regs *regs)
-{      
-       struct pt_regs *old_regs;
-       /* high bit used in ret_from_ code */
-       int irq = ~regs->orig_ax;
-       struct irq_desc *desc = irq_desc + irq;
-#ifdef CONFIG_4KSTACKS
-       union irq_ctx *curctx, *irqctx;
-       u32 *isp;
-#endif
+static char softirq_stack[NR_CPUS * THREAD_SIZE]
+               __attribute__((__section__(".bss.page_aligned")));
 
-       if (unlikely((unsigned)irq >= NR_IRQS)) {
-               printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
-                                       __func__, irq);
-               BUG();
-       }
+static char hardirq_stack[NR_CPUS * THREAD_SIZE]
+               __attribute__((__section__(".bss.page_aligned")));
 
-       old_regs = set_irq_regs(regs);
-       irq_enter();
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
-       /* Debugging check for stack overflow: is there less than 1KB free? */
-       {
-               long sp;
-
-               __asm__ __volatile__("andl %%esp,%0" :
-                                       "=r" (sp) : "0" (THREAD_SIZE - 1));
-               if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
-                       printk("do_IRQ: stack overflow: %ld\n",
-                               sp - sizeof(struct thread_info));
-                       dump_stack();
-               }
-       }
-#endif
+static void call_on_stack(void *func, void *stack)
+{
+       asm volatile("xchgl     %%ebx,%%esp     \n"
+                    "call      *%%edi          \n"
+                    "movl      %%ebx,%%esp     \n"
+                    : "=b" (stack)
+                    : "0" (stack),
+                      "D"(func)
+                    : "memory", "cc", "edx", "ecx", "eax");
+}
 
-#ifdef CONFIG_4KSTACKS
+static inline int
+execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+{
+       union irq_ctx *curctx, *irqctx;
+       u32 *isp, arg1, arg2;
 
        curctx = (union irq_ctx *) current_thread_info();
        irqctx = hardirq_ctx[smp_processor_id()];
@@ -111,52 +115,39 @@ unsigned int do_IRQ(struct pt_regs *regs)
         * handler) we can't do that and just have to keep using the
         * current stack (which is the irq stack already after all)
         */
-       if (curctx != irqctx) {
-               int arg1, arg2, bx;
+       if (unlikely(curctx == irqctx))
+               return 0;
 
-               /* build the stack frame on the IRQ stack */
-               isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
-               irqctx->tinfo.task = curctx->tinfo.task;
-               irqctx->tinfo.previous_esp = current_stack_pointer;
+       /* build the stack frame on the IRQ stack */
+       isp = (u32 *) ((char*)irqctx + sizeof(*irqctx));
+       irqctx->tinfo.task = curctx->tinfo.task;
+       irqctx->tinfo.previous_esp = current_stack_pointer;
 
-               /*
-                * Copy the softirq bits in preempt_count so that the
-                * softirq checks work in the hardirq context.
-                */
-               irqctx->tinfo.preempt_count =
-                       (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
-                       (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
-
-               asm volatile(
-                       "       xchgl  %%ebx,%%esp    \n"
-                       "       call   *%%edi         \n"
-                       "       movl   %%ebx,%%esp    \n"
-                       : "=a" (arg1), "=d" (arg2), "=b" (bx)
-                       :  "0" (irq),   "1" (desc),  "2" (isp),
-                          "D" (desc->handle_irq)
-                       : "memory", "cc", "ecx"
-               );
-       } else
-#endif
-               desc->handle_irq(irq, desc);
-
-       irq_exit();
-       set_irq_regs(old_regs);
+       /*
+        * Copy the softirq bits in preempt_count so that the
+        * softirq checks work in the hardirq context.
+        */
+       irqctx->tinfo.preempt_count =
+               (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
+               (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
+
+       if (unlikely(overflow))
+               call_on_stack(print_stack_overflow, isp);
+
+       asm volatile("xchgl     %%ebx,%%esp     \n"
+                    "call      *%%edi          \n"
+                    "movl      %%ebx,%%esp     \n"
+                    : "=a" (arg1), "=d" (arg2), "=b" (isp)
+                    :  "0" (irq),   "1" (desc),  "2" (isp),
+                       "D" (desc->handle_irq)
+                    : "memory", "cc", "ecx");
        return 1;
 }
 
-#ifdef CONFIG_4KSTACKS
-
-static char softirq_stack[NR_CPUS * THREAD_SIZE]
-               __attribute__((__section__(".bss.page_aligned")));
-
-static char hardirq_stack[NR_CPUS * THREAD_SIZE]
-               __attribute__((__section__(".bss.page_aligned")));
-
 /*
  * allocate per-cpu stacks for hardirq and for softirq processing
  */
-void irq_ctx_init(int cpu)
+void __cpuinit irq_ctx_init(int cpu)
 {
        union irq_ctx *irqctx;
 
@@ -164,25 +155,25 @@ void irq_ctx_init(int cpu)
                return;
 
        irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
-       irqctx->tinfo.task              = NULL;
-       irqctx->tinfo.exec_domain       = NULL;
-       irqctx->tinfo.cpu               = cpu;
-       irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
-       irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
+       irqctx->tinfo.task              = NULL;
+       irqctx->tinfo.exec_domain       = NULL;
+       irqctx->tinfo.cpu               = cpu;
+       irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
+       irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
 
        hardirq_ctx[cpu] = irqctx;
 
        irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
-       irqctx->tinfo.task              = NULL;
-       irqctx->tinfo.exec_domain       = NULL;
-       irqctx->tinfo.cpu               = cpu;
-       irqctx->tinfo.preempt_count     = 0;
-       irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
+       irqctx->tinfo.task              = NULL;
+       irqctx->tinfo.exec_domain       = NULL;
+       irqctx->tinfo.cpu               = cpu;
+       irqctx->tinfo.preempt_count     = 0;
+       irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
 
        softirq_ctx[cpu] = irqctx;
 
-       printk("CPU %u irqstacks, hard=%p soft=%p\n",
-               cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
+       printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
+              cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
 }
 
 void irq_ctx_exit(int cpu)
@@ -211,24 +202,55 @@ asmlinkage void do_softirq(void)
                /* build the stack frame on the softirq stack */
                isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
 
-               asm volatile(
-                       "       xchgl   %%ebx,%%esp     \n"
-                       "       call    __do_softirq    \n"
-                       "       movl    %%ebx,%%esp     \n"
-                       : "=b"(isp)
-                       : "0"(isp)
-                       : "memory", "cc", "edx", "ecx", "eax"
-               );
+               call_on_stack(__do_softirq, isp);
                /*
                 * Shouldnt happen, we returned above if in_interrupt():
-                */
+                */
                WARN_ON_ONCE(softirq_count());
        }
 
        local_irq_restore(flags);
 }
+
+#else
+static inline int
+execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
 #endif
 
+/*
+ * do_IRQ handles all normal device IRQ's (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ */
+unsigned int do_IRQ(struct pt_regs *regs)
+{
+       struct pt_regs *old_regs;
+       /* high bit used in ret_from_ code */
+       int overflow, irq = ~regs->orig_ax;
+       struct irq_desc *desc = irq_desc + irq;
+
+       if (unlikely((unsigned)irq >= NR_IRQS)) {
+               printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
+                                       __func__, irq);
+               BUG();
+       }
+
+       old_regs = set_irq_regs(regs);
+       irq_enter();
+
+       overflow = check_stack_overflow();
+
+       if (!execute_on_irq_stack(overflow, desc, irq)) {
+               if (unlikely(overflow))
+                       print_stack_overflow();
+               desc->handle_irq(irq, desc);
+       }
+
+       irq_exit();
+       set_irq_regs(old_regs);
+       return 1;
+}
+
 /*
  * Interrupt statistics:
  */
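
The CONFIG_DEBUG_STACKOVERFLOW block that used to live inline in do_IRQ() is now the check_stack_overflow() helper near the top of the file; do_IRQ() calls it once and passes the result down so the warning can also be printed from the IRQ stack. The check itself is unchanged: mask the stack pointer with THREAD_SIZE - 1 to get the offset inside the thread stack and warn when less than roughly a kilobyte remains above thread_info. A worked example with assumed numbers:

    /* Worked example -- the concrete numbers are assumptions, not from the patch. */
    unsigned long esp = 0xc1234a10UL;      /* sample stack pointer                 */
    unsigned long sp  = esp & (8192 - 1);  /* assume THREAD_SIZE == 8 KB           */
    /* sp == 0xa10: esp sits 2576 bytes above the base of the thread stack,        */
    /* well clear of the ~1 KB (thread_info + STACK_WARN) threshold, so the        */
    /* overflow warning does not fire for this interrupt.                          */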
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c
new file mode 100644
index 0000000..d669142
--- /dev/null
@@ -0,0 +1,114 @@
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/init.h>
+#include <linux/kernel_stat.h>
+#include <linux/sysdev.h>
+#include <linux/bitops.h>
+
+#include <asm/atomic.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/timer.h>
+#include <asm/pgtable.h>
+#include <asm/delay.h>
+#include <asm/desc.h>
+#include <asm/apic.h>
+#include <asm/arch_hooks.h>
+#include <asm/i8259.h>
+
+
+
+/*
+ * Note that on a 486, we don't want to do a SIGFPE on an irq13
+ * as the irq is unreliable, and exception 16 works correctly
+ * (ie as explained in the intel literature). On a 386, you
+ * can't use exception 16 due to bad IBM design, so we have to
+ * rely on the less exact irq13.
+ *
+ * Careful.. Not only is IRQ13 unreliable, but it is also
+ * leads to races. IBM designers who came up with it should
+ * be shot.
+ */
+
+static irqreturn_t math_error_irq(int cpl, void *dev_id)
+{
+       extern void math_error(void __user *);
+       outb(0,0xF0);
+       if (ignore_fpu_irq || !boot_cpu_data.hard_math)
+               return IRQ_NONE;
+       math_error((void __user *)get_irq_regs()->ip);
+       return IRQ_HANDLED;
+}
+
+/*
+ * New motherboards sometimes make IRQ 13 be a PCI interrupt,
+ * so allow interrupt sharing.
+ */
+static struct irqaction fpu_irq = {
+       .handler = math_error_irq,
+       .mask = CPU_MASK_NONE,
+       .name = "fpu",
+};
+
+void __init init_ISA_irqs (void)
+{
+       int i;
+
+#ifdef CONFIG_X86_LOCAL_APIC
+       init_bsp_APIC();
+#endif
+       init_8259A(0);
+
+       /*
+        * 16 old-style INTA-cycle interrupts:
+        */
+       for (i = 0; i < 16; i++) {
+               set_irq_chip_and_handler_name(i, &i8259A_chip,
+                                             handle_level_irq, "XT");
+       }
+}
+
+/* Overridden in paravirt.c */
+void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
+
+void __init native_init_IRQ(void)
+{
+       int i;
+
+       /* all the set up before the call gates are initialised */
+       pre_intr_init_hook();
+
+       /*
+        * Cover the whole vector space, no vector can escape
+        * us. (some of these will be overridden and become
+        * 'special' SMP interrupts)
+        */
+       for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) {
+               int vector = FIRST_EXTERNAL_VECTOR + i;
+               if (i >= NR_IRQS)
+                       break;
+               /* SYSCALL_VECTOR was reserved in trap_init. */
+               if (!test_bit(vector, used_vectors))
+                       set_intr_gate(vector, interrupt[i]);
+       }
+
+       /* setup after call gates are initialised (usually add in
+        * the architecture specific gates)
+        */
+       intr_init_hook();
+
+       /*
+        * External FPU? Set up irq13 if so, for
+        * original braindamaged IBM FERR coupling.
+        */
+       if (boot_cpu_data.hard_math && !cpu_has_fpu)
+               setup_irq(FPU_IRQ, &fpu_irq);
+
+       irq_ctx_init(smp_processor_id());
+}
diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c
new file mode 100644
index 0000000..31f49e8
--- /dev/null
@@ -0,0 +1,217 @@
+#include <linux/linkage.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/timex.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/init.h>
+#include <linux/kernel_stat.h>
+#include <linux/sysdev.h>
+#include <linux/bitops.h>
+
+#include <asm/acpi.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/hw_irq.h>
+#include <asm/pgtable.h>
+#include <asm/delay.h>
+#include <asm/desc.h>
+#include <asm/apic.h>
+#include <asm/i8259.h>
+
+/*
+ * Common place to define all x86 IRQ vectors
+ *
+ * This builds up the IRQ handler stubs using some ugly macros in irq.h
+ *
+ * These macros create the low-level assembly IRQ routines that save
+ * register context and call do_IRQ(). do_IRQ() then does all the
+ * operations that are needed to keep the AT (or SMP IOAPIC)
+ * interrupt-controller happy.
+ */
+
+#define IRQ_NAME2(nr) nr##_interrupt(void)
+#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
+
+/*
+ *     SMP has a few special interrupts for IPI messages
+ */
+
+#define BUILD_IRQ(nr)                          \
+       asmlinkage void IRQ_NAME(nr);           \
+       asm("\n.p2align\n"                      \
+           "IRQ" #nr "_interrupt:\n\t"         \
+           "push $~(" #nr ") ; "               \
+           "jmp common_interrupt");
+
+#define BI(x,y) \
+       BUILD_IRQ(x##y)
+
+#define BUILD_16_IRQS(x) \
+       BI(x,0) BI(x,1) BI(x,2) BI(x,3) \
+       BI(x,4) BI(x,5) BI(x,6) BI(x,7) \
+       BI(x,8) BI(x,9) BI(x,a) BI(x,b) \
+       BI(x,c) BI(x,d) BI(x,e) BI(x,f)
+
+/*
+ * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
+ * (these are usually mapped to vectors 0x30-0x3f)
+ */
+
+/*
+ * The IO-APIC gives us many more interrupt sources. Most of these
+ * are unused but an SMP system is supposed to have enough memory ...
+ * sometimes (mostly wrt. hw bugs) we get corrupted vectors all
+ * across the spectrum, so we really want to be prepared to get all
+ * of these. Plus, more powerful systems might have more than 64
+ * IO-APIC registers.
+ *
+ * (these are usually mapped into the 0x30-0xff vector range)
+ */
+                                     BUILD_16_IRQS(0x2) BUILD_16_IRQS(0x3)
+BUILD_16_IRQS(0x4) BUILD_16_IRQS(0x5) BUILD_16_IRQS(0x6) BUILD_16_IRQS(0x7)
+BUILD_16_IRQS(0x8) BUILD_16_IRQS(0x9) BUILD_16_IRQS(0xa) BUILD_16_IRQS(0xb)
+BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd) BUILD_16_IRQS(0xe) BUILD_16_IRQS(0xf)
+
+#undef BUILD_16_IRQS
+#undef BI
+
+
+#define IRQ(x,y) \
+       IRQ##x##y##_interrupt
+
+#define IRQLIST_16(x) \
+       IRQ(x,0), IRQ(x,1), IRQ(x,2), IRQ(x,3), \
+       IRQ(x,4), IRQ(x,5), IRQ(x,6), IRQ(x,7), \
+       IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \
+       IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f)
+
+/* for the irq vectors */
+static void (*__initdata interrupt[NR_VECTORS - FIRST_EXTERNAL_VECTOR])(void) = {
+                                         IRQLIST_16(0x2), IRQLIST_16(0x3),
+       IRQLIST_16(0x4), IRQLIST_16(0x5), IRQLIST_16(0x6), IRQLIST_16(0x7),
+       IRQLIST_16(0x8), IRQLIST_16(0x9), IRQLIST_16(0xa), IRQLIST_16(0xb),
+       IRQLIST_16(0xc), IRQLIST_16(0xd), IRQLIST_16(0xe), IRQLIST_16(0xf)
+};
+
+#undef IRQ
+#undef IRQLIST_16
+
+
+
+
+/*
+ * IRQ2 is cascade interrupt to second interrupt controller
+ */
+
+static struct irqaction irq2 = {
+       .handler = no_action,
+       .mask = CPU_MASK_NONE,
+       .name = "cascade",
+};
+DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
+       [0 ... IRQ0_VECTOR - 1] = -1,
+       [IRQ0_VECTOR] = 0,
+       [IRQ1_VECTOR] = 1,
+       [IRQ2_VECTOR] = 2,
+       [IRQ3_VECTOR] = 3,
+       [IRQ4_VECTOR] = 4,
+       [IRQ5_VECTOR] = 5,
+       [IRQ6_VECTOR] = 6,
+       [IRQ7_VECTOR] = 7,
+       [IRQ8_VECTOR] = 8,
+       [IRQ9_VECTOR] = 9,
+       [IRQ10_VECTOR] = 10,
+       [IRQ11_VECTOR] = 11,
+       [IRQ12_VECTOR] = 12,
+       [IRQ13_VECTOR] = 13,
+       [IRQ14_VECTOR] = 14,
+       [IRQ15_VECTOR] = 15,
+       [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
+};
+
+static void __init init_ISA_irqs (void)
+{
+       int i;
+
+       init_bsp_APIC();
+       init_8259A(0);
+
+       for (i = 0; i < NR_IRQS; i++) {
+               irq_desc[i].status = IRQ_DISABLED;
+               irq_desc[i].action = NULL;
+               irq_desc[i].depth = 1;
+
+               if (i < 16) {
+                       /*
+                        * 16 old-style INTA-cycle interrupts:
+                        */
+                       set_irq_chip_and_handler_name(i, &i8259A_chip,
+                                                     handle_level_irq, "XT");
+               } else {
+                       /*
+                        * 'high' PCI IRQs filled in on demand
+                        */
+                       irq_desc[i].chip = &no_irq_chip;
+               }
+       }
+}
+
+void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
+
+void __init native_init_IRQ(void)
+{
+       int i;
+
+       init_ISA_irqs();
+       /*
+        * Cover the whole vector space, no vector can escape
+        * us. (some of these will be overridden and become
+        * 'special' SMP interrupts)
+        */
+       for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) {
+               int vector = FIRST_EXTERNAL_VECTOR + i;
+               if (vector != IA32_SYSCALL_VECTOR)
+                       set_intr_gate(vector, interrupt[i]);
+       }
+
+#ifdef CONFIG_SMP
+       /*
+        * The reschedule interrupt is a CPU-to-CPU reschedule-helper
+        * IPI, driven by wakeup.
+        */
+       alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
+
+       /* IPIs for invalidation */
+       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
+       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
+       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
+       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
+       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
+       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
+       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
+       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);
+
+       /* IPI for generic function call */
+       alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
+
+       /* Low priority IPI to cleanup after moving an irq */
+       set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
+#endif
+       alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
+       alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
+
+       /* self generated IPI for local APIC timer */
+       alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
+
+       /* IPI vectors for APIC spurious and error interrupts */
+       alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
+       alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
+
+       if (!acpi_ioapic)
+               setup_irq(2, &irq2);
+}
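
For orientation, not as part of the patch: the stubs emitted by BUILD_IRQ() push the one's complement of their vector number and jump to common_interrupt, and the per-CPU vector_irq[] table initialized above is what turns that vector back into a Linux IRQ number. A minimal dispatch sketch along those lines, assuming the standard genirq helpers and omitting idle and accounting details:

/* simplified dispatch sketch -- not the tree's actual do_IRQ() */
asmlinkage unsigned int sketch_do_IRQ(struct pt_regs *regs)
{
	unsigned int vector = ~regs->orig_ax;		/* undo the "push $~(nr)" */
	int irq = __get_cpu_var(vector_irq)[vector];	/* vector -> irq translation */

	irq_enter();
	if (irq >= 0)
		generic_handle_irq(irq);	/* runs e.g. handle_level_irq for ISA irqs */
	irq_exit();
	return 1;
}
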
index a2b0307..ba7d19e 100644 (file)
@@ -33,8 +33,7 @@
 #include <asm/apic.h>
 #include <asm/timer.h>
 #include <asm/i8253.h>
-
-#include <irq_vectors.h>
+#include <asm/irq_vectors.h>
 
 #define VMI_ONESHOT  (VMI_ALARM_IS_ONESHOT  | VMI_CYCLES_REAL | vmi_get_alarm_wiring())
 #define VMI_PERIODIC (VMI_ALARM_IS_PERIODIC | VMI_CYCLES_REAL | vmi_get_alarm_wiring())
index cef9cb1..d8b2cfd 100644 (file)
 #include <asm/io.h>
 #include <asm/apic.h>
 #include <asm/i8259.h>
+#include <asm/irq_vectors.h>
 
 #include "cobalt.h"
-#include "irq_vectors.h"
-
 
 static DEFINE_SPINLOCK(cobalt_lock);
 
index 268a012..b3875d4 100644 (file)
@@ -311,6 +311,28 @@ static inline void set_intr_gate(unsigned int n, void *addr)
        _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
 }
 
+#define SYS_VECTOR_FREE                0
+#define SYS_VECTOR_ALLOCED     1
+
+extern int first_system_vector;
+extern char system_vectors[];
+
+static inline void alloc_system_vector(int vector)
+{
+       if (system_vectors[vector] == SYS_VECTOR_FREE) {
+               system_vectors[vector] = SYS_VECTOR_ALLOCED;
+               if (first_system_vector > vector)
+                       first_system_vector = vector;
+       } else
+               BUG();
+}
+
+static inline void alloc_intr_gate(unsigned int n, void *addr)
+{
+       alloc_system_vector(n);
+       set_intr_gate(n, addr);
+}
+
 /*
  * This routine sets up an interrupt gate at directory privilege level 3.
  */
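
alloc_intr_gate() above is the bookkeeping variant of set_intr_gate(): it records the vector in system_vectors[] and lowers first_system_vector to the smallest vector claimed this way. A hypothetical caller, mirroring the ones added in apic_32.c and irqinit_64.c, could look like this; a second allocation of the same vector trips the BUG() in alloc_system_vector():

/* hypothetical example, not part of the patch */
static void __init example_claim_system_vectors(void)
{
	alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
	alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);

	/* after this, first_system_vector <= LOCAL_TIMER_VECTOR (0xef) */
	BUG_ON(first_system_vector > LOCAL_TIMER_VECTOR);
}
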
index 1de931b..0f85046 100644 (file)
@@ -44,4 +44,6 @@ DECLARE_PER_CPU(int, x2apic_extra_bits);
 extern void uv_cpu_init(void);
 extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
 
+extern void setup_apic_routing(void);
+
 #endif
index bf02539..1428b41 100644 (file)
@@ -1,5 +1,106 @@
+#ifndef _ASM_HW_IRQ_H
+#define _ASM_HW_IRQ_H
+
+/*
+ * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
+ *
+ * moved some of the old arch/i386/kernel/irq.h to here. VY
+ *
+ * IRQ/IPI changes taken from work by Thomas Radke
+ * <tomsoft@informatik.tu-chemnitz.de>
+ *
+ * hacked by Andi Kleen for x86-64.
+ * unified by tglx
+ */
+
+#include <asm/irq_vectors.h>
+
+#ifndef __ASSEMBLY__
+
+#include <linux/percpu.h>
+#include <linux/profile.h>
+#include <linux/smp.h>
+
+#include <asm/atomic.h>
+#include <asm/irq.h>
+#include <asm/sections.h>
+
+#define platform_legacy_irq(irq)       ((irq) < 16)
+
+/* Interrupt handlers registered during init_IRQ */
+extern void apic_timer_interrupt(void);
+extern void error_interrupt(void);
+extern void spurious_interrupt(void);
+extern void thermal_interrupt(void);
+extern void reschedule_interrupt(void);
+
+extern void invalidate_interrupt(void);
+extern void invalidate_interrupt0(void);
+extern void invalidate_interrupt1(void);
+extern void invalidate_interrupt2(void);
+extern void invalidate_interrupt3(void);
+extern void invalidate_interrupt4(void);
+extern void invalidate_interrupt5(void);
+extern void invalidate_interrupt6(void);
+extern void invalidate_interrupt7(void);
+
+extern void irq_move_cleanup_interrupt(void);
+extern void threshold_interrupt(void);
+
+extern void call_function_interrupt(void);
+
+/* PIC specific functions */
+extern void disable_8259A_irq(unsigned int irq);
+extern void enable_8259A_irq(unsigned int irq);
+extern int i8259A_irq_pending(unsigned int irq);
+extern void make_8259A_irq(unsigned int irq);
+extern void init_8259A(int aeoi);
+
+/* IOAPIC */
+#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
+extern unsigned long io_apic_irqs;
+
+extern void init_VISWS_APIC_irqs(void);
+extern void setup_IO_APIC(void);
+extern void disable_IO_APIC(void);
+extern void print_IO_APIC(void);
+extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
+extern void setup_ioapic_dest(void);
+
+#ifdef CONFIG_X86_64
+extern void enable_IO_APIC(void);
+#endif
+
+/* IPI functions */
+extern void send_IPI_self(int vector);
+extern void send_IPI(int dest, int vector);
+
+/* Statistics */
+extern atomic_t irq_err_count;
+extern atomic_t irq_mis_count;
+
+/* EISA */
+extern void eisa_set_level_irq(unsigned int irq);
+
+/* Voyager functions */
+extern asmlinkage void vic_cpi_interrupt(void);
+extern asmlinkage void vic_sys_interrupt(void);
+extern asmlinkage void vic_cmn_interrupt(void);
+extern asmlinkage void qic_timer_interrupt(void);
+extern asmlinkage void qic_invalidate_interrupt(void);
+extern asmlinkage void qic_reschedule_interrupt(void);
+extern asmlinkage void qic_enable_irq_interrupt(void);
+extern asmlinkage void qic_call_function_interrupt(void);
+
 #ifdef CONFIG_X86_32
-# include "hw_irq_32.h"
+extern void (*const interrupt[NR_IRQS])(void);
 #else
-# include "hw_irq_64.h"
+typedef int vector_irq_t[NR_VECTORS];
+DECLARE_PER_CPU(vector_irq_t, vector_irq);
+extern void __setup_vector_irq(int cpu);
+extern spinlock_t vector_lock;
+#endif
+
+#endif /* !__ASSEMBLY__ */
+
 #endif
diff --git a/include/asm-x86/hw_irq_32.h b/include/asm-x86/hw_irq_32.h
deleted file mode 100644 (file)
index ea88054..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-#ifndef _ASM_HW_IRQ_H
-#define _ASM_HW_IRQ_H
-
-/*
- *     linux/include/asm/hw_irq.h
- *
- *     (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
- *
- *     moved some of the old arch/i386/kernel/irq.h to here. VY
- *
- *     IRQ/IPI changes taken from work by Thomas Radke
- *     <tomsoft@informatik.tu-chemnitz.de>
- */
-
-#include <linux/profile.h>
-#include <asm/atomic.h>
-#include <asm/irq.h>
-#include <asm/sections.h>
-
-#define NMI_VECTOR             0x02
-
-/*
- * Various low-level irq details needed by irq.c, process.c,
- * time.c, io_apic.c and smp.c
- *
- * Interrupt entry/exit code at both C and assembly level
- */
-
-extern void (*const interrupt[NR_IRQS])(void);
-
-#ifdef CONFIG_SMP
-void reschedule_interrupt(void);
-void invalidate_interrupt(void);
-void call_function_interrupt(void);
-#endif
-
-#ifdef CONFIG_X86_LOCAL_APIC
-void apic_timer_interrupt(void);
-void error_interrupt(void);
-void spurious_interrupt(void);
-void thermal_interrupt(void);
-#define platform_legacy_irq(irq)       ((irq) < 16)
-#endif
-
-void disable_8259A_irq(unsigned int irq);
-void enable_8259A_irq(unsigned int irq);
-int i8259A_irq_pending(unsigned int irq);
-void make_8259A_irq(unsigned int irq);
-void init_8259A(int aeoi);
-void send_IPI_self(int vector);
-void init_VISWS_APIC_irqs(void);
-void setup_IO_APIC(void);
-void disable_IO_APIC(void);
-void print_IO_APIC(void);
-int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
-void send_IPI(int dest, int vector);
-void setup_ioapic_dest(void);
-
-extern unsigned long io_apic_irqs;
-
-extern atomic_t irq_err_count;
-extern atomic_t irq_mis_count;
-
-#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
-
-#endif /* _ASM_HW_IRQ_H */
diff --git a/include/asm-x86/hw_irq_64.h b/include/asm-x86/hw_irq_64.h
deleted file mode 100644 (file)
index 0062ef3..0000000
+++ /dev/null
@@ -1,173 +0,0 @@
-#ifndef _ASM_HW_IRQ_H
-#define _ASM_HW_IRQ_H
-
-/*
- *     linux/include/asm/hw_irq.h
- *
- *     (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
- *
- *     moved some of the old arch/i386/kernel/irq.h to here. VY
- *
- *     IRQ/IPI changes taken from work by Thomas Radke
- *     <tomsoft@informatik.tu-chemnitz.de>
- *
- *     hacked by Andi Kleen for x86-64.
- */
-
-#ifndef __ASSEMBLY__
-#include <asm/atomic.h>
-#include <asm/irq.h>
-#include <linux/profile.h>
-#include <linux/smp.h>
-#include <linux/percpu.h>
-#endif
-
-#define NMI_VECTOR             0x02
-/*
- * IDT vectors usable for external interrupt sources start
- * at 0x20:
- */
-#define FIRST_EXTERNAL_VECTOR  0x20
-
-#define IA32_SYSCALL_VECTOR    0x80
-
-
-/* Reserve the lowest usable priority level 0x20 - 0x2f for triggering
- * cleanup after irq migration.
- */
-#define IRQ_MOVE_CLEANUP_VECTOR        FIRST_EXTERNAL_VECTOR
-
-/*
- * Vectors 0x30-0x3f are used for ISA interrupts.
- */
-#define IRQ0_VECTOR            (FIRST_EXTERNAL_VECTOR + 0x10)
-#define IRQ1_VECTOR            (IRQ0_VECTOR + 1)
-#define IRQ2_VECTOR            (IRQ0_VECTOR + 2)
-#define IRQ3_VECTOR            (IRQ0_VECTOR + 3)
-#define IRQ4_VECTOR            (IRQ0_VECTOR + 4)
-#define IRQ5_VECTOR            (IRQ0_VECTOR + 5)
-#define IRQ6_VECTOR            (IRQ0_VECTOR + 6)
-#define IRQ7_VECTOR            (IRQ0_VECTOR + 7)
-#define IRQ8_VECTOR            (IRQ0_VECTOR + 8)
-#define IRQ9_VECTOR            (IRQ0_VECTOR + 9)
-#define IRQ10_VECTOR           (IRQ0_VECTOR + 10)
-#define IRQ11_VECTOR           (IRQ0_VECTOR + 11)
-#define IRQ12_VECTOR           (IRQ0_VECTOR + 12)
-#define IRQ13_VECTOR           (IRQ0_VECTOR + 13)
-#define IRQ14_VECTOR           (IRQ0_VECTOR + 14)
-#define IRQ15_VECTOR           (IRQ0_VECTOR + 15)
-
-/*
- * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
- *
- *  some of the following vectors are 'rare', they are merged
- *  into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
- *  TLB, reschedule and local APIC vectors are performance-critical.
- */
-#define SPURIOUS_APIC_VECTOR   0xff
-#define ERROR_APIC_VECTOR      0xfe
-#define RESCHEDULE_VECTOR      0xfd
-#define CALL_FUNCTION_VECTOR   0xfc
-/* fb free - please don't readd KDB here because it's useless
-   (hint - think what a NMI bit does to a vector) */
-#define THERMAL_APIC_VECTOR    0xfa
-#define THRESHOLD_APIC_VECTOR   0xf9
-/* f8 free */
-#define INVALIDATE_TLB_VECTOR_END      0xf7
-#define INVALIDATE_TLB_VECTOR_START    0xf0    /* f0-f7 used for TLB flush */
-
-#define NUM_INVALIDATE_TLB_VECTORS     8
-
-/*
- * Local APIC timer IRQ vector is on a different priority level,
- * to work around the 'lost local interrupt if more than 2 IRQ
- * sources per level' errata.
- */
-#define LOCAL_TIMER_VECTOR     0xef
-
-/*
- * First APIC vector available to drivers: (vectors 0x30-0xee)
- * we start at 0x41 to spread out vectors evenly between priority
- * levels. (0x80 is the syscall vector)
- */
-#define FIRST_DEVICE_VECTOR    (IRQ15_VECTOR + 2)
-#define FIRST_SYSTEM_VECTOR    0xef   /* duplicated in irq.h */
-
-
-#ifndef __ASSEMBLY__
-
-/* Interrupt handlers registered during init_IRQ */
-void apic_timer_interrupt(void);
-void spurious_interrupt(void);
-void error_interrupt(void);
-void reschedule_interrupt(void);
-void call_function_interrupt(void);
-void irq_move_cleanup_interrupt(void);
-void invalidate_interrupt0(void);
-void invalidate_interrupt1(void);
-void invalidate_interrupt2(void);
-void invalidate_interrupt3(void);
-void invalidate_interrupt4(void);
-void invalidate_interrupt5(void);
-void invalidate_interrupt6(void);
-void invalidate_interrupt7(void);
-void thermal_interrupt(void);
-void threshold_interrupt(void);
-void i8254_timer_resume(void);
-
-typedef int vector_irq_t[NR_VECTORS];
-DECLARE_PER_CPU(vector_irq_t, vector_irq);
-extern void __setup_vector_irq(int cpu);
-extern spinlock_t vector_lock;
-
-/*
- * Various low-level irq details needed by irq.c, process.c,
- * time.c, io_apic.c and smp.c
- *
- * Interrupt entry/exit code at both C and assembly level
- */
-
-extern void disable_8259A_irq(unsigned int irq);
-extern void enable_8259A_irq(unsigned int irq);
-extern int i8259A_irq_pending(unsigned int irq);
-extern void make_8259A_irq(unsigned int irq);
-extern void init_8259A(int aeoi);
-extern void send_IPI_self(int vector);
-extern void init_VISWS_APIC_irqs(void);
-extern void setup_IO_APIC(void);
-extern void enable_IO_APIC(void);
-extern void disable_IO_APIC(void);
-extern void print_IO_APIC(void);
-extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
-extern void send_IPI(int dest, int vector);
-extern void setup_ioapic_dest(void);
-extern void native_init_IRQ(void);
-
-extern unsigned long io_apic_irqs;
-
-extern atomic_t irq_err_count;
-extern atomic_t irq_mis_count;
-
-#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
-
-#include <asm/ptrace.h>
-
-#define IRQ_NAME2(nr) nr##_interrupt(void)
-#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
-
-/*
- *     SMP has a few special interrupts for IPI messages
- */
-
-#define BUILD_IRQ(nr)                          \
-       asmlinkage void IRQ_NAME(nr);           \
-       asm("\n.p2align\n"                      \
-           "IRQ" #nr "_interrupt:\n\t"         \
-           "push $~(" #nr ") ; "               \
-           "jmp common_interrupt");
-
-#define platform_legacy_irq(irq)       ((irq) < 16)
-
-#endif
-
-#endif /* _ASM_HW_IRQ_H */
index 45d4df3..2f98df9 100644 (file)
@@ -55,4 +55,6 @@ static inline void outb_pic(unsigned char value, unsigned int port)
        udelay(2);
 }
 
+extern struct irq_chip i8259A_chip;
+
 #endif /* __ASM_I8259_H__ */
index 7ba9054..1a29257 100644 (file)
@@ -1,5 +1,50 @@
-#ifdef CONFIG_X86_32
-# include "irq_32.h"
+#ifndef _ASM_IRQ_H
+#define _ASM_IRQ_H
+/*
+ *     (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
+ *
+ *     IRQ/IPI changes taken from work by Thomas Radke
+ *     <tomsoft@informatik.tu-chemnitz.de>
+ */
+
+#include <asm/apicdef.h>
+#include <asm/irq_vectors.h>
+
+static inline int irq_canonicalize(int irq)
+{
+       return ((irq == 2) ? 9 : irq);
+}
+
+#ifdef CONFIG_X86_LOCAL_APIC
+# define ARCH_HAS_NMI_WATCHDOG
+#endif
+
+#ifdef CONFIG_4KSTACKS
+  extern void irq_ctx_init(int cpu);
+  extern void irq_ctx_exit(int cpu);
+# define __ARCH_HAS_DO_SOFTIRQ
 #else
-# include "irq_64.h"
+# define irq_ctx_init(cpu) do { } while (0)
+# define irq_ctx_exit(cpu) do { } while (0)
+# ifdef CONFIG_X86_64
+#  define __ARCH_HAS_DO_SOFTIRQ
+# endif
+#endif
+
+#ifdef CONFIG_IRQBALANCE
+extern int irqbalance_disable(char *str);
+#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+#include <linux/cpumask.h>
+extern void fixup_irqs(cpumask_t map);
 #endif
+
+extern unsigned int do_IRQ(struct pt_regs *regs);
+extern void init_IRQ(void);
+extern void native_init_IRQ(void);
+
+/* Interrupt vector management */
+extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
+
+#endif /* _ASM_IRQ_H */
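
The used_vectors bitmap above carries over from the old irq_32.h: it records IDT vectors claimed at init time so that later vector assignment can avoid them. A hedged sketch of the reserve pattern, with an invented helper name for illustration:

/* illustrative only -- helper name is made up */
static int example_reserve_vector(unsigned int vector)
{
	if (vector >= NR_VECTORS)
		return -EINVAL;
	if (test_and_set_bit(vector, used_vectors))
		return -EBUSY;		/* someone already owns this vector */
	return 0;
}
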
diff --git a/include/asm-x86/irq_32.h b/include/asm-x86/irq_32.h
deleted file mode 100644 (file)
index 0b79f31..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef _ASM_IRQ_H
-#define _ASM_IRQ_H
-
-/*
- *     linux/include/asm/irq.h
- *
- *     (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
- *
- *     IRQ/IPI changes taken from work by Thomas Radke
- *     <tomsoft@informatik.tu-chemnitz.de>
- */
-
-#include <linux/sched.h>
-/* include comes from machine specific directory */
-#include "irq_vectors.h"
-#include <asm/thread_info.h>
-
-static inline int irq_canonicalize(int irq)
-{
-       return ((irq == 2) ? 9 : irq);
-}
-
-#ifdef CONFIG_X86_LOCAL_APIC
-# define ARCH_HAS_NMI_WATCHDOG         /* See include/linux/nmi.h */
-#endif
-
-#ifdef CONFIG_4KSTACKS
-  extern void irq_ctx_init(int cpu);
-  extern void irq_ctx_exit(int cpu);
-# define __ARCH_HAS_DO_SOFTIRQ
-#else
-# define irq_ctx_init(cpu) do { } while (0)
-# define irq_ctx_exit(cpu) do { } while (0)
-#endif
-
-#ifdef CONFIG_IRQBALANCE
-extern int irqbalance_disable(char *str);
-#endif
-
-#ifdef CONFIG_HOTPLUG_CPU
-extern void fixup_irqs(cpumask_t map);
-#endif
-
-unsigned int do_IRQ(struct pt_regs *regs);
-void init_IRQ(void);
-void __init native_init_IRQ(void);
-
-/* Interrupt vector management */
-extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
-
-#endif /* _ASM_IRQ_H */
diff --git a/include/asm-x86/irq_64.h b/include/asm-x86/irq_64.h
deleted file mode 100644 (file)
index 083d35a..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef _ASM_IRQ_H
-#define _ASM_IRQ_H
-
-/*
- *     linux/include/asm/irq.h
- *
- *     (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
- *
- *     IRQ/IPI changes taken from work by Thomas Radke
- *     <tomsoft@informatik.tu-chemnitz.de>
- */
-
-#define TIMER_IRQ 0
-
-/*
- * 16 8259A IRQ's, 208 potential APIC interrupt sources.
- * Right now the APIC is mostly only used for SMP.
- * 256 vectors is an architectural limit. (we can have
- * more than 256 devices theoretically, but they will
- * have to use shared interrupts)
- * Since vectors 0x00-0x1f are used/reserved for the CPU,
- * the usable vector space is 0x20-0xff (224 vectors)
- */
-
-/*
- * The maximum number of vectors supported by x86_64 processors
- * is limited to 256. For processors other than x86_64, NR_VECTORS
- * should be changed accordingly.
- */
-#define NR_VECTORS 256
-
-#define FIRST_SYSTEM_VECTOR    0xef   /* duplicated in hw_irq.h */
-
-#define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
-#define NR_IRQ_VECTORS NR_IRQS
-
-static inline int irq_canonicalize(int irq)
-{
-       return ((irq == 2) ? 9 : irq);
-}
-
-#define ARCH_HAS_NMI_WATCHDOG          /* See include/linux/nmi.h */
-
-#ifdef CONFIG_HOTPLUG_CPU
-#include <linux/cpumask.h>
-extern void fixup_irqs(cpumask_t map);
-#endif
-
-#define __ARCH_HAS_DO_SOFTIRQ 1
-
-#endif /* _ASM_IRQ_H */
diff --git a/include/asm-x86/irq_vectors.h b/include/asm-x86/irq_vectors.h
new file mode 100644 (file)
index 0000000..b58581e
--- /dev/null
@@ -0,0 +1,169 @@
+#ifndef _ASM_IRQ_VECTORS_H
+#define _ASM_IRQ_VECTORS_H
+
+#include <linux/threads.h>
+
+#define NMI_VECTOR             0x02
+
+/*
+ * IDT vectors usable for external interrupt sources start
+ * at 0x20:
+ */
+#define FIRST_EXTERNAL_VECTOR  0x20
+
+#ifdef CONFIG_X86_32
+# define SYSCALL_VECTOR                0x80
+#else
+# define IA32_SYSCALL_VECTOR   0x80
+#endif
+
+/*
+ * Reserve the lowest usable priority level 0x20 - 0x2f for triggering
+ * cleanup after irq migration on 64 bit.
+ */
+#define IRQ_MOVE_CLEANUP_VECTOR        FIRST_EXTERNAL_VECTOR
+
+/*
+ * Vectors 0x20-0x2f are used for ISA interrupts on 32 bit.
+ * Vectors 0x30-0x3f are used for ISA interrupts on 64 bit.
+ */
+#ifdef CONFIG_X86_32
+#define IRQ0_VECTOR            (FIRST_EXTERNAL_VECTOR)
+#else
+#define IRQ0_VECTOR            (FIRST_EXTERNAL_VECTOR + 0x10)
+#endif
+#define IRQ1_VECTOR            (IRQ0_VECTOR + 1)
+#define IRQ2_VECTOR            (IRQ0_VECTOR + 2)
+#define IRQ3_VECTOR            (IRQ0_VECTOR + 3)
+#define IRQ4_VECTOR            (IRQ0_VECTOR + 4)
+#define IRQ5_VECTOR            (IRQ0_VECTOR + 5)
+#define IRQ6_VECTOR            (IRQ0_VECTOR + 6)
+#define IRQ7_VECTOR            (IRQ0_VECTOR + 7)
+#define IRQ8_VECTOR            (IRQ0_VECTOR + 8)
+#define IRQ9_VECTOR            (IRQ0_VECTOR + 9)
+#define IRQ10_VECTOR           (IRQ0_VECTOR + 10)
+#define IRQ11_VECTOR           (IRQ0_VECTOR + 11)
+#define IRQ12_VECTOR           (IRQ0_VECTOR + 12)
+#define IRQ13_VECTOR           (IRQ0_VECTOR + 13)
+#define IRQ14_VECTOR           (IRQ0_VECTOR + 14)
+#define IRQ15_VECTOR           (IRQ0_VECTOR + 15)
+
+/*
+ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
+ *
+ *  some of the following vectors are 'rare', they are merged
+ *  into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
+ *  TLB, reschedule and local APIC vectors are performance-critical.
+ *
+ *  Vectors 0xf0-0xfa are free (reserved for future Linux use).
+ */
+#ifdef CONFIG_X86_32
+
+# define SPURIOUS_APIC_VECTOR          0xff
+# define ERROR_APIC_VECTOR             0xfe
+# define INVALIDATE_TLB_VECTOR         0xfd
+# define RESCHEDULE_VECTOR             0xfc
+# define CALL_FUNCTION_VECTOR          0xfb
+# define THERMAL_APIC_VECTOR           0xf0
+
+#else
+
+#define SPURIOUS_APIC_VECTOR           0xff
+#define ERROR_APIC_VECTOR              0xfe
+#define RESCHEDULE_VECTOR              0xfd
+#define CALL_FUNCTION_VECTOR           0xfc
+#define THERMAL_APIC_VECTOR            0xfa
+#define THRESHOLD_APIC_VECTOR          0xf9
+#define INVALIDATE_TLB_VECTOR_END      0xf7
+#define INVALIDATE_TLB_VECTOR_START    0xf0    /* f0-f7 used for TLB flush */
+
+#define NUM_INVALIDATE_TLB_VECTORS     8
+
+#endif
+
+/*
+ * Local APIC timer IRQ vector is on a different priority level,
+ * to work around the 'lost local interrupt if more than 2 IRQ
+ * sources per level' errata.
+ */
+#define LOCAL_TIMER_VECTOR     0xef
+
+/*
+ * First APIC vector available to drivers: (vectors 0x30-0xee) we
+ * start at 0x31 (0x41 on 64 bit) to spread out vectors evenly between
+ * priority levels. (0x80 is the syscall vector)
+ */
+#ifdef CONFIG_X86_32
+# define FIRST_DEVICE_VECTOR   0x31
+#else
+# define FIRST_DEVICE_VECTOR   (IRQ15_VECTOR + 2)
+#endif
+
+#define NR_VECTORS             256
+
+#define FPU_IRQ                        13
+
+#define        FIRST_VM86_IRQ          3
+#define LAST_VM86_IRQ          15
+#define invalid_vm86_irq(irq)  ((irq) < 3 || (irq) > 15)
+
+#if !defined(CONFIG_X86_VISWS) && !defined(CONFIG_X86_VOYAGER)
+
+# if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT)
+
+#  define NR_IRQS              224
+
+#  if (224 >= 32 * NR_CPUS)
+#   define NR_IRQ_VECTORS      NR_IRQS
+#  else
+#   define NR_IRQ_VECTORS      (32 * NR_CPUS)
+#  endif
+
+# else /* IO_APIC || PARAVIRT */
+
+#  define NR_IRQS              16
+#  define NR_IRQ_VECTORS       NR_IRQS
+
+# endif
+
+#else /* !VISWS && !VOYAGER */
+
+# define NR_IRQS               224
+# define NR_IRQ_VECTORS                NR_IRQS
+
+#endif /* VISWS */
+
+/* Voyager specific defines */
+/* These define the CPIs we use in linux */
+#define VIC_CPI_LEVEL0                 0
+#define VIC_CPI_LEVEL1                 1
+/* now the fake CPIs */
+#define VIC_TIMER_CPI                  2
+#define VIC_INVALIDATE_CPI             3
+#define VIC_RESCHEDULE_CPI             4
+#define VIC_ENABLE_IRQ_CPI             5
+#define VIC_CALL_FUNCTION_CPI          6
+
+/* Now the QIC CPIs:  Since we don't need the two initial levels,
+ * these are 2 less than the VIC CPIs */
+#define QIC_CPI_OFFSET                 1
+#define QIC_TIMER_CPI                  (VIC_TIMER_CPI - QIC_CPI_OFFSET)
+#define QIC_INVALIDATE_CPI             (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET)
+#define QIC_RESCHEDULE_CPI             (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
+#define QIC_ENABLE_IRQ_CPI             (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
+#define QIC_CALL_FUNCTION_CPI          (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
+
+#define VIC_START_FAKE_CPI             VIC_TIMER_CPI
+#define VIC_END_FAKE_CPI               VIC_CALL_FUNCTION_CPI
+
+/* this is the SYS_INT CPI. */
+#define VIC_SYS_INT                    8
+#define VIC_CMN_INT                    15
+
+/* This is the boot CPI for alternate processors.  It gets overwritten
+ * by the above once the system has activated all available processors */
+#define VIC_CPU_BOOT_CPI               VIC_CPI_LEVEL0
+#define VIC_CPU_BOOT_ERRATA_CPI                (VIC_CPI_LEVEL0 + 8)
+
+
+#endif /* _ASM_IRQ_VECTORS_H */
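
Putting the layout above together: a legacy ISA IRQ ends up on IRQ0_VECTOR + irq (0x20-based on 32 bit, 0x30-based on 64 bit), while system vectors grow downward from 0xff. A hypothetical helper that makes the ISA mapping explicit:

/* hypothetical helper, for illustration only */
static inline unsigned int isa_irq_to_vector(unsigned int irq)
{
	BUG_ON(irq >= 16);		/* only the 8259 lines map this way */
	return IRQ0_VECTOR + irq;	/* 0x20+irq on 32 bit, 0x30+irq on 64 bit */
}
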
diff --git a/include/asm-x86/mach-default/irq_vectors.h b/include/asm-x86/mach-default/irq_vectors.h
deleted file mode 100644 (file)
index 881c63c..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * This file should contain #defines for all of the interrupt vector
- * numbers used by this architecture.
- *
- * In addition, there are some standard defines:
- *
- *     FIRST_EXTERNAL_VECTOR:
- *             The first free place for external interrupts
- *
- *     SYSCALL_VECTOR:
- *             The IRQ vector a syscall makes the user to kernel transition
- *             under.
- *
- *     TIMER_IRQ:
- *             The IRQ number the timer interrupt comes in at.
- *
- *     NR_IRQS:
- *             The total number of interrupt vectors (including all the
- *             architecture specific interrupts) needed.
- *
- */                    
-#ifndef _ASM_IRQ_VECTORS_H
-#define _ASM_IRQ_VECTORS_H
-
-/*
- * IDT vectors usable for external interrupt sources start
- * at 0x20:
- */
-#define FIRST_EXTERNAL_VECTOR  0x20
-
-#define SYSCALL_VECTOR         0x80
-
-/*
- * Vectors 0x20-0x2f are used for ISA interrupts.
- */
-
-/*
- * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
- *
- *  some of the following vectors are 'rare', they are merged
- *  into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
- *  TLB, reschedule and local APIC vectors are performance-critical.
- *
- *  Vectors 0xf0-0xfa are free (reserved for future Linux use).
- */
-#define SPURIOUS_APIC_VECTOR   0xff
-#define ERROR_APIC_VECTOR      0xfe
-#define INVALIDATE_TLB_VECTOR  0xfd
-#define RESCHEDULE_VECTOR      0xfc
-#define CALL_FUNCTION_VECTOR   0xfb
-
-#define THERMAL_APIC_VECTOR    0xf0
-/*
- * Local APIC timer IRQ vector is on a different priority level,
- * to work around the 'lost local interrupt if more than 2 IRQ
- * sources per level' errata.
- */
-#define LOCAL_TIMER_VECTOR     0xef
-
-/*
- * First APIC vector available to drivers: (vectors 0x30-0xee)
- * we start at 0x31 to spread out vectors evenly between priority
- * levels. (0x80 is the syscall vector)
- */
-#define FIRST_DEVICE_VECTOR    0x31
-#define FIRST_SYSTEM_VECTOR    0xef
-
-#define TIMER_IRQ 0
-
-/*
- * 16 8259A IRQ's, 208 potential APIC interrupt sources.
- * Right now the APIC is mostly only used for SMP.
- * 256 vectors is an architectural limit. (we can have
- * more than 256 devices theoretically, but they will
- * have to use shared interrupts)
- * Since vectors 0x00-0x1f are used/reserved for the CPU,
- * the usable vector space is 0x20-0xff (224 vectors)
- */
-
-/*
- * The maximum number of vectors supported by i386 processors
- * is limited to 256. For processors other than i386, NR_VECTORS
- * should be changed accordingly.
- */
-#define NR_VECTORS 256
-
-#include "irq_vectors_limits.h"
-
-#define FPU_IRQ                        13
-
-#define        FIRST_VM86_IRQ          3
-#define LAST_VM86_IRQ          15
-#define invalid_vm86_irq(irq)  ((irq) < 3 || (irq) > 15)
-
-
-#endif /* _ASM_IRQ_VECTORS_H */
diff --git a/include/asm-x86/mach-default/irq_vectors_limits.h b/include/asm-x86/mach-default/irq_vectors_limits.h
deleted file mode 100644 (file)
index a90c7a6..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef _ASM_IRQ_VECTORS_LIMITS_H
-#define _ASM_IRQ_VECTORS_LIMITS_H
-
-#if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT)
-#define NR_IRQS 224
-# if (224 >= 32 * NR_CPUS)
-# define NR_IRQ_VECTORS NR_IRQS
-# else
-# define NR_IRQ_VECTORS (32 * NR_CPUS)
-# endif
-#else
-#define NR_IRQS 16
-#define NR_IRQ_VECTORS NR_IRQS
-#endif
-
-#endif /* _ASM_IRQ_VECTORS_LIMITS_H */
diff --git a/include/asm-x86/mach-visws/irq_vectors.h b/include/asm-x86/mach-visws/irq_vectors.h
deleted file mode 100644 (file)
index cb572d8..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-#ifndef _ASM_IRQ_VECTORS_H
-#define _ASM_IRQ_VECTORS_H
-
-/*
- * IDT vectors usable for external interrupt sources start
- * at 0x20:
- */
-#define FIRST_EXTERNAL_VECTOR  0x20
-
-#define SYSCALL_VECTOR         0x80
-
-/*
- * Vectors 0x20-0x2f are used for ISA interrupts.
- */
-
-/*
- * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
- *
- *  some of the following vectors are 'rare', they are merged
- *  into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
- *  TLB, reschedule and local APIC vectors are performance-critical.
- *
- *  Vectors 0xf0-0xfa are free (reserved for future Linux use).
- */
-#define SPURIOUS_APIC_VECTOR   0xff
-#define ERROR_APIC_VECTOR      0xfe
-#define INVALIDATE_TLB_VECTOR  0xfd
-#define RESCHEDULE_VECTOR      0xfc
-#define CALL_FUNCTION_VECTOR   0xfb
-
-#define THERMAL_APIC_VECTOR    0xf0
-/*
- * Local APIC timer IRQ vector is on a different priority level,
- * to work around the 'lost local interrupt if more than 2 IRQ
- * sources per level' errata.
- */
-#define LOCAL_TIMER_VECTOR     0xef
-
-/*
- * First APIC vector available to drivers: (vectors 0x30-0xee)
- * we start at 0x31 to spread out vectors evenly between priority
- * levels. (0x80 is the syscall vector)
- */
-#define FIRST_DEVICE_VECTOR    0x31
-#define FIRST_SYSTEM_VECTOR    0xef
-
-#define TIMER_IRQ 0
-
-/*
- * IRQ definitions
- */
-#define NR_VECTORS 256
-#define NR_IRQS 224
-#define NR_IRQ_VECTORS NR_IRQS
-
-#define FPU_IRQ                        13
-
-#define        FIRST_VM86_IRQ          3
-#define LAST_VM86_IRQ          15
-#define invalid_vm86_irq(irq)  ((irq) < 3 || (irq) > 15)
-
-#endif /* _ASM_IRQ_VECTORS_H */
diff --git a/include/asm-x86/mach-voyager/irq_vectors.h b/include/asm-x86/mach-voyager/irq_vectors.h
deleted file mode 100644 (file)
index 165421f..0000000
+++ /dev/null
@@ -1,79 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8 -*- */
-
-/* Copyright (C) 2002
- *
- * Author: James.Bottomley@HansenPartnership.com
- *
- * linux/arch/i386/voyager/irq_vectors.h
- *
- * This file provides definitions for the VIC and QIC CPIs
- */
-
-#ifndef _ASM_IRQ_VECTORS_H
-#define _ASM_IRQ_VECTORS_H
-
-/*
- * IDT vectors usable for external interrupt sources start
- * at 0x20:
- */
-#define FIRST_EXTERNAL_VECTOR  0x20
-
-#define SYSCALL_VECTOR         0x80
-
-/*
- * Vectors 0x20-0x2f are used for ISA interrupts.
- */
-
-/* These define the CPIs we use in linux */
-#define VIC_CPI_LEVEL0                 0
-#define VIC_CPI_LEVEL1                 1
-/* now the fake CPIs */
-#define VIC_TIMER_CPI                  2
-#define VIC_INVALIDATE_CPI             3
-#define VIC_RESCHEDULE_CPI             4
-#define VIC_ENABLE_IRQ_CPI             5
-#define VIC_CALL_FUNCTION_CPI          6
-
-/* Now the QIC CPIs:  Since we don't need the two initial levels,
- * these are 2 less than the VIC CPIs */
-#define QIC_CPI_OFFSET                 1
-#define QIC_TIMER_CPI                  (VIC_TIMER_CPI - QIC_CPI_OFFSET)
-#define QIC_INVALIDATE_CPI             (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET)
-#define QIC_RESCHEDULE_CPI             (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
-#define QIC_ENABLE_IRQ_CPI             (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
-#define QIC_CALL_FUNCTION_CPI          (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
-
-#define VIC_START_FAKE_CPI             VIC_TIMER_CPI
-#define VIC_END_FAKE_CPI               VIC_CALL_FUNCTION_CPI
-
-/* this is the SYS_INT CPI. */
-#define VIC_SYS_INT                    8
-#define VIC_CMN_INT                    15
-
-/* This is the boot CPI for alternate processors.  It gets overwritten
- * by the above once the system has activated all available processors */
-#define VIC_CPU_BOOT_CPI               VIC_CPI_LEVEL0
-#define VIC_CPU_BOOT_ERRATA_CPI                (VIC_CPI_LEVEL0 + 8)
-
-#define NR_VECTORS 256
-#define NR_IRQS 224
-#define NR_IRQ_VECTORS NR_IRQS
-
-#define FPU_IRQ                                13
-
-#define        FIRST_VM86_IRQ          3
-#define LAST_VM86_IRQ          15
-#define invalid_vm86_irq(irq)  ((irq) < 3 || (irq) > 15)
-
-#ifndef __ASSEMBLY__
-extern asmlinkage void vic_cpi_interrupt(void);
-extern asmlinkage void vic_sys_interrupt(void);
-extern asmlinkage void vic_cmn_interrupt(void);
-extern asmlinkage void qic_timer_interrupt(void);
-extern asmlinkage void qic_invalidate_interrupt(void);
-extern asmlinkage void qic_reschedule_interrupt(void);
-extern asmlinkage void qic_enable_irq_interrupt(void);
-extern asmlinkage void qic_call_function_interrupt(void);
-#endif /* !__ASSEMBLY__ */
-
-#endif /* _ASM_IRQ_VECTORS_H */
index 26b9240..6500488 100644 (file)
@@ -5,7 +5,7 @@
  *
  * SGI UV architectural definitions
  *
- * Copyright (C) 2007 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
  */
 
 #ifndef __ASM_X86_UV_HUB_H__
 /*
  * Addressing Terminology
  *
- *     NASID - network ID of a router, Mbrick or Cbrick. Nasid values of
- *             routers always have low bit of 1, C/MBricks have low bit
- *             equal to 0. Most addressing macros that target UV hub chips
- *             right shift the NASID by 1 to exclude the always-zero bit.
+ *     M       - The low M bits of a physical address represent the offset
+ *               into the blade local memory. RAM memory on a blade is physically
+ *               contiguous (although various IO spaces may punch holes in
+ *               it).
  *
- *     SNASID - NASID right shifted by 1 bit.
+ *     N       - Number of bits in the node portion of a socket physical
+ *               address.
+ *
+ *     NASID   - network ID of a router, Mbrick or Cbrick. Nasid values of
+ *               routers always have low bit of 1, C/MBricks have low bit
+ *               equal to 0. Most addressing macros that target UV hub chips
+ *               right shift the NASID by 1 to exclude the always-zero bit.
+ *               NASIDs contain up to 15 bits.
+ *
+ *     GNODE   - NASID right shifted by 1 bit. Most mmrs contain gnodes instead
+ *               of nasids.
+ *
+ *     PNODE   - the low N bits of the GNODE. The PNODE is the most useful variant
+ *               of the nasid for socket usage.
+ *
+ *
+ *  NumaLink Global Physical Address Format:
+ *  +--------------------------------+---------------------+
+ *  |00..000|      GNODE             |      NodeOffset     |
+ *  +--------------------------------+---------------------+
+ *          |<-------53 - M bits --->|<--------M bits ----->
+ *
+ *     M - number of node offset bits (35 .. 40)
  *
  *
  *  Memory/UV-HUB Processor Socket Address Format:
- *  +--------+---------------+---------------------+
- *  |00..0000|    SNASID     |      NodeOffset     |
- *  +--------+---------------+---------------------+
- *           <--- N bits --->|<--------M bits ----->
+ *  +----------------+---------------+---------------------+
+ *  |00..000000000000|   PNODE       |      NodeOffset     |
+ *  +----------------+---------------+---------------------+
+ *                   <--- N bits --->|<--------M bits ----->
  *
- *     M number of node offset bits (35 .. 40)
- *     N number of SNASID bits (0 .. 10)
+ *     M number of node offset bits (35 .. 40)
+ *     N - number of PNODE bits (0 .. 10)
  *
  *             Note: M + N cannot currently exceed 44 (x86_64) or 46 (IA64).
  *             The actual values are configuration dependent and are set at
- *             boot time
+ *             boot time. M & N values are set by the hardware/BIOS at boot.
+ *
  *
  * APICID format
  *     NOTE!!!!!! This is the current format of the APICID. However, code
  *
  *             1111110000000000
  *             5432109876543210
- *             nnnnnnnnnnlc0cch
+ *             pppppppppplc0cch
  *             sssssssssss
  *
- *                     n  = snasid bits
+ *                     p  = pnode bits
  *                     l =  socket number on board
  *                     c  = core
  *                     h  = hyperthread
- *                     s  = bits that are in the socket CSR
+ *                     s  = bits that are in the SOCKET_ID CSR
  *
  *     Note: Processor only supports 12 bits in the APICID register. The ACPI
  *           tables hold all 16 bits. Software needs to be aware of this.
@@ -74,7 +97,7 @@
  * This value is also the value of the maximum number of non-router NASIDs
  * in the numalink fabric.
  *
- * NOTE: a brick may be 1 or 2 OS nodes. Don't get these confused.
+ * NOTE: a brick may contain 1 or 2 OS nodes. Don't get these confused.
  */
 #define UV_MAX_NUMALINK_BLADES 16384
 
  */
 struct uv_hub_info_s {
        unsigned long   global_mmr_base;
-       unsigned short  local_nasid;
-       unsigned short  gnode_upper;
+       unsigned long   gpa_mask;
+       unsigned long   gnode_upper;
+       unsigned long   lowmem_remap_top;
+       unsigned long   lowmem_remap_base;
+       unsigned short  pnode;
+       unsigned short  pnode_mask;
        unsigned short  coherency_domain_number;
        unsigned short  numa_blade_id;
        unsigned char   blade_processor_id;
@@ -112,83 +139,124 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
  * Local & Global MMR space macros.
  *     Note: macros are intended to be used ONLY by inline functions
  *     in this file - not by other kernel code.
+ *             n -  NASID (full 15-bit global nasid)
+ *             g -  GNODE (full 15-bit global nasid, right shifted 1)
+ *             p -  PNODE (local part of nsids, right shifted 1)
  */
-#define UV_SNASID(n)                   ((n) >> 1)
-#define UV_NASID(n)                    ((n) << 1)
+#define UV_NASID_TO_PNODE(n)           (((n) >> 1) & uv_hub_info->pnode_mask)
+#define UV_PNODE_TO_NASID(p)           (((p) << 1) | uv_hub_info->gnode_upper)
 
 #define UV_LOCAL_MMR_BASE              0xf4000000UL
 #define UV_GLOBAL_MMR32_BASE           0xf8000000UL
 #define UV_GLOBAL_MMR64_BASE           (uv_hub_info->global_mmr_base)
 
-#define UV_GLOBAL_MMR32_SNASID_MASK    0x3ff
-#define UV_GLOBAL_MMR32_SNASID_SHIFT   15
-#define UV_GLOBAL_MMR64_SNASID_SHIFT   26
+#define UV_GLOBAL_MMR32_PNODE_SHIFT    15
+#define UV_GLOBAL_MMR64_PNODE_SHIFT    26
 
-#define UV_GLOBAL_MMR32_NASID_BITS(n)                                  \
-               (((UV_SNASID(n) & UV_GLOBAL_MMR32_SNASID_MASK)) <<      \
-               (UV_GLOBAL_MMR32_SNASID_SHIFT))
+#define UV_GLOBAL_MMR32_PNODE_BITS(p)  ((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT))
 
-#define UV_GLOBAL_MMR64_NASID_BITS(n)                                  \
-       ((unsigned long)UV_SNASID(n) << UV_GLOBAL_MMR64_SNASID_SHIFT)
+#define UV_GLOBAL_MMR64_PNODE_BITS(p)                                  \
+       ((unsigned long)(p) << UV_GLOBAL_MMR64_PNODE_SHIFT)
+
+#define UV_APIC_PNODE_SHIFT    6
+
+/*
+ * Macros for converting between kernel virtual addresses, socket local physical
+ * addresses, and UV global physical addresses.
+ *     Note: use the standard __pa() & __va() macros for converting
+ *           between socket virtual and socket physical addresses.
+ */
+
+/* socket phys RAM --> UV global physical address */
+static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
+{
+       if (paddr < uv_hub_info->lowmem_remap_top)
+               paddr += uv_hub_info->lowmem_remap_base;
+       return paddr | uv_hub_info->gnode_upper;
+}
+
+
+/* socket virtual --> UV global physical address */
+static inline unsigned long uv_gpa(void *v)
+{
+       return __pa(v) | uv_hub_info->gnode_upper;
+}
+
+/* socket virtual --> UV global physical address */
+static inline void *uv_vgpa(void *v)
+{
+       return (void *)uv_gpa(v);
+}
+
+/* UV global physical address --> socket virtual */
+static inline void *uv_va(unsigned long gpa)
+{
+       return __va(gpa & uv_hub_info->gpa_mask);
+}
+
+/* pnode, offset --> socket virtual */
+static inline void *uv_pnode_offset_to_vaddr(int pnode, unsigned long offset)
+{
+       return __va(((unsigned long)pnode << uv_hub_info->m_val) | offset);
+}
 
-#define UV_APIC_NASID_SHIFT    6
 
 /*
- * Extract a NASID from an APICID (full apicid, not processor subset)
+ * Extract a PNODE from an APICID (full apicid, not processor subset)
  */
-static inline int uv_apicid_to_nasid(int apicid)
+static inline int uv_apicid_to_pnode(int apicid)
 {
-       return (UV_NASID(apicid >> UV_APIC_NASID_SHIFT));
+       return (apicid >> UV_APIC_PNODE_SHIFT);
 }
 
 /*
  * Access global MMRs using the low memory MMR32 space. This region supports
  * faster MMR access but not all MMRs are accessible in this space.
  */
-static inline unsigned long *uv_global_mmr32_address(int nasid,
+static inline unsigned long *uv_global_mmr32_address(int pnode,
                                unsigned long offset)
 {
        return __va(UV_GLOBAL_MMR32_BASE |
-                      UV_GLOBAL_MMR32_NASID_BITS(nasid) | offset);
+                      UV_GLOBAL_MMR32_PNODE_BITS(pnode) | offset);
 }
 
-static inline void uv_write_global_mmr32(int nasid, unsigned long offset,
+static inline void uv_write_global_mmr32(int pnode, unsigned long offset,
                                 unsigned long val)
 {
-       *uv_global_mmr32_address(nasid, offset) = val;
+       *uv_global_mmr32_address(pnode, offset) = val;
 }
 
-static inline unsigned long uv_read_global_mmr32(int nasid,
+static inline unsigned long uv_read_global_mmr32(int pnode,
                                                 unsigned long offset)
 {
-       return *uv_global_mmr32_address(nasid, offset);
+       return *uv_global_mmr32_address(pnode, offset);
 }
 
 /*
  * Access Global MMR space using the MMR space located at the top of physical
  * memory.
  */
-static inline unsigned long *uv_global_mmr64_address(int nasid,
+static inline unsigned long *uv_global_mmr64_address(int pnode,
                                unsigned long offset)
 {
        return __va(UV_GLOBAL_MMR64_BASE |
-                      UV_GLOBAL_MMR64_NASID_BITS(nasid) | offset);
+                   UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset);
 }
 
-static inline void uv_write_global_mmr64(int nasid, unsigned long offset,
+static inline void uv_write_global_mmr64(int pnode, unsigned long offset,
                                unsigned long val)
 {
-       *uv_global_mmr64_address(nasid, offset) = val;
+       *uv_global_mmr64_address(pnode, offset) = val;
 }
 
-static inline unsigned long uv_read_global_mmr64(int nasid,
+static inline unsigned long uv_read_global_mmr64(int pnode,
                                                 unsigned long offset)
 {
-       return *uv_global_mmr64_address(nasid, offset);
+       return *uv_global_mmr64_address(pnode, offset);
 }
 
 /*
- * Access node local MMRs. Faster than using global space but only local MMRs
+ * Access hub local MMRs. Faster than using global space but only local MMRs
  * are accessible.
  */
 static inline unsigned long *uv_local_mmr_address(unsigned long offset)
@@ -207,15 +275,15 @@ static inline void uv_write_local_mmr(unsigned long offset, unsigned long val)
 }
 
 /*
- * Structures and definitions for converting between cpu, node, and blade
+ * Structures and definitions for converting between cpu, node, pnode, and blade
  * numbers.
  */
 struct uv_blade_info {
-       unsigned short  nr_posible_cpus;
+       unsigned short  nr_possible_cpus;
        unsigned short  nr_online_cpus;
-       unsigned short  nasid;
+       unsigned short  pnode;
 };
-struct uv_blade_info *uv_blade_info;
+extern struct uv_blade_info *uv_blade_info;
 extern short *uv_node_to_blade;
 extern short *uv_cpu_to_blade;
 extern short uv_possible_blades;
@@ -244,16 +312,16 @@ static inline int uv_node_to_blade_id(int nid)
        return uv_node_to_blade[nid];
 }
 
-/* Convert a blade id to the NASID of the blade */
-static inline int uv_blade_to_nasid(int bid)
+/* Convert a blade id to the PNODE of the blade */
+static inline int uv_blade_to_pnode(int bid)
 {
-       return uv_blade_info[bid].nasid;
+       return uv_blade_info[bid].pnode;
 }
 
 /* Determine the number of possible cpus on a blade */
 static inline int uv_blade_nr_possible_cpus(int bid)
 {
-       return uv_blade_info[bid].nr_posible_cpus;
+       return uv_blade_info[bid].nr_possible_cpus;
 }
 
 /* Determine the number of online cpus on a blade */
@@ -262,16 +330,16 @@ static inline int uv_blade_nr_online_cpus(int bid)
        return uv_blade_info[bid].nr_online_cpus;
 }
 
-/* Convert a cpu id to the NASID of the blade containing the cpu */
-static inline int uv_cpu_to_nasid(int cpu)
+/* Convert a cpu id to the PNODE of the blade containing the cpu */
+static inline int uv_cpu_to_pnode(int cpu)
 {
-       return uv_blade_info[uv_cpu_to_blade_id(cpu)].nasid;
+       return uv_blade_info[uv_cpu_to_blade_id(cpu)].pnode;
 }
 
-/* Convert a node number to the NASID of the blade */
-static inline int uv_node_to_nasid(int nid)
+/* Convert a linux node number to the PNODE of the blade */
+static inline int uv_node_to_pnode(int nid)
 {
-       return uv_blade_info[uv_node_to_blade_id(nid)].nasid;
+       return uv_blade_info[uv_node_to_blade_id(nid)].pnode;
 }
 
 /* Maximum possible number of blades */
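
With NASIDs replaced by PNODEs, the usual access pattern becomes: look up the pnode that owns a cpu (or blade), then go through the pnode-based MMR accessors. A usage sketch built only from helpers defined in this header; the register offset would come from uv_mmrs.h:

/* usage sketch, not part of the patch */
static unsigned long example_read_hub_mmr(int cpu, unsigned long mmr_offset)
{
	int pnode = uv_cpu_to_pnode(cpu);	/* blade-local node id */

	return uv_read_global_mmr64(pnode, mmr_offset);
}
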
index 3b69fe6..ac98460 100644 (file)
 #ifndef __ASM_X86_UV_MMRS__
 #define __ASM_X86_UV_MMRS__
 
-/*
- *       AUTO GENERATED - Do not edit
- */
+#define UV_MMR_ENABLE          (1UL << 63)
 
- #define UV_MMR_ENABLE         (1UL << 63)
+/* ========================================================================= */
+/*                           UVH_BAU_DATA_CONFIG                             */
+/* ========================================================================= */
+#define UVH_BAU_DATA_CONFIG 0x61680UL
+#define UVH_BAU_DATA_CONFIG_32 0x0450
+
+#define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0
+#define UVH_BAU_DATA_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_BAU_DATA_CONFIG_DM_SHFT 8
+#define UVH_BAU_DATA_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_BAU_DATA_CONFIG_DESTMODE_SHFT 11
+#define UVH_BAU_DATA_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_BAU_DATA_CONFIG_STATUS_SHFT 12
+#define UVH_BAU_DATA_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_BAU_DATA_CONFIG_P_SHFT 13
+#define UVH_BAU_DATA_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_BAU_DATA_CONFIG_T_SHFT 15
+#define UVH_BAU_DATA_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_BAU_DATA_CONFIG_M_SHFT 16
+#define UVH_BAU_DATA_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_BAU_DATA_CONFIG_APIC_ID_SHFT 32
+#define UVH_BAU_DATA_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_bau_data_config_u {
+    unsigned long      v;
+    struct uvh_bau_data_config_s {
+       unsigned long   vector_  :  8;  /* RW */
+       unsigned long   dm       :  3;  /* RW */
+       unsigned long   destmode :  1;  /* RW */
+       unsigned long   status   :  1;  /* RO */
+       unsigned long   p        :  1;  /* RO */
+       unsigned long   rsvd_14  :  1;  /*    */
+       unsigned long   t        :  1;  /* RO */
+       unsigned long   m        :  1;  /* RW */
+       unsigned long   rsvd_17_31: 15;  /*    */
+       unsigned long   apic_id  : 32;  /* RW */
+    } s;
+};
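
Each generated register comes with both SHFT/MASK constants and a bitfield union, so a read-modify-write can go through the struct view rather than hand-rolled masking. A sketch of that pattern, assuming the uv_read/write_global_mmr64() helpers from uv_hub.h:

/* sketch only -- programs the BAU data interrupt vector for one hub */
static void example_set_bau_vector(int pnode, unsigned char vector)
{
	union uvh_bau_data_config_u reg;

	reg.v = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG);
	reg.s.vector_ = vector;			/* field name as generated */
	uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, reg.v);
}
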
 
 /* ========================================================================= */
 /*                               UVH_IPI_INT                                 */
@@ -109,6 +144,7 @@ union uvh_lb_bau_intd_payload_queue_tail_u {
 /*                   UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE                    */
 /* ========================================================================= */
 #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0x0aa0
 
 #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0
 #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL
@@ -169,6 +205,7 @@ union uvh_lb_bau_intd_software_acknowledge_u {
 /*                UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS                 */
 /* ========================================================================= */
 #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x0000000000320088UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0x0aa8
 
 /* ========================================================================= */
 /*                     UVH_LB_BAU_SB_ACTIVATION_CONTROL                      */
@@ -247,6 +284,331 @@ union uvh_lb_bau_sb_descriptor_base_u {
     } s;
 };
 
+/* ========================================================================= */
+/*                      UVH_LB_MCAST_AOERR0_RPT_ENABLE                       */
+/* ========================================================================= */
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE 0x50b20UL
+
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_OBESE_MSG_SHFT 0
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_OBESE_MSG_MASK 0x0000000000000001UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_DATA_SB_ERR_SHFT 1
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_DATA_SB_ERR_MASK 0x0000000000000002UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_NACK_BUFF_PARITY_SHFT 2
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_NACK_BUFF_PARITY_MASK 0x0000000000000004UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_TIMEOUT_SHFT 3
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_TIMEOUT_MASK 0x0000000000000008UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_INACTIVE_REPLY_SHFT 4
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_INACTIVE_REPLY_MASK 0x0000000000000010UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_UPGRADE_ERROR_SHFT 5
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_UPGRADE_ERROR_MASK 0x0000000000000020UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REG_COUNT_UNDERFLOW_SHFT 6
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REG_COUNT_UNDERFLOW_MASK 0x0000000000000040UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REP_OBESE_MSG_SHFT 7
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REP_OBESE_MSG_MASK 0x0000000000000080UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_RUNT_MSG_SHFT 8
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_RUNT_MSG_MASK 0x0000000000000100UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_OBESE_MSG_SHFT 9
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_OBESE_MSG_MASK 0x0000000000000200UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_DATA_SB_ERR_SHFT 10
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_DATA_SB_ERR_MASK 0x0000000000000400UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_RUNT_MSG_SHFT 11
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_RUNT_MSG_MASK 0x0000000000000800UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_OBESE_MSG_SHFT 12
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_OBESE_MSG_MASK 0x0000000000001000UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_DATA_SB_ERR_SHFT 13
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_DATA_SB_ERR_MASK 0x0000000000002000UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_COMMAND_ERR_SHFT 14
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_COMMAND_ERR_MASK 0x0000000000004000UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_PEND_TIMEOUT_SHFT 15
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_PEND_TIMEOUT_MASK 0x0000000000008000UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_RUNT_MSG_SHFT 16
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_RUNT_MSG_MASK 0x0000000000010000UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_OBESE_MSG_SHFT 17
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_OBESE_MSG_MASK 0x0000000000020000UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_DATA_SB_ERR_SHFT 18
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_DATA_SB_ERR_MASK 0x0000000000040000UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_RUNT_MSG_SHFT 19
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_RUNT_MSG_MASK 0x0000000000080000UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_OBESE_MSG_SHFT 20
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_OBESE_MSG_MASK 0x0000000000100000UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_DATA_SB_ERR_SHFT 21
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_DATA_SB_ERR_MASK 0x0000000000200000UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_TIMEOUT_SHFT 22
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_TIMEOUT_MASK 0x0000000000400000UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_SPURIOUS_EVENT_SHFT 23
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_SPURIOUS_EVENT_MASK 0x0000000000800000UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IOH_DESTINATION_TABLE_PARITY_SHFT 24
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IOH_DESTINATION_TABLE_PARITY_MASK 0x0000000001000000UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_HAD_ERROR_REPLY_SHFT 25
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_HAD_ERROR_REPLY_MASK 0x0000000002000000UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_TIMEOUT_SHFT 26
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_TIMEOUT_MASK 0x0000000004000000UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_LOCK_MANAGER_HAD_ERROR_REPLY_SHFT 27
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_LOCK_MANAGER_HAD_ERROR_REPLY_MASK 0x0000000008000000UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_HAD_ERROR_REPLY_SHFT 28
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_HAD_ERROR_REPLY_MASK 0x0000000010000000UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_TIMEOUT_SHFT 29
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_TIMEOUT_MASK 0x0000000020000000UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SB_ACTIVATION_OVERRUN_SHFT 30
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SB_ACTIVATION_OVERRUN_MASK 0x0000000040000000UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_HAD_ERROR_REPLY_SHFT 31
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_HAD_ERROR_REPLY_MASK 0x0000000080000000UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_TIMEOUT_SHFT 32
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_TIMEOUT_MASK 0x0000000100000000UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_0_PARITY_SHFT 33
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_0_PARITY_MASK 0x0000000200000000UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_1_PARITY_SHFT 34
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_1_PARITY_MASK 0x0000000400000000UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SOCKET_DESTINATION_TABLE_PARITY_SHFT 35
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SOCKET_DESTINATION_TABLE_PARITY_MASK 0x0000000800000000UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_BAU_REPLY_PAYLOAD_CORRUPTION_SHFT 36
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_BAU_REPLY_PAYLOAD_CORRUPTION_MASK 0x0000001000000000UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IO_PORT_DESTINATION_TABLE_PARITY_SHFT 37
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IO_PORT_DESTINATION_TABLE_PARITY_MASK 0x0000002000000000UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INTD_SOFT_ACK_TIMEOUT_SHFT 38
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INTD_SOFT_ACK_TIMEOUT_MASK 0x0000004000000000UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_OBESE_MSG_SHFT 39
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_OBESE_MSG_MASK 0x0000008000000000UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_COMMAND_ERR_SHFT 40
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_COMMAND_ERR_MASK 0x0000010000000000UL
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_TIMEOUT_SHFT 41
+#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_TIMEOUT_MASK 0x0000020000000000UL
+
+union uvh_lb_mcast_aoerr0_rpt_enable_u {
+    unsigned long      v;
+    struct uvh_lb_mcast_aoerr0_rpt_enable_s {
+       unsigned long   mcast_obese_msg                         :  1;  /* RW */
+       unsigned long   mcast_data_sb_err                       :  1;  /* RW */
+       unsigned long   mcast_nack_buff_parity                  :  1;  /* RW */
+       unsigned long   mcast_timeout                           :  1;  /* RW */
+       unsigned long   mcast_inactive_reply                    :  1;  /* RW */
+       unsigned long   mcast_upgrade_error                     :  1;  /* RW */
+       unsigned long   mcast_reg_count_underflow               :  1;  /* RW */
+       unsigned long   mcast_rep_obese_msg                     :  1;  /* RW */
+       unsigned long   ucache_req_runt_msg                     :  1;  /* RW */
+       unsigned long   ucache_req_obese_msg                    :  1;  /* RW */
+       unsigned long   ucache_req_data_sb_err                  :  1;  /* RW */
+       unsigned long   ucache_rep_runt_msg                     :  1;  /* RW */
+       unsigned long   ucache_rep_obese_msg                    :  1;  /* RW */
+       unsigned long   ucache_rep_data_sb_err                  :  1;  /* RW */
+       unsigned long   ucache_rep_command_err                  :  1;  /* RW */
+       unsigned long   ucache_pend_timeout                     :  1;  /* RW */
+       unsigned long   macc_req_runt_msg                       :  1;  /* RW */
+       unsigned long   macc_req_obese_msg                      :  1;  /* RW */
+       unsigned long   macc_req_data_sb_err                    :  1;  /* RW */
+       unsigned long   macc_rep_runt_msg                       :  1;  /* RW */
+       unsigned long   macc_rep_obese_msg                      :  1;  /* RW */
+       unsigned long   macc_rep_data_sb_err                    :  1;  /* RW */
+       unsigned long   macc_timeout                            :  1;  /* RW */
+       unsigned long   macc_spurious_event                     :  1;  /* RW */
+       unsigned long   ioh_destination_table_parity            :  1;  /* RW */
+       unsigned long   get_had_error_reply                     :  1;  /* RW */
+       unsigned long   get_timeout                             :  1;  /* RW */
+       unsigned long   lock_manager_had_error_reply            :  1;  /* RW */
+       unsigned long   put_had_error_reply                     :  1;  /* RW */
+       unsigned long   put_timeout                             :  1;  /* RW */
+       unsigned long   sb_activation_overrun                   :  1;  /* RW */
+       unsigned long   completed_gb_activation_had_error_reply :  1;  /* RW */
+       unsigned long   completed_gb_activation_timeout         :  1;  /* RW */
+       unsigned long   descriptor_buffer_0_parity              :  1;  /* RW */
+       unsigned long   descriptor_buffer_1_parity              :  1;  /* RW */
+       unsigned long   socket_destination_table_parity         :  1;  /* RW */
+       unsigned long   bau_reply_payload_corruption            :  1;  /* RW */
+       unsigned long   io_port_destination_table_parity        :  1;  /* RW */
+       unsigned long   intd_soft_ack_timeout                   :  1;  /* RW */
+       unsigned long   int_rep_obese_msg                       :  1;  /* RW */
+       unsigned long   int_rep_command_err                     :  1;  /* RW */
+       unsigned long   int_timeout                             :  1;  /* RW */
+       unsigned long   rsvd_42_63                              : 22;  /*    */
+    } s;
+};
+
+/* ========================================================================= */
+/*                          UVH_LOCAL_INT0_CONFIG                            */
+/* ========================================================================= */
+#define UVH_LOCAL_INT0_CONFIG 0x61000UL
+
+#define UVH_LOCAL_INT0_CONFIG_VECTOR_SHFT 0
+#define UVH_LOCAL_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_LOCAL_INT0_CONFIG_DM_SHFT 8
+#define UVH_LOCAL_INT0_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_LOCAL_INT0_CONFIG_DESTMODE_SHFT 11
+#define UVH_LOCAL_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_LOCAL_INT0_CONFIG_STATUS_SHFT 12
+#define UVH_LOCAL_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_LOCAL_INT0_CONFIG_P_SHFT 13
+#define UVH_LOCAL_INT0_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_LOCAL_INT0_CONFIG_T_SHFT 15
+#define UVH_LOCAL_INT0_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_LOCAL_INT0_CONFIG_M_SHFT 16
+#define UVH_LOCAL_INT0_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_LOCAL_INT0_CONFIG_APIC_ID_SHFT 32
+#define UVH_LOCAL_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_local_int0_config_u {
+    unsigned long      v;
+    struct uvh_local_int0_config_s {
+       unsigned long   vector_  :  8;  /* RW */
+       unsigned long   dm       :  3;  /* RW */
+       unsigned long   destmode :  1;  /* RW */
+       unsigned long   status   :  1;  /* RO */
+       unsigned long   p        :  1;  /* RO */
+       unsigned long   rsvd_14  :  1;  /*    */
+       unsigned long   t        :  1;  /* RO */
+       unsigned long   m        :  1;  /* RW */
+       unsigned long   rsvd_17_31: 15;  /*    */
+       unsigned long   apic_id  : 32;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                          UVH_LOCAL_INT0_ENABLE                            */
+/* ========================================================================= */
+#define UVH_LOCAL_INT0_ENABLE 0x65000UL
+
+#define UVH_LOCAL_INT0_ENABLE_LB_HCERR_SHFT 0
+#define UVH_LOCAL_INT0_ENABLE_LB_HCERR_MASK 0x0000000000000001UL
+#define UVH_LOCAL_INT0_ENABLE_GR0_HCERR_SHFT 1
+#define UVH_LOCAL_INT0_ENABLE_GR0_HCERR_MASK 0x0000000000000002UL
+#define UVH_LOCAL_INT0_ENABLE_GR1_HCERR_SHFT 2
+#define UVH_LOCAL_INT0_ENABLE_GR1_HCERR_MASK 0x0000000000000004UL
+#define UVH_LOCAL_INT0_ENABLE_LH_HCERR_SHFT 3
+#define UVH_LOCAL_INT0_ENABLE_LH_HCERR_MASK 0x0000000000000008UL
+#define UVH_LOCAL_INT0_ENABLE_RH_HCERR_SHFT 4
+#define UVH_LOCAL_INT0_ENABLE_RH_HCERR_MASK 0x0000000000000010UL
+#define UVH_LOCAL_INT0_ENABLE_XN_HCERR_SHFT 5
+#define UVH_LOCAL_INT0_ENABLE_XN_HCERR_MASK 0x0000000000000020UL
+#define UVH_LOCAL_INT0_ENABLE_SI_HCERR_SHFT 6
+#define UVH_LOCAL_INT0_ENABLE_SI_HCERR_MASK 0x0000000000000040UL
+#define UVH_LOCAL_INT0_ENABLE_LB_AOERR0_SHFT 7
+#define UVH_LOCAL_INT0_ENABLE_LB_AOERR0_MASK 0x0000000000000080UL
+#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR0_SHFT 8
+#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR0_MASK 0x0000000000000100UL
+#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR0_SHFT 9
+#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR0_MASK 0x0000000000000200UL
+#define UVH_LOCAL_INT0_ENABLE_LH_AOERR0_SHFT 10
+#define UVH_LOCAL_INT0_ENABLE_LH_AOERR0_MASK 0x0000000000000400UL
+#define UVH_LOCAL_INT0_ENABLE_RH_AOERR0_SHFT 11
+#define UVH_LOCAL_INT0_ENABLE_RH_AOERR0_MASK 0x0000000000000800UL
+#define UVH_LOCAL_INT0_ENABLE_XN_AOERR0_SHFT 12
+#define UVH_LOCAL_INT0_ENABLE_XN_AOERR0_MASK 0x0000000000001000UL
+#define UVH_LOCAL_INT0_ENABLE_SI_AOERR0_SHFT 13
+#define UVH_LOCAL_INT0_ENABLE_SI_AOERR0_MASK 0x0000000000002000UL
+#define UVH_LOCAL_INT0_ENABLE_LB_AOERR1_SHFT 14
+#define UVH_LOCAL_INT0_ENABLE_LB_AOERR1_MASK 0x0000000000004000UL
+#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR1_SHFT 15
+#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR1_MASK 0x0000000000008000UL
+#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR1_SHFT 16
+#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR1_MASK 0x0000000000010000UL
+#define UVH_LOCAL_INT0_ENABLE_LH_AOERR1_SHFT 17
+#define UVH_LOCAL_INT0_ENABLE_LH_AOERR1_MASK 0x0000000000020000UL
+#define UVH_LOCAL_INT0_ENABLE_RH_AOERR1_SHFT 18
+#define UVH_LOCAL_INT0_ENABLE_RH_AOERR1_MASK 0x0000000000040000UL
+#define UVH_LOCAL_INT0_ENABLE_XN_AOERR1_SHFT 19
+#define UVH_LOCAL_INT0_ENABLE_XN_AOERR1_MASK 0x0000000000080000UL
+#define UVH_LOCAL_INT0_ENABLE_SI_AOERR1_SHFT 20
+#define UVH_LOCAL_INT0_ENABLE_SI_AOERR1_MASK 0x0000000000100000UL
+#define UVH_LOCAL_INT0_ENABLE_RH_VPI_INT_SHFT 21
+#define UVH_LOCAL_INT0_ENABLE_RH_VPI_INT_MASK 0x0000000000200000UL
+#define UVH_LOCAL_INT0_ENABLE_SYSTEM_SHUTDOWN_INT_SHFT 22
+#define UVH_LOCAL_INT0_ENABLE_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_0_SHFT 23
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_0_MASK 0x0000000000800000UL
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_1_SHFT 24
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_1_MASK 0x0000000001000000UL
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_2_SHFT 25
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_2_MASK 0x0000000002000000UL
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_3_SHFT 26
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_3_MASK 0x0000000004000000UL
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_4_SHFT 27
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_4_MASK 0x0000000008000000UL
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_5_SHFT 28
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_5_MASK 0x0000000010000000UL
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_6_SHFT 29
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_6_MASK 0x0000000020000000UL
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_7_SHFT 30
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_7_MASK 0x0000000040000000UL
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_8_SHFT 31
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_8_MASK 0x0000000080000000UL
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_9_SHFT 32
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_9_MASK 0x0000000100000000UL
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_10_SHFT 33
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_10_MASK 0x0000000200000000UL
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_11_SHFT 34
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_11_MASK 0x0000000400000000UL
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_12_SHFT 35
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_12_MASK 0x0000000800000000UL
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_13_SHFT 36
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_13_MASK 0x0000001000000000UL
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_14_SHFT 37
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_14_MASK 0x0000002000000000UL
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_15_SHFT 38
+#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_15_MASK 0x0000004000000000UL
+#define UVH_LOCAL_INT0_ENABLE_L1_NMI_INT_SHFT 39
+#define UVH_LOCAL_INT0_ENABLE_L1_NMI_INT_MASK 0x0000008000000000UL
+#define UVH_LOCAL_INT0_ENABLE_STOP_CLOCK_SHFT 40
+#define UVH_LOCAL_INT0_ENABLE_STOP_CLOCK_MASK 0x0000010000000000UL
+#define UVH_LOCAL_INT0_ENABLE_ASIC_TO_L1_SHFT 41
+#define UVH_LOCAL_INT0_ENABLE_ASIC_TO_L1_MASK 0x0000020000000000UL
+#define UVH_LOCAL_INT0_ENABLE_L1_TO_ASIC_SHFT 42
+#define UVH_LOCAL_INT0_ENABLE_L1_TO_ASIC_MASK 0x0000040000000000UL
+#define UVH_LOCAL_INT0_ENABLE_LTC_INT_SHFT 43
+#define UVH_LOCAL_INT0_ENABLE_LTC_INT_MASK 0x0000080000000000UL
+#define UVH_LOCAL_INT0_ENABLE_LA_SEQ_TRIGGER_SHFT 44
+#define UVH_LOCAL_INT0_ENABLE_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL
+
+union uvh_local_int0_enable_u {
+    unsigned long      v;
+    struct uvh_local_int0_enable_s {
+       unsigned long   lb_hcerr            :  1;  /* RW */
+       unsigned long   gr0_hcerr           :  1;  /* RW */
+       unsigned long   gr1_hcerr           :  1;  /* RW */
+       unsigned long   lh_hcerr            :  1;  /* RW */
+       unsigned long   rh_hcerr            :  1;  /* RW */
+       unsigned long   xn_hcerr            :  1;  /* RW */
+       unsigned long   si_hcerr            :  1;  /* RW */
+       unsigned long   lb_aoerr0           :  1;  /* RW */
+       unsigned long   gr0_aoerr0          :  1;  /* RW */
+       unsigned long   gr1_aoerr0          :  1;  /* RW */
+       unsigned long   lh_aoerr0           :  1;  /* RW */
+       unsigned long   rh_aoerr0           :  1;  /* RW */
+       unsigned long   xn_aoerr0           :  1;  /* RW */
+       unsigned long   si_aoerr0           :  1;  /* RW */
+       unsigned long   lb_aoerr1           :  1;  /* RW */
+       unsigned long   gr0_aoerr1          :  1;  /* RW */
+       unsigned long   gr1_aoerr1          :  1;  /* RW */
+       unsigned long   lh_aoerr1           :  1;  /* RW */
+       unsigned long   rh_aoerr1           :  1;  /* RW */
+       unsigned long   xn_aoerr1           :  1;  /* RW */
+       unsigned long   si_aoerr1           :  1;  /* RW */
+       unsigned long   rh_vpi_int          :  1;  /* RW */
+       unsigned long   system_shutdown_int :  1;  /* RW */
+       unsigned long   lb_irq_int_0        :  1;  /* RW */
+       unsigned long   lb_irq_int_1        :  1;  /* RW */
+       unsigned long   lb_irq_int_2        :  1;  /* RW */
+       unsigned long   lb_irq_int_3        :  1;  /* RW */
+       unsigned long   lb_irq_int_4        :  1;  /* RW */
+       unsigned long   lb_irq_int_5        :  1;  /* RW */
+       unsigned long   lb_irq_int_6        :  1;  /* RW */
+       unsigned long   lb_irq_int_7        :  1;  /* RW */
+       unsigned long   lb_irq_int_8        :  1;  /* RW */
+       unsigned long   lb_irq_int_9        :  1;  /* RW */
+       unsigned long   lb_irq_int_10       :  1;  /* RW */
+       unsigned long   lb_irq_int_11       :  1;  /* RW */
+       unsigned long   lb_irq_int_12       :  1;  /* RW */
+       unsigned long   lb_irq_int_13       :  1;  /* RW */
+       unsigned long   lb_irq_int_14       :  1;  /* RW */
+       unsigned long   lb_irq_int_15       :  1;  /* RW */
+       unsigned long   l1_nmi_int          :  1;  /* RW */
+       unsigned long   stop_clock          :  1;  /* RW */
+       unsigned long   asic_to_l1          :  1;  /* RW */
+       unsigned long   l1_to_asic          :  1;  /* RW */
+       unsigned long   ltc_int             :  1;  /* RW */
+       unsigned long   la_seq_trigger      :  1;  /* RW */
+       unsigned long   rsvd_45_63          : 19;  /*    */
+    } s;
+};
+
 /* ========================================================================= */
 /*                               UVH_NODE_ID                                 */
 /* ========================================================================= */
@@ -283,6 +645,73 @@ union uvh_node_id_u {
     } s;
 };
 
+/* ========================================================================= */
+/*                          UVH_NODE_PRESENT_TABLE                           */
+/* ========================================================================= */
+#define UVH_NODE_PRESENT_TABLE 0x1400UL
+#define UVH_NODE_PRESENT_TABLE_DEPTH 16
+
+#define UVH_NODE_PRESENT_TABLE_NODES_SHFT 0
+#define UVH_NODE_PRESENT_TABLE_NODES_MASK 0xffffffffffffffffUL
+
+union uvh_node_present_table_u {
+    unsigned long      v;
+    struct uvh_node_present_table_s {
+       unsigned long   nodes : 64;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR                  */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
+
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+union uvh_rh_gam_alias210_redirect_config_0_mmr_u {
+    unsigned long      v;
+    struct uvh_rh_gam_alias210_redirect_config_0_mmr_s {
+       unsigned long   rsvd_0_23 : 24;  /*    */
+       unsigned long   dest_base : 22;  /* RW */
+       unsigned long   rsvd_46_63: 18;  /*    */
+    } s;
+};
+
+/* ========================================================================= */
+/*                UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR                  */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL
+
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+union uvh_rh_gam_alias210_redirect_config_1_mmr_u {
+    unsigned long      v;
+    struct uvh_rh_gam_alias210_redirect_config_1_mmr_s {
+       unsigned long   rsvd_0_23 : 24;  /*    */
+       unsigned long   dest_base : 22;  /* RW */
+       unsigned long   rsvd_46_63: 18;  /*    */
+    } s;
+};
+
+/* ========================================================================= */
+/*                UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR                  */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL
+
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
+    unsigned long      v;
+    struct uvh_rh_gam_alias210_redirect_config_2_mmr_s {
+       unsigned long   rsvd_0_23 : 24;  /*    */
+       unsigned long   dest_base : 22;  /* RW */
+       unsigned long   rsvd_46_63: 18;  /*    */
+    } s;
+};
+
 /* ========================================================================= */
 /*                    UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR                      */
 /* ========================================================================= */
@@ -369,5 +798,77 @@ union uvh_si_addr_map_config_u {
     } s;
 };
 
+/* ========================================================================= */
+/*                       UVH_SI_ALIAS0_OVERLAY_CONFIG                        */
+/* ========================================================================= */
+#define UVH_SI_ALIAS0_OVERLAY_CONFIG 0xc80008UL
+
+#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_SHFT 24
+#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
+#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_SHFT 48
+#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_SHFT 63
+#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_si_alias0_overlay_config_u {
+    unsigned long      v;
+    struct uvh_si_alias0_overlay_config_s {
+       unsigned long   rsvd_0_23: 24;  /*    */
+       unsigned long   base    :  8;  /* RW */
+       unsigned long   rsvd_32_47: 16;  /*    */
+       unsigned long   m_alias :  5;  /* RW */
+       unsigned long   rsvd_53_62: 10;  /*    */
+       unsigned long   enable  :  1;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                       UVH_SI_ALIAS1_OVERLAY_CONFIG                        */
+/* ========================================================================= */
+#define UVH_SI_ALIAS1_OVERLAY_CONFIG 0xc80010UL
+
+#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_SHFT 24
+#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
+#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_SHFT 48
+#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_SHFT 63
+#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_si_alias1_overlay_config_u {
+    unsigned long      v;
+    struct uvh_si_alias1_overlay_config_s {
+       unsigned long   rsvd_0_23: 24;  /*    */
+       unsigned long   base    :  8;  /* RW */
+       unsigned long   rsvd_32_47: 16;  /*    */
+       unsigned long   m_alias :  5;  /* RW */
+       unsigned long   rsvd_53_62: 10;  /*    */
+       unsigned long   enable  :  1;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                       UVH_SI_ALIAS2_OVERLAY_CONFIG                        */
+/* ========================================================================= */
+#define UVH_SI_ALIAS2_OVERLAY_CONFIG 0xc80018UL
+
+#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_SHFT 24
+#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
+#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_SHFT 48
+#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_SHFT 63
+#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_si_alias2_overlay_config_u {
+    unsigned long      v;
+    struct uvh_si_alias2_overlay_config_s {
+       unsigned long   rsvd_0_23: 24;  /*    */
+       unsigned long   base    :  8;  /* RW */
+       unsigned long   rsvd_32_47: 16;  /*    */
+       unsigned long   m_alias :  5;  /* RW */
+       unsigned long   rsvd_53_62: 10;  /*    */
+       unsigned long   enable  :  1;  /* RW */
+    } s;
+};
+
 
 #endif /* __ASM_X86_UV_MMRS__ */
index e8ffce8..cf9f40a 100644 (file)
@@ -1,11 +1,11 @@
 #ifndef _LINUX_KERNEL_STAT_H
 #define _LINUX_KERNEL_STAT_H
 
-#include <asm/irq.h>
 #include <linux/smp.h>
 #include <linux/threads.h>
 #include <linux/percpu.h>
 #include <linux/cpumask.h>
+#include <asm/irq.h>
 #include <asm/cputime.h>
 
 /*