/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV APIC functions (note: not an Intel compatible APIC)
 *
 * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved.
 */
#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/proc_fs.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/kdebug.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/uv/bios.h>
#include <asm/uv/uv.h>
#include <asm/apic.h>
#include <asm/ipi.h>
#include <asm/smp.h>
#include <asm/x86_init.h>
#include <asm/emergency-restart.h>
/* The BMC sets a bit in this MMR non-zero before sending an NMI */
#define UVH_NMI_MMR		UVH_SCRATCH5
#define UVH_NMI_MMR_CLEAR	(UVH_NMI_MMR + 8)
#define UV_NMI_PENDING_MASK	(1UL << 63)
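/*
 * Illustrative sketch (not in the original source): a cpu can test for and
 * acknowledge a BMC-initiated NMI with a read-then-clear of the scratch MMR:
 *
 *	if (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK)
 *		uv_write_local_mmr(UVH_NMI_MMR_CLEAR, UV_NMI_PENDING_MASK);
 *
 * uv_handle_nmi() below does exactly this, under a per-blade lock, and bumps
 * a per-blade count that each cpu compares against cpu_last_nmi_count.
 */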
DEFINE_PER_CPU(unsigned long, cpu_last_nmi_count);

DEFINE_PER_CPU(int, x2apic_extra_bits);

#define PR_DEVEL(fmt, args...)	pr_devel("%s: " fmt, __func__, args)

static enum uv_system_type uv_system_type;
static u64 gru_start_paddr, gru_end_paddr;
static union uvh_apicid uvh_apicid;
int uv_min_hub_revision_id;
EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
unsigned int uv_apicid_hibits;
EXPORT_SYMBOL_GPL(uv_apicid_hibits);
static DEFINE_SPINLOCK(uv_nmi_lock);

static struct apic apic_x2apic_uv_x;
static unsigned long __init uv_early_read_mmr(unsigned long addr)
{
	unsigned long val, *mmr;

	mmr = early_ioremap(UV_LOCAL_MMR_BASE | addr, sizeof(*mmr));
	val = *mmr;
	early_iounmap(mmr, sizeof(*mmr));
	return val;
}
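/*
 * The GRU aperture is established by map_gru_high() below; addresses inside
 * it (and the legacy ISA range) are exempted from PAT tracking via the
 * x86_platform.is_untracked_pat_range hook installed in
 * uv_acpi_madt_oem_check().
 */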
static inline bool is_GRU_range(u64 start, u64 end)
{
	return start >= gru_start_paddr && end <= gru_end_paddr;
}

static bool uv_is_untracked_pat_range(u64 start, u64 end)
{
	return is_ISA_range(start, end) || is_GRU_range(start, end);
}
static int __init early_get_pnodeid(void)
{
	union uvh_node_id_u node_id;
	union uvh_rh_gam_config_mmr_u m_n_config;
	int pnode;

	/* Currently, all blades have the same revision number */
	node_id.v = uv_early_read_mmr(UVH_NODE_ID);
	m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_CONFIG_MMR);
	uv_min_hub_revision_id = node_id.s.revision;

	pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1);
	return pnode;
}
static void __init early_get_apic_pnode_shift(void)
{
	uvh_apicid.v = uv_early_read_mmr(UVH_APICID);
	if (!uvh_apicid.v)
		/*
		 * Old bios, use default value
		 */
		uvh_apicid.s.pnode_shift = UV_APIC_PNODE_SHIFT;
}
/*
 * Add an extra bit as dictated by bios to the destination apicid of
 * interrupts potentially passing through the UV HUB.  This prevents
 * a deadlock between interrupts and IO port operations.
 */
static void __init uv_set_apicid_hibit(void)
{
	union uvh_lb_target_physical_apic_id_mask_u apicid_mask;

	apicid_mask.v = uv_early_read_mmr(UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK);
	uv_apicid_hibits = apicid_mask.s.bit_enables & UV_APICID_HIBIT_MASK;
}
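/*
 * uv_apicid_hibits is subsequently OR'ed into every destination APIC ID this
 * driver emits; see uv_wakeup_secondary(), uv_cpu_mask_to_apicid() and
 * uv_cpu_mask_to_apicid_and() below.
 */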
static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	int pnodeid;

	if (!strcmp(oem_id, "SGI")) {
		pnodeid = early_get_pnodeid();
		early_get_apic_pnode_shift();
		x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
		x86_platform.nmi_init = uv_nmi_init;
		if (!strcmp(oem_table_id, "UVL"))
			uv_system_type = UV_LEGACY_APIC;
		else if (!strcmp(oem_table_id, "UVX"))
			uv_system_type = UV_X2APIC;
		else if (!strcmp(oem_table_id, "UVH")) {
			__this_cpu_write(x2apic_extra_bits,
				pnodeid << uvh_apicid.s.pnode_shift);
			uv_system_type = UV_NON_UNIQUE_APIC;
			uv_set_apicid_hibit();
			return 1;
		}
	}
	return 0;
}
enum uv_system_type get_uv_system_type(void)
{
	return uv_system_type;
}

int is_uv_system(void)
{
	return uv_system_type != UV_NONE;
}
EXPORT_SYMBOL_GPL(is_uv_system);
DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);

struct uv_blade_info *uv_blade_info;
EXPORT_SYMBOL_GPL(uv_blade_info);

short *uv_node_to_blade;
EXPORT_SYMBOL_GPL(uv_node_to_blade);

short *uv_cpu_to_blade;
EXPORT_SYMBOL_GPL(uv_cpu_to_blade);

short uv_possible_blades;
EXPORT_SYMBOL_GPL(uv_possible_blades);

unsigned long sn_rtc_cycles_per_second;
EXPORT_SYMBOL(sn_rtc_cycles_per_second);
static const struct cpumask *uv_target_cpus(void)
{
	return cpu_online_mask;
}
static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	cpumask_clear(retmask);
	cpumask_set_cpu(cpu, retmask);
}
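/*
 * Start a secondary cpu by writing the INIT and then STARTUP message to the
 * hub's UVH_IPI_INT MMR rather than through the local APIC ICR; the STARTUP
 * vector field encodes start_rip >> 12, i.e. its physical page number.
 */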
static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
{
	unsigned long val;
	int pnode;

	pnode = uv_apicid_to_pnode(phys_apicid);
	phys_apicid |= uv_apicid_hibits;
	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	    (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	    ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
	    APIC_DM_INIT;
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
	mdelay(10);

	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	    (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	    ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
	    APIC_DM_STARTUP;
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);

	atomic_set(&init_deasserted, 1);

	return 0;
}
static void uv_send_IPI_one(int cpu, int vector)
{
	unsigned long apicid;
	int pnode;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	pnode = uv_apicid_to_pnode(apicid);
	uv_hub_send_ipi(pnode, apicid, vector);
}
static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		uv_send_IPI_one(cpu, vector);
}
static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (cpu != this_cpu)
			uv_send_IPI_one(cpu, vector);
	}
}
static void uv_send_IPI_allbutself(int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != this_cpu)
			uv_send_IPI_one(cpu, vector);
	}
}
static void uv_send_IPI_all(int vector)
{
	uv_send_IPI_mask(cpu_online_mask, vector);
}
static int uv_apic_id_registered(void)
{
	return 1;
}

static void uv_init_apic_ldr(void)
{
}
static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	int cpu = cpumask_first(cpumask);

	if ((unsigned)cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
	else
		return BAD_APICID;
}
static unsigned int
uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
			  const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	for_each_cpu_and(cpu, cpumask, andmask) {
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;
	}
	return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
}
static unsigned int x2apic_get_apic_id(unsigned long x)
{
	unsigned int id;

	WARN_ON(preemptible() && num_online_cpus() > 1);
	id = x | __this_cpu_read(x2apic_extra_bits);

	return id;
}

static unsigned long set_apic_id(unsigned int id)
{
	unsigned long x;

	/* maskout x2apic_extra_bits ? */
	x = id;
	return x;
}
static unsigned int uv_read_apic_id(void)
{
	return x2apic_get_apic_id(apic_read(APIC_ID));
}

static int uv_phys_pkg_id(int initial_apicid, int index_msb)
{
	return uv_read_apic_id() >> index_msb;
}

static void uv_send_IPI_self(int vector)
{
	apic_write(APIC_SELF_IPI, vector);
}
static int uv_probe(void)
{
	return apic == &apic_x2apic_uv_x;
}
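/*
 * The UV apic driver table.  uv_probe() above reports a match only when the
 * ACPI MADT OEM check has already switched apic to this structure; it is
 * registered with the apic core by apic_driver() at the bottom of this file.
 */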
static struct apic __refdata apic_x2apic_uv_x = {

	.name				= "UV large system",
	.probe				= uv_probe,
	.acpi_madt_oem_check		= uv_acpi_madt_oem_check,
	.apic_id_registered		= uv_apic_id_registered,

	.irq_delivery_mode		= dest_Fixed,
	.irq_dest_mode			= 0, /* physical */

	.target_cpus			= uv_target_cpus,
	.disable_esr			= 0,
	.dest_logical			= APIC_DEST_LOGICAL,
	.check_apicid_used		= NULL,
	.check_apicid_present		= NULL,

	.vector_allocation_domain	= uv_vector_allocation_domain,
	.init_apic_ldr			= uv_init_apic_ldr,

	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.multi_timer_check		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= uv_phys_pkg_id,
	.mps_oem_check			= NULL,

	.get_apic_id			= x2apic_get_apic_id,
	.set_apic_id			= set_apic_id,
	.apic_id_mask			= 0xFFFFFFFFu,

	.cpu_mask_to_apicid		= uv_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and		= uv_cpu_mask_to_apicid_and,

	.send_IPI_mask			= uv_send_IPI_mask,
	.send_IPI_mask_allbutself	= uv_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= uv_send_IPI_allbutself,
	.send_IPI_all			= uv_send_IPI_all,
	.send_IPI_self			= uv_send_IPI_self,

	.wakeup_secondary_cpu		= uv_wakeup_secondary,
	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert		= NULL,
	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= NULL,

	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
};
static __cpuinit void set_x2apic_extra_bits(int pnode)
{
	__this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift);
}
/*
 * Called on boot cpu.
 */
static __init int boot_pnode_to_blade(int pnode)
{
	int blade;

	for (blade = 0; blade < uv_num_possible_blades(); blade++)
		if (pnode == uv_blade_info[blade].pnode)
			return blade;
	BUG();
}
struct redir_addr {
	unsigned long redirect;
	unsigned long alias;
};

#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT
static __initdata struct redir_addr redir_addrs[] = {
	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR},
	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR},
	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR},
};
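/*
 * Find the low-memory redirect aperture: scan the three alias/redirect MMR
 * pairs above for an enabled alias with base 0 and report the redirected
 * destination base and the aliased size (1UL << m_alias).
 */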
static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
{
	union uvh_rh_gam_alias210_overlay_config_2_mmr_u alias;
	union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
	int i;

	for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) {
		alias.v = uv_read_local_mmr(redir_addrs[i].alias);
		if (alias.s.enable && alias.s.base == 0) {
			*size = (1UL << alias.s.m_alias);
			redirect.v = uv_read_local_mmr(redir_addrs[i].redirect);
			*base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
			return;
		}
	}
	*base = *size = 0;
}
enum map_type {map_wb, map_uc};
static __init void map_high(char *id, unsigned long base, int pshift,
			int bshift, int max_pnode, enum map_type map_type)
{
	unsigned long bytes, paddr;

	paddr = base << pshift;
	bytes = (1UL << bshift) * (max_pnode + 1);
	printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
						paddr + bytes);
	if (map_type == map_uc)
		init_extra_mapping_uc(paddr, bytes);
	else
		init_extra_mapping_wb(paddr, bytes);
}
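/*
 * Worked example (illustrative values only): base 0x3, pshift 28, bshift 28
 * and max_pnode 3 give paddr = 0x3 << 28 = 0x30000000 and bytes =
 * (1UL << 28) * 4 = 0x40000000, i.e. a 1GB mapping covering four pnodes.
 */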
static __init void map_gru_high(int max_pnode)
{
	union uvh_rh_gam_gru_overlay_config_mmr_u gru;
	int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;

	gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
	if (gru.s.enable) {
		map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb);
		gru_start_paddr = ((u64)gru.s.base << shift);
		gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1);
	}
}
static __init void map_mmr_high(int max_pnode)
{
	union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;
	int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;

	mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
	if (mmr.s.enable)
		map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc);
}
static __init void map_mmioh_high(int max_pnode)
{
	union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
	int shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;

	mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
	if (mmioh.s.enable)
		map_high("MMIOH", mmioh.s.base, shift, mmioh.s.m_io,
			max_pnode, map_uc);
}
static __init void map_low_mmrs(void)
{
	init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE);
	init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE);
}
static __init void uv_rtc_init(void)
{
	long status;
	u64 ticks_per_sec;

	status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
					&ticks_per_sec);
	if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) {
		printk(KERN_WARNING
			"unable to determine platform RTC clock frequency, "
			"guessing.\n");
		/* BIOS gives wrong value for clock freq. so guess */
		sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
	} else
		sn_rtc_cycles_per_second = ticks_per_sec;
}
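/*
 * Note: the fallback guess above works out to 1000000000000UL / 30000UL =
 * 33333333 cycles per second, i.e. roughly 33.3 MHz (consistent with a
 * 30000 ps, or 30 ns, RTC period).
 */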
/*
 * percpu heartbeat timer
 */
static void uv_heartbeat(unsigned long ignored)
{
	struct timer_list *timer = &uv_hub_info->scir.timer;
	unsigned char bits = uv_hub_info->scir.state;

	/* flip heartbeat bit */
	bits ^= SCIR_CPU_HEARTBEAT;

	/* is this cpu idle? */
	if (idle_cpu(raw_smp_processor_id()))
		bits &= ~SCIR_CPU_ACTIVITY;
	else
		bits |= SCIR_CPU_ACTIVITY;

	/* update system controller interface reg */
	uv_set_scir_bits(bits);

	/* enable next timer period */
	mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL);
}
static void __cpuinit uv_heartbeat_enable(int cpu)
{
	while (!uv_cpu_hub_info(cpu)->scir.enabled) {
		struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer;

		uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
		setup_timer(timer, uv_heartbeat, cpu);
		timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
		add_timer_on(timer, cpu);
		uv_cpu_hub_info(cpu)->scir.enabled = 1;

		/* also ensure that boot cpu is enabled */
		cpu = 0;
	}
}
#ifdef CONFIG_HOTPLUG_CPU
static void __cpuinit uv_heartbeat_disable(int cpu)
{
	if (uv_cpu_hub_info(cpu)->scir.enabled) {
		uv_cpu_hub_info(cpu)->scir.enabled = 0;
		del_timer(&uv_cpu_hub_info(cpu)->scir.timer);
	}
	uv_set_cpu_scir_bits(cpu, 0xff);
}
/*
 * cpu hotplug notifier
 */
static __cpuinit int uv_scir_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		uv_heartbeat_enable(cpu);
		break;
	case CPU_DOWN_PREPARE:
		uv_heartbeat_disable(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
static __init void uv_scir_register_cpu_notifier(void)
{
	hotcpu_notifier(uv_scir_cpu_notify, 0);
}

#else /* !CONFIG_HOTPLUG_CPU */

static __init void uv_scir_register_cpu_notifier(void)
{
}
static __init int uv_init_heartbeat(void)
{
	int cpu;

	if (is_uv_system())
		for_each_online_cpu(cpu)
			uv_heartbeat_enable(cpu);
	return 0;
}

late_initcall(uv_init_heartbeat);

#endif /* !CONFIG_HOTPLUG_CPU */
/* Direct Legacy VGA I/O traffic to designated IOH */
int uv_set_vga_state(struct pci_dev *pdev, bool decode,
		      unsigned int command_bits, bool change_bridge)
{
	int domain, bus, rc;

	PR_DEVEL("devfn %x decode %d cmd %x chg_brdg %d\n",
			pdev->devfn, decode, command_bits, change_bridge);

	if (!change_bridge)
		return 0;

	if ((command_bits & PCI_COMMAND_IO) == 0)
		return 0;

	domain = pci_domain_nr(pdev->bus);
	bus = pdev->bus->number;

	rc = uv_bios_set_legacy_vga_target(decode, domain, bus);
	PR_DEVEL("vga decode %d %x:%x, rc: %d\n", decode, domain, bus, rc);

	return rc;
}
/*
 * Called on each cpu to initialize the per_cpu UV data area.
 * FIXME: hotplug not supported yet
 */
void __cpuinit uv_cpu_init(void)
{
	/* CPU 0 initialization will be done via uv_system_init. */
	if (!uv_blade_info)
		return;

	uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;

	if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
		set_x2apic_extra_bits(uv_hub_info->pnode);
}
/*
 * When NMI is received, print a stack trace.
 */
int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
{
	unsigned long real_uv_nmi;
	int bid;

	if (reason != DIE_NMIUNKNOWN)
		return NOTIFY_OK;

	if (in_crash_kexec)
		/* do nothing if entering the crash kernel */
		return NOTIFY_OK;

	/*
	 * Each blade has an MMR that indicates when an NMI has been sent
	 * to cpus on the blade. If an NMI is detected, atomically
	 * clear the MMR and update a per-blade NMI count used to
	 * cause each cpu on the blade to notice a new NMI.
	 */
	bid = uv_numa_blade_id();
	real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);

	if (unlikely(real_uv_nmi)) {
		spin_lock(&uv_blade_info[bid].nmi_lock);
		real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);
		if (real_uv_nmi) {
			uv_blade_info[bid].nmi_count++;
			uv_write_local_mmr(UVH_NMI_MMR_CLEAR, UV_NMI_PENDING_MASK);
		}
		spin_unlock(&uv_blade_info[bid].nmi_lock);
	}

	if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count))
		return NOTIFY_DONE;

	__get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count;

	/*
	 * Use a lock so only one cpu prints at a time.
	 * This prevents intermixed output.
	 */
	spin_lock(&uv_nmi_lock);
	pr_info("UV NMI stack dump cpu %u:\n", smp_processor_id());
	dump_stack();
	spin_unlock(&uv_nmi_lock);

	return NOTIFY_STOP;
}
static struct notifier_block uv_dump_stack_nmi_nb = {
	.notifier_call	= uv_handle_nmi,
	.priority	= NMI_LOCAL_LOW_PRIOR - 1,
};
void uv_register_nmi_notifier(void)
{
	if (register_die_notifier(&uv_dump_stack_nmi_nb))
		printk(KERN_WARNING "UV NMI handler failed to register\n");
}
void uv_nmi_init(void)
{
	unsigned int value;

	/*
	 * Unmask NMI on all cpus
	 */
	value = apic_read(APIC_LVT1) | APIC_DM_NMI;
	value &= ~APIC_LVT_MASKED;
	apic_write(APIC_LVT1, value);
}
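/*
 * Boot-time UV setup: read the hub geometry (m_val/n_val), size and allocate
 * the blade tables, fill in per-cpu hub info for every present cpu, then
 * establish the GRU/MMR/MMIOH mappings and register the SCIR heartbeat and
 * NMI notifiers.
 */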
void __init uv_system_init(void)
{
	union uvh_rh_gam_config_mmr_u m_n_config;
	union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
	union uvh_node_id_u node_id;
	unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
	int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val, n_io;
	int gnode_extra, max_pnode = 0;
	unsigned long mmr_base, present, paddr;
	unsigned short pnode_mask, pnode_io_mask;

	map_low_mmrs();

	m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR);
	m_val = m_n_config.s.m_skt;
	n_val = m_n_config.s.n_skt;
	mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
	n_io = mmioh.s.n_io;
	mmr_base =
	    uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
	    ~UV_MMR_ENABLE;
	pnode_mask = (1 << n_val) - 1;
	pnode_io_mask = (1 << n_io) - 1;

	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
	gnode_extra = (node_id.s.node_id & ~((1 << n_val) - 1)) >> 1;
	gnode_upper = ((unsigned long)gnode_extra << m_val);
	printk(KERN_INFO "UV: N %d, M %d, N_IO: %d, gnode_upper 0x%lx, gnode_extra 0x%x, pnode_mask 0x%x, pnode_io_mask 0x%x\n",
			n_val, m_val, n_io, gnode_upper, gnode_extra, pnode_mask, pnode_io_mask);

	printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);

	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
		uv_possible_blades +=
		  hweight64(uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8));
	printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
	bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
	uv_blade_info = kzalloc(bytes, GFP_KERNEL);
	BUG_ON(!uv_blade_info);

	for (blade = 0; blade < uv_num_possible_blades(); blade++)
		uv_blade_info[blade].memory_nid = -1;

	get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);

	bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
	uv_node_to_blade = kmalloc(bytes, GFP_KERNEL);
	BUG_ON(!uv_node_to_blade);
	memset(uv_node_to_blade, 255, bytes);

	bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
	uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL);
	BUG_ON(!uv_cpu_to_blade);
	memset(uv_cpu_to_blade, 255, bytes);
	blade = 0;
	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
		present = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
		for (j = 0; j < 64; j++) {
			if (!test_bit(j, &present))
				continue;
			pnode = (i * 64 + j) & pnode_mask;
			uv_blade_info[blade].pnode = pnode;
			uv_blade_info[blade].nr_possible_cpus = 0;
			uv_blade_info[blade].nr_online_cpus = 0;
			spin_lock_init(&uv_blade_info[blade].nmi_lock);
			max_pnode = max(pnode, max_pnode);
			blade++;
		}
	}

	uv_bios_init();
	uv_bios_get_sn_info(0, &uv_type, &sn_partition_id, &sn_coherency_id,
			    &sn_region_size, &system_serial_number);
	uv_rtc_init();
	for_each_present_cpu(cpu) {
		int apicid = per_cpu(x86_cpu_to_apicid, cpu);

		nid = cpu_to_node(cpu);
		/*
		 * apic_pnode_shift must be set before calling uv_apicid_to_pnode();
		 */
		uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
		uv_cpu_hub_info(cpu)->apic_pnode_shift = uvh_apicid.s.pnode_shift;
		pnode = uv_apicid_to_pnode(apicid);
		blade = boot_pnode_to_blade(pnode);
		lcpu = uv_blade_info[blade].nr_possible_cpus;
		uv_blade_info[blade].nr_possible_cpus++;

		/* Any node on the blade, else will contain -1. */
		uv_blade_info[blade].memory_nid = nid;

		uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
		uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size;
		uv_cpu_hub_info(cpu)->m_val = m_val;
		uv_cpu_hub_info(cpu)->n_val = n_val;
		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
		uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
		uv_cpu_hub_info(cpu)->pnode = pnode;
		uv_cpu_hub_info(cpu)->gpa_mask = (1UL << (m_val + n_val)) - 1;
		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
		uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra;
		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
		uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
		uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
		uv_node_to_blade[nid] = blade;
		uv_cpu_to_blade[cpu] = blade;
	}
	/* Add blade/pnode info for nodes without cpus */
	for_each_online_node(nid) {
		if (uv_node_to_blade[nid] >= 0)
			continue;
		paddr = node_start_pfn(nid) << PAGE_SHIFT;
		paddr = uv_soc_phys_ram_to_gpa(paddr);
		pnode = (paddr >> m_val) & pnode_mask;
		blade = boot_pnode_to_blade(pnode);
		uv_node_to_blade[nid] = blade;
	}
	map_gru_high(max_pnode);
	map_mmr_high(max_pnode);
	map_mmioh_high(max_pnode & pnode_io_mask);

	uv_cpu_init();
	uv_scir_register_cpu_notifier();
	uv_register_nmi_notifier();
	proc_mkdir("sgi_uv", NULL);
	/* register Legacy VGA I/O redirection handler */
	pci_register_set_vga_state(uv_set_vga_state);

	/*
	 * For a kdump kernel the reset must be BOOT_ACPI, not BOOT_EFI, as
	 * EFI is not enabled in the kdump kernel.
	 */
	if (is_kdump_kernel())
		reboot_type = BOOT_ACPI;
}

apic_driver(apic_x2apic_uv_x);