arch/x86/kernel/apic/x2apic_uv_x.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV APIC functions (note: not an Intel compatible APIC)
 *
 * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved.
 */
#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/proc_fs.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/kdebug.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>

#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/uv/bios.h>
#include <asm/uv/uv.h>
#include <asm/apic.h>
#include <asm/ipi.h>
#include <asm/smp.h>
#include <asm/x86_init.h>
#include <asm/emergency-restart.h>
#include <asm/nmi.h>

/* The BMC sets a bit (bit 63) in this MMR non-zero before sending an NMI */
#define UVH_NMI_MMR                             UVH_SCRATCH5
#define UVH_NMI_MMR_CLEAR                       (UVH_NMI_MMR + 8)
#define UV_NMI_PENDING_MASK                     (1UL << 63)
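/*
 * Writing UV_NMI_PENDING_MASK to the "+ 8" alias of the scratch MMR
 * clears the pending bit (see uv_handle_nmi() below).  Each cpu records
 * the last blade NMI count it saw, so every cpu on the blade notices a
 * new NMI exactly once.
 */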
DEFINE_PER_CPU(unsigned long, cpu_last_nmi_count);

DEFINE_PER_CPU(int, x2apic_extra_bits);

#define PR_DEVEL(fmt, args...)  pr_devel("%s: " fmt, __func__, args)

static enum uv_system_type uv_system_type;
static u64 gru_start_paddr, gru_end_paddr;
static union uvh_apicid uvh_apicid;
int uv_min_hub_revision_id;
EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
unsigned int uv_apicid_hibits;
EXPORT_SYMBOL_GPL(uv_apicid_hibits);
static DEFINE_SPINLOCK(uv_nmi_lock);

static struct apic apic_x2apic_uv_x;

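/*
 * Read a local MMR before the permanent MMR mappings are set up, by
 * temporarily mapping it with early_ioremap() for a single read.
 */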
static unsigned long __init uv_early_read_mmr(unsigned long addr)
{
        unsigned long val, *mmr;

        mmr = early_ioremap(UV_LOCAL_MMR_BASE | addr, sizeof(*mmr));
        val = *mmr;
        early_iounmap(mmr, sizeof(*mmr));
        return val;
}

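/*
 * The GRU aperture lives outside normal kernel memory; treating it
 * (like the ISA hole) as untracked keeps the PAT code from tracking
 * mappings of that range.
 */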
static inline bool is_GRU_range(u64 start, u64 end)
{
        return start >= gru_start_paddr && end <= gru_end_paddr;
}

static bool uv_is_untracked_pat_range(u64 start, u64 end)
{
        return is_ISA_range(start, end) || is_GRU_range(start, end);
}

static int __init early_get_pnodeid(void)
{
        union uvh_node_id_u node_id;
        union uvh_rh_gam_config_mmr_u  m_n_config;
        int pnode;

        /* Currently, all blades have the same revision number */
        node_id.v = uv_early_read_mmr(UVH_NODE_ID);
        m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_CONFIG_MMR);
        uv_min_hub_revision_id = node_id.s.revision;

        if (node_id.s.part_number == UV2_HUB_PART_NUMBER)
                uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
        if (node_id.s.part_number == UV2_HUB_PART_NUMBER_X)
                uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;

        uv_hub_info->hub_revision = uv_min_hub_revision_id;
        pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1);
        return pnode;
}

static void __init early_get_apic_pnode_shift(void)
{
        uvh_apicid.v = uv_early_read_mmr(UVH_APICID);
        if (!uvh_apicid.v)
                /*
                 * Old BIOS; use the default value
                 */
                uvh_apicid.s.pnode_shift = UV_APIC_PNODE_SHIFT;
}

/*
 * Add an extra bit as dictated by the BIOS to the destination apicid of
 * interrupts potentially passing through the UV HUB.  This prevents
 * a deadlock between interrupts and IO port operations.
 */
static void __init uv_set_apicid_hibit(void)
{
        union uv1h_lb_target_physical_apic_id_mask_u apicid_mask;

        if (is_uv1_hub()) {
                apicid_mask.v =
                        uv_early_read_mmr(UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK);
                uv_apicid_hibits =
                        apicid_mask.s1.bit_enables & UV_APICID_HIBIT_MASK;
        }
}

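/*
 * The MADT OEM ids identify a UV system and select its APIC mode:
 * "UVL" is legacy APIC, "UVX" is x2apic, and "UVH" is the hub-routed
 * mode whose apicids are made unique by folding in the pnode.
 */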
static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
        int pnodeid, is_uv1, is_uv2;

        is_uv1 = !strcmp(oem_id, "SGI");
        is_uv2 = !strcmp(oem_id, "SGI2");
        if (is_uv1 || is_uv2) {
                uv_hub_info->hub_revision =
                        is_uv1 ? UV1_HUB_REVISION_BASE : UV2_HUB_REVISION_BASE;
                pnodeid = early_get_pnodeid();
                early_get_apic_pnode_shift();
                x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
                x86_platform.nmi_init = uv_nmi_init;
                if (!strcmp(oem_table_id, "UVL"))
                        uv_system_type = UV_LEGACY_APIC;
                else if (!strcmp(oem_table_id, "UVX"))
                        uv_system_type = UV_X2APIC;
                else if (!strcmp(oem_table_id, "UVH")) {
                        __this_cpu_write(x2apic_extra_bits,
                                pnodeid << uvh_apicid.s.pnode_shift);
                        uv_system_type = UV_NON_UNIQUE_APIC;
                        uv_set_apicid_hibit();
                        return 1;
                }
        }
        return 0;
}

enum uv_system_type get_uv_system_type(void)
{
        return uv_system_type;
}

int is_uv_system(void)
{
        return uv_system_type != UV_NONE;
}
EXPORT_SYMBOL_GPL(is_uv_system);

DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);

struct uv_blade_info *uv_blade_info;
EXPORT_SYMBOL_GPL(uv_blade_info);

short *uv_node_to_blade;
EXPORT_SYMBOL_GPL(uv_node_to_blade);

short *uv_cpu_to_blade;
EXPORT_SYMBOL_GPL(uv_cpu_to_blade);

short uv_possible_blades;
EXPORT_SYMBOL_GPL(uv_possible_blades);

unsigned long sn_rtc_cycles_per_second;
EXPORT_SYMBOL(sn_rtc_cycles_per_second);

static const struct cpumask *uv_target_cpus(void)
{
        return cpu_online_mask;
}

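/* Fixed IRQ delivery: each vector's allocation domain is a single cpu. */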
static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
        cpumask_clear(retmask);
        cpumask_set_cpu(cpu, retmask);
}

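/*
 * Wake a secondary cpu by writing INIT and then STARTUP messages
 * directly to the hub's IPI_INT MMR; the STARTUP vector is the page
 * number of the real-mode trampoline (start_rip >> 12).
 */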
static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
{
#ifdef CONFIG_SMP
        unsigned long val;
        int pnode;

        pnode = uv_apicid_to_pnode(phys_apicid);
        phys_apicid |= uv_apicid_hibits;
        val = (1UL << UVH_IPI_INT_SEND_SHFT) |
            (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
            ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
            APIC_DM_INIT;
        uv_write_global_mmr64(pnode, UVH_IPI_INT, val);

        val = (1UL << UVH_IPI_INT_SEND_SHFT) |
            (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
            ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
            APIC_DM_STARTUP;
        uv_write_global_mmr64(pnode, UVH_IPI_INT, val);

        atomic_set(&init_deasserted, 1);
#endif
        return 0;
}

static void uv_send_IPI_one(int cpu, int vector)
{
        unsigned long apicid;
        int pnode;

        apicid = per_cpu(x86_cpu_to_apicid, cpu);
        pnode = uv_apicid_to_pnode(apicid);
        uv_hub_send_ipi(pnode, apicid, vector);
}

static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
{
        unsigned int cpu;

        for_each_cpu(cpu, mask)
                uv_send_IPI_one(cpu, vector);
}

static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
        unsigned int this_cpu = smp_processor_id();
        unsigned int cpu;

        for_each_cpu(cpu, mask) {
                if (cpu != this_cpu)
                        uv_send_IPI_one(cpu, vector);
        }
}

static void uv_send_IPI_allbutself(int vector)
{
        unsigned int this_cpu = smp_processor_id();
        unsigned int cpu;

        for_each_online_cpu(cpu) {
                if (cpu != this_cpu)
                        uv_send_IPI_one(cpu, vector);
        }
}

static void uv_send_IPI_all(int vector)
{
        uv_send_IPI_mask(cpu_online_mask, vector);
}

static int uv_apic_id_registered(void)
{
        return 1;
}

static void uv_init_apic_ldr(void)
{
}

static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
        /*
         * We're using fixed IRQ delivery, can only return one phys APIC ID.
         * May as well be the first.
         */
        int cpu = cpumask_first(cpumask);

        if ((unsigned)cpu < nr_cpu_ids)
                return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
        else
                return BAD_APICID;
}

static unsigned int
uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
                          const struct cpumask *andmask)
{
        int cpu;

        /*
         * We're using fixed IRQ delivery, can only return one phys APIC ID.
         * May as well be the first.
         */
        for_each_cpu_and(cpu, cpumask, andmask) {
                if (cpumask_test_cpu(cpu, cpu_online_mask))
                        break;
        }
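        /*
         * Note: this assumes the masks intersect at least one online
         * cpu; otherwise cpu ends up == nr_cpu_ids and the per_cpu()
         * read below is out of bounds.
         */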
        return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
}

static unsigned int x2apic_get_apic_id(unsigned long x)
{
        unsigned int id;

        WARN_ON(preemptible() && num_online_cpus() > 1);
        id = x | __this_cpu_read(x2apic_extra_bits);

        return id;
}

static unsigned long set_apic_id(unsigned int id)
{
        unsigned long x;

        /* mask out x2apic_extra_bits? */
        x = id;
        return x;
}

static unsigned int uv_read_apic_id(void)
{
        return x2apic_get_apic_id(apic_read(APIC_ID));
}

static int uv_phys_pkg_id(int initial_apicid, int index_msb)
{
        return uv_read_apic_id() >> index_msb;
}

static void uv_send_IPI_self(int vector)
{
        apic_write(APIC_SELF_IPI, vector);
}

static int uv_probe(void)
{
        return apic == &apic_x2apic_uv_x;
}

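/*
 * The UV apic driver: standard x2apic MSR accessors combined with
 * UV-specific IPI and cpu-wakeup paths that go through the hub MMRs.
 */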
static struct apic __refdata apic_x2apic_uv_x = {

        .name                           = "UV large system",
        .probe                          = uv_probe,
        .acpi_madt_oem_check            = uv_acpi_madt_oem_check,
        .apic_id_registered             = uv_apic_id_registered,

        .irq_delivery_mode              = dest_Fixed,
        .irq_dest_mode                  = 0, /* physical */

        .target_cpus                    = uv_target_cpus,
        .disable_esr                    = 0,
        .dest_logical                   = APIC_DEST_LOGICAL,
        .check_apicid_used              = NULL,
        .check_apicid_present           = NULL,

        .vector_allocation_domain       = uv_vector_allocation_domain,
        .init_apic_ldr                  = uv_init_apic_ldr,

        .ioapic_phys_id_map             = NULL,
        .setup_apic_routing             = NULL,
        .multi_timer_check              = NULL,
        .cpu_present_to_apicid          = default_cpu_present_to_apicid,
        .apicid_to_cpu_present          = NULL,
        .setup_portio_remap             = NULL,
        .check_phys_apicid_present      = default_check_phys_apicid_present,
        .enable_apic_mode               = NULL,
        .phys_pkg_id                    = uv_phys_pkg_id,
        .mps_oem_check                  = NULL,

        .get_apic_id                    = x2apic_get_apic_id,
        .set_apic_id                    = set_apic_id,
        .apic_id_mask                   = 0xFFFFFFFFu,

        .cpu_mask_to_apicid             = uv_cpu_mask_to_apicid,
        .cpu_mask_to_apicid_and         = uv_cpu_mask_to_apicid_and,

        .send_IPI_mask                  = uv_send_IPI_mask,
        .send_IPI_mask_allbutself       = uv_send_IPI_mask_allbutself,
        .send_IPI_allbutself            = uv_send_IPI_allbutself,
        .send_IPI_all                   = uv_send_IPI_all,
        .send_IPI_self                  = uv_send_IPI_self,

        .wakeup_secondary_cpu           = uv_wakeup_secondary,
        .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
        .wait_for_init_deassert         = NULL,
        .smp_callin_clear_local_apic    = NULL,
        .inquire_remote_apic            = NULL,

        .read                           = native_apic_msr_read,
        .write                          = native_apic_msr_write,
        .icr_read                       = native_x2apic_icr_read,
        .icr_write                      = native_x2apic_icr_write,
        .wait_icr_idle                  = native_x2apic_wait_icr_idle,
        .safe_wait_icr_idle             = native_safe_x2apic_wait_icr_idle,
};

static __cpuinit void set_x2apic_extra_bits(int pnode)
{
        __this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift);
}

/*
 * Called on boot cpu.
 */
static __init int boot_pnode_to_blade(int pnode)
{
        int blade;

        for (blade = 0; blade < uv_num_possible_blades(); blade++)
                if (pnode == uv_blade_info[blade].pnode)
                        return blade;
        BUG();
}

struct redir_addr {
        unsigned long redirect;
        unsigned long alias;
};

#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT

static __initdata struct redir_addr redir_addrs[] = {
        {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR},
        {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR},
        {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR},
};

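/*
 * Find the enabled ALIAS210 overlay that aliases physical address 0 and
 * report the base and size of the region that low memory is redirected
 * to.
 */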
static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
{
        union uvh_rh_gam_alias210_overlay_config_2_mmr_u alias;
        union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
        int i;

        for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) {
                alias.v = uv_read_local_mmr(redir_addrs[i].alias);
                if (alias.s.enable && alias.s.base == 0) {
                        *size = (1UL << alias.s.m_alias);
                        redirect.v = uv_read_local_mmr(redir_addrs[i].redirect);
                        *base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
                        return;
                }
        }
        *base = *size = 0;
}

enum map_type {map_wb, map_uc};

static __init void map_high(char *id, unsigned long base, int pshift,
                        int bshift, int max_pnode, enum map_type map_type)
{
        unsigned long bytes, paddr;

        paddr = base << pshift;
        bytes = (1UL << bshift) * (max_pnode + 1);
        printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
                                                paddr + bytes);
        if (map_type == map_uc)
                init_extra_mapping_uc(paddr, bytes);
        else
                init_extra_mapping_wb(paddr, bytes);
}

static __init void map_gru_high(int max_pnode)
{
        union uvh_rh_gam_gru_overlay_config_mmr_u gru;
        int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;

        gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
        if (gru.s.enable) {
                map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb);
                gru_start_paddr = ((u64)gru.s.base << shift);
                gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1);
        }
}

static __init void map_mmr_high(int max_pnode)
{
        union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;
        int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;

        mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
        if (mmr.s.enable)
                map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc);
}

static __init void map_mmioh_high(int max_pnode)
{
        union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
        int shift;

        mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
        if (is_uv1_hub() && mmioh.s1.enable) {
                shift = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
                map_high("MMIOH", mmioh.s1.base, shift, mmioh.s1.m_io,
                        max_pnode, map_uc);
        }
        if (is_uv2_hub() && mmioh.s2.enable) {
                shift = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
                map_high("MMIOH", mmioh.s2.base, shift, mmioh.s2.m_io,
                        max_pnode, map_uc);
        }
}

static __init void map_low_mmrs(void)
{
        init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE);
        init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE);
}

static __init void uv_rtc_init(void)
{
        long status;
        u64 ticks_per_sec;

        status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
                                        &ticks_per_sec);
        if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) {
                printk(KERN_WARNING
                        "unable to determine platform RTC clock frequency, "
                        "guessing.\n");
                /* BIOS gives wrong value for clock freq, so guess */
                sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
        } else
                sn_rtc_cycles_per_second = ticks_per_sec;
}

/*
 * Per-cpu heartbeat timer: periodically flip a heartbeat bit and
 * publish the cpu's idle/active state in its System Controller
 * Interface Register (SCIR) so the system controller can monitor
 * each cpu.
 */
static void uv_heartbeat(unsigned long ignored)
{
        struct timer_list *timer = &uv_hub_info->scir.timer;
        unsigned char bits = uv_hub_info->scir.state;

        /* flip heartbeat bit */
        bits ^= SCIR_CPU_HEARTBEAT;

        /* is this cpu idle? */
        if (idle_cpu(raw_smp_processor_id()))
                bits &= ~SCIR_CPU_ACTIVITY;
        else
                bits |= SCIR_CPU_ACTIVITY;

        /* update system controller interface reg */
        uv_set_scir_bits(bits);

        /* enable next timer period */
        mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL);
}

static void __cpuinit uv_heartbeat_enable(int cpu)
{
        while (!uv_cpu_hub_info(cpu)->scir.enabled) {
                struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer;

                uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
                setup_timer(timer, uv_heartbeat, cpu);
                timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
                add_timer_on(timer, cpu);
                uv_cpu_hub_info(cpu)->scir.enabled = 1;

                /* also ensure that boot cpu is enabled */
                cpu = 0;
        }
}

#ifdef CONFIG_HOTPLUG_CPU
static void __cpuinit uv_heartbeat_disable(int cpu)
{
        if (uv_cpu_hub_info(cpu)->scir.enabled) {
                uv_cpu_hub_info(cpu)->scir.enabled = 0;
                del_timer(&uv_cpu_hub_info(cpu)->scir.timer);
        }
        uv_set_cpu_scir_bits(cpu, 0xff);
}

/*
 * cpu hotplug notifier
 */
static __cpuinit int uv_scir_cpu_notify(struct notifier_block *self,
                                       unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;

        switch (action) {
        case CPU_ONLINE:
                uv_heartbeat_enable(cpu);
                break;
        case CPU_DOWN_PREPARE:
                uv_heartbeat_disable(cpu);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static __init void uv_scir_register_cpu_notifier(void)
{
        hotcpu_notifier(uv_scir_cpu_notify, 0);
}

#else /* !CONFIG_HOTPLUG_CPU */

static __init void uv_scir_register_cpu_notifier(void)
{
}

static __init int uv_init_heartbeat(void)
{
        int cpu;

        if (is_uv_system())
                for_each_online_cpu(cpu)
                        uv_heartbeat_enable(cpu);
        return 0;
}

late_initcall(uv_init_heartbeat);

#endif /* !CONFIG_HOTPLUG_CPU */

/* Direct Legacy VGA I/O traffic to designated IOH */
int uv_set_vga_state(struct pci_dev *pdev, bool decode,
                      unsigned int command_bits, u32 flags)
{
        int domain, bus, rc;

        PR_DEVEL("devfn %x decode %d cmd %x flags %d\n",
                        pdev->devfn, decode, command_bits, flags);

        if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
                return 0;

        if ((command_bits & PCI_COMMAND_IO) == 0)
                return 0;

        domain = pci_domain_nr(pdev->bus);
        bus = pdev->bus->number;

        rc = uv_bios_set_legacy_vga_target(decode, domain, bus);
        PR_DEVEL("vga decode %d %x:%x, rc: %d\n", decode, domain, bus, rc);

        return rc;
}

/*
 * Called on each cpu to initialize the per_cpu UV data area.
 * FIXME: hotplug not supported yet
 */
void __cpuinit uv_cpu_init(void)
{
        /* CPU 0 initialization is done via uv_system_init(). */
        if (!uv_blade_info)
                return;

        uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;

        if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
                set_x2apic_extra_bits(uv_hub_info->pnode);
}

/*
 * When an NMI is received, print a stack trace.
 */
int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
{
        unsigned long real_uv_nmi;
        int bid;

        /*
         * Each blade has an MMR that indicates when an NMI has been sent
         * to cpus on the blade. If an NMI is detected, atomically
         * clear the MMR and update a per-blade NMI count used to
         * cause each cpu on the blade to notice a new NMI.
         */
        bid = uv_numa_blade_id();
        real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);

        if (unlikely(real_uv_nmi)) {
                spin_lock(&uv_blade_info[bid].nmi_lock);
                real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);
                if (real_uv_nmi) {
                        uv_blade_info[bid].nmi_count++;
                        uv_write_local_mmr(UVH_NMI_MMR_CLEAR, UV_NMI_PENDING_MASK);
                }
                spin_unlock(&uv_blade_info[bid].nmi_lock);
        }

        if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count))
                return NMI_DONE;

        __get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count;

        /*
         * Use a lock so only one cpu prints at a time.
         * This prevents intermixed output.
         */
        spin_lock(&uv_nmi_lock);
        pr_info("UV NMI stack dump cpu %u:\n", smp_processor_id());
        dump_stack();
        spin_unlock(&uv_nmi_lock);

        return NMI_HANDLED;
}

void uv_register_nmi_notifier(void)
{
        if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv"))
                printk(KERN_WARNING "UV NMI handler failed to register\n");
}

void uv_nmi_init(void)
{
        unsigned int value;

        /*
         * Unmask NMI on all cpus
         */
        value = apic_read(APIC_LVT1) | APIC_DM_NMI;
        value &= ~APIC_LVT_MASKED;
        apic_write(APIC_LVT1, value);
}

void __init uv_system_init(void)
{
        union uvh_rh_gam_config_mmr_u  m_n_config;
        union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
        union uvh_node_id_u node_id;
        unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
        int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val, n_io;
        int gnode_extra, max_pnode = 0;
        unsigned long mmr_base, present, paddr;
        unsigned short pnode_mask, pnode_io_mask;

        printk(KERN_INFO "UV: Found %s hub\n", is_uv1_hub() ? "UV1" : "UV2");
        map_low_mmrs();

        m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR);
        m_val = m_n_config.s.m_skt;
        n_val = m_n_config.s.n_skt;
        mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
        n_io = is_uv1_hub() ? mmioh.s1.n_io : mmioh.s2.n_io;
        mmr_base =
            uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
            ~UV_MMR_ENABLE;
        pnode_mask = (1 << n_val) - 1;
        pnode_io_mask = (1 << n_io) - 1;

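        /*
         * M (m_skt) is the number of physical address bits per pnode
         * and N (n_skt) the number of pnode id bits; a global address
         * is essentially (pnode << M) | offset.  gnode_upper holds the
         * fixed upper node-id bits shared by all blades.
         */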
        node_id.v = uv_read_local_mmr(UVH_NODE_ID);
        gnode_extra = (node_id.s.node_id & ~((1 << n_val) - 1)) >> 1;
        gnode_upper = ((unsigned long)gnode_extra << m_val);
        printk(KERN_INFO "UV: N %d, M %d, N_IO: %d, gnode_upper 0x%lx, gnode_extra 0x%x, pnode_mask 0x%x, pnode_io_mask 0x%x\n",
                        n_val, m_val, n_io, gnode_upper, gnode_extra, pnode_mask, pnode_io_mask);

        printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);

        for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
                uv_possible_blades +=
                  hweight64(uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8));

        /* uv_num_possible_blades() is really the hub count */
        printk(KERN_INFO "UV: Found %d blades, %d hubs\n",
                        is_uv1_hub() ? uv_num_possible_blades() :
                        (uv_num_possible_blades() + 1) / 2,
                        uv_num_possible_blades());

        bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
        uv_blade_info = kzalloc(bytes, GFP_KERNEL);
        BUG_ON(!uv_blade_info);

        for (blade = 0; blade < uv_num_possible_blades(); blade++)
                uv_blade_info[blade].memory_nid = -1;

        get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);

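        /* memset to 0xff: the short entries read back as -1 (unmapped) */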
        bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
        uv_node_to_blade = kmalloc(bytes, GFP_KERNEL);
        BUG_ON(!uv_node_to_blade);
        memset(uv_node_to_blade, 255, bytes);

        bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
        uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL);
        BUG_ON(!uv_cpu_to_blade);
        memset(uv_cpu_to_blade, 255, bytes);

        blade = 0;
        for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
                present = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
                for (j = 0; j < 64; j++) {
                        if (!test_bit(j, &present))
                                continue;
                        pnode = (i * 64 + j) & pnode_mask;
                        uv_blade_info[blade].pnode = pnode;
                        uv_blade_info[blade].nr_possible_cpus = 0;
                        uv_blade_info[blade].nr_online_cpus = 0;
                        spin_lock_init(&uv_blade_info[blade].nmi_lock);
                        max_pnode = max(pnode, max_pnode);
                        blade++;
                }
        }

        uv_bios_init();
        uv_bios_get_sn_info(0, &uv_type, &sn_partition_id, &sn_coherency_id,
                            &sn_region_size, &system_serial_number);
        uv_rtc_init();

        for_each_present_cpu(cpu) {
                int apicid = per_cpu(x86_cpu_to_apicid, cpu);

                nid = cpu_to_node(cpu);
                /*
                 * apic_pnode_shift must be set before calling
                 * uv_apicid_to_pnode().
                 */
                uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
                uv_cpu_hub_info(cpu)->apic_pnode_shift = uvh_apicid.s.pnode_shift;
                uv_cpu_hub_info(cpu)->hub_revision = uv_hub_info->hub_revision;

                uv_cpu_hub_info(cpu)->m_shift = 64 - m_val;
                uv_cpu_hub_info(cpu)->n_lshift = is_uv2_1_hub() ?
                                (m_val == 40 ? 40 : 39) : m_val;

                pnode = uv_apicid_to_pnode(apicid);
                blade = boot_pnode_to_blade(pnode);
                lcpu = uv_blade_info[blade].nr_possible_cpus;
                uv_blade_info[blade].nr_possible_cpus++;

                /* Record a memory node on this blade; cpu-less blades keep -1. */
                uv_blade_info[blade].memory_nid = nid;

                uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
                uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size;
                uv_cpu_hub_info(cpu)->m_val = m_val;
                uv_cpu_hub_info(cpu)->n_val = n_val;
                uv_cpu_hub_info(cpu)->numa_blade_id = blade;
                uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
                uv_cpu_hub_info(cpu)->pnode = pnode;
                uv_cpu_hub_info(cpu)->gpa_mask = (1UL << (m_val + n_val)) - 1;
                uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
                uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra;
                uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
                uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
                uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
                uv_node_to_blade[nid] = blade;
                uv_cpu_to_blade[cpu] = blade;
        }

        /* Add blade/pnode info for nodes without cpus */
        for_each_online_node(nid) {
                if (uv_node_to_blade[nid] >= 0)
                        continue;
                paddr = node_start_pfn(nid) << PAGE_SHIFT;
                pnode = uv_gpa_to_pnode(uv_soc_phys_ram_to_gpa(paddr));
                blade = boot_pnode_to_blade(pnode);
                uv_node_to_blade[nid] = blade;
        }

        map_gru_high(max_pnode);
        map_mmr_high(max_pnode);
        map_mmioh_high(max_pnode & pnode_io_mask);

        uv_cpu_init();
        uv_scir_register_cpu_notifier();
        uv_register_nmi_notifier();
        proc_mkdir("sgi_uv", NULL);

        /* register Legacy VGA I/O redirection handler */
        pci_register_set_vga_state(uv_set_vga_state);

        /*
         * For a kdump kernel the reset must be BOOT_ACPI, not BOOT_EFI, as
         * EFI is not enabled in the kdump kernel.
         */
        if (is_kdump_kernel())
                reboot_type = BOOT_ACPI;
}

apic_driver(apic_x2apic_uv_x);