/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;

#ifdef CONFIG_PPC64
void __devinit smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	paca[nr].cpu_start = 1;
	smp_mb();
}
#endif

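/*
 * IPIs are multiplexed over four message slots: call-function,
 * reschedule, call-function-single and debugger break. A platform
 * either funnels its single hardware IPI through smp_message_recv()
 * below to demultiplex, or, when its interrupt controller provides
 * four or more IPIs, registers one handler per message with
 * smp_request_message_ipi().
 */
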
void smp_message_recv(int msg)
{
	switch (msg) {
	case PPC_MSG_CALL_FUNCTION:
		generic_smp_call_function_interrupt();
		break;
	case PPC_MSG_RESCHEDULE:
		/* we notice need_resched on exit */
		break;
	case PPC_MSG_CALL_FUNC_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
	case PPC_MSG_DEBUGGER_BREAK:
		if (crash_ipi_function_ptr) {
			crash_ipi_function_ptr(get_irq_regs());
			break;
		}
#ifdef CONFIG_DEBUGGER
		debugger_ipi(get_irq_regs());
		break;
#endif /* CONFIG_DEBUGGER */
		/* FALLTHROUGH */
	default:
		printk(KERN_WARNING "SMP %d: smp_message_recv(): unknown msg %d\n",
		       smp_processor_id(), msg);
		break;
	}
}

static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	/* we just need the return path side effect of checking need_resched */
	return IRQ_HANDLED;
}

static irqreturn_t call_function_single_action(int irq, void *data)
{
	generic_smp_call_function_single_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t debug_ipi_action(int irq, void *data)
{
	smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
	return IRQ_HANDLED;
}

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
	[PPC_MSG_CALL_FUNC_SINGLE] = call_function_single_action,
	[PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};

const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
	[PPC_MSG_CALL_FUNC_SINGLE] = "ipi call function single",
	[PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
		return -EINVAL;
	}
#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
	if (msg == PPC_MSG_DEBUGGER_BREAK) {
		return 1;
	}
#endif
	err = request_irq(virq, smp_ipi_action[msg], IRQF_DISABLED|IRQF_PERCPU,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
	     virq, smp_ipi_name[msg], err);

	return err;
}

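/*
 * Usage sketch (illustrative only): a platform whose interrupt
 * controller provides one virq per message could register all four
 * handlers from its smp_ops->probe() hook; ipi_virq[] here is a
 * hypothetical array of virqs, not a real kernel symbol:
 *
 *	int msg;
 *
 *	for (msg = 0; msg <= PPC_MSG_DEBUGGER_BREAK; msg++)
 *		smp_request_message_ipi(ipi_virq[msg], msg);
 */
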
void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_DEBUGGER
void smp_send_debugger_break(int cpu)
{
	if (likely(smp_ops))
		smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

#ifdef CONFIG_KEXEC
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	crash_ipi_function_ptr = crash_ipi_callback;
	if (crash_ipi_callback && smp_ops) {
		mb();
		smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_DEBUGGER_BREAK);
	}
}
#endif

static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

struct thread_info *current_set[NR_CPUS];

static void __devinit smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
}

static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/* create a process for the processor */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
#ifdef CONFIG_PPC64
	paca[cpu].__current = p;
	paca[cpu].kstack = (unsigned long) task_thread_info(p)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	current_set[cpu] = task_thread_info(p);
	task_thread_info(p)->cpu = cpu;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up yet, but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
	}

	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (smp_ops)
		if (smp_ops->probe)
			max_cpus = smp_ops->probe();
		else
			max_cpus = NR_CPUS;
	else
		max_cpus = 1;

	for_each_possible_cpu(cpu)
		if (cpu != boot_cpuid)
			smp_create_idle(cpu);
}

void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca[boot_cpuid].__current = current;
#endif
	current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };

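/*
 * cpu_state carries the offline handshake: the dying CPU stores
 * CPU_DEAD (from generic_mach_cpu_die() below, or the platform calls
 * generic_set_cpu_dead() on its behalf) while generic_cpu_die() on a
 * surviving CPU polls for that transition. A CPU parked in
 * generic_mach_cpu_die() is released when its state is set back to
 * CPU_UP_PREPARE, which is expected to happen when the CPU is brought
 * online again.
 */
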
int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	migrate_irqs();
	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_mach_cpu_die(void)
{
	unsigned int cpu;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	printk(KERN_DEBUG "CPU%d offline\n", cpu);
	__get_cpu_var(cpu_state) = CPU_DEAD;
	smp_wmb();
	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
		cpu_relax();
}

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}
#endif /* CONFIG_HOTPLUG_CPU */

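/*
 * Onlining is a two-sided handshake: __cpu_up() kicks the new CPU and
 * spins on cpu_callin_map[cpu]; the new CPU announces itself by
 * setting that flag from start_secondary(), and __cpu_up() then waits
 * for it to appear in the online map.
 */
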
int __cpuinit __cpu_up(unsigned int cpu)
{
	int c;

	secondary_ti = current_set[cpu];

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	/* Make sure callin map entry is 0 (can be left over from a
	 * previous CPU hotplug)
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	smp_ops->kick_cpu(cpu);

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case. Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online map */
	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const int *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = *reg;
out:
	of_node_put(np);
	return id;
}

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);

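/*
 * Example: with threads_per_core = 4 (so threads_shift = 2),
 * cpu_core_index_of_thread(5) = 5 >> 2 = 1 and
 * cpu_first_thread_of_core(1) = 1 << 2 = 4; i.e. logical cpu 5 is a
 * thread of core 1, whose first thread is logical cpu 4.
 */
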
/* Must be called when no change can occur to cpu_present_map,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}

/* Activate a secondary processor. */
void __devinit start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	struct device_node *l2_cache;
	int i, base;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

	ipi_call_lock();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);
	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
	}
	l2_cache = cpu_to_l2cache(cpu);
	for_each_online_cpu(i) {
		struct device_node *np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			cpumask_set_cpu(cpu, cpu_core_mask(i));
			cpumask_set_cpu(i, cpu_core_mask(cpu));
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);
	ipi_call_unlock();

	local_irq_enable();

	cpu_idle();

	BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_var_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the meantime
	 * so we pin ourselves down to CPU 0 for a short while
	 */
	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
	cpumask_copy(old_mask, &current->cpus_allowed);
	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));

	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	set_cpus_allowed_ptr(current, old_mask);

	free_cpumask_var(old_mask);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();
}

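/*
 * On CPUs with the CPU_FTR_ASYM_SMT feature (e.g. POWER7), the lower
 * numbered threads of a core perform better, so SD_ASYM_PACKING tells
 * the scheduler to pack runnable tasks onto the lowest-numbered idle
 * threads first.
 */
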
int arch_sd_sibling_asym_packing(void)
{
	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		return SD_ASYM_PACKING;
	}
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	struct device_node *l2_cache;
	int cpu = smp_processor_id();
	int base, i;
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
	}

	l2_cache = cpu_to_l2cache(cpu);
	for_each_present_cpu(i) {
		struct device_node *np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			cpumask_clear_cpu(cpu, cpu_core_mask(i));
			cpumask_clear_cpu(i, cpu_core_mask(cpu));
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

static DEFINE_MUTEX(powerpc_cpu_hotplug_driver_mutex);

void cpu_hotplug_driver_lock(void)
{
	mutex_lock(&powerpc_cpu_hotplug_driver_mutex);
}

void cpu_hotplug_driver_unlock(void)
{
	mutex_unlock(&powerpc_cpu_hotplug_driver_mutex);
}

void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}
#endif /* CONFIG_HOTPLUG_CPU */