/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

/* Store all idle threads, this can be reused instead of creating
 * a new thread. Also avoids complicated thread destroy functionality
 * for idle threads.
 */
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
 * removed after init for !CONFIG_HOTPLUG_CPU.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
#define get_idle_for_cpu(x)	(per_cpu(idle_thread_array, x))
#define set_idle_for_cpu(x, p)	(per_cpu(idle_thread_array, x) = (p))

/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };

#else
static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;
#define get_idle_for_cpu(x)	(idle_thread_array[(x)])
#define set_idle_for_cpu(x, p)	(idle_thread_array[(x)] = (p))
#endif

struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;

#ifdef CONFIG_PPC64
int __devinit smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca[nr].cpu_start) {
		paca[nr].cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Ok it's not there, so it might be soft-unplugged, let's
	 * try to bring it back.
	 */
	per_cpu(cpu_state, nr) = CPU_UP_PREPARE;
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */
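
/*
 * A note on the handshake above (a sketch, based on the ppc64 boot
 * path): secondary CPUs sit in the low-level startup spin loop
 * (generic_secondary_smp_init in head_64.S) polling their paca's
 * cpu_start field, so setting it non-zero is normally all a "kick"
 * has to do. The smp_mb() publishes that store and orders it after
 * the bringup state __cpu_up() wrote for us (e.g. secondary_ti).
 */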

static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

static irqreturn_t call_function_single_action(int irq, void *data)
{
	generic_smp_call_function_single_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t debug_ipi_action(int irq, void *data)
{
	if (crash_ipi_function_ptr) {
		crash_ipi_function_ptr(get_irq_regs());
		return IRQ_HANDLED;
	}

#ifdef CONFIG_DEBUGGER
	debugger_ipi(get_irq_regs());
#endif /* CONFIG_DEBUGGER */

	return IRQ_HANDLED;
}

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
	[PPC_MSG_CALL_FUNC_SINGLE] = call_function_single_action,
	[PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};

const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
	[PPC_MSG_CALL_FUNC_SINGLE] = "ipi call function single",
	[PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
		return -EINVAL;
	}
#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
	if (msg == PPC_MSG_DEBUGGER_BREAK) {
		return 1;
	}
#endif
	err = request_irq(virq, smp_ipi_action[msg], IRQF_PERCPU,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
	     virq, smp_ipi_name[msg], err);

	return err;
}
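
/*
 * Usage sketch: a platform whose interrupt controller supplies four
 * or more distinct IPIs would typically wire one virq per message at
 * controller init time, along the lines of:
 *
 *	for (msg = 0; msg <= PPC_MSG_DEBUGGER_BREAK; msg++)
 *		smp_request_message_ipi(my_ipi_virq(msg), msg);
 *
 * where my_ipi_virq() is a made-up helper standing in for however
 * the platform maps its IPI hardware to Linux virqs.
 */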

#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	int messages;			/* current messages */
	unsigned long data;		/* data for cause ipi */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_data(int cpu, unsigned long data)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);

	info->data = data;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu, info->data);
}
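
/*
 * Worked example of the mux encoding: info->messages is an int used
 * as four one-byte flags, so message_pass above stores 1 into byte
 * 'msg'. On a 32-bit big-endian load, byte 0 lands in bits 31..24,
 * byte 1 in bits 23..16, and so on, hence the (1 << (24 - 8 * msg))
 * tests in smp_ipi_demux() below: a pending PPC_MSG_RESCHEDULE
 * (msg 1 in this numbering) reads back as 1 << 16.
 */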

irqreturn_t smp_ipi_demux(void)
{
	struct cpu_messages *info = &__get_cpu_var(ipi_message);
	unsigned int all;

	mb();	/* order any irq clear */

	do {
		all = xchg(&info->messages, 0);

#ifdef __BIG_ENDIAN
		if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
			generic_smp_call_function_interrupt();
		if (all & (1 << (24 - 8 * PPC_MSG_RESCHEDULE)))
			scheduler_ipi();
		if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNC_SINGLE)))
			generic_smp_call_function_single_interrupt();
		if (all & (1 << (24 - 8 * PPC_MSG_DEBUGGER_BREAK)))
			debug_ipi_action(0, NULL);
#else
#error Unsupported ENDIAN
#endif
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */

static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}
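
/*
 * Platforms provide either a native message_pass() (controllers with
 * one hardware IPI per message) or, under CONFIG_PPC_SMP_MUXED_IPI,
 * a cause_ipi() hook; in the latter case all four messages are
 * multiplexed over a single IPI by smp_muxed_ipi_message_pass().
 */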

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
void smp_send_debugger_break(void)
{
	int cpu;
	int me = raw_smp_processor_id();

	if (unlikely(!smp_ops))
		return;

	for_each_online_cpu(cpu)
		if (cpu != me)
			do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

#ifdef CONFIG_KEXEC
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	crash_ipi_function_ptr = crash_ipi_callback;
	if (crash_ipi_callback) {
		mb();
		smp_send_debugger_break();
	}
}
#endif

static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

struct thread_info *current_set[NR_CPUS];

static void __devinit smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but lets be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
	}

	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (smp_ops) {
		if (smp_ops->probe)
			max_cpus = smp_ops->probe();
		else
			max_cpus = NR_CPUS;
	} else
		max_cpus = 1;
}

void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca[boot_cpuid].__current = current;
#endif
	current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	migrate_irqs();

	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_mach_cpu_die(void)
{
	unsigned int cpu;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	printk(KERN_DEBUG "CPU%d offline\n", cpu);
	__get_cpu_var(cpu_state) = CPU_DEAD;
	smp_wmb();
	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
		cpu_relax();
}

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}
#endif /* CONFIG_HOTPLUG_CPU */
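
/*
 * The cpu_state values above implement a small handshake: a dying
 * CPU parks in generic_mach_cpu_die() after publishing CPU_DEAD, and
 * smp_generic_kick_cpu() revives it by rewriting the state to
 * CPU_UP_PREPARE and sending an IPI. generic_check_cpu_restart() is
 * what platform idle/cede loops poll to notice that wakeup.
 */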

struct create_idle {
	struct work_struct work;
	struct task_struct *idle;
	struct completion done;
	int cpu;
};

static void __cpuinit do_fork_idle(struct work_struct *work)
{
	struct create_idle *c_idle =
		container_of(work, struct create_idle, work);

	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}

static int __cpuinit create_idle(unsigned int cpu)
{
	struct thread_info *ti;
	struct create_idle c_idle = {
		.cpu	= cpu,
		.done	= COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
	};
	INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);

	c_idle.idle = get_idle_for_cpu(cpu);

	/* We can't use kernel_thread since we must avoid rescheduling
	 * the child. We use a workqueue because we want to fork from
	 * a kernel thread, not whatever userspace process happens to
	 * be trying to online us.
	 */
	if (!c_idle.idle) {
		schedule_work(&c_idle.work);
		wait_for_completion(&c_idle.done);
	} else
		init_idle(c_idle.idle, cpu);
	if (IS_ERR(c_idle.idle)) {
		pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle));
		return PTR_ERR(c_idle.idle);
	}
	ti = task_thread_info(c_idle.idle);

#ifdef CONFIG_PPC64
	paca[cpu].__current = c_idle.idle;
	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	ti->cpu = cpu;
	current_set[cpu] = ti;

	return 0;
}

int __cpuinit __cpu_up(unsigned int cpu)
{
	int rc, c;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	/* Make sure we have an idle thread */
	rc = create_idle(cpu);
	if (rc)
		return rc;

	secondary_ti = current_set[cpu];

	/* Make sure callin-map entry is 0 (can be left over from a
	 * previous CPU hotplug).
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case. Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online map */
	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}
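
/*
 * Bringup flow, for orientation: generic hotplug code calls
 * __cpu_up() above; the kicked CPU runs start_secondary() below,
 * marks itself in cpu_callin_map, and finally sets itself online,
 * which releases the cpu_online() busy-wait at the end of
 * __cpu_up().
 */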

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const int *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = *reg;
out:
	of_node_put(np);
	return id;
}

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
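
/*
 * Worked example: with 4 threads per core (threads_shift == 2),
 * cpu_core_index_of_thread(5) == 5 >> 2 == 1 and
 * cpu_first_thread_of_core(1) == 1 << 2 == 4, i.e. logical CPU 5 is
 * a thread of core 1, whose threads are CPUs 4-7.
 */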

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}

/* Activate a secondary processor. */
void __devinit start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	struct device_node *l2_cache;
	int i, base;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;
#endif
	ipi_call_lock();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);
	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
	}
	l2_cache = cpu_to_l2cache(cpu);
	for_each_online_cpu(i) {
		struct device_node *np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			cpumask_set_cpu(cpu, cpu_core_mask(i));
			cpumask_set_cpu(i, cpu_core_mask(cpu));
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);
	ipi_call_unlock();

	local_irq_enable();

	cpu_idle();

	BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_var_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the
	 * meantime, so we pin ourselves down to CPU 0 for a short while.
	 */
	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
	cpumask_copy(old_mask, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));

	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	set_cpus_allowed_ptr(current, old_mask);

	free_cpumask_var(old_mask);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();
}
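
/*
 * On CPUs with asymmetric SMT (e.g. POWER7, where lower-numbered
 * threads of a core give better performance when the core is only
 * partly busy), SD_ASYM_PACKING tells the scheduler to pack runnable
 * tasks onto the lowest-numbered threads first.
 */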

int arch_sd_sibling_asym_packing(void)
{
	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		return SD_ASYM_PACKING;
	}

	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	struct device_node *l2_cache;
	int cpu = smp_processor_id();
	int err;
	int base, i;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
	}

	l2_cache = cpu_to_l2cache(cpu);
	for_each_present_cpu(i) {
		struct device_node *np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			cpumask_clear_cpu(cpu, cpu_core_mask(i));
			cpumask_clear_cpu(i, cpu_core_mask(cpu));
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);

	return err;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

static DEFINE_MUTEX(powerpc_cpu_hotplug_driver_mutex);

void cpu_hotplug_driver_lock(void)
{
	mutex_lock(&powerpc_cpu_hotplug_driver_mutex);
}

void cpu_hotplug_driver_unlock(void)
{
	mutex_unlock(&powerpc_cpu_hotplug_driver_mutex);
}
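
/*
 * Usage sketch: hotplug driver paths that rewrite the device tree
 * while onlining/offlining CPUs would typically bracket the whole
 * sequence:
 *
 *	cpu_hotplug_driver_lock();
 *	... update cpu device-tree nodes, cpu_up()/cpu_down() ...
 *	cpu_hotplug_driver_unlock();
 */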

void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif /* CONFIG_HOTPLUG_CPU */