/*
 *	x86 SMP booting functions
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *	Copyright 2001 Andi Kleen, SuSE Labs.
 *
 *	Much of the core SMP work is based on previous work by Thomas Radke, to
 *	whom a great many thanks are extended.
 *
 *	Thanks to Intel for making available several different Pentium,
 *	Pentium Pro and Pentium-II/Xeon MP machines.
 *	Original development of Linux SMP code supported by Caldera.
 *
 *	This code is released under the GNU General Public License version 2
 *
 *	Fixes
 *		Felix Koop	:	NR_CPUS used properly
 *		Jose Renau	:	Handle single CPU case.
 *		Alan Cox	:	By repeated request 8) - Total BogoMIP report.
 *		Greg Wright	:	Fix for kernel stacks panic.
 *		Erich Boleyn	:	MP v1.4 and additional changes.
 *		Matthias Sattler:	Changes for 2.1 kernel map.
 *		Michel Lespinasse:	Changes for 2.1 kernel map.
 *		Michael Chastain:	Change trampoline.S to gnu as.
 *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *		Ingo Molnar	:	Added APIC timers, based on code
 *					from Jose Renau
 *		Ingo Molnar	:	various cleanups and rewrites
 *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
 *		Maciej W. Rozycki:	Bits for genuine 82489DX APICs
 *		Andi Kleen	:	Changed for SMP boot into long mode.
 *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
 *		Andi Kleen	:	Converted to new state machine.
 *					Various cleanups.
 *					Probably mostly hotplug CPU ready now.
 *		Ashok Raj	:	CPU hotplug support
 */
#include <linux/config.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/bootmem.h>
#include <linux/thread_info.h>
#include <linux/module.h>

#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <asm/mtrr.h>
#include <asm/pgalloc.h>
#include <asm/desc.h>
#include <asm/kdebug.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/numa.h>
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
/* Package ID of each logical CPU */
u8 phys_proc_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
/* Core ID of each logical CPU */
u8 cpu_core_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };

/* Last level cache ID of each logical CPU */
u8 cpu_llc_id[NR_CPUS] __cpuinitdata = { [0 ... NR_CPUS-1] = BAD_APICID };

/* Bitmask of currently online CPUs */
cpumask_t cpu_online_map __read_mostly;

EXPORT_SYMBOL(cpu_online_map);

/*
 * Private maps to synchronize booting between AP and BP.
 * Probably not needed anymore, but it makes for easier debugging. -AK
 */
cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;

cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;

/* Set when the idlers are all forked */
int smp_threads_ready;

/* representing HT siblings of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;

/* representing HT and core siblings of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
/*
 * Trampoline 80x86 program as an array.
 */
extern unsigned char trampoline_data[];
extern unsigned char trampoline_end[];

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

/*
 * Store all idle threads, this can be reused instead of creating
 * a new thread. Also avoids complicated thread destroy functionality
 * for idle threads.
 */
struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;

#define get_idle_for_cpu(x)	(idle_thread_array[(x)])
#define set_idle_for_cpu(x, p)	(idle_thread_array[(x)] = (p))
/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */
static unsigned long __cpuinit setup_trampoline(void)
{
	void *tramp = __va(SMP_TRAMPOLINE_BASE);
	memcpy(tramp, trampoline_data, trampoline_end - trampoline_data);
	return virt_to_phys(tramp);
}
/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU.
 */
static void __cpuinit smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = cpu_data + id;

	*c = boot_cpu_data;
	identify_cpu(c);
	print_cpu_info(c);
}
/*
 * New Funky TSC sync algorithm borrowed from IA64.
 * Main advantage is that it doesn't reset the TSCs fully and
 * in general looks more robust and it works better than my earlier
 * attempts. I believe it was written by David Mosberger. Some minor
 * adjustments for x86-64 by me -AK
 *
 * Original comment reproduced below.
 *
 * Synchronize TSC of the current (slave) CPU with the TSC of the
 * MASTER CPU (normally the time-keeper CPU). We use a closed loop to
 * eliminate the possibility of unaccounted-for errors (such as
 * getting a machine check in the middle of a calibration step). The
 * basic idea is for the slave to ask the master what itc value it has
 * and to read its own itc before and after the master responds. Each
 * iteration gives us three timestamps:
 *
 *	t0: the slave's TSC right before its request,
 *	tm: the master's TSC when it responds,
 *	t1: the slave's TSC right after the response arrives.
 *
 * The goal is to adjust the slave's TSC such that tm falls exactly
 * half-way between t0 and t1. If we achieve this, the clocks are
 * synchronized provided the interconnect between the slave and the
 * master is symmetric. Even if the interconnect were asymmetric, we
 * would still know that the synchronization error is smaller than the
 * round-trip latency (t1 - t0).
 *
 * When the interconnect is quiet and symmetric, this lets us
 * synchronize the TSC to within one or two cycles. However, we can
 * only *guarantee* that the synchronization is accurate to within a
 * round-trip time, which is typically in the range of several hundred
 * cycles (e.g., ~500 cycles). In practice, this means that the TSCs
 * are usually almost perfectly synchronized, but we shouldn't assume
 * that the accuracy is much better than half a microsecond or so.
 *
 * [there are other errors like the latency of RDTSC and of the
 * WRMSR. These can also amount to hundreds of cycles. So it's
 * probably worse. It claims 153 cycles error on a dual Opteron,
 * but I suspect the numbers are actually somewhat worse -AK]
 */
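/*
 * Worked example (made-up numbers, not from the source): suppose one
 * iteration reads t0 = 1000 and t1 = 1040 on the slave, and the master
 * replies tm = 1100. The midpoint of t0 and t1 is 1020, so the slave
 * appears to be 1020 - 1100 = -80 cycles behind the master, and the
 * sync loop below would add 80 cycles to the slave's TSC. The error
 * bound is the round-trip time, t1 - t0 = 40 cycles.
 */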
#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/8)
/*
 * Intentionally don't use cpu_relax() while spinning during TSC
 * synchronization: we don't want to go into funky power-save modes or
 * cause hypervisors to schedule us away. Going to sleep would likely
 * affect latency, and low latency is the primary objective here. -AK
 */
#define no_cpu_relax()	barrier()
static __cpuinitdata DEFINE_SPINLOCK(tsc_sync_lock);
static volatile __cpuinitdata unsigned long go[SLAVE + 1];
static int notscsync __cpuinitdata;

#undef DEBUG_TSC_SYNC

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */
/* Callback on boot CPU */
static __cpuinit void sync_master(void *arg)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
			while (!go[MASTER])
				no_cpu_relax();
			go[MASTER] = 0;
			rdtscll(go[SLAVE]);
		}
	}
	local_irq_restore(flags);
}
/*
 * Return the number of cycles by which our tsc differs from the tsc
 * on the master (time-keeper) CPU. A positive number indicates our
 * tsc is ahead of the master, negative that it is behind.
 */
static inline long
get_delta(long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	int i;

	for (i = 0; i < NUM_ITERS; ++i) {
		rdtscll(t0);
		go[MASTER] = 1;
		while (!(tm = go[SLAVE]))
			no_cpu_relax();
		go[SLAVE] = 0;
		rdtscll(t1);

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		++tcenter;
	return tcenter - best_tm;
}
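/*
 * Illustrative sketch (an assumption, not from this file): why the
 * midpoint above is computed as best_t0/2 + best_t1/2 plus a carry
 * rather than (best_t0 + best_t1)/2. Near the top of the counter's
 * range the naive sum wraps around; the halved form never does.
 */
#if 0
static void example_overflow_safe_average(void)
{
	unsigned long a = ~0UL - 2;		/* two values near ULONG_MAX */
	unsigned long b = ~0UL - 8;
	unsigned long naive = (a + b) / 2;	/* a + b wraps: bogus result */
	unsigned long safe = a/2 + b/2 + (a % 2 + b % 2 == 2);

	BUG_ON(safe != ~0UL - 5);	/* the true midpoint */
	BUG_ON(naive == safe);		/* the naive form got it wrong */
}
#endif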
static __cpuinit void sync_tsc(unsigned int master)
{
	int i, done = 0;
	long delta, adj, adjust_latency = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#ifdef DEBUG_TSC_SYNC
	static struct syncdebug {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of tsc adjustment latency */
	} t[NUM_ROUNDS] __cpuinitdata;
#endif

	printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n",
		smp_processor_id(), master);

	go[MASTER] = 1;

	/* It is dangerous to broadcast IPIs while cpus are coming up,
	 * as they may not be ready to accept them. Since we only need
	 * to send the IPI to the boot CPU, direct the message to it
	 * and avoid the race.
	 */
	smp_call_function_single(master, sync_master, NULL, 1, 0);

	while (go[MASTER])	/* wait for master to be ready */
		no_cpu_relax();

	spin_lock_irqsave(&tsc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS; ++i) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;
			}

			if (!done) {
				unsigned long long t;

				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				rdtscll(t);
				wrmsrl(MSR_IA32_TSC, t + adj);
			}
#ifdef DEBUG_TSC_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	spin_unlock_irqrestore(&tsc_sync_lock, flags);

#ifdef DEBUG_TSC_SYNC
	for (i = 0; i < NUM_ROUNDS; ++i)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO
	       "CPU %d: synchronized TSC with CPU %u (last diff %ld cycles, "
	       "maxerr %lu cycles)\n",
	       smp_processor_id(), master, delta, rt);
}
static void __cpuinit tsc_sync_wait(void)
{
	/*
	 * When the CPU has synchronized TSCs assume the BIOS
	 * or the hardware already synced. Otherwise we could
	 * mess up a possible perfect synchronization with a
	 * not-quite-perfect algorithm.
	 */
	if (notscsync || !cpu_has_tsc || !unsynchronized_tsc())
		return;
	sync_tsc(0);
}

static __init int notscsync_setup(char *s)
{
	notscsync = 1;
	return 1;
}
__setup("notscsync", notscsync_setup);
static atomic_t init_deasserted __cpuinitdata;

/*
 * Report back to the Boot Processor.
 * Running on AP.
 */
void __cpuinit smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC. We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	while (!atomic_read(&init_deasserted))
		cpu_relax();

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = GET_APIC_ID(apic_read(APIC_ID));
	cpuid = smp_processor_id();
	if (cpu_isset(cpuid, cpu_callin_map)) {
		panic("smp_callin: phys CPU#%d, CPU#%d already present??\n",
			phys_id, cpuid);
	}
	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Complete APIC bus
	 * silence for 1 second, this overestimates the time the
	 * boot CPU is spending to send the up to 2 STARTUP IPIs
	 * by a factor of two. This should be enough.
	 */

	/*
	 * Waiting 2s total for startup (udelay is not yet working)
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished its STARTUP sequence?
		 */
		if (cpu_isset(cpuid, cpu_callout_map))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		panic("smp_callin: CPU%d started up but did not get a callout!\n",
			cpuid);
	}

	/*
	 * The boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */

	Dprintk("CALLIN, before setup_local_APIC().\n");
	setup_local_APIC();

	/*
	 * Get our bogomips.
	 *
	 * Need to enable IRQs because it can take longer and then
	 * the NMI watchdog might kill us.
	 */
	local_irq_enable();
	calibrate_delay();
	local_irq_disable();
	Dprintk("Stack at about %p\n", &cpuid);

	disable_APIC_timer();

	/*
	 * Save our processor parameters
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
}
/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
	struct cpuinfo_x86 *c = cpu_data + cpu;
	/*
	 * For perf, we return last level cache shared map.
	 * TBD: when power saving sched policy is added, we will return
	 *      cpu_core_map when power saving policy is enabled
	 */
	return c->llc_shared_map;
}
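/*
 * Example (illustrative numbers): on a dual-core package whose cores
 * share one last level cache, cpu_coregroup_map(0) and
 * cpu_coregroup_map(1) both return the mask {0,1}, so the scheduler's
 * multi-core domain spans the whole package.
 */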
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

static inline void set_cpu_sibling_map(int cpu)
{
	int i;
	struct cpuinfo_x86 *c = cpu_data;

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (phys_proc_id[cpu] == phys_proc_id[i] &&
			    cpu_core_id[cpu] == cpu_core_id[i]) {
				cpu_set(i, cpu_sibling_map[cpu]);
				cpu_set(cpu, cpu_sibling_map[i]);
				cpu_set(i, cpu_core_map[cpu]);
				cpu_set(cpu, cpu_core_map[i]);
				cpu_set(i, c[cpu].llc_shared_map);
				cpu_set(cpu, c[i].llc_shared_map);
			}
		}
	} else {
		cpu_set(cpu, cpu_sibling_map[cpu]);
	}

	cpu_set(cpu, c[cpu].llc_shared_map);

	if (current_cpu_data.x86_max_cores == 1) {
		cpu_core_map[cpu] = cpu_sibling_map[cpu];
		c[cpu].booted_cores = 1;
		return;
	}

	for_each_cpu_mask(i, cpu_sibling_setup_map) {
		if (cpu_llc_id[cpu] != BAD_APICID &&
		    cpu_llc_id[cpu] == cpu_llc_id[i]) {
			cpu_set(i, c[cpu].llc_shared_map);
			cpu_set(cpu, c[i].llc_shared_map);
		}
		if (phys_proc_id[cpu] == phys_proc_id[i]) {
			cpu_set(i, cpu_core_map[cpu]);
			cpu_set(cpu, cpu_core_map[i]);
			/*
			 * Does this new cpu bring up a new core?
			 */
			if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (first_cpu(cpu_sibling_map[i]) == i)
					c[cpu].booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					c[i].booted_cores++;
			} else if (i != cpu && !c[cpu].booted_cores)
				c[cpu].booted_cores = c[i].booted_cores;
		}
	}
}
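/*
 * Illustrative sketch (not part of the build): how a reader might dump
 * the topology assembled above. The message format is an assumption;
 * the maps and accessors are the real ones declared in this file.
 */
#if 0
static void example_dump_topology(void)
{
	int cpu, i;

	for_each_online_cpu(cpu) {
		printk(KERN_DEBUG "CPU%d: package %d core %d: %d HT sibling(s), %d core sibling(s)\n",
		       cpu, phys_proc_id[cpu], cpu_core_id[cpu],
		       cpus_weight(cpu_sibling_map[cpu]),
		       cpus_weight(cpu_core_map[cpu]));
		for_each_cpu_mask(i, cpu_sibling_map[cpu])
			printk(KERN_DEBUG "  shares a core with CPU%d\n", i);
	}
}
#endif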
/*
 * Setup code on secondary processor (after coming out of the trampoline)
 */
void __cpuinit start_secondary(void)
{
	/*
	 * Don't put anything before smp_callin(); SMP booting is so
	 * fragile that we want to limit the things done here to the
	 * most necessary things.
	 */
	cpu_init();
	smp_callin();

	/* otherwise gcc will move up the smp_processor_id before the cpu_init */
	barrier();

	Dprintk("cpu %d: setting up apic clock\n", smp_processor_id());
	setup_secondary_APIC_clock();

	Dprintk("cpu %d: enabling apic timer\n", smp_processor_id());

	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0(NULL);
		enable_8259A_irq(0);
	}

	enable_APIC_timer();

	/*
	 * The sibling maps must be set before turning the online map on for
	 * this cpu
	 */
	set_cpu_sibling_map(smp_processor_id());

	/*
	 * Wait for TSC sync to not schedule things before.
	 * We still process interrupts, which could see an inconsistent
	 * time in that window unfortunately.
	 * Do this here because TSC sync has global unprotected state.
	 */
	tsc_sync_wait();

	/*
	 * We need to hold call_lock, so there is no inconsistency
	 * between the time smp_call_function() determines number of
	 * IPI recipients, and the time when the determination is made
	 * for which cpus receive the IPI in genapic_flat.c. Holding this
	 * lock helps us to not include this cpu in a currently in progress
	 * smp_call_function().
	 */
	lock_ipi_call_lock();

	/*
	 * Allow the master to continue.
	 */
	cpu_set(smp_processor_id(), cpu_online_map);
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
	unlock_ipi_call_lock();

	cpu_idle();
}
extern volatile unsigned long init_rsp;
extern void (*initial_code)(void);

static void inquire_remote_apic(int apicid)
{
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	char *names[] = { "ID", "VERSION", "SPIV" };
	int timeout, status;

	printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);

	for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
		printk("... APIC #%d %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		apic_wait_icr_idle();

		apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
		apic_write(APIC_ICR, APIC_DM_REMRD | regs[i]);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			printk("%08x\n", status);
			break;
		default:
			printk("failed\n");
		}
	}
}
/*
 * Kick the secondary to wake up.
 */
static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_rip)
{
	unsigned long send_status = 0, accept_status = 0;
	int maxlvt, timeout, num_starts, j;

	Dprintk("Asserting INIT.\n");

	/*
	 * Turn INIT on target chip
	 */
	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/*
	 * Send IPI
	 */
	apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
				| APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	mdelay(10);

	Dprintk("Deasserting INIT.\n");

	/* Target chip */
	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/* Send IPI */
	apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	atomic_set(&init_deasserted, 1);

	num_starts = 2;

	/*
	 * Run STARTUP IPI loop.
	 */
	Dprintk("#startup loops: %d.\n", num_starts);

	maxlvt = get_maxlvt();

	for (j = 1; j <= num_starts; j++) {
		Dprintk("Sending STARTUP #%d.\n", j);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		Dprintk("After apic_write.\n");

		/* Target chip */
		apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

		/* Boot on the stack */
		/* Kick the second */
		apic_write(APIC_ICR, APIC_DM_STARTUP | (start_rip >> 12));

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(300);

		Dprintk("Startup point 1.\n");

		Dprintk("Waiting for send to finish...\n");
		timeout = 0;
		do {
			udelay(100);
			send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
		} while (send_status && (timeout++ < 1000));

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(200);

		/*
		 * Due to the Pentium erratum 3AP.
		 */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	Dprintk("After Startup.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
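/*
 * Illustrative sketch (an assumption, not in the original source): the
 * STARTUP IPI above encodes only bits 19:12 of the entry point
 * (start_rip >> 12), so a usable trampoline must be 4K-aligned and
 * located below 1MB. A hypothetical helper making the check explicit:
 */
#if 0
static int example_valid_sipi_target(unsigned long start_rip)
{
	return (start_rip & 0xfff) == 0 && start_rip < 0x100000;
}
#endif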
struct create_idle {
	struct task_struct *idle;
	struct completion done;
	int cpu;
};

void do_fork_idle(void *_c_idle)
{
	struct create_idle *c_idle = _c_idle;

	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}
/*
 * Boot one CPU.
 */
static int __cpuinit do_boot_cpu(int cpu, int apicid)
{
	unsigned long boot_error;
	int timeout;
	unsigned long start_rip;
	struct create_idle c_idle = {
		.cpu = cpu,
		.done = COMPLETION_INITIALIZER(c_idle.done),
	};
	DECLARE_WORK(work, do_fork_idle, &c_idle);

	/* allocate memory for gdts of secondary cpus. Hotplug is considered */
	if (!cpu_gdt_descr[cpu].address &&
	    !(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
		printk(KERN_ERR "Failed to allocate GDT for CPU %d\n", cpu);
		return -1;
	}

	/* Allocate node local memory for AP pdas */
	if (cpu_pda(cpu) == &boot_cpu_pda[cpu]) {
		struct x8664_pda *newpda, *pda;
		int node = cpu_to_node(cpu);
		pda = cpu_pda(cpu);
		newpda = kmalloc_node(sizeof(struct x8664_pda), GFP_ATOMIC,
				      node);
		if (newpda) {
			memcpy(newpda, pda, sizeof(struct x8664_pda));
			cpu_pda(cpu) = newpda;
		} else
			printk(KERN_ERR
		"Could not allocate node local PDA for CPU %d on node %d\n",
				cpu, node);
	}

	alternatives_smp_switch(1);

	c_idle.idle = get_idle_for_cpu(cpu);

	if (c_idle.idle) {
		c_idle.idle->thread.rsp = (unsigned long) (((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(c_idle.idle))) - 1);
		init_idle(c_idle.idle, cpu);
		goto do_rest;
	}

	/*
	 * During cold boot, the keventd thread is not spun up yet.
	 * When we do cpu hot-add, we create idle threads on the fly; they
	 * should not acquire any attributes from the calling context, so
	 * the clean way to create them is from keventd(). We check
	 * current_is_keventd() because the ACPI notifier also queues work
	 * to keventd(), and if the caller is already running in the
	 * context of keventd() we would lock up the keventd thread.
	 */
	if (!keventd_up() || current_is_keventd())
		work.func(work.data);
	else {
		schedule_work(&work);
		wait_for_completion(&c_idle.done);
	}

	if (IS_ERR(c_idle.idle)) {
		printk("failed fork for CPU %d\n", cpu);
		return PTR_ERR(c_idle.idle);
	}

	set_idle_for_cpu(cpu, c_idle.idle);

do_rest:

	cpu_pda(cpu)->pcurrent = c_idle.idle;

	start_rip = setup_trampoline();

	init_rsp = c_idle.idle->thread.rsp;
	per_cpu(init_tss, cpu).rsp0 = init_rsp;
	initial_code = start_secondary;
	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);

	printk(KERN_INFO "Booting processor %d/%d APIC 0x%x\n", cpu,
		cpus_weight(cpu_present_map), apicid);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");

	CMOS_WRITE(0xa, 0xf);
	local_flush_tlb();
	*((volatile unsigned short *) phys_to_virt(0x469)) = start_rip >> 4;
	*((volatile unsigned short *) phys_to_virt(0x467)) = start_rip & 0xf;
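	/*
	 * Worked example (illustrative numbers): with the trampoline at
	 * 0x6000, the warm-reset vector at 40:67 becomes 0600:0000, i.e.
	 * segment 0x6000 >> 4 = 0x600 stored at 0x469 and offset
	 * 0x6000 & 0xf = 0 stored at 0x467, so a CPU coming out of INIT
	 * jumps straight into the trampoline.
	 */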
	/*
	 * Be paranoid about clearing APIC errors.
	 */
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);

	/*
	 * Status is now clean
	 */
	boot_error = 0;

	/*
	 * Starting actual IPI sequence...
	 */
	boot_error = wakeup_secondary_via_INIT(apicid, start_rip);

	if (!boot_error) {
		/*
		 * allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu);
		cpu_set(cpu, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu);

		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpu_isset(cpu, cpu_callin_map))
				break;	/* It has booted */
			udelay(100);
		}

		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("CPU has booted.\n");
		} else {
			boot_error = 1;
			if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE))
					== 0xA5)
				/* trampoline started but...? */
				printk("Stuck ??\n");
			else
				/* trampoline code not run */
				printk("Not responding.\n");
			inquire_remote_apic(apicid);
		}
	}
	if (boot_error) {
		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
		clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
		clear_node_cpumask(cpu); /* was set by numa_add_cpu */
		cpu_clear(cpu, cpu_present_map);
		cpu_clear(cpu, cpu_possible_map);
		x86_cpu_to_apicid[cpu] = BAD_APICID;
		x86_cpu_to_log_apicid[cpu] = BAD_APICID;
		return -EIO;
	}

	return 0;
}
cycles_t cacheflush_time;
unsigned long cache_decay_ticks;

/*
 * Cleanup possible dangling ends...
 */
static __cpuinit void smp_cleanup_boot(void)
{
	/*
	 * Paranoid: Set warm reset code and vector here back
	 * to default values.
	 */
	CMOS_WRITE(0, 0xf);

	/*
	 * Reset trampoline flag
	 */
	*((volatile int *) phys_to_virt(0x467)) = 0;
}
/*
 * Fall back to non-SMP mode after errors.
 *
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __init void disable_smp(void)
{
	cpu_present_map = cpumask_of_cpu(0);
	cpu_possible_map = cpumask_of_cpu(0);
	if (smp_found_config)
		phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
	else
		phys_cpu_present_map = physid_mask_of_physid(0);
	cpu_set(0, cpu_sibling_map[0]);
	cpu_set(0, cpu_core_map[0]);
}
#ifdef CONFIG_HOTPLUG_CPU

int additional_cpus __initdata = -1;

/*
 * cpu_possible_map should be static: it cannot change as cpus
 * are onlined or offlined. The reason is that per-cpu data structures
 * are allocated by some modules at init time, and they don't expect
 * to do this dynamically on cpu arrival/departure.
 * cpu_present_map, on the other hand, can change dynamically.
 * When cpu_hotplug is not compiled in, we resort to the current
 * behaviour, which is cpu_possible == cpu_present.
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
 * - The user can overwrite it with additional_cpus=NUM
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 */
__init void prefill_possible_map(void)
{
	int i;
	int possible;

	if (additional_cpus == -1) {
		if (disabled_cpus > 0)
			additional_cpus = disabled_cpus;
		else
			additional_cpus = 0;
	}
	possible = num_processors + additional_cpus;
	if (possible > NR_CPUS)
		possible = NR_CPUS;

	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
		possible,
		max_t(int, possible - num_processors, 0));

	for (i = 0; i < possible; i++)
		cpu_set(i, cpu_possible_map);
}
#endif
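/*
 * Worked example (made-up numbers): an MP table with 4 enabled and 2
 * BIOS-disabled CPUs and no additional_cpus= override gives
 * possible = 4 + 2 = 6, so CPUs 0-5 are marked possible and the boot
 * log reports "SMP: Allowing 6 CPUs, 2 hotplug CPUs".
 */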
/*
 * Various sanity checks.
 */
static int __init smp_sanity_check(unsigned max_cpus)
{
	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
		       hard_smp_processor_id());
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		disable_smp();
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		return -1;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 */
	if (!physid_isset(boot_cpu_id, phys_cpu_present_map)) {
		printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n",
			boot_cpu_id);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (!cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_id);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
		nr_ioapics = 0;
		return -1;
	}

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
		nr_ioapics = 0;
		return -1;
	}

	return 0;
}
/*
 * Prepare for SMP bootup. The MP table or ACPI has been read
 * earlier. Just do some sanity checking here and enable APIC mode.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	nmi_watchdog_default();
	current_cpu_data = boot_cpu_data;
	current_thread_info()->cpu = 0;  /* needed? */
	set_cpu_sibling_map(0);

	if (smp_sanity_check(max_cpus) < 0) {
		printk(KERN_INFO "SMP disabled\n");
		disable_smp();
		return;
	}

	/*
	 * Switch from PIC to APIC mode.
	 */
	connect_bsp_APIC();
	setup_local_APIC();

	if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id) {
		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
		      GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_id);
		/* Or can we switch back to PIC here? */
	}

	/*
	 * Now start the IO-APICs
	 */
	if (!skip_ioapic_setup && nr_ioapics)
		setup_IO_APIC();
	else
		nr_ioapics = 0;

	/*
	 * Set up local APIC timer on boot CPU.
	 */
	setup_boot_APIC_clock();
}

/*
 * Early setup to make printk work.
 */
void __init smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();
	cpu_set(me, cpu_online_map);
	cpu_set(me, cpu_callout_map);
	per_cpu(cpu_state, me) = CPU_ONLINE;
}
/*
 * Entry point to boot a CPU.
 */
int __cpuinit __cpu_up(unsigned int cpu)
{
	int err;
	int apicid = cpu_present_to_apicid(cpu);

	WARN_ON(irqs_disabled());

	Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu);

	if (apicid == BAD_APICID || apicid == boot_cpu_id ||
	    !physid_isset(apicid, phys_cpu_present_map)) {
		printk("__cpu_up: bad cpu %d\n", cpu);
		return -EINVAL;
	}

	/*
	 * Already booted CPU?
	 */
	if (cpu_isset(cpu, cpu_callin_map)) {
		Dprintk("do_boot_cpu %d Already started\n", cpu);
		return -ENOSYS;
	}

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
	/* Boot it! */
	err = do_boot_cpu(cpu, apicid);
	if (err < 0) {
		Dprintk("do_boot_cpu failed %d\n", err);
		return err;
	}

	/* Unleash the CPU! */
	Dprintk("waiting for cpu %d\n", cpu);

	while (!cpu_isset(cpu, cpu_online_map))
		cpu_relax();
	err = 0;

	return err;
}
/*
 * Finish the SMP boot.
 */
void __init smp_cpus_done(unsigned int max_cpus)
{
	smp_cleanup_boot();

#ifdef CONFIG_X86_IO_APIC
	setup_ioapic_dest();
#endif

	check_nmi_watchdog();
}
#ifdef CONFIG_HOTPLUG_CPU

static void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = cpu_data;

	for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
		cpu_clear(cpu, cpu_core_map[sibling]);
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpus_weight(cpu_sibling_map[cpu]) == 1)
			c[sibling].booted_cores--;
	}

	for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
		cpu_clear(cpu, cpu_sibling_map[sibling]);
	cpus_clear(cpu_sibling_map[cpu]);
	cpus_clear(cpu_core_map[cpu]);
	phys_proc_id[cpu] = BAD_APICID;
	cpu_core_id[cpu] = BAD_APICID;
	cpu_clear(cpu, cpu_sibling_setup_map);
}
void remove_cpu_from_maps(void)
{
	int cpu = smp_processor_id();

	cpu_clear(cpu, cpu_callout_map);
	cpu_clear(cpu, cpu_callin_map);
	clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
	clear_node_cpumask(cpu);
}
int __cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * Perhaps use cpufreq to drop frequency, but that could go
	 * into generic code.
	 *
	 * We won't take down the boot processor on i386 due to some
	 * interrupts only being able to be serviced by the BSP.
	 * Especially so if we're not using an IO-APIC	-zwane
	 */
	if (cpu == 0)
		return -EBUSY;

	clear_local_APIC();

	/*
	 * HACK:
	 * Allow any queued timer interrupts to get serviced.
	 * This is only a temporary solution until we cleanup
	 * fixup_irqs as we do for IA64.
	 */
	local_irq_enable();
	mdelay(1);

	local_irq_disable();
	remove_siblinginfo(cpu);

	/* It's now safe to remove this processor from the online map */
	cpu_clear(cpu, cpu_online_map);
	remove_cpu_from_maps();
	fixup_irqs(cpu_online_map);
	return 0;
}
void __cpu_die(unsigned int cpu)
{
	/* We don't do anything here: the idle task is faking death itself. */
	unsigned int i;

	for (i = 0; i < 10; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk(KERN_INFO "CPU %d is now offline\n", cpu);
			if (1 == num_online_cpus())
				alternatives_smp_switch(0);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
__init int setup_additional_cpus(char *s)
{
	return get_option(&s, &additional_cpus);
}
__setup("additional_cpus=", setup_additional_cpus);

#else /* ... !CONFIG_HOTPLUG_CPU */

int __cpu_disable(void)
{
	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */