/*
 * x86 SMP booting functions
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 * Copyright 2001 Andi Kleen, SuSE Labs.
 *
 * Much of the core SMP work is based on previous work by Thomas Radke, to
 * whom a great many thanks are extended.
 *
 * Thanks to Intel for making available several different Pentium,
 * Pentium Pro and Pentium-II/Xeon MP machines.
 * Original development of Linux SMP code supported by Caldera.
 *
 * This code is released under the GNU General Public License version 2
 *
 * Felix Koop : NR_CPUS used properly
 * Jose Renau : Handle single CPU case.
 * Alan Cox : By repeated request 8) - Total BogoMIP report.
 * Greg Wright : Fix for kernel stacks panic.
 * Erich Boleyn : MP v1.4 and additional changes.
 * Matthias Sattler : Changes for 2.1 kernel map.
 * Michel Lespinasse : Changes for 2.1 kernel map.
 * Michael Chastain : Change trampoline.S to gnu as.
 * Alan Cox : Dumb bug: 'B' step PPro's are fine
 * Ingo Molnar : Added APIC timers, based on code
 * Ingo Molnar : various cleanups and rewrites
 * Tigran Aivazian : fixed "0.00 in /proc/uptime on SMP" bug.
 * Maciej W. Rozycki : Bits for genuine 82489DX APICs
 * Andi Kleen : Changed for SMP boot into long mode.
 * Rusty Russell : Hacked into shape for new "hotplug" boot process.
 * Andi Kleen : Converted to new state machine.
 *		Probably mostly hotplug CPU ready now.
 * Ashok Raj : CPU hotplug support
 */
#include <linux/init.h>

#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/bootmem.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <linux/smp.h>

#include <asm/pgalloc.h>
#include <asm/kdebug.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/hw_irq.h>
#include <asm/genapic.h>
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
u8 cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
EXPORT_SYMBOL(cpu_llc_id);

/* Bitmask of currently online CPUs */
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);
/*
 * Private maps to synchronize booting between AP and BP.
 * Probably not needed anymore, but it makes for easier debugging. -AK
 */
cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
EXPORT_SYMBOL(cpu_callout_map);

cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);
/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_data);

/* Set when the idlers are all forked */
int smp_threads_ready;

/* representing HT siblings of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
/*
 * Trampoline 80x86 program as an array.
 */
extern unsigned char trampoline_data[];
extern unsigned char trampoline_end[];

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };
/*
 * Store all idle threads: these can be reused instead of creating
 * a new thread. This also avoids complicated thread destroy
 * functionality.
 */
struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;

#define get_idle_for_cpu(x)	(idle_thread_array[(x)])
#define set_idle_for_cpu(x,p)	(idle_thread_array[(x)] = (p))
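
/*
 * Usage note (added commentary, not from the original source): on CPU
 * re-online, do_boot_cpu() below first checks get_idle_for_cpu(), so the
 * idle task forked on the first boot of that cpu is reused instead of
 * calling fork_idle() again; set_idle_for_cpu() records it after the
 * first successful fork.
 */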
/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */
static unsigned long __cpuinit setup_trampoline(void)
{
	void *tramp = __va(SMP_TRAMPOLINE_BASE);
	memcpy(tramp, trampoline_data, trampoline_end - trampoline_data);
	return virt_to_phys(tramp);
}
/*
 * The bootstrap kernel entry code has set these up. Save them
 * for a given CPU.
 */
static void __cpuinit smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = cpu_data + id;

	*c = boot_cpu_data;
	identify_cpu(c);
	print_cpu_info(c);
}
/*
 * New Funky TSC sync algorithm borrowed from IA64.
 * Main advantage is that it doesn't reset the TSCs fully and
 * in general looks more robust and it works better than my earlier
 * attempts. I believe it was written by David Mosberger. Some minor
 * adjustments for x86-64 by me -AK
 *
 * Original comment reproduced below.
 *
 * Synchronize TSC of the current (slave) CPU with the TSC of the
 * MASTER CPU (normally the time-keeper CPU). We use a closed loop to
 * eliminate the possibility of unaccounted-for errors (such as
 * getting a machine check in the middle of a calibration step). The
 * basic idea is for the slave to ask the master what itc value it has
 * and to read its own itc before and after the master responds. Each
 * iteration gives us three timestamps:
 *
 *	t0: the slave's TSC before it asks,
 *	tm: the master's TSC when it responds,
 *	t1: the slave's TSC after it receives the response.
 *
 * The goal is to adjust the slave's TSC such that tm falls exactly
 * half-way between t0 and t1. If we achieve this, the clocks are
 * synchronized provided the interconnect between the slave and the
 * master is symmetric. Even if the interconnect were asymmetric, we
 * would still know that the synchronization error is smaller than the
 * roundtrip latency (t1 - t0).
 *
 * When the interconnect is quiet and symmetric, this lets us
 * synchronize the TSC to within one or two cycles. However, we can
 * only *guarantee* that the synchronization is accurate to within a
 * round-trip time, which is typically in the range of several hundred
 * cycles (e.g., ~500 cycles). In practice, this means that the TSCs
 * are usually almost perfectly synchronized, but we shouldn't assume
 * that the accuracy is much better than half a microsecond or so.
 *
 * [there are other errors like the latency of RDTSC and of the
 * WRMSR. These can also amount to hundreds of cycles. So it's
 * probably worse. It claims 153 cycles error on a dual Opteron,
 * but I suspect the numbers are actually somewhat worse -AK]
 */
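
/*
 * Illustrative walk-through (added commentary, not from the original
 * source): suppose one iteration observes t0 = 1000, tm = 1007 and
 * t1 = 1010 (all in cycles). The midpoint of the slave's two reads is
 * (1000 + 1010)/2 = 1005, so get_delta() below reports
 * 1005 - 1007 = -2: the slave appears 2 cycles behind the master.
 * sync_tsc() then steps the slave's TSC forward by 2 cycles
 * (adj = -delta), after which tm would fall exactly half-way between
 * t0 and t1 on a symmetric interconnect.
 */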
#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/8)
/* Intentionally don't use cpu_relax() during TSC synchronization
   because we don't want to go into funky power save modes or cause
   hypervisors to schedule us away. Going to sleep would likely affect
   latency, and low latency is the primary objective here. -AK */
#define no_cpu_relax() barrier()
static __cpuinitdata DEFINE_SPINLOCK(tsc_sync_lock);
static volatile __cpuinitdata unsigned long go[SLAVE + 1];
static int notscsync __cpuinitdata;

#undef DEBUG_TSC_SYNC

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */
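
/*
 * Note (added commentary, not from the original source): the master's
 * loop in sync_master() runs NUM_ROUNDS*NUM_ITERS = 320 handshakes,
 * one rdtscll() reply for each of get_delta()'s NUM_ITERS probes
 * across each of sync_tsc()'s NUM_ROUNDS rounds.
 */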
/* Callback on boot CPU */
static __cpuinit void sync_master(void *arg)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	local_irq_save(flags);
	for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
		while (!go[MASTER])
			no_cpu_relax();
		go[MASTER] = 0;
		rdtscll(go[SLAVE]);
	}
	local_irq_restore(flags);
}
/*
 * Return the number of cycles by which our tsc differs from the tsc
 * on the master (time-keeper) CPU. A positive number indicates our
 * tsc is ahead of the master, negative that it is behind.
 */
static inline long
get_delta(long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	int i;

	for (i = 0; i < NUM_ITERS; ++i) {
		rdtscll(t0);
		go[MASTER] = 1;
		while (!(tm = go[SLAVE]))
			no_cpu_relax();
		go[SLAVE] = 0;
		rdtscll(t1);

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		++tcenter;
	return tcenter - best_tm;
}
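
/*
 * Note on the overflow-safe average above (added commentary, not from
 * the original source): computing (best_t0 + best_t1)/2 directly could
 * wrap the unsigned sum once the TSC is past half its range. Halving
 * first avoids that, and the parity check restores the bit lost to
 * truncation: e.g. best_t0 = 1001 and best_t1 = 1007 give
 * 500 + 503 = 1003, and since both values are odd, ++tcenter yields
 * the exact midpoint 1004.
 */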
static __cpuinit void sync_tsc(unsigned int master)
{
	int i, done = 0;
	long delta, adj, adjust_latency = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#ifdef DEBUG_TSC_SYNC
	static struct syncdebug {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of tsc adjustment latency */
	} t[NUM_ROUNDS] __cpuinitdata;
#endif

	printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n",
		smp_processor_id(), master);
	go[MASTER] = 1;

	/* It is dangerous to broadcast IPIs while cpus are coming up,
	 * as they may not be ready to accept them. Since we only need
	 * to send the IPI to the boot CPU, direct the message there
	 * and avoid the race.
	 */
	smp_call_function_single(master, sync_master, NULL, 1, 0);

	while (go[MASTER])	/* wait for master to be ready */
		no_cpu_relax();

	spin_lock_irqsave(&tsc_sync_lock, flags);
	for (i = 0; i < NUM_ROUNDS; ++i) {
		delta = get_delta(&rt, &master_time_stamp);
		if (delta == 0) {
			done = 1;	/* let's lock on to this... */
			bound = rt;
		}

		if (!done) {
			unsigned long t;
			if (i > 0) {
				adjust_latency += -delta;
				adj = -delta + adjust_latency/4;
			} else
				adj = -delta;

			rdtscll(t);
			wrmsrl(MSR_IA32_TSC, t + adj);
		}
#ifdef DEBUG_TSC_SYNC
		t[i].rt = rt;
		t[i].master = master_time_stamp;
		t[i].diff = delta;
		t[i].lat = adjust_latency/4;
#endif
	}

	spin_unlock_irqrestore(&tsc_sync_lock, flags);
#ifdef DEBUG_TSC_SYNC
	for (i = 0; i < NUM_ROUNDS; ++i)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO
	       "CPU %d: synchronized TSC with CPU %u (last diff %ld cycles, "
	       "maxerr %lu cycles)\n",
	       smp_processor_id(), master, delta, rt);
}
static void __cpuinit tsc_sync_wait(void)
{
	/*
	 * When the CPU has synchronized TSCs, assume the BIOS
	 * or the hardware already synced. Otherwise we could
	 * mess up a possible perfect synchronization with a
	 * not-quite-perfect algorithm.
	 */
	if (notscsync || !cpu_has_tsc || !unsynchronized_tsc())
		return;
	sync_tsc(0);
}
static __init int notscsync_setup(char *s)
{
	notscsync = 1;
	return 1;
}
__setup("notscsync", notscsync_setup);
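
/*
 * Usage note (added commentary): booting with "notscsync" on the
 * kernel command line sets the flag above, which makes tsc_sync_wait()
 * skip the software TSC synchronization entirely and trust whatever
 * the BIOS/hardware left in the TSCs.
 */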
static atomic_t init_deasserted __cpuinitdata;

/*
 * Report back to the Boot Processor.
 */
void __cpuinit smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC. We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	while (!atomic_read(&init_deasserted))
		cpu_relax();

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = GET_APIC_ID(apic_read(APIC_ID));
	cpuid = smp_processor_id();
	if (cpu_isset(cpuid, cpu_callin_map)) {
		panic("smp_callin: phys CPU#%d, CPU#%d already present??\n",
			phys_id, cpuid);
	}
	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Complete APIC bus
	 * silence for 1 second; this overestimates the time the
	 * boot CPU is spending to send the up to 2 STARTUP IPIs
	 * by a factor of two. This should be enough.
	 */

	/*
	 * Waiting 2s total for startup (udelay is not yet working)
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished its STARTUP sequence?
		 */
		if (cpu_isset(cpuid, cpu_callout_map))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		panic("smp_callin: CPU%d started up but did not get a callout!\n",
			cpuid);
	}
	/*
	 * The boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (This is probably redundant on most
	 * boards.)
	 */

	Dprintk("CALLIN, before setup_local_APIC().\n");
	setup_local_APIC();

	/*
	 * Need to enable IRQs because it can take longer and then
	 * the NMI watchdog might kill us.
	 */
	local_irq_enable();
	calibrate_delay();
	local_irq_disable();
	Dprintk("Stack at about %p\n", &cpuid);

	disable_APIC_timer();

	/*
	 * Save our processor parameters
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
}
/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
	struct cpuinfo_x86 *c = cpu_data + cpu;
	/*
	 * For perf, we return the last level cache shared map.
	 * And for power savings, we return cpu_core_map.
	 */
	if (sched_mc_power_savings || sched_smt_power_savings)
		return cpu_core_map[cpu];
	else
		return c->llc_shared_map;
}
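
/*
 * Design note (added commentary, not from the original source):
 * returning llc_shared_map in the default case makes the multi-core
 * sched domain span only CPUs sharing a last level cache, which favors
 * cache-hot balancing; with the power-savings knobs set, the domain
 * spans the whole package (cpu_core_map), so load can be consolidated
 * and idle cores left in deeper sleep.
 */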
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

static inline void set_cpu_sibling_map(int cpu)
{
	int i;
	struct cpuinfo_x86 *c = cpu_data;

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
			    c[cpu].cpu_core_id == c[i].cpu_core_id) {
				cpu_set(i, cpu_sibling_map[cpu]);
				cpu_set(cpu, cpu_sibling_map[i]);
				cpu_set(i, cpu_core_map[cpu]);
				cpu_set(cpu, cpu_core_map[i]);
				cpu_set(i, c[cpu].llc_shared_map);
				cpu_set(cpu, c[i].llc_shared_map);
			}
		}
	} else {
		cpu_set(cpu, cpu_sibling_map[cpu]);
	}

	cpu_set(cpu, c[cpu].llc_shared_map);

	if (current_cpu_data.x86_max_cores == 1) {
		cpu_core_map[cpu] = cpu_sibling_map[cpu];
		c[cpu].booted_cores = 1;
		return;
	}
	for_each_cpu_mask(i, cpu_sibling_setup_map) {
		if (cpu_llc_id[cpu] != BAD_APICID &&
		    cpu_llc_id[cpu] == cpu_llc_id[i]) {
			cpu_set(i, c[cpu].llc_shared_map);
			cpu_set(cpu, c[i].llc_shared_map);
		}
		if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
			cpu_set(i, cpu_core_map[cpu]);
			cpu_set(cpu, cpu_core_map[i]);
			/*
			 * Does this new cpu bring up a new core?
			 */
			if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (first_cpu(cpu_sibling_map[i]) == i)
					c[cpu].booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					c[i].booted_cores++;
			} else if (i != cpu && !c[cpu].booted_cores)
				c[cpu].booted_cores = c[i].booted_cores;
		}
	}
}
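
/*
 * Illustrative example (added commentary, not from the original
 * source): on a hypothetical box with one package holding two cores
 * of two HT threads each (logical cpus 0-3, booted in order), the
 * loops above leave cpu 0 with cpu_sibling_map = {0,1} (its HT twin),
 * cpu_core_map = {0,1,2,3} (the whole package), and booted_cores = 2,
 * since exactly one thread of each sibling pair (the first_cpu() of
 * {0,1} and of {2,3}) is counted as a distinct core.
 */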
/*
 * Setup code on secondary processor (after coming out of the trampoline)
 */
void __cpuinit start_secondary(void)
{
	/*
	 * Don't put anything before smp_callin(); SMP booting is so
	 * fragile that we want to limit the things done here to the
	 * most necessary things.
	 */
	cpu_init();
	preempt_disable();
	smp_callin();

	/* otherwise gcc will move up the smp_processor_id before the cpu_init */
	barrier();

	Dprintk("cpu %d: setting up apic clock\n", smp_processor_id());
	setup_secondary_APIC_clock();

	Dprintk("cpu %d: enabling apic timer\n", smp_processor_id());

	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0(NULL);
		enable_8259A_irq(0);
	}

	enable_APIC_timer();

	/*
	 * The sibling maps must be set before turning the online map on
	 * for this cpu.
	 */
	set_cpu_sibling_map(smp_processor_id());
	/*
	 * Wait for TSC sync here so that nothing is scheduled before it
	 * completes. We still process interrupts, which could
	 * unfortunately see an inconsistent time in that window.
	 * Do this here because TSC sync has global unprotected state.
	 */
	tsc_sync_wait();

	/*
	 * We need to hold call_lock, so there is no inconsistency
	 * between the time smp_call_function() determines the number of
	 * IPI recipients, and the time when the determination is made
	 * for which cpus receive the IPI in genapic_flat.c. Holding this
	 * lock helps us to not include this cpu in a currently in progress
	 * smp_call_function().
	 */
	lock_ipi_call_lock();
	spin_lock(&vector_lock);

	/* Setup the per cpu irq handling data structures */
	__setup_vector_irq(smp_processor_id());

	/*
	 * Allow the master to continue.
	 */
	cpu_set(smp_processor_id(), cpu_online_map);
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
	spin_unlock(&vector_lock);
	unlock_ipi_call_lock();

	cpu_idle();
}
extern volatile unsigned long init_rsp;
extern void (*initial_code)(void);
static void inquire_remote_apic(int apicid)
{
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	char *names[] = { "ID", "VERSION", "SPIV" };
	int timeout, status;

	printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);

	for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
		printk("... APIC #%d %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		apic_wait_icr_idle();

		apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
		apic_write(APIC_ICR, APIC_DM_REMRD | regs[i]);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			printk("%08x\n", status);
			break;
		default:
			printk("failed\n");
		}
	}
}
/*
 * Kick the secondary to wake up.
 */
static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_rip)
{
	unsigned long send_status = 0, accept_status = 0;
	int maxlvt, timeout, num_starts, j;

	Dprintk("Asserting INIT.\n");

	/*
	 * Turn INIT on target chip
	 */
	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/*
	 * Send IPI
	 */
	apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
				| APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		Dprintk("+");
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));
	mdelay(10);

	Dprintk("Deasserting INIT.\n");

	/* Target chip */
	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/* Send IPI */
	apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		Dprintk("+");
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	mb();
	atomic_set(&init_deasserted, 1);

	num_starts = 2;

	/*
	 * Run STARTUP IPI loop.
	 */
	Dprintk("#startup loops: %d.\n", num_starts);

	maxlvt = get_maxlvt();
	for (j = 1; j <= num_starts; j++) {
		Dprintk("Sending STARTUP #%d.\n", j);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		Dprintk("After apic_write.\n");

		/*
		 * STARTUP IPI
		 */

		/* Target chip */
		apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

		/* Boot on the stack */
		/* Kick the second */
		apic_write(APIC_ICR, APIC_DM_STARTUP | (start_rip >> 12));

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(300);

		Dprintk("Startup point 1.\n");

		Dprintk("Waiting for send to finish...\n");
		timeout = 0;
		do {
			Dprintk("+");
			udelay(100);
			send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
		} while (send_status && (timeout++ < 1000));
		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(200);

		/*
		 * Due to the Pentium erratum 3AP.
		 */
		if (maxlvt > 3) {
			apic_write(APIC_ESR, 0);
		}
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	Dprintk("After Startup.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
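
/*
 * Illustrative note on the STARTUP vector above (added commentary,
 * not from the original source): the SIPI vector field carries the
 * physical page number of the real-mode entry point, hence
 * (start_rip >> 12). The woken CPU starts executing in real mode at
 * CS = vector << 8, IP = 0, so the trampoline must sit on a 4K
 * boundary below 1MB. E.g. a trampoline copied to physical 0x6000
 * yields vector 0x06 and an entry point of 0600:0000.
 */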
struct create_idle {
	struct task_struct *idle;
	struct completion done;
	int cpu;
};

void do_fork_idle(void *_c_idle)
{
	struct create_idle *c_idle = _c_idle;

	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}
static int __cpuinit do_boot_cpu(int cpu, int apicid)
{
	unsigned long boot_error;
	int timeout;
	unsigned long start_rip;
	struct create_idle c_idle = {
		.cpu = cpu,
		.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
	};
	DECLARE_WORK(work, do_fork_idle, &c_idle);

	/* allocate memory for gdts of secondary cpus. Hotplug is considered */
	if (!cpu_gdt_descr[cpu].address &&
		!(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
		printk(KERN_ERR "Failed to allocate GDT for CPU %d\n", cpu);
		return -1;
	}
	/* Allocate node local memory for AP pdas */
	if (cpu_pda(cpu) == &boot_cpu_pda[cpu]) {
		struct x8664_pda *newpda, *pda;
		int node = cpu_to_node(cpu);
		pda = cpu_pda(cpu);
		newpda = kmalloc_node(sizeof (struct x8664_pda), GFP_ATOMIC,
				      node);
		if (newpda) {
			memcpy(newpda, pda, sizeof (struct x8664_pda));
			cpu_pda(cpu) = newpda;
		} else
			printk(KERN_ERR
		"Could not allocate node local PDA for CPU %d on node %d\n",
				cpu, node);
	}

	alternatives_smp_switch(1);

	c_idle.idle = get_idle_for_cpu(cpu);

	if (c_idle.idle) {
		c_idle.idle->thread.rsp = (unsigned long) (((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(c_idle.idle))) - 1);
		init_idle(c_idle.idle, cpu);
		goto do_rest;
	}
	/*
	 * During the cold boot process, the keventd thread is not spun
	 * up yet. When we do cpu hot-add, we create idle threads on the
	 * fly; they should not acquire any attributes from the calling
	 * context. Hence the clean way to create kernel_threads() is to
	 * do that from keventd(). We do the current_is_keventd() check
	 * because the ACPI notifier was also queuing to keventd(), and
	 * when the caller is already running in the context of keventd(),
	 * we would end up locking up the keventd thread.
	 */
	if (!keventd_up() || current_is_keventd())
		work.func(work.data);
	else {
		schedule_work(&work);
		wait_for_completion(&c_idle.done);
	}

	if (IS_ERR(c_idle.idle)) {
		printk("failed fork for CPU %d\n", cpu);
		return PTR_ERR(c_idle.idle);
	}

	set_idle_for_cpu(cpu, c_idle.idle);
do_rest:

	cpu_pda(cpu)->pcurrent = c_idle.idle;

	start_rip = setup_trampoline();

	init_rsp = c_idle.idle->thread.rsp;
	per_cpu(init_tss, cpu).rsp0 = init_rsp;
	initial_code = start_secondary;
	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);

	printk(KERN_INFO "Booting processor %d/%d APIC 0x%x\n", cpu,
		cpus_weight(cpu_present_map),
		apicid);
	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");

	CMOS_WRITE(0xa, 0xf);
	*((volatile unsigned short *) phys_to_virt(0x469)) = start_rip >> 4;
	*((volatile unsigned short *) phys_to_virt(0x467)) = start_rip & 0xf;
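
	/*
	 * Illustrative note (added commentary, not from the original
	 * source): writing 0xa to CMOS register 0xf requests a "warm
	 * start" through the BIOS reset vector at 40:67, stored segment
	 * at 0x469 and offset at 0x467. With a trampoline at physical
	 * 0x6000, the words written are segment 0x0600 (start_rip >> 4)
	 * and offset 0x0 (start_rip & 0xf), i.e. the BIOS jumps to
	 * 0600:0000 on an INIT that doesn't take the cold-boot path.
	 */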
	/*
	 * Be paranoid about clearing APIC errors.
	 */
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);

	/*
	 * Status is now clean
	 */
	boot_error = 0;

	/*
	 * Starting actual IPI sequence...
	 */
	boot_error = wakeup_secondary_via_INIT(apicid, start_rip);

	if (!boot_error) {
		/*
		 * allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu);
		cpu_set(cpu, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu);
		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpu_isset(cpu, cpu_callin_map))
				break;	/* It has booted */
			udelay(100);
		}

		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("CPU has booted.\n");
		} else {
			boot_error = 1;
			if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE))
					== 0xA5)
				/* trampoline started but...? */
				printk("Stuck ??\n");
			else
				/* trampoline code not run */
				printk("Not responding.\n");
			inquire_remote_apic(apicid);
		}
	}
	if (boot_error) {
		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
		clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
		clear_node_cpumask(cpu); /* was set by numa_add_cpu */
		cpu_clear(cpu, cpu_present_map);
		cpu_clear(cpu, cpu_possible_map);
		x86_cpu_to_apicid[cpu] = BAD_APICID;
		x86_cpu_to_log_apicid[cpu] = BAD_APICID;
		return -EIO;
	}

	return 0;
}

cycles_t cacheflush_time;
unsigned long cache_decay_ticks;
/*
 * Cleanup possible dangling ends...
 */
static __cpuinit void smp_cleanup_boot(void)
{
	/*
	 * Paranoid: Set warm reset code and vector here back
	 * to default values.
	 */
	CMOS_WRITE(0, 0xf);

	/*
	 * Reset trampoline flag
	 */
	*((volatile int *) phys_to_virt(0x467)) = 0;
}
/*
 * Fall back to non-SMP mode after errors.
 *
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __init void disable_smp(void)
{
	cpu_present_map = cpumask_of_cpu(0);
	cpu_possible_map = cpumask_of_cpu(0);
	if (smp_found_config)
		phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
	else
		phys_cpu_present_map = physid_mask_of_physid(0);
	cpu_set(0, cpu_sibling_map[0]);
	cpu_set(0, cpu_core_map[0]);
}

#ifdef CONFIG_HOTPLUG_CPU

int additional_cpus __initdata = -1;
/*
 * cpu_possible_map should be static: it cannot change as CPUs
 * are onlined or offlined. The reason is that per-cpu data structures
 * are allocated by some modules at init time, and they don't expect
 * to do this dynamically on CPU arrival/departure.
 * cpu_present_map, on the other hand, can change dynamically.
 * When cpu_hotplug is not compiled in, we resort to the current
 * behaviour, which is cpu_possible == cpu_present.
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
 * - The user can override it with additional_cpus=NUM.
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 */
__init void prefill_possible_map(void)
{
	int i;
	int possible;

	if (additional_cpus == -1) {
		if (disabled_cpus > 0)
			additional_cpus = disabled_cpus;
		else
			additional_cpus = 0;
	}
	possible = num_processors + additional_cpus;
	if (possible > NR_CPUS)
		possible = NR_CPUS;

	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
		possible,
		max_t(int, possible - num_processors, 0));

	for (i = 0; i < possible; i++)
		cpu_set(i, cpu_possible_map);
}
#endif
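
/*
 * Worked example (added commentary, not from the original source):
 * on a board shipped with 2 of 4 sockets populated, the BIOS typically
 * reports num_processors = 2 and disabled_cpus = 2. With no
 * additional_cpus= override, prefill_possible_map() above reserves
 * possible = 2 + 2 = 4, so two more CPUs can be hot-added later.
 */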
/*
 * Various sanity checks.
 */
static int __init smp_sanity_check(unsigned max_cpus)
{
	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
		       hard_smp_processor_id());
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}
	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		disable_smp();
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		return -1;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 */
	if (!physid_isset(boot_cpu_id, phys_cpu_present_map)) {
		printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n",
			boot_cpu_id);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (!cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_id);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
		nr_ioapics = 0;
		return -1;
	}

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
		nr_ioapics = 0;
		return -1;
	}

	return 0;
}
/*
 * Prepare for SMP bootup. The MP table or ACPI has been read
 * earlier. Just do some sanity checking here and enable APIC mode.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	nmi_watchdog_default();
	current_cpu_data = boot_cpu_data;
	current_thread_info()->cpu = 0;  /* needed? */
	set_cpu_sibling_map(0);

	if (smp_sanity_check(max_cpus) < 0) {
		printk(KERN_INFO "SMP disabled\n");
		disable_smp();
		return;
	}

	/*
	 * Switch from PIC to APIC mode.
	 */
	setup_local_APIC();

	if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id) {
		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
		      GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_id);
		/* Or can we switch back to PIC here? */
	}

	/*
	 * Now start the IO-APICs
	 */
	if (!skip_ioapic_setup && nr_ioapics)
		setup_IO_APIC();
	else
		nr_ioapics = 0;

	/*
	 * Set up local APIC timer on boot CPU.
	 */
	setup_boot_APIC_clock();
}
/*
 * Early setup to make printk work.
 */
void __init smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();
	cpu_set(me, cpu_online_map);
	cpu_set(me, cpu_callout_map);
	per_cpu(cpu_state, me) = CPU_ONLINE;
}
/*
 * Entry point to boot a CPU.
 */
int __cpuinit __cpu_up(unsigned int cpu)
{
	int err;
	int apicid = cpu_present_to_apicid(cpu);

	WARN_ON(irqs_disabled());

	Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu);

	if (apicid == BAD_APICID || apicid == boot_cpu_id ||
	    !physid_isset(apicid, phys_cpu_present_map)) {
		printk("__cpu_up: bad cpu %d\n", cpu);
		return -EINVAL;
	}

	/*
	 * Already booted CPU?
	 */
	if (cpu_isset(cpu, cpu_callin_map)) {
		Dprintk("do_boot_cpu %d Already started\n", cpu);
		return -ENOSYS;
	}

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* Boot it! */
	err = do_boot_cpu(cpu, apicid);
	if (err < 0) {
		Dprintk("do_boot_cpu failed %d\n", err);
		return err;
	}

	/* Unleash the CPU! */
	Dprintk("waiting for cpu %d\n", cpu);

	while (!cpu_isset(cpu, cpu_online_map))
		cpu_relax();

	if (num_online_cpus() > 8 && genapic == &apic_flat) {
		printk(KERN_WARNING
			"flat APIC routing can't be used with > 8 cpus\n");
		BUG();
	}

	err = 0;

	return err;
}
/*
 * Finish the SMP boot.
 */
void __init smp_cpus_done(unsigned int max_cpus)
{
	smp_cleanup_boot();
	setup_ioapic_dest();
	check_nmi_watchdog();
}
#ifdef CONFIG_HOTPLUG_CPU

static void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = cpu_data;

	for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
		cpu_clear(cpu, cpu_core_map[sibling]);
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpus_weight(cpu_sibling_map[cpu]) == 1)
			c[sibling].booted_cores--;
	}

	for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
		cpu_clear(cpu, cpu_sibling_map[sibling]);
	cpus_clear(cpu_sibling_map[cpu]);
	cpus_clear(cpu_core_map[cpu]);
	c[cpu].phys_proc_id = 0;
	c[cpu].cpu_core_id = 0;
	cpu_clear(cpu, cpu_sibling_setup_map);
}
void remove_cpu_from_maps(void)
{
	int cpu = smp_processor_id();

	cpu_clear(cpu, cpu_callout_map);
	cpu_clear(cpu, cpu_callin_map);
	clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
	clear_node_cpumask(cpu);
}
int __cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * Perhaps use cpufreq to drop frequency, but that could go
	 * into generic code.
	 *
	 * We won't take down the boot processor on i386 due to some
	 * interrupts only being able to be serviced by the BSP.
	 * Especially so if we're not using an IOAPIC	-zwane
	 */
	if (cpu == 0)
		return -EBUSY;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		stop_apic_nmi_watchdog(NULL);

	/*
	 * Allow any queued timer interrupts to get serviced.
	 * This is only a temporary solution until we cleanup
	 * fixup_irqs as we do for IA64.
	 */
	local_irq_enable();
	mdelay(1);

	local_irq_disable();
	remove_siblinginfo(cpu);

	spin_lock(&vector_lock);
	/* It's now safe to remove this processor from the online map */
	cpu_clear(cpu, cpu_online_map);
	spin_unlock(&vector_lock);
	remove_cpu_from_maps();
	fixup_irqs(cpu_online_map);
	return 0;
}
void __cpu_die(unsigned int cpu)
{
	/* We don't do anything here: the idle task is faking death itself. */
	unsigned int i;

	for (i = 0; i < 10; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk("CPU %d is now offline\n", cpu);
			if (1 == num_online_cpus())
				alternatives_smp_switch(0);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
static __init int setup_additional_cpus(char *s)
{
	return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
}
early_param("additional_cpus", setup_additional_cpus);

#else /* ... !CONFIG_HOTPLUG_CPU */

int __cpu_disable(void)
{
	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */