/*
 * SMP related functions
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Denis Joseph Barrow,
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/crash_dump.h>
#include <asm/asm-offsets.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include "entry.h"

enum {
	ec_schedule = 0,
	ec_call_function_single,
	ec_stop_cpu,
};

enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

struct pcpu {
	struct cpu *cpu;
	struct _lowcore *lowcore;	/* lowcore page(s) for the cpu */
	unsigned long async_stack;	/* async stack for the cpu */
	unsigned long panic_stack;	/* panic stack for the cpu */
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	int state;			/* physical cpu state */
	int polarization;		/* physical polarization */
	u16 address;			/* physical cpu address */
};

static u8 boot_cpu_type;
static u16 boot_cpu_address;
static struct pcpu pcpu_devices[NR_CPUS];

/*
 * The smp_cpu_state_mutex must be held when changing the state or polarization
 * member of a pcpu data structure within the pcpu_devices array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);
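
/*
 * pcpu_devices[] is indexed by logical cpu number, and the ->address
 * member holds the corresponding physical cpu address. Helpers such as
 * pcpu_find_address() and smp_find_processor_id() implement the reverse,
 * physical-to-logical lookup.
 */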

/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp(u16 addr, u8 order, u32 parm, u32 *status)
{
	register unsigned int reg1 asm ("1") = parm;
	int cc;

	asm volatile(
		"	sigp	%1,%2,0(%3)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc");
	if (status && cc == 1)
		*status = reg1;
	return cc;
}
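
/*
 * The condition code returned by __pcpu_sigp() follows the architected
 * SIGP semantics: 0 order accepted, 1 status stored (reg1 then contains
 * the status word), 2 busy, 3 target not operational. The SIGP_CC_* and
 * SIGP_STATUS_* constants used below mirror this encoding.
 */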

static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			return cc;
		cpu_relax();
	}
}

static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}

static inline int pcpu_stopped(struct pcpu *pcpu)
{
	u32 uninitialized_var(status);

	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
			0, &status) != SIGP_CC_STATUS_STORED)
		return 0;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
			0, NULL) != SIGP_CC_STATUS_STORED)
		return 1;
	/* Status stored condition code is equivalent to cpu not running. */
	return 0;
}

/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (pcpu_devices[cpu].address == address)
			return pcpu_devices + cpu;
	return NULL;
}

static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
		return;
	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
	pcpu_sigp_retry(pcpu, order, 0);
}
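
/*
 * ec requests are coalesced via ec_mask: if test_and_set_bit() finds the
 * bit already set, a signal is still pending for the target and no new
 * SIGP order is needed. Both orders raise an external interrupt on the
 * target cpu (0x1202 for external call, 0x1201 for emergency signal),
 * which is handled by do_ext_call_interrupt() below; the handler clears
 * the whole mask with xchg() and processes all pending bits at once.
 */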

static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	struct _lowcore *lc;

	if (pcpu != &pcpu_devices[0]) {
		pcpu->lowcore = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
		pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		pcpu->panic_stack = __get_free_page(GFP_KERNEL);
		if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack)
			goto out;
	}
	lc = pcpu->lowcore;
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = pcpu->async_stack + ASYNC_SIZE
		- STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->panic_stack = pcpu->panic_stack + PAGE_SIZE
		- STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->cpu_nr = cpu;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL);
		if (!lc->extended_save_area_addr)
			goto out;
	}
#else
	if (vdso_alloc_per_cpu(lc))
		goto out;
#endif
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
	return 0;
out:
	if (pcpu != &pcpu_devices[0]) {
		free_page(pcpu->panic_stack);
		free_pages(pcpu->async_stack, ASYNC_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
	return -ENOMEM;
}
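
/*
 * SIGP SET PREFIX points the target cpu's prefix register at the new
 * lowcore, redirecting that cpu's view of absolute page zero (the prefix
 * area) to these page(s). The first 512 bytes, which contain the
 * architected PSW and interrupt areas, are inherited from the running
 * lowcore; the remainder starts out zeroed.
 */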

#ifdef CONFIG_HOTPLUG_CPU

static void pcpu_free_lowcore(struct pcpu *pcpu)
{
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
	lowcore_ptr[pcpu - pcpu_devices] = NULL;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		struct _lowcore *lc = pcpu->lowcore;

		free_page((unsigned long) lc->extended_save_area_addr);
		lc->extended_save_area_addr = 0;
	}
#else
	vdso_free_per_cpu(pcpu->lowcore);
#endif
	if (pcpu != &pcpu_devices[0]) {
		free_page(pcpu->panic_stack);
		free_pages(pcpu->async_stack, ASYNC_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
}

#endif /* CONFIG_HOTPLUG_CPU */

static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct _lowcore *lc = pcpu->lowcore;

	if (MACHINE_HAS_TLB_LC)
		cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
	atomic_inc(&init_mm.context.attach_count);
	lc->cpu_nr = cpu;
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->ftrace_func = S390_lowcore.ftrace_func;
	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       MAX_FACILITY_BIT/8);
}

static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
	struct _lowcore *lc = pcpu->lowcore;
	struct thread_info *ti = task_thread_info(tsk);

	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->thread_info = (unsigned long) task_thread_info(tsk);
	lc->current_task = (unsigned long) tsk;
	lc->user_timer = ti->user_timer;
	lc->system_timer = ti->system_timer;
	lc->steal_timer = 0;
}

static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
	struct _lowcore *lc = pcpu->lowcore;

	lc->restart_stack = lc->kernel_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1UL;
	pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}
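
/*
 * SIGP RESTART causes the target cpu to load its restart new PSW; the
 * low-level restart handler then picks restart_stack, restart_fn and
 * restart_data out of the lowcore and calls func(data) on that stack.
 * restart_source is set to -1UL here since no cpu address needs to be
 * recorded as the requester.
 */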

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
			  void *data, unsigned long stack)
{
	struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
	unsigned long source_cpu = stap();

	__load_psw_mask(PSW_KERNEL_BITS);
	if (pcpu->address == source_cpu)
		func(data);	/* should not return */
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	mem_assign_absolute(lc->restart_stack, stack);
	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
	mem_assign_absolute(lc->restart_data, (unsigned long) data);
	mem_assign_absolute(lc->restart_source, source_cpu);
	asm volatile(
		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (source_cpu),
		    "K" (SIGP_RESTART), "K" (SIGP_STOP)
		: "0", "1", "cc");
	for (;;) ;
}

/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
	struct pcpu *pcpu;

	/* Use the current cpu if it is online. */
	pcpu = pcpu_find_address(cpu_online_mask, stap());
	if (!pcpu)
		/* Use the first online cpu. */
		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	pcpu_delegate(&pcpu_devices[0], func, data,
		      pcpu_devices->panic_stack + PAGE_SIZE);
}

int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (pcpu_devices[cpu].address == address)
			return cpu;
	return -1;
}

int smp_vcpu_scheduled(int cpu)
{
	return pcpu_running(pcpu_devices + cpu);
}

void smp_yield(void)
{
	if (MACHINE_HAS_DIAG44)
		asm volatile("diag 0,0,0x44");
}

void smp_yield_cpu(int cpu)
{
	if (MACHINE_HAS_DIAG9C)
		asm volatile("diag %0,0,0x9c"
			     : : "d" (pcpu_devices[cpu].address));
	else if (MACHINE_HAS_DIAG44)
		asm volatile("diag 0,0,0x44");
}

/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
static void smp_emergency_stop(cpumask_t *cpumask)
{
	u64 end;
	int cpu;

	end = get_tod_clock() + (1000000UL << 12);
	for_each_cpu(cpu, cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;

		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
				   0, NULL) == SIGP_CC_BUSY &&
		       get_tod_clock() < end)
			cpu_relax();
	}
	while (get_tod_clock() < end) {
		for_each_cpu(cpu, cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, cpumask);
		if (cpumask_empty(cpumask))
			break;
		cpu_relax();
	}
}
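
/*
 * (1000000UL << 12) is one second in TOD clock units: bit 51 of the TOD
 * clock ticks once per microsecond, so a microsecond count shifted left
 * by 12 matches the format returned by get_tod_clock().
 */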

/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	cpumask_t cpumask;
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	trace_hardirqs_off();

	debug_set_critical();
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	if (oops_in_progress)
		smp_emergency_stop(&cpumask);

	/* stop all processors */
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;

		pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu))
			cpu_relax();
	}
}

/*
 * Stop the current cpu.
 */
void smp_stop_cpu(void)
{
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
	unsigned long bits;

	/* handle bit signal external calls */
	bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();
	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();
	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
}

static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
	smp_handle_ext_call();
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
static void smp_ptlb_callback(void *info)
{
	__tlb_flush_local();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orval;
	unsigned long andval;
	int cr;
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];

	__ctl_store(cregs, 0, 15);
	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);
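
/*
 * A single callback covers both directions because the new value is
 * computed as (old & andval) | orval on each cpu: smp_ctl_set_bit()
 * passes andval == -1UL with the bit in orval, smp_ctl_clear_bit()
 * passes orval == 0 with the bit cleared in andval.
 */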

#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP)

static void __init smp_get_save_area(int cpu, u16 address)
{
	void *lc = pcpu_devices[0].lowcore;
	struct save_area *save_area;

	if (is_kdump_kernel())
		return;
	if (!OLDMEM_BASE && (address == boot_cpu_address ||
			     ipl_info.type != IPL_TYPE_FCP_DUMP))
		return;
	save_area = dump_save_area_create(cpu);
	if (!save_area)
		panic("could not allocate memory for save area\n");
#ifdef CONFIG_CRASH_DUMP
	if (address == boot_cpu_address) {
		/* Copy the registers of the boot cpu. */
		copy_oldmem_page(1, (void *) save_area, sizeof(*save_area),
				 SAVE_AREA_BASE - PAGE_SIZE, 0);
		return;
	}
#endif
	/* Get the registers of a non-boot cpu. */
	__pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL);
	memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area));
}

int smp_store_status(int cpu)
{
	struct pcpu *pcpu;

	pcpu = pcpu_devices + cpu;
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STOP_AND_STORE_STATUS,
			      0, NULL) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	return 0;
}

#else /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */

static inline void smp_get_save_area(int cpu, u16 address) { }

#endif /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */

void smp_cpu_set_polarization(int cpu, int val)
{
	pcpu_devices[cpu].polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
	return pcpu_devices[cpu].polarization;
}

static struct sclp_cpu_info *smp_get_cpu_info(void)
{
	static int use_sigp_detection;
	struct sclp_cpu_info *info;
	int address;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
		use_sigp_detection = 1;
		for (address = 0; address <= MAX_CPU_ADDRESS; address++) {
			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) ==
			    SIGP_CC_NOT_OPERATIONAL)
				continue;
			info->cpu[info->configured].address = address;
			info->configured++;
		}
		info->combined = info->configured;
	}
	return info;
}
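
/*
 * If the SCLP query fails, cpu detection falls back to probing every
 * possible cpu address with SIGP SENSE and skipping the addresses that
 * answer "not operational". use_sigp_detection is static, so once the
 * fallback is taken all subsequent rescans use it as well.
 */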

static int smp_add_present_cpu(int cpu);

static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
{
	struct pcpu *pcpu;
	cpumask_t avail;
	int cpu, nr, i;

	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	cpu = cpumask_first(&avail);
	for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
		if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
			continue;
		if (pcpu_find_address(cpu_present_mask, info->cpu[i].address))
			continue;
		pcpu = pcpu_devices + cpu;
		pcpu->address = info->cpu[i].address;
		pcpu->state = (i >= info->configured) ?
			CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		set_cpu_present(cpu, true);
		if (sysfs_add && smp_add_present_cpu(cpu) != 0)
			set_cpu_present(cpu, false);
		else
			nr++;
		cpu = cpumask_next(cpu, &avail);
	}
	return nr;
}

static void __init smp_detect_cpus(void)
{
	unsigned int cpu, c_cpus, s_cpus;
	struct sclp_cpu_info *info;

	info = smp_get_cpu_info();
	if (!info)
		panic("smp_detect_cpus failed to allocate memory\n");
	if (info->has_cpu_type) {
		for (cpu = 0; cpu < info->combined; cpu++) {
			if (info->cpu[cpu].address != boot_cpu_address)
				continue;
			/* The boot cpu dictates the cpu type. */
			boot_cpu_type = info->cpu[cpu].type;
			break;
		}
	}
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type)
			continue;
		if (cpu < info->configured) {
			smp_get_save_area(c_cpus, info->cpu[cpu].address);
			c_cpus++;
		} else
			s_cpus++;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
	get_online_cpus();
	__smp_rescan_cpus(info, 0);
	put_online_cpus();
	kfree(info);
}

/*
 * Activate a secondary processor.
 */
static void smp_start_secondary(void *cpuvoid)
{
	S390_lowcore.last_update_clock = get_tod_clock();
	S390_lowcore.restart_stack = (unsigned long) restart_stack;
	S390_lowcore.restart_fn = (unsigned long) do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1UL;
	restore_access_regs(S390_lowcore.access_regs_save_area);
	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	cpu_init();
	preempt_disable();
	init_cpu_timer();
	vtime_init();
	pfault_init();
	notify_cpu_starting(smp_processor_id());
	set_cpu_online(smp_processor_id(), true);
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_ONLINE);
}

/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu;
	int rc;

	pcpu = pcpu_devices + cpu;
	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
	get_option(&s, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	unsigned long cregs[16];

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	set_cpu_online(smp_processor_id(), false);
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0]  &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6]  &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	atomic_dec(&init_mm.context.attach_count);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
	if (MACHINE_HAS_TLB_LC)
		cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}

void __noreturn cpu_die(void)
{
	idle_task_exit();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}

#endif /* CONFIG_HOTPLUG_CPU */

void __init smp_fill_possible_mask(void)
{
	unsigned int possible, sclp, cpu;

	sclp = sclp_get_max_cpu() ?: nr_cpu_ids;
	possible = setup_possible_cpus ?: nr_cpu_ids;
	possible = min(possible, sclp);
	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
	smp_detect_cpus();
}

void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	boot_cpu_address = stap();
	pcpu->state = CPU_STATE_CONFIGURED;
	pcpu->address = boot_cpu_address;
	pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
	pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE
		+ STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE
		+ STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
	set_cpu_present(0, true);
	set_cpu_online(0, true);
}
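
/*
 * Note the inverted sign compared to pcpu_alloc_lowcore(): the lowcore
 * fields hold stack pointers that were biased by ASYNC_SIZE/PAGE_SIZE
 * minus the frame overhead when the boot lowcore was set up, so the
 * arithmetic here recovers the original allocation base addresses for
 * the pcpu structure of the boot cpu.
 */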

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
	S390_lowcore.cpu_nr = 0;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu = dev->id;
	if (cpu_online(cpu) || cpu == 0)
		goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_cpu_deconfigure(pcpu->address);
		if (rc)
			break;
		pcpu->state = CPU_STATE_STANDBY;
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_cpu_configure(pcpu->address);
		if (rc)
			break;
		pcpu->state = CPU_STATE_CONFIGURED;
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&dev_attr_configure.attr,
#endif
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static ssize_t show_idle_count(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned long long idle_count;
	unsigned int sequence;

	do {
		sequence = ACCESS_ONCE(idle->sequence);
		idle_count = ACCESS_ONCE(idle->idle_count);
		if (ACCESS_ONCE(idle->clock_idle_enter))
			idle_count++;
	} while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
	return sprintf(buf, "%llu\n", idle_count);
}
static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
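
/*
 * The do/while loop is the read side of a seqcount: idle->sequence is
 * odd while the per-cpu idle data is being updated, so the sample is
 * retried until the sequence is even and unchanged. A non-zero
 * clock_idle_enter means the cpu is idle right now, hence the extra
 * increment of idle_count.
 */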

static ssize_t show_idle_time(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned long long now, idle_time, idle_enter, idle_exit;
	unsigned int sequence;

	do {
		now = get_tod_clock();
		sequence = ACCESS_ONCE(idle->sequence);
		idle_time = ACCESS_ONCE(idle->idle_time);
		idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
		idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
	} while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
	idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
	return sprintf(buf, "%llu\n", idle_time >> 12);
}
static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
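
/*
 * If the cpu is idle at sampling time (idle_enter != 0), the still-open
 * idle interval is added on top, using "now" when no exit stamp exists
 * yet. The final ">> 12" converts TOD clock units to microseconds,
 * matching the idle_time_us attribute name.
 */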

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
			  void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct cpu *c = pcpu_devices[cpu].cpu;
	struct device *s = &c->dev;
	int err = 0;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		break;
	case CPU_DEAD:
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
		break;
	}
	return notifier_from_errno(err);
}

static int smp_add_present_cpu(int cpu)
{
	struct device *s;
	struct cpu *c;
	int rc;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;
	pcpu_devices[cpu].cpu = c;
	s = &c->dev;
	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	if (cpu_online(cpu)) {
		rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		if (rc)
			goto out_online;
	}
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	if (cpu_online(cpu))
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
out_online:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}

#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)
{
	struct sclp_cpu_info *info;
	int nr;

	info = smp_get_cpu_info();
	if (!info)
		return -ENOMEM;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	nr = __smp_rescan_cpus(info, 1);
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}

static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = smp_rescan_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */

static int __init s390_smp_init(void)
{
	int cpu, rc = 0;

#ifdef CONFIG_HOTPLUG_CPU
	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
	if (rc)
		return rc;
#endif
	cpu_notifier_register_begin();
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			goto out;
	}

	__hotcpu_notifier(smp_cpu_notify, 0);

out:
	cpu_notifier_register_done();
	return rc;
}
subsys_initcall(s390_smp_init);