/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright IBM Corp. 1999, 2009
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 *		 Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (obtained via STAP) are the sigp
 * functions. For all other functions we use the identity mapping,
 * which means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one, which is what causes all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */
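/*
 * Illustrative sketch (not from the original source) of the split described
 * above: a sigp wrapper of roughly the shape below is assumed (the real
 * helpers live in <asm/sigp.h>). Only such wrappers see the physical cpu
 * address; everything else indexes its arrays by the logical cpu number.
 */
#if 0	/* example only */
static inline int example_sigp(int cpu, int order)
{
	/* the logical -> physical translation happens only here */
	return raw_sigp(__cpu_logical_map[cpu], order);
}
#endif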
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/timex.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <asm/asm-offsets.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/cputime.h>
#include <asm/vdso.h>
#include <asm/cpu.h>
#include "entry.h"
/* logical cpu to cpu address */
unsigned short __cpu_logical_map[NR_CPUS];

static struct task_struct *current_set[NR_CPUS];

static u8 smp_cpu_type;
static int smp_use_sigp_detection;

enum s_cpu_state {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

DEFINE_MUTEX(smp_cpu_state_mutex);
int smp_cpu_polarization[NR_CPUS];
static int smp_cpu_state[NR_CPUS];
static int cpu_management;

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static void smp_ext_bitcall(int, int);
static int raw_cpu_stopped(int cpu)
{
	u32 status;

	switch (raw_sigp_ps(&status, 0, cpu, sigp_sense)) {
	case sigp_status_stored:
		/* Check for stopped and check stop state */
		if (status & 0x50)
			return 1;
		break;
	default:
		break;
	}
	return 0;
}

static inline int cpu_stopped(int cpu)
{
	return raw_cpu_stopped(cpu_logical_map(cpu));
}
void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
{
	struct _lowcore *lc, *current_lc;
	struct stack_frame *sf;
	struct pt_regs *regs;
	unsigned long sp;

	if (smp_processor_id() == 0)
		func(data);
	__load_psw_mask(PSW_BASE_BITS | PSW_DEFAULT_KEY);
	/* Disable lowcore protection */
	__ctl_clear_bit(0, 28);
	current_lc = lowcore_ptr[smp_processor_id()];
	lc = lowcore_ptr[0];
	if (!lc)
		lc = current_lc;
	lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
	lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu;
	if (!cpu_online(0))
		smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]);
	while (sigp(0, sigp_stop_and_store_status) == sigp_busy)
		cpu_relax();
	sp = lc->panic_stack;
	sp -= sizeof(struct pt_regs);
	regs = (struct pt_regs *) sp;
	memcpy(&regs->gprs, &current_lc->gpregs_save_area, sizeof(regs->gprs));
	regs->psw = lc->psw_save_area;
	sp -= STACK_FRAME_OVERHEAD;
	sf = (struct stack_frame *) sp;
	sf->back_chain = regs->gprs[15];
	smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]);
}
void smp_send_stop(void)
{
	int cpu, rc;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
	trace_hardirqs_off();

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = sigp(cpu, sigp_stop);
		} while (rc == sigp_busy);

		while (!cpu_stopped(cpu))
			cpu_relax();
	}
}
/*
 * This is the main routine where commands issued by other
 * cpus should be handled.
 */
static void do_ext_call_interrupt(unsigned int ext_int_code,
				  unsigned int param32, unsigned long param64)
{
	unsigned long bits;

	kstat_cpu(smp_processor_id()).irqs[EXTINT_IPI]++;
	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		generic_smp_call_function_interrupt();

	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
}
/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, int sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (sigp(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		smp_ext_bitcall(cpu, ec_call_function);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_ext_bitcall(cpu, ec_call_function_single);
}
#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
static void smp_ptlb_callback(void *info)
{
	__tlb_flush_local();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}
/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orvals[16];
	unsigned long andvals[16];
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];
	int i;

	__ctl_store(cregs, 0, 15);
	for (i = 0; i <= 15; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.orvals[cr] = 1UL << bit;
	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);
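/*
 * Illustration (not from the original source): a hypothetical caller. The
 * bit number counts from the least significant bit, matching the
 * "1UL << bit" shift above; the cr/bit values here are made up.
 */
#if 0	/* example only */
	smp_ctl_set_bit(0, 13);		/* set bit 13 of CR0 on every cpu */
	smp_ctl_clear_bit(0, 13);	/* and clear it again everywhere */
#endif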
/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.andvals[cr] = ~(1UL << bit);
	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);
#ifdef CONFIG_ZFCPDUMP

static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
{
	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return;
	if (cpu >= NR_CPUS) {
		pr_warning("CPU %i exceeds the maximum %i and is excluded from "
			   "the dump\n", cpu, NR_CPUS - 1);
		return;
	}
	zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL);
	while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy)
		cpu_relax();
	if (!zfcpdump_save_areas[cpu])
		return;
	memcpy_real(zfcpdump_save_areas[cpu],
		    (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
		    sizeof(struct save_area));
}

struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas);

#else

static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }

#endif /* CONFIG_ZFCPDUMP */
static int cpu_known(int cpu_id)
{
	int cpu;

	for_each_present_cpu(cpu) {
		if (__cpu_logical_map[cpu] == cpu_id)
			return 1;
	}
	return 0;
}
static int smp_rescan_cpus_sigp(cpumask_t avail)
{
	int cpu_id, logical_cpu;

	logical_cpu = cpumask_first(&avail);
	if (logical_cpu >= nr_cpu_ids)
		return 0;
	for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) {
		if (cpu_known(cpu_id))
			continue;
		__cpu_logical_map[logical_cpu] = cpu_id;
		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
		if (!cpu_stopped(logical_cpu))
			continue;
		cpu_set(logical_cpu, cpu_present_map);
		smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
		logical_cpu = cpumask_next(logical_cpu, &avail);
		if (logical_cpu >= nr_cpu_ids)
			break;
	}
	return 0;
}
static int smp_rescan_cpus_sclp(cpumask_t avail)
{
	struct sclp_cpu_info *info;
	int cpu_id, logical_cpu, cpu;
	int rc;

	logical_cpu = cpumask_first(&avail);
	if (logical_cpu >= nr_cpu_ids)
		return 0;
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	rc = sclp_get_cpu_info(info);
	if (rc)
		goto out;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
			continue;
		cpu_id = info->cpu[cpu].address;
		if (cpu_known(cpu_id))
			continue;
		__cpu_logical_map[logical_cpu] = cpu_id;
		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
		cpu_set(logical_cpu, cpu_present_map);
		if (cpu >= info->configured)
			smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
		else
			smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
		logical_cpu = cpumask_next(logical_cpu, &avail);
		if (logical_cpu >= nr_cpu_ids)
			break;
	}
out:
	kfree(info);
	return rc;
}
static int __smp_rescan_cpus(void)
{
	cpumask_t avail;

	cpus_xor(avail, cpu_possible_map, cpu_present_map);
	if (smp_use_sigp_detection)
		return smp_rescan_cpus_sigp(avail);
	else
		return smp_rescan_cpus_sclp(avail);
}
static void __init smp_detect_cpus(void)
{
	unsigned int cpu, c_cpus, s_cpus;
	struct sclp_cpu_info *info;
	u16 boot_cpu_addr, cpu_addr;

	c_cpus = 1;
	s_cpus = 0;
	boot_cpu_addr = __cpu_logical_map[0];
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		panic("smp_detect_cpus failed to allocate memory\n");
	/* Use sigp detection algorithm if sclp doesn't work. */
	if (sclp_get_cpu_info(info)) {
		smp_use_sigp_detection = 1;
		for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) {
			if (cpu == boot_cpu_addr)
				continue;
			if (!raw_cpu_stopped(cpu))
				continue;
			smp_get_save_area(c_cpus, cpu);
			c_cpus++;
		}
		goto out;
	}

	if (info->has_cpu_type) {
		for (cpu = 0; cpu < info->combined; cpu++) {
			if (info->cpu[cpu].address == boot_cpu_addr) {
				smp_cpu_type = info->cpu[cpu].type;
				break;
			}
		}
	}

	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
			continue;
		cpu_addr = info->cpu[cpu].address;
		if (cpu_addr == boot_cpu_addr)
			continue;
		if (!raw_cpu_stopped(cpu_addr)) {
			s_cpus++;
			continue;
		}
		smp_get_save_area(c_cpus, cpu_addr);
		c_cpus++;
	}
out:
	kfree(info);
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
	get_online_cpus();
	__smp_rescan_cpus();
	put_online_cpus();
}
/*
 * Activate a secondary processor.
 */
int __cpuinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	preempt_disable();
	/* Enable TOD clock interrupts on the secondary cpu. */
	init_cpu_timer();
	/* Enable cpu timer interrupts on the secondary cpu. */
	init_cpu_vtimer();
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();

	/* call cpu notifiers */
	notify_cpu_starting(smp_processor_id());
	/* Mark this cpu as online */
	ipi_call_lock();
	cpu_set(smp_processor_id(), cpu_online_map);
	ipi_call_unlock();
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info();
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}
static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 *  don't care about the psw and regs settings since we'll never
	 *  reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
}
static int __cpuinit smp_alloc_lowcore(int cpu)
{
	unsigned long async_stack, panic_stack;
	struct _lowcore *lowcore;

	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
	if (!lowcore)
		return -ENOMEM;
	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
	panic_stack = __get_free_page(GFP_KERNEL);
	if (!panic_stack || !async_stack)
		goto out;
	memcpy(lowcore, &S390_lowcore, 512);
	memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512);
	lowcore->async_stack = async_stack + ASYNC_SIZE;
	lowcore->panic_stack = panic_stack + PAGE_SIZE;

#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		unsigned long save_area;

		save_area = get_zeroed_page(GFP_KERNEL);
		if (!save_area)
			goto out;
		lowcore->extended_save_area_addr = (u32) save_area;
	}
#else
	if (vdso_alloc_per_cpu(cpu, lowcore))
		goto out;
#endif
	lowcore_ptr[cpu] = lowcore;
	return 0;

out:
	free_page(panic_stack);
	free_pages(async_stack, ASYNC_ORDER);
	free_pages((unsigned long) lowcore, LC_ORDER);
	return -ENOMEM;
}
static void smp_free_lowcore(int cpu)
{
	struct _lowcore *lowcore;

	lowcore = lowcore_ptr[cpu];
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		free_page((unsigned long) lowcore->extended_save_area_addr);
#else
	vdso_free_per_cpu(cpu, lowcore);
#endif
	free_page(lowcore->panic_stack - PAGE_SIZE);
	free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
	free_pages((unsigned long) lowcore, LC_ORDER);
	lowcore_ptr[cpu] = NULL;
}
/* Upping and downing of CPUs */
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct _lowcore *cpu_lowcore;
	struct task_struct *idle;
	struct stack_frame *sf;
	u32 lowcore;
	int ccode;

	if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
		return -EIO;
	if (smp_alloc_lowcore(cpu))
		return -ENOMEM;
	do {
		ccode = sigp(cpu, sigp_initial_cpu_reset);
		if (ccode == sigp_busy)
			udelay(10);
		if (ccode == sigp_not_operational)
			goto err_out;
	} while (ccode == sigp_busy);

	lowcore = (u32)(unsigned long)lowcore_ptr[cpu];
	while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
		udelay(10);

	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + THREAD_SIZE;
	cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
	atomic_inc(&init_mm.context.attach_count);
	asm volatile(
		"	stam	0,15,0(%0)"
		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_nr = cpu;
	cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
	cpu_lowcore->machine_flags = S390_lowcore.machine_flags;
	cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func;
	memcpy(cpu_lowcore->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       MAX_FACILITY_BIT/8);

	while (sigp(cpu, sigp_restart) == sigp_busy)
		udelay(10);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;

err_out:
	smp_free_lowcore(cpu);
	return -EIO;
}
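/*
 * Illustration (not from the original source): __cpu_up() is not called
 * directly; the generic hotplug core runs it via cpu_up(), for instance
 * when user space onlines a cpu:
 *
 *	echo 1 > /sys/devices/system/cpu/cpu1/online
 */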
static int __init setup_possible_cpus(char *s)
{
	int pcpus, cpu;

	pcpus = simple_strtoul(s, NULL, 0);
	init_cpu_possible(cpumask_of(0));
	for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);
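/*
 * Illustration (not from the original source): booting with
 *
 *	possible_cpus=4
 *
 * on the kernel command line marks cpus 0-3 as possible, capping how many
 * logical cpus can ever be brought online during this boot.
 */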
#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	struct ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	cpu_clear(cpu, cpu_online_map);

	/* Disable pfault pseudo page faults on this cpu. */
	pfault_fini();

	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

	/* disable all external interrupts */
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
				1 << 11 | 1 << 10 | 1 << 6 | 1 << 4);
	/* disable all I/O interrupts */
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
				1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
	/* disable most machine checks */
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
				 1 << 25 | 1 << 24);

	smp_ctl_bit_callback(&cr_parms);

	return 0;
}
void __cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!cpu_stopped(cpu))
		cpu_relax();
	while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy)
		udelay(10);
	smp_free_lowcore(cpu);
	atomic_dec(&init_mm.context.attach_count);
	pr_info("Processor %d stopped\n", cpu);
}

void cpu_die(void)
{
	idle_task_exit();
	while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
		cpu_relax();
	for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */
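/*
 * Illustration (not from the original source): the hotplug path above runs
 * when user space offlines a cpu; __cpu_disable() executes on the dying
 * cpu itself, __cpu_die() on the cpu performing the offline:
 *
 *	echo 0 > /sys/devices/system/cpu/cpu1/online
 */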
void __init smp_prepare_cpus(unsigned int max_cpus)
{
#ifndef CONFIG_64BIT
	unsigned long save_area = 0;
#endif
	unsigned long async_stack, panic_stack;
	struct _lowcore *lowcore;
	unsigned int cpu;

	smp_detect_cpus();

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");

	/* Reallocate current lowcore, but keep its contents. */
	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
	panic_stack = __get_free_page(GFP_KERNEL);
	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
	BUG_ON(!lowcore || !panic_stack || !async_stack);
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		save_area = get_zeroed_page(GFP_KERNEL);
#endif
	local_irq_disable();
	local_mcck_disable();
	lowcore_ptr[smp_processor_id()] = lowcore;
	*lowcore = S390_lowcore;
	lowcore->panic_stack = panic_stack + PAGE_SIZE;
	lowcore->async_stack = async_stack + ASYNC_SIZE;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		lowcore->extended_save_area_addr = (u32) save_area;
#endif
	set_prefix((u32)(unsigned long) lowcore);
	local_mcck_enable();
	local_irq_enable();
#ifdef CONFIG_64BIT
	if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))
		BUG();
#endif
	for_each_possible_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}
void __init smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	current_thread_info()->cpu = 0;
	cpu_set(0, cpu_present_map);
	cpu_set(0, cpu_online_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
	smp_cpu_state[0] = CPU_STATE_CONFIGURED;
	smp_cpu_polarization[0] = POLARIZATION_UNKNWN;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
	S390_lowcore.cpu_nr = 0;
	__cpu_logical_map[0] = stap();
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct sys_device *dev,
				  struct sysdev_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct sys_device *dev,
				   struct sysdev_attribute *attr,
				   const char *buf, size_t count)
{
	int cpu = dev->id;
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;

	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	if (cpu_online(cpu) || cpu == 0)
		goto out;
	rc = 0;
	switch (val) {
	case 0:
		if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
			rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
			if (!rc) {
				smp_cpu_state[cpu] = CPU_STATE_STANDBY;
				smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
			}
		}
		break;
	case 1:
		if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
			rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
			if (!rc) {
				smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
				smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
			}
		}
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
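/*
 * Illustration (not from the original source): the attribute above is
 * driven through sysfs. The cpu must be offline, and cpu 0 can never be
 * deconfigured here:
 *
 *	echo 0 > /sys/devices/system/cpu/cpu2/configure
 *	echo 1 > /sys/devices/system/cpu/cpu2/configure
 */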
#endif /* CONFIG_HOTPLUG_CPU */
static ssize_t cpu_polarization_show(struct sys_device *dev,
				     struct sysdev_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (smp_cpu_polarization[cpu]) {
	case POLARIZATION_HRZ:
		count = sprintf(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sprintf(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sprintf(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sprintf(buf, "vertical:high\n");
		break;
	default:
		count = sprintf(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL);

static ssize_t show_cpu_address(struct sys_device *dev,
				struct sysdev_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
}
static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);
static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&attr_configure.attr,
#endif
	&attr_address.attr,
	&attr_polarization.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};
static ssize_t show_capability(struct sys_device *dev,
			       struct sysdev_attribute *attr, char *buf)
{
	unsigned int capability;
	int rc;

	rc = get_cpu_capability(&capability);
	if (rc)
		return rc;
	return sprintf(buf, "%u\n", capability);
}
static SYSDEV_ATTR(capability, 0444, show_capability, NULL);
static ssize_t show_idle_count(struct sys_device *dev,
			       struct sysdev_attribute *attr, char *buf)
{
	struct s390_idle_data *idle;
	unsigned long long idle_count;
	unsigned int sequence;

	idle = &per_cpu(s390_idle, dev->id);
repeat:
	sequence = idle->sequence;
	smp_rmb(); /* Read sequence before idle time values */
	if (sequence & 1)
		goto repeat;
	idle_count = idle->idle_count;
	if (idle->idle_enter)
		idle_count++;
	smp_rmb(); /* Read idle time values before sequence */
	if (idle->sequence != sequence)
		goto repeat;
	return sprintf(buf, "%llu\n", idle_count);
}
static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);
static ssize_t show_idle_time(struct sys_device *dev,
			      struct sysdev_attribute *attr, char *buf)
{
	struct s390_idle_data *idle;
	unsigned long long now, idle_time, idle_enter;
	unsigned int sequence;

	idle = &per_cpu(s390_idle, dev->id);
	now = get_clock();
repeat:
	sequence = idle->sequence;
	smp_rmb(); /* Read sequence before idle time values */
	if (sequence & 1)
		goto repeat;
	idle_time = idle->idle_time;
	idle_enter = idle->idle_enter;
	if (idle_enter != 0ULL && idle_enter < now)
		idle_time += now - idle_enter;
	smp_rmb(); /* Read idle time values before sequence */
	if (idle->sequence != sequence)
		goto repeat;
	return sprintf(buf, "%llu\n", idle_time >> 12); /* TOD units -> us */
}
static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
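/*
 * Both readers above use the same lockless protocol: an even ->sequence
 * means the data is stable, an odd one means an update is in flight. A
 * simplified, hypothetical writer side (the real one lives in the idle
 * accounting code, not in this file) would look like:
 */
#if 0	/* example only */
	idle->sequence++;		/* odd: update in progress */
	smp_wmb();
	idle->idle_time += idle_delta;	/* idle_delta is hypothetical */
	smp_wmb();
	idle->sequence++;		/* even again: readers may proceed */
#endif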
static struct attribute *cpu_online_attrs[] = {
	&attr_capability.attr,
	&attr_idle_count.attr,
	&attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};
static int __cpuinit smp_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	struct s390_idle_data *idle;
	int err = 0;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		idle = &per_cpu(s390_idle, cpu);
		memset(idle, 0, sizeof(struct s390_idle_data));
		err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block __cpuinitdata smp_cpu_nb = {
	.notifier_call = smp_cpu_notify,
};
static int __devinit smp_add_present_cpu(int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	int rc;

	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	if (!cpu_online(cpu))
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
	if (!rc)
		return 0;
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}
#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)
{
	cpumask_t newcpus;
	int cpu;
	int rc;

	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	newcpus = cpu_present_map;
	rc = __smp_rescan_cpus();
	if (rc)
		goto out;
	cpus_andnot(newcpus, cpu_present_map, newcpus);
	for_each_cpu_mask(cpu, newcpus) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			cpu_clear(cpu, cpu_present_map);
	}
	rc = 0;
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	if (!cpus_empty(newcpus))
		topology_schedule_update();
	return rc;
}
static ssize_t __ref rescan_store(struct sysdev_class *class,
				  struct sysdev_class_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = smp_rescan_cpus();
	return rc ? rc : count;
}
static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store);
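/*
 * Illustration (not from the original source): a rescan is triggered from
 * user space by writing to the class attribute; the written value itself
 * is ignored, the write is what invokes smp_rescan_cpus():
 *
 *	echo 1 > /sys/devices/system/cpu/rescan
 */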
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t dispatching_show(struct sysdev_class *class,
				struct sysdev_class_attribute *attr,
				char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t dispatching_store(struct sysdev_class *dev,
				 struct sysdev_class_attribute *attr,
				 const char *buf,
				 size_t count)
{
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = 0;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	if (cpu_management == val)
		goto out;
	rc = topology_set_cpu_management(val);
	if (!rc)
		cpu_management = val;
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show,
			 dispatching_store);
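/*
 * Illustration (not from the original source): cpu dispatching mode is
 * switched machine-wide through this class attribute; 0 requests
 * horizontal and 1 vertical cpu polarization:
 *
 *	echo 1 > /sys/devices/system/cpu/dispatching
 */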
static int __init topology_init(void)
{
	int cpu;
	int rc;

	register_cpu_notifier(&smp_cpu_nb);

#ifdef CONFIG_HOTPLUG_CPU
	rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_rescan);
	if (rc)
		return rc;
#endif
	rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching);
	if (rc)
		return rc;
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			return rc;
	}
	return 0;
}
subsys_initcall(topology_init);