/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright IBM Corp. 1999, 2009
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *               Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (obtained from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the
 * physical one, which avoids the confusion that __cpu_logical_map and
 * cpu_number_map cause in other architectures.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/timex.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <asm/asm-offsets.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/cputime.h>
#include <asm/vdso.h>
#include <asm/cpu.h>
#include "entry.h"

/* logical cpu to cpu address */
unsigned short __cpu_logical_map[NR_CPUS];

static struct task_struct *current_set[NR_CPUS];

static u8 smp_cpu_type;
static int smp_use_sigp_detection;

enum s390_cpu_state {
        CPU_STATE_STANDBY,
        CPU_STATE_CONFIGURED,
};

DEFINE_MUTEX(smp_cpu_state_mutex);
int smp_cpu_polarization[NR_CPUS];
static int smp_cpu_state[NR_CPUS];
static int cpu_management;

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static void smp_ext_bitcall(int, int);

static int raw_cpu_stopped(int cpu)
{
        u32 status;

        switch (raw_sigp_ps(&status, 0, cpu, sigp_sense)) {
        case sigp_status_stored:
                /* Check for stopped (0x40) and check stop (0x10) state */
                if (status & 0x50)
                        return 1;
                break;
        default:
                break;
        }
        return 0;
}

static inline int cpu_stopped(int cpu)
{
        return raw_cpu_stopped(cpu_logical_map(cpu));
}

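/*
 * Switch to the IPL cpu (cpu 0) and run func(data) there. If we already
 * run on cpu 0, func is called directly. Otherwise the current cpu is
 * stopped with "stop and store status" and cpu 0 is restarted on its
 * panic stack, with a pt_regs frame rebuilt from the stored registers.
 */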
void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
{
        struct _lowcore *lc, *current_lc;
        struct stack_frame *sf;
        struct pt_regs *regs;
        unsigned long sp;

        if (smp_processor_id() == 0)
                func(data);
        __load_psw_mask(PSW_BASE_BITS | PSW_DEFAULT_KEY);
        /* Disable lowcore protection */
        __ctl_clear_bit(0, 28);
        current_lc = lowcore_ptr[smp_processor_id()];
        lc = lowcore_ptr[0];
        if (!lc)
                lc = current_lc;
        lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
        lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu;
        if (!cpu_online(0))
                smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]);
        while (sigp(0, sigp_stop_and_store_status) == sigp_busy)
                cpu_relax();
        sp = lc->panic_stack;
        sp -= sizeof(struct pt_regs);
        regs = (struct pt_regs *) sp;
        memcpy(&regs->gprs, &current_lc->gpregs_save_area, sizeof(regs->gprs));
        regs->psw = lc->psw_save_area;
        sp -= STACK_FRAME_OVERHEAD;
        sf = (struct stack_frame *) sp;
        sf->back_chain = regs->gprs[15];
        smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]);
}

void smp_send_stop(void)
{
        int cpu, rc;

        /* Disable all interrupts/machine checks */
        __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
        trace_hardirqs_off();

        /* stop all processors */
        for_each_online_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;
                do {
                        rc = sigp(cpu, sigp_stop);
                } while (rc == sigp_busy);

                while (!cpu_stopped(cpu))
                        cpu_relax();
        }
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */

static void do_ext_call_interrupt(__u16 code)
{
        unsigned long bits;

        /*
         * handle bit signal external calls
         *
         * For the ec_schedule signal we have to do nothing. All the work
         * is done automatically when we return from the interrupt.
         */
        bits = xchg(&S390_lowcore.ext_call_fast, 0);

        if (test_bit(ec_call_function, &bits))
                generic_smp_call_function_interrupt();

        if (test_bit(ec_call_function_single, &bits))
                generic_smp_call_function_single_interrupt();
}

/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
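 * The emergency-signal order raises external interrupt 0x1201 on the
 * target cpu, which is handled by do_ext_call_interrupt().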
 */
static void smp_ext_bitcall(int cpu, int sig)
{
        /*
         * Set signaling bit in lowcore of target cpu and kick it
         */
        set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
        while (sigp(cpu, sigp_emergency_signal) == sigp_busy)
                udelay(10);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        int cpu;

        for_each_cpu(cpu, mask)
                smp_ext_bitcall(cpu, ec_call_function);
}

void arch_send_call_function_single_ipi(int cpu)
{
        smp_ext_bitcall(cpu, ec_call_function_single);
}

#ifndef CONFIG_64BIT
/*
 * Flush the local TLB; smp_ptlb_all() below runs this on every CPU.
 */
static void smp_ptlb_callback(void *info)
{
        __tlb_flush_local();
}

void smp_ptlb_all(void)
{
        on_each_cpu(smp_ptlb_callback, NULL, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
        smp_ext_bitcall(cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
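 * Each control register cr is updated by smp_ctl_bit_callback() as:
 * new = (old & andvals[cr]) | orvals[cr]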
 */
struct ec_creg_mask_parms {
        unsigned long orvals[16];
        unsigned long andvals[16];
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
        struct ec_creg_mask_parms *pp = info;
        unsigned long cregs[16];
        int i;

        __ctl_store(cregs, 0, 15);
        for (i = 0; i <= 15; i++)
                cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
        __ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
        struct ec_creg_mask_parms parms;

        memset(&parms.orvals, 0, sizeof(parms.orvals));
        memset(&parms.andvals, 0xff, sizeof(parms.andvals));
        parms.orvals[cr] = 1 << bit;
        on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
        struct ec_creg_mask_parms parms;

        memset(&parms.orvals, 0, sizeof(parms.orvals));
        memset(&parms.andvals, 0xff, sizeof(parms.andvals));
        parms.andvals[cr] = ~(1L << bit);
        on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);

#ifdef CONFIG_ZFCPDUMP

static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
{
        if (ipl_info.type != IPL_TYPE_FCP_DUMP)
                return;
        if (cpu >= NR_CPUS) {
                pr_warning("CPU %i exceeds the maximum %i and is excluded from "
                           "the dump\n", cpu, NR_CPUS - 1);
                return;
        }
        zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL);
        while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy)
                cpu_relax();
        memcpy_real(zfcpdump_save_areas[cpu],
                    (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
                    sizeof(struct save_area));
}

struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas);

#else

static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }

#endif /* CONFIG_ZFCPDUMP */

static int cpu_known(int cpu_id)
{
        int cpu;

        for_each_present_cpu(cpu) {
                if (__cpu_logical_map[cpu] == cpu_id)
                        return 1;
        }
        return 0;
}

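/*
 * Detect new cpus by brute force: probe every possible cpu address with
 * sigp sense and add each stopped, previously unknown cpu as present
 * and configured.
 */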
static int smp_rescan_cpus_sigp(cpumask_t avail)
{
        int cpu_id, logical_cpu;

        logical_cpu = cpumask_first(&avail);
        if (logical_cpu >= nr_cpu_ids)
                return 0;
        for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) {
                if (cpu_known(cpu_id))
                        continue;
                __cpu_logical_map[logical_cpu] = cpu_id;
                smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
                if (!cpu_stopped(logical_cpu))
                        continue;
                cpu_set(logical_cpu, cpu_present_map);
                smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
                logical_cpu = cpumask_next(logical_cpu, &avail);
                if (logical_cpu >= nr_cpu_ids)
                        break;
        }
        return 0;
}

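/*
 * Detect new cpus via the SCLP interface: query the service processor
 * for the cpu list and add each previously unknown cpu as present, in
 * configured or standby state as reported.
 */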
static int smp_rescan_cpus_sclp(cpumask_t avail)
{
        struct sclp_cpu_info *info;
        int cpu_id, logical_cpu, cpu;
        int rc;

        logical_cpu = cpumask_first(&avail);
        if (logical_cpu >= nr_cpu_ids)
                return 0;
        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
        rc = sclp_get_cpu_info(info);
        if (rc)
                goto out;
        for (cpu = 0; cpu < info->combined; cpu++) {
                if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
                        continue;
                cpu_id = info->cpu[cpu].address;
                if (cpu_known(cpu_id))
                        continue;
                __cpu_logical_map[logical_cpu] = cpu_id;
                smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
                cpu_set(logical_cpu, cpu_present_map);
                if (cpu >= info->configured)
                        smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
                else
                        smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
                logical_cpu = cpumask_next(logical_cpu, &avail);
                if (logical_cpu >= nr_cpu_ids)
                        break;
        }
out:
        kfree(info);
        return rc;
}

static int __smp_rescan_cpus(void)
{
        cpumask_t avail;

        cpus_xor(avail, cpu_possible_map, cpu_present_map);
        if (smp_use_sigp_detection)
                return smp_rescan_cpus_sigp(avail);
        else
                return smp_rescan_cpus_sclp(avail);
}

static void __init smp_detect_cpus(void)
{
        unsigned int cpu, c_cpus, s_cpus;
        struct sclp_cpu_info *info;
        u16 boot_cpu_addr, cpu_addr;

        c_cpus = 1;
        s_cpus = 0;
        boot_cpu_addr = __cpu_logical_map[0];
        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                panic("smp_detect_cpus failed to allocate memory\n");
        /* Use sigp detection algorithm if sclp doesn't work. */
        if (sclp_get_cpu_info(info)) {
                smp_use_sigp_detection = 1;
                for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) {
                        if (cpu == boot_cpu_addr)
                                continue;
                        if (!raw_cpu_stopped(cpu))
                                continue;
                        smp_get_save_area(c_cpus, cpu);
                        c_cpus++;
                }
                goto out;
        }

        if (info->has_cpu_type) {
                for (cpu = 0; cpu < info->combined; cpu++) {
                        if (info->cpu[cpu].address == boot_cpu_addr) {
                                smp_cpu_type = info->cpu[cpu].type;
                                break;
                        }
                }
        }

        for (cpu = 0; cpu < info->combined; cpu++) {
                if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
                        continue;
                cpu_addr = info->cpu[cpu].address;
                if (cpu_addr == boot_cpu_addr)
                        continue;
                if (!raw_cpu_stopped(cpu_addr)) {
                        s_cpus++;
                        continue;
                }
                smp_get_save_area(c_cpus, cpu_addr);
                c_cpus++;
        }
out:
        kfree(info);
        pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
        get_online_cpus();
        __smp_rescan_cpus();
        put_online_cpus();
}

/*
 *      Activate a secondary processor.
 */
int __cpuinit start_secondary(void *cpuvoid)
{
        /* Setup the cpu */
        cpu_init();
        preempt_disable();
        /* Enable TOD clock interrupts on the secondary cpu. */
        init_cpu_timer();
        /* Enable cpu timer interrupts on the secondary cpu. */
        init_cpu_vtimer();
        /* Enable pfault pseudo page faults on this cpu. */
        pfault_init();

        /* call cpu notifiers */
        notify_cpu_starting(smp_processor_id());
        /* Mark this cpu as online */
        ipi_call_lock();
        cpu_set(smp_processor_id(), cpu_online_map);
        ipi_call_unlock();
        /* Switch on interrupts */
        local_irq_enable();
        /* Print info about this processor */
        print_cpu_info();
        /* cpu_idle will call schedule for us */
        cpu_idle();
        return 0;
}

static void __init smp_create_idle(unsigned int cpu)
{
        struct task_struct *p;

        /*
         *  don't care about the psw and regs settings since we'll never
         *  reschedule the forked task.
         */
        p = fork_idle(cpu);
        if (IS_ERR(p))
                panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
        current_set[cpu] = p;
}

static int __cpuinit smp_alloc_lowcore(int cpu)
{
        unsigned long async_stack, panic_stack;
        struct _lowcore *lowcore;

        lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
        if (!lowcore)
                return -ENOMEM;
        async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
        panic_stack = __get_free_page(GFP_KERNEL);
        if (!panic_stack || !async_stack)
                goto out;
        memcpy(lowcore, &S390_lowcore, 512);
        memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512);
        lowcore->async_stack = async_stack + ASYNC_SIZE;
        lowcore->panic_stack = panic_stack + PAGE_SIZE;

#ifndef CONFIG_64BIT
        if (MACHINE_HAS_IEEE) {
                unsigned long save_area;

                save_area = get_zeroed_page(GFP_KERNEL);
                if (!save_area)
                        goto out;
                lowcore->extended_save_area_addr = (u32) save_area;
        }
#else
        if (vdso_alloc_per_cpu(cpu, lowcore))
                goto out;
#endif
        lowcore_ptr[cpu] = lowcore;
        return 0;

out:
        free_page(panic_stack);
        free_pages(async_stack, ASYNC_ORDER);
        free_pages((unsigned long) lowcore, LC_ORDER);
        return -ENOMEM;
}

static void smp_free_lowcore(int cpu)
{
        struct _lowcore *lowcore;

        lowcore = lowcore_ptr[cpu];
#ifndef CONFIG_64BIT
        if (MACHINE_HAS_IEEE)
                free_page((unsigned long) lowcore->extended_save_area_addr);
#else
        vdso_free_per_cpu(cpu, lowcore);
#endif
        free_page(lowcore->panic_stack - PAGE_SIZE);
        free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
        free_pages((unsigned long) lowcore, LC_ORDER);
        lowcore_ptr[cpu] = NULL;
}

/* Upping and downing of CPUs */
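/*
 * Bring a secondary cpu online: perform an initial cpu reset, point the
 * cpu's prefix register at its freshly allocated lowcore, seed that
 * lowcore with the idle task's kernel stack and the current control
 * registers, then sigp_restart the cpu and wait until it has marked
 * itself online in start_secondary().
 */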
int __cpuinit __cpu_up(unsigned int cpu)
{
        struct _lowcore *cpu_lowcore;
        struct task_struct *idle;
        struct stack_frame *sf;
        u32 lowcore;
        int ccode;

        if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
                return -EIO;
        if (smp_alloc_lowcore(cpu))
                return -ENOMEM;
        do {
                ccode = sigp(cpu, sigp_initial_cpu_reset);
                if (ccode == sigp_busy)
                        udelay(10);
                if (ccode == sigp_not_operational)
                        goto err_out;
        } while (ccode == sigp_busy);

        lowcore = (u32)(unsigned long)lowcore_ptr[cpu];
        while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
                udelay(10);

        idle = current_set[cpu];
        cpu_lowcore = lowcore_ptr[cpu];
        cpu_lowcore->kernel_stack = (unsigned long)
                task_stack_page(idle) + THREAD_SIZE;
        cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
        sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
                                     - sizeof(struct pt_regs)
                                     - sizeof(struct stack_frame));
        memset(sf, 0, sizeof(struct stack_frame));
        sf->gprs[9] = (unsigned long) sf;
        cpu_lowcore->save_area[15] = (unsigned long) sf;
        __ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
        atomic_inc(&init_mm.context.attach_count);
        asm volatile(
                "       stam    0,15,0(%0)"
                : : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
        cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
        cpu_lowcore->current_task = (unsigned long) idle;
        cpu_lowcore->cpu_nr = cpu;
        cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
        cpu_lowcore->machine_flags = S390_lowcore.machine_flags;
        cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func;
        eieio();

        while (sigp(cpu, sigp_restart) == sigp_busy)
                udelay(10);

        while (!cpu_online(cpu))
                cpu_relax();
        return 0;

err_out:
        smp_free_lowcore(cpu);
        return -EIO;
}

static int __init setup_possible_cpus(char *s)
{
        int pcpus, cpu;

        pcpus = simple_strtoul(s, NULL, 0);
        init_cpu_possible(cpumask_of(0));
        for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++)
                set_cpu_possible(cpu, true);
        return 0;
}
early_param("possible_cpus", setup_possible_cpus);

#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
        struct ec_creg_mask_parms cr_parms;
        int cpu = smp_processor_id();

        cpu_clear(cpu, cpu_online_map);

        /* Disable pfault pseudo page faults on this cpu. */
        pfault_fini();

        memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
        memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

        /* disable all external interrupts */
        cr_parms.orvals[0] = 0;
        cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
                                1 << 11 | 1 << 10 | 1 <<  6 | 1 <<  4);
        /* disable all I/O interrupts */
        cr_parms.orvals[6] = 0;
        cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
                                1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
        /* disable most machine checks */
        cr_parms.orvals[14] = 0;
        cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
                                 1 << 25 | 1 << 24);

        smp_ctl_bit_callback(&cr_parms);

        return 0;
}

void __cpu_die(unsigned int cpu)
{
        /* Wait until target cpu is down */
        while (!cpu_stopped(cpu))
                cpu_relax();
        while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy)
                udelay(10);
        smp_free_lowcore(cpu);
        atomic_dec(&init_mm.context.attach_count);
        pr_info("Processor %d stopped\n", cpu);
}

void cpu_die(void)
{
        idle_task_exit();
        while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
                cpu_relax();
        for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */

void __init smp_prepare_cpus(unsigned int max_cpus)
{
#ifndef CONFIG_64BIT
        unsigned long save_area = 0;
#endif
        unsigned long async_stack, panic_stack;
        struct _lowcore *lowcore;
        unsigned int cpu;

        smp_detect_cpus();

        /* request the 0x1201 emergency signal external interrupt */
        if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
                panic("Couldn't request external interrupt 0x1201");
        print_cpu_info();

        /* Reallocate current lowcore, but keep its contents. */
        lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
        panic_stack = __get_free_page(GFP_KERNEL);
        async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
        BUG_ON(!lowcore || !panic_stack || !async_stack);
#ifndef CONFIG_64BIT
        if (MACHINE_HAS_IEEE)
                save_area = get_zeroed_page(GFP_KERNEL);
#endif
        local_irq_disable();
        local_mcck_disable();
        lowcore_ptr[smp_processor_id()] = lowcore;
        *lowcore = S390_lowcore;
        lowcore->panic_stack = panic_stack + PAGE_SIZE;
        lowcore->async_stack = async_stack + ASYNC_SIZE;
#ifndef CONFIG_64BIT
        if (MACHINE_HAS_IEEE)
                lowcore->extended_save_area_addr = (u32) save_area;
#endif
        set_prefix((u32)(unsigned long) lowcore);
        local_mcck_enable();
        local_irq_enable();
#ifdef CONFIG_64BIT
        if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))
                BUG();
#endif
        for_each_possible_cpu(cpu)
                if (cpu != smp_processor_id())
                        smp_create_idle(cpu);
}

void __init smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != 0);

        current_thread_info()->cpu = 0;
        cpu_set(0, cpu_present_map);
        cpu_set(0, cpu_online_map);
        S390_lowcore.percpu_offset = __per_cpu_offset[0];
        current_set[0] = current;
        smp_cpu_state[0] = CPU_STATE_CONFIGURED;
        smp_cpu_polarization[0] = POLARIZATION_UNKNWN;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
        S390_lowcore.cpu_nr = 0;
        __cpu_logical_map[0] = stap();
}

/*
 * The frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * Usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct sys_device *dev,
                                struct sysdev_attribute *attr, char *buf)
{
        ssize_t count;

        mutex_lock(&smp_cpu_state_mutex);
        count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
        mutex_unlock(&smp_cpu_state_mutex);
        return count;
}

static ssize_t cpu_configure_store(struct sys_device *dev,
                                  struct sysdev_attribute *attr,
                                  const char *buf, size_t count)
{
        int cpu = dev->id;
        int val, rc;
        char delim;

        if (sscanf(buf, "%d %c", &val, &delim) != 1)
                return -EINVAL;
        if (val != 0 && val != 1)
                return -EINVAL;

        get_online_cpus();
        mutex_lock(&smp_cpu_state_mutex);
        rc = -EBUSY;
        /* disallow configuration changes of online cpus and cpu 0 */
        if (cpu_online(cpu) || cpu == 0)
                goto out;
        rc = 0;
        switch (val) {
        case 0:
                if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
                        rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
                        if (!rc) {
                                smp_cpu_state[cpu] = CPU_STATE_STANDBY;
                                smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
                        }
                }
                break;
        case 1:
                if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
                        rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
                        if (!rc) {
                                smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
                                smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
                        }
                }
                break;
        default:
                break;
        }
out:
        mutex_unlock(&smp_cpu_state_mutex);
        put_online_cpus();
        return rc ? rc : count;
}
static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t cpu_polarization_show(struct sys_device *dev,
                                     struct sysdev_attribute *attr, char *buf)
{
        int cpu = dev->id;
        ssize_t count;

        mutex_lock(&smp_cpu_state_mutex);
        switch (smp_cpu_polarization[cpu]) {
        case POLARIZATION_HRZ:
                count = sprintf(buf, "horizontal\n");
                break;
        case POLARIZATION_VL:
                count = sprintf(buf, "vertical:low\n");
                break;
        case POLARIZATION_VM:
                count = sprintf(buf, "vertical:medium\n");
                break;
        case POLARIZATION_VH:
                count = sprintf(buf, "vertical:high\n");
                break;
        default:
                count = sprintf(buf, "unknown\n");
                break;
        }
        mutex_unlock(&smp_cpu_state_mutex);
        return count;
}
static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL);

static ssize_t show_cpu_address(struct sys_device *dev,
                                struct sysdev_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
}
static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
        &attr_configure.attr,
#endif
        &attr_address.attr,
        &attr_polarization.attr,
        NULL,
};

static struct attribute_group cpu_common_attr_group = {
        .attrs = cpu_common_attrs,
};

static ssize_t show_capability(struct sys_device *dev,
                                struct sysdev_attribute *attr, char *buf)
{
        unsigned int capability;
        int rc;

        rc = get_cpu_capability(&capability);
        if (rc)
                return rc;
        return sprintf(buf, "%u\n", capability);
}
static SYSDEV_ATTR(capability, 0444, show_capability, NULL);

static ssize_t show_idle_count(struct sys_device *dev,
                                struct sysdev_attribute *attr, char *buf)
{
        struct s390_idle_data *idle;
        unsigned long long idle_count;
        unsigned int sequence;

        idle = &per_cpu(s390_idle, dev->id);
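        /*
         * idle->sequence acts like a seqcount: an odd value means an
         * update is in progress, so re-read until it is stable.
         */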
repeat:
        sequence = idle->sequence;
        smp_rmb();
        if (sequence & 1)
                goto repeat;
        idle_count = idle->idle_count;
        if (idle->idle_enter)
                idle_count++;
        smp_rmb();
        if (idle->sequence != sequence)
                goto repeat;
        return sprintf(buf, "%llu\n", idle_count);
}
static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);

static ssize_t show_idle_time(struct sys_device *dev,
                                struct sysdev_attribute *attr, char *buf)
{
        struct s390_idle_data *idle;
        unsigned long long now, idle_time, idle_enter;
        unsigned int sequence;

        idle = &per_cpu(s390_idle, dev->id);
        now = get_clock();
repeat:
        sequence = idle->sequence;
        smp_rmb();
        if (sequence & 1)
                goto repeat;
        idle_time = idle->idle_time;
        idle_enter = idle->idle_enter;
        if (idle_enter != 0ULL && idle_enter < now)
                idle_time += now - idle_enter;
        smp_rmb();
        if (idle->sequence != sequence)
                goto repeat;
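        /*
         * The TOD clock ticks 4096 times per microsecond, so shifting
         * by 12 converts the accumulated time to microseconds.
         */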
        return sprintf(buf, "%llu\n", idle_time >> 12);
}
static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);

static struct attribute *cpu_online_attrs[] = {
        &attr_capability.attr,
        &attr_idle_count.attr,
        &attr_idle_time_us.attr,
        NULL,
};

static struct attribute_group cpu_online_attr_group = {
        .attrs = cpu_online_attrs,
};

static int __cpuinit smp_cpu_notify(struct notifier_block *self,
                                    unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned int)(long)hcpu;
        struct cpu *c = &per_cpu(cpu_devices, cpu);
        struct sys_device *s = &c->sysdev;
        struct s390_idle_data *idle;
        int err = 0;

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                idle = &per_cpu(s390_idle, cpu);
                memset(idle, 0, sizeof(struct s390_idle_data));
                err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
                break;
        }
        return notifier_from_errno(err);
}

static struct notifier_block __cpuinitdata smp_cpu_nb = {
        .notifier_call = smp_cpu_notify,
};

static int __devinit smp_add_present_cpu(int cpu)
{
        struct cpu *c = &per_cpu(cpu_devices, cpu);
        struct sys_device *s = &c->sysdev;
        int rc;

        c->hotpluggable = 1;
        rc = register_cpu(c, cpu);
        if (rc)
                goto out;
        rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
        if (rc)
                goto out_cpu;
        if (!cpu_online(cpu))
                goto out;
        rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
        if (!rc)
                return 0;
        sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
        unregister_cpu(c);
#endif
out:
        return rc;
}

#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)
{
        cpumask_t newcpus;
        int cpu;
        int rc;

        get_online_cpus();
        mutex_lock(&smp_cpu_state_mutex);
        newcpus = cpu_present_map;
        rc = __smp_rescan_cpus();
        if (rc)
                goto out;
        cpus_andnot(newcpus, cpu_present_map, newcpus);
        for_each_cpu_mask(cpu, newcpus) {
                rc = smp_add_present_cpu(cpu);
                if (rc)
                        cpu_clear(cpu, cpu_present_map);
        }
        rc = 0;
out:
        mutex_unlock(&smp_cpu_state_mutex);
        put_online_cpus();
        if (!cpus_empty(newcpus))
                topology_schedule_update();
        return rc;
}

static ssize_t __ref rescan_store(struct sysdev_class *class,
                                  struct sysdev_class_attribute *attr,
                                  const char *buf,
                                  size_t count)
{
        int rc;

        rc = smp_rescan_cpus();
        return rc ? rc : count;
}
static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t dispatching_show(struct sysdev_class *class,
                                struct sysdev_class_attribute *attr,
                                char *buf)
{
        ssize_t count;

        mutex_lock(&smp_cpu_state_mutex);
        count = sprintf(buf, "%d\n", cpu_management);
        mutex_unlock(&smp_cpu_state_mutex);
        return count;
}

static ssize_t dispatching_store(struct sysdev_class *dev,
                                 struct sysdev_class_attribute *attr,
                                 const char *buf,
                                 size_t count)
{
        int val, rc;
        char delim;

        if (sscanf(buf, "%d %c", &val, &delim) != 1)
                return -EINVAL;
        if (val != 0 && val != 1)
                return -EINVAL;
        rc = 0;
        get_online_cpus();
        mutex_lock(&smp_cpu_state_mutex);
        if (cpu_management == val)
                goto out;
        rc = topology_set_cpu_management(val);
        if (!rc)
                cpu_management = val;
out:
        mutex_unlock(&smp_cpu_state_mutex);
        put_online_cpus();
        return rc ? rc : count;
}
static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show,
                         dispatching_store);

static int __init topology_init(void)
{
        int cpu;
        int rc;

        register_cpu_notifier(&smp_cpu_nb);

#ifdef CONFIG_HOTPLUG_CPU
        rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_rescan);
        if (rc)
                return rc;
#endif
        rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching);
        if (rc)
                return rc;
        for_each_present_cpu(cpu) {
                rc = smp_add_present_cpu(cpu);
                if (rc)
                        return rc;
        }
        return 0;
}
subsys_initcall(topology_init);