/*
 * This file contains Xilinx specific SMP code, used to start up
 * the second processor.
 *
 * Copyright (C) 2011-2013 Xilinx
 *
 * based on linux/arch/arm/mach-realview/platsmp.c
 *
 * Copyright (C) 2002 ARM Ltd.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/smp_scu.h>
#include <linux/irqchip/arm-gic.h>
#include "common.h"

/*
 * Store the number of cores in the system.
 * scu_get_core_count() is in the __init section, so it cannot be
 * called from zynq_cpun_start(), which is not __init.
 */
static int ncores;

int zynq_cpun_start(u32 address, int cpu)
{
        u32 trampoline_code_size = &zynq_secondary_trampoline_end -
                                                &zynq_secondary_trampoline;

        /* MS: The SLCR registers are expected to be directly mapped and accessible */
        /* It is not possible to jump to a non-aligned address */
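        /* The target must also be 0x0 itself or lie beyond the trampoline copied to 0x0 */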
        if (!(address & 3) && (!address || (address >= trampoline_code_size))) {
                /* Pointer to an ioremap'd area that maps physical address 0x0 */
                static u8 __iomem *zero;
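                /* Offset of the jump-address word within the trampoline */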
                u32 trampoline_size = &zynq_secondary_trampoline_jump -
                                                &zynq_secondary_trampoline;

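                /* Stop the CPU via the SLCR while the boot address is installed */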
                zynq_slcr_cpu_stop(cpu);
                if (address) {
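                        /*
                         * If physical address 0x0 is not covered by the
                         * kernel linear mapping, remap it; otherwise it is
                         * already visible at PAGE_OFFSET.
                         */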
                        if (__pa(PAGE_OFFSET)) {
                                zero = ioremap(0, trampoline_code_size);
                                if (!zero) {
                                        pr_warn("BOOTUP jump vectors not accessible\n");
                                        return -1;
                                }
                        } else {
                                zero = (__force u8 __iomem *)PAGE_OFFSET;
                        }

                        /*
                         * This is an elegant way to jump to any address:
                         * 0x0: load the address stored at 0x8 into r0
                         * 0x4: jump to it via a mov instruction
                         * 0x8: the jump address itself
                         */
                        memcpy((__force void *)zero, &zynq_secondary_trampoline,
                                                        trampoline_size);
                        writel(address, zero + trampoline_size);

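                        /*
                         * Make sure the trampoline reaches memory before the
                         * secondary CPU, which starts with its caches off, is
                         * released.
                         */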
                        flush_cache_all();
                        outer_flush_range(0, trampoline_code_size);
                        smp_wmb();

                        if (__pa(PAGE_OFFSET))
                                iounmap(zero);
                }
                zynq_slcr_cpu_start(cpu);

                return 0;
        }

        pr_warn("Can't start CPU%d: Wrong starting address %x\n", cpu, address);

        return -1;
}
EXPORT_SYMBOL(zynq_cpun_start);

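/*
 * Boot a secondary CPU: point it at the physical address of
 * zynq_secondary_startup and release it.
 */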
static int zynq_boot_secondary(unsigned int cpu,
                                                struct task_struct *idle)
{
        return zynq_cpun_start(virt_to_phys(zynq_secondary_startup), cpu);
}

/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
static void __init zynq_smp_init_cpus(void)
{
        int i;

        ncores = scu_get_core_count(zynq_scu_base);

        for (i = 0; i < ncores && i < CONFIG_NR_CPUS; i++)
                set_cpu_possible(i, true);
}

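/* Enable the SCU before any secondary CPU is started */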
static void __init zynq_smp_prepare_cpus(unsigned int max_cpus)
{
        scu_enable(zynq_scu_base);
}

#ifdef CONFIG_HOTPLUG_CPU
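/* Stop the dying CPU via the SLCR; returning 1 reports success to the core SMP code */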
static int zynq_cpu_kill(unsigned int cpu)
{
        zynq_slcr_cpu_stop(cpu);
        return 1;
}
#endif

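/* SMP operations used by the ARM SMP core to bring CPUs up and, with hotplug, down */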
struct smp_operations zynq_smp_ops __initdata = {
        .smp_init_cpus          = zynq_smp_init_cpus,
        .smp_prepare_cpus       = zynq_smp_prepare_cpus,
        .smp_boot_secondary     = zynq_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
        .cpu_die                = zynq_platform_cpu_die,
        .cpu_kill               = zynq_cpu_kill,
#endif
};