arch/x86/kernel/setup_percpu.c
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

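/*
 * Each CPU's own number, kept in its percpu area; on x86,
 * raw_smp_processor_id() reads this variable.
 */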
DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

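/*
 * Until the real percpu areas are allocated below, all CPUs share the
 * initial percpu section.  On 64-bit, static percpu variables have
 * zero-based addresses, so the boot-time offset must point at
 * __per_cpu_load, where that section was loaded; on 32-bit they are
 * linked at their final addresses, so an offset of 0 works.
 */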
#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
        [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);

/*
 * On x86_64, symbols referenced from code must be reachable with
 * 32-bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk, which
 * is located at the percpu segment base.  On x86_32, any address is
 * reachable from anywhere, so there is no need to reserve space in
 * the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE      PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE      0
#endif

#ifdef CONFIG_X86_32
/**
 * pcpu_need_numa - determine whether percpu allocation should consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
        pg_data_t *last = NULL;
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                int node = early_cpu_to_node(cpu);

                if (node_online(node) && NODE_DATA(node) &&
                    last && last != NODE_DATA(node))
                        return true;

                last = NODE_DATA(node);
        }
#endif
        return false;
}
#endif

/**
 * pcpu_alloc_bootmem - NUMA-friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size of the allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
                                        unsigned long align)
{
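        /*
         * Ask the bootmem allocator to place the area at or above
         * MAX_DMA_ADDRESS; the goal is a preference, not a hard limit,
         * so low memory is only used as a fallback.
         */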
        const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
        int node = early_cpu_to_node(cpu);
        void *ptr;

        if (!node_online(node) || !NODE_DATA(node)) {
                ptr = __alloc_bootmem_nopanic(size, align, goal);
                pr_info("cpu %d has no node %d or node-local memory\n",
                        cpu, node);
                pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
                         cpu, size, __pa(ptr));
        } else {
                ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
                                                   size, align, goal);
                pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
                         cpu, size, node, __pa(ptr));
        }
        return ptr;
#else
        return __alloc_bootmem_nopanic(size, align, goal);
#endif
}

/*
 * Helpers for first chunk memory allocation
 */
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
        return pcpu_alloc_bootmem(cpu, size, align);
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
        free_bootmem(__pa(ptr), size);
}

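/*
 * Distance callback for pcpu_embed_first_chunk(): CPUs on the same
 * NUMA node are treated as local and can share an allocation group,
 * CPUs on different nodes as remote.
 */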
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
        if (early_cpu_to_node(from) == early_cpu_to_node(to))
                return LOCAL_DISTANCE;
        else
                return REMOTE_DISTANCE;
#else
        return LOCAL_DISTANCE;
#endif
}

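/*
 * PTE-population callback for pcpu_page_first_chunk(): make sure page
 * tables exist for @addr in the percpu mapping before pages are
 * mapped there.
 */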
static void __init pcpup_populate_pte(unsigned long addr)
{
        populate_extra_pte(addr);
}

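/*
 * On 32-bit, percpu data is reached through a dedicated GDT segment
 * whose base is the CPU's percpu offset; 64-bit addresses percpu data
 * through MSR_GS_BASE instead, so there is nothing to do here.
 */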
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
        struct desc_struct gdt;

        pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
                        0x2 | DESCTYPE_S, 0x8);
        gdt.s = 1;
        write_gdt_entry(get_cpu_gdt_table(cpu),
                        GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}

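/*
 * Allocate the real percpu first chunk and switch each CPU's offset
 * bookkeeping from the boot-time defaults above to the new areas.
 * Runs once during early boot, before any other CPU is brought up.
 */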
void __init setup_per_cpu_areas(void)
{
        unsigned int cpu;
        unsigned long delta;
        int rc;

        pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
                NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

        /*
         * Allocate the percpu area.  The embedding allocator is
         * preferred; however, on NUMA configurations it can result in
         * a very sparse unit mapping, and the vmalloc area isn't
         * spacious enough on 32-bit.  Use the page allocator in that
         * case.
         */
#ifdef CONFIG_X86_32
        if (pcpu_chosen_fc == PCPU_FC_AUTO && pcpu_need_numa())
                pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
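        /*
         * Start from a failing rc so the page allocator below runs
         * both when the embed attempt fails and when it is skipped.
         */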
        rc = -EINVAL;
        if (pcpu_chosen_fc != PCPU_FC_PAGE) {
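                /*
                 * With PSE, a PMD-sized atom lets the first chunk be
                 * served by large pages.  On 64-bit the module reserve
                 * doubles as the first chunk reserve, so dyn_size
                 * works out to PERCPU_DYNAMIC_RESERVE; on 32-bit the
                 * module reserve is folded into the dynamic area
                 * instead.
                 */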
                const size_t atom_size = cpu_has_pse ? PMD_SIZE : PAGE_SIZE;
                const size_t dyn_size = PERCPU_MODULE_RESERVE +
                        PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;

                rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
                                            dyn_size, atom_size,
                                            pcpu_cpu_distance,
                                            pcpu_fc_alloc, pcpu_fc_free);
                if (rc < 0)
                        pr_warning("%s allocator failed (%d), falling back to page size\n",
                                   pcpu_fc_names[pcpu_chosen_fc], rc);
        }
        if (rc < 0)
                rc = pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
                                           pcpu_fc_alloc, pcpu_fc_free,
                                           pcpup_populate_pte);
        if (rc < 0)
                panic("cannot initialize percpu area (err=%d)", rc);

        /* alrighty, percpu areas up and running */
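        /*
         * delta converts a static percpu symbol address into its
         * address in the newly allocated first chunk; each CPU's
         * offset then adds its own unit offset on top.
         */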
        delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
        for_each_possible_cpu(cpu) {
                per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
                per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
                per_cpu(cpu_number, cpu) = cpu;
                setup_percpu_segment(cpu);
                setup_stack_canary_segment(cpu);
                /*
                 * Copy data used in early init routines from the
                 * initial arrays to the per cpu data areas.  These
                 * arrays then become expendable and the *_early_ptrs
                 * are zeroed, indicating that the static arrays are
                 * gone.
                 */
#ifdef CONFIG_X86_LOCAL_APIC
                per_cpu(x86_cpu_to_apicid, cpu) =
                        early_per_cpu_map(x86_cpu_to_apicid, cpu);
                per_cpu(x86_bios_cpu_apicid, cpu) =
                        early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
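                /*
                 * Point irq_stack_ptr 64 bytes below the top of the
                 * per-cpu IRQ stack, leaving a little slack above it.
                 */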
                per_cpu(irq_stack_ptr, cpu) =
                        per_cpu(irq_stack_union.irq_stack, cpu) +
                        IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
                per_cpu(x86_cpu_to_node_map, cpu) =
                        early_per_cpu_map(x86_cpu_to_node_map, cpu);
                /*
                 * Ensure that the boot cpu numa_node is correct when
                 * the boot cpu is on a node that doesn't have memory
                 * installed.  Also, cpu_up() will call cpu_to_node()
                 * for APs when MEMORY_HOTPLUG is defined, before
                 * per_cpu(numa_node) is set up later by c_init
                 * (i.e. intel_init/amd_init).  So set it for all of
                 * them (boot cpu and all APs).
                 */
                set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
#endif
#endif
                /*
                 * Up to this point, the boot CPU has been using the
                 * .init.data area.  Reload any changed state for the
                 * boot CPU.
                 */
                if (!cpu)
                        switch_to_new_gdt(cpu);
        }

        /* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
        early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
        early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
        early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

        /* Setup node to cpumask map */
        setup_node_to_cpumask_map();

        /* Setup cpu initialized, callin, callout masks */
        setup_cpu_local_masks();
}