[pandora-kernel.git] include/linux/percpu.h
#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/mmdebug.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/pfn.h>
#include <linux/init.h>

#include <asm/percpu.h>

/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE           (8 << 10)
#else
#define PERCPU_MODULE_RESERVE           0
#endif

#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM                                              \
        (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +      \
         PERCPU_MODULE_RESERVE)
#endif

/* minimum unit size; also the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE              PFN_ALIGN(32 << 10)

/*
 * The percpu allocator can serve percpu allocations before slab is
 * initialized, which allows slab to depend on the percpu allocator.
 * The following two parameters decide how much memory to preallocate
 * for this.  Keep PERCPU_DYNAMIC_RESERVE equal to or larger than
 * PERCPU_DYNAMIC_EARLY_SIZE.
 */
#define PERCPU_DYNAMIC_EARLY_SLOTS      128
#define PERCPU_DYNAMIC_EARLY_SIZE       (12 << 10)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk for dynamic percpu allocation if the arch is
 * manually allocating and mapping it for faster access (as part of a
 * large page mapping, for example).
 *
 * The following values give between one and two pages of free space
 * after a typical minimal boot (2-way SMP, single disk and NIC) with
 * both defconfig and a distro config on x86_64 and x86_32.  A more
 * intelligent way to determine this would be nice.
 */
#if BITS_PER_LONG > 32
#define PERCPU_DYNAMIC_RESERVE          (20 << 10)
#else
#define PERCPU_DYNAMIC_RESERVE          (12 << 10)
#endif

extern void *pcpu_base_addr;
extern const unsigned long *pcpu_unit_offsets;

struct pcpu_group_info {
        int                     nr_units;       /* aligned # of units */
        unsigned long           base_offset;    /* base address offset */
        unsigned int            *cpu_map;       /* unit->cpu map, empty
                                                 * entries contain NR_CPUS */
};

struct pcpu_alloc_info {
        size_t                  static_size;
        size_t                  reserved_size;
        size_t                  dyn_size;
        size_t                  unit_size;
        size_t                  atom_size;
        size_t                  alloc_size;
        size_t                  __ai_size;      /* internal, don't use */
        int                     nr_groups;      /* 0 if grouping unnecessary */
        struct pcpu_group_info  groups[];
};
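
/*
 * Illustrative sketch (not part of the original header): how the
 * first-chunk layout described by pcpu_alloc_info maps units to CPUs.
 * Each group occupies nr_units * unit_size bytes starting at
 * base_offset from the chunk base; cpu_map[] slots holding NR_CPUS are
 * padding units with no CPU behind them.  With a hypothetical "ai"
 * built by the arch, a walk over the map could look like:
 *
 *      const struct pcpu_alloc_info *ai;       // hypothetical, arch-built
 *      int g, u;
 *
 *      for (g = 0; g < ai->nr_groups; g++) {
 *              const struct pcpu_group_info *gi = &ai->groups[g];
 *
 *              for (u = 0; u < gi->nr_units; u++) {
 *                      if (gi->cpu_map[u] == NR_CPUS)
 *                              continue;       // unused padding unit
 *                      // CPU gi->cpu_map[u]'s unit starts at
 *                      // base + gi->base_offset + u * ai->unit_size
 *              }
 *      }
 */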

enum pcpu_fc {
        PCPU_FC_AUTO,
        PCPU_FC_EMBED,
        PCPU_FC_PAGE,

        PCPU_FC_NR,
};
extern const char * const pcpu_fc_names[PCPU_FC_NR];

extern enum pcpu_fc pcpu_chosen_fc;

typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
                                     size_t align);
typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);

extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
                                                             int nr_units);
extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);

extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
                                         void *base_addr);

#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
                                size_t atom_size,
                                pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
                                pcpu_fc_alloc_fn_t alloc_fn,
                                pcpu_fc_free_fn_t free_fn);
#endif
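
/*
 * Illustrative sketch, roughly mirroring the generic setup_per_cpu_areas()
 * in mm/percpu.c (the my_fc_* callback names below are placeholders): the
 * arch embeds the first chunk in its linear mapping via
 * pcpu_embed_first_chunk() and then derives the per-cpu offsets from
 * pcpu_base_addr and pcpu_unit_offsets[]:
 *
 *      rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *                                  PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
 *                                  NULL,          // no NUMA distance info
 *                                  my_fc_alloc,   // early/bootmem-backed alloc
 *                                  my_fc_free);   // matching free
 *      if (rc < 0)
 *              panic("Failed to initialize percpu areas.");
 *
 *      delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
 *      for_each_possible_cpu(cpu)
 *              __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
 */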

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
extern int __init pcpu_page_first_chunk(size_t reserved_size,
                                pcpu_fc_alloc_fn_t alloc_fn,
                                pcpu_fc_free_fn_t free_fn,
                                pcpu_fc_populate_pte_fn_t populate_pte_fn);
#endif
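
/*
 * Illustrative sketch (placeholder callback names): when the first chunk
 * cannot simply be embedded in the linear mapping, an arch can build it
 * from individual pages instead.  The populate_pte callback must make
 * sure page tables exist for the given chunk address before the
 * allocator maps a backing page there:
 *
 *      rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
 *                                 my_fc_alloc,        // page-sized early alloc
 *                                 my_fc_free,
 *                                 my_populate_pte);   // pre-fault page tables
 *      if (rc < 0)
 *              panic("Failed to initialize percpu areas.");
 */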

extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
extern bool is_kernel_percpu_address(unsigned long addr);

#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
extern void __init setup_per_cpu_areas(void);
#endif
extern void __init percpu_init_late(void);

extern void __percpu *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);

#define alloc_percpu(type)      \
        (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))

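/*
 * Illustrative sketch (not part of the original header): typical use of
 * the dynamic allocator.  alloc_percpu() hands back a __percpu cookie
 * which must be dereferenced through accessors such as per_cpu_ptr() or
 * the this_cpu_*() helpers (declared elsewhere in the percpu headers),
 * never directly, and released with free_percpu().  "struct my_counter"
 * is a hypothetical type:
 *
 *      struct my_counter { unsigned long hits; };
 *      struct my_counter __percpu *ctr;
 *      unsigned long total = 0;
 *      int cpu;
 *
 *      ctr = alloc_percpu(struct my_counter);
 *      if (!ctr)
 *              return -ENOMEM;
 *
 *      this_cpu_inc(ctr->hits);                // preempt-safe update of this CPU's copy
 *
 *      for_each_possible_cpu(cpu)              // e.g. when reading stats back
 *              total += per_cpu_ptr(ctr, cpu)->hits;
 *
 *      free_percpu(ctr);
 */
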
#endif /* __LINUX_PERCPU_H */