arch/ia64/kernel/topology.c (pandora-kernel.git)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This file contains NUMA-specific variables and functions which can
 * be split away from DISCONTIGMEM and are used on NUMA machines with
 * contiguous memory.
 *              2002/08/07 Erich Focht <efocht@ess.nec.de>
 * Populate cpu entries in sysfs for non-numa systems as well
 *      Intel Corporation - Ashok Raj
 * 02/27/2006 Zhang, Yanmin
 *      Populate cpu cache entries in sysfs for cpu cache info
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/node.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/nodemask.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/cpu.h>

static struct ia64_cpu *sysfs_cpus;
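
/*
 * Record the physical package (socket) id for a CPU when firmware did
 * not provide one.  The expected caller is the ACPI processor driver,
 * passing the slot number it read from the namespace (e.g. the _SUN
 * method).
 */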
void arch_fix_phys_package_id(int num, u32 slot)
{
#ifdef CONFIG_SMP
        if (cpu_data(num)->socket_id == -1)
                cpu_data(num)->socket_id = slot;
#endif
}
EXPORT_SYMBOL_GPL(arch_fix_phys_package_id);
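
/*
 * With CPU hotplug, registration can happen at runtime and must keep
 * the cpu-to-node mapping up to date; without it, a single __init
 * registration pass suffices.
 */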
#ifdef CONFIG_HOTPLUG_CPU
int __ref arch_register_cpu(int num)
{
#ifdef CONFIG_ACPI
        /*
         * If CPEI can be re-targeted or if this is not the
         * CPEI target, then it is hotpluggable.
         */
        if (can_cpei_retarget() || !is_cpu_cpei_target(num))
                sysfs_cpus[num].cpu.hotpluggable = 1;
        map_cpu_to_node(num, node_cpuid[num].nid);
#endif
        return register_cpu(&sysfs_cpus[num].cpu, num);
}
EXPORT_SYMBOL(arch_register_cpu);

void __ref arch_unregister_cpu(int num)
{
        unregister_cpu(&sysfs_cpus[num].cpu);
#ifdef CONFIG_ACPI
        unmap_cpu_from_node(num, cpu_to_node(num));
#endif
}
EXPORT_SYMBOL(arch_unregister_cpu);
#else
static int __init arch_register_cpu(int num)
{
        return register_cpu(&sysfs_cpus[num].cpu, num);
}
#endif /* CONFIG_HOTPLUG_CPU */
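
/*
 * Register every present CPU, not just the online ones, so that a CPU
 * which is still offline at boot already has its sysfs entry when it
 * is brought online later.
 */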
static int __init topology_init(void)
{
        int i, err = 0;

#ifdef CONFIG_NUMA
        /*
         * MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes?
         */
        for_each_online_node(i) {
                if ((err = register_one_node(i)))
                        goto out;
        }
#endif

        sysfs_cpus = kzalloc(sizeof(struct ia64_cpu) * NR_CPUS, GFP_KERNEL);
        if (!sysfs_cpus)
                panic("kzalloc in topology_init failed - NR_CPUS too big?");

        for_each_present_cpu(i) {
                if ((err = arch_register_cpu(i)))
                        goto out;
        }
out:
        return err;
}

subsys_initcall(topology_init);

/*
 * Export cpu cache information through sysfs
 */

/*
 * String arrays used for pretty-printing the cache information.
 */
static const char *cache_types[] = {
        "",                     /* not used */
        "Instruction",
        "Data",
        "Unified"
};

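/*
 * Indexed directly by the pcci_cache_attr value PAL reports for a
 * leaf (see show_attributes() below); the empty slots keep the
 * indexing dense across the reserved attribute codes.
 */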
static const char *cache_mattrib[] = {
        "WriteThrough",
        "WriteBack",
        "",             /* reserved */
        ""              /* reserved */
};

struct cache_info {
        pal_cache_config_info_t cci;
        cpumask_t shared_cpu_map;
        int level;
        int type;
        struct kobject kobj;
};

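/*
 * Per-CPU bookkeeping: kobj becomes the "cache" directory under the
 * CPU's sysdev entry, and cache_leaves[] holds one entry per "index<N>"
 * child directory created below.
 */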
struct cpu_cache_info {
        struct cache_info *cache_leaves;
        int     num_cache_leaves;
        struct kobject kobj;
};

static struct cpu_cache_info    all_cpu_cache_info[NR_CPUS] __cpuinitdata;
#define LEAF_KOBJECT_PTR(x, y)    (&all_cpu_cache_info[x].cache_leaves[y])

#ifdef CONFIG_SMP
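/*
 * Build the mask of CPUs sharing a cache leaf.  Each successful
 * ia64_pal_cache_shared_info() call reports one sharing logical CPU
 * as a (core id, thread id) pair; iterate until num_shared reports
 * have been seen, matching each pair against the CPUs on the same
 * socket.
 */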
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu,
                struct cache_info *this_leaf)
{
        pal_cache_shared_info_t csi;
        int num_shared, i = 0;
        unsigned int j;

        if (cpu_data(cpu)->threads_per_core <= 1 &&
                cpu_data(cpu)->cores_per_socket <= 1) {
                cpu_set(cpu, this_leaf->shared_cpu_map);
                return;
        }

        if (ia64_pal_cache_shared_info(this_leaf->level,
                                        this_leaf->type,
                                        0,
                                        &csi) != PAL_STATUS_SUCCESS)
                return;

        num_shared = (int) csi.num_shared;
        do {
                for_each_possible_cpu(j)
                        if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
                                && cpu_data(j)->core_id == csi.log1_cid
                                && cpu_data(j)->thread_id == csi.log1_tid)
                                cpu_set(j, this_leaf->shared_cpu_map);

                i++;
        } while (i < num_shared &&
                ia64_pal_cache_shared_info(this_leaf->level,
                                this_leaf->type,
                                i,
                                &csi) == PAL_STATUS_SUCCESS);
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu,
                struct cache_info *this_leaf)
{
        cpu_set(cpu, this_leaf->shared_cpu_map);
}
#endif

static ssize_t show_coherency_line_size(struct cache_info *this_leaf,
                                        char *buf)
{
        return sprintf(buf, "%u\n", 1 << this_leaf->cci.pcci_line_size);
}

static ssize_t show_ways_of_associativity(struct cache_info *this_leaf,
                                        char *buf)
{
        return sprintf(buf, "%u\n", this_leaf->cci.pcci_assoc);
}

static ssize_t show_attributes(struct cache_info *this_leaf, char *buf)
{
        return sprintf(buf,
                        "%s\n",
                        cache_mattrib[this_leaf->cci.pcci_cache_attr]);
}

static ssize_t show_size(struct cache_info *this_leaf, char *buf)
{
        return sprintf(buf, "%uK\n", this_leaf->cci.pcci_cache_size / 1024);
}

static ssize_t show_number_of_sets(struct cache_info *this_leaf, char *buf)
{
        unsigned number_of_sets = this_leaf->cci.pcci_cache_size;
        number_of_sets /= this_leaf->cci.pcci_assoc;
        number_of_sets /= 1 << this_leaf->cci.pcci_line_size;

        return sprintf(buf, "%u\n", number_of_sets);
}

static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
{
        ssize_t len;
        cpumask_t shared_cpu_map;

        cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
        len = cpumask_scnprintf(buf, NR_CPUS+1, &shared_cpu_map);
        len += sprintf(buf+len, "\n");
        return len;
}

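/*
 * type indexes cache_types[] (1 = Instruction, 2 = Data); PAL sets
 * pcci_unified on the data-type leaf of a unified cache, so adding it
 * bumps the index to "Unified".
 */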
static ssize_t show_type(struct cache_info *this_leaf, char *buf)
{
        int type = this_leaf->type + this_leaf->cci.pcci_unified;
        return sprintf(buf, "%s\n", cache_types[type]);
}

static ssize_t show_level(struct cache_info *this_leaf, char *buf)
{
        return sprintf(buf, "%u\n", this_leaf->level);
}

struct cache_attr {
        struct attribute attr;
        ssize_t (*show)(struct cache_info *, char *);
        ssize_t (*store)(struct cache_info *, const char *, size_t count);
};

#ifdef define_one_ro
        #undef define_one_ro
#endif
#define define_one_ro(_name) \
        static struct cache_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)
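
/*
 * For example, define_one_ro(level) expands to
 *
 *      static struct cache_attr level =
 *              __ATTR(level, 0444, show_level, NULL);
 *
 * i.e. a read-only sysfs file "level" backed by show_level() above.
 */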

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(ways_of_associativity);
define_one_ro(size);
define_one_ro(number_of_sets);
define_one_ro(shared_cpu_map);
define_one_ro(attributes);

static struct attribute *cache_default_attrs[] = {
        &type.attr,
        &level.attr,
        &coherency_line_size.attr,
        &ways_of_associativity.attr,
        &attributes.attr,
        &size.attr,
        &number_of_sets.attr,
        &shared_cpu_map.attr,
        NULL
};

#define to_object(k) container_of(k, struct cache_info, kobj)
#define to_attr(a) container_of(a, struct cache_attr, attr)

static ssize_t cache_show(struct kobject *kobj, struct attribute *attr,
                          char *buf)
{
        struct cache_attr *fattr = to_attr(attr);
        struct cache_info *this_leaf = to_object(kobj);
        ssize_t ret;

        ret = fattr->show ? fattr->show(this_leaf, buf) : 0;
        return ret;
}

static const struct sysfs_ops cache_sysfs_ops = {
        .show   = cache_show
};

static struct kobj_type cache_ktype = {
        .sysfs_ops      = &cache_sysfs_ops,
        .default_attrs  = cache_default_attrs,
};

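/*
 * The per-CPU "cache" directory itself carries no attributes; the
 * files appear only on the per-leaf "index<N>" kobjects, which use
 * cache_ktype above.
 */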
static struct kobj_type cache_ktype_percpu_entry = {
        .sysfs_ops      = &cache_sysfs_ops,
};

static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
{
        kfree(all_cpu_cache_info[cpu].cache_leaves);
        all_cpu_cache_info[cpu].cache_leaves = NULL;
        all_cpu_cache_info[cpu].num_cache_leaves = 0;
        memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
}

static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
{
        unsigned long i, levels, unique_caches;
        pal_cache_config_info_t cci;
        int j;
        long status;
        struct cache_info *this_cache;
        int num_cache_leaves = 0;

        if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
                printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
                return -1;
        }

        this_cache = kzalloc(sizeof(struct cache_info) * unique_caches,
                        GFP_KERNEL);
        if (this_cache == NULL)
                return -ENOMEM;

        for (i = 0; i < levels; i++) {
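                /*
                 * Probe both cache types at each level (2 = data or
                 * unified, 1 = instruction); a combination PAL does
                 * not implement returns a non-success status and is
                 * simply skipped.
                 */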
                for (j = 2; j > 0; j--) {
                        if ((status = ia64_pal_cache_config_info(i, j, &cci)) !=
                                        PAL_STATUS_SUCCESS)
                                continue;

                        this_cache[num_cache_leaves].cci = cci;
                        this_cache[num_cache_leaves].level = i + 1;
                        this_cache[num_cache_leaves].type = j;

                        cache_shared_cpu_map_setup(cpu,
                                        &this_cache[num_cache_leaves]);
                        num_cache_leaves++;
                }
        }

        all_cpu_cache_info[cpu].cache_leaves = this_cache;
        all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves;

        memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));

        return 0;
}

/* Add cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device *sys_dev)
{
        unsigned int cpu = sys_dev->id;
        unsigned long i, j;
        struct cache_info *this_object;
        int retval = 0;
        cpumask_t oldmask;

        /* Already registered for this CPU? Nothing to do. */
        if (all_cpu_cache_info[cpu].kobj.parent)
                return 0;

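        /*
         * PAL reports cache information for the CPU the call is made
         * on, so temporarily bind this task to the target CPU while
         * its leaves are enumerated.
         */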
        oldmask = current->cpus_allowed;
        retval = set_cpus_allowed_ptr(current, cpumask_of(cpu));
        if (unlikely(retval))
                return retval;

        retval = cpu_cache_sysfs_init(cpu);
        set_cpus_allowed_ptr(current, &oldmask);
        if (unlikely(retval < 0))
                return retval;

        retval = kobject_init_and_add(&all_cpu_cache_info[cpu].kobj,
                                      &cache_ktype_percpu_entry, &sys_dev->kobj,
                                      "%s", "cache");
        if (unlikely(retval < 0)) {
                cpu_cache_sysfs_exit(cpu);
                return retval;
        }

        for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
                this_object = LEAF_KOBJECT_PTR(cpu, i);
                retval = kobject_init_and_add(&(this_object->kobj),
                                              &cache_ktype,
                                              &all_cpu_cache_info[cpu].kobj,
                                              "index%1lu", i);
                if (unlikely(retval)) {
                        /* Unwind the leaves added so far, then the parent. */
                        for (j = 0; j < i; j++) {
                                kobject_put(&(LEAF_KOBJECT_PTR(cpu, j)->kobj));
                        }
                        kobject_put(&all_cpu_cache_info[cpu].kobj);
                        cpu_cache_sysfs_exit(cpu);
                        return retval;
                }
                kobject_uevent(&(this_object->kobj), KOBJ_ADD);
        }
        kobject_uevent(&all_cpu_cache_info[cpu].kobj, KOBJ_ADD);
        return retval;
}

/* Remove cache interface for CPU device */
static int __cpuinit cache_remove_dev(struct sys_device *sys_dev)
{
        unsigned int cpu = sys_dev->id;
        unsigned long i;

        for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
                kobject_put(&(LEAF_KOBJECT_PTR(cpu, i)->kobj));

        if (all_cpu_cache_info[cpu].kobj.parent) {
                kobject_put(&all_cpu_cache_info[cpu].kobj);
                memset(&all_cpu_cache_info[cpu].kobj,
                        0,
                        sizeof(struct kobject));
        }

        cpu_cache_sysfs_exit(cpu);

        return 0;
}

/*
 * When a cpu is hot-plugged, do a check and initialize the
 * cache kobjects if necessary.
 */
static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
                unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct sys_device *sys_dev;

        sys_dev = get_cpu_sysdev(cpu);
        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                cache_add_dev(sys_dev);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                cache_remove_dev(sys_dev);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cache_cpu_notifier = {
        .notifier_call = cache_cpu_callback
};

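/*
 * Runs at device_initcall() time, after subsys_initcall(topology_init)
 * above, so the per-CPU sysdev entries already exist when the cache
 * directories are attached to them.
 */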
static int __init cache_sysfs_init(void)
{
        int i;

        for_each_online_cpu(i) {
                struct sys_device *sys_dev = get_cpu_sysdev((unsigned int)i);
                cache_add_dev(sys_dev);
        }

        register_hotcpu_notifier(&cache_cpu_notifier);

        return 0;
}

device_initcall(cache_sysfs_init);