/*
 *  (c) 2005, 2006 Advanced Micro Devices, Inc.
 *  Your use of this code is subject to the terms and conditions of the
 *  GNU general public license version 2. See "COPYING" or
 *  http://www.gnu.org/licenses/gpl.html
 *
 *  Written by Jacob Shin - AMD, Inc.
 *
 *  Support : jacob.shin@amd.com
 *
 *  April 2006
 *     - added support for AMD Family 0x10 processors
 *
 *  All MC4_MISCi registers are shared between the cores of a multi-core
 *  processor.
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/percpu.h>
#include <linux/sysdev.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/msr.h>

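/* Limits and MSR bit fields for the error thresholding banks. */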
#define NR_BANKS          6
#define NR_BLOCKS         9
#define THRESHOLD_MAX     0xFFF
#define INT_TYPE_APIC     0x00020000
#define MASK_VALID_HI     0x80000000
#define MASK_CNTP_HI      0x40000000
#define MASK_LOCKED_HI    0x20000000
#define MASK_LVTOFF_HI    0x00F00000
#define MASK_COUNT_EN_HI  0x00080000
#define MASK_INT_TYPE_HI  0x00060000
#define MASK_OVERFLOW_HI  0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_BLKPTR_LO    0xFF000000
#define MCG_XBLK_ADDR     0xC0000400

struct threshold_block {
        unsigned int            block;
        unsigned int            bank;
        unsigned int            cpu;
        u32                     address;
        u16                     interrupt_enable;
        u16                     threshold_limit;
        struct kobject          kobj;
        struct list_head        miscj;
};

struct threshold_bank {
        struct kobject          *kobj;
        struct threshold_block  *blocks;
        cpumask_var_t           cpus;
};
static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);

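/* Bank 4 (MC4, the northbridge bank) is shared by all cores on a node. */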
#ifdef CONFIG_SMP
static unsigned char shared_bank[NR_BANKS] = {
        0, 0, 0, 0, 1
};
#endif

static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */

static void amd_threshold_interrupt(void);

/*
 * CPU Initialization
 */

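/* Arguments for threshold_restart_bank(), which runs on the block's CPU. */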
struct thresh_restart {
        struct threshold_block  *b;
        int                     reset;
        int                     set_lvt_off;
        int                     lvt_off;
        u16                     old_limit;
};

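/*
 * Verify that the LVT offset advertised by the threshold register matches
 * the one the APIC was set up with; both failure cases are firmware bugs.
 */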
static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
{
        int msr = (hi & MASK_LVTOFF_HI) >> 20;

        if (apic < 0) {
                pr_err(FW_BUG "cpu %d, failed to setup threshold interrupt "
                       "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu,
                       b->bank, b->block, b->address, hi, lo);
                return 0;
        }

        if (apic != msr) {
                pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
                       "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
                       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
                return 0;
        }

        return 1;
}

/*
 * Program the bank's threshold register. Must be called with correct CPU
 * affinity; called via smp_call_function_single().
 */
static void threshold_restart_bank(void *_tr)
{
        struct thresh_restart *tr = _tr;
        u32 hi, lo;

        rdmsr(tr->b->address, lo, hi);

        if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
                tr->reset = 1;  /* limit cannot be lower than err count */

        if (tr->reset) {                /* reset err count and overflow bit */
                hi =
                    (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
                    (THRESHOLD_MAX - tr->b->threshold_limit);
        } else if (tr->old_limit) {     /* change limit w/o reset */
                int new_count = (hi & THRESHOLD_MAX) +
                    (tr->old_limit - tr->b->threshold_limit);

                hi = (hi & ~MASK_ERR_COUNT_HI) |
                    (new_count & THRESHOLD_MAX);
        }

        if (tr->set_lvt_off) {
                if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
                        /* set new lvt offset */
                        hi &= ~MASK_LVTOFF_HI;
                        hi |= tr->lvt_off << 20;
                }
        }

        if (tr->b->interrupt_enable)
                hi = (hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC;
        else
                hi &= ~MASK_INT_TYPE_HI;

        hi |= MASK_COUNT_EN_HI;
        wrmsr(tr->b->address, lo, hi);
}

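/* Set a newly discovered block to the maximum threshold and the given LVT offset. */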
static void mce_threshold_block_init(struct threshold_block *b, int offset)
{
        struct thresh_restart tr = {
                .b                      = b,
                .set_lvt_off            = 1,
                .lvt_off                = offset,
        };

        b->threshold_limit              = THRESHOLD_MAX;
        threshold_restart_bank(&tr);
}

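/*
 * Reserve the APIC LVT entry for the threshold vector on first use;
 * subsequent calls just return the offset already in effect.
 */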
static int setup_APIC_mce(int reserved, int new)
{
        if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR,
                                              APIC_EILVT_MSG_FIX, 0))
                return new;

        return reserved;
}

/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
        struct threshold_block b;
        unsigned int cpu = smp_processor_id();
        u32 low = 0, high = 0, address = 0;
        unsigned int bank, block;
        int offset = -1;

        for (bank = 0; bank < NR_BANKS; ++bank) {
                for (block = 0; block < NR_BLOCKS; ++block) {
                        if (block == 0)
                                address = MSR_IA32_MC0_MISC + bank * 4;
                        else if (block == 1) {
                                address = (low & MASK_BLKPTR_LO) >> 21;
                                if (!address)
                                        break;

                                address += MCG_XBLK_ADDR;
                        } else
                                ++address;

                        if (rdmsr_safe(address, &low, &high))
                                break;

                        if (!(high & MASK_VALID_HI))
                                continue;

                        if (!(high & MASK_CNTP_HI)  ||
                             (high & MASK_LOCKED_HI))
                                continue;

                        if (!block)
                                per_cpu(bank_map, cpu) |= (1 << bank);
#ifdef CONFIG_SMP
                        if (shared_bank[bank] && c->cpu_core_id)
                                break;
#endif
                        offset = setup_APIC_mce(offset,
                                                (high & MASK_LVTOFF_HI) >> 20);

                        memset(&b, 0, sizeof(b));
                        b.cpu           = cpu;
                        b.bank          = bank;
                        b.block         = block;
                        b.address       = address;

                        mce_threshold_block_init(&b, offset);
                        mce_threshold_vector = amd_threshold_interrupt;
                }
        }
}

/*
 * APIC Interrupt Handler
 */

/*
 * The threshold interrupt handler services THRESHOLD_APIC_VECTOR. The
 * interrupt fires when error_count reaches threshold_limit, and the
 * handler logs an mcelog entry with a software-defined bank number.
 */
static void amd_threshold_interrupt(void)
{
        u32 low = 0, high = 0, address = 0;
        unsigned int bank, block;
        struct mce m;

        mce_setup(&m);

        /* assume first bank caused it */
        for (bank = 0; bank < NR_BANKS; ++bank) {
                if (!(per_cpu(bank_map, m.cpu) & (1 << bank)))
                        continue;
                for (block = 0; block < NR_BLOCKS; ++block) {
                        if (block == 0) {
                                address = MSR_IA32_MC0_MISC + bank * 4;
                        } else if (block == 1) {
                                address = (low & MASK_BLKPTR_LO) >> 21;
                                if (!address)
                                        break;
                                address += MCG_XBLK_ADDR;
                        } else {
                                ++address;
                        }

                        if (rdmsr_safe(address, &low, &high))
                                break;

                        if (!(high & MASK_VALID_HI)) {
                                if (block)
                                        continue;
                                else
                                        break;
                        }

                        if (!(high & MASK_CNTP_HI)  ||
                             (high & MASK_LOCKED_HI))
                                continue;

                        /*
                         * Log the machine check that caused the threshold
                         * event.
                         */
                        machine_check_poll(MCP_TIMESTAMP,
                                        &__get_cpu_var(mce_poll_banks));

                        if (high & MASK_OVERFLOW_HI) {
                                rdmsrl(address, m.misc);
                                rdmsrl(MSR_IA32_MC0_STATUS + bank * 4,
                                       m.status);
                                m.bank = K8_MCE_THRESHOLD_BASE
                                       + bank * NR_BLOCKS
                                       + block;
                                mce_log(&m);
                                return;
                        }
                }
        }
}

/*
 * Sysfs Interface
 */

struct threshold_attr {
        struct attribute attr;
        ssize_t (*show) (struct threshold_block *, char *);
        ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};

#define SHOW_FIELDS(name)                                               \
static ssize_t show_ ## name(struct threshold_block *b, char *buf)      \
{                                                                       \
        return sprintf(buf, "%lx\n", (unsigned long) b->name);          \
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)

static ssize_t
store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
{
        struct thresh_restart tr;
        unsigned long new;

        if (strict_strtoul(buf, 0, &new) < 0)
                return -EINVAL;

        b->interrupt_enable = !!new;

        memset(&tr, 0, sizeof(tr));
        tr.b            = b;

        smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

        return size;
}

static ssize_t
store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
{
        struct thresh_restart tr;
        unsigned long new;

        if (strict_strtoul(buf, 0, &new) < 0)
                return -EINVAL;

        if (new > THRESHOLD_MAX)
                new = THRESHOLD_MAX;
        if (new < 1)
                new = 1;

        memset(&tr, 0, sizeof(tr));
        tr.old_limit = b->threshold_limit;
        b->threshold_limit = new;
        tr.b = b;

        smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

        return size;
}

struct threshold_block_cross_cpu {
        struct threshold_block  *tb;
        long                    retval;
};

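/*
 * Read the error count on the block's CPU. The hardware counter counts up
 * from (THRESHOLD_MAX - threshold_limit), so subtract that bias back out.
 */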
static void local_error_count_handler(void *_tbcc)
{
        struct threshold_block_cross_cpu *tbcc = _tbcc;
        struct threshold_block *b = tbcc->tb;
        u32 low, high;

        rdmsr(b->address, low, high);
        tbcc->retval = (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit);
}

static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
        struct threshold_block_cross_cpu tbcc = { .tb = b, };

        smp_call_function_single(b->cpu, local_error_count_handler, &tbcc, 1);
        return sprintf(buf, "%lx\n", tbcc.retval);
}

static ssize_t store_error_count(struct threshold_block *b,
                                 const char *buf, size_t count)
{
        struct thresh_restart tr = { .b = b, .reset = 1, .old_limit = 0 };

        smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
        return 1;
}

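/* Declare a 0644 sysfs attribute backed by the show_/store_ pair above. */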
#define RW_ATTR(val)                                                    \
static struct threshold_attr val = {                                    \
        .attr   = {.name = __stringify(val), .mode = 0644 },            \
        .show   = show_## val,                                          \
        .store  = store_## val,                                         \
};

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);
RW_ATTR(error_count);

static struct attribute *default_attrs[] = {
        &interrupt_enable.attr,
        &threshold_limit.attr,
        &error_count.attr,
        NULL
};

#define to_block(k)     container_of(k, struct threshold_block, kobj)
#define to_attr(a)      container_of(a, struct threshold_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct threshold_block *b = to_block(kobj);
        struct threshold_attr *a = to_attr(attr);
        ssize_t ret;

        ret = a->show ? a->show(b, buf) : -EIO;

        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct threshold_block *b = to_block(kobj);
        struct threshold_attr *a = to_attr(attr);
        ssize_t ret;

        ret = a->store ? a->store(b, buf, count) : -EIO;

        return ret;
}

static const struct sysfs_ops threshold_ops = {
        .show                   = show,
        .store                  = store,
};

static struct kobj_type threshold_ktype = {
        .sysfs_ops              = &threshold_ops,
        .default_attrs          = default_attrs,
};

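/*
 * Allocate and register a threshold block, then recurse to the next one:
 * block 0 points at the extended block area via its BLKPTR field, and the
 * remaining blocks are laid out back to back.
 */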
static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
                                               unsigned int bank,
                                               unsigned int block,
                                               u32 address)
{
        struct threshold_block *b = NULL;
        u32 low, high;
        int err;

        if ((bank >= NR_BANKS) || (block >= NR_BLOCKS))
                return 0;

        if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
                return 0;

        if (!(high & MASK_VALID_HI)) {
                if (block)
                        goto recurse;
                else
                        return 0;
        }

        if (!(high & MASK_CNTP_HI)  ||
             (high & MASK_LOCKED_HI))
                goto recurse;

        b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
        if (!b)
                return -ENOMEM;

        b->block                = block;
        b->bank                 = bank;
        b->cpu                  = cpu;
        b->address              = address;
        b->interrupt_enable     = 0;
        b->threshold_limit      = THRESHOLD_MAX;

        INIT_LIST_HEAD(&b->miscj);

        if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
                list_add(&b->miscj,
                         &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
        } else {
                per_cpu(threshold_banks, cpu)[bank]->blocks = b;
        }

        err = kobject_init_and_add(&b->kobj, &threshold_ktype,
                                   per_cpu(threshold_banks, cpu)[bank]->kobj,
                                   "misc%i", block);
        if (err)
                goto out_free;
recurse:
        if (!block) {
                address = (low & MASK_BLKPTR_LO) >> 21;
                if (!address)
                        return 0;
                address += MCG_XBLK_ADDR;
        } else {
                ++address;
        }

        err = allocate_threshold_blocks(cpu, bank, ++block, address);
        if (err)
                goto out_free;

        if (b)
                kobject_uevent(&b->kobj, KOBJ_ADD);

        return err;

out_free:
        if (b) {
                kobject_put(&b->kobj);
                kfree(b);
        }
        return err;
}

static __cpuinit long
local_allocate_threshold_blocks(int cpu, unsigned int bank)
{
        return allocate_threshold_blocks(cpu, bank, 0,
                                         MSR_IA32_MC0_MISC + bank * 4);
}

/*
 * Symlink sibling shared banks to the first core; the first core owns the
 * sysfs directory and files.
 */
static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
        int i, err = 0;
        struct threshold_bank *b = NULL;
        char name[32];
#ifdef CONFIG_SMP
        struct cpuinfo_x86 *c = &cpu_data(cpu);
#endif

        sprintf(name, "threshold_bank%i", bank);

#ifdef CONFIG_SMP
        if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {   /* symlink */
                i = cpumask_first(c->llc_shared_map);

                /* first core not up yet */
                if (cpu_data(i).cpu_core_id)
                        goto out;

                /* already linked */
                if (per_cpu(threshold_banks, cpu)[bank])
                        goto out;

                b = per_cpu(threshold_banks, i)[bank];

                if (!b)
                        goto out;

                err = sysfs_create_link(&per_cpu(mce_dev, cpu).kobj,
                                        b->kobj, name);
                if (err)
                        goto out;

                cpumask_copy(b->cpus, c->llc_shared_map);
                per_cpu(threshold_banks, cpu)[bank] = b;

                goto out;
        }
#endif

        b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
        if (!b) {
                err = -ENOMEM;
                goto out;
        }
        if (!zalloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
                kfree(b);
                err = -ENOMEM;
                goto out;
        }

        b->kobj = kobject_create_and_add(name, &per_cpu(mce_dev, cpu).kobj);
        if (!b->kobj)
                goto out_free;

#ifndef CONFIG_SMP
        cpumask_setall(b->cpus);
#else
        cpumask_set_cpu(cpu, b->cpus);
#endif

        per_cpu(threshold_banks, cpu)[bank] = b;

        err = local_allocate_threshold_blocks(cpu, bank);
        if (err)
                goto out_free;

        for_each_cpu(i, b->cpus) {
                if (i == cpu)
                        continue;

                err = sysfs_create_link(&per_cpu(mce_dev, i).kobj,
                                        b->kobj, name);
                if (err)
                        goto out;

                per_cpu(threshold_banks, i)[bank] = b;
        }

        goto out;

out_free:
        per_cpu(threshold_banks, cpu)[bank] = NULL;
        free_cpumask_var(b->cpus);
        kfree(b);
out:
        return err;
}

/* create dir/files for all valid threshold banks */
static __cpuinit int threshold_create_device(unsigned int cpu)
{
        unsigned int bank;
        int err = 0;

        for (bank = 0; bank < NR_BANKS; ++bank) {
                if (!(per_cpu(bank_map, cpu) & (1 << bank)))
                        continue;
                err = threshold_create_bank(cpu, bank);
                if (err)
                        return err;
        }

        return err;
}

/*
 * Let's be hotplug friendly: on multi-core processors the first core always
 * takes ownership of the shared sysfs dir/files, and the rest of the cores
 * are symlinked to it.
 */

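/* Free all blocks on the bank's list, then the head block itself. */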
static void deallocate_threshold_block(unsigned int cpu,
                                       unsigned int bank)
{
        struct threshold_block *pos = NULL;
        struct threshold_block *tmp = NULL;
        struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

        if (!head)
                return;

        list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
                kobject_put(&pos->kobj);
                list_del(&pos->miscj);
                kfree(pos);
        }

        kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
        per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}

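/*
 * Remove a bank's sysfs entries: a sibling core merely drops its symlink,
 * while the owning core removes all sibling symlinks and then frees the
 * bank itself.
 */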
static void threshold_remove_bank(unsigned int cpu, int bank)
{
        struct threshold_bank *b;
        char name[32];
        int i = 0;

        b = per_cpu(threshold_banks, cpu)[bank];
        if (!b)
                return;
        if (!b->blocks)
                goto free_out;

        sprintf(name, "threshold_bank%i", bank);

#ifdef CONFIG_SMP
        /* sibling symlink */
        if (shared_bank[bank] && b->blocks->cpu != cpu) {
                sysfs_remove_link(&per_cpu(mce_dev, cpu).kobj, name);
                per_cpu(threshold_banks, cpu)[bank] = NULL;

                return;
        }
#endif

        /* remove all sibling symlinks before unregistering */
        for_each_cpu(i, b->cpus) {
                if (i == cpu)
                        continue;

                sysfs_remove_link(&per_cpu(mce_dev, i).kobj, name);
                per_cpu(threshold_banks, i)[bank] = NULL;
        }

        deallocate_threshold_block(cpu, bank);

free_out:
        kobject_del(b->kobj);
        kobject_put(b->kobj);
        free_cpumask_var(b->cpus);
        kfree(b);
        per_cpu(threshold_banks, cpu)[bank] = NULL;
}

static void threshold_remove_device(unsigned int cpu)
{
        unsigned int bank;

        for (bank = 0; bank < NR_BANKS; ++bank) {
                if (!(per_cpu(bank_map, cpu) & (1 << bank)))
                        continue;
                threshold_remove_bank(cpu, bank);
        }
}

/* get notified when a cpu comes on/off */
static void __cpuinit
amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu)
{
        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                threshold_create_device(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                threshold_remove_device(cpu);
                break;
        default:
                break;
        }
}

static __init int threshold_init_device(void)
{
        unsigned lcpu = 0;

        /* to hit CPUs online before the notifier is up */
        for_each_online_cpu(lcpu) {
                int err = threshold_create_device(lcpu);

                if (err)
                        return err;
        }
        threshold_cpu_callback = amd_64_threshold_cpu_callback;

        return 0;
}
device_initcall(threshold_init_device);