x86: Fix boot failures on older AMD CPU's
[pandora-kernel.git] / arch/x86/kernel/cpu/perfctr-watchdog.c
/*
 * Local APIC-based NMI watchdog for various CPUs.
 *
 * This file also handles reservation of performance counters for coordination
 * with other users (like oprofile).
 *
 * Note that these events normally don't tick when the CPU idles. This means
 * the frequency varies with CPU load.
 *
 * Original code for K7/P6 written by Keith Owens
 *
 */

#include <linux/percpu.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <asm/nmi.h>
#include <linux/kprobes.h>

#include <asm/apic.h>
#include <asm/perf_event.h>

/*
 * This number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0.
 *
 * It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66
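/*
 * Worked out (assuming the usual msr-index.h values, MSR_P4_BSU_ESCR0 = 0x3a0
 * and MSR_P4_CRU_ESCR5 = 0x3e1): the largest bit offset handed out by the
 * conversion helpers below is 0x3e1 - 0x3a0 = 65, so 66 bits are enough to
 * cover every platform supported here.
 */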

/*
 * perfctr_nmi_owner tracks the ownership of the perfctr registers;
 * evntsel_nmi_owner tracks the ownership of the event selection registers.
 * Different performance counters / event selection registers may be reserved
 * by different subsystems; this reservation system just tries to coordinate
 * things a little (see the illustrative usage sketch at the end of this file).
 */
static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);

/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
        /* returns the bit offset of the performance counter register */
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
                /* Fam15h interleaves PERF_CTLx/PERF_CTRx MSRs, hence >> 1 */
                if (msr >= MSR_F15H_PERF_CTR)
                        return (msr - MSR_F15H_PERF_CTR) >> 1;
                return msr - MSR_K7_PERFCTR0;
        case X86_VENDOR_INTEL:
                if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
                        return msr - MSR_ARCH_PERFMON_PERFCTR0;

                switch (boot_cpu_data.x86) {
                case 6:
                        return msr - MSR_P6_PERFCTR0;
                case 15:
                        return msr - MSR_P4_BPU_PERFCTR0;
                }
        }
        return 0;
}
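/*
 * Resulting counter-MSR -> reservation-bit mapping (illustrative, worked
 * through from the code above):
 *
 *   AMD (K7-style PMU):  MSR_K7_PERFCTR0 + n            -> bit n
 *   AMD Fam15h:          MSR_F15H_PERF_CTR + 2 * n      -> bit n
 *   Intel arch perfmon:  MSR_ARCH_PERFMON_PERFCTR0 + n  -> bit n
 *   Intel P6:            MSR_P6_PERFCTR0 + n            -> bit n
 *   Intel P4:            MSR_P4_BPU_PERFCTR0 + n        -> bit n
 */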

/*
 * converts an msr to an appropriate reservation bit
 * returns the bit offset of the event selection register
 */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
        /* returns the bit offset of the event selection register */
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
                if (msr >= MSR_F15H_PERF_CTL)
                        return (msr - MSR_F15H_PERF_CTL) >> 1;
                return msr - MSR_K7_EVNTSEL0;
        case X86_VENDOR_INTEL:
                if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
                        return msr - MSR_ARCH_PERFMON_EVENTSEL0;

                switch (boot_cpu_data.x86) {
                case 6:
                        return msr - MSR_P6_EVNTSEL0;
                case 15:
                        return msr - MSR_P4_BSU_ESCR0;
                }
        }
        return 0;
}

/* checks whether a counter bit is available (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
        BUG_ON(counter > NMI_MAX_COUNTER_BITS);

        return !test_bit(counter, perfctr_nmi_owner);
}
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);

/* returns 1 on success (reserved, or msr not managed here), 0 if already taken */
int reserve_perfctr_nmi(unsigned int msr)
{
        unsigned int counter;

        counter = nmi_perfctr_msr_to_bit(msr);
        /* register not managed by the allocator? */
        if (counter > NMI_MAX_COUNTER_BITS)
                return 1;

        if (!test_and_set_bit(counter, perfctr_nmi_owner))
                return 1;
        return 0;
}
EXPORT_SYMBOL(reserve_perfctr_nmi);

void release_perfctr_nmi(unsigned int msr)
{
        unsigned int counter;

        counter = nmi_perfctr_msr_to_bit(msr);
        /* register not managed by the allocator? */
        if (counter > NMI_MAX_COUNTER_BITS)
                return;

        clear_bit(counter, perfctr_nmi_owner);
}
EXPORT_SYMBOL(release_perfctr_nmi);

int reserve_evntsel_nmi(unsigned int msr)
{
        unsigned int counter;

        counter = nmi_evntsel_msr_to_bit(msr);
        /* register not managed by the allocator? */
        if (counter > NMI_MAX_COUNTER_BITS)
                return 1;

        if (!test_and_set_bit(counter, evntsel_nmi_owner))
                return 1;
        return 0;
}
EXPORT_SYMBOL(reserve_evntsel_nmi);

void release_evntsel_nmi(unsigned int msr)
{
        unsigned int counter;

        counter = nmi_evntsel_msr_to_bit(msr);
        /* register not managed by the allocator? */
        if (counter > NMI_MAX_COUNTER_BITS)
                return;

        clear_bit(counter, evntsel_nmi_owner);
}
EXPORT_SYMBOL(release_evntsel_nmi);
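
/*
 * Illustrative usage sketch (a minimal, non-compiled example, not part of the
 * file above): a subsystem such as a perfctr-based NMI watchdog would pair the
 * reserve and release helpers around its use of a counter. The function names
 * and the choice of the K7 counter 0 MSRs below are hypothetical.
 */
#if 0
static int example_setup_watchdog_counter(void)
{
        unsigned int perfctr_msr = MSR_K7_PERFCTR0;
        unsigned int evntsel_msr = MSR_K7_EVNTSEL0;

        /* reserve_*() returns 1 on success, 0 if another user owns the bit */
        if (!reserve_perfctr_nmi(perfctr_msr))
                return -EBUSY;

        if (!reserve_evntsel_nmi(evntsel_msr)) {
                release_perfctr_nmi(perfctr_msr);
                return -EBUSY;
        }

        /* ... program evntsel_msr and perfctr_msr with wrmsr() here ... */
        return 0;
}

static void example_teardown_watchdog_counter(void)
{
        /* stop the event first, then drop both reservations */
        release_evntsel_nmi(MSR_K7_EVNTSEL0);
        release_perfctr_nmi(MSR_K7_PERFCTR0);
}
#endif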