/* arch/arm/kernel/perf_event_v7.c */
/*
 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
 *
 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
 * 2010 (c) MontaVista Software, LLC.
 *
 * Copied from ARMv6 code, with the low level code inspired
 *  by the ARMv7 Oprofile code.
 *
 * Cortex-A8 has up to 4 configurable performance counters and
 *  a single cycle counter.
 * Cortex-A9 has up to 31 configurable performance counters and
 *  a single cycle counter.
 *
 * All counters can be enabled/disabled and IRQ masked separately. The
 *  cycle counter can be reset separately from the performance counters,
 *  which can only be reset all together.
 */

#ifdef CONFIG_CPU_V7

static struct arm_pmu armv7pmu;

/*
 * Common ARMv7 event types
 *
 * Note: An implementation may not be able to count all of these events
 * but the encodings are considered to be `reserved' in the case that
 * they are not available.
 */
enum armv7_perf_types {
        ARMV7_PERFCTR_PMNC_SW_INCR                      = 0x00,
        ARMV7_PERFCTR_L1_ICACHE_REFILL                  = 0x01,
        ARMV7_PERFCTR_ITLB_REFILL                       = 0x02,
        ARMV7_PERFCTR_L1_DCACHE_REFILL                  = 0x03,
        ARMV7_PERFCTR_L1_DCACHE_ACCESS                  = 0x04,
        ARMV7_PERFCTR_DTLB_REFILL                       = 0x05,
        ARMV7_PERFCTR_MEM_READ                          = 0x06,
        ARMV7_PERFCTR_MEM_WRITE                         = 0x07,
        ARMV7_PERFCTR_INSTR_EXECUTED                    = 0x08,
        ARMV7_PERFCTR_EXC_TAKEN                         = 0x09,
        ARMV7_PERFCTR_EXC_EXECUTED                      = 0x0A,
        ARMV7_PERFCTR_CID_WRITE                         = 0x0B,

        /*
         * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
         * It counts:
         *  - all (taken) branch instructions,
         *  - instructions that explicitly write the PC,
         *  - exception generating instructions.
         */
        ARMV7_PERFCTR_PC_WRITE                          = 0x0C,
        ARMV7_PERFCTR_PC_IMM_BRANCH                     = 0x0D,
        ARMV7_PERFCTR_PC_PROC_RETURN                    = 0x0E,
        ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS              = 0x0F,
        ARMV7_PERFCTR_PC_BRANCH_MIS_PRED                = 0x10,
        ARMV7_PERFCTR_CLOCK_CYCLES                      = 0x11,
        ARMV7_PERFCTR_PC_BRANCH_PRED                    = 0x12,

        /* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
        ARMV7_PERFCTR_MEM_ACCESS                        = 0x13,
        ARMV7_PERFCTR_L1_ICACHE_ACCESS                  = 0x14,
        ARMV7_PERFCTR_L1_DCACHE_WB                      = 0x15,
        ARMV7_PERFCTR_L2_CACHE_ACCESS                   = 0x16,
        ARMV7_PERFCTR_L2_CACHE_REFILL                   = 0x17,
        ARMV7_PERFCTR_L2_CACHE_WB                       = 0x18,
        ARMV7_PERFCTR_BUS_ACCESS                        = 0x19,
        ARMV7_PERFCTR_MEM_ERROR                         = 0x1A,
        ARMV7_PERFCTR_INSTR_SPEC                        = 0x1B,
        ARMV7_PERFCTR_TTBR_WRITE                        = 0x1C,
        ARMV7_PERFCTR_BUS_CYCLES                        = 0x1D,

        ARMV7_PERFCTR_CPU_CYCLES                        = 0xFF
};

/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
        ARMV7_A8_PERFCTR_L2_CACHE_ACCESS                = 0x43,
        ARMV7_A8_PERFCTR_L2_CACHE_REFILL                = 0x44,
        ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS               = 0x50,
        ARMV7_A8_PERFCTR_STALL_ISIDE                    = 0x56,
};

/* ARMv7 Cortex-A9 specific event types */
enum armv7_a9_perf_types {
        ARMV7_A9_PERFCTR_INSTR_CORE_RENAME              = 0x68,
        ARMV7_A9_PERFCTR_STALL_ICACHE                   = 0x60,
        ARMV7_A9_PERFCTR_STALL_DISPATCH                 = 0x66,
};

/* ARMv7 Cortex-A5 specific event types */
enum armv7_a5_perf_types {
        ARMV7_A5_PERFCTR_PREFETCH_LINEFILL              = 0xc2,
        ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP         = 0xc3,
};

/* ARMv7 Cortex-A15 specific event types */
enum armv7_a15_perf_types {
        ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ         = 0x40,
        ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE        = 0x41,
        ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ         = 0x42,
        ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE        = 0x43,

        ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ           = 0x4C,
        ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE          = 0x4D,

        ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ          = 0x50,
        ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE         = 0x51,
        ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ          = 0x52,
        ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE         = 0x53,

        ARMV7_A15_PERFCTR_PC_WRITE_SPEC                 = 0x76,
};

/*
 * Cortex-A8 HW events mapping
 *
 * The hardware events that we support. We do support cache operations
 * but we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV7_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV7_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES]            = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV7_PERFCTR_PC_WRITE,
        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_BUS_CYCLES]              = HW_OP_UNSUPPORTED,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = HW_OP_UNSUPPORTED,
};

static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                          [PERF_COUNT_HW_CACHE_OP_MAX]
                                          [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        [C(L1D)] = {
                /*
                 * The performance counters don't differentiate between read
                 * and write accesses/misses so this isn't strictly correct,
                 * but it's the best we can do. Writes and reads get
                 * combined.
                 */
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_DCACHE_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_DCACHE_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(L1I)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_ICACHE_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(LL)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
                        [C(RESULT_MISS)]        = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
                        [C(RESULT_MISS)]        = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(DTLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_DTLB_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_DTLB_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(ITLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(BPU)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_BRANCH_PRED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_BRANCH_PRED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(NODE)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
};

/*
 * Cortex-A9 HW events mapping
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV7_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES]            = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV7_PERFCTR_PC_WRITE,
        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_BUS_CYCLES]              = HW_OP_UNSUPPORTED,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = ARMV7_A9_PERFCTR_STALL_DISPATCH,
};

static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                          [PERF_COUNT_HW_CACHE_OP_MAX]
                                          [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        [C(L1D)] = {
                /*
                 * The performance counters don't differentiate between read
                 * and write accesses/misses so this isn't strictly correct,
                 * but it's the best we can do. Writes and reads get
                 * combined.
                 */
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_DCACHE_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_DCACHE_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(L1I)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_ICACHE_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(LL)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(DTLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_DTLB_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_DTLB_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(ITLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(BPU)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_BRANCH_PRED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_BRANCH_PRED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(NODE)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
};

/*
 * Cortex-A5 HW events mapping
 */
static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV7_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV7_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES]            = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV7_PERFCTR_PC_WRITE,
        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_BUS_CYCLES]              = HW_OP_UNSUPPORTED,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = HW_OP_UNSUPPORTED,
};

static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                        [PERF_COUNT_HW_CACHE_OP_MAX]
                                        [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        [C(L1D)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_DCACHE_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_DCACHE_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
                        [C(RESULT_MISS)]        = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
                },
        },
        [C(L1I)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_ICACHE_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                /*
                 * The prefetch counters don't differentiate between the I
                 * side and the D side.
                 */
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
                        [C(RESULT_MISS)]        = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
                },
        },
        [C(LL)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(DTLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_DTLB_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_DTLB_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(ITLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(BPU)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_BRANCH_PRED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_BRANCH_PRED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(NODE)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
};

/*
 * Cortex-A15 HW events mapping
 */
static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV7_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV7_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES]            = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_BUS_CYCLES]              = ARMV7_PERFCTR_BUS_CYCLES,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = HW_OP_UNSUPPORTED,
};

static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                        [PERF_COUNT_HW_CACHE_OP_MAX]
                                        [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        [C(L1D)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
                        [C(RESULT_MISS)]        = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
                        [C(RESULT_MISS)]        = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(L1I)] = {
                /*
                 * Not all performance counters differentiate between read
                 * and write accesses/misses so we're not always strictly
                 * correct, but it's the best we can do. Writes and reads get
                 * combined in these cases.
                 */
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_ICACHE_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(LL)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
                        [C(RESULT_MISS)]        = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
                        [C(RESULT_MISS)]        = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(DTLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(ITLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(BPU)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_BRANCH_PRED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_BRANCH_PRED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(NODE)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
};

/*
 * Perf Events' indices
 */
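/*
 * Index 0 is the dedicated cycle counter; indices 1..num_events-1 map
 * onto the programmable event counters.
 */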
#define ARMV7_IDX_CYCLE_COUNTER 0
#define ARMV7_IDX_COUNTER0      1
#define ARMV7_IDX_COUNTER_LAST  (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define ARMV7_MAX_COUNTERS      32
#define ARMV7_COUNTER_MASK      (ARMV7_MAX_COUNTERS - 1)

/*
 * ARMv7 low level PMNC access
 */

/*
 * Perf Event to low level counters mapping
 */
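/*
 * Note that the subtraction below wraps for the cycle counter: idx 0
 * becomes bit 31, which is where the ARMv7 PMU keeps the cycle counter
 * in the enable, interrupt-enable and overflow flag registers. Event
 * counter idx N maps to bit N - 1.
 */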
#define ARMV7_IDX_TO_COUNTER(x) \
        (((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)

/*
 * Per-CPU PMNC: config reg
 */
#define ARMV7_PMNC_E            (1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P            (1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C            (1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D            (1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X            (1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP           (1 << 5) /* Disable CCNT if non-invasive debug */
#define ARMV7_PMNC_N_SHIFT      11       /* Number of counters supported */
#define ARMV7_PMNC_N_MASK       0x1f
#define ARMV7_PMNC_MASK         0x3f     /* Mask for writable bits */

/*
 * FLAG: counters overflow flag status reg
 */
#define ARMV7_FLAG_MASK         0xffffffff      /* Mask for writable bits */
#define ARMV7_OVERFLOWED_MASK   ARMV7_FLAG_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
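/*
 * The writable bits are the PL1/USER/HYP filter controls (bits 31, 30
 * and 27) plus the 8-bit event number, which together give the
 * 0xc80000ff mask below.
 */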
#define ARMV7_EVTYPE_MASK       0xc80000ff      /* Mask for writable bits */
#define ARMV7_EVTYPE_EVENT      0xff            /* Mask for EVENT bits */

/*
 * Event filters for PMUv2
 */
#define ARMV7_EXCLUDE_PL1       (1 << 31)
#define ARMV7_EXCLUDE_USER      (1 << 30)
#define ARMV7_INCLUDE_HYP       (1 << 27)

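/*
 * PMNC is the PMCR control register; the isb() in the write accessor
 * synchronises the pipeline before the new control value takes effect.
 */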
static inline u32 armv7_pmnc_read(void)
{
        u32 val;
        asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
        return val;
}

static inline void armv7_pmnc_write(u32 val)
{
        val &= ARMV7_PMNC_MASK;
        isb();
        asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}

static inline int armv7_pmnc_has_overflowed(u32 pmnc)
{
        return pmnc & ARMV7_OVERFLOWED_MASK;
}

static inline int armv7_pmnc_counter_valid(int idx)
{
        return idx >= ARMV7_IDX_CYCLE_COUNTER && idx <= ARMV7_IDX_COUNTER_LAST;
}

static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
{
        int ret = 0;
        u32 counter;

        if (!armv7_pmnc_counter_valid(idx)) {
                pr_err("CPU%u checking wrong counter %d overflow status\n",
                        smp_processor_id(), idx);
        } else {
                counter = ARMV7_IDX_TO_COUNTER(idx);
                ret = pmnc & BIT(counter);
        }

        return ret;
}

705 {
706         u32 counter;
707
708         if (!armv7_pmnc_counter_valid(idx)) {
709                 pr_err("CPU%u selecting wrong PMNC counter %d\n",
710                         smp_processor_id(), idx);
711                 return -EINVAL;
712         }
713
714         counter = ARMV7_IDX_TO_COUNTER(idx);
715         asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
716         isb();
717
718         return idx;
719 }
720
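/*
 * The cycle counter has a dedicated register (PMCCNTR); the event
 * counters are read and written through the PMSELR-selected window
 * (PMXEVCNTR).
 */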
static inline u32 armv7pmu_read_counter(int idx)
{
        u32 value = 0;

        if (!armv7_pmnc_counter_valid(idx))
                pr_err("CPU%u reading wrong counter %d\n",
                        smp_processor_id(), idx);
        else if (idx == ARMV7_IDX_CYCLE_COUNTER)
                asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
        else if (armv7_pmnc_select_counter(idx) == idx)
                asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));

        return value;
}

static inline void armv7pmu_write_counter(int idx, u32 value)
{
        if (!armv7_pmnc_counter_valid(idx))
                pr_err("CPU%u writing wrong counter %d\n",
                        smp_processor_id(), idx);
        else if (idx == ARMV7_IDX_CYCLE_COUNTER)
                asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
        else if (armv7_pmnc_select_counter(idx) == idx)
                asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
}

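/*
 * Program the event type (and any PMUv2 filter bits) for the currently
 * selected counter via PMXEVTYPER.
 */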
static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
{
        if (armv7_pmnc_select_counter(idx) == idx) {
                val &= ARMV7_EVTYPE_MASK;
                asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
        }
}

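/*
 * Counter enables use separate set (PMCNTENSET) and clear (PMCNTENCLR)
 * registers, so a single write of the counter's bit suffices and no
 * read-modify-write is needed.
 */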
static inline int armv7_pmnc_enable_counter(int idx)
{
        u32 counter;

        if (!armv7_pmnc_counter_valid(idx)) {
                pr_err("CPU%u enabling wrong PMNC counter %d\n",
                        smp_processor_id(), idx);
                return -EINVAL;
        }

        counter = ARMV7_IDX_TO_COUNTER(idx);
        asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
        return idx;
}

static inline int armv7_pmnc_disable_counter(int idx)
{
        u32 counter;

        if (!armv7_pmnc_counter_valid(idx)) {
                pr_err("CPU%u disabling wrong PMNC counter %d\n",
                        smp_processor_id(), idx);
                return -EINVAL;
        }

        counter = ARMV7_IDX_TO_COUNTER(idx);
        asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
        return idx;
}

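/*
 * Overflow interrupt enables follow the same write-one-to-set/clear
 * scheme, via PMINTENSET and PMINTENCLR.
 */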
static inline int armv7_pmnc_enable_intens(int idx)
{
        u32 counter;

        if (!armv7_pmnc_counter_valid(idx)) {
                pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
                        smp_processor_id(), idx);
                return -EINVAL;
        }

        counter = ARMV7_IDX_TO_COUNTER(idx);
        asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
        return idx;
}

static inline int armv7_pmnc_disable_intens(int idx)
{
        u32 counter;

        if (!armv7_pmnc_counter_valid(idx)) {
                pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
                        smp_processor_id(), idx);
                return -EINVAL;
        }

        counter = ARMV7_IDX_TO_COUNTER(idx);
        asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
        isb();
        /* Clear the overflow flag in case an interrupt is pending. */
        asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
        isb();

        return idx;
}

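/*
 * Read the overflow flag status register (PMOVSR) and clear the flags
 * that were set; the register is write-one-to-clear.
 */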
static inline u32 armv7_pmnc_getreset_flags(void)
{
        u32 val;

        /* Read */
        asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

        /* Write to clear flags */
        val &= ARMV7_FLAG_MASK;
        asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));

        return val;
}

#ifdef DEBUG
static void armv7_pmnc_dump_regs(void)
{
        u32 val;
        unsigned int cnt;

        printk(KERN_INFO "PMNC registers dump:\n");

        asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
        printk(KERN_INFO "PMNC  =0x%08x\n", val);

        asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
        printk(KERN_INFO "CNTENS=0x%08x\n", val);

        asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
        printk(KERN_INFO "INTENS=0x%08x\n", val);

        asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
        printk(KERN_INFO "FLAGS =0x%08x\n", val);

        asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
        printk(KERN_INFO "SELECT=0x%08x\n", val);

        asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
        printk(KERN_INFO "CCNT  =0x%08x\n", val);

        for (cnt = ARMV7_IDX_COUNTER0; cnt <= ARMV7_IDX_COUNTER_LAST; cnt++) {
                armv7_pmnc_select_counter(cnt);
                asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
                printk(KERN_INFO "CNT[%d] count =0x%08x\n",
                        ARMV7_IDX_TO_COUNTER(cnt), val);
                asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
                printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
                        ARMV7_IDX_TO_COUNTER(cnt), val);
        }
}
#endif

static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
        unsigned long flags;
        struct pmu_hw_events *events = cpu_pmu->get_hw_events();

        /*
         * Enable counter and interrupt, and set the counter to count
         * the event that we're interested in.
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);

        /*
         * Disable counter
         */
        armv7_pmnc_disable_counter(idx);

        /*
         * Set event (if destined for PMNx counters)
         * We only need to set the event for the cycle counter if we
         * have the ability to perform event filtering.
         */
        if (armv7pmu.set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
                armv7_pmnc_write_evtsel(idx, hwc->config_base);

        /*
         * Enable interrupt for this counter
         */
        armv7_pmnc_enable_intens(idx);

        /*
         * Enable counter
         */
        armv7_pmnc_enable_counter(idx);

        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
        unsigned long flags;
        struct pmu_hw_events *events = cpu_pmu->get_hw_events();

        /*
         * Disable counter and interrupt
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);

        /*
         * Disable counter
         */
        armv7_pmnc_disable_counter(idx);

        /*
         * Disable interrupt for this counter
         */
        armv7_pmnc_disable_intens(idx);

        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
        u32 pmnc;
        struct perf_sample_data data;
        struct pmu_hw_events *cpuc;
        struct pt_regs *regs;
        int idx;

        /*
         * Get and reset the IRQ flags
         */
        pmnc = armv7_pmnc_getreset_flags();

        /*
         * Did an overflow occur?
         */
        if (!armv7_pmnc_has_overflowed(pmnc))
                return IRQ_NONE;

        /*
         * Handle the counter(s) overflow(s)
         */
        regs = get_irq_regs();

        perf_sample_data_init(&data, 0);

        cpuc = &__get_cpu_var(cpu_hw_events);
        for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;

                /* Ignore if we don't have an event. */
                if (!event)
                        continue;

                /*
                 * We have a single interrupt for all counters. Check that
                 * each counter has overflowed before we process it.
                 */
                if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
                        continue;

                hwc = &event->hw;
                armpmu_event_update(event, hwc, idx);
                data.period = event->hw.last_period;
                if (!armpmu_event_set_period(event, hwc, idx))
                        continue;

                if (perf_event_overflow(event, &data, regs))
                        cpu_pmu->disable(hwc, idx);
        }

        /*
         * Handle the pending perf events.
         *
         * Note: this call *must* be run with interrupts disabled. For
         * platforms that can have the PMU interrupts raised as an NMI, this
         * will not work.
         */
        irq_work_run();

        return IRQ_HANDLED;
}

static void armv7pmu_start(void)
{
        unsigned long flags;
        struct pmu_hw_events *events = cpu_pmu->get_hw_events();

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        /* Enable all counters */
        armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv7pmu_stop(void)
{
        unsigned long flags;
        struct pmu_hw_events *events = cpu_pmu->get_hw_events();

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        /* Disable all counters */
        armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
                                  struct hw_perf_event *event)
{
        int idx;
        unsigned long evtype = event->config_base & ARMV7_EVTYPE_EVENT;

        /* Always place a cycle counter event into the dedicated cycle counter. */
        if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
                if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
                        return -EAGAIN;

                return ARMV7_IDX_CYCLE_COUNTER;
        }

        /*
         * For anything other than a cycle counter, try to use
         * the event counters
         */
        for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
                if (!test_and_set_bit(idx, cpuc->used_mask))
                        return idx;
        }

        /* The counters are all in use. */
        return -EAGAIN;
}

/*
 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
 */
static int armv7pmu_set_event_filter(struct hw_perf_event *event,
                                     struct perf_event_attr *attr)
{
        unsigned long config_base = 0;

        if (attr->exclude_idle)
                return -EPERM;
        if (attr->exclude_user)
                config_base |= ARMV7_EXCLUDE_USER;
        if (attr->exclude_kernel)
                config_base |= ARMV7_EXCLUDE_PL1;
        if (!attr->exclude_hv)
                config_base |= ARMV7_INCLUDE_HYP;

        /*
         * Install the filter into config_base as this is used to
         * construct the event type.
         */
        event->config_base = config_base;

        return 0;
}

static void armv7pmu_reset(void *info)
{
        u32 idx, nb_cnt = cpu_pmu->num_events;

        /* The counter and interrupt enable registers are unknown at reset. */
        for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
                armv7pmu_disable_event(NULL, idx);

        /* Initialize & Reset PMNC: C and P bits */
        armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
}

static int armv7_a8_map_event(struct perf_event *event)
{
        return map_cpu_event(event, &armv7_a8_perf_map,
                                &armv7_a8_perf_cache_map, 0xFF);
}

static int armv7_a9_map_event(struct perf_event *event)
{
        return map_cpu_event(event, &armv7_a9_perf_map,
                                &armv7_a9_perf_cache_map, 0xFF);
}

static int armv7_a5_map_event(struct perf_event *event)
{
        return map_cpu_event(event, &armv7_a5_perf_map,
                                &armv7_a5_perf_cache_map, 0xFF);
}

static int armv7_a15_map_event(struct perf_event *event)
{
        return map_cpu_event(event, &armv7_a15_perf_map,
                                &armv7_a15_perf_cache_map, 0xFF);
}

static struct arm_pmu armv7pmu = {
        .handle_irq             = armv7pmu_handle_irq,
        .enable                 = armv7pmu_enable_event,
        .disable                = armv7pmu_disable_event,
        .read_counter           = armv7pmu_read_counter,
        .write_counter          = armv7pmu_write_counter,
        .get_event_idx          = armv7pmu_get_event_idx,
        .start                  = armv7pmu_start,
        .stop                   = armv7pmu_stop,
        .reset                  = armv7pmu_reset,
        .max_period             = (1LLU << 32) - 1,
};

static u32 __init armv7_read_num_pmnc_events(void)
{
        u32 nb_cnt;

        /* Read the number of CNTx counters supported from PMNC */
        nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

        /* Add the CPU cycles counter and return */
        return nb_cnt + 1;
}

static struct arm_pmu *__init armv7_a8_pmu_init(void)
{
        armv7pmu.id             = ARM_PERF_PMU_ID_CA8;
        armv7pmu.name           = "ARMv7 Cortex-A8";
        armv7pmu.map_event      = armv7_a8_map_event;
        armv7pmu.num_events     = armv7_read_num_pmnc_events();
        return &armv7pmu;
}

static struct arm_pmu *__init armv7_a9_pmu_init(void)
{
        armv7pmu.id             = ARM_PERF_PMU_ID_CA9;
        armv7pmu.name           = "ARMv7 Cortex-A9";
        armv7pmu.map_event      = armv7_a9_map_event;
        armv7pmu.num_events     = armv7_read_num_pmnc_events();
        return &armv7pmu;
}

static struct arm_pmu *__init armv7_a5_pmu_init(void)
{
        armv7pmu.id             = ARM_PERF_PMU_ID_CA5;
        armv7pmu.name           = "ARMv7 Cortex-A5";
        armv7pmu.map_event      = armv7_a5_map_event;
        armv7pmu.num_events     = armv7_read_num_pmnc_events();
        return &armv7pmu;
}

static struct arm_pmu *__init armv7_a15_pmu_init(void)
{
        armv7pmu.id             = ARM_PERF_PMU_ID_CA15;
        armv7pmu.name           = "ARMv7 Cortex-A15";
        armv7pmu.map_event      = armv7_a15_map_event;
        armv7pmu.num_events     = armv7_read_num_pmnc_events();
        armv7pmu.set_event_filter = armv7pmu_set_event_filter;
        return &armv7pmu;
}
#else
static struct arm_pmu *__init armv7_a8_pmu_init(void)
{
        return NULL;
}

static struct arm_pmu *__init armv7_a9_pmu_init(void)
{
        return NULL;
}

static struct arm_pmu *__init armv7_a5_pmu_init(void)
{
        return NULL;
}

static struct arm_pmu *__init armv7_a15_pmu_init(void)
{
        return NULL;
}
#endif  /* CONFIG_CPU_V7 */