arch/x86/kernel/cpu/perf_event_intel_uncore.c  [pandora-kernel.git, commit cfc6f9dfcd907554559de1f396c631fc582104cb]
1 #include "perf_event_intel_uncore.h"
2
3 static struct intel_uncore_type *empty_uncore[] = { NULL, };
4 static struct intel_uncore_type **msr_uncores = empty_uncore;
5 static struct intel_uncore_type **pci_uncores = empty_uncore;
6 /* pci bus to socket mapping */
7 static int pcibus_to_physid[256] = { [0 ... 255] = -1, };
8
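/* auxiliary per-socket PCI devices, e.g. the QPI port filter devices */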
9 static struct pci_dev *extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX];
10
11 static DEFINE_RAW_SPINLOCK(uncore_box_lock);
12
13 /* mask of cpus that collect uncore events */
14 static cpumask_t uncore_cpu_mask;
15
16 /* constraint for the fixed counter */
17 static struct event_constraint constraint_fixed =
18         EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
19 static struct event_constraint constraint_empty =
20         EVENT_CONSTRAINT(0, 0, 0);
21
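/*
 * Extract the i-th n-bit wide field from x; used below to unpack the
 * per-field reference counts that are packed into a single atomic_t.
 */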
22 #define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
23                                 ((1ULL << (n)) - 1)))
24
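/*
 * sysfs "format" attributes: each entry maps a user-visible field name to
 * the bit range it occupies in attr.config/config1/config2.
 */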
25 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
26 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
27 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
28 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
29 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
30 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
31 DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
32 DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
33 DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
34 DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
35 DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
36 DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
37 DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
38 DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
39 DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
40 DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
41 DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
42 DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
43 DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
44 DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
45 DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
46 DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
47 DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
48 DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
49 DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
50 DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
51 DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
52 DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
53 DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
54 DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
55 DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
56 DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
57 DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
58 DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
59 DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
60 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
61 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
62 DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
63 DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
64 DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
65 DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
66 DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
67 DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
68
69 static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
70 static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
71 static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
72 static void uncore_pmu_event_read(struct perf_event *event);
73
74 static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
75 {
76         return container_of(event->pmu, struct intel_uncore_pmu, pmu);
77 }
78
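/*
 * Find the box that serves this PMU on @cpu's physical package and cache
 * the pointer in the per-cpu slot, so subsequent lookups avoid the lock.
 */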
79 static struct intel_uncore_box *
80 uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
81 {
82         struct intel_uncore_box *box;
83
84         box = *per_cpu_ptr(pmu->box, cpu);
85         if (box)
86                 return box;
87
88         raw_spin_lock(&uncore_box_lock);
89         list_for_each_entry(box, &pmu->box_list, list) {
90                 if (box->phys_id == topology_physical_package_id(cpu)) {
91                         atomic_inc(&box->refcnt);
92                         *per_cpu_ptr(pmu->box, cpu) = box;
93                         break;
94                 }
95         }
96         raw_spin_unlock(&uncore_box_lock);
97
98         return *per_cpu_ptr(pmu->box, cpu);
99 }
100
101 static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
102 {
103         /*
104          * The perf core schedules events on a per-CPU basis, but uncore
105          * events are collected by one of the CPUs inside a physical package.
106          */
107         return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
108 }
109
110 static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
111 {
112         u64 count;
113
114         rdmsrl(event->hw.event_base, count);
115
116         return count;
117 }
118
119 /*
120  * generic get constraint function for shared match/mask registers.
121  */
122 static struct event_constraint *
123 uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
124 {
125         struct intel_uncore_extra_reg *er;
126         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
127         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
128         unsigned long flags;
129         bool ok = false;
130
131         /*
132          * reg->alloc can be set due to existing state, so for a fake box
133          * we need to ignore it; otherwise we might fail to allocate proper
134          * fake state for this extra reg constraint.
135          */
136         if (reg1->idx == EXTRA_REG_NONE ||
137             (!uncore_box_is_fake(box) && reg1->alloc))
138                 return NULL;
139
140         er = &box->shared_regs[reg1->idx];
141         raw_spin_lock_irqsave(&er->lock, flags);
142         if (!atomic_read(&er->ref) ||
143             (er->config1 == reg1->config && er->config2 == reg2->config)) {
144                 atomic_inc(&er->ref);
145                 er->config1 = reg1->config;
146                 er->config2 = reg2->config;
147                 ok = true;
148         }
149         raw_spin_unlock_irqrestore(&er->lock, flags);
150
151         if (ok) {
152                 if (!uncore_box_is_fake(box))
153                         reg1->alloc = 1;
154                 return NULL;
155         }
156
157         return &constraint_empty;
158 }
159
160 static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
161 {
162         struct intel_uncore_extra_reg *er;
163         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
164
165         /*
166          * Only put the constraint if the extra reg was actually allocated.
167          * This also takes care of events which do not use an extra shared reg.
168          *
169          * Also, if this is a fake box we shouldn't touch any event state
170          * (reg->alloc) and we don't care about leaving inconsistent box
171          * state either since it will be thrown out.
172          */
173         if (uncore_box_is_fake(box) || !reg1->alloc)
174                 return;
175
176         er = &box->shared_regs[reg1->idx];
177         atomic_dec(&er->ref);
178         reg1->alloc = 0;
179 }
180
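/* snapshot the current value of shared extra register @idx under its lock */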
181 static u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
182 {
183         struct intel_uncore_extra_reg *er;
184         unsigned long flags;
185         u64 config;
186
187         er = &box->shared_regs[idx];
188
189         raw_spin_lock_irqsave(&er->lock, flags);
190         config = er->config;
191         raw_spin_unlock_irqrestore(&er->lock, flags);
192
193         return config;
194 }
195
196 /* Sandy Bridge-EP uncore support */
197 static struct intel_uncore_type snbep_uncore_cbox;
198 static struct intel_uncore_type snbep_uncore_pcu;
199
200 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
201 {
202         struct pci_dev *pdev = box->pci_dev;
203         int box_ctl = uncore_pci_box_ctl(box);
204         u32 config = 0;
205
206         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
207                 config |= SNBEP_PMON_BOX_CTL_FRZ;
208                 pci_write_config_dword(pdev, box_ctl, config);
209         }
210 }
211
212 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
213 {
214         struct pci_dev *pdev = box->pci_dev;
215         int box_ctl = uncore_pci_box_ctl(box);
216         u32 config = 0;
217
218         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
219                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
220                 pci_write_config_dword(pdev, box_ctl, config);
221         }
222 }
223
224 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
225 {
226         struct pci_dev *pdev = box->pci_dev;
227         struct hw_perf_event *hwc = &event->hw;
228
229         pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
230 }
231
232 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
233 {
234         struct pci_dev *pdev = box->pci_dev;
235         struct hw_perf_event *hwc = &event->hw;
236
237         pci_write_config_dword(pdev, hwc->config_base, hwc->config);
238 }
239
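/* counters behind PCI config space are read as two 32-bit halves */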
240 static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
241 {
242         struct pci_dev *pdev = box->pci_dev;
243         struct hw_perf_event *hwc = &event->hw;
244         u64 count = 0;
245
246         pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
247         pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
248
249         return count;
250 }
251
252 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
253 {
254         struct pci_dev *pdev = box->pci_dev;
255
256         pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
257 }
258
259 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
260 {
261         u64 config;
262         unsigned msr;
263
264         msr = uncore_msr_box_ctl(box);
265         if (msr) {
266                 rdmsrl(msr, config);
267                 config |= SNBEP_PMON_BOX_CTL_FRZ;
268                 wrmsrl(msr, config);
269         }
270 }
271
272 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
273 {
274         u64 config;
275         unsigned msr;
276
277         msr = uncore_msr_box_ctl(box);
278         if (msr) {
279                 rdmsrl(msr, config);
280                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
281                 wrmsrl(msr, config);
282         }
283 }
284
285 static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
286 {
287         struct hw_perf_event *hwc = &event->hw;
288         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
289
290         if (reg1->idx != EXTRA_REG_NONE)
291                 wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
292
293         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
294 }
295
296 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
297                                         struct perf_event *event)
298 {
299         struct hw_perf_event *hwc = &event->hw;
300
301         wrmsrl(hwc->config_base, hwc->config);
302 }
303
304 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
305 {
306         unsigned msr = uncore_msr_box_ctl(box);
307
308         if (msr)
309                 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
310 }
311
312 static struct attribute *snbep_uncore_formats_attr[] = {
313         &format_attr_event.attr,
314         &format_attr_umask.attr,
315         &format_attr_edge.attr,
316         &format_attr_inv.attr,
317         &format_attr_thresh8.attr,
318         NULL,
319 };
320
321 static struct attribute *snbep_uncore_ubox_formats_attr[] = {
322         &format_attr_event.attr,
323         &format_attr_umask.attr,
324         &format_attr_edge.attr,
325         &format_attr_inv.attr,
326         &format_attr_thresh5.attr,
327         NULL,
328 };
329
330 static struct attribute *snbep_uncore_cbox_formats_attr[] = {
331         &format_attr_event.attr,
332         &format_attr_umask.attr,
333         &format_attr_edge.attr,
334         &format_attr_tid_en.attr,
335         &format_attr_inv.attr,
336         &format_attr_thresh8.attr,
337         &format_attr_filter_tid.attr,
338         &format_attr_filter_nid.attr,
339         &format_attr_filter_state.attr,
340         &format_attr_filter_opc.attr,
341         NULL,
342 };
343
344 static struct attribute *snbep_uncore_pcu_formats_attr[] = {
345         &format_attr_event_ext.attr,
346         &format_attr_occ_sel.attr,
347         &format_attr_edge.attr,
348         &format_attr_inv.attr,
349         &format_attr_thresh5.attr,
350         &format_attr_occ_invert.attr,
351         &format_attr_occ_edge.attr,
352         &format_attr_filter_band0.attr,
353         &format_attr_filter_band1.attr,
354         &format_attr_filter_band2.attr,
355         &format_attr_filter_band3.attr,
356         NULL,
357 };
358
359 static struct attribute *snbep_uncore_qpi_formats_attr[] = {
360         &format_attr_event_ext.attr,
361         &format_attr_umask.attr,
362         &format_attr_edge.attr,
363         &format_attr_inv.attr,
364         &format_attr_thresh8.attr,
365         &format_attr_match_rds.attr,
366         &format_attr_match_rnid30.attr,
367         &format_attr_match_rnid4.attr,
368         &format_attr_match_dnid.attr,
369         &format_attr_match_mc.attr,
370         &format_attr_match_opc.attr,
371         &format_attr_match_vnw.attr,
372         &format_attr_match0.attr,
373         &format_attr_match1.attr,
374         &format_attr_mask_rds.attr,
375         &format_attr_mask_rnid30.attr,
376         &format_attr_mask_rnid4.attr,
377         &format_attr_mask_dnid.attr,
378         &format_attr_mask_mc.attr,
379         &format_attr_mask_opc.attr,
380         &format_attr_mask_vnw.attr,
381         &format_attr_mask0.attr,
382         &format_attr_mask1.attr,
383         NULL,
384 };
385
386 static struct uncore_event_desc snbep_uncore_imc_events[] = {
387         INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
388         INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
389         INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
390         { /* end: all zeroes */ },
391 };
392
393 static struct uncore_event_desc snbep_uncore_qpi_events[] = {
394         INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
395         INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
396         INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
397         INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
398         { /* end: all zeroes */ },
399 };
400
401 static struct attribute_group snbep_uncore_format_group = {
402         .name = "format",
403         .attrs = snbep_uncore_formats_attr,
404 };
405
406 static struct attribute_group snbep_uncore_ubox_format_group = {
407         .name = "format",
408         .attrs = snbep_uncore_ubox_formats_attr,
409 };
410
411 static struct attribute_group snbep_uncore_cbox_format_group = {
412         .name = "format",
413         .attrs = snbep_uncore_cbox_formats_attr,
414 };
415
416 static struct attribute_group snbep_uncore_pcu_format_group = {
417         .name = "format",
418         .attrs = snbep_uncore_pcu_formats_attr,
419 };
420
421 static struct attribute_group snbep_uncore_qpi_format_group = {
422         .name = "format",
423         .attrs = snbep_uncore_qpi_formats_attr,
424 };
425
426 #define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()                      \
427         .init_box       = snbep_uncore_msr_init_box,            \
428         .disable_box    = snbep_uncore_msr_disable_box,         \
429         .enable_box     = snbep_uncore_msr_enable_box,          \
430         .disable_event  = snbep_uncore_msr_disable_event,       \
431         .enable_event   = snbep_uncore_msr_enable_event,        \
432         .read_counter   = uncore_msr_read_counter
433
434 static struct intel_uncore_ops snbep_uncore_msr_ops = {
435         SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
436 };
437
438 #define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()                      \
439         .init_box       = snbep_uncore_pci_init_box,            \
440         .disable_box    = snbep_uncore_pci_disable_box,         \
441         .enable_box     = snbep_uncore_pci_enable_box,          \
442         .disable_event  = snbep_uncore_pci_disable_event,       \
443         .read_counter   = snbep_uncore_pci_read_counter
444
445 static struct intel_uncore_ops snbep_uncore_pci_ops = {
446         SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
447         .enable_event   = snbep_uncore_pci_enable_event,
448 };
449
450 static struct event_constraint snbep_uncore_cbox_constraints[] = {
451         UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
452         UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
453         UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
454         UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
455         UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
456         UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
457         UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
458         UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
459         UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
460         UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
461         UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
462         UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
463         UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
464         EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
465         UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
466         UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
467         UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
468         UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
469         UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
470         UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
471         UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
472         UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
473         UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
474         UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
475         UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
476         UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
477         EVENT_CONSTRAINT_END
478 };
479
480 static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
481         UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
482         UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
483         UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
484         UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
485         UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
486         UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
487         UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
488         UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
489         UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
490         UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
491         EVENT_CONSTRAINT_END
492 };
493
494 static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
495         UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
496         UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
497         UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
498         UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
499         UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
500         UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
501         UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
502         UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
503         UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
504         UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
505         UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
506         UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
507         UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
508         UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
509         UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
510         UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
511         UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
512         UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
513         UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
514         UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
515         UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
516         UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
517         UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
518         UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
519         UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
520         UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
521         UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
522         UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
523         EVENT_CONSTRAINT_END
524 };
525
526 static struct intel_uncore_type snbep_uncore_ubox = {
527         .name           = "ubox",
528         .num_counters   = 2,
529         .num_boxes      = 1,
530         .perf_ctr_bits  = 44,
531         .fixed_ctr_bits = 48,
532         .perf_ctr       = SNBEP_U_MSR_PMON_CTR0,
533         .event_ctl      = SNBEP_U_MSR_PMON_CTL0,
534         .event_mask     = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
535         .fixed_ctr      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
536         .fixed_ctl      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
537         .ops            = &snbep_uncore_msr_ops,
538         .format_group   = &snbep_uncore_ubox_format_group,
539 };
540
541 static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
542         SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
543                                   SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
544         SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
545         SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
546         SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
547         SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
548         SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
549         SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
550         SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
551         SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
552         SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
553         SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
554         SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
555         SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
556         SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
557         SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
558         SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
559         SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
560         SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
561         SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
562         SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
563         SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
564         SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
565         SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
566         SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
567         EVENT_EXTRA_END
568 };
569
570 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
571 {
572         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
573         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
574         int i;
575
576         if (uncore_box_is_fake(box))
577                 return;
578
579         for (i = 0; i < 5; i++) {
580                 if (reg1->alloc & (0x1 << i))
581                         atomic_sub(1 << (i * 6), &er->ref);
582         }
583         reg1->alloc = 0;
584 }
585
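/*
 * The Cbox filter register is shared by all counters in the box.  Each of
 * the (up to) five filter fields has its own 6-bit reference count packed
 * into er->ref; a field can only be used if it is free or already holds
 * the same value the new event wants.
 */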
586 static struct event_constraint *
587 __snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
588                             u64 (*cbox_filter_mask)(int fields))
589 {
590         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
591         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
592         int i, alloc = 0;
593         unsigned long flags;
594         u64 mask;
595
596         if (reg1->idx == EXTRA_REG_NONE)
597                 return NULL;
598
599         raw_spin_lock_irqsave(&er->lock, flags);
600         for (i = 0; i < 5; i++) {
601                 if (!(reg1->idx & (0x1 << i)))
602                         continue;
603                 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
604                         continue;
605
606                 mask = cbox_filter_mask(0x1 << i);
607                 if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
608                     !((reg1->config ^ er->config) & mask)) {
609                         atomic_add(1 << (i * 6), &er->ref);
610                         er->config &= ~mask;
611                         er->config |= reg1->config & mask;
612                         alloc |= (0x1 << i);
613                 } else {
614                         break;
615                 }
616         }
617         raw_spin_unlock_irqrestore(&er->lock, flags);
618         if (i < 5)
619                 goto fail;
620
621         if (!uncore_box_is_fake(box))
622                 reg1->alloc |= alloc;
623
624         return NULL;
625 fail:
626         for (; i >= 0; i--) {
627                 if (alloc & (0x1 << i))
628                         atomic_sub(1 << (i * 6), &er->ref);
629         }
630         return &constraint_empty;
631 }
632
633 static u64 snbep_cbox_filter_mask(int fields)
634 {
635         u64 mask = 0;
636
637         if (fields & 0x1)
638                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
639         if (fields & 0x2)
640                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
641         if (fields & 0x4)
642                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
643         if (fields & 0x8)
644                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
645
646         return mask;
647 }
648
649 static struct event_constraint *
650 snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
651 {
652         return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
653 }
654
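/*
 * Work out which filter fields the event needs by matching it against the
 * extra_regs table, then record the per-box filter MSR and the relevant
 * config1 bits in the event's extra reg.
 */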
655 static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
656 {
657         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
658         struct extra_reg *er;
659         int idx = 0;
660
661         for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
662                 if (er->event != (event->hw.config & er->config_mask))
663                         continue;
664                 idx |= er->idx;
665         }
666
667         if (idx) {
668                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
669                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
670                 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
671                 reg1->idx = idx;
672         }
673         return 0;
674 }
675
676 static struct intel_uncore_ops snbep_uncore_cbox_ops = {
677         SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
678         .hw_config              = snbep_cbox_hw_config,
679         .get_constraint         = snbep_cbox_get_constraint,
680         .put_constraint         = snbep_cbox_put_constraint,
681 };
682
683 static struct intel_uncore_type snbep_uncore_cbox = {
684         .name                   = "cbox",
685         .num_counters           = 4,
686         .num_boxes              = 8,
687         .perf_ctr_bits          = 44,
688         .event_ctl              = SNBEP_C0_MSR_PMON_CTL0,
689         .perf_ctr               = SNBEP_C0_MSR_PMON_CTR0,
690         .event_mask             = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
691         .box_ctl                = SNBEP_C0_MSR_PMON_BOX_CTL,
692         .msr_offset             = SNBEP_CBO_MSR_OFFSET,
693         .num_shared_regs        = 1,
694         .constraints            = snbep_uncore_cbox_constraints,
695         .ops                    = &snbep_uncore_cbox_ops,
696         .format_group           = &snbep_uncore_cbox_format_group,
697 };
698
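/*
 * Move the PCU band filter value from its current byte lane (reg1->idx) to
 * @new_idx; when @modify is set, the event's config and extra reg are
 * updated to match the new lane.
 */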
699 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
700 {
701         struct hw_perf_event *hwc = &event->hw;
702         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
703         u64 config = reg1->config;
704
705         if (new_idx > reg1->idx)
706                 config <<= 8 * (new_idx - reg1->idx);
707         else
708                 config >>= 8 * (reg1->idx - new_idx);
709
710         if (modify) {
711                 hwc->config += new_idx - reg1->idx;
712                 reg1->config = config;
713                 reg1->idx = new_idx;
714         }
715         return config;
716 }
717
718 static struct event_constraint *
719 snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
720 {
721         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
722         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
723         unsigned long flags;
724         int idx = reg1->idx;
725         u64 mask, config1 = reg1->config;
726         bool ok = false;
727
728         if (reg1->idx == EXTRA_REG_NONE ||
729             (!uncore_box_is_fake(box) && reg1->alloc))
730                 return NULL;
731 again:
732         mask = 0xffULL << (idx * 8);
733         raw_spin_lock_irqsave(&er->lock, flags);
734         if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
735             !((config1 ^ er->config) & mask)) {
736                 atomic_add(1 << (idx * 8), &er->ref);
737                 er->config &= ~mask;
738                 er->config |= config1 & mask;
739                 ok = true;
740         }
741         raw_spin_unlock_irqrestore(&er->lock, flags);
742
743         if (!ok) {
744                 idx = (idx + 1) % 4;
745                 if (idx != reg1->idx) {
746                         config1 = snbep_pcu_alter_er(event, idx, false);
747                         goto again;
748                 }
749                 return &constraint_empty;
750         }
751
752         if (!uncore_box_is_fake(box)) {
753                 if (idx != reg1->idx)
754                         snbep_pcu_alter_er(event, idx, true);
755                 reg1->alloc = 1;
756         }
757         return NULL;
758 }
759
760 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
761 {
762         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
763         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
764
765         if (uncore_box_is_fake(box) || !reg1->alloc)
766                 return;
767
768         atomic_sub(1 << (reg1->idx * 8), &er->ref);
769         reg1->alloc = 0;
770 }
771
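/*
 * Event selects 0xb-0xe take a value from the PCU filter register; each
 * maps to one of the four band fields (filter_band0-3) in config1.
 */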
772 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
773 {
774         struct hw_perf_event *hwc = &event->hw;
775         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
776         int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
777
778         if (ev_sel >= 0xb && ev_sel <= 0xe) {
779                 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
780                 reg1->idx = ev_sel - 0xb;
781                 reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
782         }
783         return 0;
784 }
785
786 static struct intel_uncore_ops snbep_uncore_pcu_ops = {
787         SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
788         .hw_config              = snbep_pcu_hw_config,
789         .get_constraint         = snbep_pcu_get_constraint,
790         .put_constraint         = snbep_pcu_put_constraint,
791 };
792
793 static struct intel_uncore_type snbep_uncore_pcu = {
794         .name                   = "pcu",
795         .num_counters           = 4,
796         .num_boxes              = 1,
797         .perf_ctr_bits          = 48,
798         .perf_ctr               = SNBEP_PCU_MSR_PMON_CTR0,
799         .event_ctl              = SNBEP_PCU_MSR_PMON_CTL0,
800         .event_mask             = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
801         .box_ctl                = SNBEP_PCU_MSR_PMON_BOX_CTL,
802         .num_shared_regs        = 1,
803         .ops                    = &snbep_uncore_pcu_ops,
804         .format_group           = &snbep_uncore_pcu_format_group,
805 };
806
807 static struct intel_uncore_type *snbep_msr_uncores[] = {
808         &snbep_uncore_ubox,
809         &snbep_uncore_cbox,
810         &snbep_uncore_pcu,
811         NULL,
812 };
813
814 enum {
815         SNBEP_PCI_QPI_PORT0_FILTER,
816         SNBEP_PCI_QPI_PORT1_FILTER,
817 };
818
819 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
820 {
821         struct hw_perf_event *hwc = &event->hw;
822         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
823         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
824
825         if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
826                 reg1->idx = 0;
827                 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
828                 reg1->config = event->attr.config1;
829                 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
830                 reg2->config = event->attr.config2;
831         }
832         return 0;
833 }
834
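/*
 * The QPI match/mask registers live in a separate "filter" PCI function
 * (the 0x3c86/0x3c96 entries in the device table below), so look up the
 * corresponding entry in extra_pci_dev[] and program it before enabling
 * the counter itself.
 */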
835 static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
836 {
837         struct pci_dev *pdev = box->pci_dev;
838         struct hw_perf_event *hwc = &event->hw;
839         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
840         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
841
842         if (reg1->idx != EXTRA_REG_NONE) {
843                 int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
844                 struct pci_dev *filter_pdev = extra_pci_dev[box->phys_id][idx];
845                 WARN_ON_ONCE(!filter_pdev);
846                 if (filter_pdev) {
847                         pci_write_config_dword(filter_pdev, reg1->reg,
848                                                 (u32)reg1->config);
849                         pci_write_config_dword(filter_pdev, reg1->reg + 4,
850                                                 (u32)(reg1->config >> 32));
851                         pci_write_config_dword(filter_pdev, reg2->reg,
852                                                 (u32)reg2->config);
853                         pci_write_config_dword(filter_pdev, reg2->reg + 4,
854                                                 (u32)(reg2->config >> 32));
855                 }
856         }
857
858         pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
859 }
860
861 static struct intel_uncore_ops snbep_uncore_qpi_ops = {
862         SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
863         .enable_event           = snbep_qpi_enable_event,
864         .hw_config              = snbep_qpi_hw_config,
865         .get_constraint         = uncore_get_constraint,
866         .put_constraint         = uncore_put_constraint,
867 };
868
869 #define SNBEP_UNCORE_PCI_COMMON_INIT()                          \
870         .perf_ctr       = SNBEP_PCI_PMON_CTR0,                  \
871         .event_ctl      = SNBEP_PCI_PMON_CTL0,                  \
872         .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,            \
873         .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,               \
874         .ops            = &snbep_uncore_pci_ops,                \
875         .format_group   = &snbep_uncore_format_group
876
877 static struct intel_uncore_type snbep_uncore_ha = {
878         .name           = "ha",
879         .num_counters   = 4,
880         .num_boxes      = 1,
881         .perf_ctr_bits  = 48,
882         SNBEP_UNCORE_PCI_COMMON_INIT(),
883 };
884
885 static struct intel_uncore_type snbep_uncore_imc = {
886         .name           = "imc",
887         .num_counters   = 4,
888         .num_boxes      = 4,
889         .perf_ctr_bits  = 48,
890         .fixed_ctr_bits = 48,
891         .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
892         .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
893         .event_descs    = snbep_uncore_imc_events,
894         SNBEP_UNCORE_PCI_COMMON_INIT(),
895 };
896
897 static struct intel_uncore_type snbep_uncore_qpi = {
898         .name                   = "qpi",
899         .num_counters           = 4,
900         .num_boxes              = 2,
901         .perf_ctr_bits          = 48,
902         .perf_ctr               = SNBEP_PCI_PMON_CTR0,
903         .event_ctl              = SNBEP_PCI_PMON_CTL0,
904         .event_mask             = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
905         .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
906         .num_shared_regs        = 1,
907         .ops                    = &snbep_uncore_qpi_ops,
908         .event_descs            = snbep_uncore_qpi_events,
909         .format_group           = &snbep_uncore_qpi_format_group,
910 };
911
912
913 static struct intel_uncore_type snbep_uncore_r2pcie = {
914         .name           = "r2pcie",
915         .num_counters   = 4,
916         .num_boxes      = 1,
917         .perf_ctr_bits  = 44,
918         .constraints    = snbep_uncore_r2pcie_constraints,
919         SNBEP_UNCORE_PCI_COMMON_INIT(),
920 };
921
922 static struct intel_uncore_type snbep_uncore_r3qpi = {
923         .name           = "r3qpi",
924         .num_counters   = 3,
925         .num_boxes      = 2,
926         .perf_ctr_bits  = 44,
927         .constraints    = snbep_uncore_r3qpi_constraints,
928         SNBEP_UNCORE_PCI_COMMON_INIT(),
929 };
930
931 enum {
932         SNBEP_PCI_UNCORE_HA,
933         SNBEP_PCI_UNCORE_IMC,
934         SNBEP_PCI_UNCORE_QPI,
935         SNBEP_PCI_UNCORE_R2PCIE,
936         SNBEP_PCI_UNCORE_R3QPI,
937 };
938
939 static struct intel_uncore_type *snbep_pci_uncores[] = {
940         [SNBEP_PCI_UNCORE_HA]           = &snbep_uncore_ha,
941         [SNBEP_PCI_UNCORE_IMC]          = &snbep_uncore_imc,
942         [SNBEP_PCI_UNCORE_QPI]          = &snbep_uncore_qpi,
943         [SNBEP_PCI_UNCORE_R2PCIE]       = &snbep_uncore_r2pcie,
944         [SNBEP_PCI_UNCORE_R3QPI]        = &snbep_uncore_r3qpi,
945         NULL,
946 };
947
948 static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
949         { /* Home Agent */
950                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
951                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
952         },
953         { /* MC Channel 0 */
954                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
955                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
956         },
957         { /* MC Channel 1 */
958                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
959                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
960         },
961         { /* MC Channel 2 */
962                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
963                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
964         },
965         { /* MC Channel 3 */
966                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
967                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
968         },
969         { /* QPI Port 0 */
970                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
971                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
972         },
973         { /* QPI Port 1 */
974                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
975                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
976         },
977         { /* R2PCIe */
978                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
979                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
980         },
981         { /* R3QPI Link 0 */
982                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
983                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
984         },
985         { /* R3QPI Link 1 */
986                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
987                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
988         },
989         { /* QPI Port 0 filter  */
990                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
991                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
992                                                    SNBEP_PCI_QPI_PORT0_FILTER),
993         },
994         { /* QPI Port 1 filter  */
995                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
996                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
997                                                    SNBEP_PCI_QPI_PORT1_FILTER),
998         },
999         { /* end: all zeroes */ }
1000 };
1001
1002 static struct pci_driver snbep_uncore_pci_driver = {
1003         .name           = "snbep_uncore",
1004         .id_table       = snbep_uncore_pci_ids,
1005 };
1006
1007 /*
1008  * build the pci bus to socket mapping from each UBOX device's Node ID registers
1009  */
1010 static int snbep_pci2phy_map_init(int devid)
1011 {
1012         struct pci_dev *ubox_dev = NULL;
1013         int i, bus, nodeid;
1014         int err = 0;
1015         u32 config = 0;
1016
1017         while (1) {
1018                 /* find the UBOX device */
1019                 ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
1020                 if (!ubox_dev)
1021                         break;
1022                 bus = ubox_dev->bus->number;
1023                 /* get the Node ID from the local register */
1024                 err = pci_read_config_dword(ubox_dev, 0x40, &config);
1025                 if (err)
1026                         break;
1027                 nodeid = config;
1028                 /* get the Node ID mapping */
1029                 err = pci_read_config_dword(ubox_dev, 0x54, &config);
1030                 if (err)
1031                         break;
1032                 /*
1033                  * each 3-bit field in the Node ID mapping register maps
1034                  * to a particular node.
1035                  */
1036                 for (i = 0; i < 8; i++) {
1037                         if (nodeid == ((config >> (3 * i)) & 0x7)) {
1038                                 pcibus_to_physid[bus] = i;
1039                                 break;
1040                         }
1041                 }
1042         }
1043
1044         if (!err) {
1045                 /*
1046                  * For a PCI bus with no UBOX device, find the next bus
1047                  * that has a UBOX device and use its mapping.
1048                  */
1049                 i = -1;
1050                 for (bus = 255; bus >= 0; bus--) {
1051                         if (pcibus_to_physid[bus] >= 0)
1052                                 i = pcibus_to_physid[bus];
1053                         else
1054                                 pcibus_to_physid[bus] = i;
1055                 }
1056         }
1057
1058         if (ubox_dev)
1059                 pci_dev_put(ubox_dev);
1060
1061         return err ? pcibios_err_to_errno(err) : 0;
1062 }
1063 /* end of Sandy Bridge-EP uncore support */
1064
1065 /* IvyTown uncore support */
1066 static void ivt_uncore_msr_init_box(struct intel_uncore_box *box)
1067 {
1068         unsigned msr = uncore_msr_box_ctl(box);
1069         if (msr)
1070                 wrmsrl(msr, IVT_PMON_BOX_CTL_INT);
1071 }
1072
1073 static void ivt_uncore_pci_init_box(struct intel_uncore_box *box)
1074 {
1075         struct pci_dev *pdev = box->pci_dev;
1076
1077         pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVT_PMON_BOX_CTL_INT);
1078 }
1079
1080 #define IVT_UNCORE_MSR_OPS_COMMON_INIT()                        \
1081         .init_box       = ivt_uncore_msr_init_box,              \
1082         .disable_box    = snbep_uncore_msr_disable_box,         \
1083         .enable_box     = snbep_uncore_msr_enable_box,          \
1084         .disable_event  = snbep_uncore_msr_disable_event,       \
1085         .enable_event   = snbep_uncore_msr_enable_event,        \
1086         .read_counter   = uncore_msr_read_counter
1087
1088 static struct intel_uncore_ops ivt_uncore_msr_ops = {
1089         IVT_UNCORE_MSR_OPS_COMMON_INIT(),
1090 };
1091
1092 static struct intel_uncore_ops ivt_uncore_pci_ops = {
1093         .init_box       = ivt_uncore_pci_init_box,
1094         .disable_box    = snbep_uncore_pci_disable_box,
1095         .enable_box     = snbep_uncore_pci_enable_box,
1096         .disable_event  = snbep_uncore_pci_disable_event,
1097         .enable_event   = snbep_uncore_pci_enable_event,
1098         .read_counter   = snbep_uncore_pci_read_counter,
1099 };
1100
1101 #define IVT_UNCORE_PCI_COMMON_INIT()                            \
1102         .perf_ctr       = SNBEP_PCI_PMON_CTR0,                  \
1103         .event_ctl      = SNBEP_PCI_PMON_CTL0,                  \
1104         .event_mask     = IVT_PMON_RAW_EVENT_MASK,              \
1105         .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,               \
1106         .ops            = &ivt_uncore_pci_ops,                  \
1107         .format_group   = &ivt_uncore_format_group
1108
1109 static struct attribute *ivt_uncore_formats_attr[] = {
1110         &format_attr_event.attr,
1111         &format_attr_umask.attr,
1112         &format_attr_edge.attr,
1113         &format_attr_inv.attr,
1114         &format_attr_thresh8.attr,
1115         NULL,
1116 };
1117
1118 static struct attribute *ivt_uncore_ubox_formats_attr[] = {
1119         &format_attr_event.attr,
1120         &format_attr_umask.attr,
1121         &format_attr_edge.attr,
1122         &format_attr_inv.attr,
1123         &format_attr_thresh5.attr,
1124         NULL,
1125 };
1126
1127 static struct attribute *ivt_uncore_cbox_formats_attr[] = {
1128         &format_attr_event.attr,
1129         &format_attr_umask.attr,
1130         &format_attr_edge.attr,
1131         &format_attr_tid_en.attr,
1132         &format_attr_thresh8.attr,
1133         &format_attr_filter_tid.attr,
1134         &format_attr_filter_link.attr,
1135         &format_attr_filter_state2.attr,
1136         &format_attr_filter_nid2.attr,
1137         &format_attr_filter_opc2.attr,
1138         NULL,
1139 };
1140
1141 static struct attribute *ivt_uncore_pcu_formats_attr[] = {
1142         &format_attr_event_ext.attr,
1143         &format_attr_occ_sel.attr,
1144         &format_attr_edge.attr,
1145         &format_attr_thresh5.attr,
1146         &format_attr_occ_invert.attr,
1147         &format_attr_occ_edge.attr,
1148         &format_attr_filter_band0.attr,
1149         &format_attr_filter_band1.attr,
1150         &format_attr_filter_band2.attr,
1151         &format_attr_filter_band3.attr,
1152         NULL,
1153 };
1154
1155 static struct attribute *ivt_uncore_qpi_formats_attr[] = {
1156         &format_attr_event_ext.attr,
1157         &format_attr_umask.attr,
1158         &format_attr_edge.attr,
1159         &format_attr_thresh8.attr,
1160         &format_attr_match_rds.attr,
1161         &format_attr_match_rnid30.attr,
1162         &format_attr_match_rnid4.attr,
1163         &format_attr_match_dnid.attr,
1164         &format_attr_match_mc.attr,
1165         &format_attr_match_opc.attr,
1166         &format_attr_match_vnw.attr,
1167         &format_attr_match0.attr,
1168         &format_attr_match1.attr,
1169         &format_attr_mask_rds.attr,
1170         &format_attr_mask_rnid30.attr,
1171         &format_attr_mask_rnid4.attr,
1172         &format_attr_mask_dnid.attr,
1173         &format_attr_mask_mc.attr,
1174         &format_attr_mask_opc.attr,
1175         &format_attr_mask_vnw.attr,
1176         &format_attr_mask0.attr,
1177         &format_attr_mask1.attr,
1178         NULL,
1179 };
1180
1181 static struct attribute_group ivt_uncore_format_group = {
1182         .name = "format",
1183         .attrs = ivt_uncore_formats_attr,
1184 };
1185
1186 static struct attribute_group ivt_uncore_ubox_format_group = {
1187         .name = "format",
1188         .attrs = ivt_uncore_ubox_formats_attr,
1189 };
1190
1191 static struct attribute_group ivt_uncore_cbox_format_group = {
1192         .name = "format",
1193         .attrs = ivt_uncore_cbox_formats_attr,
1194 };
1195
1196 static struct attribute_group ivt_uncore_pcu_format_group = {
1197         .name = "format",
1198         .attrs = ivt_uncore_pcu_formats_attr,
1199 };
1200
1201 static struct attribute_group ivt_uncore_qpi_format_group = {
1202         .name = "format",
1203         .attrs = ivt_uncore_qpi_formats_attr,
1204 };
1205
1206 static struct intel_uncore_type ivt_uncore_ubox = {
1207         .name           = "ubox",
1208         .num_counters   = 2,
1209         .num_boxes      = 1,
1210         .perf_ctr_bits  = 44,
1211         .fixed_ctr_bits = 48,
1212         .perf_ctr       = SNBEP_U_MSR_PMON_CTR0,
1213         .event_ctl      = SNBEP_U_MSR_PMON_CTL0,
1214         .event_mask     = IVT_U_MSR_PMON_RAW_EVENT_MASK,
1215         .fixed_ctr      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
1216         .fixed_ctl      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
1217         .ops            = &ivt_uncore_msr_ops,
1218         .format_group   = &ivt_uncore_ubox_format_group,
1219 };
1220
1221 static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
1222         SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1223                                   SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1224         SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1225
1226         SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1227         SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1228         SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
1229         SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1230         SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
1231         SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1232         SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
1233         SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1234         SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
1235         SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
1236         SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1237         SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1238         SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1239         SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1240         SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1241         SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1242         SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1243         SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1244         SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1245         SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1246         SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1247         SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1248         SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1249         SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1250         SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1251         SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1252         SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1253         SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1254         SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1255         SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1256         SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
1257         SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1258         SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1259         SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
1260         EVENT_EXTRA_END
1261 };
1262
1263 static u64 ivt_cbox_filter_mask(int fields)
1264 {
1265         u64 mask = 0;
1266
1267         if (fields & 0x1)
1268                 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_TID;
1269         if (fields & 0x2)
1270                 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_LINK;
1271         if (fields & 0x4)
1272                 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_STATE;
1273         if (fields & 0x8)
1274                 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_NID;
1275         if (fields & 0x10)
1276                 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_OPC;
1277
1278         return mask;
1279 }
1280
1281 static struct event_constraint *
1282 ivt_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1283 {
1284         return __snbep_cbox_get_constraint(box, event, ivt_cbox_filter_mask);
1285 }
1286
1287 static int ivt_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1288 {
1289         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1290         struct extra_reg *er;
1291         int idx = 0;
1292
1293         for (er = ivt_uncore_cbox_extra_regs; er->msr; er++) {
1294                 if (er->event != (event->hw.config & er->config_mask))
1295                         continue;
1296                 idx |= er->idx;
1297         }
1298
1299         if (idx) {
1300                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1301                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1302                 reg1->config = event->attr.config1 & ivt_cbox_filter_mask(idx);
1303                 reg1->idx = idx;
1304         }
1305         return 0;
1306 }
1307
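/*
 * The IvyTown Cbox filter value is 64 bits wide and split across two MSRs;
 * the low half goes to reg1->reg and the high half to reg1->reg + 6.
 */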
1308 static void ivt_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1309 {
1310         struct hw_perf_event *hwc = &event->hw;
1311         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1312
1313         if (reg1->idx != EXTRA_REG_NONE) {
1314                 u64 filter = uncore_shared_reg_config(box, 0);
1315                 wrmsrl(reg1->reg, filter & 0xffffffff);
1316                 wrmsrl(reg1->reg + 6, filter >> 32);
1317         }
1318
1319         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1320 }
1321
1322 static struct intel_uncore_ops ivt_uncore_cbox_ops = {
1323         .init_box               = ivt_uncore_msr_init_box,
1324         .disable_box            = snbep_uncore_msr_disable_box,
1325         .enable_box             = snbep_uncore_msr_enable_box,
1326         .disable_event          = snbep_uncore_msr_disable_event,
1327         .enable_event           = ivt_cbox_enable_event,
1328         .read_counter           = uncore_msr_read_counter,
1329         .hw_config              = ivt_cbox_hw_config,
1330         .get_constraint         = ivt_cbox_get_constraint,
1331         .put_constraint         = snbep_cbox_put_constraint,
1332 };
1333
1334 static struct intel_uncore_type ivt_uncore_cbox = {
1335         .name                   = "cbox",
1336         .num_counters           = 4,
1337         .num_boxes              = 15,
1338         .perf_ctr_bits          = 44,
1339         .event_ctl              = SNBEP_C0_MSR_PMON_CTL0,
1340         .perf_ctr               = SNBEP_C0_MSR_PMON_CTR0,
1341         .event_mask             = IVT_CBO_MSR_PMON_RAW_EVENT_MASK,
1342         .box_ctl                = SNBEP_C0_MSR_PMON_BOX_CTL,
1343         .msr_offset             = SNBEP_CBO_MSR_OFFSET,
1344         .num_shared_regs        = 1,
1345         .constraints            = snbep_uncore_cbox_constraints,
1346         .ops                    = &ivt_uncore_cbox_ops,
1347         .format_group           = &ivt_uncore_cbox_format_group,
1348 };
1349
1350 static struct intel_uncore_ops ivt_uncore_pcu_ops = {
1351         IVT_UNCORE_MSR_OPS_COMMON_INIT(),
1352         .hw_config              = snbep_pcu_hw_config,
1353         .get_constraint         = snbep_pcu_get_constraint,
1354         .put_constraint         = snbep_pcu_put_constraint,
1355 };
1356
1357 static struct intel_uncore_type ivt_uncore_pcu = {
1358         .name                   = "pcu",
1359         .num_counters           = 4,
1360         .num_boxes              = 1,
1361         .perf_ctr_bits          = 48,
1362         .perf_ctr               = SNBEP_PCU_MSR_PMON_CTR0,
1363         .event_ctl              = SNBEP_PCU_MSR_PMON_CTL0,
1364         .event_mask             = IVT_PCU_MSR_PMON_RAW_EVENT_MASK,
1365         .box_ctl                = SNBEP_PCU_MSR_PMON_BOX_CTL,
1366         .num_shared_regs        = 1,
1367         .ops                    = &ivt_uncore_pcu_ops,
1368         .format_group           = &ivt_uncore_pcu_format_group,
1369 };
1370
1371 static struct intel_uncore_type *ivt_msr_uncores[] = {
1372         &ivt_uncore_ubox,
1373         &ivt_uncore_cbox,
1374         &ivt_uncore_pcu,
1375         NULL,
1376 };
1377
1378 static struct intel_uncore_type ivt_uncore_ha = {
1379         .name           = "ha",
1380         .num_counters   = 4,
1381         .num_boxes      = 2,
1382         .perf_ctr_bits  = 48,
1383         IVT_UNCORE_PCI_COMMON_INIT(),
1384 };
1385
1386 static struct intel_uncore_type ivt_uncore_imc = {
1387         .name           = "imc",
1388         .num_counters   = 4,
1389         .num_boxes      = 8,
1390         .perf_ctr_bits  = 48,
1391         .fixed_ctr_bits = 48,
1392         .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1393         .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1394         IVT_UNCORE_PCI_COMMON_INIT(),
1395 };
1396
1397 /* registers in IRP boxes are not properly aligned */
1398 static unsigned ivt_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
1399 static unsigned ivt_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1400
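/*
 * Because of that irregular layout, the IRP enable/disable/read helpers
 * below index the per-counter offset tables above with hwc->idx instead of
 * using the usual base + idx * stride scheme.
 */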
1401 static void ivt_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1402 {
1403         struct pci_dev *pdev = box->pci_dev;
1404         struct hw_perf_event *hwc = &event->hw;
1405
1406         pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx],
1407                                hwc->config | SNBEP_PMON_CTL_EN);
1408 }
1409
1410 static void ivt_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1411 {
1412         struct pci_dev *pdev = box->pci_dev;
1413         struct hw_perf_event *hwc = &event->hw;
1414
1415         pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx], hwc->config);
1416 }
1417
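/*
 * IRP counters are read as two 32-bit PCI config accesses: the low dword
 * from the counter offset, the high dword from the counter offset + 4.
 */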
1418 static u64 ivt_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1419 {
1420         struct pci_dev *pdev = box->pci_dev;
1421         struct hw_perf_event *hwc = &event->hw;
1422         u64 count = 0;
1423
1424         pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1425         pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1426
1427         return count;
1428 }
1429
1430 static struct intel_uncore_ops ivt_uncore_irp_ops = {
1431         .init_box       = ivt_uncore_pci_init_box,
1432         .disable_box    = snbep_uncore_pci_disable_box,
1433         .enable_box     = snbep_uncore_pci_enable_box,
1434         .disable_event  = ivt_uncore_irp_disable_event,
1435         .enable_event   = ivt_uncore_irp_enable_event,
1436         .read_counter   = ivt_uncore_irp_read_counter,
1437 };
1438
1439 static struct intel_uncore_type ivt_uncore_irp = {
1440         .name                   = "irp",
1441         .num_counters           = 4,
1442         .num_boxes              = 1,
1443         .perf_ctr_bits          = 48,
1444         .event_mask             = IVT_PMON_RAW_EVENT_MASK,
1445         .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
1446         .ops                    = &ivt_uncore_irp_ops,
1447         .format_group           = &ivt_uncore_format_group,
1448 };
1449
1450 static struct intel_uncore_ops ivt_uncore_qpi_ops = {
1451         .init_box       = ivt_uncore_pci_init_box,
1452         .disable_box    = snbep_uncore_pci_disable_box,
1453         .enable_box     = snbep_uncore_pci_enable_box,
1454         .disable_event  = snbep_uncore_pci_disable_event,
1455         .enable_event   = snbep_qpi_enable_event,
1456         .read_counter   = snbep_uncore_pci_read_counter,
1457         .hw_config      = snbep_qpi_hw_config,
1458         .get_constraint = uncore_get_constraint,
1459         .put_constraint = uncore_put_constraint,
1460 };
1461
1462 static struct intel_uncore_type ivt_uncore_qpi = {
1463         .name                   = "qpi",
1464         .num_counters           = 4,
1465         .num_boxes              = 3,
1466         .perf_ctr_bits          = 48,
1467         .perf_ctr               = SNBEP_PCI_PMON_CTR0,
1468         .event_ctl              = SNBEP_PCI_PMON_CTL0,
1469         .event_mask             = IVT_QPI_PCI_PMON_RAW_EVENT_MASK,
1470         .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
1471         .num_shared_regs        = 1,
1472         .ops                    = &ivt_uncore_qpi_ops,
1473         .format_group           = &ivt_uncore_qpi_format_group,
1474 };
1475
1476 static struct intel_uncore_type ivt_uncore_r2pcie = {
1477         .name           = "r2pcie",
1478         .num_counters   = 4,
1479         .num_boxes      = 1,
1480         .perf_ctr_bits  = 44,
1481         .constraints    = snbep_uncore_r2pcie_constraints,
1482         IVT_UNCORE_PCI_COMMON_INIT(),
1483 };
1484
1485 static struct intel_uncore_type ivt_uncore_r3qpi = {
1486         .name           = "r3qpi",
1487         .num_counters   = 3,
1488         .num_boxes      = 2,
1489         .perf_ctr_bits  = 44,
1490         .constraints    = snbep_uncore_r3qpi_constraints,
1491         IVT_UNCORE_PCI_COMMON_INIT(),
1492 };
1493
1494 enum {
1495         IVT_PCI_UNCORE_HA,
1496         IVT_PCI_UNCORE_IMC,
1497         IVT_PCI_UNCORE_IRP,
1498         IVT_PCI_UNCORE_QPI,
1499         IVT_PCI_UNCORE_R2PCIE,
1500         IVT_PCI_UNCORE_R3QPI,
1501 };
1502
1503 static struct intel_uncore_type *ivt_pci_uncores[] = {
1504         [IVT_PCI_UNCORE_HA]     = &ivt_uncore_ha,
1505         [IVT_PCI_UNCORE_IMC]    = &ivt_uncore_imc,
1506         [IVT_PCI_UNCORE_IRP]    = &ivt_uncore_irp,
1507         [IVT_PCI_UNCORE_QPI]    = &ivt_uncore_qpi,
1508         [IVT_PCI_UNCORE_R2PCIE] = &ivt_uncore_r2pcie,
1509         [IVT_PCI_UNCORE_R3QPI]  = &ivt_uncore_r3qpi,
1510         NULL,
1511 };
1512
1513 static DEFINE_PCI_DEVICE_TABLE(ivt_uncore_pci_ids) = {
1514         { /* Home Agent 0 */
1515                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1516                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 0),
1517         },
1518         { /* Home Agent 1 */
1519                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
1520                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 1),
1521         },
1522         { /* MC0 Channel 0 */
1523                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
1524                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 0),
1525         },
1526         { /* MC0 Channel 1 */
1527                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
1528                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 1),
1529         },
1530         { /* MC0 Channel 3 */
1531                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
1532                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 2),
1533         },
1534         { /* MC0 Channel 4 */
1535                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
1536                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 3),
1537         },
1538         { /* MC1 Channel 0 */
1539                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
1540                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 4),
1541         },
1542         { /* MC1 Channel 1 */
1543                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
1544                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 5),
1545         },
1546         { /* MC1 Channel 3 */
1547                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
1548                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 6),
1549         },
1550         { /* MC1 Channel 4 */
1551                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
1552                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 7),
1553         },
1554         { /* IRP */
1555                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
1556                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IRP, 0),
1557         },
1558         { /* QPI0 Port 0 */
1559                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
1560                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 0),
1561         },
1562         { /* QPI0 Port 1 */
1563                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
1564                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 1),
1565         },
1566         { /* QPI1 Port 2 */
1567                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
1568                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 2),
1569         },
1570         { /* R2PCIe */
1571                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
1572                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R2PCIE, 0),
1573         },
1574         { /* R3QPI0 Link 0 */
1575                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
1576                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 0),
1577         },
1578         { /* R3QPI0 Link 1 */
1579                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
1580                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 1),
1581         },
1582         { /* R3QPI1 Link 2 */
1583                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
1584                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 2),
1585         },
1586         { /* QPI Port 0 filter  */
1587                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
1588                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1589                                                    SNBEP_PCI_QPI_PORT0_FILTER),
1590         },
1591         { /* QPI Port 1 filter  */
1592                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
1593                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1594                                                    SNBEP_PCI_QPI_PORT1_FILTER),
1595         },
1596         { /* end: all zeroes */ }
1597 };
1598
1599 static struct pci_driver ivt_uncore_pci_driver = {
1600         .name           = "ivt_uncore",
1601         .id_table       = ivt_uncore_pci_ids,
1602 };
1603 /* end of IvyTown uncore support */
1604
1605 /* Sandy Bridge uncore support */
1606 static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1607 {
1608         struct hw_perf_event *hwc = &event->hw;
1609
1610         if (hwc->idx < UNCORE_PMC_IDX_FIXED)
1611                 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
1612         else
1613                 wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
1614 }
1615
1616 static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1617 {
1618         wrmsrl(event->hw.config_base, 0);
1619 }
1620
1621 static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
1622 {
1623         if (box->pmu->pmu_idx == 0) {
1624                 wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
1625                         SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
1626         }
1627 }
1628
1629 static struct uncore_event_desc snb_uncore_events[] = {
1630         INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
1631         { /* end: all zeroes */ },
1632 };
1633
1634 static struct attribute *snb_uncore_formats_attr[] = {
1635         &format_attr_event.attr,
1636         &format_attr_umask.attr,
1637         &format_attr_edge.attr,
1638         &format_attr_inv.attr,
1639         &format_attr_cmask5.attr,
1640         NULL,
1641 };
1642
1643 static struct attribute_group snb_uncore_format_group = {
1644         .name           = "format",
1645         .attrs          = snb_uncore_formats_attr,
1646 };
1647
1648 static struct intel_uncore_ops snb_uncore_msr_ops = {
1649         .init_box       = snb_uncore_msr_init_box,
1650         .disable_event  = snb_uncore_msr_disable_event,
1651         .enable_event   = snb_uncore_msr_enable_event,
1652         .read_counter   = uncore_msr_read_counter,
1653 };
1654
1655 static struct event_constraint snb_uncore_cbox_constraints[] = {
1656         UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
1657         UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
1658         EVENT_CONSTRAINT_END
1659 };
1660
1661 static struct intel_uncore_type snb_uncore_cbox = {
1662         .name           = "cbox",
1663         .num_counters   = 2,
1664         .num_boxes      = 4,
1665         .perf_ctr_bits  = 44,
1666         .fixed_ctr_bits = 48,
1667         .perf_ctr       = SNB_UNC_CBO_0_PER_CTR0,
1668         .event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
1669         .fixed_ctr      = SNB_UNC_FIXED_CTR,
1670         .fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
1671         .single_fixed   = 1,
1672         .event_mask     = SNB_UNC_RAW_EVENT_MASK,
1673         .msr_offset     = SNB_UNC_CBO_MSR_OFFSET,
1674         .constraints    = snb_uncore_cbox_constraints,
1675         .ops            = &snb_uncore_msr_ops,
1676         .format_group   = &snb_uncore_format_group,
1677         .event_descs    = snb_uncore_events,
1678 };
1679
1680 static struct intel_uncore_type *snb_msr_uncores[] = {
1681         &snb_uncore_cbox,
1682         NULL,
1683 };
1684
1685 enum {
1686         SNB_PCI_UNCORE_IMC,
1687 };
1688
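/*
 * The scale 6.103515625e-5 is 64 / 2^20: each count presumably corresponds
 * to one 64-byte cache line, so the scaled value is reported in MiB.
 */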
1689 static struct uncore_event_desc snb_uncore_imc_events[] = {
1690         INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
1691         INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
1692         INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),
1693
1694         INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
1695         INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
1696         INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),
1697
1698         { /* end: all zeroes */ },
1699 };
1700
1701 #define SNB_UNCORE_PCI_IMC_EVENT_MASK           0xff
1702 #define SNB_UNCORE_PCI_IMC_BAR_OFFSET           0x48
1703
1704 /* page size multiple covering all config regs */
1705 #define SNB_UNCORE_PCI_IMC_MAP_SIZE             0x6000
1706
1707 #define SNB_UNCORE_PCI_IMC_DATA_READS           0x1
1708 #define SNB_UNCORE_PCI_IMC_DATA_READS_BASE      0x5050
1709 #define SNB_UNCORE_PCI_IMC_DATA_WRITES          0x2
1710 #define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE     0x5054
1711 #define SNB_UNCORE_PCI_IMC_CTR_BASE             SNB_UNCORE_PCI_IMC_DATA_READS_BASE
1712
1713 static struct attribute *snb_uncore_imc_formats_attr[] = {
1714         &format_attr_event.attr,
1715         NULL,
1716 };
1717
1718 static struct attribute_group snb_uncore_imc_format_group = {
1719         .name = "format",
1720         .attrs = snb_uncore_imc_formats_attr,
1721 };
1722
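/*
 * The client IMC counters are not read through PCI config space.  init_box
 * fetches the (page-aligned) base address from the BAR at config offset
 * 0x48, ioremaps SNB_UNCORE_PCI_IMC_MAP_SIZE bytes, and the counters are
 * then accessed via MMIO.
 */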
1723 static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
1724 {
1725         struct pci_dev *pdev = box->pci_dev;
1726         int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
1727         resource_size_t addr;
1728         u32 pci_dword;
1729
1730         pci_read_config_dword(pdev, where, &pci_dword);
1731         addr = pci_dword;
1732
1733 #ifdef CONFIG_PHYS_ADDR_T_64BIT
1734         pci_read_config_dword(pdev, where + 4, &pci_dword);
1735         addr |= ((resource_size_t)pci_dword << 32);
1736 #endif
1737
1738         addr &= ~(PAGE_SIZE - 1);
1739
1740         box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
1741         box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
1742 }
1743
1744 static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
1745 {}
1746
1747 static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
1748 {}
1749
1750 static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1751 {}
1752
1753 static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1754 {}
1755
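/*
 * Free-running counter read: hwc->event_base is set to the MMIO offset of
 * the selected counter in snb_uncore_imc_event_init(), so a single 32-bit
 * load from the ioremapped region returns the current count.
 */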
1756 static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1757 {
1758         struct hw_perf_event *hwc = &event->hw;
1759
1760         return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
1761 }
1762
1763 /*
1764  * Custom event_init() function because we define our own fixed, free-running
1765  * counters, so we do not want to conflict with the generic uncore logic.
1766  * This also simplifies processing.
1767  */
1768 static int snb_uncore_imc_event_init(struct perf_event *event)
1769 {
1770         struct intel_uncore_pmu *pmu;
1771         struct intel_uncore_box *box;
1772         struct hw_perf_event *hwc = &event->hw;
1773         u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
1774         int idx, base;
1775
1776         if (event->attr.type != event->pmu->type)
1777                 return -ENOENT;
1778
1779         pmu = uncore_event_to_pmu(event);
1780         /* no device found for this pmu */
1781         if (pmu->func_id < 0)
1782                 return -ENOENT;
1783
1784         /* Sampling not supported yet */
1785         if (hwc->sample_period)
1786                 return -EINVAL;
1787
1788         /* unsupported modes and filters */
1789         if (event->attr.exclude_user   ||
1790             event->attr.exclude_kernel ||
1791             event->attr.exclude_hv     ||
1792             event->attr.exclude_idle   ||
1793             event->attr.exclude_host   ||
1794             event->attr.exclude_guest  ||
1795             event->attr.sample_period) /* no sampling */
1796                 return -EINVAL;
1797
1798         /*
1799          * Place all uncore events for a particular physical package
1800          * onto a single cpu
1801          */
1802         if (event->cpu < 0)
1803                 return -EINVAL;
1804
1805         /* check only supported bits are set */
1806         if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
1807                 return -EINVAL;
1808
1809         box = uncore_pmu_to_box(pmu, event->cpu);
1810         if (!box || box->cpu < 0)
1811                 return -EINVAL;
1812
1813         event->cpu = box->cpu;
1814
1815         event->hw.idx = -1;
1816         event->hw.last_tag = ~0ULL;
1817         event->hw.extra_reg.idx = EXTRA_REG_NONE;
1818         event->hw.branch_reg.idx = EXTRA_REG_NONE;
1819         /*
1820          * check event is known (whitelist, determines counter)
1821          */
1822         switch (cfg) {
1823         case SNB_UNCORE_PCI_IMC_DATA_READS:
1824                 base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
1825                 idx = UNCORE_PMC_IDX_FIXED;
1826                 break;
1827         case SNB_UNCORE_PCI_IMC_DATA_WRITES:
1828                 base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
1829                 idx = UNCORE_PMC_IDX_FIXED + 1;
1830                 break;
1831         default:
1832                 return -EINVAL;
1833         }
1834
1835         /* must be done before validate_group */
1836         event->hw.event_base = base;
1837         event->hw.config = cfg;
1838         event->hw.idx = idx;
1839
1840         /* no group validation needed, we have free running counters */
1841
1842         return 0;
1843 }
1844
1845 static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1846 {
1847         return 0;
1848 }
1849
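/*
 * start/stop snapshot the free-running counter into prev_count and manage
 * the per-box hrtimer: the first active event arms it, stopping the last
 * one cancels it, and PERF_EF_UPDATE drains the remaining delta.
 */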
1850 static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
1851 {
1852         struct intel_uncore_box *box = uncore_event_to_box(event);
1853         u64 count;
1854
1855         if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
1856                 return;
1857
1858         event->hw.state = 0;
1859         box->n_active++;
1860
1861         list_add_tail(&event->active_entry, &box->active_list);
1862
1863         count = snb_uncore_imc_read_counter(box, event);
1864         local64_set(&event->hw.prev_count, count);
1865
1866         if (box->n_active == 1)
1867                 uncore_pmu_start_hrtimer(box);
1868 }
1869
1870 static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
1871 {
1872         struct intel_uncore_box *box = uncore_event_to_box(event);
1873         struct hw_perf_event *hwc = &event->hw;
1874
1875         if (!(hwc->state & PERF_HES_STOPPED)) {
1876                 box->n_active--;
1877
1878                 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
1879                 hwc->state |= PERF_HES_STOPPED;
1880
1881                 list_del(&event->active_entry);
1882
1883                 if (box->n_active == 0)
1884                         uncore_pmu_cancel_hrtimer(box);
1885         }
1886
1887         if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
1888                 /*
1889                  * Drain the remaining delta count out of an event
1890                  * that we are disabling:
1891                  */
1892                 uncore_perf_event_update(box, event);
1893                 hwc->state |= PERF_HES_UPTODATE;
1894         }
1895 }
1896
1897 static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
1898 {
1899         struct intel_uncore_box *box = uncore_event_to_box(event);
1900         struct hw_perf_event *hwc = &event->hw;
1901
1902         if (!box)
1903                 return -ENODEV;
1904
1905         hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
1906         if (!(flags & PERF_EF_START))
1907                 hwc->state |= PERF_HES_ARCH;
1908
1909         snb_uncore_imc_event_start(event, 0);
1910
1911         box->n_events++;
1912
1913         return 0;
1914 }
1915
1916 static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
1917 {
1918         struct intel_uncore_box *box = uncore_event_to_box(event);
1919         int i;
1920
1921         snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);
1922
1923         for (i = 0; i < box->n_events; i++) {
1924                 if (event == box->event_list[i]) {
1925                         --box->n_events;
1926                         break;
1927                 }
1928         }
1929 }
1930
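/*
 * These client parts have a single package, so the bus number of the IMC
 * device simply maps to physical package id 0.
 */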
1931 static int snb_pci2phy_map_init(int devid)
1932 {
1933         struct pci_dev *dev = NULL;
1934         int bus;
1935
1936         dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
1937         if (!dev)
1938                 return -ENOTTY;
1939
1940         bus = dev->bus->number;
1941
1942         pcibus_to_physid[bus] = 0;
1943
1944         pci_dev_put(dev);
1945
1946         return 0;
1947 }
1948
1949 static struct pmu snb_uncore_imc_pmu = {
1950         .task_ctx_nr    = perf_invalid_context,
1951         .event_init     = snb_uncore_imc_event_init,
1952         .add            = snb_uncore_imc_event_add,
1953         .del            = snb_uncore_imc_event_del,
1954         .start          = snb_uncore_imc_event_start,
1955         .stop           = snb_uncore_imc_event_stop,
1956         .read           = uncore_pmu_event_read,
1957 };
1958
1959 static struct intel_uncore_ops snb_uncore_imc_ops = {
1960         .init_box       = snb_uncore_imc_init_box,
1961         .enable_box     = snb_uncore_imc_enable_box,
1962         .disable_box    = snb_uncore_imc_disable_box,
1963         .disable_event  = snb_uncore_imc_disable_event,
1964         .enable_event   = snb_uncore_imc_enable_event,
1965         .hw_config      = snb_uncore_imc_hw_config,
1966         .read_counter   = snb_uncore_imc_read_counter,
1967 };
1968
1969 static struct intel_uncore_type snb_uncore_imc = {
1970         .name           = "imc",
1971         .num_counters   = 2,
1972         .num_boxes      = 1,
1973         .fixed_ctr_bits = 32,
1974         .fixed_ctr      = SNB_UNCORE_PCI_IMC_CTR_BASE,
1975         .event_descs    = snb_uncore_imc_events,
1976         .format_group   = &snb_uncore_imc_format_group,
1977         .perf_ctr       = SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
1978         .event_mask     = SNB_UNCORE_PCI_IMC_EVENT_MASK,
1979         .ops            = &snb_uncore_imc_ops,
1980         .pmu            = &snb_uncore_imc_pmu,
1981 };
1982
1983 static struct intel_uncore_type *snb_pci_uncores[] = {
1984         [SNB_PCI_UNCORE_IMC]    = &snb_uncore_imc,
1985         NULL,
1986 };
1987
1988 static DEFINE_PCI_DEVICE_TABLE(snb_uncore_pci_ids) = {
1989         { /* IMC */
1990                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
1991                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1992         },
1993         { /* end: all zeroes */ },
1994 };
1995
1996 static DEFINE_PCI_DEVICE_TABLE(ivb_uncore_pci_ids) = {
1997         { /* IMC */
1998                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
1999                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
2000         },
2001         { /* end: all zeroes */ },
2002 };
2003
2004 static DEFINE_PCI_DEVICE_TABLE(hsw_uncore_pci_ids) = {
2005         { /* IMC */
2006                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
2007                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
2008         },
2009         { /* end: all zeroes */ },
2010 };
2011
2012 static struct pci_driver snb_uncore_pci_driver = {
2013         .name           = "snb_uncore",
2014         .id_table       = snb_uncore_pci_ids,
2015 };
2016
2017 static struct pci_driver ivb_uncore_pci_driver = {
2018         .name           = "ivb_uncore",
2019         .id_table       = ivb_uncore_pci_ids,
2020 };
2021
2022 static struct pci_driver hsw_uncore_pci_driver = {
2023         .name           = "hsw_uncore",
2024         .id_table       = hsw_uncore_pci_ids,
2025 };
2026
2027 /* end of Sandy Bridge uncore support */
2028
2029 /* Nehalem uncore support */
2030 static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
2031 {
2032         wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
2033 }
2034
2035 static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
2036 {
2037         wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
2038 }
2039
2040 static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2041 {
2042         struct hw_perf_event *hwc = &event->hw;
2043
2044         if (hwc->idx < UNCORE_PMC_IDX_FIXED)
2045                 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
2046         else
2047                 wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
2048 }
2049
2050 static struct attribute *nhm_uncore_formats_attr[] = {
2051         &format_attr_event.attr,
2052         &format_attr_umask.attr,
2053         &format_attr_edge.attr,
2054         &format_attr_inv.attr,
2055         &format_attr_cmask8.attr,
2056         NULL,
2057 };
2058
2059 static struct attribute_group nhm_uncore_format_group = {
2060         .name = "format",
2061         .attrs = nhm_uncore_formats_attr,
2062 };
2063
2064 static struct uncore_event_desc nhm_uncore_events[] = {
2065         INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
2066         INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
2067         INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
2068         INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
2069         INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
2070         INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
2071         INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
2072         INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
2073         INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
2074         { /* end: all zeroes */ },
2075 };
2076
2077 static struct intel_uncore_ops nhm_uncore_msr_ops = {
2078         .disable_box    = nhm_uncore_msr_disable_box,
2079         .enable_box     = nhm_uncore_msr_enable_box,
2080         .disable_event  = snb_uncore_msr_disable_event,
2081         .enable_event   = nhm_uncore_msr_enable_event,
2082         .read_counter   = uncore_msr_read_counter,
2083 };
2084
2085 static struct intel_uncore_type nhm_uncore = {
2086         .name           = "",
2087         .num_counters   = 8,
2088         .num_boxes      = 1,
2089         .perf_ctr_bits  = 48,
2090         .fixed_ctr_bits = 48,
2091         .event_ctl      = NHM_UNC_PERFEVTSEL0,
2092         .perf_ctr       = NHM_UNC_UNCORE_PMC0,
2093         .fixed_ctr      = NHM_UNC_FIXED_CTR,
2094         .fixed_ctl      = NHM_UNC_FIXED_CTR_CTRL,
2095         .event_mask     = NHM_UNC_RAW_EVENT_MASK,
2096         .event_descs    = nhm_uncore_events,
2097         .ops            = &nhm_uncore_msr_ops,
2098         .format_group   = &nhm_uncore_format_group,
2099 };
2100
2101 static struct intel_uncore_type *nhm_msr_uncores[] = {
2102         &nhm_uncore,
2103         NULL,
2104 };
2105 /* end of Nehalem uncore support */
2106
2107 /* Nehalem-EX uncore support */
2108 DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
2109 DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
2110 DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
2111 DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");
2112
2113 static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
2114 {
2115         wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
2116 }
2117
2118 static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
2119 {
2120         unsigned msr = uncore_msr_box_ctl(box);
2121         u64 config;
2122
2123         if (msr) {
2124                 rdmsrl(msr, config);
2125                 config &= ~((1ULL << uncore_num_counters(box)) - 1);
2126                 /* WBox has a fixed counter */
2127                 if (uncore_msr_fixed_ctl(box))
2128                         config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
2129                 wrmsrl(msr, config);
2130         }
2131 }
2132
2133 static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
2134 {
2135         unsigned msr = uncore_msr_box_ctl(box);
2136         u64 config;
2137
2138         if (msr) {
2139                 rdmsrl(msr, config);
2140                 config |= (1ULL << uncore_num_counters(box)) - 1;
2141                 /* WBox has a fixed counter */
2142                 if (uncore_msr_fixed_ctl(box))
2143                         config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
2144                 wrmsrl(msr, config);
2145         }
2146 }
2147
2148 static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
2149 {
2150         wrmsrl(event->hw.config_base, 0);
2151 }
2152
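/*
 * Fixed counters only need the enable bit.  For the other counters the
 * position of the enable bit differs between boxes: if bit 0 is already
 * part of the box's event mask, bit 22 is used as the enable bit instead.
 */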
2153 static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2154 {
2155         struct hw_perf_event *hwc = &event->hw;
2156
2157         if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
2158                 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
2159         else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
2160                 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
2161         else
2162                 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
2163 }
2164
2165 #define NHMEX_UNCORE_OPS_COMMON_INIT()                          \
2166         .init_box       = nhmex_uncore_msr_init_box,            \
2167         .disable_box    = nhmex_uncore_msr_disable_box,         \
2168         .enable_box     = nhmex_uncore_msr_enable_box,          \
2169         .disable_event  = nhmex_uncore_msr_disable_event,       \
2170         .read_counter   = uncore_msr_read_counter
2171
2172 static struct intel_uncore_ops nhmex_uncore_ops = {
2173         NHMEX_UNCORE_OPS_COMMON_INIT(),
2174         .enable_event   = nhmex_uncore_msr_enable_event,
2175 };
2176
2177 static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
2178         &format_attr_event.attr,
2179         &format_attr_edge.attr,
2180         NULL,
2181 };
2182
2183 static struct attribute_group nhmex_uncore_ubox_format_group = {
2184         .name           = "format",
2185         .attrs          = nhmex_uncore_ubox_formats_attr,
2186 };
2187
2188 static struct intel_uncore_type nhmex_uncore_ubox = {
2189         .name           = "ubox",
2190         .num_counters   = 1,
2191         .num_boxes      = 1,
2192         .perf_ctr_bits  = 48,
2193         .event_ctl      = NHMEX_U_MSR_PMON_EV_SEL,
2194         .perf_ctr       = NHMEX_U_MSR_PMON_CTR,
2195         .event_mask     = NHMEX_U_PMON_RAW_EVENT_MASK,
2196         .box_ctl        = NHMEX_U_MSR_PMON_GLOBAL_CTL,
2197         .ops            = &nhmex_uncore_ops,
2198         .format_group   = &nhmex_uncore_ubox_format_group
2199 };
2200
2201 static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
2202         &format_attr_event.attr,
2203         &format_attr_umask.attr,
2204         &format_attr_edge.attr,
2205         &format_attr_inv.attr,
2206         &format_attr_thresh8.attr,
2207         NULL,
2208 };
2209
2210 static struct attribute_group nhmex_uncore_cbox_format_group = {
2211         .name = "format",
2212         .attrs = nhmex_uncore_cbox_formats_attr,
2213 };
2214
2215 /* msr offset for each instance of cbox */
2216 static unsigned nhmex_cbox_msr_offsets[] = {
2217         0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
2218 };
2219
2220 static struct intel_uncore_type nhmex_uncore_cbox = {
2221         .name                   = "cbox",
2222         .num_counters           = 6,
2223         .num_boxes              = 10,
2224         .perf_ctr_bits          = 48,
2225         .event_ctl              = NHMEX_C0_MSR_PMON_EV_SEL0,
2226         .perf_ctr               = NHMEX_C0_MSR_PMON_CTR0,
2227         .event_mask             = NHMEX_PMON_RAW_EVENT_MASK,
2228         .box_ctl                = NHMEX_C0_MSR_PMON_GLOBAL_CTL,
2229         .msr_offsets            = nhmex_cbox_msr_offsets,
2230         .pair_ctr_ctl           = 1,
2231         .ops                    = &nhmex_uncore_ops,
2232         .format_group           = &nhmex_uncore_cbox_format_group
2233 };
2234
2235 static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
2236         INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
2237         { /* end: all zeroes */ },
2238 };
2239
2240 static struct intel_uncore_type nhmex_uncore_wbox = {
2241         .name                   = "wbox",
2242         .num_counters           = 4,
2243         .num_boxes              = 1,
2244         .perf_ctr_bits          = 48,
2245         .event_ctl              = NHMEX_W_MSR_PMON_CNT0,
2246         .perf_ctr               = NHMEX_W_MSR_PMON_EVT_SEL0,
2247         .fixed_ctr              = NHMEX_W_MSR_PMON_FIXED_CTR,
2248         .fixed_ctl              = NHMEX_W_MSR_PMON_FIXED_CTL,
2249         .event_mask             = NHMEX_PMON_RAW_EVENT_MASK,
2250         .box_ctl                = NHMEX_W_MSR_GLOBAL_CTL,
2251         .pair_ctr_ctl           = 1,
2252         .event_descs            = nhmex_uncore_wbox_events,
2253         .ops                    = &nhmex_uncore_ops,
2254         .format_group           = &nhmex_uncore_cbox_format_group
2255 };
2256
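/*
 * Decode the counter select (bits 6-7, see the "counter" format attribute)
 * and the event select from the event config.  Only a few (counter, event)
 * combinations use the match/mask MSR pair, which is then programmed from
 * config1/config2.
 */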
2257 static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2258 {
2259         struct hw_perf_event *hwc = &event->hw;
2260         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2261         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2262         int ctr, ev_sel;
2263
2264         ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
2265                 NHMEX_B_PMON_CTR_SHIFT;
2266         ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
2267                   NHMEX_B_PMON_CTL_EV_SEL_SHIFT;
2268
2269         /* events that do not use the match/mask registers */
2270         if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
2271             (ctr == 2 && ev_sel != 0x4) || ctr == 3)
2272                 return 0;
2273
2274         if (box->pmu->pmu_idx == 0)
2275                 reg1->reg = NHMEX_B0_MSR_MATCH;
2276         else
2277                 reg1->reg = NHMEX_B1_MSR_MATCH;
2278         reg1->idx = 0;
2279         reg1->config = event->attr.config1;
2280         reg2->config = event->attr.config2;
2281         return 0;
2282 }
2283
2284 static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2285 {
2286         struct hw_perf_event *hwc = &event->hw;
2287         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2288         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2289
2290         if (reg1->idx != EXTRA_REG_NONE) {
2291                 wrmsrl(reg1->reg, reg1->config);
2292                 wrmsrl(reg1->reg + 1, reg2->config);
2293         }
2294         wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
2295                 (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
2296 }
2297
2298 /*
2299  * The Bbox has 4 counters, but each counter monitors different events.
2300  * Use bits 6-7 in the event config to select the counter.
2301  */
2302 static struct event_constraint nhmex_uncore_bbox_constraints[] = {
2303         EVENT_CONSTRAINT(0, 1, 0xc0),
2304         EVENT_CONSTRAINT(0x40, 2, 0xc0),
2305         EVENT_CONSTRAINT(0x80, 4, 0xc0),
2306         EVENT_CONSTRAINT(0xc0, 8, 0xc0),
2307         EVENT_CONSTRAINT_END,
2308 };
2309
2310 static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
2311         &format_attr_event5.attr,
2312         &format_attr_counter.attr,
2313         &format_attr_match.attr,
2314         &format_attr_mask.attr,
2315         NULL,
2316 };
2317
2318 static struct attribute_group nhmex_uncore_bbox_format_group = {
2319         .name = "format",
2320         .attrs = nhmex_uncore_bbox_formats_attr,
2321 };
2322
2323 static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
2324         NHMEX_UNCORE_OPS_COMMON_INIT(),
2325         .enable_event           = nhmex_bbox_msr_enable_event,
2326         .hw_config              = nhmex_bbox_hw_config,
2327         .get_constraint         = uncore_get_constraint,
2328         .put_constraint         = uncore_put_constraint,
2329 };
2330
2331 static struct intel_uncore_type nhmex_uncore_bbox = {
2332         .name                   = "bbox",
2333         .num_counters           = 4,
2334         .num_boxes              = 2,
2335         .perf_ctr_bits          = 48,
2336         .event_ctl              = NHMEX_B0_MSR_PMON_CTL0,
2337         .perf_ctr               = NHMEX_B0_MSR_PMON_CTR0,
2338         .event_mask             = NHMEX_B_PMON_RAW_EVENT_MASK,
2339         .box_ctl                = NHMEX_B0_MSR_PMON_GLOBAL_CTL,
2340         .msr_offset             = NHMEX_B_MSR_OFFSET,
2341         .pair_ctr_ctl           = 1,
2342         .num_shared_regs        = 1,
2343         .constraints            = nhmex_uncore_bbox_constraints,
2344         .ops                    = &nhmex_uncore_bbox_ops,
2345         .format_group           = &nhmex_uncore_bbox_format_group
2346 };
2347
2348 static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2349 {
2350         struct hw_perf_event *hwc = &event->hw;
2351         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2352         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2353
2354         /* only TO_R_PROG_EV event uses the match/mask register */
2355         if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
2356             NHMEX_S_EVENT_TO_R_PROG_EV)
2357                 return 0;
2358
2359         if (box->pmu->pmu_idx == 0)
2360                 reg1->reg = NHMEX_S0_MSR_MM_CFG;
2361         else
2362                 reg1->reg = NHMEX_S1_MSR_MM_CFG;
2363         reg1->idx = 0;
2364         reg1->config = event->attr.config1;
2365         reg2->config = event->attr.config2;
2366         return 0;
2367 }
2368
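/*
 * Programming the match/mask registers follows a fixed sequence: clear
 * MM_CFG, write the match and mask values, then set the MM_CFG enable bit
 * before enabling the counter itself.
 */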
2369 static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2370 {
2371         struct hw_perf_event *hwc = &event->hw;
2372         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2373         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2374
2375         if (reg1->idx != EXTRA_REG_NONE) {
2376                 wrmsrl(reg1->reg, 0);
2377                 wrmsrl(reg1->reg + 1, reg1->config);
2378                 wrmsrl(reg1->reg + 2, reg2->config);
2379                 wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
2380         }
2381         wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
2382 }
2383
2384 static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
2385         &format_attr_event.attr,
2386         &format_attr_umask.attr,
2387         &format_attr_edge.attr,
2388         &format_attr_inv.attr,
2389         &format_attr_thresh8.attr,
2390         &format_attr_match.attr,
2391         &format_attr_mask.attr,
2392         NULL,
2393 };
2394
2395 static struct attribute_group nhmex_uncore_sbox_format_group = {
2396         .name                   = "format",
2397         .attrs                  = nhmex_uncore_sbox_formats_attr,
2398 };
2399
2400 static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
2401         NHMEX_UNCORE_OPS_COMMON_INIT(),
2402         .enable_event           = nhmex_sbox_msr_enable_event,
2403         .hw_config              = nhmex_sbox_hw_config,
2404         .get_constraint         = uncore_get_constraint,
2405         .put_constraint         = uncore_put_constraint,
2406 };
2407
2408 static struct intel_uncore_type nhmex_uncore_sbox = {
2409         .name                   = "sbox",
2410         .num_counters           = 4,
2411         .num_boxes              = 2,
2412         .perf_ctr_bits          = 48,
2413         .event_ctl              = NHMEX_S0_MSR_PMON_CTL0,
2414         .perf_ctr               = NHMEX_S0_MSR_PMON_CTR0,
2415         .event_mask             = NHMEX_PMON_RAW_EVENT_MASK,
2416         .box_ctl                = NHMEX_S0_MSR_PMON_GLOBAL_CTL,
2417         .msr_offset             = NHMEX_S_MSR_OFFSET,
2418         .pair_ctr_ctl           = 1,
2419         .num_shared_regs        = 1,
2420         .ops                    = &nhmex_uncore_sbox_ops,
2421         .format_group           = &nhmex_uncore_sbox_format_group
2422 };
2423
2424 enum {
2425         EXTRA_REG_NHMEX_M_FILTER,
2426         EXTRA_REG_NHMEX_M_DSP,
2427         EXTRA_REG_NHMEX_M_ISS,
2428         EXTRA_REG_NHMEX_M_MAP,
2429         EXTRA_REG_NHMEX_M_MSC_THR,
2430         EXTRA_REG_NHMEX_M_PGT,
2431         EXTRA_REG_NHMEX_M_PLD,
2432         EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
2433 };
2434
2435 static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
2436         MBOX_INC_SEL_EXTAR_REG(0x0, DSP),
2437         MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR),
2438         MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR),
2439         MBOX_INC_SEL_EXTAR_REG(0x9, ISS),
2440         /* event 0xa uses two extra registers */
2441         MBOX_INC_SEL_EXTAR_REG(0xa, ISS),
2442         MBOX_INC_SEL_EXTAR_REG(0xa, PLD),
2443         MBOX_INC_SEL_EXTAR_REG(0xb, PLD),
2444         /* events 0xd ~ 0x10 use the same extra register */
2445         MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC),
2446         MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC),
2447         MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC),
2448         MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC),
2449         MBOX_INC_SEL_EXTAR_REG(0x16, PGT),
2450         MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
2451         MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
2452         MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
2453         MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
2454         EVENT_EXTRA_END
2455 };
2456
2457 /* Nehalem-EX or Westmere-EX? */
2458 static bool uncore_nhmex;
2459
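/*
 * Grab a reference to a shared extra register.  Plain registers are taken
 * when they are free or already hold an identical config.  The ZDP_CTL_FVC
 * register is special: its per-event fields are reference counted in 8-bit
 * slices of er->ref, while the fields shared by all events must match.
 */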
2460 static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
2461 {
2462         struct intel_uncore_extra_reg *er;
2463         unsigned long flags;
2464         bool ret = false;
2465         u64 mask;
2466
2467         if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2468                 er = &box->shared_regs[idx];
2469                 raw_spin_lock_irqsave(&er->lock, flags);
2470                 if (!atomic_read(&er->ref) || er->config == config) {
2471                         atomic_inc(&er->ref);
2472                         er->config = config;
2473                         ret = true;
2474                 }
2475                 raw_spin_unlock_irqrestore(&er->lock, flags);
2476
2477                 return ret;
2478         }
2479         /*
2480          * The ZDP_CTL_FVC MSR has 4 fields which are used to control
2481          * events 0xd ~ 0x10. Besides these 4 fields, there are additional
2482          * fields which are shared.
2483          */
2484         idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2485         if (WARN_ON_ONCE(idx >= 4))
2486                 return false;
2487
2488         /* mask of the shared fields */
2489         if (uncore_nhmex)
2490                 mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
2491         else
2492                 mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
2493         er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2494
2495         raw_spin_lock_irqsave(&er->lock, flags);
2496         /* add mask of the non-shared field if it's in use */
2497         if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
2498                 if (uncore_nhmex)
2499                         mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2500                 else
2501                         mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2502         }
2503
2504         if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
2505                 atomic_add(1 << (idx * 8), &er->ref);
2506                 if (uncore_nhmex)
2507                         mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
2508                                 NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2509                 else
2510                         mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
2511                                 WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2512                 er->config &= ~mask;
2513                 er->config |= (config & mask);
2514                 ret = true;
2515         }
2516         raw_spin_unlock_irqrestore(&er->lock, flags);
2517
2518         return ret;
2519 }
2520
2521 static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
2522 {
2523         struct intel_uncore_extra_reg *er;
2524
2525         if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2526                 er = &box->shared_regs[idx];
2527                 atomic_dec(&er->ref);
2528                 return;
2529         }
2530
2531         idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2532         er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2533         atomic_sub(1 << (idx * 8), &er->ref);
2534 }
2535
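/*
 * Rebuild an event's ZDP_CTL_FVC config for a different field slot: the
 * non-shared control bits are shifted by 3 bits per slot moved, the shared
 * bits are added back, and if 'modify' is set the event selector and the
 * reg1 bookkeeping are updated to match the new slot.
 */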
2536 static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
2537 {
2538         struct hw_perf_event *hwc = &event->hw;
2539         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2540         u64 idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
2541         u64 config = reg1->config;
2542
2543         /* get the non-shared control bits and shift them */
2544         idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2545         if (uncore_nhmex)
2546                 config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2547         else
2548                 config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2549         if (new_idx > orig_idx) {
2550                 idx = new_idx - orig_idx;
2551                 config <<= 3 * idx;
2552         } else {
2553                 idx = orig_idx - new_idx;
2554                 config >>= 3 * idx;
2555         }
2556
2557         /* add the shared control bits back */
2558         if (uncore_nhmex)
2559                 config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
2560         else
2561                 config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
2562         config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
2563         if (modify) {
2564                 /* adjust the main event selector */
2565                 if (new_idx > orig_idx)
2566                         hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
2567                 else
2568                         hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
2569                 reg1->config = config;
2570                 reg1->idx = ~0xff | new_idx;
2571         }
2572         return config;
2573 }
2574
2575 static struct event_constraint *
2576 nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2577 {
2578         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2579         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2580         int i, idx[2], alloc = 0;
2581         u64 config1 = reg1->config;
2582
2583         idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
2584         idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
2585 again:
2586         for (i = 0; i < 2; i++) {
2587                 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
2588                         idx[i] = 0xff;
2589
2590                 if (idx[i] == 0xff)
2591                         continue;
2592
2593                 if (!nhmex_mbox_get_shared_reg(box, idx[i],
2594                                 __BITS_VALUE(config1, i, 32)))
2595                         goto fail;
2596                 alloc |= (0x1 << i);
2597         }
2598
2599         /* for the match/mask registers */
2600         if (reg2->idx != EXTRA_REG_NONE &&
2601             (uncore_box_is_fake(box) || !reg2->alloc) &&
2602             !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
2603                 goto fail;
2604
2605         /*
2606          * If it's a fake box -- as per validate_{group,event}() we
2607          * shouldn't touch event state and we can avoid doing so
2608          * since both will only call get_event_constraints() once
2609          * on each event, this avoids the need for reg->alloc.
2610          */
2611         if (!uncore_box_is_fake(box)) {
2612                 if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
2613                         nhmex_mbox_alter_er(event, idx[0], true);
2614                 reg1->alloc |= alloc;
2615                 if (reg2->idx != EXTRA_REG_NONE)
2616                         reg2->alloc = 1;
2617         }
2618         return NULL;
2619 fail:
2620         if (idx[0] != 0xff && !(alloc & 0x1) &&
2621             idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2622                 /*
2623                  * events 0xd ~ 0x10 are functionally identical, but are
2624                  * controlled by different fields in the ZDP_CTL_FVC
2625                  * register. If we failed to take one field, try the
2626                  * remaining 3 choices.
2627                  */
2628                 BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
2629                 idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2630                 idx[0] = (idx[0] + 1) % 4;
2631                 idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2632                 if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
2633                         config1 = nhmex_mbox_alter_er(event, idx[0], false);
2634                         goto again;
2635                 }
2636         }
2637
2638         if (alloc & 0x1)
2639                 nhmex_mbox_put_shared_reg(box, idx[0]);
2640         if (alloc & 0x2)
2641                 nhmex_mbox_put_shared_reg(box, idx[1]);
2642         return &constraint_empty;
2643 }
2644
2645 static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
2646 {
2647         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2648         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2649
2650         if (uncore_box_is_fake(box))
2651                 return;
2652
2653         if (reg1->alloc & 0x1)
2654                 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
2655         if (reg1->alloc & 0x2)
2656                 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
2657         reg1->alloc = 0;
2658
2659         if (reg2->alloc) {
2660                 nhmex_mbox_put_shared_reg(box, reg2->idx);
2661                 reg2->alloc = 0;
2662         }
2663 }
2664
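/*
 * Translate an extra_reg table entry into a shared-register index.  The
 * ZDP_CTL_FVC entries for events 0xd-0x10 all share one MSR, so they are
 * spread over consecutive indices based on the event's inc_sel value.
 */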
2665 static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
2666 {
2667         if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
2668                 return er->idx;
2669         return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
2670 }
2671
2672 static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2673 {
2674         struct intel_uncore_type *type = box->pmu->type;
2675         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2676         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2677         struct extra_reg *er;
2678         unsigned msr;
2679         int reg_idx = 0;
2680         /*
2681          * The mbox events may require at most 2 extra MSRs. But only
2682          * the lower 32 bits in these MSRs are significant, so we can use
2683          * config1 to pass both MSRs' config.
2684          */
2685         for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
2686                 if (er->event != (event->hw.config & er->config_mask))
2687                         continue;
2688                 if (event->attr.config1 & ~er->valid_mask)
2689                         return -EINVAL;
2690
2691                 msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
2692                 if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
2693                         return -EINVAL;
2694
2695                 /* always use bits 32~63 to pass the PLD config */
2696                 if (er->idx == EXTRA_REG_NHMEX_M_PLD)
2697                         reg_idx = 1;
2698                 else if (WARN_ON_ONCE(reg_idx > 0))
2699                         return -EINVAL;
2700
2701                 reg1->idx &= ~(0xff << (reg_idx * 8));
2702                 reg1->reg &= ~(0xffff << (reg_idx * 16));
2703                 reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
2704                 reg1->reg |= msr << (reg_idx * 16);
2705                 reg1->config = event->attr.config1;
2706                 reg_idx++;
2707         }
2708         /*
2709          * The mbox only provides the ability to perform address matching
2710          * for the PLD events.
2711          */
2712         if (reg_idx == 2) {
2713                 reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
2714                 if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
2715                         reg2->config = event->attr.config2;
2716                 else
2717                         reg2->config = ~0ULL;
2718                 if (box->pmu->pmu_idx == 0)
2719                         reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
2720                 else
2721                         reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
2722         }
2723         return 0;
2724 }
2725
2726 static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
2727 {
2728         struct intel_uncore_extra_reg *er;
2729         unsigned long flags;
2730         u64 config;
2731
2732         if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
2733                 return box->shared_regs[idx].config;
2734
2735         er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2736         raw_spin_lock_irqsave(&er->lock, flags);
2737         config = er->config;
2738         raw_spin_unlock_irqrestore(&er->lock, flags);
2739         return config;
2740 }
2741
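/*
 * reg1 packs up to two extra MSRs: the MSR addresses live in the two
 * 16-bit halves of reg1->reg and the shared-register indices in the low
 * two bytes of reg1->idx (0xff means unused).  The address match/mask pair
 * described by reg2 is cleared first and only reprogrammed and re-enabled
 * when a valid config2 was supplied (reg2->config != ~0ULL).
 */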
2742 static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2743 {
2744         struct hw_perf_event *hwc = &event->hw;
2745         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2746         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2747         int idx;
2748
2749         idx = __BITS_VALUE(reg1->idx, 0, 8);
2750         if (idx != 0xff)
2751                 wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
2752                         nhmex_mbox_shared_reg_config(box, idx));
2753         idx = __BITS_VALUE(reg1->idx, 1, 8);
2754         if (idx != 0xff)
2755                 wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
2756                         nhmex_mbox_shared_reg_config(box, idx));
2757
2758         if (reg2->idx != EXTRA_REG_NONE) {
2759                 wrmsrl(reg2->reg, 0);
2760                 if (reg2->config != ~0ULL) {
2761                         wrmsrl(reg2->reg + 1,
2762                                 reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
2763                         wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
2764                                 (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
2765                         wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
2766                 }
2767         }
2768
2769         wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
2770 }
2771
2772 DEFINE_UNCORE_FORMAT_ATTR(count_mode,           count_mode,     "config:2-3");
2773 DEFINE_UNCORE_FORMAT_ATTR(storage_mode,         storage_mode,   "config:4-5");
2774 DEFINE_UNCORE_FORMAT_ATTR(wrap_mode,            wrap_mode,      "config:6");
2775 DEFINE_UNCORE_FORMAT_ATTR(flag_mode,            flag_mode,      "config:7");
2776 DEFINE_UNCORE_FORMAT_ATTR(inc_sel,              inc_sel,        "config:9-13");
2777 DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel,         set_flag_sel,   "config:19-21");
2778 DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en,        filter_cfg_en,  "config2:63");
2779 DEFINE_UNCORE_FORMAT_ATTR(filter_match,         filter_match,   "config2:0-33");
2780 DEFINE_UNCORE_FORMAT_ATTR(filter_mask,          filter_mask,    "config2:34-61");
2781 DEFINE_UNCORE_FORMAT_ATTR(dsp,                  dsp,            "config1:0-31");
2782 DEFINE_UNCORE_FORMAT_ATTR(thr,                  thr,            "config1:0-31");
2783 DEFINE_UNCORE_FORMAT_ATTR(fvc,                  fvc,            "config1:0-31");
2784 DEFINE_UNCORE_FORMAT_ATTR(pgt,                  pgt,            "config1:0-31");
2785 DEFINE_UNCORE_FORMAT_ATTR(map,                  map,            "config1:0-31");
2786 DEFINE_UNCORE_FORMAT_ATTR(iss,                  iss,            "config1:0-31");
2787 DEFINE_UNCORE_FORMAT_ATTR(pld,                  pld,            "config1:32-63");
2788
2789 static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
2790         &format_attr_count_mode.attr,
2791         &format_attr_storage_mode.attr,
2792         &format_attr_wrap_mode.attr,
2793         &format_attr_flag_mode.attr,
2794         &format_attr_inc_sel.attr,
2795         &format_attr_set_flag_sel.attr,
2796         &format_attr_filter_cfg_en.attr,
2797         &format_attr_filter_match.attr,
2798         &format_attr_filter_mask.attr,
2799         &format_attr_dsp.attr,
2800         &format_attr_thr.attr,
2801         &format_attr_fvc.attr,
2802         &format_attr_pgt.attr,
2803         &format_attr_map.attr,
2804         &format_attr_iss.attr,
2805         &format_attr_pld.attr,
2806         NULL,
2807 };
2808
2809 static struct attribute_group nhmex_uncore_mbox_format_group = {
2810         .name           = "format",
2811         .attrs          = nhmex_uncore_mbox_formats_attr,
2812 };
2813
2814 static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
2815         INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
2816         INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
2817         { /* end: all zeroes */ },
2818 };
2819
2820 static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
2821         INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
2822         INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
2823         { /* end: all zeroes */ },
2824 };
2825
2826 static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
2827         NHMEX_UNCORE_OPS_COMMON_INIT(),
2828         .enable_event   = nhmex_mbox_msr_enable_event,
2829         .hw_config      = nhmex_mbox_hw_config,
2830         .get_constraint = nhmex_mbox_get_constraint,
2831         .put_constraint = nhmex_mbox_put_constraint,
2832 };
2833
2834 static struct intel_uncore_type nhmex_uncore_mbox = {
2835         .name                   = "mbox",
2836         .num_counters           = 6,
2837         .num_boxes              = 2,
2838         .perf_ctr_bits          = 48,
2839         .event_ctl              = NHMEX_M0_MSR_PMU_CTL0,
2840         .perf_ctr               = NHMEX_M0_MSR_PMU_CNT0,
2841         .event_mask             = NHMEX_M_PMON_RAW_EVENT_MASK,
2842         .box_ctl                = NHMEX_M0_MSR_GLOBAL_CTL,
2843         .msr_offset             = NHMEX_M_MSR_OFFSET,
2844         .pair_ctr_ctl           = 1,
2845         .num_shared_regs        = 8,
2846         .event_descs            = nhmex_uncore_mbox_events,
2847         .ops                    = &nhmex_uncore_mbox_ops,
2848         .format_group           = &nhmex_uncore_mbox_format_group,
2849 };
2850
2851 static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
2852 {
2853         struct hw_perf_event *hwc = &event->hw;
2854         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2855
2856         /* adjust the main event selector and extra register index */
2857         if (reg1->idx % 2) {
2858                 reg1->idx--;
2859                 hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
2860         } else {
2861                 reg1->idx++;
2862                 hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
2863         }
2864
2865         /* adjust extra register config */
2866         switch (reg1->idx % 6) {
2867         case 2:
2868                 /* shift the 8~15 bits to the 0~7 bits */
2869                 reg1->config >>= 8;
2870                 break;
2871         case 3:
2872                 /* shift the 0~7 bits to the 8~15 bits */
2873                 reg1->config <<= 8;
2874                 break;
2875         }
2876 }
2877
2878 /*
2879  * Each rbox has 4 event sets, which monitor PQI ports 0~3 or 4~7.
2880  * An event set consists of 6 events; the 3rd and 4th events in
2881  * an event set use the same extra register, so an event set uses
2882  * 5 extra registers.
2883  */
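/*
 * Mapping from an event's position within a set to its shared extra
 * register, as implemented below: idx % 6 values 0..5 select extra
 * register offsets 0, 1, 2, 2, 3, 4 within the set (events 2 and 3
 * share one register), and each further set adds 5 to the index.
 * For example, reg1->idx = 9 (set 1, event 3) uses shared register
 * 2 + 1 * 5 = 7.
 */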
2884 static struct event_constraint *
2885 nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2886 {
2887         struct hw_perf_event *hwc = &event->hw;
2888         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2889         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2890         struct intel_uncore_extra_reg *er;
2891         unsigned long flags;
2892         int idx, er_idx;
2893         u64 config1;
2894         bool ok = false;
2895
2896         if (!uncore_box_is_fake(box) && reg1->alloc)
2897                 return NULL;
2898
2899         idx = reg1->idx % 6;
2900         config1 = reg1->config;
2901 again:
2902         er_idx = idx;
2903         /* the 3rd and 4th events use the same extra register */
2904         if (er_idx > 2)
2905                 er_idx--;
2906         er_idx += (reg1->idx / 6) * 5;
2907
2908         er = &box->shared_regs[er_idx];
2909         raw_spin_lock_irqsave(&er->lock, flags);
2910         if (idx < 2) {
2911                 if (!atomic_read(&er->ref) || er->config == reg1->config) {
2912                         atomic_inc(&er->ref);
2913                         er->config = reg1->config;
2914                         ok = true;
2915                 }
2916         } else if (idx == 2 || idx == 3) {
2917                 /*
2918                  * these two events use different fields in an extra register,
2919                  * the 0~7 bits and the 8~15 bits respectively.
2920                  */
2921                 u64 mask = 0xff << ((idx - 2) * 8);
2922                 if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
2923                                 !((er->config ^ config1) & mask)) {
2924                         atomic_add(1 << ((idx - 2) * 8), &er->ref);
2925                         er->config &= ~mask;
2926                         er->config |= config1 & mask;
2927                         ok = true;
2928                 }
2929         } else {
2930                 if (!atomic_read(&er->ref) ||
2931                                 (er->config == (hwc->config >> 32) &&
2932                                  er->config1 == reg1->config &&
2933                                  er->config2 == reg2->config)) {
2934                         atomic_inc(&er->ref);
2935                         er->config = (hwc->config >> 32);
2936                         er->config1 = reg1->config;
2937                         er->config2 = reg2->config;
2938                         ok = true;
2939                 }
2940         }
2941         raw_spin_unlock_irqrestore(&er->lock, flags);
2942
2943         if (!ok) {
2944                 /*
2945                  * The Rbox events always come in pairs. The paired
2946                  * events are functionally identical, but use different
2947                  * extra registers. If we failed to take an extra
2948                  * register, try the alternative.
2949                  */
2950                 idx ^= 1;
2951                 if (idx != reg1->idx % 6) {
2952                         if (idx == 2)
2953                                 config1 >>= 8;
2954                         else if (idx == 3)
2955                                 config1 <<= 8;
2956                         goto again;
2957                 }
2958         } else {
2959                 if (!uncore_box_is_fake(box)) {
2960                         if (idx != reg1->idx % 6)
2961                                 nhmex_rbox_alter_er(box, event);
2962                         reg1->alloc = 1;
2963                 }
2964                 return NULL;
2965         }
2966         return &constraint_empty;
2967 }
2968
2969 static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
2970 {
2971         struct intel_uncore_extra_reg *er;
2972         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2973         int idx, er_idx;
2974
2975         if (uncore_box_is_fake(box) || !reg1->alloc)
2976                 return;
2977
2978         idx = reg1->idx % 6;
2979         er_idx = idx;
2980         if (er_idx > 2)
2981                 er_idx--;
2982         er_idx += (reg1->idx / 6) * 5;
2983
2984         er = &box->shared_regs[er_idx];
2985         if (idx == 2 || idx == 3)
2986                 atomic_sub(1 << ((idx - 2) * 8), &er->ref);
2987         else
2988                 atomic_dec(&er->ref);
2989
2990         reg1->alloc = 0;
2991 }
2992
2993 static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2994 {
2995         struct hw_perf_event *hwc = &event->hw;
2996         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2997         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2998         int idx;
2999
3000         idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
3001                 NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
3002         if (idx >= 0x18)
3003                 return -EINVAL;
3004
3005         reg1->idx = idx;
3006         reg1->config = event->attr.config1;
3007
3008         switch (idx % 6) {
3009         case 4:
3010         case 5:
3011                 hwc->config |= event->attr.config & (~0ULL << 32);
3012                 reg2->config = event->attr.config2;
3013                 break;
3014         }
3015         return 0;
3016 }
3017
3018 static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
3019 {
3020         struct hw_perf_event *hwc = &event->hw;
3021         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
3022         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
3023         int idx, port;
3024
3025         idx = reg1->idx;
3026         port = idx / 6 + box->pmu->pmu_idx * 4;
3027
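        /*
         * Events 0 and 1 of a set program the per-port IPERF config
         * registers, events 2 and 3 share the QLX config register, and
         * events 4 and 5 program the two XBR match/mask register sets.
         */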
3028         switch (idx % 6) {
3029         case 0:
3030                 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
3031                 break;
3032         case 1:
3033                 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
3034                 break;
3035         case 2:
3036         case 3:
3037                 wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
3038                         uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
3039                 break;
3040         case 4:
3041                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
3042                         hwc->config >> 32);
3043                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
3044                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
3045                 break;
3046         case 5:
3047                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
3048                         hwc->config >> 32);
3049                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
3050                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
3051                 break;
3052         }
3053
3054         wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
3055                 (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
3056 }
3057
3058 DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
3059 DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
3060 DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
3061 DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
3062 DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");
3063
3064 static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
3065         &format_attr_event5.attr,
3066         &format_attr_xbr_mm_cfg.attr,
3067         &format_attr_xbr_match.attr,
3068         &format_attr_xbr_mask.attr,
3069         &format_attr_qlx_cfg.attr,
3070         &format_attr_iperf_cfg.attr,
3071         NULL,
3072 };
3073
3074 static struct attribute_group nhmex_uncore_rbox_format_group = {
3075         .name = "format",
3076         .attrs = nhmex_uncore_rbox_formats_attr,
3077 };
3078
3079 static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
3080         INTEL_UNCORE_EVENT_DESC(qpi0_flit_send,         "event=0x0,iperf_cfg=0x80000000"),
3081         INTEL_UNCORE_EVENT_DESC(qpi1_filt_send,         "event=0x6,iperf_cfg=0x80000000"),
3082         INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt,         "event=0x0,iperf_cfg=0x40000000"),
3083         INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt,         "event=0x6,iperf_cfg=0x40000000"),
3084         INTEL_UNCORE_EVENT_DESC(qpi0_date_response,     "event=0x0,iperf_cfg=0xc4"),
3085         INTEL_UNCORE_EVENT_DESC(qpi1_date_response,     "event=0x6,iperf_cfg=0xc4"),
3086         { /* end: all zeroes */ },
3087 };
3088
3089 static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
3090         NHMEX_UNCORE_OPS_COMMON_INIT(),
3091         .enable_event           = nhmex_rbox_msr_enable_event,
3092         .hw_config              = nhmex_rbox_hw_config,
3093         .get_constraint         = nhmex_rbox_get_constraint,
3094         .put_constraint         = nhmex_rbox_put_constraint,
3095 };
3096
3097 static struct intel_uncore_type nhmex_uncore_rbox = {
3098         .name                   = "rbox",
3099         .num_counters           = 8,
3100         .num_boxes              = 2,
3101         .perf_ctr_bits          = 48,
3102         .event_ctl              = NHMEX_R_MSR_PMON_CTL0,
3103         .perf_ctr               = NHMEX_R_MSR_PMON_CNT0,
3104         .event_mask             = NHMEX_R_PMON_RAW_EVENT_MASK,
3105         .box_ctl                = NHMEX_R_MSR_GLOBAL_CTL,
3106         .msr_offset             = NHMEX_R_MSR_OFFSET,
3107         .pair_ctr_ctl           = 1,
3108         .num_shared_regs        = 20,
3109         .event_descs            = nhmex_uncore_rbox_events,
3110         .ops                    = &nhmex_uncore_rbox_ops,
3111         .format_group           = &nhmex_uncore_rbox_format_group
3112 };
3113
3114 static struct intel_uncore_type *nhmex_msr_uncores[] = {
3115         &nhmex_uncore_ubox,
3116         &nhmex_uncore_cbox,
3117         &nhmex_uncore_bbox,
3118         &nhmex_uncore_sbox,
3119         &nhmex_uncore_mbox,
3120         &nhmex_uncore_rbox,
3121         &nhmex_uncore_wbox,
3122         NULL,
3123 };
3124 /* end of Nehalem-EX uncore support */
3125
3126 static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx)
3127 {
3128         struct hw_perf_event *hwc = &event->hw;
3129
3130         hwc->idx = idx;
3131         hwc->last_tag = ++box->tags[idx];
3132
3133         if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
3134                 hwc->event_base = uncore_fixed_ctr(box);
3135                 hwc->config_base = uncore_fixed_ctl(box);
3136                 return;
3137         }
3138
3139         hwc->config_base = uncore_event_ctl(box, hwc->idx);
3140         hwc->event_base  = uncore_perf_ctr(box, hwc->idx);
3141 }
3142
3143 static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
3144 {
3145         u64 prev_count, new_count, delta;
3146         int shift;
3147
3148         if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
3149                 shift = 64 - uncore_fixed_ctr_bits(box);
3150         else
3151                 shift = 64 - uncore_perf_ctr_bits(box);
3152
3153         /* the hrtimer might modify the previous event value */
3154 again:
3155         prev_count = local64_read(&event->hw.prev_count);
3156         new_count = uncore_read_counter(box, event);
3157         if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
3158                 goto again;
3159
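        /*
         * The raw counters are narrower than 64 bits; shifting both
         * values up to bit 63 before subtracting and shifting the result
         * back down makes the delta come out right even when the counter
         * wraps.  E.g. with 48-bit counters (shift = 16), prev = 2^48 - 1
         * and new = 1 gives a delta of 2.
         */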
3160         delta = (new_count << shift) - (prev_count << shift);
3161         delta >>= shift;
3162
3163         local64_add(delta, &event->count);
3164 }
3165
3166 /*
3167  * The overflow interrupt is unavailable for SandyBridge-EP and is
3168  * broken for SandyBridge, so we use an hrtimer to periodically poll
3169  * the counters to avoid overflow.
3170  */
3171 static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
3172 {
3173         struct intel_uncore_box *box;
3174         struct perf_event *event;
3175         unsigned long flags;
3176         int bit;
3177
3178         box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
3179         if (!box->n_active || box->cpu != smp_processor_id())
3180                 return HRTIMER_NORESTART;
3181         /*
3182          * disable local interrupts to prevent uncore_pmu_event_start/stop
3183          * from interrupting the update process
3184          */
3185         local_irq_save(flags);
3186
3187         /*
3188          * handle boxes with an active event list as opposed to active
3189          * counters
3190          */
3191         list_for_each_entry(event, &box->active_list, active_entry) {
3192                 uncore_perf_event_update(box, event);
3193         }
3194
3195         for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
3196                 uncore_perf_event_update(box, box->events[bit]);
3197
3198         local_irq_restore(flags);
3199
3200         hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
3201         return HRTIMER_RESTART;
3202 }
3203
3204 static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
3205 {
3206         __hrtimer_start_range_ns(&box->hrtimer,
3207                         ns_to_ktime(box->hrtimer_duration), 0,
3208                         HRTIMER_MODE_REL_PINNED, 0);
3209 }
3210
3211 static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
3212 {
3213         hrtimer_cancel(&box->hrtimer);
3214 }
3215
3216 static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
3217 {
3218         hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3219         box->hrtimer.function = uncore_pmu_hrtimer;
3220 }
3221
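/*
 * The shared_regs[] array is allocated as a flexible tail of the box
 * structure, so one node-local allocation covers both the box and its
 * type->num_shared_regs extra registers.
 */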
3222 static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node)
3223 {
3224         struct intel_uncore_box *box;
3225         int i, size;
3226
3227         size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
3228
3229         box = kzalloc_node(size, GFP_KERNEL, node);
3230         if (!box)
3231                 return NULL;
3232
3233         for (i = 0; i < type->num_shared_regs; i++)
3234                 raw_spin_lock_init(&box->shared_regs[i].lock);
3235
3236         uncore_pmu_init_hrtimer(box);
3237         atomic_set(&box->refcnt, 1);
3238         box->cpu = -1;
3239         box->phys_id = -1;
3240
3241         /* set default hrtimer timeout */
3242         box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;
3243
3244         INIT_LIST_HEAD(&box->active_list);
3245
3246         return box;
3247 }
3248
3249 static int
3250 uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
3251 {
3252         struct perf_event *event;
3253         int n, max_count;
3254
3255         max_count = box->pmu->type->num_counters;
3256         if (box->pmu->type->fixed_ctl)
3257                 max_count++;
3258
3259         if (box->n_events >= max_count)
3260                 return -EINVAL;
3261
3262         n = box->n_events;
3263         box->event_list[n] = leader;
3264         n++;
3265         if (!dogrp)
3266                 return n;
3267
3268         list_for_each_entry(event, &leader->sibling_list, group_entry) {
3269                 if (event->state <= PERF_EVENT_STATE_OFF)
3270                         continue;
3271
3272                 if (n >= max_count)
3273                         return -EINVAL;
3274
3275                 box->event_list[n] = event;
3276                 n++;
3277         }
3278         return n;
3279 }
3280
3281 static struct event_constraint *
3282 uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
3283 {
3284         struct intel_uncore_type *type = box->pmu->type;
3285         struct event_constraint *c;
3286
3287         if (type->ops->get_constraint) {
3288                 c = type->ops->get_constraint(box, event);
3289                 if (c)
3290                         return c;
3291         }
3292
3293         if (event->attr.config == UNCORE_FIXED_EVENT)
3294                 return &constraint_fixed;
3295
3296         if (type->constraints) {
3297                 for_each_event_constraint(c, type->constraints) {
3298                         if ((event->hw.config & c->cmask) == c->code)
3299                                 return c;
3300                 }
3301         }
3302
3303         return &type->unconstrainted;
3304 }
3305
3306 static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
3307 {
3308         if (box->pmu->type->ops->put_constraint)
3309                 box->pmu->type->ops->put_constraint(box, event);
3310 }
3311
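/*
 * Assign counters to the n collected events.  A fast path keeps events
 * on the counters they already occupy when their constraints still
 * allow it; otherwise perf_assign_events() reschedules everything based
 * on the constraint weights (wmin/wmax) gathered first.
 */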
3312 static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
3313 {
3314         unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
3315         struct event_constraint *c;
3316         int i, wmin, wmax, ret = 0;
3317         struct hw_perf_event *hwc;
3318
3319         bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
3320
3321         for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
3322                 hwc = &box->event_list[i]->hw;
3323                 c = uncore_get_event_constraint(box, box->event_list[i]);
3324                 hwc->constraint = c;
3325                 wmin = min(wmin, c->weight);
3326                 wmax = max(wmax, c->weight);
3327         }
3328
3329         /* fastpath, try to reuse previous register */
3330         for (i = 0; i < n; i++) {
3331                 hwc = &box->event_list[i]->hw;
3332                 c = hwc->constraint;
3333
3334                 /* never assigned */
3335                 if (hwc->idx == -1)
3336                         break;
3337
3338                 /* constraint still honored */
3339                 if (!test_bit(hwc->idx, c->idxmsk))
3340                         break;
3341
3342                 /* not already used */
3343                 if (test_bit(hwc->idx, used_mask))
3344                         break;
3345
3346                 __set_bit(hwc->idx, used_mask);
3347                 if (assign)
3348                         assign[i] = hwc->idx;
3349         }
3350         /* slow path */
3351         if (i != n)
3352                 ret = perf_assign_events(box->event_list, n,
3353                                          wmin, wmax, assign);
3354
3355         if (!assign || ret) {
3356                 for (i = 0; i < n; i++)
3357                         uncore_put_event_constraint(box, box->event_list[i]);
3358         }
3359         return ret ? -EINVAL : 0;
3360 }
3361
3362 static void uncore_pmu_event_start(struct perf_event *event, int flags)
3363 {
3364         struct intel_uncore_box *box = uncore_event_to_box(event);
3365         int idx = event->hw.idx;
3366
3367         if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
3368                 return;
3369
3370         if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
3371                 return;
3372
3373         event->hw.state = 0;
3374         box->events[idx] = event;
3375         box->n_active++;
3376         __set_bit(idx, box->active_mask);
3377
3378         local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
3379         uncore_enable_event(box, event);
3380
3381         if (box->n_active == 1) {
3382                 uncore_enable_box(box);
3383                 uncore_pmu_start_hrtimer(box);
3384         }
3385 }
3386
3387 static void uncore_pmu_event_stop(struct perf_event *event, int flags)
3388 {
3389         struct intel_uncore_box *box = uncore_event_to_box(event);
3390         struct hw_perf_event *hwc = &event->hw;
3391
3392         if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
3393                 uncore_disable_event(box, event);
3394                 box->n_active--;
3395                 box->events[hwc->idx] = NULL;
3396                 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
3397                 hwc->state |= PERF_HES_STOPPED;
3398
3399                 if (box->n_active == 0) {
3400                         uncore_disable_box(box);
3401                         uncore_pmu_cancel_hrtimer(box);
3402                 }
3403         }
3404
3405         if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
3406                 /*
3407                  * Drain the remaining delta count out of an event
3408                  * that we are disabling:
3409                  */
3410                 uncore_perf_event_update(box, event);
3411                 hwc->state |= PERF_HES_UPTODATE;
3412         }
3413 }
3414
3415 static int uncore_pmu_event_add(struct perf_event *event, int flags)
3416 {
3417         struct intel_uncore_box *box = uncore_event_to_box(event);
3418         struct hw_perf_event *hwc = &event->hw;
3419         int assign[UNCORE_PMC_IDX_MAX];
3420         int i, n, ret;
3421
3422         if (!box)
3423                 return -ENODEV;
3424
3425         ret = n = uncore_collect_events(box, event, false);
3426         if (ret < 0)
3427                 return ret;
3428
3429         hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
3430         if (!(flags & PERF_EF_START))
3431                 hwc->state |= PERF_HES_ARCH;
3432
3433         ret = uncore_assign_events(box, assign, n);
3434         if (ret)
3435                 return ret;
3436
3437         /* save events moving to new counters */
3438         for (i = 0; i < box->n_events; i++) {
3439                 event = box->event_list[i];
3440                 hwc = &event->hw;
3441
3442                 if (hwc->idx == assign[i] &&
3443                         hwc->last_tag == box->tags[assign[i]])
3444                         continue;
3445                 /*
3446                  * Ensure we don't accidentally enable a stopped
3447                  * counter simply because we rescheduled.
3448                  */
3449                 if (hwc->state & PERF_HES_STOPPED)
3450                         hwc->state |= PERF_HES_ARCH;
3451
3452                 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
3453         }
3454
3455         /* reprogram moved events into new counters */
3456         for (i = 0; i < n; i++) {
3457                 event = box->event_list[i];
3458                 hwc = &event->hw;
3459
3460                 if (hwc->idx != assign[i] ||
3461                         hwc->last_tag != box->tags[assign[i]])
3462                         uncore_assign_hw_event(box, event, assign[i]);
3463                 else if (i < box->n_events)
3464                         continue;
3465
3466                 if (hwc->state & PERF_HES_ARCH)
3467                         continue;
3468
3469                 uncore_pmu_event_start(event, 0);
3470         }
3471         box->n_events = n;
3472
3473         return 0;
3474 }
3475
3476 static void uncore_pmu_event_del(struct perf_event *event, int flags)
3477 {
3478         struct intel_uncore_box *box = uncore_event_to_box(event);
3479         int i;
3480
3481         uncore_pmu_event_stop(event, PERF_EF_UPDATE);
3482
3483         for (i = 0; i < box->n_events; i++) {
3484                 if (event == box->event_list[i]) {
3485                         uncore_put_event_constraint(box, event);
3486
3487                         while (++i < box->n_events)
3488                                 box->event_list[i - 1] = box->event_list[i];
3489
3490                         --box->n_events;
3491                         break;
3492                 }
3493         }
3494
3495         event->hw.idx = -1;
3496         event->hw.last_tag = ~0ULL;
3497 }
3498
3499 static void uncore_pmu_event_read(struct perf_event *event)
3500 {
3501         struct intel_uncore_box *box = uncore_event_to_box(event);
3502         uncore_perf_event_update(box, event);
3503 }
3504
3505 /*
3506  * validation ensures the group can be loaded onto the
3507  * PMU if it was the only group available.
3508  */
3509 static int uncore_validate_group(struct intel_uncore_pmu *pmu,
3510                                 struct perf_event *event)
3511 {
3512         struct perf_event *leader = event->group_leader;
3513         struct intel_uncore_box *fake_box;
3514         int ret = -EINVAL, n;
3515
3516         fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
3517         if (!fake_box)
3518                 return -ENOMEM;
3519
3520         fake_box->pmu = pmu;
3521         /*
3522          * the event is not yet connected with its
3523          * siblings, therefore we must first collect
3524          * existing siblings, then add the new event
3525          * before we can simulate the scheduling
3526          */
3527         n = uncore_collect_events(fake_box, leader, true);
3528         if (n < 0)
3529                 goto out;
3530
3531         fake_box->n_events = n;
3532         n = uncore_collect_events(fake_box, event, false);
3533         if (n < 0)
3534                 goto out;
3535
3536         fake_box->n_events = n;
3537
3538         ret = uncore_assign_events(fake_box, NULL, n);
3539 out:
3540         kfree(fake_box);
3541         return ret;
3542 }
3543
3544 static int uncore_pmu_event_init(struct perf_event *event)
3545 {
3546         struct intel_uncore_pmu *pmu;
3547         struct intel_uncore_box *box;
3548         struct hw_perf_event *hwc = &event->hw;
3549         int ret;
3550
3551         if (event->attr.type != event->pmu->type)
3552                 return -ENOENT;
3553
3554         pmu = uncore_event_to_pmu(event);
3555         /* no device found for this pmu */
3556         if (pmu->func_id < 0)
3557                 return -ENOENT;
3558
3559         /*
3560          * The uncore PMU always measures at all privilege levels.
3561          * So it doesn't make sense to specify any exclude bits.
3562          */
3563         if (event->attr.exclude_user || event->attr.exclude_kernel ||
3564                         event->attr.exclude_hv || event->attr.exclude_idle)
3565                 return -EINVAL;
3566
3567         /* Sampling not supported yet */
3568         if (hwc->sample_period)
3569                 return -EINVAL;
3570
3571         /*
3572          * Place all uncore events for a particular physical package
3573          * onto a single cpu
3574          */
3575         if (event->cpu < 0)
3576                 return -EINVAL;
3577         box = uncore_pmu_to_box(pmu, event->cpu);
3578         if (!box || box->cpu < 0)
3579                 return -EINVAL;
3580         event->cpu = box->cpu;
3581
3582         event->hw.idx = -1;
3583         event->hw.last_tag = ~0ULL;
3584         event->hw.extra_reg.idx = EXTRA_REG_NONE;
3585         event->hw.branch_reg.idx = EXTRA_REG_NONE;
3586
3587         if (event->attr.config == UNCORE_FIXED_EVENT) {
3588                 /* no fixed counter */
3589                 if (!pmu->type->fixed_ctl)
3590                         return -EINVAL;
3591                 /*
3592                  * if there is only one fixed counter, only the first pmu
3593                  * can access the fixed counter
3594                  */
3595                 if (pmu->type->single_fixed && pmu->pmu_idx > 0)
3596                         return -EINVAL;
3597
3598                 /* fixed counters have event field hardcoded to zero */
3599                 hwc->config = 0ULL;
3600         } else {
3601                 hwc->config = event->attr.config & pmu->type->event_mask;
3602                 if (pmu->type->ops->hw_config) {
3603                         ret = pmu->type->ops->hw_config(box, event);
3604                         if (ret)
3605                                 return ret;
3606                 }
3607         }
3608
3609         if (event->group_leader != event)
3610                 ret = uncore_validate_group(pmu, event);
3611         else
3612                 ret = 0;
3613
3614         return ret;
3615 }
3616
3617 static ssize_t uncore_get_attr_cpumask(struct device *dev,
3618                                 struct device_attribute *attr, char *buf)
3619 {
3620         int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &uncore_cpu_mask);
3621
3622         buf[n++] = '\n';
3623         buf[n] = '\0';
3624         return n;
3625 }
3626
3627 static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);
3628
3629 static struct attribute *uncore_pmu_attrs[] = {
3630         &dev_attr_cpumask.attr,
3631         NULL,
3632 };
3633
3634 static struct attribute_group uncore_pmu_attr_group = {
3635         .attrs = uncore_pmu_attrs,
3636 };
3637
3638 static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu)
3639 {
3640         int ret;
3641
3642         if (!pmu->type->pmu) {
3643                 pmu->pmu = (struct pmu) {
3644                         .attr_groups    = pmu->type->attr_groups,
3645                         .task_ctx_nr    = perf_invalid_context,
3646                         .event_init     = uncore_pmu_event_init,
3647                         .add            = uncore_pmu_event_add,
3648                         .del            = uncore_pmu_event_del,
3649                         .start          = uncore_pmu_event_start,
3650                         .stop           = uncore_pmu_event_stop,
3651                         .read           = uncore_pmu_event_read,
3652                 };
3653         } else {
3654                 pmu->pmu = *pmu->type->pmu;
3655                 pmu->pmu.attr_groups = pmu->type->attr_groups;
3656         }
3657
3658         if (pmu->type->num_boxes == 1) {
3659                 if (strlen(pmu->type->name) > 0)
3660                         sprintf(pmu->name, "uncore_%s", pmu->type->name);
3661                 else
3662                         sprintf(pmu->name, "uncore");
3663         } else {
3664                 sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
3665                         pmu->pmu_idx);
3666         }
3667
3668         ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
3669         return ret;
3670 }
3671
3672 static void __init uncore_type_exit(struct intel_uncore_type *type)
3673 {
3674         int i;
3675
3676         for (i = 0; i < type->num_boxes; i++)
3677                 free_percpu(type->pmus[i].box);
3678         kfree(type->pmus);
3679         type->pmus = NULL;
3680         kfree(type->events_group);
3681         type->events_group = NULL;
3682 }
3683
3684 static void __init uncore_types_exit(struct intel_uncore_type **types)
3685 {
3686         int i;
3687         for (i = 0; types[i]; i++)
3688                 uncore_type_exit(types[i]);
3689 }
3690
3691 static int __init uncore_type_init(struct intel_uncore_type *type)
3692 {
3693         struct intel_uncore_pmu *pmus;
3694         struct attribute_group *attr_group;
3695         struct attribute **attrs;
3696         int i, j;
3697
3698         pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
3699         if (!pmus)
3700                 return -ENOMEM;
3701
3702         type->pmus = pmus;
3703
3704         type->unconstrainted = (struct event_constraint)
3705                 __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
3706                                 0, type->num_counters, 0, 0);
3707
3708         for (i = 0; i < type->num_boxes; i++) {
3709                 pmus[i].func_id = -1;
3710                 pmus[i].pmu_idx = i;
3711                 pmus[i].type = type;
3712                 INIT_LIST_HEAD(&pmus[i].box_list);
3713                 pmus[i].box = alloc_percpu(struct intel_uncore_box *);
3714                 if (!pmus[i].box)
3715                         goto fail;
3716         }
3717
3718         if (type->event_descs) {
3719                 i = 0;
3720                 while (type->event_descs[i].attr.attr.name)
3721                         i++;
3722
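                /*
                 * Allocate the events attribute_group and its
                 * NULL-terminated attribute pointer array in one block;
                 * the pointers live directly after the group structure
                 * and kzalloc() leaves the terminating entry NULL.
                 */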
3723                 attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
3724                                         sizeof(*attr_group), GFP_KERNEL);
3725                 if (!attr_group)
3726                         goto fail;
3727
3728                 attrs = (struct attribute **)(attr_group + 1);
3729                 attr_group->name = "events";
3730                 attr_group->attrs = attrs;
3731
3732                 for (j = 0; j < i; j++)
3733                         attrs[j] = &type->event_descs[j].attr.attr;
3734
3735                 type->events_group = attr_group;
3736         }
3737
3738         type->pmu_group = &uncore_pmu_attr_group;
3739         return 0;
3740 fail:
3741         uncore_type_exit(type);
3742         return -ENOMEM;
3743 }
3744
3745 static int __init uncore_types_init(struct intel_uncore_type **types)
3746 {
3747         int i, ret;
3748
3749         for (i = 0; types[i]; i++) {
3750                 ret = uncore_type_init(types[i]);
3751                 if (ret)
3752                         goto fail;
3753         }
3754         return 0;
3755 fail:
3756         while (--i >= 0)
3757                 uncore_type_exit(types[i]);
3758         return ret;
3759 }
3760
3761 static struct pci_driver *uncore_pci_driver;
3762 static bool pcidrv_registered;
3763
3764 /*
3765  * add a pci uncore device
3766  */
3767 static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3768 {
3769         struct intel_uncore_pmu *pmu;
3770         struct intel_uncore_box *box;
3771         struct intel_uncore_type *type;
3772         int phys_id;
3773
3774         phys_id = pcibus_to_physid[pdev->bus->number];
3775         if (phys_id < 0)
3776                 return -ENODEV;
3777
3778         if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
3779                 extra_pci_dev[phys_id][UNCORE_PCI_DEV_IDX(id->driver_data)] = pdev;
3780                 pci_set_drvdata(pdev, NULL);
3781                 return 0;
3782         }
3783
3784         type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
3785         box = uncore_alloc_box(type, NUMA_NO_NODE);
3786         if (!box)
3787                 return -ENOMEM;
3788
3789         /*
3790          * for a performance monitoring unit with multiple boxes,
3791          * each box has a different function id.
3792          */
3793         pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
3794         if (pmu->func_id < 0)
3795                 pmu->func_id = pdev->devfn;
3796         else
3797                 WARN_ON_ONCE(pmu->func_id != pdev->devfn);
3798
3799         box->phys_id = phys_id;
3800         box->pci_dev = pdev;
3801         box->pmu = pmu;
3802         uncore_box_init(box);
3803         pci_set_drvdata(pdev, box);
3804
3805         raw_spin_lock(&uncore_box_lock);
3806         list_add_tail(&box->list, &pmu->box_list);
3807         raw_spin_unlock(&uncore_box_lock);
3808
3809         return 0;
3810 }
3811
3812 static void uncore_pci_remove(struct pci_dev *pdev)
3813 {
3814         struct intel_uncore_box *box = pci_get_drvdata(pdev);
3815         struct intel_uncore_pmu *pmu;
3816         int i, cpu, phys_id = pcibus_to_physid[pdev->bus->number];
3817
3819         if (!box) {
3820                 for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
3821                         if (extra_pci_dev[phys_id][i] == pdev) {
3822                                 extra_pci_dev[phys_id][i] = NULL;
3823                                 break;
3824                         }
3825                 }
3826                 WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
3827                 return;
3828         }
3829
3830         pmu = box->pmu;
3831         if (WARN_ON_ONCE(phys_id != box->phys_id))
3832                 return;
3833
3834         pci_set_drvdata(pdev, NULL);
3835
3836         raw_spin_lock(&uncore_box_lock);
3837         list_del(&box->list);
3838         raw_spin_unlock(&uncore_box_lock);
3839
3840         for_each_possible_cpu(cpu) {
3841                 if (*per_cpu_ptr(pmu->box, cpu) == box) {
3842                         *per_cpu_ptr(pmu->box, cpu) = NULL;
3843                         atomic_dec(&box->refcnt);
3844                 }
3845         }
3846
3847         WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
3848         kfree(box);
3849 }
3850
3851 static int __init uncore_pci_init(void)
3852 {
3853         int ret;
3854
3855         switch (boot_cpu_data.x86_model) {
3856         case 45: /* Sandy Bridge-EP */
3857                 ret = snbep_pci2phy_map_init(0x3ce0);
3858                 if (ret)
3859                         return ret;
3860                 pci_uncores = snbep_pci_uncores;
3861                 uncore_pci_driver = &snbep_uncore_pci_driver;
3862                 break;
3863         case 62: /* IvyTown */
3864                 ret = snbep_pci2phy_map_init(0x0e1e);
3865                 if (ret)
3866                         return ret;
3867                 pci_uncores = ivt_pci_uncores;
3868                 uncore_pci_driver = &ivt_uncore_pci_driver;
3869                 break;
3870         case 42: /* Sandy Bridge */
3871                 ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_SNB_IMC);
3872                 if (ret)
3873                         return ret;
3874                 pci_uncores = snb_pci_uncores;
3875                 uncore_pci_driver = &snb_uncore_pci_driver;
3876                 break;
3877         case 58: /* Ivy Bridge */
3878                 ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_IVB_IMC);
3879                 if (ret)
3880                         return ret;
3881                 pci_uncores = snb_pci_uncores;
3882                 uncore_pci_driver = &ivb_uncore_pci_driver;
3883                 break;
3884         case 60: /* Haswell */
3885         case 69: /* Haswell Celeron */
3886                 ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_HSW_IMC);
3887                 if (ret)
3888                         return ret;
3889                 pci_uncores = snb_pci_uncores;
3890                 uncore_pci_driver = &hsw_uncore_pci_driver;
3891                 break;
3892         default:
3893                 return 0;
3894         }
3895
3896         ret = uncore_types_init(pci_uncores);
3897         if (ret)
3898                 return ret;
3899
3900         uncore_pci_driver->probe = uncore_pci_probe;
3901         uncore_pci_driver->remove = uncore_pci_remove;
3902
3903         ret = pci_register_driver(uncore_pci_driver);
3904         if (ret == 0)
3905                 pcidrv_registered = true;
3906         else
3907                 uncore_types_exit(pci_uncores);
3908
3909         return ret;
3910 }
3911
3912 static void __init uncore_pci_exit(void)
3913 {
3914         if (pcidrv_registered) {
3915                 pcidrv_registered = false;
3916                 pci_unregister_driver(uncore_pci_driver);
3917                 uncore_types_exit(pci_uncores);
3918         }
3919 }
3920
3921 /* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */
3922 static LIST_HEAD(boxes_to_free);
3923
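/*
 * Boxes released from the hotplug callbacks (CPU_DYING/CPU_UP_CANCELED)
 * are queued on boxes_to_free and only kfree()d later, from CPU_ONLINE
 * or CPU_DEAD, once the CPU in question has finished going up or down.
 */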
3924 static void uncore_kfree_boxes(void)
3925 {
3926         struct intel_uncore_box *box;
3927
3928         while (!list_empty(&boxes_to_free)) {
3929                 box = list_entry(boxes_to_free.next,
3930                                  struct intel_uncore_box, list);
3931                 list_del(&box->list);
3932                 kfree(box);
3933         }
3934 }
3935
3936 static void uncore_cpu_dying(int cpu)
3937 {
3938         struct intel_uncore_type *type;
3939         struct intel_uncore_pmu *pmu;
3940         struct intel_uncore_box *box;
3941         int i, j;
3942
3943         for (i = 0; msr_uncores[i]; i++) {
3944                 type = msr_uncores[i];
3945                 for (j = 0; j < type->num_boxes; j++) {
3946                         pmu = &type->pmus[j];
3947                         box = *per_cpu_ptr(pmu->box, cpu);
3948                         *per_cpu_ptr(pmu->box, cpu) = NULL;
3949                         if (box && atomic_dec_and_test(&box->refcnt))
3950                                 list_add(&box->list, &boxes_to_free);
3951                 }
3952         }
3953 }
3954
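/*
 * All CPUs in a physical package share one box per pmu.  The first CPU
 * of a package to come online keeps the box prepared for it; any later
 * CPU of the same package reuses that existing box (bumping its
 * refcount) and queues its own prepared box for freeing.
 */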
3955 static int uncore_cpu_starting(int cpu)
3956 {
3957         struct intel_uncore_type *type;
3958         struct intel_uncore_pmu *pmu;
3959         struct intel_uncore_box *box, *exist;
3960         int i, j, k, phys_id;
3961
3962         phys_id = topology_physical_package_id(cpu);
3963
3964         for (i = 0; msr_uncores[i]; i++) {
3965                 type = msr_uncores[i];
3966                 for (j = 0; j < type->num_boxes; j++) {
3967                         pmu = &type->pmus[j];
3968                         box = *per_cpu_ptr(pmu->box, cpu);
3969                         /* called by uncore_cpu_init? */
3970                         if (box && box->phys_id >= 0) {
3971                                 uncore_box_init(box);
3972                                 continue;
3973                         }
3974
3975                         for_each_online_cpu(k) {
3976                                 exist = *per_cpu_ptr(pmu->box, k);
3977                                 if (exist && exist->phys_id == phys_id) {
3978                                         atomic_inc(&exist->refcnt);
3979                                         *per_cpu_ptr(pmu->box, cpu) = exist;
3980                                         if (box) {
3981                                                 list_add(&box->list,
3982                                                          &boxes_to_free);
3983                                                 box = NULL;
3984                                         }
3985                                         break;
3986                                 }
3987                         }
3988
3989                         if (box) {
3990                                 box->phys_id = phys_id;
3991                                 uncore_box_init(box);
3992                         }
3993                 }
3994         }
3995         return 0;
3996 }
3997
3998 static int uncore_cpu_prepare(int cpu, int phys_id)
3999 {
4000         struct intel_uncore_type *type;
4001         struct intel_uncore_pmu *pmu;
4002         struct intel_uncore_box *box;
4003         int i, j;
4004
4005         for (i = 0; msr_uncores[i]; i++) {
4006                 type = msr_uncores[i];
4007                 for (j = 0; j < type->num_boxes; j++) {
4008                         pmu = &type->pmus[j];
4009                         if (pmu->func_id < 0)
4010                                 pmu->func_id = j;
4011
4012                         box = uncore_alloc_box(type, cpu_to_node(cpu));
4013                         if (!box)
4014                                 return -ENOMEM;
4015
4016                         box->pmu = pmu;
4017                         box->phys_id = phys_id;
4018                         *per_cpu_ptr(pmu->box, cpu) = box;
4019                 }
4020         }
4021         return 0;
4022 }
4023
4024 static void
4025 uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
4026 {
4027         struct intel_uncore_type *type;
4028         struct intel_uncore_pmu *pmu;
4029         struct intel_uncore_box *box;
4030         int i, j;
4031
4032         for (i = 0; uncores[i]; i++) {
4033                 type = uncores[i];
4034                 for (j = 0; j < type->num_boxes; j++) {
4035                         pmu = &type->pmus[j];
4036                         if (old_cpu < 0)
4037                                 box = uncore_pmu_to_box(pmu, new_cpu);
4038                         else
4039                                 box = uncore_pmu_to_box(pmu, old_cpu);
4040                         if (!box)
4041                                 continue;
4042
4043                         if (old_cpu < 0) {
4044                                 WARN_ON_ONCE(box->cpu != -1);
4045                                 box->cpu = new_cpu;
4046                                 continue;
4047                         }
4048
4049                         WARN_ON_ONCE(box->cpu != old_cpu);
4050                         if (new_cpu >= 0) {
4051                                 uncore_pmu_cancel_hrtimer(box);
4052                                 perf_pmu_migrate_context(&pmu->pmu,
4053                                                 old_cpu, new_cpu);
4054                                 box->cpu = new_cpu;
4055                         } else {
4056                                 box->cpu = -1;
4057                         }
4058                 }
4059         }
4060 }
4061
4062 static void uncore_event_exit_cpu(int cpu)
4063 {
4064         int i, phys_id, target;
4065
4066         /* if the exiting cpu is used for collecting uncore events */
4067         if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
4068                 return;
4069
4070         /* find a new cpu to collect uncore events */
4071         phys_id = topology_physical_package_id(cpu);
4072         target = -1;
4073         for_each_online_cpu(i) {
4074                 if (i == cpu)
4075                         continue;
4076                 if (phys_id == topology_physical_package_id(i)) {
4077                         target = i;
4078                         break;
4079                 }
4080         }
4081
4082         /* migrate uncore events to the new cpu */
4083         if (target >= 0)
4084                 cpumask_set_cpu(target, &uncore_cpu_mask);
4085
4086         uncore_change_context(msr_uncores, cpu, target);
4087         uncore_change_context(pci_uncores, cpu, target);
4088 }
4089
4090 static void uncore_event_init_cpu(int cpu)
4091 {
4092         int i, phys_id;
4093
4094         phys_id = topology_physical_package_id(cpu);
4095         for_each_cpu(i, &uncore_cpu_mask) {
4096                 if (phys_id == topology_physical_package_id(i))
4097                         return;
4098         }
4099
4100         cpumask_set_cpu(cpu, &uncore_cpu_mask);
4101
4102         uncore_change_context(msr_uncores, -1, cpu);
4103         uncore_change_context(pci_uncores, -1, cpu);
4104 }
4105
4106 static int uncore_cpu_notifier(struct notifier_block *self,
4107                                unsigned long action, void *hcpu)
4108 {
4109         unsigned int cpu = (long)hcpu;
4110
4111         /* allocate/free data structure for uncore box */
4112         switch (action & ~CPU_TASKS_FROZEN) {
4113         case CPU_UP_PREPARE:
4114                 uncore_cpu_prepare(cpu, -1);
4115                 break;
4116         case CPU_STARTING:
4117                 uncore_cpu_starting(cpu);
4118                 break;
4119         case CPU_UP_CANCELED:
4120         case CPU_DYING:
4121                 uncore_cpu_dying(cpu);
4122                 break;
4123         case CPU_ONLINE:
4124         case CPU_DEAD:
4125                 uncore_kfree_boxes();
4126                 break;
4127         default:
4128                 break;
4129         }
4130
4131         /* select the cpu that collects uncore events */
4132         switch (action & ~CPU_TASKS_FROZEN) {
4133         case CPU_DOWN_FAILED:
4134         case CPU_STARTING:
4135                 uncore_event_init_cpu(cpu);
4136                 break;
4137         case CPU_DOWN_PREPARE:
4138                 uncore_event_exit_cpu(cpu);
4139                 break;
4140         default:
4141                 break;
4142         }
4143
4144         return NOTIFY_OK;
4145 }
4146
4147 static struct notifier_block uncore_cpu_nb = {
4148         .notifier_call  = uncore_cpu_notifier,
4149         /*
4150          * to migrate uncore events, our notifier should be executed
4151          * before perf core's notifier.
4152          */
4153         .priority       = CPU_PRI_PERF + 1,
4154 };
4155
4156 static void __init uncore_cpu_setup(void *dummy)
4157 {
4158         uncore_cpu_starting(smp_processor_id());
4159 }
4160
4161 static int __init uncore_cpu_init(void)
4162 {
4163         int ret, max_cores;
4164
4165         max_cores = boot_cpu_data.x86_max_cores;
4166         switch (boot_cpu_data.x86_model) {
4167         case 26: /* Nehalem */
4168         case 30:
4169         case 37: /* Westmere */
4170         case 44:
4171                 msr_uncores = nhm_msr_uncores;
4172                 break;
4173         case 42: /* Sandy Bridge */
4174         case 58: /* Ivy Bridge */
4175                 if (snb_uncore_cbox.num_boxes > max_cores)
4176                         snb_uncore_cbox.num_boxes = max_cores;
4177                 msr_uncores = snb_msr_uncores;
4178                 break;
4179         case 45: /* Sandy Bridge-EP */
4180                 if (snbep_uncore_cbox.num_boxes > max_cores)
4181                         snbep_uncore_cbox.num_boxes = max_cores;
4182                 msr_uncores = snbep_msr_uncores;
4183                 break;
4184         case 46: /* Nehalem-EX */
4185                 uncore_nhmex = true;
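                /* fall through */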
4186         case 47: /* Westmere-EX aka. Xeon E7 */
4187                 if (!uncore_nhmex)
4188                         nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
4189                 if (nhmex_uncore_cbox.num_boxes > max_cores)
4190                         nhmex_uncore_cbox.num_boxes = max_cores;
4191                 msr_uncores = nhmex_msr_uncores;
4192                 break;
4193         case 62: /* IvyTown */
4194                 if (ivt_uncore_cbox.num_boxes > max_cores)
4195                         ivt_uncore_cbox.num_boxes = max_cores;
4196                 msr_uncores = ivt_msr_uncores;
4197                 break;
4198
4199         default:
4200                 return 0;
4201         }
4202
4203         ret = uncore_types_init(msr_uncores);
4204         if (ret)
4205                 return ret;
4206
4207         return 0;
4208 }
4209
4210 static int __init uncore_pmus_register(void)
4211 {
4212         struct intel_uncore_pmu *pmu;
4213         struct intel_uncore_type *type;
4214         int i, j;
4215
4216         for (i = 0; msr_uncores[i]; i++) {
4217                 type = msr_uncores[i];
4218                 for (j = 0; j < type->num_boxes; j++) {
4219                         pmu = &type->pmus[j];
4220                         uncore_pmu_register(pmu);
4221                 }
4222         }
4223
4224         for (i = 0; pci_uncores[i]; i++) {
4225                 type = pci_uncores[i];
4226                 for (j = 0; j < type->num_boxes; j++) {
4227                         pmu = &type->pmus[j];
4228                         uncore_pmu_register(pmu);
4229                 }
4230         }
4231
4232         return 0;
4233 }
4234
4235 static void __init uncore_cpumask_init(void)
4236 {
4237         int cpu;
4238
4239         /*
4240          * only invoked once, from the msr or pci init code
4241          */
4242         if (!cpumask_empty(&uncore_cpu_mask))
4243                 return;
4244
4245         cpu_notifier_register_begin();
4246
4247         for_each_online_cpu(cpu) {
4248                 int i, phys_id = topology_physical_package_id(cpu);
4249
4250                 for_each_cpu(i, &uncore_cpu_mask) {
4251                         if (phys_id == topology_physical_package_id(i)) {
4252                                 phys_id = -1;
4253                                 break;
4254                         }
4255                 }
4256                 if (phys_id < 0)
4257                         continue;
4258
4259                 uncore_cpu_prepare(cpu, phys_id);
4260                 uncore_event_init_cpu(cpu);
4261         }
4262         on_each_cpu(uncore_cpu_setup, NULL, 1);
4263
4264         __register_cpu_notifier(&uncore_cpu_nb);
4265
4266         cpu_notifier_register_done();
4267 }
4268
4269
4270 static int __init intel_uncore_init(void)
4271 {
4272         int ret;
4273
4274         if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
4275                 return -ENODEV;
4276
4277         if (cpu_has_hypervisor)
4278                 return -ENODEV;
4279
4280         ret = uncore_pci_init();
4281         if (ret)
4282                 goto fail;
4283         ret = uncore_cpu_init();
4284         if (ret) {
4285                 uncore_pci_exit();
4286                 goto fail;
4287         }
4288         uncore_cpumask_init();
4289
4290         uncore_pmus_register();
4291         return 0;
4292 fail:
4293         return ret;
4294 }
4295 device_initcall(intel_uncore_init);