author    Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>  2013-06-05 17:11:49 -0400
committer Ingo Molnar <mingo@kernel.org>                         2013-06-19 07:04:53 -0400
commit    7be6296fdd75f716f7348251433ea68c4b362cf3
tree      8120dcc5ba7c63669447d74f2d1f0458dd7fc0d3 /arch/x86
parent    30861ddc9cca479a7fc6a5efef4e5c69d6b274f4
perf/x86/amd: AMD IOMMU Performance Counter PERF uncore PMU implementation
Implement a perf PMU to handle IOMMU performance counters and events. The PMU only supports counting mode (e.g. perf stat). Since the counters are shared across all cores, the PMU is implemented as "system-wide" mode.

To invoke the AMD IOMMU PMU, issue a perf tool command such as:

  ./perf stat -a -e amd_iommu/<events>/ <command>

or:

  ./perf stat -a -e amd_iommu/config=<config-data>,config1=<config1-data>/ <command>

For example:

  ./perf stat -a -e amd_iommu/mem_trans_total/ <command>

The resulting count will be how many IOMMU total peripheral memory operations were performed during the command execution window.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1370466709-3212-3-git-send-email-suravee.suthikulpanit@amd.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
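The format attributes added below can also be combined for filtered counting; as a sketch (the devid value here is hypothetical, chosen only for illustration):

  ./perf stat -a -e amd_iommu/csource=0x05,devid=0x0040,devid_mask=0xffff/ <command>

This counts mem_trans_total (csource 0x05) only for transactions whose device ID matches devid under devid_mask; perf_iommu_enable_event() below programs the corresponding match register and sets its bit 31 whenever a non-zero match value is supplied.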
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/cpu/Makefile                 |   4
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd_iommu.c   | 504
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd_iommu.h   |  40
3 files changed, 548 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index b0684e4a73aa..47b56a7e99cb 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -31,11 +31,15 @@ obj-$(CONFIG_PERF_EVENTS) += perf_event.o
 
 ifdef CONFIG_PERF_EVENTS
 obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd.o perf_event_amd_uncore.o
+ifdef CONFIG_AMD_IOMMU
+obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd_iommu.o
+endif
 obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o
 obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
 obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o
 endif
 
+
 obj-$(CONFIG_X86_MCE) += mcheck/
 obj-$(CONFIG_MTRR) += mtrr/
 
diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
new file mode 100644
index 000000000000..0db655ef3918
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
@@ -0,0 +1,504 @@
/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Steven Kinney <Steven.Kinney@amd.com>
 * Author: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
 *
 * Perf: amd_iommu - AMD IOMMU Performance Counter PMU implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/perf_event.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/slab.h>

#include "perf_event.h"
#include "perf_event_amd_iommu.h"

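/*
 * The hardware counters are 48 bits wide: shifting left by COUNTER_SHIFT
 * aligns bit 47 with bit 63 so that counter deltas wrap correctly in
 * perf_iommu_read().
 */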
#define COUNTER_SHIFT 16

#define _GET_BANK(ev) ((u8)(ev->hw.extra_reg.reg >> 8))
#define _GET_CNTR(ev) ((u8)(ev->hw.extra_reg.reg))

/* iommu pmu config masks */
#define _GET_CSOURCE(ev) ((ev->hw.config & 0xFFULL))
#define _GET_DEVID(ev) ((ev->hw.config >> 8) & 0xFFFFULL)
#define _GET_PASID(ev) ((ev->hw.config >> 24) & 0xFFFFULL)
#define _GET_DOMID(ev) ((ev->hw.config >> 40) & 0xFFFFULL)
#define _GET_DEVID_MASK(ev) ((ev->hw.extra_reg.config) & 0xFFFFULL)
#define _GET_PASID_MASK(ev) ((ev->hw.extra_reg.config >> 16) & 0xFFFFULL)
#define _GET_DOMID_MASK(ev) ((ev->hw.extra_reg.config >> 32) & 0xFFFFULL)

static struct perf_amd_iommu __perf_iommu;

struct perf_amd_iommu {
        struct pmu pmu;
        u8 max_banks;
        u8 max_counters;
        u64 cntr_assign_mask;
        raw_spinlock_t lock;
        const struct attribute_group *attr_groups[4];
};

#define format_group  attr_groups[0]
#define cpumask_group attr_groups[1]
#define events_group  attr_groups[2]
#define null_group    attr_groups[3]

/*---------------------------------------------
 * sysfs format attributes
 *---------------------------------------------*/
PMU_FORMAT_ATTR(csource,    "config:0-7");
PMU_FORMAT_ATTR(devid,      "config:8-23");
PMU_FORMAT_ATTR(pasid,      "config:24-39");
PMU_FORMAT_ATTR(domid,      "config:40-55");
PMU_FORMAT_ATTR(devid_mask, "config1:0-15");
PMU_FORMAT_ATTR(pasid_mask, "config1:16-31");
PMU_FORMAT_ATTR(domid_mask, "config1:32-47");

static struct attribute *iommu_format_attrs[] = {
        &format_attr_csource.attr,
        &format_attr_devid.attr,
        &format_attr_pasid.attr,
        &format_attr_domid.attr,
        &format_attr_devid_mask.attr,
        &format_attr_pasid_mask.attr,
        &format_attr_domid_mask.attr,
        NULL,
};

static struct attribute_group amd_iommu_format_group = {
        .name = "format",
        .attrs = iommu_format_attrs,
};

/*---------------------------------------------
 * sysfs events attributes
 *---------------------------------------------*/
struct amd_iommu_event_desc {
        struct kobj_attribute attr;
        const char *event;
};

static ssize_t _iommu_event_show(struct kobject *kobj,
                                 struct kobj_attribute *attr, char *buf)
{
        struct amd_iommu_event_desc *event =
                container_of(attr, struct amd_iommu_event_desc, attr);
        return sprintf(buf, "%s\n", event->event);
}

#define AMD_IOMMU_EVENT_DESC(_name, _event)                       \
{                                                                 \
        .attr  = __ATTR(_name, 0444, _iommu_event_show, NULL),   \
        .event = _event,                                          \
}

static struct amd_iommu_event_desc amd_iommu_v2_event_descs[] = {
        AMD_IOMMU_EVENT_DESC(mem_pass_untrans,      "csource=0x01"),
        AMD_IOMMU_EVENT_DESC(mem_pass_pretrans,     "csource=0x02"),
        AMD_IOMMU_EVENT_DESC(mem_pass_excl,         "csource=0x03"),
        AMD_IOMMU_EVENT_DESC(mem_target_abort,      "csource=0x04"),
        AMD_IOMMU_EVENT_DESC(mem_trans_total,       "csource=0x05"),
        AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pte_hit, "csource=0x06"),
        AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pte_mis, "csource=0x07"),
        AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pde_hit, "csource=0x08"),
        AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pde_mis, "csource=0x09"),
        AMD_IOMMU_EVENT_DESC(mem_dte_hit,           "csource=0x0a"),
        AMD_IOMMU_EVENT_DESC(mem_dte_mis,           "csource=0x0b"),
        AMD_IOMMU_EVENT_DESC(page_tbl_read_tot,     "csource=0x0c"),
        AMD_IOMMU_EVENT_DESC(page_tbl_read_nst,     "csource=0x0d"),
        AMD_IOMMU_EVENT_DESC(page_tbl_read_gst,     "csource=0x0e"),
        AMD_IOMMU_EVENT_DESC(int_dte_hit,           "csource=0x0f"),
        AMD_IOMMU_EVENT_DESC(int_dte_mis,           "csource=0x10"),
        AMD_IOMMU_EVENT_DESC(cmd_processed,         "csource=0x11"),
        AMD_IOMMU_EVENT_DESC(cmd_processed_inv,     "csource=0x12"),
        AMD_IOMMU_EVENT_DESC(tlb_inv,               "csource=0x13"),
        { /* end: all zeroes */ },
};
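/*
 * These strings are exported through the "events" attribute group, so a
 * symbolic name such as mem_trans_total resolves to its raw config (here
 * "csource=0x05") under /sys/bus/event_source/devices/amd_iommu/events/.
 */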

/*---------------------------------------------
 * sysfs cpumask attributes
 *---------------------------------------------*/
static cpumask_t iommu_cpumask;

static ssize_t _iommu_cpumask_show(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &iommu_cpumask);
        buf[n++] = '\n';
        buf[n] = '\0';
        return n;
}
static DEVICE_ATTR(cpumask, S_IRUGO, _iommu_cpumask_show, NULL);

static struct attribute *iommu_cpumask_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static struct attribute_group amd_iommu_cpumask_group = {
        .attrs = iommu_cpumask_attrs,
};

/*---------------------------------------------*/

static int get_next_avail_iommu_bnk_cntr(struct perf_amd_iommu *perf_iommu)
{
        unsigned long flags;
        int shift, bank, cntr, retval;
        int max_banks = perf_iommu->max_banks;
        int max_cntrs = perf_iommu->max_counters;

        raw_spin_lock_irqsave(&perf_iommu->lock, flags);

        for (bank = 0, shift = 0; bank < max_banks; bank++) {
                for (cntr = 0; cntr < max_cntrs; cntr++) {
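                        /* each bank owns 4 bits of cntr_assign_mask: shift = bank*4 + cntr */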
                        shift = bank + (bank*3) + cntr;
                        if (perf_iommu->cntr_assign_mask & (1ULL<<shift)) {
                                continue;
                        } else {
                                perf_iommu->cntr_assign_mask |= (1ULL<<shift);
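                                /* encode bank in bits 15:8, counter in bits 7:0; decoded by _GET_BANK()/_GET_CNTR() */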
                                retval = ((u16)((u16)bank<<8) | (u8)(cntr));
                                goto out;
                        }
                }
        }
        retval = -ENOSPC;
out:
        raw_spin_unlock_irqrestore(&perf_iommu->lock, flags);
        return retval;
}

static int clear_avail_iommu_bnk_cntr(struct perf_amd_iommu *perf_iommu,
                                      u8 bank, u8 cntr)
{
        unsigned long flags;
        int max_banks, max_cntrs;
        int shift = 0;

        max_banks = perf_iommu->max_banks;
        max_cntrs = perf_iommu->max_counters;

        /* banks and counters are numbered from 0, so reject == max too */
        if ((bank >= max_banks) || (cntr >= max_cntrs))
                return -EINVAL;

        shift = bank + cntr + (bank*3);

        raw_spin_lock_irqsave(&perf_iommu->lock, flags);
        perf_iommu->cntr_assign_mask &= ~(1ULL<<shift);
        raw_spin_unlock_irqrestore(&perf_iommu->lock, flags);

        return 0;
}

static int perf_iommu_event_init(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct perf_amd_iommu *perf_iommu;
        u64 config, config1;

        /* check the event attr type for PMU enumeration */
        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /*
         * IOMMU counters are shared across all cores.
         * Therefore, it does not support per-process mode.
         * Also, it does not support event sampling mode.
         */
        if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
                return -EINVAL;

        /* IOMMU counters do not have usr/os/guest/host bits */
        if (event->attr.exclude_user || event->attr.exclude_kernel ||
            event->attr.exclude_host || event->attr.exclude_guest)
                return -EINVAL;

        if (event->cpu < 0)
                return -EINVAL;

        perf_iommu = &__perf_iommu;

        if (event->pmu != &perf_iommu->pmu)
                return -ENOENT;

        if (perf_iommu) {
                config = event->attr.config;
                config1 = event->attr.config1;
        } else {
                return -EINVAL;
        }

        /* integrate with iommu base devid (0000), assume one iommu */
        perf_iommu->max_banks =
                amd_iommu_pc_get_max_banks(IOMMU_BASE_DEVID);
        perf_iommu->max_counters =
                amd_iommu_pc_get_max_counters(IOMMU_BASE_DEVID);
        if ((perf_iommu->max_banks == 0) || (perf_iommu->max_counters == 0))
                return -EINVAL;

        /* update the hw_perf_event struct with the iommu config data */
        hwc->config = config;
        hwc->extra_reg.config = config1;

        return 0;
}

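/*
 * Program the counter source register for this bank/counter, then write
 * the devid/pasid/domid match registers from the event config; bit 31 of
 * a match register is set whenever a non-zero match value is supplied.
 */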
static void perf_iommu_enable_event(struct perf_event *ev)
{
        u8 csource = _GET_CSOURCE(ev);
        u16 devid = _GET_DEVID(ev);
        u64 reg = 0ULL;

        reg = csource;
        amd_iommu_pc_get_set_reg_val(devid,
                        _GET_BANK(ev), _GET_CNTR(ev),
                        IOMMU_PC_COUNTER_SRC_REG, &reg, true);

        reg = 0ULL | devid | (_GET_DEVID_MASK(ev) << 32);
        if (reg)
                reg |= (1UL << 31);
        amd_iommu_pc_get_set_reg_val(devid,
                        _GET_BANK(ev), _GET_CNTR(ev),
                        IOMMU_PC_DEVID_MATCH_REG, &reg, true);

        reg = 0ULL | _GET_PASID(ev) | (_GET_PASID_MASK(ev) << 32);
        if (reg)
                reg |= (1UL << 31);
        amd_iommu_pc_get_set_reg_val(devid,
                        _GET_BANK(ev), _GET_CNTR(ev),
                        IOMMU_PC_PASID_MATCH_REG, &reg, true);

        reg = 0ULL | _GET_DOMID(ev) | (_GET_DOMID_MASK(ev) << 32);
        if (reg)
                reg |= (1UL << 31);
        amd_iommu_pc_get_set_reg_val(devid,
                        _GET_BANK(ev), _GET_CNTR(ev),
                        IOMMU_PC_DOMID_MATCH_REG, &reg, true);
}

static void perf_iommu_disable_event(struct perf_event *event)
{
        u64 reg = 0ULL;

        amd_iommu_pc_get_set_reg_val(_GET_DEVID(event),
                        _GET_BANK(event), _GET_CNTR(event),
                        IOMMU_PC_COUNTER_SRC_REG, &reg, true);
}

static void perf_iommu_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;

        pr_debug("perf: amd_iommu:perf_iommu_start\n");
        if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
                return;

        WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
        hwc->state = 0;

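        /* on PERF_EF_RELOAD, write the saved count back so the hardware counter resumes from it */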
        if (flags & PERF_EF_RELOAD) {
                u64 prev_raw_count = local64_read(&hwc->prev_count);
                amd_iommu_pc_get_set_reg_val(_GET_DEVID(event),
                                _GET_BANK(event), _GET_CNTR(event),
                                IOMMU_PC_COUNTER_REG, &prev_raw_count, true);
        }

        perf_iommu_enable_event(event);
        perf_event_update_userpage(event);
}

static void perf_iommu_read(struct perf_event *event)
{
        u64 count = 0ULL;
        u64 prev_raw_count = 0ULL;
        u64 delta = 0ULL;
        struct hw_perf_event *hwc = &event->hw;
        pr_debug("perf: amd_iommu:perf_iommu_read\n");

        amd_iommu_pc_get_set_reg_val(_GET_DEVID(event),
                        _GET_BANK(event), _GET_CNTR(event),
                        IOMMU_PC_COUNTER_REG, &count, false);

        /* IOMMU pc counter register is only 48 bits */
        count &= 0xFFFFFFFFFFFFULL;

        prev_raw_count = local64_read(&hwc->prev_count);
        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                            count) != prev_raw_count)
                return;
        /* Handle 48-bit counter overflow: align bit 47 with bit 63 so the subtraction wraps correctly */
        delta = (count << COUNTER_SHIFT) - (prev_raw_count << COUNTER_SHIFT);
        delta >>= COUNTER_SHIFT;
        local64_add(delta, &event->count);
}

static void perf_iommu_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        u64 config;

        pr_debug("perf: amd_iommu:perf_iommu_stop\n");

        if (hwc->state & PERF_HES_UPTODATE)
                return;

        perf_iommu_disable_event(event);
        WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
        hwc->state |= PERF_HES_STOPPED;

        if (hwc->state & PERF_HES_UPTODATE)
                return;

        config = hwc->config;
        perf_iommu_read(event);
        hwc->state |= PERF_HES_UPTODATE;
}

static int perf_iommu_add(struct perf_event *event, int flags)
{
        int retval;
        struct perf_amd_iommu *perf_iommu =
                        container_of(event->pmu, struct perf_amd_iommu, pmu);

        pr_debug("perf: amd_iommu:perf_iommu_add\n");
        event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

        /* request an iommu bank/counter */
        retval = get_next_avail_iommu_bnk_cntr(perf_iommu);
        if (retval != -ENOSPC)
                event->hw.extra_reg.reg = (u16)retval;
        else
                return retval;

        if (flags & PERF_EF_START)
                perf_iommu_start(event, PERF_EF_RELOAD);

        return 0;
}

static void perf_iommu_del(struct perf_event *event, int flags)
{
        struct perf_amd_iommu *perf_iommu =
                        container_of(event->pmu, struct perf_amd_iommu, pmu);

        pr_debug("perf: amd_iommu:perf_iommu_del\n");
        perf_iommu_stop(event, PERF_EF_UPDATE);

        /* clear the assigned iommu bank/counter */
        clear_avail_iommu_bnk_cntr(perf_iommu,
                                   _GET_BANK(event),
                                   _GET_CNTR(event));

        perf_event_update_userpage(event);
}

static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
{
        struct attribute **attrs;
        struct attribute_group *attr_group;
        int i = 0, j;

        while (amd_iommu_v2_event_descs[i].attr.attr.name)
                i++;

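        /* one allocation holds the group followed by its NULL-terminated attribute array */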
        attr_group = kzalloc(sizeof(struct attribute *)
                * (i + 1) + sizeof(*attr_group), GFP_KERNEL);
        if (!attr_group)
                return -ENOMEM;

        attrs = (struct attribute **)(attr_group + 1);
        for (j = 0; j < i; j++)
                attrs[j] = &amd_iommu_v2_event_descs[j].attr.attr;

        attr_group->name = "events";
        attr_group->attrs = attrs;
        perf_iommu->events_group = attr_group;

        return 0;
}

static __init void amd_iommu_pc_exit(void)
{
        if (__perf_iommu.events_group != NULL) {
                kfree(__perf_iommu.events_group);
                __perf_iommu.events_group = NULL;
        }
}

static __init int _init_perf_amd_iommu(
        struct perf_amd_iommu *perf_iommu, char *name)
{
        int ret;

        raw_spin_lock_init(&perf_iommu->lock);

        /* Init format attributes */
        perf_iommu->format_group = &amd_iommu_format_group;

        /* Init cpumask attributes to only core 0 */
        cpumask_set_cpu(0, &iommu_cpumask);
        perf_iommu->cpumask_group = &amd_iommu_cpumask_group;

        /* Init events attributes */
        if (_init_events_attrs(perf_iommu) != 0)
                pr_err("perf: amd_iommu: Only raw events are supported.\n");

        /* Init null attributes */
        perf_iommu->null_group = NULL;
        perf_iommu->pmu.attr_groups = perf_iommu->attr_groups;

        ret = perf_pmu_register(&perf_iommu->pmu, name, -1);
        if (ret) {
                pr_err("perf: amd_iommu: Failed to initialize.\n");
                amd_iommu_pc_exit();
        } else {
                pr_info("perf: amd_iommu: Detected. (%d banks, %d counters/bank)\n",
                        amd_iommu_pc_get_max_banks(IOMMU_BASE_DEVID),
                        amd_iommu_pc_get_max_counters(IOMMU_BASE_DEVID));
        }

        return ret;
}

static struct perf_amd_iommu __perf_iommu = {
        .pmu = {
                .event_init = perf_iommu_event_init,
                .add        = perf_iommu_add,
                .del        = perf_iommu_del,
                .start      = perf_iommu_start,
                .stop       = perf_iommu_stop,
                .read       = perf_iommu_read,
        },
        .max_banks        = 0x00,
        .max_counters     = 0x00,
        .cntr_assign_mask = 0ULL,
        .format_group     = NULL,
        .cpumask_group    = NULL,
        .events_group     = NULL,
        .null_group       = NULL,
};

static __init int amd_iommu_pc_init(void)
{
        /* Make sure the IOMMU PC resource is available */
        if (!amd_iommu_pc_supported()) {
                pr_err("perf: amd_iommu PMU not installed. No support!\n");
                return -ENODEV;
        }

        _init_perf_amd_iommu(&__perf_iommu, "amd_iommu");

        return 0;
}

device_initcall(amd_iommu_pc_init);
diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.h b/arch/x86/kernel/cpu/perf_event_amd_iommu.h
new file mode 100644
index 000000000000..845d173278e3
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.h
@@ -0,0 +1,40 @@
/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Steven Kinney <Steven.Kinney@amd.com>
 * Author: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _PERF_EVENT_AMD_IOMMU_H_
#define _PERF_EVENT_AMD_IOMMU_H_

/* iommu pc mmio region register indexes */
#define IOMMU_PC_COUNTER_REG        0x00
#define IOMMU_PC_COUNTER_SRC_REG    0x08
#define IOMMU_PC_PASID_MATCH_REG    0x10
#define IOMMU_PC_DOMID_MATCH_REG    0x18
#define IOMMU_PC_DEVID_MATCH_REG    0x20
#define IOMMU_PC_COUNTER_REPORT_REG 0x28
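/* (each of the above is the 'fxn' selector passed to amd_iommu_pc_get_set_reg_val()) */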

/* maximum specified bank/counters */
#define PC_MAX_SPEC_BNKS  64
#define PC_MAX_SPEC_CNTRS 16

/* iommu pc reg masks */
#define IOMMU_BASE_DEVID  0x0000

/* amd_iommu_init.c external support functions */
extern bool amd_iommu_pc_supported(void);

extern u8 amd_iommu_pc_get_max_banks(u16 devid);

extern u8 amd_iommu_pc_get_max_counters(u16 devid);

extern int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr,
                                        u8 fxn, u64 *value, bool is_write);

#endif /* _PERF_EVENT_AMD_IOMMU_H_ */