author		Linus Torvalds <torvalds@linux-foundation.org>	2012-07-31 18:34:13 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-07-31 18:34:13 -0400
commit		bca1a5c0eabe0f17081760c61e8d08e73dd6b6a6 (patch)
tree		f939c6f42bf459786eb0050578044fdde56fec90 /arch/x86/kernel/cpu
parent		ec7a19bfec544aa73e347369232f9bd654954aa3 (diff)
parent		194f8dcbe9629d8e9346cf96345a9c0bbf0e67ae (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Ingo Molnar:
"The biggest changes are Intel Nehalem-EX PMU uncore support, uprobes
updates/cleanups/fixes from Oleg and diverse tooling updates (mostly
fixes) now that Arnaldo is back from vacation."
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (40 commits)
uprobes: __replace_page() needs munlock_vma_page()
uprobes: Rename vma_address() and make it return "unsigned long"
uprobes: Fix register_for_each_vma()->vma_address() check
uprobes: Introduce vaddr_to_offset(vma, vaddr)
uprobes: Teach build_probe_list() to consider the range
uprobes: Remove insert_vm_struct()->uprobe_mmap()
uprobes: Remove copy_vma()->uprobe_mmap()
uprobes: Fix overflow in vma_address()/find_active_uprobe()
uprobes: Suppress uprobe_munmap() from mmput()
uprobes: Uprobe_mmap/munmap needs list_for_each_entry_safe()
uprobes: Clean up and document write_opcode()->lock_page(old_page)
uprobes: Kill write_opcode()->lock_page(new_page)
uprobes: __replace_page() should not use page_address_in_vma()
uprobes: Don't recheck vma/f_mapping in write_opcode()
perf/x86: Fix missing struct before structure name
perf/x86: Fix format definition of SNB-EP uncore QPI box
perf/x86: Make bitfield unsigned
perf/x86: Fix LLC-* and node-* events on Intel SandyBridge
perf/x86: Add Intel Nehalem-EX uncore support
perf/x86: Fix typo in format definition of uncore PCU filter
...
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--	arch/x86/kernel/cpu/perf_event.h	2
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel.c	92
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel_uncore.c	1316
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel_uncore.h	207
4 files changed, 1472 insertions(+), 145 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index a15df4be151f..821d53b696d1 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -374,7 +374,7 @@ struct x86_pmu {
 	/*
	 * Intel DebugStore bits
	 */
-	int		bts		:1,
+	unsigned int	bts		:1,
			bts_active	:1,
			pebs		:1,
			pebs_active	:1,
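Why this one-word change matters: whether a plain "int" bitfield is signed is implementation-defined, and GCC treats it as signed, so a 1-bit int field can hold only 0 and -1; a test like "bts == 1" then silently never fires. A minimal standalone sketch of the pitfall (ordinary userspace C, not kernel code):

#include <stdio.h>

/* Illustration of the signed-bitfield pitfall fixed above: with GCC,
 * a 1-bit "int" bitfield stores the value 1 as -1. */
struct flags_signed   { int          bts : 1; };
struct flags_unsigned { unsigned int bts : 1; };

int main(void)
{
	struct flags_signed   s = { .bts = 1 };
	struct flags_unsigned u = { .bts = 1 };

	printf("signed:   bts = %d, bts == 1 is %s\n", s.bts, s.bts == 1 ? "true" : "false");
	printf("unsigned: bts = %d, bts == 1 is %s\n", u.bts, u.bts == 1 ? "true" : "false");
	return 0;
}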
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 7a8b9d0abcaa..382366977d4c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -138,6 +138,84 @@ static u64 intel_pmu_event_map(int hw_event)
 	return intel_perfmon_event_map[hw_event];
 }
 
+#define SNB_DMND_DATA_RD	(1ULL << 0)
+#define SNB_DMND_RFO		(1ULL << 1)
+#define SNB_DMND_IFETCH		(1ULL << 2)
+#define SNB_DMND_WB		(1ULL << 3)
+#define SNB_PF_DATA_RD		(1ULL << 4)
+#define SNB_PF_RFO		(1ULL << 5)
+#define SNB_PF_IFETCH		(1ULL << 6)
+#define SNB_LLC_DATA_RD		(1ULL << 7)
+#define SNB_LLC_RFO		(1ULL << 8)
+#define SNB_LLC_IFETCH		(1ULL << 9)
+#define SNB_BUS_LOCKS		(1ULL << 10)
+#define SNB_STRM_ST		(1ULL << 11)
+#define SNB_OTHER		(1ULL << 15)
+#define SNB_RESP_ANY		(1ULL << 16)
+#define SNB_NO_SUPP		(1ULL << 17)
+#define SNB_LLC_HITM		(1ULL << 18)
+#define SNB_LLC_HITE		(1ULL << 19)
+#define SNB_LLC_HITS		(1ULL << 20)
+#define SNB_LLC_HITF		(1ULL << 21)
+#define SNB_LOCAL		(1ULL << 22)
+#define SNB_REMOTE		(0xffULL << 23)
+#define SNB_SNP_NONE		(1ULL << 31)
+#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
+#define SNB_SNP_MISS		(1ULL << 33)
+#define SNB_NO_FWD		(1ULL << 34)
+#define SNB_SNP_FWD		(1ULL << 35)
+#define SNB_HITM		(1ULL << 36)
+#define SNB_NON_DRAM		(1ULL << 37)
+
+#define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
+#define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO)
+#define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)
+
+#define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
+				 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
+				 SNB_HITM)
+
+#define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
+#define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY)
+
+#define SNB_L3_ACCESS		SNB_RESP_ANY
+#define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM)
+
+static __initconst const u64 snb_hw_cache_extra_regs
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(LL  ) ] = {
+	[ C(OP_READ) ] = {
+		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
+		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_L3_MISS,
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
+		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_L3_MISS,
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
+		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
+	},
+ },
+ [ C(NODE) ] = {
+	[ C(OP_READ) ] = {
+		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
+		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
+		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
+		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
+	},
+ },
+};
+
 static __initconst const u64 snb_hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_MAX]
 				[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -235,16 +313,16 @@ static __initconst const u64 snb_hw_cache_event_ids
 },
 [ C(NODE) ] = {
 	[ C(OP_READ) ] = {
-		[ C(RESULT_ACCESS) ] = -1,
-		[ C(RESULT_MISS)   ] = -1,
+		[ C(RESULT_ACCESS) ] = 0x01b7,
+		[ C(RESULT_MISS)   ] = 0x01b7,
 	},
 	[ C(OP_WRITE) ] = {
-		[ C(RESULT_ACCESS) ] = -1,
-		[ C(RESULT_MISS)   ] = -1,
+		[ C(RESULT_ACCESS) ] = 0x01b7,
+		[ C(RESULT_MISS)   ] = 0x01b7,
 	},
 	[ C(OP_PREFETCH) ] = {
-		[ C(RESULT_ACCESS) ] = -1,
-		[ C(RESULT_MISS)   ] = -1,
+		[ C(RESULT_ACCESS) ] = 0x01b7,
+		[ C(RESULT_MISS)   ] = 0x01b7,
 	},
 },
 
@@ -1964,6 +2042,8 @@ __init int intel_pmu_init(void)
 	case 58: /* IvyBridge */
 		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
+		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
+		       sizeof(hw_cache_extra_regs));
 
 		intel_pmu_lbr_init_snb();
 
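Each 0x01b7 entry above is the raw code for the off-core response event, and the matching entry in snb_hw_cache_extra_regs supplies the filter value that gets programmed into the off-core response MSR, built from the SNB_* bits. A small standalone check of that arithmetic (bit positions copied from the hunk above; illustration only, not kernel code):

#include <stdio.h>
#include <stdint.h>

/* Bit positions copied from the #defines added above. */
#define SNB_DMND_DATA_RD (1ULL << 0)
#define SNB_LLC_DATA_RD  (1ULL << 7)
#define SNB_RESP_ANY     (1ULL << 16)

#define SNB_DMND_READ    (SNB_DMND_DATA_RD | SNB_LLC_DATA_RD)
#define SNB_L3_ACCESS    SNB_RESP_ANY

int main(void)
{
	/* The [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] entry from the table. */
	uint64_t rsp = SNB_DMND_READ | SNB_L3_ACCESS;

	printf("offcore response filter = %#llx\n",
	       (unsigned long long)rsp);	/* prints 0x10081 */
	return 0;
}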
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 19faffc60886..7563fda9f033 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -18,6 +18,7 @@ static struct event_constraint constraint_empty =
 	EVENT_CONSTRAINT(0, 0, 0);
 
 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
+DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
@@ -33,10 +34,81 @@ DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
 DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
 DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
 DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
-DEFINE_UNCORE_FORMAT_ATTR(filter_brand0, filter_brand0, "config1:0-7");
-DEFINE_UNCORE_FORMAT_ATTR(filter_brand1, filter_brand1, "config1:8-15");
-DEFINE_UNCORE_FORMAT_ATTR(filter_brand2, filter_brand2, "config1:16-23");
-DEFINE_UNCORE_FORMAT_ATTR(filter_brand3, filter_brand3, "config1:24-31");
+DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
+DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
+DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
+DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
+
+static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
+{
+	u64 count;
+
+	rdmsrl(event->hw.event_base, count);
+
+	return count;
+}
+
+/*
+ * generic get constraint function for shared match/mask registers.
+ */
+static struct event_constraint *
+uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+	struct intel_uncore_extra_reg *er;
+	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
+	unsigned long flags;
+	bool ok = false;
+
+	/*
+	 * reg->alloc can be set due to existing state, so for fake box we
+	 * need to ignore this, otherwise we might fail to allocate proper
+	 * fake state for this extra reg constraint.
+	 */
+	if (reg1->idx == EXTRA_REG_NONE ||
+	    (!uncore_box_is_fake(box) && reg1->alloc))
+		return NULL;
+
+	er = &box->shared_regs[reg1->idx];
+	raw_spin_lock_irqsave(&er->lock, flags);
+	if (!atomic_read(&er->ref) ||
+	    (er->config1 == reg1->config && er->config2 == reg2->config)) {
+		atomic_inc(&er->ref);
+		er->config1 = reg1->config;
+		er->config2 = reg2->config;
+		ok = true;
+	}
+	raw_spin_unlock_irqrestore(&er->lock, flags);
+
+	if (ok) {
+		if (!uncore_box_is_fake(box))
+			reg1->alloc = 1;
+		return NULL;
+	}
+
+	return &constraint_empty;
+}
+
+static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+	struct intel_uncore_extra_reg *er;
+	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+
+	/*
+	 * Only put constraint if extra reg was actually allocated. Also
+	 * takes care of event which do not use an extra shared reg.
+	 *
+	 * Also, if this is a fake box we shouldn't touch any event state
+	 * (reg->alloc) and we don't care about leaving inconsistent box
+	 * state either since it will be thrown out.
+	 */
+	if (uncore_box_is_fake(box) || !reg1->alloc)
+		return;
+
+	er = &box->shared_regs[reg1->idx];
+	atomic_dec(&er->ref);
+	reg1->alloc = 0;
+}
 
 /* Sandy Bridge-EP uncore support */
 static struct intel_uncore_type snbep_uncore_cbox;
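The rule the new uncore_get_constraint() enforces is easy to state: the first event to claim a box's shared match/mask register programs it, and later events are admitted only if they want identical contents; otherwise they get the empty constraint and cannot be scheduled. A simplified single-threaded sketch of that admission rule (a plain int stands in for the kernel's atomic_t and raw spinlock; illustration only):

#include <stdbool.h>
#include <stdint.h>

struct shared_reg {
	int      ref;		/* stands in for atomic_t er->ref */
	uint64_t config1;	/* match value */
	uint64_t config2;	/* mask value  */
};

/* Admission rule from uncore_get_constraint(): free, or exact match. */
static bool try_claim(struct shared_reg *er, uint64_t c1, uint64_t c2)
{
	if (er->ref == 0 || (er->config1 == c1 && er->config2 == c2)) {
		er->ref++;
		er->config1 = c1;
		er->config2 = c2;
		return true;	/* caller may schedule the event */
	}
	return false;		/* conflicting filter: empty constraint */
}

static void release(struct shared_reg *er)
{
	er->ref--;		/* mirrors uncore_put_constraint() */
}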
@@ -64,18 +136,15 @@ static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
 	pci_write_config_dword(pdev, box_ctl, config);
 }
 
-static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box,
-					struct perf_event *event)
+static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
 {
 	struct pci_dev *pdev = box->pci_dev;
 	struct hw_perf_event *hwc = &event->hw;
 
-	pci_write_config_dword(pdev, hwc->config_base, hwc->config |
-				SNBEP_PMON_CTL_EN);
+	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
 }
 
-static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box,
-					struct perf_event *event)
+static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
 {
 	struct pci_dev *pdev = box->pci_dev;
 	struct hw_perf_event *hwc = &event->hw;
@@ -83,8 +152,7 @@ static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box,
 	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
 }
 
-static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box,
-					struct perf_event *event)
+static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
 {
 	struct pci_dev *pdev = box->pci_dev;
 	struct hw_perf_event *hwc = &event->hw;
@@ -92,14 +160,15 @@ static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box,
 
 	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
 	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
+
 	return count;
 }
 
 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
 {
 	struct pci_dev *pdev = box->pci_dev;
-	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL,
-				SNBEP_PMON_BOX_CTL_INT);
+
+	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
 }
 
 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
@@ -112,7 +181,6 @@ static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
 		rdmsrl(msr, config);
 		config |= SNBEP_PMON_BOX_CTL_FRZ;
 		wrmsrl(msr, config);
-		return;
 	}
 }
 
@@ -126,12 +194,10 @@ static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
 		rdmsrl(msr, config);
 		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
 		wrmsrl(msr, config);
-		return;
 	}
 }
 
-static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box,
-					struct perf_event *event)
+static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
@@ -150,68 +216,15 @@ static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
 	wrmsrl(hwc->config_base, hwc->config);
 }
 
-static u64 snbep_uncore_msr_read_counter(struct intel_uncore_box *box,
-					struct perf_event *event)
-{
-	struct hw_perf_event *hwc = &event->hw;
-	u64 count;
-
-	rdmsrl(hwc->event_base, count);
-	return count;
-}
-
 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
 {
 	unsigned msr = uncore_msr_box_ctl(box);
+
 	if (msr)
 		wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
 }
 
-static struct event_constraint *
-snbep_uncore_get_constraint(struct intel_uncore_box *box,
-			    struct perf_event *event)
-{
-	struct intel_uncore_extra_reg *er;
-	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
-	unsigned long flags;
-	bool ok = false;
-
-	if (reg1->idx == EXTRA_REG_NONE || (box->phys_id >= 0 && reg1->alloc))
-		return NULL;
-
-	er = &box->shared_regs[reg1->idx];
-	raw_spin_lock_irqsave(&er->lock, flags);
-	if (!atomic_read(&er->ref) || er->config1 == reg1->config) {
-		atomic_inc(&er->ref);
-		er->config1 = reg1->config;
-		ok = true;
-	}
-	raw_spin_unlock_irqrestore(&er->lock, flags);
-
-	if (ok) {
-		if (box->phys_id >= 0)
-			reg1->alloc = 1;
-		return NULL;
-	}
-	return &constraint_empty;
-}
-
-static void snbep_uncore_put_constraint(struct intel_uncore_box *box,
-					struct perf_event *event)
-{
-	struct intel_uncore_extra_reg *er;
-	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
-
-	if (box->phys_id < 0 || !reg1->alloc)
-		return;
-
-	er = &box->shared_regs[reg1->idx];
-	atomic_dec(&er->ref);
-	reg1->alloc = 0;
-}
-
-static int snbep_uncore_hw_config(struct intel_uncore_box *box,
-				  struct perf_event *event)
+static int snbep_uncore_hw_config(struct intel_uncore_box *box, struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
@@ -221,14 +234,16 @@ static int snbep_uncore_hw_config(struct intel_uncore_box *box,
 			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
 		reg1->config = event->attr.config1 &
 			SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK;
-	} else if (box->pmu->type == &snbep_uncore_pcu) {
-		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
-		reg1->config = event->attr.config1 &
-			SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK;
 	} else {
-		return 0;
+		if (box->pmu->type == &snbep_uncore_pcu) {
+			reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
+			reg1->config = event->attr.config1 & SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK;
+		} else {
+			return 0;
+		}
 	}
 	reg1->idx = 0;
+
 	return 0;
 }
 
@@ -272,10 +287,19 @@ static struct attribute *snbep_uncore_pcu_formats_attr[] = {
 	&format_attr_thresh5.attr,
 	&format_attr_occ_invert.attr,
 	&format_attr_occ_edge.attr,
-	&format_attr_filter_brand0.attr,
-	&format_attr_filter_brand1.attr,
-	&format_attr_filter_brand2.attr,
-	&format_attr_filter_brand3.attr,
+	&format_attr_filter_band0.attr,
+	&format_attr_filter_band1.attr,
+	&format_attr_filter_band2.attr,
+	&format_attr_filter_band3.attr,
+	NULL,
+};
+
+static struct attribute *snbep_uncore_qpi_formats_attr[] = {
+	&format_attr_event_ext.attr,
+	&format_attr_umask.attr,
+	&format_attr_edge.attr,
+	&format_attr_inv.attr,
+	&format_attr_thresh8.attr,
 	NULL,
 };
 
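The format strings behind these attributes ("config:0-7,21" for the new QPI event_ext field, for example) tell the perf tool which config bits a named field occupies, with a value's bits consumed range by range. A hedged sketch of that scattering (apply_format is a made-up helper for illustration, not perf's actual parser):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical helper: scatter the low bits of "value" into each bit
 * range of a sysfs format spec in turn, the way "config:0-7,21"
 * describes the split event_ext field added above. */
struct bit_range { int lo, hi; };

static uint64_t apply_format(uint64_t value, const struct bit_range *r, int nranges)
{
	uint64_t config = 0;

	for (int i = 0; i < nranges; i++) {
		int width = r[i].hi - r[i].lo + 1;

		config |= (value & ((1ULL << width) - 1)) << r[i].lo;
		value >>= width;
	}
	return config;
}

int main(void)
{
	/* "config:0-7,21": bits 0-7 first, then bit 21 for the 9th bit. */
	const struct bit_range event_ext[] = { { 0, 7 }, { 21, 21 } };
	/* A 9-bit event code: low byte 0x38, extended bit set. */
	uint64_t config = apply_format(0x138, event_ext, 2);

	printf("config = %#llx\n", (unsigned long long)config); /* 0x200038 */
	return 0;
}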
@@ -314,15 +338,20 @@ static struct attribute_group snbep_uncore_pcu_format_group = {
 	.attrs = snbep_uncore_pcu_formats_attr,
 };
 
+static struct attribute_group snbep_uncore_qpi_format_group = {
+	.name = "format",
+	.attrs = snbep_uncore_qpi_formats_attr,
+};
+
 static struct intel_uncore_ops snbep_uncore_msr_ops = {
 	.init_box	= snbep_uncore_msr_init_box,
 	.disable_box	= snbep_uncore_msr_disable_box,
 	.enable_box	= snbep_uncore_msr_enable_box,
 	.disable_event	= snbep_uncore_msr_disable_event,
 	.enable_event	= snbep_uncore_msr_enable_event,
-	.read_counter	= snbep_uncore_msr_read_counter,
-	.get_constraint	= snbep_uncore_get_constraint,
-	.put_constraint	= snbep_uncore_put_constraint,
+	.read_counter	= uncore_msr_read_counter,
+	.get_constraint	= uncore_get_constraint,
+	.put_constraint	= uncore_put_constraint,
 	.hw_config	= snbep_uncore_hw_config,
 };
 
@@ -485,8 +514,13 @@ static struct intel_uncore_type snbep_uncore_qpi = {
 	.num_counters	= 4,
 	.num_boxes	= 2,
 	.perf_ctr_bits	= 48,
+	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
+	.event_ctl	= SNBEP_PCI_PMON_CTL0,
+	.event_mask	= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
+	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
+	.ops		= &snbep_uncore_pci_ops,
 	.event_descs	= snbep_uncore_qpi_events,
-	SNBEP_UNCORE_PCI_COMMON_INIT(),
+	.format_group	= &snbep_uncore_qpi_format_group,
 };
 
 
@@ -603,10 +637,8 @@ static void snbep_pci2phy_map_init(void)
 }
 /* end of Sandy Bridge-EP uncore support */
 
-
 /* Sandy Bridge uncore support */
-static void snb_uncore_msr_enable_event(struct intel_uncore_box *box,
-					struct perf_event *event)
+static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
 
@@ -616,20 +648,11 @@ static void snb_uncore_msr_enable_event(struct intel_uncore_box *box,
 		wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
 }
 
-static void snb_uncore_msr_disable_event(struct intel_uncore_box *box,
-					struct perf_event *event)
+static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
 {
 	wrmsrl(event->hw.config_base, 0);
 }
 
-static u64 snb_uncore_msr_read_counter(struct intel_uncore_box *box,
-					struct perf_event *event)
-{
-	u64 count;
-	rdmsrl(event->hw.event_base, count);
-	return count;
-}
-
 static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
 {
 	if (box->pmu->pmu_idx == 0) {
@@ -648,15 +671,15 @@ static struct attribute *snb_uncore_formats_attr[] = {
 };
 
 static struct attribute_group snb_uncore_format_group = {
 	.name		= "format",
 	.attrs		= snb_uncore_formats_attr,
 };
 
 static struct intel_uncore_ops snb_uncore_msr_ops = {
 	.init_box	= snb_uncore_msr_init_box,
 	.disable_event	= snb_uncore_msr_disable_event,
 	.enable_event	= snb_uncore_msr_enable_event,
-	.read_counter	= snb_uncore_msr_read_counter,
+	.read_counter	= uncore_msr_read_counter,
 };
 
 static struct event_constraint snb_uncore_cbox_constraints[] = {
@@ -697,12 +720,10 @@ static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
 
 static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
 {
-	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL,
-		NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
+	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
 }
 
-static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box,
-					struct perf_event *event)
+static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
 
@@ -744,7 +765,7 @@ static struct intel_uncore_ops nhm_uncore_msr_ops = {
 	.enable_box	= nhm_uncore_msr_enable_box,
 	.disable_event	= snb_uncore_msr_disable_event,
 	.enable_event	= nhm_uncore_msr_enable_event,
-	.read_counter	= snb_uncore_msr_read_counter,
+	.read_counter	= uncore_msr_read_counter,
 };
 
 static struct intel_uncore_type nhm_uncore = {
@@ -769,8 +790,1041 @@ static struct intel_uncore_type *nhm_msr_uncores[] = {
 };
 /* end of Nehalem uncore support */
 
-static void uncore_assign_hw_event(struct intel_uncore_box *box,
-				struct perf_event *event, int idx)
+/* Nehalem-EX uncore support */
+#define __BITS_VALUE(x, i, n)	((typeof(x))(((x) >> ((i) * (n))) & \
+				((1ULL << (n)) - 1)))
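__BITS_VALUE(x, i, n) reads the i-th n-bit-wide field out of x; the Nehalem-EX code below leans on it to pack several small indices and reference counts into single words. A quick standalone check (illustration only; typeof is a GCC extension, as in the kernel):

#include <assert.h>
#include <stdint.h>

/* Same definition as the macro added above. */
#define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))

int main(void)
{
	uint32_t packed = 0xdeadbeef;

	/* Field i of width n: i=0 is the lowest byte, and so on up. */
	assert(__BITS_VALUE(packed, 0, 8) == 0xef);
	assert(__BITS_VALUE(packed, 1, 8) == 0xbe);
	assert(__BITS_VALUE(packed, 2, 8) == 0xad);
	assert(__BITS_VALUE(packed, 3, 8) == 0xde);
	return 0;
}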
+
+DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
+DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
+DEFINE_UNCORE_FORMAT_ATTR(mm_cfg, mm_cfg, "config:63");
+DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
+DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");
+
+static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
+{
+	wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
+}
+
+static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
+{
+	unsigned msr = uncore_msr_box_ctl(box);
+	u64 config;
+
+	if (msr) {
+		rdmsrl(msr, config);
+		config &= ~((1ULL << uncore_num_counters(box)) - 1);
+		/* WBox has a fixed counter */
+		if (uncore_msr_fixed_ctl(box))
+			config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
+		wrmsrl(msr, config);
+	}
+}
+
+static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
+{
+	unsigned msr = uncore_msr_box_ctl(box);
+	u64 config;
+
+	if (msr) {
+		rdmsrl(msr, config);
+		config |= (1ULL << uncore_num_counters(box)) - 1;
+		/* WBox has a fixed counter */
+		if (uncore_msr_fixed_ctl(box))
+			config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
+		wrmsrl(msr, config);
+	}
+}
+
+static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+	wrmsrl(event->hw.config_base, 0);
+}
+
+static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
+		wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
+	else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
+		wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
+	else
+		wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
+}
+
+#define NHMEX_UNCORE_OPS_COMMON_INIT()				\
+	.init_box	= nhmex_uncore_msr_init_box,		\
+	.disable_box	= nhmex_uncore_msr_disable_box,		\
+	.enable_box	= nhmex_uncore_msr_enable_box,		\
+	.disable_event	= nhmex_uncore_msr_disable_event,	\
+	.read_counter	= uncore_msr_read_counter
+
+static struct intel_uncore_ops nhmex_uncore_ops = {
+	NHMEX_UNCORE_OPS_COMMON_INIT(),
+	.enable_event	= nhmex_uncore_msr_enable_event,
+};
+
+static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
+	&format_attr_event.attr,
+	&format_attr_edge.attr,
+	NULL,
+};
+
+static struct attribute_group nhmex_uncore_ubox_format_group = {
+	.name		= "format",
+	.attrs		= nhmex_uncore_ubox_formats_attr,
+};
+
+static struct intel_uncore_type nhmex_uncore_ubox = {
+	.name		= "ubox",
+	.num_counters	= 1,
+	.num_boxes	= 1,
+	.perf_ctr_bits	= 48,
+	.event_ctl	= NHMEX_U_MSR_PMON_EV_SEL,
+	.perf_ctr	= NHMEX_U_MSR_PMON_CTR,
+	.event_mask	= NHMEX_U_PMON_RAW_EVENT_MASK,
+	.box_ctl	= NHMEX_U_MSR_PMON_GLOBAL_CTL,
+	.ops		= &nhmex_uncore_ops,
+	.format_group	= &nhmex_uncore_ubox_format_group
+};
+
+static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
+	&format_attr_event.attr,
+	&format_attr_umask.attr,
+	&format_attr_edge.attr,
+	&format_attr_inv.attr,
+	&format_attr_thresh8.attr,
+	NULL,
+};
+
+static struct attribute_group nhmex_uncore_cbox_format_group = {
+	.name = "format",
+	.attrs = nhmex_uncore_cbox_formats_attr,
+};
+
+static struct intel_uncore_type nhmex_uncore_cbox = {
+	.name		= "cbox",
+	.num_counters	= 6,
+	.num_boxes	= 8,
+	.perf_ctr_bits	= 48,
+	.event_ctl	= NHMEX_C0_MSR_PMON_EV_SEL0,
+	.perf_ctr	= NHMEX_C0_MSR_PMON_CTR0,
+	.event_mask	= NHMEX_PMON_RAW_EVENT_MASK,
+	.box_ctl	= NHMEX_C0_MSR_PMON_GLOBAL_CTL,
+	.msr_offset	= NHMEX_C_MSR_OFFSET,
+	.pair_ctr_ctl	= 1,
+	.ops		= &nhmex_uncore_ops,
+	.format_group	= &nhmex_uncore_cbox_format_group
+};
+
+static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
+	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
+	{ /* end: all zeroes */ },
+};
+
+static struct intel_uncore_type nhmex_uncore_wbox = {
+	.name		= "wbox",
+	.num_counters	= 4,
+	.num_boxes	= 1,
+	.perf_ctr_bits	= 48,
+	.event_ctl	= NHMEX_W_MSR_PMON_CNT0,
+	.perf_ctr	= NHMEX_W_MSR_PMON_EVT_SEL0,
+	.fixed_ctr	= NHMEX_W_MSR_PMON_FIXED_CTR,
+	.fixed_ctl	= NHMEX_W_MSR_PMON_FIXED_CTL,
+	.event_mask	= NHMEX_PMON_RAW_EVENT_MASK,
+	.box_ctl	= NHMEX_W_MSR_GLOBAL_CTL,
+	.pair_ctr_ctl	= 1,
+	.event_descs	= nhmex_uncore_wbox_events,
+	.ops		= &nhmex_uncore_ops,
+	.format_group	= &nhmex_uncore_cbox_format_group
+};
+
+static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
+	int ctr, ev_sel;
+
+	ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
+		NHMEX_B_PMON_CTR_SHIFT;
+	ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
+		NHMEX_B_PMON_CTL_EV_SEL_SHIFT;
+
+	/* events that do not use the match/mask registers */
+	if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
+	    (ctr == 2 && ev_sel != 0x4) || ctr == 3)
+		return 0;
+
+	if (box->pmu->pmu_idx == 0)
+		reg1->reg = NHMEX_B0_MSR_MATCH;
+	else
+		reg1->reg = NHMEX_B1_MSR_MATCH;
+	reg1->idx = 0;
+	reg1->config = event->attr.config1;
+	reg2->config = event->attr.config2;
+	return 0;
+}
+
+static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
+
+	if (reg1->idx != EXTRA_REG_NONE) {
+		wrmsrl(reg1->reg, reg1->config);
+		wrmsrl(reg1->reg + 1, reg2->config);
+	}
+	wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
+		(hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
+}
+
+/*
+ * The Bbox has 4 counters, but each counter monitors different events.
+ * Use bits 6-7 in the event config to select counter.
+ */
+static struct event_constraint nhmex_uncore_bbox_constraints[] = {
+	EVENT_CONSTRAINT(0   , 1, 0xc0),
+	EVENT_CONSTRAINT(0x40, 2, 0xc0),
+	EVENT_CONSTRAINT(0x80, 4, 0xc0),
+	EVENT_CONSTRAINT(0xc0, 8, 0xc0),
+	EVENT_CONSTRAINT_END,
+};
995 | static struct attribute *nhmex_uncore_bbox_formats_attr[] = { | ||
996 | &format_attr_event5.attr, | ||
997 | &format_attr_counter.attr, | ||
998 | &format_attr_match.attr, | ||
999 | &format_attr_mask.attr, | ||
1000 | NULL, | ||
1001 | }; | ||
1002 | |||
1003 | static struct attribute_group nhmex_uncore_bbox_format_group = { | ||
1004 | .name = "format", | ||
1005 | .attrs = nhmex_uncore_bbox_formats_attr, | ||
1006 | }; | ||
1007 | |||
1008 | static struct intel_uncore_ops nhmex_uncore_bbox_ops = { | ||
1009 | NHMEX_UNCORE_OPS_COMMON_INIT(), | ||
1010 | .enable_event = nhmex_bbox_msr_enable_event, | ||
1011 | .hw_config = nhmex_bbox_hw_config, | ||
1012 | .get_constraint = uncore_get_constraint, | ||
1013 | .put_constraint = uncore_put_constraint, | ||
1014 | }; | ||
1015 | |||
1016 | static struct intel_uncore_type nhmex_uncore_bbox = { | ||
1017 | .name = "bbox", | ||
1018 | .num_counters = 4, | ||
1019 | .num_boxes = 2, | ||
1020 | .perf_ctr_bits = 48, | ||
1021 | .event_ctl = NHMEX_B0_MSR_PMON_CTL0, | ||
1022 | .perf_ctr = NHMEX_B0_MSR_PMON_CTR0, | ||
1023 | .event_mask = NHMEX_B_PMON_RAW_EVENT_MASK, | ||
1024 | .box_ctl = NHMEX_B0_MSR_PMON_GLOBAL_CTL, | ||
1025 | .msr_offset = NHMEX_B_MSR_OFFSET, | ||
1026 | .pair_ctr_ctl = 1, | ||
1027 | .num_shared_regs = 1, | ||
1028 | .constraints = nhmex_uncore_bbox_constraints, | ||
1029 | .ops = &nhmex_uncore_bbox_ops, | ||
1030 | .format_group = &nhmex_uncore_bbox_format_group | ||
1031 | }; | ||
1032 | |||
1033 | static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
1034 | { | ||
1035 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
1036 | struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; | ||
1037 | |||
1038 | if (event->attr.config & NHMEX_S_PMON_MM_CFG_EN) { | ||
1039 | reg1->config = event->attr.config1; | ||
1040 | reg2->config = event->attr.config2; | ||
1041 | } else { | ||
1042 | reg1->config = ~0ULL; | ||
1043 | reg2->config = ~0ULL; | ||
1044 | } | ||
1045 | |||
1046 | if (box->pmu->pmu_idx == 0) | ||
1047 | reg1->reg = NHMEX_S0_MSR_MM_CFG; | ||
1048 | else | ||
1049 | reg1->reg = NHMEX_S1_MSR_MM_CFG; | ||
1050 | |||
1051 | reg1->idx = 0; | ||
1052 | |||
1053 | return 0; | ||
1054 | } | ||
1055 | |||
1056 | static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
1057 | { | ||
1058 | struct hw_perf_event *hwc = &event->hw; | ||
1059 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
1060 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
1061 | |||
1062 | wrmsrl(reg1->reg, 0); | ||
1063 | if (reg1->config != ~0ULL || reg2->config != ~0ULL) { | ||
1064 | wrmsrl(reg1->reg + 1, reg1->config); | ||
1065 | wrmsrl(reg1->reg + 2, reg2->config); | ||
1066 | wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN); | ||
1067 | } | ||
1068 | wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22); | ||
1069 | } | ||
1070 | |||
1071 | static struct attribute *nhmex_uncore_sbox_formats_attr[] = { | ||
1072 | &format_attr_event.attr, | ||
1073 | &format_attr_umask.attr, | ||
1074 | &format_attr_edge.attr, | ||
1075 | &format_attr_inv.attr, | ||
1076 | &format_attr_thresh8.attr, | ||
1077 | &format_attr_mm_cfg.attr, | ||
1078 | &format_attr_match.attr, | ||
1079 | &format_attr_mask.attr, | ||
1080 | NULL, | ||
1081 | }; | ||
1082 | |||
1083 | static struct attribute_group nhmex_uncore_sbox_format_group = { | ||
1084 | .name = "format", | ||
1085 | .attrs = nhmex_uncore_sbox_formats_attr, | ||
1086 | }; | ||
1087 | |||
1088 | static struct intel_uncore_ops nhmex_uncore_sbox_ops = { | ||
1089 | NHMEX_UNCORE_OPS_COMMON_INIT(), | ||
1090 | .enable_event = nhmex_sbox_msr_enable_event, | ||
1091 | .hw_config = nhmex_sbox_hw_config, | ||
1092 | .get_constraint = uncore_get_constraint, | ||
1093 | .put_constraint = uncore_put_constraint, | ||
1094 | }; | ||
1095 | |||
1096 | static struct intel_uncore_type nhmex_uncore_sbox = { | ||
1097 | .name = "sbox", | ||
1098 | .num_counters = 4, | ||
1099 | .num_boxes = 2, | ||
1100 | .perf_ctr_bits = 48, | ||
1101 | .event_ctl = NHMEX_S0_MSR_PMON_CTL0, | ||
1102 | .perf_ctr = NHMEX_S0_MSR_PMON_CTR0, | ||
1103 | .event_mask = NHMEX_PMON_RAW_EVENT_MASK, | ||
1104 | .box_ctl = NHMEX_S0_MSR_PMON_GLOBAL_CTL, | ||
1105 | .msr_offset = NHMEX_S_MSR_OFFSET, | ||
1106 | .pair_ctr_ctl = 1, | ||
1107 | .num_shared_regs = 1, | ||
1108 | .ops = &nhmex_uncore_sbox_ops, | ||
1109 | .format_group = &nhmex_uncore_sbox_format_group | ||
1110 | }; | ||
1111 | |||
1112 | enum { | ||
1113 | EXTRA_REG_NHMEX_M_FILTER, | ||
1114 | EXTRA_REG_NHMEX_M_DSP, | ||
1115 | EXTRA_REG_NHMEX_M_ISS, | ||
1116 | EXTRA_REG_NHMEX_M_MAP, | ||
1117 | EXTRA_REG_NHMEX_M_MSC_THR, | ||
1118 | EXTRA_REG_NHMEX_M_PGT, | ||
1119 | EXTRA_REG_NHMEX_M_PLD, | ||
1120 | EXTRA_REG_NHMEX_M_ZDP_CTL_FVC, | ||
1121 | }; | ||
1122 | |||
1123 | static struct extra_reg nhmex_uncore_mbox_extra_regs[] = { | ||
1124 | MBOX_INC_SEL_EXTAR_REG(0x0, DSP), | ||
1125 | MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR), | ||
1126 | MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR), | ||
1127 | MBOX_INC_SEL_EXTAR_REG(0x9, ISS), | ||
1128 | /* event 0xa uses two extra registers */ | ||
1129 | MBOX_INC_SEL_EXTAR_REG(0xa, ISS), | ||
1130 | MBOX_INC_SEL_EXTAR_REG(0xa, PLD), | ||
1131 | MBOX_INC_SEL_EXTAR_REG(0xb, PLD), | ||
1132 | /* events 0xd ~ 0x10 use the same extra register */ | ||
1133 | MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC), | ||
1134 | MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC), | ||
1135 | MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC), | ||
1136 | MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC), | ||
1137 | MBOX_INC_SEL_EXTAR_REG(0x16, PGT), | ||
1138 | MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP), | ||
1139 | MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS), | ||
1140 | MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT), | ||
1141 | MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP), | ||
1142 | EVENT_EXTRA_END | ||
1143 | }; | ||
1144 | |||
1145 | static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config) | ||
1146 | { | ||
1147 | struct intel_uncore_extra_reg *er; | ||
1148 | unsigned long flags; | ||
1149 | bool ret = false; | ||
1150 | u64 mask; | ||
1151 | |||
1152 | if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) { | ||
1153 | er = &box->shared_regs[idx]; | ||
1154 | raw_spin_lock_irqsave(&er->lock, flags); | ||
1155 | if (!atomic_read(&er->ref) || er->config == config) { | ||
1156 | atomic_inc(&er->ref); | ||
1157 | er->config = config; | ||
1158 | ret = true; | ||
1159 | } | ||
1160 | raw_spin_unlock_irqrestore(&er->lock, flags); | ||
1161 | |||
1162 | return ret; | ||
1163 | } | ||
1164 | /* | ||
1165 | * The ZDP_CTL_FVC MSR has 4 fields which are used to control | ||
1166 | * events 0xd ~ 0x10. Besides these 4 fields, there are additional | ||
1167 | * fields which are shared. | ||
1168 | */ | ||
1169 | idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; | ||
1170 | if (WARN_ON_ONCE(idx >= 4)) | ||
1171 | return false; | ||
1172 | |||
1173 | /* mask of the shared fields */ | ||
1174 | mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK; | ||
1175 | er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC]; | ||
1176 | |||
1177 | raw_spin_lock_irqsave(&er->lock, flags); | ||
1178 | /* add mask of the non-shared field if it's in use */ | ||
1179 | if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) | ||
1180 | mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); | ||
1181 | |||
1182 | if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) { | ||
1183 | atomic_add(1 << (idx * 8), &er->ref); | ||
1184 | mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK | | ||
1185 | NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); | ||
1186 | er->config &= ~mask; | ||
1187 | er->config |= (config & mask); | ||
1188 | ret = true; | ||
1189 | } | ||
1190 | raw_spin_unlock_irqrestore(&er->lock, flags); | ||
1191 | |||
1192 | return ret; | ||
1193 | } | ||
1194 | |||
1195 | static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx) | ||
1196 | { | ||
1197 | struct intel_uncore_extra_reg *er; | ||
1198 | |||
1199 | if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) { | ||
1200 | er = &box->shared_regs[idx]; | ||
1201 | atomic_dec(&er->ref); | ||
1202 | return; | ||
1203 | } | ||
1204 | |||
1205 | idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; | ||
1206 | er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC]; | ||
1207 | atomic_sub(1 << (idx * 8), &er->ref); | ||
1208 | } | ||
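Note how the ZDP_CTL_FVC path keeps four independent 8-bit reference counts inside one atomic_t: atomic_add(1 << (idx * 8), ...) bumps field idx, and __BITS_VALUE(..., idx, 8) reads one field back. The same idiom with a plain int (illustration only, no atomicity):

#include <stdio.h>

/* Four 8-bit ref-counts packed in one word, as
 * nhmex_mbox_{get,put}_shared_reg() do for the four ZDP_CTL_FVC fields. */
#define REF_FIELD(x, i)	(((x) >> ((i) * 8)) & 0xff)

int main(void)
{
	int ref = 0;

	ref += 1 << (2 * 8);	/* take field 2: atomic_add(1 << (idx*8)) */
	ref += 1 << (2 * 8);	/* second user of field 2 */
	ref += 1 << (0 * 8);	/* one user of field 0 */

	printf("field0=%d field2=%d raw=%#x\n",
	       (int)REF_FIELD(ref, 0), (int)REF_FIELD(ref, 2), ref);
	return 0;	/* prints: field0=1 field2=2 raw=0x20001 */
}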
1209 | |||
1210 | u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify) | ||
1211 | { | ||
1212 | struct hw_perf_event *hwc = &event->hw; | ||
1213 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
1214 | int idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8); | ||
1215 | u64 config = reg1->config; | ||
1216 | |||
1217 | /* get the non-shared control bits and shift them */ | ||
1218 | idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; | ||
1219 | config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); | ||
1220 | if (new_idx > orig_idx) { | ||
1221 | idx = new_idx - orig_idx; | ||
1222 | config <<= 3 * idx; | ||
1223 | } else { | ||
1224 | idx = orig_idx - new_idx; | ||
1225 | config >>= 3 * idx; | ||
1226 | } | ||
1227 | |||
1228 | /* add the shared control bits back */ | ||
1229 | config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config; | ||
1230 | if (modify) { | ||
1231 | /* adjust the main event selector */ | ||
1232 | if (new_idx > orig_idx) | ||
1233 | hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT; | ||
1234 | else | ||
1235 | hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT; | ||
1236 | reg1->config = config; | ||
1237 | reg1->idx = ~0xff | new_idx; | ||
1238 | } | ||
1239 | return config; | ||
1240 | } | ||
1241 | |||
1242 | static struct event_constraint * | ||
1243 | nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
1244 | { | ||
1245 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
1246 | struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; | ||
1247 | int i, idx[2], alloc = 0; | ||
1248 | u64 config1 = reg1->config; | ||
1249 | |||
1250 | idx[0] = __BITS_VALUE(reg1->idx, 0, 8); | ||
1251 | idx[1] = __BITS_VALUE(reg1->idx, 1, 8); | ||
1252 | again: | ||
1253 | for (i = 0; i < 2; i++) { | ||
1254 | if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i))) | ||
1255 | idx[i] = 0xff; | ||
1256 | |||
1257 | if (idx[i] == 0xff) | ||
1258 | continue; | ||
1259 | |||
1260 | if (!nhmex_mbox_get_shared_reg(box, idx[i], | ||
1261 | __BITS_VALUE(config1, i, 32))) | ||
1262 | goto fail; | ||
1263 | alloc |= (0x1 << i); | ||
1264 | } | ||
1265 | |||
1266 | /* for the match/mask registers */ | ||
1267 | if ((uncore_box_is_fake(box) || !reg2->alloc) && | ||
1268 | !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config)) | ||
1269 | goto fail; | ||
1270 | |||
1271 | /* | ||
1272 | * If it's a fake box -- as per validate_{group,event}() we | ||
1273 | * shouldn't touch event state and we can avoid doing so | ||
1274 | * since both will only call get_event_constraints() once | ||
1275 | * on each event, this avoids the need for reg->alloc. | ||
1276 | */ | ||
1277 | if (!uncore_box_is_fake(box)) { | ||
1278 | if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) | ||
1279 | nhmex_mbox_alter_er(event, idx[0], true); | ||
1280 | reg1->alloc |= alloc; | ||
1281 | reg2->alloc = 1; | ||
1282 | } | ||
1283 | return NULL; | ||
1284 | fail: | ||
1285 | if (idx[0] != 0xff && !(alloc & 0x1) && | ||
1286 | idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) { | ||
1287 | /* | ||
1288 | * events 0xd ~ 0x10 are functional identical, but are | ||
1289 | * controlled by different fields in the ZDP_CTL_FVC | ||
1290 | * register. If we failed to take one field, try the | ||
1291 | * rest 3 choices. | ||
1292 | */ | ||
1293 | BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff); | ||
1294 | idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; | ||
1295 | idx[0] = (idx[0] + 1) % 4; | ||
1296 | idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; | ||
1297 | if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) { | ||
1298 | config1 = nhmex_mbox_alter_er(event, idx[0], false); | ||
1299 | goto again; | ||
1300 | } | ||
1301 | } | ||
1302 | |||
1303 | if (alloc & 0x1) | ||
1304 | nhmex_mbox_put_shared_reg(box, idx[0]); | ||
1305 | if (alloc & 0x2) | ||
1306 | nhmex_mbox_put_shared_reg(box, idx[1]); | ||
1307 | return &constraint_empty; | ||
1308 | } | ||
1309 | |||
1310 | static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
1311 | { | ||
1312 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
1313 | struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; | ||
1314 | |||
1315 | if (uncore_box_is_fake(box)) | ||
1316 | return; | ||
1317 | |||
1318 | if (reg1->alloc & 0x1) | ||
1319 | nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8)); | ||
1320 | if (reg1->alloc & 0x2) | ||
1321 | nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8)); | ||
1322 | reg1->alloc = 0; | ||
1323 | |||
1324 | if (reg2->alloc) { | ||
1325 | nhmex_mbox_put_shared_reg(box, reg2->idx); | ||
1326 | reg2->alloc = 0; | ||
1327 | } | ||
1328 | } | ||
1329 | |||
1330 | static int nhmex_mbox_extra_reg_idx(struct extra_reg *er) | ||
1331 | { | ||
1332 | if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) | ||
1333 | return er->idx; | ||
1334 | return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd; | ||
1335 | } | ||
1336 | |||
1337 | static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
1338 | { | ||
1339 | struct intel_uncore_type *type = box->pmu->type; | ||
1340 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
1341 | struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; | ||
1342 | struct extra_reg *er; | ||
1343 | unsigned msr; | ||
1344 | int reg_idx = 0; | ||
1345 | |||
1346 | if (WARN_ON_ONCE(reg1->idx != -1)) | ||
1347 | return -EINVAL; | ||
1348 | /* | ||
1349 | * The mbox events may require 2 extra MSRs at the most. But only | ||
1350 | * the lower 32 bits in these MSRs are significant, so we can use | ||
1351 | * config1 to pass two MSRs' config. | ||
1352 | */ | ||
1353 | for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) { | ||
1354 | if (er->event != (event->hw.config & er->config_mask)) | ||
1355 | continue; | ||
1356 | if (event->attr.config1 & ~er->valid_mask) | ||
1357 | return -EINVAL; | ||
1358 | if (er->idx == __BITS_VALUE(reg1->idx, 0, 8) || | ||
1359 | er->idx == __BITS_VALUE(reg1->idx, 1, 8)) | ||
1360 | continue; | ||
1361 | if (WARN_ON_ONCE(reg_idx >= 2)) | ||
1362 | return -EINVAL; | ||
1363 | |||
1364 | msr = er->msr + type->msr_offset * box->pmu->pmu_idx; | ||
1365 | if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff)) | ||
1366 | return -EINVAL; | ||
1367 | |||
1368 | /* always use the 32~63 bits to pass the PLD config */ | ||
1369 | if (er->idx == EXTRA_REG_NHMEX_M_PLD) | ||
1370 | reg_idx = 1; | ||
1371 | |||
1372 | reg1->idx &= ~(0xff << (reg_idx * 8)); | ||
1373 | reg1->reg &= ~(0xffff << (reg_idx * 16)); | ||
1374 | reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8); | ||
1375 | reg1->reg |= msr << (reg_idx * 16); | ||
1376 | reg1->config = event->attr.config1; | ||
1377 | reg_idx++; | ||
1378 | } | ||
1379 | /* use config2 to pass the filter config */ | ||
1380 | reg2->idx = EXTRA_REG_NHMEX_M_FILTER; | ||
1381 | if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN) | ||
1382 | reg2->config = event->attr.config2; | ||
1383 | else | ||
1384 | reg2->config = ~0ULL; | ||
1385 | if (box->pmu->pmu_idx == 0) | ||
1386 | reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG; | ||
1387 | else | ||
1388 | reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG; | ||
1389 | |||
1390 | return 0; | ||
1391 | } | ||
1392 | |||
1393 | static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx) | ||
1394 | { | ||
1395 | struct intel_uncore_extra_reg *er; | ||
1396 | unsigned long flags; | ||
1397 | u64 config; | ||
1398 | |||
1399 | if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) | ||
1400 | return box->shared_regs[idx].config; | ||
1401 | |||
1402 | er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC]; | ||
1403 | raw_spin_lock_irqsave(&er->lock, flags); | ||
1404 | config = er->config; | ||
1405 | raw_spin_unlock_irqrestore(&er->lock, flags); | ||
1406 | return config; | ||
1407 | } | ||
1408 | |||
1409 | static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
1410 | { | ||
1411 | struct hw_perf_event *hwc = &event->hw; | ||
1412 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
1413 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
1414 | int idx; | ||
1415 | |||
1416 | idx = __BITS_VALUE(reg1->idx, 0, 8); | ||
1417 | if (idx != 0xff) | ||
1418 | wrmsrl(__BITS_VALUE(reg1->reg, 0, 16), | ||
1419 | nhmex_mbox_shared_reg_config(box, idx)); | ||
1420 | idx = __BITS_VALUE(reg1->idx, 1, 8); | ||
1421 | if (idx != 0xff) | ||
1422 | wrmsrl(__BITS_VALUE(reg1->reg, 1, 16), | ||
1423 | nhmex_mbox_shared_reg_config(box, idx)); | ||
1424 | |||
1425 | wrmsrl(reg2->reg, 0); | ||
1426 | if (reg2->config != ~0ULL) { | ||
1427 | wrmsrl(reg2->reg + 1, | ||
1428 | reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK); | ||
1429 | wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK & | ||
1430 | (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT)); | ||
1431 | wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN); | ||
1432 | } | ||
1433 | |||
1434 | wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0); | ||
1435 | } | ||
1436 | |||
1437 | DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3"); | ||
1438 | DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5"); | ||
1439 | DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6"); | ||
1440 | DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7"); | ||
1441 | DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13"); | ||
1442 | DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21"); | ||
1443 | DEFINE_UNCORE_FORMAT_ATTR(filter_cfg, filter_cfg, "config2:63"); | ||
1444 | DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33"); | ||
1445 | DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61"); | ||
1446 | DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31"); | ||
1447 | DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31"); | ||
1448 | DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31"); | ||
1449 | DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31"); | ||
1450 | DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31"); | ||
1451 | DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31"); | ||
1452 | DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63"); | ||
1453 | |||
1454 | static struct attribute *nhmex_uncore_mbox_formats_attr[] = { | ||
1455 | &format_attr_count_mode.attr, | ||
1456 | &format_attr_storage_mode.attr, | ||
1457 | &format_attr_wrap_mode.attr, | ||
1458 | &format_attr_flag_mode.attr, | ||
1459 | &format_attr_inc_sel.attr, | ||
1460 | &format_attr_set_flag_sel.attr, | ||
1461 | &format_attr_filter_cfg.attr, | ||
1462 | &format_attr_filter_match.attr, | ||
1463 | &format_attr_filter_mask.attr, | ||
1464 | &format_attr_dsp.attr, | ||
1465 | &format_attr_thr.attr, | ||
1466 | &format_attr_fvc.attr, | ||
1467 | &format_attr_pgt.attr, | ||
1468 | &format_attr_map.attr, | ||
1469 | &format_attr_iss.attr, | ||
1470 | &format_attr_pld.attr, | ||
1471 | NULL, | ||
1472 | }; | ||
1473 | |||
1474 | static struct attribute_group nhmex_uncore_mbox_format_group = { | ||
1475 | .name = "format", | ||
1476 | .attrs = nhmex_uncore_mbox_formats_attr, | ||
1477 | }; | ||
1478 | |||
1479 | static struct uncore_event_desc nhmex_uncore_mbox_events[] = { | ||
1480 | INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"), | ||
1481 | INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"), | ||
1482 | { /* end: all zeroes */ }, | ||
1483 | }; | ||
1484 | |||
1485 | static struct intel_uncore_ops nhmex_uncore_mbox_ops = { | ||
1486 | NHMEX_UNCORE_OPS_COMMON_INIT(), | ||
1487 | .enable_event = nhmex_mbox_msr_enable_event, | ||
1488 | .hw_config = nhmex_mbox_hw_config, | ||
1489 | .get_constraint = nhmex_mbox_get_constraint, | ||
1490 | .put_constraint = nhmex_mbox_put_constraint, | ||
1491 | }; | ||
1492 | |||
1493 | static struct intel_uncore_type nhmex_uncore_mbox = { | ||
1494 | .name = "mbox", | ||
1495 | .num_counters = 6, | ||
1496 | .num_boxes = 2, | ||
1497 | .perf_ctr_bits = 48, | ||
1498 | .event_ctl = NHMEX_M0_MSR_PMU_CTL0, | ||
1499 | .perf_ctr = NHMEX_M0_MSR_PMU_CNT0, | ||
1500 | .event_mask = NHMEX_M_PMON_RAW_EVENT_MASK, | ||
1501 | .box_ctl = NHMEX_M0_MSR_GLOBAL_CTL, | ||
1502 | .msr_offset = NHMEX_M_MSR_OFFSET, | ||
1503 | .pair_ctr_ctl = 1, | ||
1504 | .num_shared_regs = 8, | ||
1505 | .event_descs = nhmex_uncore_mbox_events, | ||
1506 | .ops = &nhmex_uncore_mbox_ops, | ||
1507 | .format_group = &nhmex_uncore_mbox_format_group, | ||
1508 | }; | ||
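The format attributes above tell perf userspace how raw event fields map onto the config words. A sketch of the decomposition, assuming the NHMEX_M_* definitions from the header hunk later in this diff; the bbox_cmds_read string "inc_sel=0xd,fvc=0x2800" would become:

	u64 config  = 0xdULL << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;	/* config:9-13 */
	u64 config1 = 0x2800;					/* fvc, config1:0-31 */

Since uncore PMUs are registered per box as uncore_<name>_<idx>, such an event would typically be requested as uncore_mbox_0/inc_sel=0xd,fvc=0x2800/.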
1509 | |||
1510 | static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event) | ||
1511 | { | ||
1512 | struct hw_perf_event *hwc = &event->hw; | ||
1513 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
1514 | int port; | ||
1515 | |||
1516 | /* adjust the main event selector */ | ||
1517 | if (reg1->idx % 2) { | ||
1518 | reg1->idx--; | ||
1519 | hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT; | ||
1520 | } else { | ||
1521 | reg1->idx++; | ||
1522 | hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT; | ||
1523 | } | ||
1524 | |||
1525 | /* adjust address or config of extra register */ | ||
1526 | port = reg1->idx / 6 + box->pmu->pmu_idx * 4; | ||
1527 | switch (reg1->idx % 6) { | ||
1528 | case 0: | ||
1529 | reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG0(port); | ||
1530 | break; | ||
1531 | case 1: | ||
1532 | reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG1(port); | ||
1533 | break; | ||
1534 | case 2: | ||
1535 | /* move bits 8~15 down to bits 0~7 */ | ||
1536 | reg1->config >>= 8; | ||
1537 | break; | ||
1538 | case 3: | ||
1539 | /* move bits 0~7 up to bits 8~15 */ | ||
1540 | reg1->config <<= 8; | ||
1541 | break; | ||
1542 | case 4: | ||
1543 | reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port); | ||
1544 | break; | ||
1545 | case 5: | ||
1546 | reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port); | ||
1547 | break; | ||
1548 | } | ||
1549 | } | ||
1550 | |||
1551 | /* | ||
1552 | * Each rbox has 4 event sets, which monitor QPI ports 0~3 or 4~7. | ||
1553 | * An event set consists of 6 events; the 3rd and 4th events in | ||
1554 | * an event set share the same extra register, so an event set | ||
1555 | * uses 5 extra registers. | ||
1556 | */ | ||
1557 | static struct event_constraint * | ||
1558 | nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
1559 | { | ||
1560 | struct hw_perf_event *hwc = &event->hw; | ||
1561 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
1562 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
1563 | struct intel_uncore_extra_reg *er; | ||
1564 | unsigned long flags; | ||
1565 | int idx, er_idx; | ||
1566 | u64 config1; | ||
1567 | bool ok = false; | ||
1568 | |||
1569 | if (!uncore_box_is_fake(box) && reg1->alloc) | ||
1570 | return NULL; | ||
1571 | |||
1572 | idx = reg1->idx % 6; | ||
1573 | config1 = reg1->config; | ||
1574 | again: | ||
1575 | er_idx = idx; | ||
1576 | /* the 3rd and 4th events use the same extra register */ | ||
1577 | if (er_idx > 2) | ||
1578 | er_idx--; | ||
1579 | er_idx += (reg1->idx / 6) * 5; | ||
1580 | |||
1581 | er = &box->shared_regs[er_idx]; | ||
1582 | raw_spin_lock_irqsave(&er->lock, flags); | ||
1583 | if (idx < 2) { | ||
1584 | if (!atomic_read(&er->ref) || er->config == reg1->config) { | ||
1585 | atomic_inc(&er->ref); | ||
1586 | er->config = reg1->config; | ||
1587 | ok = true; | ||
1588 | } | ||
1589 | } else if (idx == 2 || idx == 3) { | ||
1590 | /* | ||
1591 | * these two events use different fields in an extra register: | ||
1592 | * bits 0~7 and bits 8~15, respectively. | ||
1593 | */ | ||
1594 | u64 mask = 0xff << ((idx - 2) * 8); | ||
1595 | if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) || | ||
1596 | !((er->config ^ config1) & mask)) { | ||
1597 | atomic_add(1 << ((idx - 2) * 8), &er->ref); | ||
1598 | er->config &= ~mask; | ||
1599 | er->config |= config1 & mask; | ||
1600 | ok = true; | ||
1601 | } | ||
1602 | } else { | ||
1603 | if (!atomic_read(&er->ref) || | ||
1604 | (er->config == (hwc->config >> 32) && | ||
1605 | er->config1 == reg1->config && | ||
1606 | er->config2 == reg2->config)) { | ||
1607 | atomic_inc(&er->ref); | ||
1608 | er->config = (hwc->config >> 32); | ||
1609 | er->config1 = reg1->config; | ||
1610 | er->config2 = reg2->config; | ||
1611 | ok = true; | ||
1612 | } | ||
1613 | } | ||
1614 | raw_spin_unlock_irqrestore(&er->lock, flags); | ||
1615 | |||
1616 | if (!ok) { | ||
1617 | /* | ||
1618 | * The Rbox events always come in pairs. The paired | ||
1619 | * events are functionally identical, but use different | ||
1620 | * extra registers. If we failed to take an extra | ||
1621 | * register, try the alternative one. | ||
1622 | */ | ||
1623 | if (idx % 2) | ||
1624 | idx--; | ||
1625 | else | ||
1626 | idx++; | ||
1627 | if (idx != reg1->idx % 6) { | ||
1628 | if (idx == 2) | ||
1629 | config1 >>= 8; | ||
1630 | else if (idx == 3) | ||
1631 | config1 <<= 8; | ||
1632 | goto again; | ||
1633 | } | ||
1634 | } else { | ||
1635 | if (!uncore_box_is_fake(box)) { | ||
1636 | if (idx != reg1->idx % 6) | ||
1637 | nhmex_rbox_alter_er(box, event); | ||
1638 | reg1->alloc = 1; | ||
1639 | } | ||
1640 | return NULL; | ||
1641 | } | ||
1642 | return &constraint_empty; | ||
1643 | } | ||
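The idx to er_idx folding in the function above can be read as a small helper; this hypothetical nhmex_rbox_er_idx() mirrors the code exactly and is shown only for illustration:

static int nhmex_rbox_er_idx(int idx)
{
	int er = idx % 6;

	if (er > 2)
		er--;	/* the 3rd and 4th events share one register */
	return er + (idx / 6) * 5;
}

For example, reg1->idx = 9 is the 4th event of the second set: er = 3 - 1 = 2, plus 1 * 5, giving er_idx = 7. Within one set the six events thus map to extra registers 0, 1, 2, 2, 3, 4.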
1644 | |||
1645 | static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
1646 | { | ||
1647 | struct intel_uncore_extra_reg *er; | ||
1648 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
1649 | int idx, er_idx; | ||
1650 | |||
1651 | if (uncore_box_is_fake(box) || !reg1->alloc) | ||
1652 | return; | ||
1653 | |||
1654 | idx = reg1->idx % 6; | ||
1655 | er_idx = idx; | ||
1656 | if (er_idx > 2) | ||
1657 | er_idx--; | ||
1658 | er_idx += (reg1->idx / 6) * 5; | ||
1659 | |||
1660 | er = &box->shared_regs[er_idx]; | ||
1661 | if (idx == 2 || idx == 3) | ||
1662 | atomic_sub(1 << ((idx - 2) * 8), &er->ref); | ||
1663 | else | ||
1664 | atomic_dec(&er->ref); | ||
1665 | |||
1666 | reg1->alloc = 0; | ||
1667 | } | ||
1668 | |||
1669 | static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
1670 | { | ||
1671 | struct hw_perf_event *hwc = &event->hw; | ||
1672 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
1673 | struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; | ||
1674 | int port, idx; | ||
1675 | |||
1676 | idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >> | ||
1677 | NHMEX_R_PMON_CTL_EV_SEL_SHIFT; | ||
1678 | if (idx >= 0x18) | ||
1679 | return -EINVAL; | ||
1680 | |||
1681 | reg1->idx = idx; | ||
1682 | reg1->config = event->attr.config1; | ||
1683 | |||
1684 | port = idx / 6 + box->pmu->pmu_idx * 4; | ||
1685 | idx %= 6; | ||
1686 | switch (idx) { | ||
1687 | case 0: | ||
1688 | reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG0(port); | ||
1689 | break; | ||
1690 | case 1: | ||
1691 | reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG1(port); | ||
1692 | break; | ||
1693 | case 2: | ||
1694 | case 3: | ||
1695 | reg1->reg = NHMEX_R_MSR_PORTN_QLX_CFG(port); | ||
1696 | break; | ||
1697 | case 4: | ||
1698 | case 5: | ||
1699 | if (idx == 4) | ||
1700 | reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port); | ||
1701 | else | ||
1702 | reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port); | ||
1703 | reg2->config = event->attr.config2; | ||
1704 | hwc->config |= event->attr.config & (~0ULL << 32); | ||
1705 | break; | ||
1706 | } | ||
1707 | return 0; | ||
1708 | } | ||
1709 | |||
1710 | static u64 nhmex_rbox_shared_reg_config(struct intel_uncore_box *box, int idx) | ||
1711 | { | ||
1712 | struct intel_uncore_extra_reg *er; | ||
1713 | unsigned long flags; | ||
1714 | u64 config; | ||
1715 | |||
1716 | er = &box->shared_regs[idx]; | ||
1717 | |||
1718 | raw_spin_lock_irqsave(&er->lock, flags); | ||
1719 | config = er->config; | ||
1720 | raw_spin_unlock_irqrestore(&er->lock, flags); | ||
1721 | |||
1722 | return config; | ||
1723 | } | ||
1724 | |||
1725 | static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
1726 | { | ||
1727 | struct hw_perf_event *hwc = &event->hw; | ||
1728 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
1729 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
1730 | int idx, er_idx; | ||
1731 | |||
1732 | idx = reg1->idx % 6; | ||
1733 | er_idx = idx; | ||
1734 | if (er_idx > 2) | ||
1735 | er_idx--; | ||
1736 | er_idx += (reg1->idx / 6) * 5; | ||
1737 | |||
1738 | switch (idx) { | ||
1739 | case 0: | ||
1740 | case 1: | ||
1741 | wrmsrl(reg1->reg, reg1->config); | ||
1742 | break; | ||
1743 | case 2: | ||
1744 | case 3: | ||
1745 | wrmsrl(reg1->reg, nhmex_rbox_shared_reg_config(box, er_idx)); | ||
1746 | break; | ||
1747 | case 4: | ||
1748 | case 5: | ||
1749 | wrmsrl(reg1->reg, reg1->config); | ||
1750 | wrmsrl(reg1->reg + 1, hwc->config >> 32); | ||
1751 | wrmsrl(reg1->reg + 2, reg2->config); | ||
1752 | break; | ||
1753 | } | ||
1754 | |||
1755 | wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 | | ||
1756 | (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK)); | ||
1757 | } | ||
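The three consecutive wrmsrl() calls for idx 4 and 5 above depend on the XBR MSR layout declared in the header hunk below, where MATCH and MASK sit at MM_CFG + 1 and MM_CFG + 2. For port 0, XBR set 1, the writes land as:

	/* 0xe60  XBR_SET1_MM_CFG  <- reg1->config       (xbr_mm_cfg) */
	/* 0xe61  XBR_SET1_MATCH   <- hwc->config >> 32  (xbr_match)  */
	/* 0xe62  XBR_SET1_MASK    <- reg2->config       (xbr_mask)   */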
1758 | |||
1759 | DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config:32-63"); | ||
1760 | DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config1:0-63"); | ||
1761 | DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63"); | ||
1762 | DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15"); | ||
1763 | DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31"); | ||
1764 | |||
1765 | static struct attribute *nhmex_uncore_rbox_formats_attr[] = { | ||
1766 | &format_attr_event5.attr, | ||
1767 | &format_attr_xbr_mm_cfg.attr, | ||
1768 | &format_attr_xbr_match.attr, | ||
1769 | &format_attr_xbr_mask.attr, | ||
1770 | &format_attr_qlx_cfg.attr, | ||
1771 | &format_attr_iperf_cfg.attr, | ||
1772 | NULL, | ||
1773 | }; | ||
1774 | |||
1775 | static struct attribute_group nhmex_uncore_rbox_format_group = { | ||
1776 | .name = "format", | ||
1777 | .attrs = nhmex_uncore_rbox_formats_attr, | ||
1778 | }; | ||
1779 | |||
1780 | static struct uncore_event_desc nhmex_uncore_rbox_events[] = { | ||
1781 | INTEL_UNCORE_EVENT_DESC(qpi0_flit_send, "event=0x0,iperf_cfg=0x80000000"), | ||
1782 | INTEL_UNCORE_EVENT_DESC(qpi1_flit_send, "event=0x6,iperf_cfg=0x80000000"), | ||
1783 | INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt, "event=0x0,iperf_cfg=0x40000000"), | ||
1784 | INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt, "event=0x6,iperf_cfg=0x40000000"), | ||
1785 | INTEL_UNCORE_EVENT_DESC(qpi0_data_response, "event=0x0,iperf_cfg=0xc4"), | ||
1786 | INTEL_UNCORE_EVENT_DESC(qpi1_data_response, "event=0x6,iperf_cfg=0xc4"), | ||
1787 | { /* end: all zeroes */ }, | ||
1788 | }; | ||
1789 | |||
1790 | static struct intel_uncore_ops nhmex_uncore_rbox_ops = { | ||
1791 | NHMEX_UNCORE_OPS_COMMON_INIT(), | ||
1792 | .enable_event = nhmex_rbox_msr_enable_event, | ||
1793 | .hw_config = nhmex_rbox_hw_config, | ||
1794 | .get_constraint = nhmex_rbox_get_constraint, | ||
1795 | .put_constraint = nhmex_rbox_put_constraint, | ||
1796 | }; | ||
1797 | |||
1798 | static struct intel_uncore_type nhmex_uncore_rbox = { | ||
1799 | .name = "rbox", | ||
1800 | .num_counters = 8, | ||
1801 | .num_boxes = 2, | ||
1802 | .perf_ctr_bits = 48, | ||
1803 | .event_ctl = NHMEX_R_MSR_PMON_CTL0, | ||
1804 | .perf_ctr = NHMEX_R_MSR_PMON_CNT0, | ||
1805 | .event_mask = NHMEX_R_PMON_RAW_EVENT_MASK, | ||
1806 | .box_ctl = NHMEX_R_MSR_GLOBAL_CTL, | ||
1807 | .msr_offset = NHMEX_R_MSR_OFFSET, | ||
1808 | .pair_ctr_ctl = 1, | ||
1809 | .num_shared_regs = 20, | ||
1810 | .event_descs = nhmex_uncore_rbox_events, | ||
1811 | .ops = &nhmex_uncore_rbox_ops, | ||
1812 | .format_group = &nhmex_uncore_rbox_format_group, | ||
1813 | }; | ||
1814 | |||
1815 | static struct intel_uncore_type *nhmex_msr_uncores[] = { | ||
1816 | &nhmex_uncore_ubox, | ||
1817 | &nhmex_uncore_cbox, | ||
1818 | &nhmex_uncore_bbox, | ||
1819 | &nhmex_uncore_sbox, | ||
1820 | &nhmex_uncore_mbox, | ||
1821 | &nhmex_uncore_rbox, | ||
1822 | &nhmex_uncore_wbox, | ||
1823 | NULL, | ||
1824 | }; | ||
1825 | /* end of Nehalem-EX uncore support */ | ||
1826 | |||
1827 | static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx) | ||
774 | { | 1828 | { |
775 | struct hw_perf_event *hwc = &event->hw; | 1829 | struct hw_perf_event *hwc = &event->hw; |
776 | 1830 | ||
@@ -787,8 +1841,7 @@ static void uncore_assign_hw_event(struct intel_uncore_box *box, | |||
787 | hwc->event_base = uncore_perf_ctr(box, hwc->idx); | 1841 | hwc->event_base = uncore_perf_ctr(box, hwc->idx); |
788 | } | 1842 | } |
789 | 1843 | ||
790 | static void uncore_perf_event_update(struct intel_uncore_box *box, | 1844 | static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event) |
791 | struct perf_event *event) | ||
792 | { | 1845 | { |
793 | u64 prev_count, new_count, delta; | 1846 | u64 prev_count, new_count, delta; |
794 | int shift; | 1847 | int shift; |
@@ -858,14 +1911,12 @@ static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box) | |||
858 | box->hrtimer.function = uncore_pmu_hrtimer; | 1911 | box->hrtimer.function = uncore_pmu_hrtimer; |
859 | } | 1912 | } |
860 | 1913 | ||
861 | struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, | 1914 | struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cpu) |
862 | int cpu) | ||
863 | { | 1915 | { |
864 | struct intel_uncore_box *box; | 1916 | struct intel_uncore_box *box; |
865 | int i, size; | 1917 | int i, size; |
866 | 1918 | ||
867 | size = sizeof(*box) + type->num_shared_regs * | 1919 | size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg); |
868 | sizeof(struct intel_uncore_extra_reg); | ||
869 | 1920 | ||
870 | box = kmalloc_node(size, GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu)); | 1921 | box = kmalloc_node(size, GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu)); |
871 | if (!box) | 1922 | if (!box) |
@@ -915,12 +1966,11 @@ static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event) | |||
915 | * perf core schedules events on the basis of cpu; uncore events are | 1966 | * perf core schedules events on the basis of cpu; uncore events are |
916 | * collected by one of the cpus inside a physical package. | 1967 | * collected by one of the cpus inside a physical package. |
917 | */ | 1968 | */ |
918 | return uncore_pmu_to_box(uncore_event_to_pmu(event), | 1969 | return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id()); |
919 | smp_processor_id()); | ||
920 | } | 1970 | } |
921 | 1971 | ||
922 | static int uncore_collect_events(struct intel_uncore_box *box, | 1972 | static int |
923 | struct perf_event *leader, bool dogrp) | 1973 | uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp) |
924 | { | 1974 | { |
925 | struct perf_event *event; | 1975 | struct perf_event *event; |
926 | int n, max_count; | 1976 | int n, max_count; |
@@ -952,8 +2002,7 @@ static int uncore_collect_events(struct intel_uncore_box *box, | |||
952 | } | 2002 | } |
953 | 2003 | ||
954 | static struct event_constraint * | 2004 | static struct event_constraint * |
955 | uncore_get_event_constraint(struct intel_uncore_box *box, | 2005 | uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event) |
956 | struct perf_event *event) | ||
957 | { | 2006 | { |
958 | struct intel_uncore_type *type = box->pmu->type; | 2007 | struct intel_uncore_type *type = box->pmu->type; |
959 | struct event_constraint *c; | 2008 | struct event_constraint *c; |
@@ -977,15 +2026,13 @@ uncore_get_event_constraint(struct intel_uncore_box *box, | |||
977 | return &type->unconstrainted; | 2026 | return &type->unconstrainted; |
978 | } | 2027 | } |
979 | 2028 | ||
980 | static void uncore_put_event_constraint(struct intel_uncore_box *box, | 2029 | static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event) |
981 | struct perf_event *event) | ||
982 | { | 2030 | { |
983 | if (box->pmu->type->ops->put_constraint) | 2031 | if (box->pmu->type->ops->put_constraint) |
984 | box->pmu->type->ops->put_constraint(box, event); | 2032 | box->pmu->type->ops->put_constraint(box, event); |
985 | } | 2033 | } |
986 | 2034 | ||
987 | static int uncore_assign_events(struct intel_uncore_box *box, | 2035 | static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n) |
988 | int assign[], int n) | ||
989 | { | 2036 | { |
990 | unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)]; | 2037 | unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)]; |
991 | struct event_constraint *c, *constraints[UNCORE_PMC_IDX_MAX]; | 2038 | struct event_constraint *c, *constraints[UNCORE_PMC_IDX_MAX]; |
@@ -1407,8 +2454,7 @@ static bool pcidrv_registered; | |||
1407 | /* | 2454 | /* |
1408 | * add a pci uncore device | 2455 | * add a pci uncore device |
1409 | */ | 2456 | */ |
1410 | static int __devinit uncore_pci_add(struct intel_uncore_type *type, | 2457 | static int __devinit uncore_pci_add(struct intel_uncore_type *type, struct pci_dev *pdev) |
1411 | struct pci_dev *pdev) | ||
1412 | { | 2458 | { |
1413 | struct intel_uncore_pmu *pmu; | 2459 | struct intel_uncore_pmu *pmu; |
1414 | struct intel_uncore_box *box; | 2460 | struct intel_uncore_box *box; |
@@ -1485,6 +2531,7 @@ static int __devinit uncore_pci_probe(struct pci_dev *pdev, | |||
1485 | struct intel_uncore_type *type; | 2531 | struct intel_uncore_type *type; |
1486 | 2532 | ||
1487 | type = (struct intel_uncore_type *)id->driver_data; | 2533 | type = (struct intel_uncore_type *)id->driver_data; |
2534 | |||
1488 | return uncore_pci_add(type, pdev); | 2535 | return uncore_pci_add(type, pdev); |
1489 | } | 2536 | } |
1490 | 2537 | ||
@@ -1612,8 +2659,8 @@ static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id) | |||
1612 | return 0; | 2659 | return 0; |
1613 | } | 2660 | } |
1614 | 2661 | ||
1615 | static void __cpuinit uncore_change_context(struct intel_uncore_type **uncores, | 2662 | static void __cpuinit |
1616 | int old_cpu, int new_cpu) | 2663 | uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu) |
1617 | { | 2664 | { |
1618 | struct intel_uncore_type *type; | 2665 | struct intel_uncore_type *type; |
1619 | struct intel_uncore_pmu *pmu; | 2666 | struct intel_uncore_pmu *pmu; |
@@ -1694,8 +2741,8 @@ static void __cpuinit uncore_event_init_cpu(int cpu) | |||
1694 | uncore_change_context(pci_uncores, -1, cpu); | 2741 | uncore_change_context(pci_uncores, -1, cpu); |
1695 | } | 2742 | } |
1696 | 2743 | ||
1697 | static int __cpuinit uncore_cpu_notifier(struct notifier_block *self, | 2744 | static int |
1698 | unsigned long action, void *hcpu) | 2745 | __cpuinit uncore_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) |
1699 | { | 2746 | { |
1700 | unsigned int cpu = (long)hcpu; | 2747 | unsigned int cpu = (long)hcpu; |
1701 | 2748 | ||
@@ -1732,12 +2779,12 @@ static int __cpuinit uncore_cpu_notifier(struct notifier_block *self, | |||
1732 | } | 2779 | } |
1733 | 2780 | ||
1734 | static struct notifier_block uncore_cpu_nb __cpuinitdata = { | 2781 | static struct notifier_block uncore_cpu_nb __cpuinitdata = { |
1735 | .notifier_call = uncore_cpu_notifier, | 2782 | .notifier_call = uncore_cpu_notifier, |
1736 | /* | 2783 | /* |
1737 | * to migrate uncore events, our notifier should be executed | 2784 | * to migrate uncore events, our notifier should be executed |
1738 | * before perf core's notifier. | 2785 | * before perf core's notifier. |
1739 | */ | 2786 | */ |
1740 | .priority = CPU_PRI_PERF + 1, | 2787 | .priority = CPU_PRI_PERF + 1, |
1741 | }; | 2788 | }; |
1742 | 2789 | ||
1743 | static void __init uncore_cpu_setup(void *dummy) | 2790 | static void __init uncore_cpu_setup(void *dummy) |
@@ -1767,6 +2814,9 @@ static int __init uncore_cpu_init(void) | |||
1767 | snbep_uncore_cbox.num_boxes = max_cores; | 2814 | snbep_uncore_cbox.num_boxes = max_cores; |
1768 | msr_uncores = snbep_msr_uncores; | 2815 | msr_uncores = snbep_msr_uncores; |
1769 | break; | 2816 | break; |
2817 | case 46: | ||
2818 | msr_uncores = nhmex_msr_uncores; | ||
2819 | break; | ||
1770 | default: | 2820 | default: |
1771 | return 0; | 2821 | return 0; |
1772 | } | 2822 | } |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h index b13e9ea81def..f3851892e077 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h | |||
@@ -5,8 +5,6 @@ | |||
5 | #include "perf_event.h" | 5 | #include "perf_event.h" |
6 | 6 | ||
7 | #define UNCORE_PMU_NAME_LEN 32 | 7 | #define UNCORE_PMU_NAME_LEN 32 |
8 | #define UNCORE_BOX_HASH_SIZE 8 | ||
9 | |||
10 | #define UNCORE_PMU_HRTIMER_INTERVAL (60 * NSEC_PER_SEC) | 8 | #define UNCORE_PMU_HRTIMER_INTERVAL (60 * NSEC_PER_SEC) |
11 | 9 | ||
12 | #define UNCORE_FIXED_EVENT 0xff | 10 | #define UNCORE_FIXED_EVENT 0xff |
@@ -115,6 +113,10 @@ | |||
115 | SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \ | 113 | SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \ |
116 | SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET) | 114 | SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET) |
117 | 115 | ||
116 | #define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK \ | ||
117 | (SNBEP_PMON_RAW_EVENT_MASK | \ | ||
118 | SNBEP_PMON_CTL_EV_SEL_EXT) | ||
119 | |||
118 | /* SNB-EP pci control register */ | 120 | /* SNB-EP pci control register */ |
119 | #define SNBEP_PCI_PMON_BOX_CTL 0xf4 | 121 | #define SNBEP_PCI_PMON_BOX_CTL 0xf4 |
120 | #define SNBEP_PCI_PMON_CTL0 0xd8 | 122 | #define SNBEP_PCI_PMON_CTL0 0xd8 |
@@ -158,6 +160,193 @@ | |||
158 | #define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc | 160 | #define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc |
159 | #define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd | 161 | #define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd |
160 | 162 | ||
163 | /* NHM-EX event control */ | ||
164 | #define NHMEX_PMON_CTL_EV_SEL_MASK 0x000000ff | ||
165 | #define NHMEX_PMON_CTL_UMASK_MASK 0x0000ff00 | ||
166 | #define NHMEX_PMON_CTL_EN_BIT0 (1 << 0) | ||
167 | #define NHMEX_PMON_CTL_EDGE_DET (1 << 18) | ||
168 | #define NHMEX_PMON_CTL_PMI_EN (1 << 20) | ||
169 | #define NHMEX_PMON_CTL_EN_BIT22 (1 << 22) | ||
170 | #define NHMEX_PMON_CTL_INVERT (1 << 23) | ||
171 | #define NHMEX_PMON_CTL_TRESH_MASK 0xff000000 | ||
172 | #define NHMEX_PMON_RAW_EVENT_MASK (NHMEX_PMON_CTL_EV_SEL_MASK | \ | ||
173 | NHMEX_PMON_CTL_UMASK_MASK | \ | ||
174 | NHMEX_PMON_CTL_EDGE_DET | \ | ||
175 | NHMEX_PMON_CTL_INVERT | \ | ||
176 | NHMEX_PMON_CTL_TRESH_MASK) | ||
177 | |||
178 | /* NHM-EX Ubox */ | ||
179 | #define NHMEX_U_MSR_PMON_GLOBAL_CTL 0xc00 | ||
180 | #define NHMEX_U_MSR_PMON_CTR 0xc11 | ||
181 | #define NHMEX_U_MSR_PMON_EV_SEL 0xc10 | ||
182 | |||
183 | #define NHMEX_U_PMON_GLOBAL_EN (1 << 0) | ||
184 | #define NHMEX_U_PMON_GLOBAL_PMI_CORE_SEL 0x0000001e | ||
185 | #define NHMEX_U_PMON_GLOBAL_EN_ALL (1 << 28) | ||
186 | #define NHMEX_U_PMON_GLOBAL_RST_ALL (1 << 29) | ||
187 | #define NHMEX_U_PMON_GLOBAL_FRZ_ALL (1 << 31) | ||
188 | |||
189 | #define NHMEX_U_PMON_RAW_EVENT_MASK \ | ||
190 | (NHMEX_PMON_CTL_EV_SEL_MASK | \ | ||
191 | NHMEX_PMON_CTL_EDGE_DET) | ||
192 | |||
193 | /* NHM-EX Cbox */ | ||
194 | #define NHMEX_C0_MSR_PMON_GLOBAL_CTL 0xd00 | ||
195 | #define NHMEX_C0_MSR_PMON_CTR0 0xd11 | ||
196 | #define NHMEX_C0_MSR_PMON_EV_SEL0 0xd10 | ||
197 | #define NHMEX_C_MSR_OFFSET 0x20 | ||
198 | |||
199 | /* NHM-EX Bbox */ | ||
200 | #define NHMEX_B0_MSR_PMON_GLOBAL_CTL 0xc20 | ||
201 | #define NHMEX_B0_MSR_PMON_CTR0 0xc31 | ||
202 | #define NHMEX_B0_MSR_PMON_CTL0 0xc30 | ||
203 | #define NHMEX_B_MSR_OFFSET 0x40 | ||
204 | #define NHMEX_B0_MSR_MATCH 0xe45 | ||
205 | #define NHMEX_B0_MSR_MASK 0xe46 | ||
206 | #define NHMEX_B1_MSR_MATCH 0xe4d | ||
207 | #define NHMEX_B1_MSR_MASK 0xe4e | ||
208 | |||
209 | #define NHMEX_B_PMON_CTL_EN (1 << 0) | ||
210 | #define NHMEX_B_PMON_CTL_EV_SEL_SHIFT 1 | ||
211 | #define NHMEX_B_PMON_CTL_EV_SEL_MASK \ | ||
212 | (0x1f << NHMEX_B_PMON_CTL_EV_SEL_SHIFT) | ||
213 | #define NHMEX_B_PMON_CTR_SHIFT 6 | ||
214 | #define NHMEX_B_PMON_CTR_MASK \ | ||
215 | (0x3 << NHMEX_B_PMON_CTR_SHIFT) | ||
216 | #define NHMEX_B_PMON_RAW_EVENT_MASK \ | ||
217 | (NHMEX_B_PMON_CTL_EV_SEL_MASK | \ | ||
218 | NHMEX_B_PMON_CTR_MASK) | ||
219 | |||
220 | /* NHM-EX Sbox */ | ||
221 | #define NHMEX_S0_MSR_PMON_GLOBAL_CTL 0xc40 | ||
222 | #define NHMEX_S0_MSR_PMON_CTR0 0xc51 | ||
223 | #define NHMEX_S0_MSR_PMON_CTL0 0xc50 | ||
224 | #define NHMEX_S_MSR_OFFSET 0x80 | ||
225 | #define NHMEX_S0_MSR_MM_CFG 0xe48 | ||
226 | #define NHMEX_S0_MSR_MATCH 0xe49 | ||
227 | #define NHMEX_S0_MSR_MASK 0xe4a | ||
228 | #define NHMEX_S1_MSR_MM_CFG 0xe58 | ||
229 | #define NHMEX_S1_MSR_MATCH 0xe59 | ||
230 | #define NHMEX_S1_MSR_MASK 0xe5a | ||
231 | |||
232 | #define NHMEX_S_PMON_MM_CFG_EN (0x1ULL << 63) | ||
233 | |||
234 | /* NHM-EX Mbox */ | ||
235 | #define NHMEX_M0_MSR_GLOBAL_CTL 0xca0 | ||
236 | #define NHMEX_M0_MSR_PMU_DSP 0xca5 | ||
237 | #define NHMEX_M0_MSR_PMU_ISS 0xca6 | ||
238 | #define NHMEX_M0_MSR_PMU_MAP 0xca7 | ||
239 | #define NHMEX_M0_MSR_PMU_MSC_THR 0xca8 | ||
240 | #define NHMEX_M0_MSR_PMU_PGT 0xca9 | ||
241 | #define NHMEX_M0_MSR_PMU_PLD 0xcaa | ||
242 | #define NHMEX_M0_MSR_PMU_ZDP_CTL_FVC 0xcab | ||
243 | #define NHMEX_M0_MSR_PMU_CTL0 0xcb0 | ||
244 | #define NHMEX_M0_MSR_PMU_CNT0 0xcb1 | ||
245 | #define NHMEX_M_MSR_OFFSET 0x40 | ||
246 | #define NHMEX_M0_MSR_PMU_MM_CFG 0xe54 | ||
247 | #define NHMEX_M1_MSR_PMU_MM_CFG 0xe5c | ||
248 | |||
249 | #define NHMEX_M_PMON_MM_CFG_EN (1ULL << 63) | ||
250 | #define NHMEX_M_PMON_ADDR_MATCH_MASK 0x3ffffffffULL | ||
251 | #define NHMEX_M_PMON_ADDR_MASK_MASK 0x7ffffffULL | ||
252 | #define NHMEX_M_PMON_ADDR_MASK_SHIFT 34 | ||
253 | |||
254 | #define NHMEX_M_PMON_CTL_EN (1 << 0) | ||
255 | #define NHMEX_M_PMON_CTL_PMI_EN (1 << 1) | ||
256 | #define NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT 2 | ||
257 | #define NHMEX_M_PMON_CTL_COUNT_MODE_MASK \ | ||
258 | (0x3 << NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT) | ||
259 | #define NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT 4 | ||
260 | #define NHMEX_M_PMON_CTL_STORAGE_MODE_MASK \ | ||
261 | (0x3 << NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT) | ||
262 | #define NHMEX_M_PMON_CTL_WRAP_MODE (1 << 6) | ||
263 | #define NHMEX_M_PMON_CTL_FLAG_MODE (1 << 7) | ||
264 | #define NHMEX_M_PMON_CTL_INC_SEL_SHIFT 9 | ||
265 | #define NHMEX_M_PMON_CTL_INC_SEL_MASK \ | ||
266 | (0x1f << NHMEX_M_PMON_CTL_INC_SEL_SHIFT) | ||
267 | #define NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT 19 | ||
268 | #define NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK \ | ||
269 | (0x7 << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT) | ||
270 | #define NHMEX_M_PMON_RAW_EVENT_MASK \ | ||
271 | (NHMEX_M_PMON_CTL_COUNT_MODE_MASK | \ | ||
272 | NHMEX_M_PMON_CTL_STORAGE_MODE_MASK | \ | ||
273 | NHMEX_M_PMON_CTL_WRAP_MODE | \ | ||
274 | NHMEX_M_PMON_CTL_FLAG_MODE | \ | ||
275 | NHMEX_M_PMON_CTL_INC_SEL_MASK | \ | ||
276 | NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK) | ||
277 | |||
278 | |||
279 | #define NHMEX_M_PMON_ZDP_CTL_FVC_FVID_MASK 0x1f | ||
280 | #define NHMEX_M_PMON_ZDP_CTL_FVC_BCMD_MASK (0x7 << 5) | ||
281 | #define NHMEX_M_PMON_ZDP_CTL_FVC_RSP_MASK (0x7 << 8) | ||
282 | #define NHMEX_M_PMON_ZDP_CTL_FVC_PBOX_INIT_ERR (1 << 23) | ||
283 | #define NHMEX_M_PMON_ZDP_CTL_FVC_MASK \ | ||
284 | (NHMEX_M_PMON_ZDP_CTL_FVC_FVID_MASK | \ | ||
285 | NHMEX_M_PMON_ZDP_CTL_FVC_BCMD_MASK | \ | ||
286 | NHMEX_M_PMON_ZDP_CTL_FVC_RSP_MASK | \ | ||
287 | NHMEX_M_PMON_ZDP_CTL_FVC_PBOX_INIT_ERR) | ||
288 | #define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7 << (11 + 3 * (n))) | ||
289 | |||
290 | /* | ||
291 | * use bits 9~13 to select the event if bit 7 is not set; | ||
292 | * otherwise use bits 19~21 to select the event. | ||
293 | */ | ||
294 | #define MBOX_INC_SEL(x) ((x) << NHMEX_M_PMON_CTL_INC_SEL_SHIFT) | ||
295 | #define MBOX_SET_FLAG_SEL(x) (((x) << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT) | \ | ||
296 | NHMEX_M_PMON_CTL_FLAG_MODE) | ||
297 | #define MBOX_INC_SEL_MASK (NHMEX_M_PMON_CTL_INC_SEL_MASK | \ | ||
298 | NHMEX_M_PMON_CTL_FLAG_MODE) | ||
299 | #define MBOX_SET_FLAG_SEL_MASK (NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK | \ | ||
300 | NHMEX_M_PMON_CTL_FLAG_MODE) | ||
301 | #define MBOX_INC_SEL_EXTAR_REG(c, r) \ | ||
302 | EVENT_EXTRA_REG(MBOX_INC_SEL(c), NHMEX_M0_MSR_PMU_##r, \ | ||
303 | MBOX_INC_SEL_MASK, (u64)-1, NHMEX_M_##r) | ||
304 | #define MBOX_SET_FLAG_SEL_EXTRA_REG(c, r) \ | ||
305 | EVENT_EXTRA_REG(MBOX_SET_FLAG_SEL(c), NHMEX_M0_MSR_PMU_##r, \ | ||
306 | MBOX_SET_FLAG_SEL_MASK, \ | ||
307 | (u64)-1, NHMEX_M_##r) | ||
308 | |||
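A worked example of these helpers, using the shift definitions above:

	/* MBOX_INC_SEL(0xd)      == 0xd << 9               == 0x00001a00 */
	/* MBOX_SET_FLAG_SEL(0x3) == (0x3 << 19) | (1 << 7) == 0x00180080 */

MBOX_INC_SEL(0xd) is exactly the inc_sel=0xd encoding used by the mbox bbox_cmds_* events earlier in this diff; the NHMEX_M_PMON_CTL_FLAG_MODE bit folded into MBOX_SET_FLAG_SEL means such an extra-reg entry only matches events programmed in flag mode.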
309 | /* NHM-EX Rbox */ | ||
310 | #define NHMEX_R_MSR_GLOBAL_CTL 0xe00 | ||
311 | #define NHMEX_R_MSR_PMON_CTL0 0xe10 | ||
312 | #define NHMEX_R_MSR_PMON_CNT0 0xe11 | ||
313 | #define NHMEX_R_MSR_OFFSET 0x20 | ||
314 | |||
315 | #define NHMEX_R_MSR_PORTN_QLX_CFG(n) \ | ||
316 | ((n) < 4 ? (0xe0c + (n)) : (0xe2c + (n) - 4)) | ||
317 | #define NHMEX_R_MSR_PORTN_IPERF_CFG0(n) (0xe04 + (n)) | ||
318 | #define NHMEX_R_MSR_PORTN_IPERF_CFG1(n) (0xe24 + (n)) | ||
319 | #define NHMEX_R_MSR_PORTN_XBR_OFFSET(n) \ | ||
320 | (((n) < 4 ? 0 : 0x10) + (n) * 4) | ||
321 | #define NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) \ | ||
322 | (0xe60 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n)) | ||
323 | #define NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(n) \ | ||
324 | (NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 1) | ||
325 | #define NHMEX_R_MSR_PORTN_XBR_SET1_MASK(n) \ | ||
326 | (NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 2) | ||
327 | #define NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) \ | ||
328 | (0xe70 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n)) | ||
329 | #define NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(n) \ | ||
330 | (NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 1) | ||
331 | #define NHMEX_R_MSR_PORTN_XBR_SET2_MASK(n) \ | ||
332 | (NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 2) | ||
333 | |||
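The port-to-MSR arithmetic above splits ports 0~3 and 4~7 into two register banks. A worked example:

	/* QLX_CFG(2)         = 0xe0c + 2        = 0xe0e */
	/* QLX_CFG(4)         = 0xe2c + (4 - 4)  = 0xe2c */
	/* XBR_OFFSET(5)      = 0x10 + 5 * 4     = 0x24  */
	/* XBR_SET1_MM_CFG(5) = 0xe60 + 0x24     = 0xe84 */
	/* XBR_SET1_MATCH(5)  = 0xe85, XBR_SET1_MASK(5) = 0xe86 */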
334 | #define NHMEX_R_PMON_CTL_EN (1 << 0) | ||
335 | #define NHMEX_R_PMON_CTL_EV_SEL_SHIFT 1 | ||
336 | #define NHMEX_R_PMON_CTL_EV_SEL_MASK \ | ||
337 | (0x1f << NHMEX_R_PMON_CTL_EV_SEL_SHIFT) | ||
338 | #define NHMEX_R_PMON_CTL_PMI_EN (1 << 6) | ||
339 | #define NHMEX_R_PMON_RAW_EVENT_MASK NHMEX_R_PMON_CTL_EV_SEL_MASK | ||
340 | |||
341 | /* NHM-EX Wbox */ | ||
342 | #define NHMEX_W_MSR_GLOBAL_CTL 0xc80 | ||
343 | #define NHMEX_W_MSR_PMON_CNT0 0xc90 | ||
344 | #define NHMEX_W_MSR_PMON_EVT_SEL0 0xc91 | ||
345 | #define NHMEX_W_MSR_PMON_FIXED_CTR 0x394 | ||
346 | #define NHMEX_W_MSR_PMON_FIXED_CTL 0x395 | ||
347 | |||
348 | #define NHMEX_W_PMON_GLOBAL_FIXED_EN (1ULL << 31) | ||
349 | |||
161 | struct intel_uncore_ops; | 350 | struct intel_uncore_ops; |
162 | struct intel_uncore_pmu; | 351 | struct intel_uncore_pmu; |
163 | struct intel_uncore_box; | 352 | struct intel_uncore_box; |
@@ -178,6 +367,7 @@ struct intel_uncore_type { | |||
178 | unsigned msr_offset; | 367 | unsigned msr_offset; |
179 | unsigned num_shared_regs:8; | 368 | unsigned num_shared_regs:8; |
180 | unsigned single_fixed:1; | 369 | unsigned single_fixed:1; |
370 | unsigned pair_ctr_ctl:1; | ||
181 | struct event_constraint unconstrainted; | 371 | struct event_constraint unconstrainted; |
182 | struct event_constraint *constraints; | 372 | struct event_constraint *constraints; |
183 | struct intel_uncore_pmu *pmus; | 373 | struct intel_uncore_pmu *pmus; |
@@ -213,7 +403,7 @@ struct intel_uncore_pmu { | |||
213 | 403 | ||
214 | struct intel_uncore_extra_reg { | 404 | struct intel_uncore_extra_reg { |
215 | raw_spinlock_t lock; | 405 | raw_spinlock_t lock; |
216 | u64 config1; | 406 | u64 config, config1, config2; |
217 | atomic_t ref; | 407 | atomic_t ref; |
218 | }; | 408 | }; |
219 | 409 | ||
@@ -323,14 +513,16 @@ unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box) | |||
323 | static inline | 513 | static inline |
324 | unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx) | 514 | unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx) |
325 | { | 515 | { |
326 | return idx + box->pmu->type->event_ctl + | 516 | return box->pmu->type->event_ctl + |
517 | (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + | ||
327 | box->pmu->type->msr_offset * box->pmu->pmu_idx; | 518 | box->pmu->type->msr_offset * box->pmu->pmu_idx; |
328 | } | 519 | } |
329 | 520 | ||
330 | static inline | 521 | static inline |
331 | unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx) | 522 | unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx) |
332 | { | 523 | { |
333 | return idx + box->pmu->type->perf_ctr + | 524 | return box->pmu->type->perf_ctr + |
525 | (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + | ||
334 | box->pmu->type->msr_offset * box->pmu->pmu_idx; | 526 | box->pmu->type->msr_offset * box->pmu->pmu_idx; |
335 | } | 527 | } |
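With the new pair_ctr_ctl flag, a box's control and counter registers are laid out interleaved (CTL0, CNT0, CTL1, CNT1, ...), so the counter index must be scaled by 2. A worked example with the mbox values from this patch (event_ctl 0xcb0, perf_ctr 0xcb1, msr_offset 0x40, pair_ctr_ctl 1), for counter idx on box 1:

	/* ctl = 0xcb0 + 2 * idx + 0x40 */
	/* ctr = 0xcb1 + 2 * idx + 0x40 */

Types that leave pair_ctr_ctl clear keep the old base + idx stride.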
336 | 528 | ||
@@ -422,3 +614,8 @@ static inline void uncore_box_init(struct intel_uncore_box *box) | |||
422 | box->pmu->type->ops->init_box(box); | 614 | box->pmu->type->ops->init_box(box); |
423 | } | 615 | } |
424 | } | 616 | } |
617 | |||
618 | static inline bool uncore_box_is_fake(struct intel_uncore_box *box) | ||
619 | { | ||
620 | return (box->phys_id < 0); | ||
621 | } | ||
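uncore_alloc_box() initializes phys_id to -1, and only boxes put into service are assigned a real physical package id, so the temporary box built for event-group validation stays "fake". The constraint code above uses this to avoid committing shared-register state during a dry run, e.g. (mirroring nhmex_rbox_get_constraint()):

	if (!uncore_box_is_fake(box))
		reg1->alloc = 1;	/* record allocation only on a live box */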