author	Wei Huang <wehuang@redhat.com>	2015-06-19 08:15:28 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2015-06-19 11:16:30 -0400
commit	e84cfe4ce0113a6c5e3bdf70e20a21552ad3a28d (patch)
tree	9b18afdbe80b4db40e42298ab242da93c6745e23
parent	212dba1267a1be228635014fa35c98a59853de9e (diff)
KVM: x86/vPMU: whitespace and stylistic adjustments in PMU code
Signed-off-by: Wei Huang <wei@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--	arch/x86/kvm/pmu.c	112
1 file changed, 64 insertions(+), 48 deletions(-)
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 38819bc923cf..24d213bd42d4 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -103,25 +103,31 @@ static void kvm_perf_overflow(struct perf_event *perf_event,
 {
 	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
-	if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
+
+	if (!test_and_set_bit(pmc->idx,
+			      (unsigned long *)&pmu->reprogram_pmi)) {
 		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
 		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
 	}
 }
 
 static void kvm_perf_overflow_intr(struct perf_event *perf_event,
-		struct perf_sample_data *data, struct pt_regs *regs)
+				   struct perf_sample_data *data,
+				   struct pt_regs *regs)
 {
 	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
-	if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
+
+	if (!test_and_set_bit(pmc->idx,
+			      (unsigned long *)&pmu->reprogram_pmi)) {
 		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
 		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
+
 		/*
 		 * Inject PMI. If vcpu was in a guest mode during NMI PMI
 		 * can be ejected on a guest mode re-entry. Otherwise we can't
 		 * be sure that vcpu wasn't executing hlt instruction at the
-		 * time of vmexit and is not going to re-enter guest mode until,
+		 * time of vmexit and is not going to re-enter guest mode until
 		 * woken up. So we should wake it, but this is impossible from
 		 * NMI context. Do it from irq work instead.
 		 */
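Both overflow handlers above coalesce work with test_and_set_bit(): only the first overflow of a counter since its last reprogram sets the reprogram_pmi bit and raises KVM_REQ_PMU; later overflows of the same counter are ignored until the bit is cleared again. A minimal userspace analogue of that coalescing pattern, using C11 atomics (the name reprogram_pmi and the index values are just stand-ins, not the kernel code):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_ulong reprogram_pmi;	/* one pending bit per counter */

	/* true only for the first caller to set this counter's bit, which
	 * mirrors the !test_and_set_bit() check in the handlers above */
	static bool mark_pending(int idx)
	{
		unsigned long mask = 1UL << idx;

		return !(atomic_fetch_or(&reprogram_pmi, mask) & mask);
	}

	int main(void)
	{
		printf("%d\n", mark_pending(3));	/* 1: queue the request */
		printf("%d\n", mark_pending(3));	/* 0: already pending */
		return 0;
	}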
@@ -157,8 +163,9 @@ static void pmc_stop_counter(struct kvm_pmc *pmc)
 }
 
 static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
-		unsigned config, bool exclude_user, bool exclude_kernel,
-		bool intr, bool in_tx, bool in_tx_cp)
+				  unsigned config, bool exclude_user,
+				  bool exclude_kernel, bool intr,
+				  bool in_tx, bool in_tx_cp)
 {
 	struct perf_event *event;
 	struct perf_event_attr attr = {
@@ -171,6 +178,7 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
 		.exclude_kernel = exclude_kernel,
 		.config = config,
 	};
+
 	if (in_tx)
 		attr.config |= HSW_IN_TX;
 	if (in_tx_cp)
@@ -182,8 +190,8 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
 			intr ? kvm_perf_overflow_intr :
 			kvm_perf_overflow, pmc);
 	if (IS_ERR(event)) {
-		printk_once("kvm: pmu event creation failed %ld\n",
-				PTR_ERR(event));
+		printk_once("kvm_pmu: event creation failed %ld\n",
+			    PTR_ERR(event));
 		return;
 	}
 
@@ -227,10 +235,10 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 	unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
 
 	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
-				ARCH_PERFMON_EVENTSEL_INV |
-				ARCH_PERFMON_EVENTSEL_CMASK |
-				HSW_IN_TX |
-				HSW_IN_TX_CHECKPOINTED))) {
+			  ARCH_PERFMON_EVENTSEL_INV |
+			  ARCH_PERFMON_EVENTSEL_CMASK |
+			  HSW_IN_TX |
+			  HSW_IN_TX_CHECKPOINTED))) {
 		config = find_arch_event(pmc_to_pmu(pmc), event_select,
 					 unit_mask);
 		if (config != PERF_COUNT_HW_MAX)
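For context, reprogram_gp_counter() splits the guest-written PERFEVTSEL value into its architectural fields before the check above. A minimal decode, with locally defined masks standing in for the ARCH_PERFMON_EVENTSEL_* constants and a made-up eventsel value:

	#include <stdint.h>
	#include <stdio.h>

	#define EVENTSEL_EVENT	0xffULL		/* bits 7:0, event select */
	#define EVENTSEL_UMASK	0xff00ULL	/* bits 15:8, unit mask */

	int main(void)
	{
		uint64_t eventsel = 0x41003c;	/* hypothetical sample value */
		uint8_t event_select = eventsel & EVENTSEL_EVENT;
		uint8_t unit_mask = (eventsel & EVENTSEL_UMASK) >> 8;

		printf("event=0x%02x umask=0x%02x\n", event_select, unit_mask);
		return 0;
	}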
@@ -241,28 +249,28 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 		config = eventsel & X86_RAW_EVENT_MASK;
 
 	pmc_reprogram_counter(pmc, type, config,
-			!(eventsel & ARCH_PERFMON_EVENTSEL_USR),
-			!(eventsel & ARCH_PERFMON_EVENTSEL_OS),
-			eventsel & ARCH_PERFMON_EVENTSEL_INT,
-			(eventsel & HSW_IN_TX),
-			(eventsel & HSW_IN_TX_CHECKPOINTED));
+			      !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
+			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
+			      eventsel & ARCH_PERFMON_EVENTSEL_INT,
+			      (eventsel & HSW_IN_TX),
+			      (eventsel & HSW_IN_TX_CHECKPOINTED));
 }
 
-static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
+static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
 {
-	unsigned en = en_pmi & 0x3;
-	bool pmi = en_pmi & 0x8;
+	unsigned en_field = ctrl & 0x3;
+	bool pmi = ctrl & 0x8;
 
 	pmc_stop_counter(pmc);
 
-	if (!en || !pmc_is_enabled(pmc))
+	if (!en_field || !pmc_is_enabled(pmc))
 		return;
 
 	pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
-			arch_events[fixed_pmc_events[idx]].event_type,
-			!(en & 0x2), /* exclude user */
-			!(en & 0x1), /* exclude kernel */
-			pmi, false, false);
+			      arch_events[fixed_pmc_events[idx]].event_type,
+			      !(en_field & 0x2), /* exclude user */
+			      !(en_field & 0x1), /* exclude kernel */
+			      pmi, false, false);
 }
 
 static inline u8 fixed_ctrl_field(u64 ctrl, int idx)
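The ctrl/en_field/pmi names introduced above follow the per-counter layout of IA32_FIXED_CTR_CTRL: within each 4-bit field, bit 0 enables counting in ring 0, bit 1 enables counting in ring 3, and bit 3 requests a PMI on overflow, which is exactly what the & 0x3 and & 0x8 masks pick apart. A standalone decode of one such field (the sample value is made up):

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned char ctrl = 0xb;	/* 0b1011: OS + user + PMI */
		unsigned en_field = ctrl & 0x3;
		bool pmi = ctrl & 0x8;

		printf("count kernel: %d\n", !!(en_field & 0x1));
		printf("count user:   %d\n", !!(en_field & 0x2));
		printf("PMI on overflow: %d\n", pmi);
		return 0;
	}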
@@ -275,21 +283,22 @@ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
 	int i;
 
 	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
-		u8 en_pmi = fixed_ctrl_field(data, i);
+		u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
+		u8 new_ctrl = fixed_ctrl_field(data, i);
 		struct kvm_pmc *pmc = get_fixed_pmc_idx(pmu, i);
 
-		if (fixed_ctrl_field(pmu->fixed_ctr_ctrl, i) == en_pmi)
+		if (old_ctrl == new_ctrl)
 			continue;
 
-		reprogram_fixed_counter(pmc, en_pmi, i);
+		reprogram_fixed_counter(pmc, new_ctrl, i);
 	}
 
 	pmu->fixed_ctr_ctrl = data;
 }
 
-static void reprogram_counter(struct kvm_pmu *pmu, int idx)
+static void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
 {
-	struct kvm_pmc *pmc = global_idx_to_pmc(pmu, idx);
+	struct kvm_pmc *pmc = global_idx_to_pmc(pmu, pmc_idx);
 
 	if (!pmc)
 		return;
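fixed_ctrl_field() itself lies outside these hunks; judging from its callers it extracts the 4-bit control field of fixed counter idx from the packed IA32_FIXED_CTR_CTRL value. A sketch under that assumption, with typedefs standing in for the kernel's u8/u64:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint8_t u8;
	typedef uint64_t u64;

	static inline u8 fixed_ctrl_field(u64 ctrl, int idx)
	{
		return (ctrl >> (idx * 4)) & 0xf;	/* 4 bits per counter */
	}

	int main(void)
	{
		u64 fixed_ctr_ctrl = 0xb0;	/* counter 1: OS + user + PMI */

		printf("ctr0=0x%x ctr1=0x%x\n",
		       fixed_ctrl_field(fixed_ctr_ctrl, 0),
		       fixed_ctrl_field(fixed_ctr_ctrl, 1));
		return 0;
	}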
@@ -297,9 +306,10 @@ static void reprogram_counter(struct kvm_pmu *pmu, int idx)
 	if (pmc_is_gp(pmc))
 		reprogram_gp_counter(pmc, pmc->eventsel);
 	else {
-		int fidx = idx - INTEL_PMC_IDX_FIXED;
-		reprogram_fixed_counter(pmc,
-			fixed_ctrl_field(pmu->fixed_ctr_ctrl, fidx), fidx);
+		int idx = pmc_idx - INTEL_PMC_IDX_FIXED;
+		u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, idx);
+
+		reprogram_fixed_counter(pmc, ctrl, idx);
 	}
 }
 
@@ -423,37 +433,43 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	return 1;
 }
 
-int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned pmc)
+/* check if idx is a valid index to access PMU */
+int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-	bool fixed = pmc & (1u << 30);
-	pmc &= ~(3u << 30);
-	return (!fixed && pmc >= pmu->nr_arch_gp_counters) ||
-		(fixed && pmc >= pmu->nr_arch_fixed_counters);
+	bool fixed = idx & (1u << 30);
+	idx &= ~(3u << 30);
+	return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
+		(fixed && idx >= pmu->nr_arch_fixed_counters);
 }
 
-int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
+int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-	bool fast_mode = pmc & (1u << 31);
-	bool fixed = pmc & (1u << 30);
+	bool fast_mode = idx & (1u << 31);
+	bool fixed = idx & (1u << 30);
 	struct kvm_pmc *counters;
-	u64 ctr;
+	u64 ctr_val;
 
-	pmc &= ~(3u << 30);
-	if (!fixed && pmc >= pmu->nr_arch_gp_counters)
+	idx &= ~(3u << 30);
+	if (!fixed && idx >= pmu->nr_arch_gp_counters)
 		return 1;
-	if (fixed && pmc >= pmu->nr_arch_fixed_counters)
+	if (fixed && idx >= pmu->nr_arch_fixed_counters)
 		return 1;
 	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
-	ctr = pmc_read_counter(&counters[pmc]);
+
+	ctr_val = pmc_read_counter(&counters[idx]);
 	if (fast_mode)
-		ctr = (u32)ctr;
-	*data = ctr;
+		ctr_val = (u32)ctr_val;
 
+	*data = ctr_val;
 	return 0;
 }
 
+/* refresh PMU settings. This function generally is called when underlying
+ * settings are changed (such as changes of PMU CPUID by guest VMs), which
+ * should rarely happen.
+ */
 void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
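The index that kvm_pmu_rdpmc() decodes comes straight from the guest's RDPMC instruction: bit 30 selects the fixed-counter class and bit 31 requests fast mode, which truncates the result to 32 bits, as the code above shows. A worked decode of such an index (the value is made up):

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned idx = (1u << 31) | (1u << 30) | 1;	/* fast, fixed counter 1 */
		bool fast_mode = idx & (1u << 31);
		bool fixed = idx & (1u << 30);

		idx &= ~(3u << 30);	/* strip the flag bits, as above */
		printf("fixed=%d fast=%d index=%u\n", fixed, fast_mode, idx);
		return 0;
	}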