author     Linus Torvalds <torvalds@linux-foundation.org>  2011-10-26 11:03:38 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-10-26 11:03:38 -0400
commit     7115e3fcf45514db7525a05365b10454ff7f345e (patch)
tree       17450e6337d559cc35dae6a7a73abab01ac63f00 /arch/x86/oprofile
parent     1f6e05171bb5cc32a4d6437ab2269fc21d169ca7 (diff)
parent     c752d04066a36ae30b29795f3fa3f536292c1f8c (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (121 commits)
  perf symbols: Increase symbol KSYM_NAME_LEN size
  perf hists browser: Refuse 'a' hotkey on non symbolic views
  perf ui browser: Use libslang to read keys
  perf tools: Fix tracing info recording
  perf hists browser: Elide DSO column when it is set to just one DSO, ditto for threads
  perf hists: Don't consider filtered entries when calculating column widths
  perf hists: Don't decay total_period for filtered entries
  perf hists browser: Honour symbol_conf.show_{nr_samples,total_period}
  perf hists browser: Do not exit on tab key with single event
  perf annotate browser: Don't change selection line when returning from callq
  perf tools: handle endianness of feature bitmap
  perf tools: Add prelink suggestion to dso update message
  perf script: Fix unknown feature comment
  perf hists browser: Apply the dso and thread filters when merging new batches
  perf hists: Move the dso and thread filters from hist_browser
  perf ui browser: Honour the xterm colors
  perf top tui: Give color hints just on the percentage, like on --stdio
  perf ui browser: Make the colors configurable and change the defaults
  perf tui: Remove unneeded call to newtCls on startup
  perf hists: Don't format the percentage on hist_entry__snprintf
  ...

Fix up conflicts in arch/x86/kernel/kprobes.c manually.

Ingo's tree did the insane "add volatile to const array", which just
doesn't make sense ("volatile const"?). But we could remove the const
*and* make the array volatile to make doubly sure that gcc doesn't
optimize it away..

Also fix up kernel/trace/ring_buffer.c non-data-conflicts manually: the
reader_lock has been turned into a raw lock by the core locking merge,
and there was a new user of it introduced in this perf core merge. Make
sure that new use also uses the raw accessor functions.
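The raw-lock note in the last paragraph comes down to an accessor rule: once a lock is declared raw_spinlock_t, as the ring buffer's reader_lock now is, every user must go through the raw_spin_* functions, because the plain spin_lock() helpers no longer accept a raw lock. A minimal sketch of that rule, using a made-up lock and helper rather than the actual ring_buffer.c code:

	#include <linux/spinlock.h>

	/* Illustrative only: the demo_* names are hypothetical, not kernel code. */
	static DEFINE_RAW_SPINLOCK(demo_reader_lock);

	static void demo_touch_reader_state(void)
	{
		unsigned long flags;

		/* a raw_spinlock_t must be taken with the raw_* accessors */
		raw_spin_lock_irqsave(&demo_reader_lock, flags);
		/* ... touch state shared with IRQ/NMI context ... */
		raw_spin_unlock_irqrestore(&demo_reader_lock, flags);
	}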
Diffstat (limited to 'arch/x86/oprofile')
-rw-r--r--  arch/x86/oprofile/nmi_int.c         40
-rw-r--r--  arch/x86/oprofile/nmi_timer_int.c   28
-rw-r--r--  arch/x86/oprofile/op_model_amd.c   234
-rw-r--r--  arch/x86/oprofile/op_model_ppro.c   27
-rw-r--r--  arch/x86/oprofile/op_x86_model.h     1
5 files changed, 22 insertions, 308 deletions
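Every oprofile change below follows one pattern: the DIE_NMI die-notifier hooks are replaced with the x86 NMI-handler API introduced this merge window. As a minimal sketch of that pattern, assuming the <asm/nmi.h> interface the diffs use (all my_* names are hypothetical):

	#include <asm/nmi.h>		/* NMI_LOCAL, NMI_DONE, NMI_HANDLED */
	#include <asm/ptrace.h>		/* struct pt_regs */

	/*
	 * Handlers now take (type, regs) directly and return NMI_HANDLED to
	 * consume the NMI or NMI_DONE to pass it on, replacing the old
	 * struct die_args unpacking and NOTIFY_STOP/NOTIFY_DONE plumbing.
	 */
	static int my_nmi_handler(unsigned int type, struct pt_regs *regs)
	{
		if (!my_event_pending())	/* hypothetical check */
			return NMI_DONE;	/* not ours; keep searching */
		my_handle_event(regs);		/* hypothetical work */
		return NMI_HANDLED;		/* claimed; stop the chain */
	}

	static int my_start(void)
	{
		/* the string names the handler so it can later be removed */
		return register_nmi_handler(NMI_LOCAL, my_nmi_handler, 0,
					    "my-prof");
	}

	static void my_stop(void)
	{
		unregister_nmi_handler(NMI_LOCAL, "my-prof");
	}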
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 96646b3aeca8..75f9528e0372 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -61,26 +61,15 @@ u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
 }
 
 
-static int profile_exceptions_notify(struct notifier_block *self,
-				     unsigned long val, void *data)
+static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs)
 {
-	struct die_args *args = (struct die_args *)data;
-	int ret = NOTIFY_DONE;
-
-	switch (val) {
-	case DIE_NMI:
-		if (ctr_running)
-			model->check_ctrs(args->regs, &__get_cpu_var(cpu_msrs));
-		else if (!nmi_enabled)
-			break;
-		else
-			model->stop(&__get_cpu_var(cpu_msrs));
-		ret = NOTIFY_STOP;
-		break;
-	default:
-		break;
-	}
-	return ret;
+	if (ctr_running)
+		model->check_ctrs(regs, &__get_cpu_var(cpu_msrs));
+	else if (!nmi_enabled)
+		return NMI_DONE;
+	else
+		model->stop(&__get_cpu_var(cpu_msrs));
+	return NMI_HANDLED;
 }
 
 static void nmi_cpu_save_registers(struct op_msrs *msrs)
@@ -363,12 +352,6 @@ static void nmi_cpu_setup(void *dummy)
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 }
 
-static struct notifier_block profile_exceptions_nb = {
-	.notifier_call = profile_exceptions_notify,
-	.next = NULL,
-	.priority = NMI_LOCAL_LOW_PRIOR,
-};
-
 static void nmi_cpu_restore_registers(struct op_msrs *msrs)
 {
 	struct op_msr *counters = msrs->counters;
@@ -402,8 +385,6 @@ static void nmi_cpu_shutdown(void *dummy)
 	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
 	apic_write(APIC_LVTERR, v);
 	nmi_cpu_restore_registers(msrs);
-	if (model->cpu_down)
-		model->cpu_down();
 }
 
 static void nmi_cpu_up(void *dummy)
@@ -508,7 +489,8 @@ static int nmi_setup(void)
 	ctr_running = 0;
 	/* make variables visible to the nmi handler: */
 	smp_mb();
-	err = register_die_notifier(&profile_exceptions_nb);
+	err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
+					0, "oprofile");
 	if (err)
 		goto fail;
 
@@ -538,7 +520,7 @@ static void nmi_shutdown(void)
 	put_online_cpus();
 	/* make variables visible to the nmi handler: */
 	smp_mb();
-	unregister_die_notifier(&profile_exceptions_nb);
+	unregister_nmi_handler(NMI_LOCAL, "oprofile");
 	msrs = &get_cpu_var(cpu_msrs);
 	model->shutdown(msrs);
 	free_msrs();
diff --git a/arch/x86/oprofile/nmi_timer_int.c b/arch/x86/oprofile/nmi_timer_int.c
index 720bf5a53c51..7f8052cd6620 100644
--- a/arch/x86/oprofile/nmi_timer_int.c
+++ b/arch/x86/oprofile/nmi_timer_int.c
@@ -18,32 +18,16 @@
 #include <asm/apic.h>
 #include <asm/ptrace.h>
 
-static int profile_timer_exceptions_notify(struct notifier_block *self,
-					unsigned long val, void *data)
+static int profile_timer_exceptions_notify(unsigned int val, struct pt_regs *regs)
 {
-	struct die_args *args = (struct die_args *)data;
-	int ret = NOTIFY_DONE;
-
-	switch (val) {
-	case DIE_NMI:
-		oprofile_add_sample(args->regs, 0);
-		ret = NOTIFY_STOP;
-		break;
-	default:
-		break;
-	}
-	return ret;
+	oprofile_add_sample(regs, 0);
+	return NMI_HANDLED;
 }
 
-static struct notifier_block profile_timer_exceptions_nb = {
-	.notifier_call = profile_timer_exceptions_notify,
-	.next = NULL,
-	.priority = NMI_LOW_PRIOR,
-};
-
 static int timer_start(void)
 {
-	if (register_die_notifier(&profile_timer_exceptions_nb))
+	if (register_nmi_handler(NMI_LOCAL, profile_timer_exceptions_notify,
+					0, "oprofile-timer"))
 		return 1;
 	return 0;
 }
@@ -51,7 +35,7 @@ static int timer_start(void)
 
 static void timer_stop(void)
 {
-	unregister_die_notifier(&profile_timer_exceptions_nb);
+	unregister_nmi_handler(NMI_LOCAL, "oprofile-timer");
 	synchronize_sched();  /* Allow already-started NMIs to complete. */
 }
 
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 9cbb710dc94b..303f08637826 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -29,8 +29,6 @@
 #include "op_x86_model.h"
 #include "op_counter.h"
 
-#define NUM_COUNTERS		4
-#define NUM_COUNTERS_F15H	6
 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
 #define NUM_VIRT_COUNTERS	32
 #else
@@ -70,62 +68,12 @@ static struct ibs_config ibs_config;
 static struct ibs_state ibs_state;
 
 /*
- * IBS cpuid feature detection
- */
-
-#define IBS_CPUID_FEATURES	0x8000001b
-
-/*
- * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
- * bit 0 is used to indicate the existence of IBS.
- */
-#define IBS_CAPS_AVAIL		(1U<<0)
-#define IBS_CAPS_FETCHSAM	(1U<<1)
-#define IBS_CAPS_OPSAM		(1U<<2)
-#define IBS_CAPS_RDWROPCNT	(1U<<3)
-#define IBS_CAPS_OPCNT		(1U<<4)
-#define IBS_CAPS_BRNTRGT	(1U<<5)
-#define IBS_CAPS_OPCNTEXT	(1U<<6)
-
-#define IBS_CAPS_DEFAULT	(IBS_CAPS_AVAIL		\
-				 | IBS_CAPS_FETCHSAM	\
-				 | IBS_CAPS_OPSAM)
-
-/*
- * IBS APIC setup
- */
-#define IBSCTL			0x1cc
-#define IBSCTL_LVT_OFFSET_VALID	(1ULL<<8)
-#define IBSCTL_LVT_OFFSET_MASK	0x0F
-
-/*
  * IBS randomization macros
  */
 #define IBS_RANDOM_BITS		12
 #define IBS_RANDOM_MASK		((1ULL << IBS_RANDOM_BITS) - 1)
 #define IBS_RANDOM_MAXCNT_OFFSET	(1ULL << (IBS_RANDOM_BITS - 5))
 
-static u32 get_ibs_caps(void)
-{
-	u32 ibs_caps;
-	unsigned int max_level;
-
-	if (!boot_cpu_has(X86_FEATURE_IBS))
-		return 0;
-
-	/* check IBS cpuid feature flags */
-	max_level = cpuid_eax(0x80000000);
-	if (max_level < IBS_CPUID_FEATURES)
-		return IBS_CAPS_DEFAULT;
-
-	ibs_caps = cpuid_eax(IBS_CPUID_FEATURES);
-	if (!(ibs_caps & IBS_CAPS_AVAIL))
-		/* cpuid flags not valid */
-		return IBS_CAPS_DEFAULT;
-
-	return ibs_caps;
-}
-
 /*
  * 16-bit Linear Feedback Shift Register (LFSR)
  *
@@ -316,81 +264,6 @@ static void op_amd_stop_ibs(void)
 	wrmsrl(MSR_AMD64_IBSOPCTL, 0);
 }
 
-static inline int get_eilvt(int offset)
-{
-	return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
-}
-
-static inline int put_eilvt(int offset)
-{
-	return !setup_APIC_eilvt(offset, 0, 0, 1);
-}
-
-static inline int ibs_eilvt_valid(void)
-{
-	int offset;
-	u64 val;
-	int valid = 0;
-
-	preempt_disable();
-
-	rdmsrl(MSR_AMD64_IBSCTL, val);
-	offset = val & IBSCTL_LVT_OFFSET_MASK;
-
-	if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
-		pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
-		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
-		goto out;
-	}
-
-	if (!get_eilvt(offset)) {
-		pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
-		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
-		goto out;
-	}
-
-	valid = 1;
-out:
-	preempt_enable();
-
-	return valid;
-}
-
-static inline int get_ibs_offset(void)
-{
-	u64 val;
-
-	rdmsrl(MSR_AMD64_IBSCTL, val);
-	if (!(val & IBSCTL_LVT_OFFSET_VALID))
-		return -EINVAL;
-
-	return val & IBSCTL_LVT_OFFSET_MASK;
-}
-
-static void setup_APIC_ibs(void)
-{
-	int offset;
-
-	offset = get_ibs_offset();
-	if (offset < 0)
-		goto failed;
-
-	if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
-		return;
-failed:
-	pr_warn("oprofile: IBS APIC setup failed on cpu #%d\n",
-		smp_processor_id());
-}
-
-static void clear_APIC_ibs(void)
-{
-	int offset;
-
-	offset = get_ibs_offset();
-	if (offset >= 0)
-		setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
-}
-
 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
 
 static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
@@ -439,7 +312,7 @@ static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
 			goto fail;
 		}
 		/* both registers must be reserved */
-		if (num_counters == NUM_COUNTERS_F15H) {
+		if (num_counters == AMD64_NUM_COUNTERS_F15H) {
 			msrs->counters[i].addr = MSR_F15H_PERF_CTR + (i << 1);
 			msrs->controls[i].addr = MSR_F15H_PERF_CTL + (i << 1);
 		} else {
@@ -504,15 +377,6 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
 		val |= op_x86_get_ctrl(model, &counter_config[virt]);
 		wrmsrl(msrs->controls[i].addr, val);
 	}
-
-	if (ibs_caps)
-		setup_APIC_ibs();
-}
-
-static void op_amd_cpu_shutdown(void)
-{
-	if (ibs_caps)
-		clear_APIC_ibs();
 }
 
 static int op_amd_check_ctrs(struct pt_regs * const regs,
@@ -575,86 +439,6 @@ static void op_amd_stop(struct op_msrs const * const msrs)
 	op_amd_stop_ibs();
 }
 
-static int setup_ibs_ctl(int ibs_eilvt_off)
-{
-	struct pci_dev *cpu_cfg;
-	int nodes;
-	u32 value = 0;
-
-	nodes = 0;
-	cpu_cfg = NULL;
-	do {
-		cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
-					 PCI_DEVICE_ID_AMD_10H_NB_MISC,
-					 cpu_cfg);
-		if (!cpu_cfg)
-			break;
-		++nodes;
-		pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
-				       | IBSCTL_LVT_OFFSET_VALID);
-		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
-		if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
-			pci_dev_put(cpu_cfg);
-			printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
-			       "IBSCTL = 0x%08x\n", value);
-			return -EINVAL;
-		}
-	} while (1);
-
-	if (!nodes) {
-		printk(KERN_DEBUG "No CPU node configured for IBS\n");
-		return -ENODEV;
-	}
-
-	return 0;
-}
-
-/*
- * This runs only on the current cpu. We try to find an LVT offset and
- * setup the local APIC. For this we must disable preemption. On
- * success we initialize all nodes with this offset. This updates then
- * the offset in the IBS_CTL per-node msr. The per-core APIC setup of
- * the IBS interrupt vector is called from op_amd_setup_ctrs()/op_-
- * amd_cpu_shutdown() using the new offset.
- */
-static int force_ibs_eilvt_setup(void)
-{
-	int offset;
-	int ret;
-
-	preempt_disable();
-	/* find the next free available EILVT entry, skip offset 0 */
-	for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
-		if (get_eilvt(offset))
-			break;
-	}
-	preempt_enable();
-
-	if (offset == APIC_EILVT_NR_MAX) {
-		printk(KERN_DEBUG "No EILVT entry available\n");
-		return -EBUSY;
-	}
-
-	ret = setup_ibs_ctl(offset);
-	if (ret)
-		goto out;
-
-	if (!ibs_eilvt_valid()) {
-		ret = -EFAULT;
-		goto out;
-	}
-
-	pr_err(FW_BUG "using offset %d for IBS interrupts\n", offset);
-	pr_err(FW_BUG "workaround enabled for IBS LVT offset\n");
-
-	return 0;
-out:
-	preempt_disable();
-	put_eilvt(offset);
-	preempt_enable();
-	return ret;
-}
-
 /*
  * check and reserve APIC extended interrupt LVT offset for IBS if
  * available
@@ -667,17 +451,6 @@ static void init_ibs(void)
 	if (!ibs_caps)
 		return;
 
-	if (ibs_eilvt_valid())
-		goto out;
-
-	if (!force_ibs_eilvt_setup())
-		goto out;
-
-	/* Failed to setup ibs */
-	ibs_caps = 0;
-	return;
-
-out:
 	printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
 }
 
@@ -741,9 +514,9 @@ static int op_amd_init(struct oprofile_operations *ops)
 		ops->create_files = setup_ibs_files;
 
 	if (boot_cpu_data.x86 == 0x15) {
-		num_counters = NUM_COUNTERS_F15H;
+		num_counters = AMD64_NUM_COUNTERS_F15H;
 	} else {
-		num_counters = NUM_COUNTERS;
+		num_counters = AMD64_NUM_COUNTERS;
 	}
 
 	op_amd_spec.num_counters = num_counters;
@@ -760,7 +533,6 @@ struct op_x86_model_spec op_amd_spec = {
 	.init			= op_amd_init,
 	.fill_in_addresses	= &op_amd_fill_in_addresses,
 	.setup_ctrs		= &op_amd_setup_ctrs,
-	.cpu_down		= &op_amd_cpu_shutdown,
 	.check_ctrs		= &op_amd_check_ctrs,
 	.start			= &op_amd_start,
 	.stop			= &op_amd_stop,
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 94b745045e45..d90528ea5412 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -28,7 +28,7 @@ static int counter_width = 32;
 
 #define MSR_PPRO_EVENTSEL_RESERVED	((0xFFFFFFFFULL<<32)|(1ULL<<21))
 
-static u64 *reset_value;
+static u64 reset_value[OP_MAX_COUNTER];
 
 static void ppro_shutdown(struct op_msrs const * const msrs)
 {
@@ -40,10 +40,6 @@ static void ppro_shutdown(struct op_msrs const * const msrs)
 		release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
 		release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
 	}
-	if (reset_value) {
-		kfree(reset_value);
-		reset_value = NULL;
-	}
 }
 
 static int ppro_fill_in_addresses(struct op_msrs * const msrs)
@@ -79,13 +75,6 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
 	u64 val;
 	int i;
 
-	if (!reset_value) {
-		reset_value = kzalloc(sizeof(reset_value[0]) * num_counters,
-					GFP_ATOMIC);
-		if (!reset_value)
-			return;
-	}
-
 	if (cpu_has_arch_perfmon) {
 		union cpuid10_eax eax;
 		eax.full = cpuid_eax(0xa);
@@ -141,13 +130,6 @@ static int ppro_check_ctrs(struct pt_regs * const regs,
 	u64 val;
 	int i;
 
-	/*
-	 * This can happen if perf counters are in use when
-	 * we steal the die notifier NMI.
-	 */
-	if (unlikely(!reset_value))
-		goto out;
-
 	for (i = 0; i < num_counters; ++i) {
 		if (!reset_value[i])
 			continue;
@@ -158,7 +140,6 @@ static int ppro_check_ctrs(struct pt_regs * const regs,
 		wrmsrl(msrs->counters[i].addr, -reset_value[i]);
 	}
 
-out:
 	/* Only P6 based Pentium M need to re-unmask the apic vector but it
 	 * doesn't hurt other P6 variant */
 	apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
@@ -179,8 +160,6 @@ static void ppro_start(struct op_msrs const * const msrs)
 	u64 val;
 	int i;
 
-	if (!reset_value)
-		return;
 	for (i = 0; i < num_counters; ++i) {
 		if (reset_value[i]) {
 			rdmsrl(msrs->controls[i].addr, val);
@@ -196,8 +175,6 @@ static void ppro_stop(struct op_msrs const * const msrs)
 	u64 val;
 	int i;
 
-	if (!reset_value)
-		return;
 	for (i = 0; i < num_counters; ++i) {
 		if (!reset_value[i])
 			continue;
@@ -242,7 +219,7 @@ static void arch_perfmon_setup_counters(void)
 		eax.split.bit_width = 40;
 	}
 
-	num_counters = eax.split.num_counters;
+	num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
 
 	op_arch_perfmon_spec.num_counters = num_counters;
 	op_arch_perfmon_spec.num_controls = num_counters;
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
index 89017fa1fd63..71e8a67337e2 100644
--- a/arch/x86/oprofile/op_x86_model.h
+++ b/arch/x86/oprofile/op_x86_model.h
@@ -43,7 +43,6 @@ struct op_x86_model_spec {
 	int		(*fill_in_addresses)(struct op_msrs * const msrs);
 	void		(*setup_ctrs)(struct op_x86_model_spec const *model,
 				      struct op_msrs const * const msrs);
-	void		(*cpu_down)(void);
 	int		(*check_ctrs)(struct pt_regs * const regs,
 				      struct op_msrs const * const msrs);
 	void		(*start)(struct op_msrs const * const msrs);