diff options
| -rw-r--r-- | arch/mips/kernel/perf_event_mipsxx.c | 21 |
1 file changed, 3 insertions(+), 18 deletions(-)
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 413863508f6f..d67fb64e908c 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c | |||
| @@ -64,17 +64,11 @@ struct mips_perf_event { | |||
| 64 | #define CNTR_EVEN 0x55555555 | 64 | #define CNTR_EVEN 0x55555555 |
| 65 | #define CNTR_ODD 0xaaaaaaaa | 65 | #define CNTR_ODD 0xaaaaaaaa |
| 66 | #define CNTR_ALL 0xffffffff | 66 | #define CNTR_ALL 0xffffffff |
| 67 | #ifdef CONFIG_MIPS_MT_SMP | ||
| 68 | enum { | 67 | enum { |
| 69 | T = 0, | 68 | T = 0, |
| 70 | V = 1, | 69 | V = 1, |
| 71 | P = 2, | 70 | P = 2, |
| 72 | } range; | 71 | } range; |
| 73 | #else | ||
| 74 | #define T | ||
| 75 | #define V | ||
| 76 | #define P | ||
| 77 | #endif | ||
| 78 | }; | 72 | }; |
| 79 | 73 | ||
| 80 | static struct mips_perf_event raw_event; | 74 | static struct mips_perf_event raw_event; |
| @@ -325,9 +319,7 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx) | |||
| 325 | { | 319 | { |
| 326 | struct perf_event *event = container_of(evt, struct perf_event, hw); | 320 | struct perf_event *event = container_of(evt, struct perf_event, hw); |
| 327 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 321 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
| 328 | #ifdef CONFIG_MIPS_MT_SMP | ||
| 329 | unsigned int range = evt->event_base >> 24; | 322 | unsigned int range = evt->event_base >> 24; |
| 330 | #endif /* CONFIG_MIPS_MT_SMP */ | ||
| 331 | 323 | ||
| 332 | WARN_ON(idx < 0 || idx >= mipspmu.num_counters); | 324 | WARN_ON(idx < 0 || idx >= mipspmu.num_counters); |
| 333 | 325 | ||
| @@ -336,21 +328,15 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx) | |||
| 336 | /* Make sure interrupt enabled. */ | 328 | /* Make sure interrupt enabled. */ |
| 337 | MIPS_PERFCTRL_IE; | 329 | MIPS_PERFCTRL_IE; |
| 338 | 330 | ||
| 339 | #ifdef CONFIG_CPU_BMIPS5000 | 331 | if (IS_ENABLED(CONFIG_CPU_BMIPS5000)) { |
| 340 | { | ||
| 341 | /* enable the counter for the calling thread */ | 332 | /* enable the counter for the calling thread */ |
| 342 | cpuc->saved_ctrl[idx] |= | 333 | cpuc->saved_ctrl[idx] |= |
| 343 | (1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC; | 334 | (1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC; |
| 344 | } | 335 | } else if (IS_ENABLED(CONFIG_MIPS_MT_SMP) && range > V) { |
| 345 | #else | ||
| 346 | #ifdef CONFIG_MIPS_MT_SMP | ||
| 347 | if (range > V) { | ||
| 348 | /* The counter is processor wide. Set it up to count all TCs. */ | 336 | /* The counter is processor wide. Set it up to count all TCs. */ |
| 349 | pr_debug("Enabling perf counter for all TCs\n"); | 337 | pr_debug("Enabling perf counter for all TCs\n"); |
| 350 | cpuc->saved_ctrl[idx] |= M_TC_EN_ALL; | 338 | cpuc->saved_ctrl[idx] |= M_TC_EN_ALL; |
| 351 | } else | 339 | } else { |
| 352 | #endif /* CONFIG_MIPS_MT_SMP */ | ||
| 353 | { | ||
| 354 | unsigned int cpu, ctrl; | 340 | unsigned int cpu, ctrl; |
| 355 | 341 | ||
| 356 | /* | 342 | /* |
| @@ -365,7 +351,6 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx) | |||
| 365 | cpuc->saved_ctrl[idx] |= ctrl; | 351 | cpuc->saved_ctrl[idx] |= ctrl; |
| 366 | pr_debug("Enabling perf counter for CPU%d\n", cpu); | 352 | pr_debug("Enabling perf counter for CPU%d\n", cpu); |
| 367 | } | 353 | } |
| 368 | #endif /* CONFIG_CPU_BMIPS5000 */ | ||
| 369 | /* | 354 | /* |
| 370 | * We do not actually let the counter run. Leave it until start(). | 355 | * We do not actually let the counter run. Leave it until start(). |
| 371 | */ | 356 | */ |
