author	Kan Liang <kan.liang@linux.intel.com>	2018-08-08 03:12:06 -0400
committer	Ingo Molnar <mingo@kernel.org>	2018-10-02 04:14:30 -0400
commit	ba12d20edc5caf9835006d8f3efd4ed18465c75b (patch)
tree	b3b16c6b6feb47c286c4c5eae4779158aae4392b
parent	a4c9f26533eb547c8123e9a5f77517f61d19d2c2 (diff)
perf/x86/intel: Factor out common code of PMI handler
The Arch Perfmon v4 PMI handler is substantially different from the older
PMI handler. Instead of adding more and more ifs, cleanly fork the new
handler into a new function, with the main common code factored out into
a common function.

Fix a complaint from checkpatch.pl by removing "false" from "static bool
warned".

No functional change.

Based-on-code-from: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: acme@kernel.org
Link: http://lkml.kernel.org/r/1533712328-2834-1-git-send-email-kan.liang@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	arch/x86/events/intel/core.c	109
1 file changed, 60 insertions(+), 49 deletions(-)
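For orientation before the diff: the patch moves the per-counter overflow servicing into handle_pmi_common(), and the legacy loop-based intel_pmu_handle_irq() now calls that helper, so a later Arch Perfmon v4 handler can reuse it without another layer of ifs. The following is a minimal, standalone sketch of that shape, not kernel code; everything except the two function names is a simplified stand-in for illustration.

/*
 * Minimal standalone sketch (not kernel code) of the structure this patch
 * introduces: the per-counter overflow work sits in handle_pmi_common(),
 * and the legacy loop-based intel_pmu_handle_irq() calls it.  Everything
 * except those two names is a simplified stand-in for illustration.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_status = 0x3;	/* pretend counters 0 and 1 overflowed */

/* Stand-ins for reading and acknowledging the overflow status register. */
static uint64_t get_status(void)
{
	uint64_t s = fake_status;

	fake_status = 0;
	return s;
}

static void ack_status(uint64_t status)
{
	(void)status;
}

/* Common part: service every counter whose bit is set in @status. */
static int handle_pmi_common(uint64_t status)
{
	int handled = 0;

	for (int bit = 0; bit < 64; bit++) {
		if (status & (1ULL << bit)) {
			printf("servicing counter %d\n", bit);
			handled++;
		}
	}
	return handled;
}

/* Legacy-style handler: ack and loop until no overflow bits remain. */
static int intel_pmu_handle_irq(void)
{
	int handled = 0, loops = 0;
	uint64_t status;

	while ((status = get_status()) != 0) {
		if (++loops > 100) {
			fprintf(stderr, "irq loop stuck!\n");
			break;
		}
		ack_status(status);
		handled += handle_pmi_common(status);
	}
	return handled;
}

int main(void)
{
	printf("handled %d events\n", intel_pmu_handle_irq());
	return 0;
}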
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 035c37481f57..9b320a51f82f 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2200,59 +2200,15 @@ static void intel_pmu_reset(void)
 	local_irq_restore(flags);
 }
 
-/*
- * This handler is triggered by the local APIC, so the APIC IRQ handling
- * rules apply:
- */
-static int intel_pmu_handle_irq(struct pt_regs *regs)
+static int handle_pmi_common(struct pt_regs *regs, u64 status)
 {
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
-	int bit, loops;
-	u64 status;
-	int handled;
-	int pmu_enabled;
-
-	cpuc = this_cpu_ptr(&cpu_hw_events);
-
-	/*
-	 * Save the PMU state.
-	 * It needs to be restored when leaving the handler.
-	 */
-	pmu_enabled = cpuc->enabled;
-	/*
-	 * No known reason to not always do late ACK,
-	 * but just in case do it opt-in.
-	 */
-	if (!x86_pmu.late_ack)
-		apic_write(APIC_LVTPC, APIC_DM_NMI);
-	intel_bts_disable_local();
-	cpuc->enabled = 0;
-	__intel_pmu_disable_all();
-	handled = intel_pmu_drain_bts_buffer();
-	handled += intel_bts_interrupt();
-	status = intel_pmu_get_status();
-	if (!status)
-		goto done;
-
-	loops = 0;
-again:
-	intel_pmu_lbr_read();
-	intel_pmu_ack_status(status);
-	if (++loops > 100) {
-		static bool warned = false;
-		if (!warned) {
-			WARN(1, "perfevents: irq loop stuck!\n");
-			perf_event_print_debug();
-			warned = true;
-		}
-		intel_pmu_reset();
-		goto done;
-	}
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	int bit;
+	int handled = 0;
 
 	inc_irq_stat(apic_perf_irqs);
 
-
 	/*
 	 * Ignore a range of extra bits in status that do not indicate
 	 * overflow by themselves.
@@ -2261,7 +2217,7 @@ again:
 		    GLOBAL_STATUS_ASIF |
 		    GLOBAL_STATUS_LBRS_FROZEN);
 	if (!status)
-		goto done;
+		return 0;
 	/*
 	 * In case multiple PEBS events are sampled at the same time,
 	 * it is possible to have GLOBAL_STATUS bit 62 set indicating
@@ -2331,6 +2287,61 @@ again:
 			x86_pmu_stop(event, 0);
 	}
 
+	return handled;
+}
+
+/*
+ * This handler is triggered by the local APIC, so the APIC IRQ handling
+ * rules apply:
+ */
+static int intel_pmu_handle_irq(struct pt_regs *regs)
+{
+	struct cpu_hw_events *cpuc;
+	int loops;
+	u64 status;
+	int handled;
+	int pmu_enabled;
+
+	cpuc = this_cpu_ptr(&cpu_hw_events);
+
+	/*
+	 * Save the PMU state.
+	 * It needs to be restored when leaving the handler.
+	 */
+	pmu_enabled = cpuc->enabled;
+	/*
+	 * No known reason to not always do late ACK,
+	 * but just in case do it opt-in.
+	 */
+	if (!x86_pmu.late_ack)
+		apic_write(APIC_LVTPC, APIC_DM_NMI);
+	intel_bts_disable_local();
+	cpuc->enabled = 0;
+	__intel_pmu_disable_all();
+	handled = intel_pmu_drain_bts_buffer();
+	handled += intel_bts_interrupt();
+	status = intel_pmu_get_status();
+	if (!status)
+		goto done;
+
+	loops = 0;
+again:
+	intel_pmu_lbr_read();
+	intel_pmu_ack_status(status);
+	if (++loops > 100) {
+		static bool warned;
+
+		if (!warned) {
+			WARN(1, "perfevents: irq loop stuck!\n");
+			perf_event_print_debug();
+			warned = true;
+		}
+		intel_pmu_reset();
+		goto done;
+	}
+
+	handled += handle_pmi_common(regs, status);
+
 	/*
 	 * Repeat if there is more work to be done:
 	 */