 arch/x86/kernel/cpu/perf_event_intel_ds.c  |  1 -
 arch/x86/kernel/cpu/perf_event_intel_lbr.c |  2 --
 kernel/events/core.c                       | 19 ++++++++++++++-----
 tools/perf/bench/mem-memcpy-x86-64-asm.S   |  6 ++++++
 tools/perf/util/event.c                    |  2 +-
 tools/perf/util/evsel.c                    |  1 +
 6 files changed, 22 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 73da6b64f5b..d6bd49faa40 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -439,7 +439,6 @@ void intel_pmu_pebs_enable(struct perf_event *event)
 		hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
 
 	cpuc->pebs_enabled |= 1ULL << hwc->idx;
-	WARN_ON_ONCE(cpuc->enabled);
 
 	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
 		intel_pmu_lbr_enable(event);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 3fab3de3ce9..47a7e63bfe5 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -72,8 +72,6 @@ void intel_pmu_lbr_enable(struct perf_event *event)
 	if (!x86_pmu.lbr_nr)
 		return;
 
-	WARN_ON_ONCE(cpuc->enabled);
-
 	/*
 	 * Reset the LBR stack if we changed task context to
 	 * avoid data leaks.
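
Both WARN_ON_ONCE(cpuc->enabled) deletions above drop the same assertion: that the whole PMU is already disabled whenever PEBS or LBR is being enabled. The kernel/events/core.c hunks below instead enforce that invariant at the call site, bracketing the frequency-adjust/unthrottle loop with perf_pmu_disable()/perf_pmu_enable(). A compilable sketch of the caller-side pattern, with hypothetical names (not kernel code):

/*
 * Sketch only: the invariant "counters are off while we reprogram"
 * is enforced once at the caller instead of asserted in every
 * low-level enable routine.  All names here are hypothetical.
 */
struct pmu_ctx { int enabled; };

void pmu_disable(struct pmu_ctx *c) { c->enabled = 0; }
void pmu_enable(struct pmu_ctx *c)  { c->enabled = 1; }

void reprogram_all(struct pmu_ctx *c, int nr, void (*reprogram)(int))
{
	pmu_disable(c);            /* counters quiesced once, up front */
	for (int i = 0; i < nr; i++)
		reprogram(i);      /* no per-call WARN_ON needed here */
	pmu_enable(c);
}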
diff --git a/kernel/events/core.c b/kernel/events/core.c
index ba36013cfb2..1b5c081d8b9 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2303,7 +2303,7 @@ do { \
 static DEFINE_PER_CPU(int, perf_throttled_count);
 static DEFINE_PER_CPU(u64, perf_throttled_seq);
 
-static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
+static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	s64 period, sample_period;
@@ -2322,9 +2322,13 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
 	hwc->sample_period = sample_period;
 
 	if (local64_read(&hwc->period_left) > 8*sample_period) {
-		event->pmu->stop(event, PERF_EF_UPDATE);
+		if (disable)
+			event->pmu->stop(event, PERF_EF_UPDATE);
+
 		local64_set(&hwc->period_left, 0);
-		event->pmu->start(event, PERF_EF_RELOAD);
+
+		if (disable)
+			event->pmu->start(event, PERF_EF_RELOAD);
 	}
 }
 
@@ -2350,6 +2354,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 		return;
 
 	raw_spin_lock(&ctx->lock);
+	perf_pmu_disable(ctx->pmu);
 
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
@@ -2381,13 +2386,17 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 		/*
 		 * restart the event
 		 * reload only if value has changed
+		 * we have stopped the event so tell that
+		 * to perf_adjust_period() to avoid stopping it
+		 * twice.
 		 */
 		if (delta > 0)
-			perf_adjust_period(event, period, delta);
+			perf_adjust_period(event, period, delta, false);
 
 		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
 	}
 
+	perf_pmu_enable(ctx->pmu);
 	raw_spin_unlock(&ctx->lock);
 }
 
@@ -4562,7 +4571,7 @@ static int __perf_event_overflow(struct perf_event *event,
 			hwc->freq_time_stamp = now;
 
 			if (delta > 0 && delta < 2*TICK_NSEC)
-				perf_adjust_period(event, delta, hwc->last_period);
+				perf_adjust_period(event, delta, hwc->last_period, true);
 		}
 
 		/*
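
The new bool argument keeps perf_adjust_period() from stopping and restarting an event that its caller has already stopped. A compilable sketch of the two call paths, with hypothetical names (not the kernel implementation):

#include <stdbool.h>
#include <stdio.h>

struct event { bool running; long period_left; };

void stop_event(struct event *e)  { e->running = false; }
void start_event(struct event *e) { e->running = true; }

/* 'disable' means "the event is still running; stop it around the update" */
void adjust_period(struct event *e, bool disable)
{
	if (disable)
		stop_event(e);

	e->period_left = 0;        /* reprogram while quiesced */

	if (disable)
		start_event(e);
}

void tick_unthrottle(struct event *e)
{
	stop_event(e);             /* caller stopped it already ...    */
	adjust_period(e, false);   /* ... so the callee must not       */
	start_event(e);            /* single stop/start pair per event */
}

void overflow_irq(struct event *e)
{
	adjust_period(e, true);    /* nobody else stopped it; callee does */
}

int main(void)
{
	struct event e = { .running = true, .period_left = 100 };

	tick_unthrottle(&e);
	overflow_irq(&e);
	printf("running=%d period_left=%ld\n", e.running, e.period_left);
	return 0;
}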
diff --git a/tools/perf/bench/mem-memcpy-x86-64-asm.S b/tools/perf/bench/mem-memcpy-x86-64-asm.S
index a57b66e853c..185a96d66dd 100644
--- a/tools/perf/bench/mem-memcpy-x86-64-asm.S
+++ b/tools/perf/bench/mem-memcpy-x86-64-asm.S
@@ -1,2 +1,8 @@
 
 #include "../../../arch/x86/lib/memcpy_64.S"
+/*
+ * We need to provide note.GNU-stack section, saying that we want
+ * NOT executable stack. Otherwise the final linking will assume that
+ * the ELF stack should not be restricted at all and set it RWX.
+ */
+.section .note.GNU-stack,"",@progbits
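
The empty .note.GNU-stack section tells the linker this object is fine with a non-executable stack. One way to confirm the effect on a resulting binary (an assumption about your build, not part of the patch) is `readelf -lW perf | grep GNU_STACK`, which should report RW rather than RWE flags on the GNU_STACK program header.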
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 73ddaf06b8e..2044324b755 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -554,7 +554,7 @@ static int perf_event__process_kernel_mmap(struct perf_tool *tool __used,
 
 	is_kernel_mmap = memcmp(event->mmap.filename,
 				kmmap_prefix,
-				strlen(kmmap_prefix)) == 0;
+				strlen(kmmap_prefix) - 1) == 0;
 	if (event->mmap.filename[0] == '/' ||
 	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
 
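
This change relaxes the kernel-mmap test by one byte: the mmap filename now only has to match kmmap_prefix up to, but not including, its final character. A self-contained illustration of exactly what the new comparison accepts (the filenames below are made up):

#include <stdio.h>
#include <string.h>

/* Prefix match as in perf_event__process_kernel_mmap(): with len-1 the
 * final character of the prefix (here ']') no longer has to match. */
int is_kernel_mmap(const char *filename, const char *prefix)
{
	return memcmp(filename, prefix, strlen(prefix) - 1) == 0;
}

int main(void)
{
	const char *prefix = "[kernel.kallsyms]";

	printf("%d\n", is_kernel_mmap("[kernel.kallsyms]_text", prefix)); /* 1 */
	printf("%d\n", is_kernel_mmap("[kernel.kallsyms", prefix));       /* 1: ']' no longer required */
	printf("%d\n", is_kernel_mmap("/lib/libc.so", prefix));           /* 0 */
	return 0;
}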
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 667f3b78bb2..7132ee834e0 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -463,6 +463,7 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
 	memset(data, 0, sizeof(*data));
 	data->cpu = data->pid = data->tid = -1;
 	data->stream_id = data->id = data->time = -1ULL;
+	data->period = 1;
 
 	if (event->header.type != PERF_RECORD_SAMPLE) {
 		if (!sample_id_all)
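
Initializing data->period to 1 follows the pattern already used two lines above for cpu/pid/tid: set safe defaults first, let optional sample fields overwrite them. Presumably a sample whose type lacks PERF_SAMPLE_PERIOD then counts as one event instead of zero. A standalone sketch of the idiom (names are made up):

#include <stdio.h>

struct sample { int cpu; unsigned long long period; };

/* Defaults first, optional fields second, as in perf_event__parse_sample(). */
void parse_sample(struct sample *s, int has_period, unsigned long long raw)
{
	s->cpu = -1;               /* "unknown" sentinel */
	s->period = 1;             /* a sample with no period counts once */

	if (has_period)
		s->period = raw;
}

int main(void)
{
	struct sample s;

	parse_sample(&s, 0, 0);
	printf("period without PERF_SAMPLE_PERIOD: %llu\n", s.period); /* 1 */
	return 0;
}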