author		Ingo Molnar <mingo@elte.hu>	2011-02-16 07:27:18 -0500
committer	Ingo Molnar <mingo@elte.hu>	2011-02-16 07:27:23 -0500
commit		b00560f2d4de69bb12f66f9605985b516df98d77
tree		0c92fc994125dc3ddb635842715be29d8b16808b	/kernel/perf_event.c
parent		bf1af3a809506645b9130755b713b008da14737f
parent		4fe757dd48a9e95e1a071291f15dda5421dacb66
Merge branch 'perf/urgent' into perf/core
Merge reason: we need to queue up a dependent patch
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--	kernel/perf_event.c	19
1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index a353a4d6d00d..3d3f282fa50e 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -851,6 +851,10 @@ retry:
 	raw_spin_unlock_irq(&ctx->lock);
 }
 
+#define MAX_INTERRUPTS (~0ULL)
+
+static void perf_log_throttle(struct perf_event *event, int enable);
+
 static int
 event_sched_in(struct perf_event *event,
 		struct perf_cpu_context *cpuctx,
@@ -863,6 +867,17 @@ event_sched_in(struct perf_event *event,
 
 	event->state = PERF_EVENT_STATE_ACTIVE;
 	event->oncpu = smp_processor_id();
+
+	/*
+	 * Unthrottle events, since we scheduled we might have missed several
+	 * ticks already, also for a heavily scheduling task there is little
+	 * guarantee it'll get a tick in a timely manner.
+	 */
+	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
+		perf_log_throttle(event, 1);
+		event->hw.interrupts = 0;
+	}
+
 	/*
 	 * The new state must be visible before we turn it on in the hardware:
 	 */
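For context, the hunk above completes a throttle/unthrottle handshake: the overflow path marks a throttled event by setting hw.interrupts to the MAX_INTERRUPTS sentinel, and event_sched_in() now clears that mark when the event is scheduled back in. The following is a minimal standalone userspace sketch of that state machine; the names mirror the kernel's, but the MAX_PER_TICK budget and the print-based perf_log_throttle() are illustrative stand-ins, not kernel code.

/* Simplified model of the throttle/unthrottle handshake (assumptions noted above). */
#include <stdio.h>

#define MAX_INTERRUPTS (~0ULL)
#define MAX_PER_TICK   1000ULL	/* stand-in for the real per-tick interrupt budget */

struct hw_perf_event {
	unsigned long long interrupts;
};

static void perf_log_throttle(struct hw_perf_event *hwc, int enable)
{
	printf("THROTTLE: %s\n", enable ? "unthrottle" : "throttle");
}

/* Overflow path: count interrupts, throttle once the budget is spent. */
static int overflow(struct hw_perf_event *hwc)
{
	if (hwc->interrupts == MAX_INTERRUPTS)
		return 1;				/* already throttled */
	if (++hwc->interrupts >= MAX_PER_TICK) {
		hwc->interrupts = MAX_INTERRUPTS;	/* sentinel: throttled */
		perf_log_throttle(hwc, 0);
		return 1;
	}
	return 0;
}

/* Schedule-in path: what the patch adds to event_sched_in(). */
static void sched_in(struct hw_perf_event *hwc)
{
	if (hwc->interrupts == MAX_INTERRUPTS) {
		perf_log_throttle(hwc, 1);
		hwc->interrupts = 0;	/* unthrottle without waiting for a tick */
	}
}

int main(void)
{
	struct hw_perf_event hwc = { 0 };
	unsigned long long i;

	for (i = 0; i <= MAX_PER_TICK; i++)
		overflow(&hwc);		/* drive the event into throttling */
	sched_in(&hwc);			/* patch: unthrottle at schedule-in */
	return 0;
}

The point of the patch is visible in sched_in(): before it, a throttled event had to wait for the hrtimer tick to be unthrottled, which a heavily scheduling task may not receive in a timely manner.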
@@ -1661,10 +1676,6 @@ void __perf_event_task_sched_in(struct task_struct *task)
 	}
 }
 
-#define MAX_INTERRUPTS (~0ULL)
-
-static void perf_log_throttle(struct perf_event *event, int enable);
-
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
 {
 	u64 frequency = event->attr.sample_freq;