Diffstat (limited to 'kernel')
-rw-r--r--  kernel/perf_event.c | 19
1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 999835b6112b..656222fcf767 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -782,6 +782,10 @@ retry:
 	raw_spin_unlock_irq(&ctx->lock);
 }
 
+#define MAX_INTERRUPTS (~0ULL)
+
+static void perf_log_throttle(struct perf_event *event, int enable);
+
 static int
 event_sched_in(struct perf_event *event,
 		 struct perf_cpu_context *cpuctx,
@@ -794,6 +798,17 @@ event_sched_in(struct perf_event *event,
 
 	event->state = PERF_EVENT_STATE_ACTIVE;
 	event->oncpu = smp_processor_id();
+
+	/*
+	 * Unthrottle events, since we scheduled we might have missed several
+	 * ticks already, also for a heavily scheduling task there is little
+	 * guarantee it'll get a tick in a timely manner.
+	 */
+	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
+		perf_log_throttle(event, 1);
+		event->hw.interrupts = 0;
+	}
+
 	/*
 	 * The new state must be visible before we turn it on in the hardware:
 	 */
@@ -1596,10 +1611,6 @@ void __perf_event_task_sched_in(struct task_struct *task)
 	}
 }
 
-#define MAX_INTERRUPTS (~0ULL)
-
-static void perf_log_throttle(struct perf_event *event, int enable);
-
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
 {
 	u64 frequency = event->attr.sample_freq;
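
For readers unfamiliar with the throttling path this patch touches, the sketch below is a minimal userspace model of the idea, not kernel code and not the implementation in kernel/perf_event.c: the overflow path counts interrupts and, once a hypothetical per-tick budget is exceeded, marks the event throttled by setting its counter to MAX_INTERRUPTS; the periodic tick normally clears that mark, and this patch additionally clears it on sched-in, since a heavily scheduling task may not receive a tick in a timely manner. The names fake_event, MAX_PER_TICK, event_overflow and event_tick are invented for the sketch; only MAX_INTERRUPTS and the sched-in unthrottle step mirror the patch.

/* Simplified userspace sketch of throttle/unthrottle; assumptions noted above. */
#include <stdio.h>

#define MAX_INTERRUPTS (~0ULL)
#define MAX_PER_TICK   4ULL	/* hypothetical per-tick interrupt budget */

struct fake_event {
	/* interrupts seen this tick, or MAX_INTERRUPTS while throttled */
	unsigned long long interrupts;
};

/* Overflow path: count interrupts; throttle once the budget is exceeded. */
static int event_overflow(struct fake_event *e)
{
	if (e->interrupts == MAX_INTERRUPTS)
		return 1;			/* already throttled, drop the sample */
	if (++e->interrupts >= MAX_PER_TICK) {
		e->interrupts = MAX_INTERRUPTS;	/* mark the event throttled */
		return 1;
	}
	return 0;
}

/* Tick path: the periodic tick normally unthrottles the event. */
static void event_tick(struct fake_event *e)
{
	if (e->interrupts == MAX_INTERRUPTS)
		e->interrupts = 0;
}

/* Sched-in path: what this patch adds - also unthrottle here, because the
 * task may have missed several ticks or may not get one soon. */
static void event_sched_in(struct fake_event *e)
{
	if (e->interrupts == MAX_INTERRUPTS)
		e->interrupts = 0;
}

int main(void)
{
	struct fake_event e = { 0 };

	for (int i = 0; i < 6; i++)
		printf("overflow -> throttled=%d\n", event_overflow(&e));

	event_sched_in(&e);	/* unthrottle on sched-in, as in the patch */
	printf("after sched_in: interrupts=%llu\n", e.interrupts);

	event_tick(&e);		/* the tick path would have the same effect */
	return 0;
}

Running the sketch shows the event becoming throttled after the fourth overflow and being reset to an unthrottled state by event_sched_in, which is the behaviour the patch adds to event_sched_in by moving MAX_INTERRUPTS and the perf_log_throttle declaration above it.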