Diffstat (limited to 'kernel/perf_event.c')
 kernel/perf_event.c | 29 ++++++++++++++++++++---------
 1 file changed, 20 insertions(+), 9 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 126a302c481c..656222fcf767 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -782,6 +782,10 @@ retry:
 	raw_spin_unlock_irq(&ctx->lock);
 }
 
+#define MAX_INTERRUPTS (~0ULL)
+
+static void perf_log_throttle(struct perf_event *event, int enable);
+
 static int
 event_sched_in(struct perf_event *event,
 		 struct perf_cpu_context *cpuctx,
@@ -794,6 +798,17 @@ event_sched_in(struct perf_event *event,
 
 	event->state = PERF_EVENT_STATE_ACTIVE;
 	event->oncpu = smp_processor_id();
+
+	/*
+	 * Unthrottle events, since we scheduled we might have missed several
+	 * ticks already, also for a heavily scheduling task there is little
+	 * guarantee it'll get a tick in a timely manner.
+	 */
+	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
+		perf_log_throttle(event, 1);
+		event->hw.interrupts = 0;
+	}
+
 	/*
 	 * The new state must be visible before we turn it on in the hardware:
 	 */
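For orientation, here is a minimal user-space sketch of the throttle bookkeeping this hunk relies on; the struct and helper names (demo_hw, demo_throttle, demo_unthrottle) are illustrative only, not the kernel's. The convention assumed is that hw.interrupts counts interrupts within the current period and is saturated to MAX_INTERRUPTS while the event is throttled, so resetting it to 0 is what re-arms the event.

/* Illustrative model of the throttle state assumed above; not kernel code. */
#include <stdio.h>

#define MAX_INTERRUPTS (~0ULL)

struct demo_hw {
	unsigned long long interrupts;	/* count, or MAX_INTERRUPTS while throttled */
};

/* Interrupt path: event fired too often, mark it throttled. */
static void demo_throttle(struct demo_hw *hw)
{
	hw->interrupts = MAX_INTERRUPTS;
}

/* Schedule-in or timer tick: clear the throttle so the event can fire again. */
static void demo_unthrottle(struct demo_hw *hw)
{
	if (hw->interrupts == MAX_INTERRUPTS) {
		/* the real code also emits a throttle log record here */
		hw->interrupts = 0;
	}
}

int main(void)
{
	struct demo_hw hw = { .interrupts = 0 };

	demo_throttle(&hw);
	demo_unthrottle(&hw);
	printf("interrupts after unthrottle: %llu\n", hw.interrupts);
	return 0;
}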
@@ -1596,10 +1611,6 @@ void __perf_event_task_sched_in(struct task_struct *task)
 	}
 }
 
-#define MAX_INTERRUPTS (~0ULL)
-
-static void perf_log_throttle(struct perf_event *event, int enable);
-
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
 {
 	u64 frequency = event->attr.sample_freq;
@@ -1901,11 +1912,12 @@ static void __perf_event_read(void *info)
 		return;
 
 	raw_spin_lock(&ctx->lock);
-	update_context_time(ctx);
+	if (ctx->is_active)
+		update_context_time(ctx);
 	update_event_times(event);
+	if (event->state == PERF_EVENT_STATE_ACTIVE)
+		event->pmu->read(event);
 	raw_spin_unlock(&ctx->lock);
-
-	event->pmu->read(event);
 }
 
 static inline u64 perf_event_count(struct perf_event *event)
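The hunk above moves the pmu->read() call under ctx->lock and only performs it while the event is still ACTIVE, so a concurrently scheduled-out event is no longer read. A rough user-space analogue of that check-under-lock pattern is sketched below; every name in it is made up for illustration and the mutex merely stands in for the context lock.

/* Illustrative check-under-lock pattern; not kernel code. */
#include <pthread.h>
#include <stdio.h>

enum demo_state { DEMO_INACTIVE, DEMO_ACTIVE };

struct demo_event {
	pthread_mutex_t lock;
	enum demo_state state;
	unsigned long long count;
};

/* Only touch the counter while the event is still active, and only while
 * holding the same lock the scheduling side takes to change the state. */
static void demo_read(struct demo_event *ev)
{
	pthread_mutex_lock(&ev->lock);
	if (ev->state == DEMO_ACTIVE)
		ev->count++;		/* stand-in for event->pmu->read(event) */
	pthread_mutex_unlock(&ev->lock);
}

int main(void)
{
	struct demo_event ev = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.state = DEMO_ACTIVE,
		.count = 0,
	};

	demo_read(&ev);
	printf("count = %llu\n", ev.count);
	return 0;
}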
@@ -1999,8 +2011,7 @@ static int alloc_callchain_buffers(void)
 	 * accessed from NMI. Use a temporary manual per cpu allocation
 	 * until that gets sorted out.
 	 */
-	size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
-		num_possible_cpus();
+	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
 
 	entries = kzalloc(size, GFP_KERNEL);
 	if (!entries)
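The sizing change in the last hunk switches to offsetof() over a trailing array member, which derives the element size from the struct definition instead of restating it in the arithmetic (and moves from num_possible_cpus() to nr_cpu_ids as the slot count). A standalone sketch of the same idiom follows; the struct and variable names are illustrative, not the kernel's callchain_cpus_entries definition. Note that indexing the flexible array with a runtime value inside offsetof() relies on the GCC-style __builtin_offsetof, as the kernel's own offsetof() does.

/* Standalone sketch of sizing an allocation with offsetof() and a flexible
 * array member; all names are illustrative only. */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_entries {
	int nr;
	void *slot[];			/* flexible array member, one per CPU */
};

int main(void)
{
	int nr_cpus = 4;		/* stands in for nr_cpu_ids */

	/* Header bytes up to slot[], plus nr_cpus pointer-sized slots,
	 * without spelling out the element type a second time. */
	size_t size = offsetof(struct demo_entries, slot[nr_cpus]);

	struct demo_entries *e = calloc(1, size);
	if (!e)
		return 1;

	e->nr = nr_cpus;
	printf("allocated %zu bytes for %d slots\n", size, e->nr);
	free(e);
	return 0;
}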