 kernel/events/core.c | 180 +++++++++++++++++++++++--------------------------------
 1 file changed, 76 insertions(+), 104 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 39cf4a40aa4c..c3d61b92d805 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -126,6 +126,37 @@ static int cpu_function_call(int cpu, remote_function_f func, void *info)
 	return data.ret;
 }
 
+static void event_function_call(struct perf_event *event,
+				int (*active)(void *),
+				void (*inactive)(void *),
+				void *data)
+{
+	struct perf_event_context *ctx = event->ctx;
+	struct task_struct *task = ctx->task;
+
+	if (!task) {
+		cpu_function_call(event->cpu, active, data);
+		return;
+	}
+
+again:
+	if (!task_function_call(task, active, data))
+		return;
+
+	raw_spin_lock_irq(&ctx->lock);
+	if (ctx->is_active) {
+		/*
+		 * Reload the task pointer, it might have been changed by
+		 * a concurrent perf_event_context_sched_out().
+		 */
+		task = ctx->task;
+		raw_spin_unlock_irq(&ctx->lock);
+		goto again;
+	}
+	inactive(data);
+	raw_spin_unlock_irq(&ctx->lock);
+}
+
 #define EVENT_OWNER_KERNEL ((void *) -1)
 
 static bool is_kernel_event(struct perf_event *event)
@@ -1629,6 +1660,17 @@ struct remove_event {
 	bool detach_group;
 };
 
+static void ___perf_remove_from_context(void *info)
+{
+	struct remove_event *re = info;
+	struct perf_event *event = re->event;
+	struct perf_event_context *ctx = event->ctx;
+
+	if (re->detach_group)
+		perf_group_detach(event);
+	list_del_event(event, ctx);
+}
+
 /*
  * Cross CPU call to remove a performance event
  *
@@ -1656,7 +1698,6 @@ static int __perf_remove_from_context(void *info)
 	return 0;
 }
 
-
 /*
  * Remove the event from a task's (or a CPU's) list of events.
  *
@@ -1673,7 +1714,6 @@ static int __perf_remove_from_context(void *info)
 static void perf_remove_from_context(struct perf_event *event, bool detach_group)
 {
 	struct perf_event_context *ctx = event->ctx;
-	struct task_struct *task = ctx->task;
 	struct remove_event re = {
 		.event = event,
 		.detach_group = detach_group,
@@ -1681,44 +1721,8 @@ static void perf_remove_from_context(struct perf_event *event, bool detach_group
 
 	lockdep_assert_held(&ctx->mutex);
 
-	if (!task) {
-		/*
-		 * Per cpu events are removed via an smp call. The removal can
-		 * fail if the CPU is currently offline, but in that case we
-		 * already called __perf_remove_from_context from
-		 * perf_event_exit_cpu.
-		 */
-		cpu_function_call(event->cpu, __perf_remove_from_context, &re);
-		return;
-	}
-
-retry:
-	if (!task_function_call(task, __perf_remove_from_context, &re))
-		return;
-
-	raw_spin_lock_irq(&ctx->lock);
-	/*
-	 * If we failed to find a running task, but find the context active now
-	 * that we've acquired the ctx->lock, retry.
-	 */
-	if (ctx->is_active) {
-		raw_spin_unlock_irq(&ctx->lock);
-		/*
-		 * Reload the task pointer, it might have been changed by
-		 * a concurrent perf_event_context_sched_out().
-		 */
-		task = ctx->task;
-		goto retry;
-	}
-
-	/*
-	 * Since the task isn't running, its safe to remove the event, us
-	 * holding the ctx->lock ensures the task won't get scheduled in.
-	 */
-	if (detach_group)
-		perf_group_detach(event);
-	list_del_event(event, ctx);
-	raw_spin_unlock_irq(&ctx->lock);
+	event_function_call(event, __perf_remove_from_context,
+			    ___perf_remove_from_context, &re);
 }
 
 /*
@@ -2067,6 +2071,18 @@ static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
 	ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
 }
 
+static void ___perf_install_in_context(void *info)
+{
+	struct perf_event *event = info;
+	struct perf_event_context *ctx = event->ctx;
+
+	/*
+	 * Since the task isn't running, its safe to add the event, us holding
+	 * the ctx->lock ensures the task won't get scheduled in.
+	 */
+	add_event_to_ctx(event, ctx);
+}
+
 /*
  * Cross CPU call to install and enable a performance event
  *
@@ -2143,48 +2159,14 @@ perf_install_in_context(struct perf_event_context *ctx,
 			struct perf_event *event,
 			int cpu)
 {
-	struct task_struct *task = ctx->task;
-
 	lockdep_assert_held(&ctx->mutex);
 
 	event->ctx = ctx;
 	if (event->cpu != -1)
 		event->cpu = cpu;
 
-	if (!task) {
-		/*
-		 * Per cpu events are installed via an smp call and
-		 * the install is always successful.
-		 */
-		cpu_function_call(cpu, __perf_install_in_context, event);
-		return;
-	}
-
-retry:
-	if (!task_function_call(task, __perf_install_in_context, event))
-		return;
-
-	raw_spin_lock_irq(&ctx->lock);
-	/*
-	 * If we failed to find a running task, but find the context active now
-	 * that we've acquired the ctx->lock, retry.
-	 */
-	if (ctx->is_active) {
-		raw_spin_unlock_irq(&ctx->lock);
-		/*
-		 * Reload the task pointer, it might have been changed by
-		 * a concurrent perf_event_context_sched_out().
-		 */
-		task = ctx->task;
-		goto retry;
-	}
-
-	/*
-	 * Since the task isn't running, its safe to add the event, us holding
-	 * the ctx->lock ensures the task won't get scheduled in.
-	 */
-	add_event_to_ctx(event, ctx);
-	raw_spin_unlock_irq(&ctx->lock);
+	event_function_call(event, __perf_install_in_context,
+			    ___perf_install_in_context, event);
 }
 
 /*
@@ -4154,6 +4136,22 @@ struct period_event {
 	u64 value;
 };
 
+static void ___perf_event_period(void *info)
+{
+	struct period_event *pe = info;
+	struct perf_event *event = pe->event;
+	u64 value = pe->value;
+
+	if (event->attr.freq) {
+		event->attr.sample_freq = value;
+	} else {
+		event->attr.sample_period = value;
+		event->hw.sample_period = value;
+	}
+
+	local64_set(&event->hw.period_left, 0);
+}
+
 static int __perf_event_period(void *info)
 {
 	struct period_event *pe = info;
@@ -4190,8 +4188,6 @@ static int __perf_event_period(void *info)
 static int perf_event_period(struct perf_event *event, u64 __user *arg)
 {
 	struct period_event pe = { .event = event, };
-	struct perf_event_context *ctx = event->ctx;
-	struct task_struct *task;
 	u64 value;
 
 	if (!is_sampling_event(event))
@@ -4206,34 +4202,10 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
 		return -EINVAL;
 
-	task = ctx->task;
 	pe.value = value;
 
-	if (!task) {
-		cpu_function_call(event->cpu, __perf_event_period, &pe);
-		return 0;
-	}
-
-retry:
-	if (!task_function_call(task, __perf_event_period, &pe))
-		return 0;
-
-	raw_spin_lock_irq(&ctx->lock);
-	if (ctx->is_active) {
-		raw_spin_unlock_irq(&ctx->lock);
-		task = ctx->task;
-		goto retry;
-	}
-
-	if (event->attr.freq) {
-		event->attr.sample_freq = value;
-	} else {
-		event->attr.sample_period = value;
-		event->hw.sample_period = value;
-	}
-
-	local64_set(&event->hw.period_left, 0);
-	raw_spin_unlock_irq(&ctx->lock);
+	event_function_call(event, __perf_event_period,
+			    ___perf_event_period, &pe);
 
 	return 0;
 }
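
For readers skimming the conversion pattern: each call site now supplies an "active" handler, run via cross-CPU call while the context is scheduled in, and an "inactive" handler, run under ctx->lock once ctx->is_active is clear, while event_function_call() itself covers the per-CPU case, the retry loop, and the task-pointer reload. A minimal sketch of what a further (hypothetical) caller would look like follows; __frob_event(), ___frob_event() and frob_event() are made-up names for illustration and are not part of this patch:

/* Sketch only: a hypothetical caller of the new helper, mirroring the
 * __perf_remove_from_context()/___perf_remove_from_context() pair above. */

/* "active" side: cross-CPU call, runs with the context scheduled in;
 * returns 0 on success, matching the int (*active)(void *) signature. */
static int __frob_event(void *info)
{
	struct perf_event *event = info;

	/* ... act on live PMU state for @event here ... */
	return 0;
}

/* "inactive" side: invoked with ctx->lock held and !ctx->is_active,
 * so the owning task cannot be scheduled in underneath us. */
static void ___frob_event(void *info)
{
	struct perf_event *event = info;

	/* ... update the inactive event/context state directly ... */
}

static void frob_event(struct perf_event *event)
{
	lockdep_assert_held(&event->ctx->mutex);

	event_function_call(event, __frob_event, ___frob_event, event);
}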