author    Suzuki K Poulose <suzuki.poulose@arm.com>  2018-09-20 15:17:47 -0400
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2018-09-25 14:09:17 -0400
commit    5ecabe4a76e8cdb61fa3e24862d9ca240a1c4ddf
tree      376811345a0bb70fa463145dd845a9cab8db6e56 /drivers/hwtracing/coresight
parent    987d1e8dcd370d96029a3d76a0031b043c4a69ae
coresight: perf: Fix per cpu path management
We create a coresight trace path for each online CPU when we start the
event. We rely on the number of online CPUs, allocate an array of that
size to hold the paths, and then use the raw CPU id as the index into
the array. This is problematic because offline CPUs can cause us to
index beyond the actual array size (e.g., on a dual-core SMP system
with CPU0 offline, CPU1 would access one element past the end of the
array). The solution is to switch to a per-cpu array for holding the
path.

Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
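To make the failure mode concrete, here is a small, hypothetical userspace model of the old allocation scheme; none of these names come from the kernel source, and num_online/cpu merely stand in for num_online_cpus() and the raw CPU id:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Model: a dual-core system where CPU0 has been hotplugged off. */
	int num_online = 1;	/* what num_online_cpus() would report */
	int cpu = 1;		/* id of the only online CPU */

	/* Old scheme: one slot per *online* CPU... */
	void **path = calloc(num_online, sizeof(*path));
	if (!path)
		return 1;

	/* ...but indexed by the raw CPU id, which can exceed that count. */
	if (cpu >= num_online)
		printf("path[%d] is out of bounds for a %d-slot array\n",
		       cpu, num_online);

	free(path);
	return 0;
}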
Diffstat (limited to 'drivers/hwtracing/coresight')
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm-perf.c  |  55
1 file changed, 40 insertions(+), 15 deletions(-)
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 677695635211..6338dd180031 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -12,6 +12,7 @@
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/perf_event.h>
+#include <linux/percpu-defs.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
@@ -33,7 +34,7 @@ struct etm_event_data {
 	struct work_struct work;
 	cpumask_t mask;
 	void *snk_config;
-	struct list_head **path;
+	struct list_head * __percpu *path;
 };
 
 static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle);
@@ -61,6 +62,18 @@ static const struct attribute_group *etm_pmu_attr_groups[] = {
 	NULL,
 };
 
+static inline struct list_head **
+etm_event_cpu_path_ptr(struct etm_event_data *data, int cpu)
+{
+	return per_cpu_ptr(data->path, cpu);
+}
+
+static inline struct list_head *
+etm_event_cpu_path(struct etm_event_data *data, int cpu)
+{
+	return *etm_event_cpu_path_ptr(data, cpu);
+}
+
 static void etm_event_read(struct perf_event *event) {}
 
 static int etm_addr_filters_alloc(struct perf_event *event)
@@ -120,23 +133,26 @@ static void free_event_data(struct work_struct *work)
 	 */
 	if (event_data->snk_config) {
 		cpu = cpumask_first(mask);
-		sink = coresight_get_sink(event_data->path[cpu]);
+		sink = coresight_get_sink(etm_event_cpu_path(event_data, cpu));
 		if (sink_ops(sink)->free_buffer)
 			sink_ops(sink)->free_buffer(event_data->snk_config);
 	}
 
 	for_each_cpu(cpu, mask) {
-		if (!(IS_ERR_OR_NULL(event_data->path[cpu])))
-			coresight_release_path(event_data->path[cpu]);
+		struct list_head **ppath;
+
+		ppath = etm_event_cpu_path_ptr(event_data, cpu);
+		if (!(IS_ERR_OR_NULL(*ppath)))
+			coresight_release_path(*ppath);
+		*ppath = NULL;
 	}
 
-	kfree(event_data->path);
+	free_percpu(event_data->path);
 	kfree(event_data);
 }
 
 static void *alloc_event_data(int cpu)
 {
-	int size;
 	cpumask_t *mask;
 	struct etm_event_data *event_data;
 
@@ -147,7 +163,6 @@ static void *alloc_event_data(int cpu)
 
 	/* Make sure nothing disappears under us */
 	get_online_cpus();
-	size = num_online_cpus();
 
 	mask = &event_data->mask;
 	if (cpu != -1)
@@ -164,8 +179,8 @@ static void *alloc_event_data(int cpu)
 	 * unused memory when dealing with single CPU trace scenarios is small
 	 * compared to the cost of searching through an optimized array.
 	 */
-	event_data->path = kcalloc(size,
-				   sizeof(struct list_head *), GFP_KERNEL);
+	event_data->path = alloc_percpu(struct list_head *);
+
 	if (!event_data->path) {
 		kfree(event_data);
 		return NULL;
@@ -213,6 +228,7 @@ static void *etm_setup_aux(int event_cpu, void **pages,
 
 	/* Setup the path for each CPU in a trace session */
 	for_each_cpu(cpu, mask) {
+		struct list_head *path;
 		struct coresight_device *csdev;
 
 		csdev = per_cpu(csdev_src, cpu);
@@ -224,9 +240,11 @@ static void *etm_setup_aux(int event_cpu, void **pages,
 		 * list of devices from source to sink that can be
 		 * referenced later when the path is actually needed.
 		 */
-		event_data->path[cpu] = coresight_build_path(csdev, sink);
-		if (IS_ERR(event_data->path[cpu]))
+		path = coresight_build_path(csdev, sink);
+		if (IS_ERR(path))
 			goto err;
+
+		*etm_event_cpu_path_ptr(event_data, cpu) = path;
 	}
 
 	if (!sink_ops(sink)->alloc_buffer)
@@ -255,6 +273,7 @@ static void etm_event_start(struct perf_event *event, int flags)
 	struct etm_event_data *event_data;
 	struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
 	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
+	struct list_head *path;
 
 	if (!csdev)
 		goto fail;
@@ -267,8 +286,9 @@ static void etm_event_start(struct perf_event *event, int flags)
 	if (!event_data)
 		goto fail;
 
+	path = etm_event_cpu_path(event_data, cpu);
 	/* We need a sink, no need to continue without one */
-	sink = coresight_get_sink(event_data->path[cpu]);
+	sink = coresight_get_sink(path);
 	if (WARN_ON_ONCE(!sink || !sink_ops(sink)->set_buffer))
 		goto fail_end_stop;
 
@@ -278,7 +298,7 @@ static void etm_event_start(struct perf_event *event, int flags)
 		goto fail_end_stop;
 
 	/* Nothing will happen without a path */
-	if (coresight_enable_path(event_data->path[cpu], CS_MODE_PERF))
+	if (coresight_enable_path(path, CS_MODE_PERF))
 		goto fail_end_stop;
 
 	/* Tell the perf core the event is alive */
@@ -306,6 +326,7 @@ static void etm_event_stop(struct perf_event *event, int mode)
 	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
 	struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
 	struct etm_event_data *event_data = perf_get_aux(handle);
+	struct list_head *path;
 
 	if (event->hw.state == PERF_HES_STOPPED)
 		return;
@@ -313,7 +334,11 @@ static void etm_event_stop(struct perf_event *event, int mode)
 	if (!csdev)
 		return;
 
-	sink = coresight_get_sink(event_data->path[cpu]);
+	path = etm_event_cpu_path(event_data, cpu);
+	if (!path)
+		return;
+
+	sink = coresight_get_sink(path);
 	if (!sink)
 		return;
 
@@ -344,7 +369,7 @@ static void etm_event_stop(struct perf_event *event, int mode)
 	}
 
 	/* Disabling the path make its elements available to other sessions */
-	coresight_disable_path(event_data->path[cpu]);
+	coresight_disable_path(path);
 }
 
 static int etm_event_add(struct perf_event *event, int mode)
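For reference, the per-cpu pattern the fix adopts looks roughly like the sketch below. This is an illustrative, build-only skeleton, not code from the commit: the demo_* names are invented, while alloc_percpu(), free_percpu() and per_cpu_ptr() are the real kernel APIs the patch uses.

#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/list.h>

struct demo_data {
	/* One struct list_head pointer per possible CPU. */
	struct list_head * __percpu *path;
};

static struct demo_data *demo_alloc(void)
{
	struct demo_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return NULL;

	/*
	 * alloc_percpu() reserves a slot for every possible CPU, so
	 * indexing by CPU id is safe regardless of hotplug state.
	 */
	data->path = alloc_percpu(struct list_head *);
	if (!data->path) {
		kfree(data);
		return NULL;
	}
	return data;
}

static void demo_set_path(struct demo_data *data, int cpu,
			  struct list_head *path)
{
	*per_cpu_ptr(data->path, cpu) = path;
}

static struct list_head *demo_get_path(struct demo_data *data, int cpu)
{
	return *per_cpu_ptr(data->path, cpu);
}

static void demo_free(struct demo_data *data)
{
	free_percpu(data->path);
	kfree(data);
}

The per-cpu allocation provides exactly the guarantee the kcalloc(num_online_cpus(), ...) array lacked: its size tracked the online mask at allocation time, while the CPU ids used to index it did not.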