author    Peter Zijlstra <peterz@infradead.org>    2010-02-11 07:21:58 -0500
committer Ingo Molnar <mingo@elte.hu>              2010-02-26 04:56:53 -0500
commit    6e37738a2fac964583debe91099bc3248554f6e5 (patch)
tree      1f1bd19fcfcd328be139a796e2016877814436ad /kernel
parent    38331f62c20456454eed9ebea2525f072c6f1d2e (diff)
perf_events: Simplify code by removing cpu argument to hw_perf_group_sched_in()
Since the cpu argument to hw_perf_group_sched_in() is always
smp_processor_id(), simplify the code a little by removing this argument
and using the current cpu where needed.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: David Miller <davem@davemloft.net>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <1265890918.5396.3.camel@laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
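hw_perf_group_sched_in() is a __weak hook, so any override of it elsewhere in
the tree (the diffstat below is limited to kernel/, so such overrides are not
shown here) must adopt the same three-argument signature. A rough,
purely illustrative sketch of an override after this change follows; the
function name and arguments are the hook's contract from the patch, while
the body is invented for illustration:

int hw_perf_group_sched_in(struct perf_event *group_leader,
			   struct perf_cpu_context *cpuctx,
			   struct perf_event_context *ctx)
{
	/*
	 * Illustrative only: the cpu is no longer passed in. These paths
	 * run with ctx->lock held, so preemption is off and
	 * smp_processor_id() stably names the CPU whose PMU the group
	 * should be programmed onto.
	 */
	int cpu = smp_processor_id();

	/* ... try to schedule group_leader and its siblings on 'cpu' ... */

	/*
	 * Per the caller in group_sched_in(): return <0 on error, >0 if the
	 * whole group was scheduled here, or 0 to fall back to scheduling
	 * each event individually.
	 */
	return 0;
}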
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/perf_event.c  45
1 file changed, 18 insertions(+), 27 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index fb4e56eb58f4..05b6c6b825e3 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -103,7 +103,7 @@ void __weak hw_perf_event_setup_offline(int cpu) { barrier(); }
 int __weak
 hw_perf_group_sched_in(struct perf_event *group_leader,
 	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx, int cpu)
+	       struct perf_event_context *ctx)
 {
 	return 0;
 }
@@ -633,14 +633,13 @@ void perf_event_disable(struct perf_event *event)
 static int
 event_sched_in(struct perf_event *event,
 		 struct perf_cpu_context *cpuctx,
-		 struct perf_event_context *ctx,
-		 int cpu)
+		 struct perf_event_context *ctx)
 {
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
 
 	event->state = PERF_EVENT_STATE_ACTIVE;
-	event->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
+	event->oncpu = smp_processor_id();
 	/*
 	 * The new state must be visible before we turn it on in the hardware:
 	 */
@@ -667,8 +666,7 @@ event_sched_in(struct perf_event *event,
 static int
 group_sched_in(struct perf_event *group_event,
 	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx,
-	       int cpu)
+	       struct perf_event_context *ctx)
 {
 	struct perf_event *event, *partial_group;
 	int ret;
@@ -676,18 +674,18 @@ group_sched_in(struct perf_event *group_event,
 	if (group_event->state == PERF_EVENT_STATE_OFF)
 		return 0;
 
-	ret = hw_perf_group_sched_in(group_event, cpuctx, ctx, cpu);
+	ret = hw_perf_group_sched_in(group_event, cpuctx, ctx);
 	if (ret)
 		return ret < 0 ? ret : 0;
 
-	if (event_sched_in(group_event, cpuctx, ctx, cpu))
+	if (event_sched_in(group_event, cpuctx, ctx))
 		return -EAGAIN;
 
 	/*
 	 * Schedule in siblings as one group (if any):
 	 */
 	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
-		if (event_sched_in(event, cpuctx, ctx, cpu)) {
+		if (event_sched_in(event, cpuctx, ctx)) {
 			partial_group = event;
 			goto group_error;
 		}
@@ -761,7 +759,6 @@ static void __perf_install_in_context(void *info)
 	struct perf_event *event = info;
 	struct perf_event_context *ctx = event->ctx;
 	struct perf_event *leader = event->group_leader;
-	int cpu = smp_processor_id();
 	int err;
 
 	/*
@@ -808,7 +805,7 @@ static void __perf_install_in_context(void *info)
 	if (!group_can_go_on(event, cpuctx, 1))
 		err = -EEXIST;
 	else
-		err = event_sched_in(event, cpuctx, ctx, cpu);
+		err = event_sched_in(event, cpuctx, ctx);
 
 	if (err) {
 		/*
@@ -950,11 +947,9 @@ static void __perf_event_enable(void *info)
 	} else {
 		perf_disable();
 		if (event == leader)
-			err = group_sched_in(event, cpuctx, ctx,
-					     smp_processor_id());
+			err = group_sched_in(event, cpuctx, ctx);
 		else
-			err = event_sched_in(event, cpuctx, ctx,
-					     smp_processor_id());
+			err = event_sched_in(event, cpuctx, ctx);
 		perf_enable();
 	}
 
@@ -1281,19 +1276,18 @@ static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
 
 static void
 ctx_pinned_sched_in(struct perf_event_context *ctx,
-		    struct perf_cpu_context *cpuctx,
-		    int cpu)
+		    struct perf_cpu_context *cpuctx)
 {
 	struct perf_event *event;
 
 	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
 		if (event->state <= PERF_EVENT_STATE_OFF)
 			continue;
-		if (event->cpu != -1 && event->cpu != cpu)
+		if (event->cpu != -1 && event->cpu != smp_processor_id())
 			continue;
 
 		if (group_can_go_on(event, cpuctx, 1))
-			group_sched_in(event, cpuctx, ctx, cpu);
+			group_sched_in(event, cpuctx, ctx);
 
 		/*
 		 * If this pinned group hasn't been scheduled,
@@ -1308,8 +1302,7 @@ ctx_pinned_sched_in(struct perf_event_context *ctx,
 
 static void
 ctx_flexible_sched_in(struct perf_event_context *ctx,
-		      struct perf_cpu_context *cpuctx,
-		      int cpu)
+		      struct perf_cpu_context *cpuctx)
 {
 	struct perf_event *event;
 	int can_add_hw = 1;
@@ -1322,11 +1315,11 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
 		 * Listen to the 'cpu' scheduling filter constraint
 		 * of events:
 		 */
-		if (event->cpu != -1 && event->cpu != cpu)
+		if (event->cpu != -1 && event->cpu != smp_processor_id())
 			continue;
 
 		if (group_can_go_on(event, cpuctx, can_add_hw))
-			if (group_sched_in(event, cpuctx, ctx, cpu))
+			if (group_sched_in(event, cpuctx, ctx))
 				can_add_hw = 0;
 	}
 }
@@ -1336,8 +1329,6 @@ ctx_sched_in(struct perf_event_context *ctx,
 	     struct perf_cpu_context *cpuctx,
 	     enum event_type_t event_type)
 {
-	int cpu = smp_processor_id();
-
 	raw_spin_lock(&ctx->lock);
 	ctx->is_active = 1;
 	if (likely(!ctx->nr_events))
@@ -1352,11 +1343,11 @@ ctx_sched_in(struct perf_event_context *ctx,
 	 * in order to give them the best chance of going on.
 	 */
 	if (event_type & EVENT_PINNED)
-		ctx_pinned_sched_in(ctx, cpuctx, cpu);
+		ctx_pinned_sched_in(ctx, cpuctx);
 
 	/* Then walk through the lower prio flexible groups */
 	if (event_type & EVENT_FLEXIBLE)
-		ctx_flexible_sched_in(ctx, cpuctx, cpu);
+		ctx_flexible_sched_in(ctx, cpuctx);
 
 	perf_enable();
  out: