author     Frederic Weisbecker <fweisbec@gmail.com>  2010-01-17 05:59:13 -0500
committer  Frederic Weisbecker <fweisbec@gmail.com>  2010-01-17 07:08:57 -0500
commit     5b0311e1f2464547fc6f17a82d7ea2538c8c7a70 (patch)
tree       93590c875873a6d1d6431f4dd13fb52115da30cf /kernel
parent     42cce92f4ddfa41e2dfe26fdcad4887943c032f2 (diff)
perf: Allow pinned and flexible groups to be scheduled separately
Tune the scheduling helpers so that we can choose to schedule either
pinned and/or flexible groups from a context.

While at it, refactor the naming of these helpers a bit to make them
more consistent and flexible.

There is no (intended) change in scheduling behaviour in this patch.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
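As an illustration of what this split enables (not part of the patch itself): with the event_type_t flags and the task_ctx_sched_out()/task_ctx_sched_in() helpers introduced below, a caller could cycle only the flexible groups of the current task's context while leaving pinned groups on the PMU. The caller below is a hypothetical sketch under that assumption; like the helpers it uses, it would have to run with IRQs disabled.

/* Hypothetical caller, for illustration only -- not part of this patch. */
static void rotate_task_flexible_only(struct perf_event_context *ctx)
{
        /* Remove only the flexible groups from the PMU ... */
        task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
        /* ... rotate them ... */
        rotate_ctx(ctx);
        /* ... and schedule only the flexible groups back in. */
        task_ctx_sched_in(current, EVENT_FLEXIBLE);
}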
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/perf_event.c  137
1 file changed, 93 insertions(+), 44 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index c4e90b8cd60d..bfc4ee015c87 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1049,8 +1049,15 @@ static int perf_event_refresh(struct perf_event *event, int refresh)
         return 0;
 }
 
-static void __perf_event_sched_out(struct perf_event_context *ctx,
-                                   struct perf_cpu_context *cpuctx)
+enum event_type_t {
+        EVENT_FLEXIBLE = 0x1,
+        EVENT_PINNED = 0x2,
+        EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
+};
+
+static void ctx_sched_out(struct perf_event_context *ctx,
+                          struct perf_cpu_context *cpuctx,
+                          enum event_type_t event_type)
 {
         struct perf_event *event;
 
@@ -1061,13 +1068,18 @@ static void __perf_event_sched_out(struct perf_event_context *ctx,
         update_context_time(ctx);
 
         perf_disable();
-        if (ctx->nr_active) {
+        if (!ctx->nr_active)
+                goto out_enable;
+
+        if (event_type & EVENT_PINNED)
                 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
                         group_sched_out(event, cpuctx, ctx);
 
+        if (event_type & EVENT_FLEXIBLE)
                 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
                         group_sched_out(event, cpuctx, ctx);
-        }
+
+ out_enable:
         perf_enable();
  out:
         raw_spin_unlock(&ctx->lock);
@@ -1229,15 +1241,13 @@ void perf_event_task_sched_out(struct task_struct *task,
         rcu_read_unlock();
 
         if (do_switch) {
-                __perf_event_sched_out(ctx, cpuctx);
+                ctx_sched_out(ctx, cpuctx, EVENT_ALL);
                 cpuctx->task_ctx = NULL;
         }
 }
 
-/*
- * Called with IRQs disabled
- */
-static void __perf_event_task_sched_out(struct perf_event_context *ctx)
+static void task_ctx_sched_out(struct perf_event_context *ctx,
+                               enum event_type_t event_type)
 {
         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 
@@ -1247,39 +1257,34 @@ static void __perf_event_task_sched_out(struct perf_event_context *ctx)
         if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
                 return;
 
-        __perf_event_sched_out(ctx, cpuctx);
+        ctx_sched_out(ctx, cpuctx, event_type);
         cpuctx->task_ctx = NULL;
 }
 
 /*
  * Called with IRQs disabled
  */
-static void perf_event_cpu_sched_out(struct perf_cpu_context *cpuctx)
+static void __perf_event_task_sched_out(struct perf_event_context *ctx)
 {
-        __perf_event_sched_out(&cpuctx->ctx, cpuctx);
+        task_ctx_sched_out(ctx, EVENT_ALL);
+}
+
+/*
+ * Called with IRQs disabled
+ */
+static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
+                              enum event_type_t event_type)
+{
+        ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
 }
 
 static void
-__perf_event_sched_in(struct perf_event_context *ctx,
-                      struct perf_cpu_context *cpuctx)
+ctx_pinned_sched_in(struct perf_event_context *ctx,
+                    struct perf_cpu_context *cpuctx,
+                    int cpu)
 {
-        int cpu = smp_processor_id();
         struct perf_event *event;
-        int can_add_hw = 1;
-
-        raw_spin_lock(&ctx->lock);
-        ctx->is_active = 1;
-        if (likely(!ctx->nr_events))
-                goto out;
-
-        ctx->timestamp = perf_clock();
-
-        perf_disable();
 
-        /*
-         * First go through the list and put on any pinned groups
-         * in order to give them the best chance of going on.
-         */
         list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
                 if (event->state <= PERF_EVENT_STATE_OFF)
                         continue;
@@ -1298,6 +1303,15 @@ __perf_event_sched_in(struct perf_event_context *ctx,
                         event->state = PERF_EVENT_STATE_ERROR;
                 }
         }
+}
+
+static void
+ctx_flexible_sched_in(struct perf_event_context *ctx,
+                      struct perf_cpu_context *cpuctx,
+                      int cpu)
+{
+        struct perf_event *event;
+        int can_add_hw = 1;
 
         list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
                 /* Ignore events in OFF or ERROR state */
@@ -1314,11 +1328,53 @@ __perf_event_sched_in(struct perf_event_context *ctx,
                 if (group_sched_in(event, cpuctx, ctx, cpu))
                         can_add_hw = 0;
         }
+}
+
+static void
+ctx_sched_in(struct perf_event_context *ctx,
+             struct perf_cpu_context *cpuctx,
+             enum event_type_t event_type)
+{
+        int cpu = smp_processor_id();
+
+        raw_spin_lock(&ctx->lock);
+        ctx->is_active = 1;
+        if (likely(!ctx->nr_events))
+                goto out;
+
+        ctx->timestamp = perf_clock();
+
+        perf_disable();
+
+        /*
+         * First go through the list and put on any pinned groups
+         * in order to give them the best chance of going on.
+         */
+        if (event_type & EVENT_PINNED)
+                ctx_pinned_sched_in(ctx, cpuctx, cpu);
+
+        /* Then walk through the lower prio flexible groups */
+        if (event_type & EVENT_FLEXIBLE)
+                ctx_flexible_sched_in(ctx, cpuctx, cpu);
+
         perf_enable();
  out:
         raw_spin_unlock(&ctx->lock);
 }
 
+static void task_ctx_sched_in(struct task_struct *task,
+                              enum event_type_t event_type)
+{
+        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+        struct perf_event_context *ctx = task->perf_event_ctxp;
+
+        if (likely(!ctx))
+                return;
+        if (cpuctx->task_ctx == ctx)
+                return;
+        ctx_sched_in(ctx, cpuctx, event_type);
+        cpuctx->task_ctx = ctx;
+}
 /*
  * Called from scheduler to add the events of the current task
  * with interrupts disabled.
@@ -1332,22 +1388,15 @@ __perf_event_sched_in(struct perf_event_context *ctx,
  */
 void perf_event_task_sched_in(struct task_struct *task)
 {
-        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-        struct perf_event_context *ctx = task->perf_event_ctxp;
-
-        if (likely(!ctx))
-                return;
-        if (cpuctx->task_ctx == ctx)
-                return;
-        __perf_event_sched_in(ctx, cpuctx);
-        cpuctx->task_ctx = ctx;
+        task_ctx_sched_in(task, EVENT_ALL);
 }
 
-static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx)
+static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
+                             enum event_type_t event_type)
 {
         struct perf_event_context *ctx = &cpuctx->ctx;
 
-        __perf_event_sched_in(ctx, cpuctx);
+        ctx_sched_in(ctx, cpuctx, event_type);
 }
 
 #define MAX_INTERRUPTS (~0ULL)
@@ -1476,17 +1525,17 @@ void perf_event_task_tick(struct task_struct *curr)
         if (ctx)
                 perf_ctx_adjust_freq(ctx);
 
-        perf_event_cpu_sched_out(cpuctx);
+        cpu_ctx_sched_out(cpuctx, EVENT_ALL);
         if (ctx)
-                __perf_event_task_sched_out(ctx);
+                task_ctx_sched_out(ctx, EVENT_ALL);
 
         rotate_ctx(&cpuctx->ctx);
         if (ctx)
                 rotate_ctx(ctx);
 
-        perf_event_cpu_sched_in(cpuctx);
+        cpu_ctx_sched_in(cpuctx, EVENT_ALL);
         if (ctx)
-                perf_event_task_sched_in(curr);
+                task_ctx_sched_in(curr, EVENT_ALL);
 }
 
 static int event_enable_on_exec(struct perf_event *event,