author | Ingo Molnar <mingo@elte.hu> | 2010-01-18 02:56:41 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2010-01-18 02:56:41 -0500 |
commit | f426a7e02918e2e992b28adeb02e5a0ab39a7a25 (patch) | |
tree | 40047bb25f52bdf3138a9dea399ef82894c880ed | |
parent | 231e36f4d2e63dd770db80b9f5113310c2bcfcfd (diff) | |
parent | 329c0e012b99fa2325a0be205c052e4aba690f16 (diff) | |
Merge branch 'perf/scheduling' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/random-tracing into perf/core
-rw-r--r-- | kernel/perf_event.c | 149 |
1 file changed, 109 insertions(+), 40 deletions(-)
```diff
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index eae6ff693604..edc46b92b508 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1049,8 +1049,15 @@ static int perf_event_refresh(struct perf_event *event, int refresh)
 	return 0;
 }
 
-void __perf_event_sched_out(struct perf_event_context *ctx,
-			    struct perf_cpu_context *cpuctx)
+enum event_type_t {
+	EVENT_FLEXIBLE = 0x1,
+	EVENT_PINNED = 0x2,
+	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
+};
+
+static void ctx_sched_out(struct perf_event_context *ctx,
+			  struct perf_cpu_context *cpuctx,
+			  enum event_type_t event_type)
 {
 	struct perf_event *event;
 
@@ -1061,13 +1068,18 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
 	update_context_time(ctx);
 
 	perf_disable();
-	if (ctx->nr_active) {
+	if (!ctx->nr_active)
+		goto out_enable;
+
+	if (event_type & EVENT_PINNED)
 		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
 			group_sched_out(event, cpuctx, ctx);
 
+	if (event_type & EVENT_FLEXIBLE)
 		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
 			group_sched_out(event, cpuctx, ctx);
-	}
+
+ out_enable:
 	perf_enable();
  out:
 	raw_spin_unlock(&ctx->lock);
@@ -1229,15 +1241,13 @@ void perf_event_task_sched_out(struct task_struct *task,
 	rcu_read_unlock();
 
 	if (do_switch) {
-		__perf_event_sched_out(ctx, cpuctx);
+		ctx_sched_out(ctx, cpuctx, EVENT_ALL);
 		cpuctx->task_ctx = NULL;
 	}
 }
 
-/*
- * Called with IRQs disabled
- */
-static void __perf_event_task_sched_out(struct perf_event_context *ctx)
+static void task_ctx_sched_out(struct perf_event_context *ctx,
+			       enum event_type_t event_type)
 {
 	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 
@@ -1247,39 +1257,34 @@ static void __perf_event_task_sched_out(struct perf_event_context *ctx)
 	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
 		return;
 
-	__perf_event_sched_out(ctx, cpuctx);
+	ctx_sched_out(ctx, cpuctx, event_type);
 	cpuctx->task_ctx = NULL;
 }
 
 /*
  * Called with IRQs disabled
  */
-static void perf_event_cpu_sched_out(struct perf_cpu_context *cpuctx)
+static void __perf_event_task_sched_out(struct perf_event_context *ctx)
 {
-	__perf_event_sched_out(&cpuctx->ctx, cpuctx);
+	task_ctx_sched_out(ctx, EVENT_ALL);
+}
+
+/*
+ * Called with IRQs disabled
+ */
+static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
+			      enum event_type_t event_type)
+{
+	ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
 }
 
 static void
-__perf_event_sched_in(struct perf_event_context *ctx,
-		      struct perf_cpu_context *cpuctx)
+ctx_pinned_sched_in(struct perf_event_context *ctx,
+		    struct perf_cpu_context *cpuctx,
+		    int cpu)
 {
-	int cpu = smp_processor_id();
 	struct perf_event *event;
-	int can_add_hw = 1;
 
-	raw_spin_lock(&ctx->lock);
-	ctx->is_active = 1;
-	if (likely(!ctx->nr_events))
-		goto out;
-
-	ctx->timestamp = perf_clock();
-
-	perf_disable();
-
-	/*
-	 * First go through the list and put on any pinned groups
-	 * in order to give them the best chance of going on.
-	 */
 	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
 		if (event->state <= PERF_EVENT_STATE_OFF)
 			continue;
@@ -1298,6 +1303,15 @@ __perf_event_sched_in(struct perf_event_context *ctx,
 				event->state = PERF_EVENT_STATE_ERROR;
 		}
 	}
+}
+
+static void
+ctx_flexible_sched_in(struct perf_event_context *ctx,
+		      struct perf_cpu_context *cpuctx,
+		      int cpu)
+{
+	struct perf_event *event;
+	int can_add_hw = 1;
 
 	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
 		/* Ignore events in OFF or ERROR state */
@@ -1314,11 +1328,61 @@ __perf_event_sched_in(struct perf_event_context *ctx,
 		if (group_sched_in(event, cpuctx, ctx, cpu))
 			can_add_hw = 0;
 	}
+}
+
+static void
+ctx_sched_in(struct perf_event_context *ctx,
+	     struct perf_cpu_context *cpuctx,
+	     enum event_type_t event_type)
+{
+	int cpu = smp_processor_id();
+
+	raw_spin_lock(&ctx->lock);
+	ctx->is_active = 1;
+	if (likely(!ctx->nr_events))
+		goto out;
+
+	ctx->timestamp = perf_clock();
+
+	perf_disable();
+
+	/*
+	 * First go through the list and put on any pinned groups
+	 * in order to give them the best chance of going on.
+	 */
+	if (event_type & EVENT_PINNED)
+		ctx_pinned_sched_in(ctx, cpuctx, cpu);
+
+	/* Then walk through the lower prio flexible groups */
+	if (event_type & EVENT_FLEXIBLE)
+		ctx_flexible_sched_in(ctx, cpuctx, cpu);
+
 	perf_enable();
  out:
 	raw_spin_unlock(&ctx->lock);
 }
 
+static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
+			     enum event_type_t event_type)
+{
+	struct perf_event_context *ctx = &cpuctx->ctx;
+
+	ctx_sched_in(ctx, cpuctx, event_type);
+}
+
+static void task_ctx_sched_in(struct task_struct *task,
+			      enum event_type_t event_type)
+{
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+	struct perf_event_context *ctx = task->perf_event_ctxp;
+
+	if (likely(!ctx))
+		return;
+	if (cpuctx->task_ctx == ctx)
+		return;
+	ctx_sched_in(ctx, cpuctx, event_type);
+	cpuctx->task_ctx = ctx;
+}
 /*
  * Called from scheduler to add the events of the current task
  * with interrupts disabled.
@@ -1337,17 +1401,22 @@ void perf_event_task_sched_in(struct task_struct *task)
 
 	if (likely(!ctx))
 		return;
+
 	if (cpuctx->task_ctx == ctx)
 		return;
-	__perf_event_sched_in(ctx, cpuctx);
-	cpuctx->task_ctx = ctx;
-}
 
-static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx)
-{
-	struct perf_event_context *ctx = &cpuctx->ctx;
+	/*
+	 * We want to keep the following priority order:
+	 * cpu pinned (that don't need to move), task pinned,
+	 * cpu flexible, task flexible.
+	 */
+	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 
-	__perf_event_sched_in(ctx, cpuctx);
+	ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
+	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
+	ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
+
+	cpuctx->task_ctx = ctx;
 }
 
 #define MAX_INTERRUPTS (~0ULL)
@@ -1476,17 +1545,17 @@ void perf_event_task_tick(struct task_struct *curr)
 	if (ctx)
 		perf_ctx_adjust_freq(ctx);
 
-	perf_event_cpu_sched_out(cpuctx);
+	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
-		__perf_event_task_sched_out(ctx);
+		task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
 
 	rotate_ctx(&cpuctx->ctx);
 	if (ctx)
 		rotate_ctx(ctx);
 
-	perf_event_cpu_sched_in(cpuctx);
+	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
-		perf_event_task_sched_in(curr);
+		task_ctx_sched_in(curr, EVENT_FLEXIBLE);
 }
 
 static int event_enable_on_exec(struct perf_event *event,
```
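
For readers skimming the diff, here is a small standalone sketch of the `event_type_t` bitmask pattern the merged patches introduce: the same sched-in/out path can be restricted to pinned or flexible groups, which is what lets `perf_event_task_sched_in()` interleave contexts in the priority order cpu pinned, task pinned, cpu flexible, task flexible. This is not code from the kernel tree; all `demo_*` names are made up for illustration, and only the flag handling mirrors the patch.

```c
/*
 * Illustrative sketch only (not kernel code): a user-space model of the
 * EVENT_PINNED/EVENT_FLEXIBLE selector, showing how one routine can be
 * asked to (un)schedule a single class of event groups.
 */
#include <stdio.h>

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED   = 0x2,
	EVENT_ALL      = EVENT_FLEXIBLE | EVENT_PINNED,
};

struct demo_ctx {
	const char *name;	/* "cpu" or "task" context in this model */
};

/* Schedule in only the group classes selected by @event_type. */
static void demo_sched_in(struct demo_ctx *ctx, enum event_type_t event_type)
{
	if (event_type & EVENT_PINNED)
		printf("%s: schedule pinned groups\n", ctx->name);
	if (event_type & EVENT_FLEXIBLE)
		printf("%s: schedule flexible groups\n", ctx->name);
}

/* Schedule out only the group classes selected by @event_type. */
static void demo_sched_out(struct demo_ctx *ctx, enum event_type_t event_type)
{
	if (event_type & EVENT_PINNED)
		printf("%s: unschedule pinned groups\n", ctx->name);
	if (event_type & EVENT_FLEXIBLE)
		printf("%s: unschedule flexible groups\n", ctx->name);
}

int main(void)
{
	struct demo_ctx cpu = { "cpu" }, task = { "task" };

	/*
	 * Mirror of the ordering in perf_event_task_sched_in() above:
	 * cpu pinned stays put, then task pinned, cpu flexible,
	 * task flexible.
	 */
	demo_sched_out(&cpu, EVENT_FLEXIBLE);	/* make room for task pinned */
	demo_sched_in(&task, EVENT_PINNED);
	demo_sched_in(&cpu, EVENT_FLEXIBLE);
	demo_sched_in(&task, EVENT_FLEXIBLE);
	return 0;
}
```

The same selector explains the `perf_event_task_tick()` hunk: only `EVENT_FLEXIBLE` is switched out and back in around `rotate_ctx()`, so flexible groups rotate while pinned groups stay on the PMU.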