 kernel/events/core.c | 80 +++++++++++++++++++++++++++++++++++++++---------------------------------------------
 1 file changed, 35 insertions(+), 45 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 66b3dd809409..60b333ae0bcf 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1469,8 +1469,12 @@ static void add_event_to_ctx(struct perf_event *event,
 	event->tstamp_stopped = tstamp;
 }
 
-static void perf_event_context_sched_in(struct perf_event_context *ctx,
-					struct task_struct *tsk);
+static void task_ctx_sched_out(struct perf_event_context *ctx);
+static void
+ctx_sched_in(struct perf_event_context *ctx,
+	     struct perf_cpu_context *cpuctx,
+	     enum event_type_t event_type,
+	     struct task_struct *task);
 
 /*
  * Cross CPU call to install and enable a performance event
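The forward declaration of perf_event_context_sched_in() is dropped because the rewritten __perf_install_in_context() below no longer calls it; instead it now uses task_ctx_sched_out() and ctx_sched_in(), which are defined further down in core.c, hence the new declarations. Schematically, the resulting ordering in the file looks like this (a sketch only, bodies elided):

/* declarations added by this patch; definitions follow further down */
static void task_ctx_sched_out(struct perf_event_context *ctx);
static void
ctx_sched_in(struct perf_event_context *ctx,
	     struct perf_cpu_context *cpuctx,
	     enum event_type_t event_type,
	     struct task_struct *task);

static int __perf_install_in_context(void *info)
{
	/* ... can now call task_ctx_sched_out() and ctx_sched_in() ... */
	return 0;
}

/* ... actual definitions of task_ctx_sched_out() / ctx_sched_in() ... */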
@@ -1481,20 +1485,31 @@ static int __perf_install_in_context(void *info)
 {
 	struct perf_event *event = info;
 	struct perf_event_context *ctx = event->ctx;
-	struct perf_event *leader = event->group_leader;
 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
-	int err;
+	struct perf_event_context *task_ctx = cpuctx->task_ctx;
+	struct task_struct *task = current;
+
+	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
+	perf_pmu_disable(cpuctx->ctx.pmu);
 
 	/*
-	 * In case we're installing a new context to an already running task,
-	 * could also happen before perf_event_task_sched_in() on architectures
-	 * which do context switches with IRQs enabled.
+	 * If there was an active task_ctx schedule it out.
 	 */
-	if (ctx->task && !cpuctx->task_ctx)
-		perf_event_context_sched_in(ctx, ctx->task);
+	if (task_ctx) {
+		task_ctx_sched_out(task_ctx);
+		/*
+		 * If the context we're installing events in is not the
+		 * active task_ctx, flip them.
+		 */
+		if (ctx->task && task_ctx != ctx) {
+			raw_spin_unlock(&cpuctx->ctx.lock);
+			raw_spin_lock(&ctx->lock);
+			cpuctx->task_ctx = task_ctx = ctx;
+		}
+		task = task_ctx->task;
+	}
+	cpu_ctx_sched_out(cpuctx, EVENT_ALL);
 
-	raw_spin_lock(&ctx->lock);
-	ctx->is_active = 1;
 	update_context_time(ctx);
 	/*
 	 * update cgrp time only if current cgrp
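The least obvious part of the new prologue is the context flip. After perf_ctx_lock() and perf_pmu_disable(), any active task context is scheduled out; if the event is being installed into a task context other than the active one, the added code switches locks and makes the target context the active task_ctx before everything is scheduled back in. Annotated (the comments are editorial, not part of the patch):

	if (task_ctx) {
		/* take the currently active task context off the PMU */
		task_ctx_sched_out(task_ctx);
		if (ctx->task && task_ctx != ctx) {
			/* release cpuctx->ctx.lock ... */
			raw_spin_unlock(&cpuctx->ctx.lock);
			/* ... acquire the lock of the context being installed into ... */
			raw_spin_lock(&ctx->lock);
			/* ... and make that context the active task_ctx */
			cpuctx->task_ctx = task_ctx = ctx;
		}
		task = task_ctx->task;
	}
	/* then take the CPU context's events off the PMU as well */
	cpu_ctx_sched_out(cpuctx, EVENT_ALL);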
@@ -1505,43 +1520,18 @@ static int __perf_install_in_context(void *info)
 
 	add_event_to_ctx(event, ctx);
 
-	if (!event_filter_match(event))
-		goto unlock;
-
-	/*
-	 * Don't put the event on if it is disabled or if
-	 * it is in a group and the group isn't on.
-	 */
-	if (event->state != PERF_EVENT_STATE_INACTIVE ||
-	    (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
-		goto unlock;
-
 	/*
-	 * An exclusive event can't go on if there are already active
-	 * hardware events, and no hardware event can go on if there
-	 * is already an exclusive event on.
+	 * Schedule everything back in
 	 */
-	if (!group_can_go_on(event, cpuctx, 1))
-		err = -EEXIST;
-	else
-		err = event_sched_in(event, cpuctx, ctx);
-
-	if (err) {
-		/*
-		 * This event couldn't go on. If it is in a group
-		 * then we have to pull the whole group off.
-		 * If the event group is pinned then put it in error state.
-		 */
-		if (leader != event)
-			group_sched_out(leader, cpuctx, ctx);
-		if (leader->attr.pinned) {
-			update_group_times(leader);
-			leader->state = PERF_EVENT_STATE_ERROR;
-		}
-	}
+	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
+	if (task_ctx)
+		ctx_sched_in(task_ctx, cpuctx, EVENT_PINNED, task);
+	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
+	if (task_ctx)
+		ctx_sched_in(task_ctx, cpuctx, EVENT_FLEXIBLE, task);
 
-unlock:
-	raw_spin_unlock(&ctx->lock);
+	perf_pmu_enable(cpuctx->ctx.pmu);
+	perf_ctx_unlock(cpuctx, task_ctx);
 
 	return 0;
 }
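For reference, this is roughly how __perf_install_in_context() reads with the patch applied, stitched together from the hunks above; the cgroup-time lines between update_context_time() and add_event_to_ctx() are untouched by the patch and elided here:

static int __perf_install_in_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	struct perf_event_context *task_ctx = cpuctx->task_ctx;
	struct task_struct *task = current;

	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
	perf_pmu_disable(cpuctx->ctx.pmu);

	/*
	 * If there was an active task_ctx schedule it out.
	 */
	if (task_ctx) {
		task_ctx_sched_out(task_ctx);
		/*
		 * If the context we're installing events in is not the
		 * active task_ctx, flip them.
		 */
		if (ctx->task && task_ctx != ctx) {
			raw_spin_unlock(&cpuctx->ctx.lock);
			raw_spin_lock(&ctx->lock);
			cpuctx->task_ctx = task_ctx = ctx;
		}
		task = task_ctx->task;
	}
	cpu_ctx_sched_out(cpuctx, EVENT_ALL);

	update_context_time(ctx);
	/* ... cgroup time update, unchanged by this patch ... */

	add_event_to_ctx(event, ctx);

	/*
	 * Schedule everything back in
	 */
	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
	if (task_ctx)
		ctx_sched_in(task_ctx, cpuctx, EVENT_PINNED, task);
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
	if (task_ctx)
		ctx_sched_in(task_ctx, cpuctx, EVENT_FLEXIBLE, task);

	perf_pmu_enable(cpuctx->ctx.pmu);
	perf_ctx_unlock(cpuctx, task_ctx);

	return 0;
}

Note the sched-in order: pinned groups go back first (CPU context, then task context), followed by flexible groups, matching perf's usual preference for pinned events when contexts are scheduled in.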