path: root/kernel/perf_event.c
author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-03-08 07:51:20 -0500
committer	Ingo Molnar <mingo@elte.hu>	2010-03-10 07:22:36 -0500
commit	d4944a06666054707d23e11888e480af239e5abf (patch)
tree	58d8e847f198c29a2a2ddee60fe862f8973794d1	/kernel/perf_event.c
parent	f3d46b2e6fa57547f9884330798792afc83f4b04 (diff)
perf: Provide better condition for event rotation
Try to avoid useless rotation and PMU disables.

[ Could be improved by keeping a nr_runnable count to better account
  for the < PERF_EVENT_STATE_INACTIVE counters ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: paulus@samba.org
Cc: eranian@google.com
Cc: robert.richter@amd.com
Cc: fweisbec@gmail.com
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
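In short, rotation and the surrounding PMU disable/enable are only worth doing when some context has flexible events that did not all fit on the PMU. A minimal illustrative sketch of that condition, using a hypothetical helper name (the patch itself open-codes this test for the CPU and task contexts in perf_event_task_tick(), see the diff below):

	/* Hypothetical helper, for illustration only: rotation is useful
	 * only when a context has more events than are currently active,
	 * i.e. something is waiting for a turn on the PMU.
	 */
	static int ctx_wants_rotation(struct perf_event_context *ctx)
	{
		return ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active;
	}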
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--	kernel/perf_event.c	21
1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index d8108465397d..52c69a34d697 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1524,12 +1524,15 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 		 */
 		if (interrupts == MAX_INTERRUPTS) {
 			perf_log_throttle(event, 1);
+			perf_disable();
 			event->pmu->unthrottle(event);
+			perf_enable();
 		}
 
 		if (!event->attr.freq || !event->attr.sample_freq)
 			continue;
 
+		perf_disable();
 		event->pmu->read(event);
 		now = atomic64_read(&event->count);
 		delta = now - hwc->freq_count_stamp;
@@ -1537,6 +1540,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 
 		if (delta > 0)
 			perf_adjust_period(event, TICK_NSEC, delta);
+		perf_enable();
 	}
 	raw_spin_unlock(&ctx->lock);
 }
@@ -1546,9 +1550,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
  */
 static void rotate_ctx(struct perf_event_context *ctx)
 {
-	if (!ctx->nr_events)
-		return;
-
 	raw_spin_lock(&ctx->lock);
 
 	/* Rotate the first entry last of non-pinned groups */
@@ -1561,19 +1562,28 @@ void perf_event_task_tick(struct task_struct *curr)
 {
 	struct perf_cpu_context *cpuctx;
 	struct perf_event_context *ctx;
+	int rotate = 0;
 
 	if (!atomic_read(&nr_events))
 		return;
 
 	cpuctx = &__get_cpu_var(perf_cpu_context);
-	ctx = curr->perf_event_ctxp;
+	if (cpuctx->ctx.nr_events &&
+	    cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
+		rotate = 1;
 
-	perf_disable();
+	ctx = curr->perf_event_ctxp;
+	if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
+		rotate = 1;
 
 	perf_ctx_adjust_freq(&cpuctx->ctx);
 	if (ctx)
 		perf_ctx_adjust_freq(ctx);
 
+	if (!rotate)
+		return;
+
+	perf_disable();
 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
 		task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
@@ -1585,7 +1595,6 @@ void perf_event_task_tick(struct task_struct *curr)
 	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
 		task_ctx_sched_in(curr, EVENT_FLEXIBLE);
-
 	perf_enable();
 }
 
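The bracketed note in the changelog points at a further refinement: a nr_runnable count, so that events sitting below PERF_EVENT_STATE_INACTIVE (disabled or in error) do not make a context look rotation-worthy. A purely hypothetical sketch of that idea, assuming a new nr_runnable field maintained next to nr_events (not part of this patch):

	/* Hypothetical follow-up: rotate only when runnable events
	 * (state >= PERF_EVENT_STATE_INACTIVE) outnumber the active ones,
	 * i.e. something runnable is actually waiting for the PMU.
	 */
	static int ctx_wants_rotation_runnable(struct perf_event_context *ctx)
	{
		return ctx && ctx->nr_runnable > ctx->nr_active;
	}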