path: root/kernel/perf_event.c
author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-06-11 11:32:03 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-09-09 14:46:29 -0400
commit		24cd7f54a0d47e1d5b3de29e2456bfbd2d8447b7 (patch)
tree		a37d3a4cb101e3f67635a1920f447c9e9e8d8ab2 /kernel/perf_event.c
parent		9ed6060d286b1eb55974d09080f442f809408c42 (diff)
perf: Reduce perf_disable() usage
Since the current perf_disable() usage is only an optimization, remove it
for now. This eases the removal of the __weak hw_perf_enable() interface.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
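For context, a minimal sketch of the batching that perf_disable()/perf_enable() provided and that this patch drops. This is illustrative user-space C, not the kernel code: the nesting counter, the printf scaffolding, and the stub hw_perf_disable()/hw_perf_enable() functions are assumptions standing in for the real per-cpu count and the __weak PMU hooks.

/*
 * Illustrative sketch only -- assumed scaffolding, not kernel code.
 * perf_disable()/perf_enable() nest; only the outermost pair touches
 * the hardware, so a batch of event changes costs one PMU reprogram.
 */
#include <stdio.h>

static int pmu_disable_count;          /* stands in for the per-cpu count */

static void hw_perf_disable(void) { printf("PMU: stop\n"); }
static void hw_perf_enable(void)  { printf("PMU: reprogram and restart\n"); }

static void perf_disable(void)
{
	if (pmu_disable_count++ == 0)   /* first caller stops the PMU */
		hw_perf_disable();
}

static void perf_enable(void)
{
	if (--pmu_disable_count == 0)   /* last caller reprograms once */
		hw_perf_enable();
}

static void event_sched_out(int ev)    /* placeholder for the real helper */
{
	printf("sched out event %d\n", ev);
}

int main(void)
{
	/* Old pattern: bracket the whole batch, one reprogram at the end. */
	perf_disable();
	for (int ev = 0; ev < 3; ev++)
		event_sched_out(ev);
	perf_enable();

	/* Pattern after this patch: no global bracket; each event is
	 * handled individually under ctx->lock. */
	for (int ev = 0; ev < 3; ev++)
		event_sched_out(ev);

	return 0;
}

After the patch the affected call sites rely on ctx->lock and per-event scheduling alone, which is what makes the global bracket (and, per the changelog, the __weak hw_perf_enable() interface) easier to remove.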
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--	kernel/perf_event.c	37
1 file changed, 1 insertion(+), 36 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 149ca18371b7..9a98ce953561 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -478,11 +478,6 @@ static void __perf_event_remove_from_context(void *info)
 		return;
 
 	raw_spin_lock(&ctx->lock);
-	/*
-	 * Protect the list operation against NMI by disabling the
-	 * events on a global level.
-	 */
-	perf_disable();
 
 	event_sched_out(event, cpuctx, ctx);
 
@@ -498,7 +493,6 @@ static void __perf_event_remove_from_context(void *info)
 			    perf_max_events - perf_reserved_percpu);
 	}
 
-	perf_enable();
 	raw_spin_unlock(&ctx->lock);
 }
 
@@ -803,12 +797,6 @@ static void __perf_install_in_context(void *info)
 	ctx->is_active = 1;
 	update_context_time(ctx);
 
-	/*
-	 * Protect the list operation against NMI by disabling the
-	 * events on a global level. NOP for non NMI based events.
-	 */
-	perf_disable();
-
 	add_event_to_ctx(event, ctx);
 
 	if (event->cpu != -1 && event->cpu != smp_processor_id())
@@ -850,8 +838,6 @@ static void __perf_install_in_context(void *info)
 		cpuctx->max_pertask--;
 
 unlock:
-	perf_enable();
-
 	raw_spin_unlock(&ctx->lock);
 }
 
@@ -972,12 +958,10 @@ static void __perf_event_enable(void *info)
 	if (!group_can_go_on(event, cpuctx, 1)) {
 		err = -EEXIST;
 	} else {
-		perf_disable();
 		if (event == leader)
 			err = group_sched_in(event, cpuctx, ctx);
 		else
 			err = event_sched_in(event, cpuctx, ctx);
-		perf_enable();
 	}
 
 	if (err) {
@@ -1090,9 +1074,8 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 		goto out;
 	update_context_time(ctx);
 
-	perf_disable();
 	if (!ctx->nr_active)
-		goto out_enable;
+		goto out;
 
 	if (event_type & EVENT_PINNED) {
 		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
@@ -1103,9 +1086,6 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
 			group_sched_out(event, cpuctx, ctx);
 	}
-
- out_enable:
-	perf_enable();
 out:
 	raw_spin_unlock(&ctx->lock);
 }
@@ -1364,8 +1344,6 @@ ctx_sched_in(struct perf_event_context *ctx,
 
 	ctx->timestamp = perf_clock();
 
-	perf_disable();
-
 	/*
 	 * First go through the list and put on any pinned groups
 	 * in order to give them the best chance of going on.
@@ -1377,7 +1355,6 @@ ctx_sched_in(struct perf_event_context *ctx,
 	if (event_type & EVENT_FLEXIBLE)
 		ctx_flexible_sched_in(ctx, cpuctx);
 
-	perf_enable();
 out:
 	raw_spin_unlock(&ctx->lock);
 }
@@ -1425,8 +1402,6 @@ void perf_event_task_sched_in(struct task_struct *task)
 	if (cpuctx->task_ctx == ctx)
 		return;
 
-	perf_disable();
-
 	/*
 	 * We want to keep the following priority order:
 	 * cpu pinned (that don't need to move), task pinned,
@@ -1439,8 +1414,6 @@ void perf_event_task_sched_in(struct task_struct *task)
 	ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
 
 	cpuctx->task_ctx = ctx;
-
-	perf_enable();
 }
 
 #define MAX_INTERRUPTS (~0ULL)
@@ -1555,11 +1528,9 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
 	hwc->sample_period = sample_period;
 
 	if (local64_read(&hwc->period_left) > 8*sample_period) {
-		perf_disable();
 		perf_event_stop(event);
 		local64_set(&hwc->period_left, 0);
 		perf_event_start(event);
-		perf_enable();
 	}
 }
 
@@ -1588,15 +1559,12 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 		 */
 		if (interrupts == MAX_INTERRUPTS) {
 			perf_log_throttle(event, 1);
-			perf_disable();
 			event->pmu->unthrottle(event);
-			perf_enable();
 		}
 
 		if (!event->attr.freq || !event->attr.sample_freq)
 			continue;
 
-		perf_disable();
 		event->pmu->read(event);
 		now = local64_read(&event->count);
 		delta = now - hwc->freq_count_stamp;
@@ -1604,7 +1572,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 
 		if (delta > 0)
 			perf_adjust_period(event, TICK_NSEC, delta);
-		perf_enable();
 	}
 	raw_spin_unlock(&ctx->lock);
 }
@@ -1647,7 +1614,6 @@ void perf_event_task_tick(struct task_struct *curr)
 	if (!rotate)
 		return;
 
-	perf_disable();
 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
 		task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
@@ -1659,7 +1625,6 @@ void perf_event_task_tick(struct task_struct *curr)
 	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
 		task_ctx_sched_in(curr, EVENT_FLEXIBLE);
-	perf_enable();
 }
 
 static int event_enable_on_exec(struct perf_event *event,