author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-04-09 15:17:45 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-05-28 12:01:17 -0400
commit		db24d33e08b88e990991760a44d72006a5dc6102
tree		a36c4aba0e221e5833b15432971e526959a3aff1 /kernel/events/core.c
parent		2c29ef0fef8aaff1f91263fc75c749d659da6972
perf: Change and simplify ctx::is_active semantics
Instead of tracking whether a context is active or not, track which event
types of the context are active. By making ctx::is_active a bitmask of
EVENT_PINNED|EVENT_FLEXIBLE we can simplify some of the scheduling routines,
since they can avoid adding events that are already active.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110409192141.930282378@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
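
To make the bitmask idea concrete, here is a minimal standalone sketch: the
EVENT_* values mirror the enum in kernel/events/core.c, but struct sketch_ctx
and the two helpers below are simplified stand-ins for illustration, not the
kernel code.

/*
 * Illustrative sketch only: the EVENT_* values mirror kernel/events/core.c,
 * everything else is a simplified stand-in for the real context handling.
 */
enum event_type_t {
	EVENT_FLEXIBLE	= 0x1,
	EVENT_PINNED	= 0x2,
	EVENT_ALL	= EVENT_FLEXIBLE | EVENT_PINNED,
};

struct sketch_ctx {
	int is_active;	/* bitmask of event types currently scheduled in */
};

/* Schedule out only the classes that are both requested and active. */
static void sketch_sched_out(struct sketch_ctx *ctx, int event_type)
{
	int is_active = ctx->is_active;

	ctx->is_active &= ~event_type;

	if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
		/* ... walk pinned groups, group_sched_out() each ... */
	}
	if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
		/* ... walk flexible groups, group_sched_out() each ... */
	}
}

/* Schedule in only the classes that are requested and not yet active. */
static void sketch_sched_in(struct sketch_ctx *ctx, int event_type)
{
	int is_active = ctx->is_active;

	ctx->is_active |= event_type;

	if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
		/* ... ctx_pinned_sched_in() equivalent ... */
	}
	if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
		/* ... ctx_flexible_sched_in() equivalent ... */
	}
}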
Diffstat (limited to 'kernel/events/core.c')
kernel/events/core.c | 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 60b333ae0bcf..71c2d44ff95d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1763,8 +1763,9 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 			  enum event_type_t event_type)
 {
 	struct perf_event *event;
+	int is_active = ctx->is_active;
 
-	ctx->is_active = 0;
+	ctx->is_active &= ~event_type;
 	if (likely(!ctx->nr_events))
 		return;
 
@@ -1774,12 +1775,12 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 		return;
 
 	perf_pmu_disable(ctx->pmu);
-	if (event_type & EVENT_PINNED) {
+	if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
 		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
 			group_sched_out(event, cpuctx, ctx);
 	}
 
-	if (event_type & EVENT_FLEXIBLE) {
+	if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
 		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
 			group_sched_out(event, cpuctx, ctx);
 	}
@@ -2058,8 +2059,9 @@ ctx_sched_in(struct perf_event_context *ctx,
 	     struct task_struct *task)
 {
 	u64 now;
+	int is_active = ctx->is_active;
 
-	ctx->is_active = 1;
+	ctx->is_active |= event_type;
 	if (likely(!ctx->nr_events))
 		return;
 
@@ -2070,11 +2072,11 @@ ctx_sched_in(struct perf_event_context *ctx,
 	 * First go through the list and put on any pinned groups
 	 * in order to give them the best chance of going on.
 	 */
-	if (event_type & EVENT_PINNED)
+	if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
 		ctx_pinned_sched_in(ctx, cpuctx);
 
 	/* Then walk through the lower prio flexible groups */
-	if (event_type & EVENT_FLEXIBLE)
+	if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
 		ctx_flexible_sched_in(ctx, cpuctx);
 }
 
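
As a usage illustration (a hypothetical standalone program, not part of the
patch), partially scheduling out one class now leaves the other class's
active bit, and therefore its events, untouched:

#include <stdio.h>

/* Values mirror kernel/events/core.c; the walkthrough is hypothetical. */
enum event_type_t {
	EVENT_FLEXIBLE	= 0x1,
	EVENT_PINNED	= 0x2,
	EVENT_ALL	= EVENT_FLEXIBLE | EVENT_PINNED,
};

int main(void)
{
	int is_active = 0;

	is_active |= EVENT_ALL;		/* as in ctx_sched_in(..., EVENT_ALL, ...) */
	printf("0x%x\n", is_active);	/* 0x3: pinned and flexible both active */

	is_active &= ~EVENT_FLEXIBLE;	/* as in ctx_sched_out(..., EVENT_FLEXIBLE) */
	printf("0x%x\n", is_active);	/* 0x2: pinned events known to still be on */

	/*
	 * A later sched-in of EVENT_ALL sees (is_active & EVENT_PINNED) set
	 * and skips the pinned groups; only the flexible groups are re-added.
	 */
	is_active |= EVENT_ALL;
	printf("0x%x\n", is_active);	/* 0x3 */

	return 0;
}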