author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-06-11 11:36:35 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-09-09 14:46:28 -0400
commit	9ed6060d286b1eb55974d09080f442f809408c42 (patch)
tree	529961474ca96bef34ec0b75e35fe991aee0e80d /kernel/perf_event.c
parent	b0a873ebbf87bf38bf70b5e39a7cadc96099fa13 (diff)
perf: Unindent labels
Fixup random annoying style bits.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--	kernel/perf_event.c	43
1 file changed, 24 insertions, 19 deletions
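Note for readers skimming the diff below: the patch makes two mechanical style changes. Goto labels lose their historical one-space indent and move to column 0, and multi-line bodies under if statements and loops gain braces. A minimal standalone C sketch of the resulting label style (hypothetical function, not taken from perf_event.c):

#include <stdio.h>

/* Hypothetical error-path example in the post-patch style: the
 * cleanup label sits at column 0 while the code stays indented. */
static int setup_pair(int a, int b)
{
	int err = -1;

	if (a < 0)
		goto out;	/* single-statement body: no braces needed */
	if (b < 0)
		goto out;
	err = 0;
out:				/* unindented label, as throughout this patch */
	return err;
}

int main(void)
{
	printf("%d\n", setup_pair(1, -1));
	return 0;
}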
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 288ce43de57c..149ca18371b7 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -147,7 +147,7 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags)
 	struct perf_event_context *ctx;
 
 	rcu_read_lock();
- retry:
+retry:
 	ctx = rcu_dereference(task->perf_event_ctxp);
 	if (ctx) {
 		/*
@@ -619,7 +619,7 @@ void perf_event_disable(struct perf_event *event)
 		return;
 	}
 
- retry:
+retry:
 	task_oncpu_function_call(task, __perf_event_disable, event);
 
 	raw_spin_lock_irq(&ctx->lock);
@@ -849,7 +849,7 @@ static void __perf_install_in_context(void *info)
 	if (!err && !ctx->task && cpuctx->max_pertask)
 		cpuctx->max_pertask--;
 
- unlock:
+unlock:
 	perf_enable();
 
 	raw_spin_unlock(&ctx->lock);
@@ -922,10 +922,12 @@ static void __perf_event_mark_enabled(struct perf_event *event,
 
 	event->state = PERF_EVENT_STATE_INACTIVE;
 	event->tstamp_enabled = ctx->time - event->total_time_enabled;
-	list_for_each_entry(sub, &event->sibling_list, group_entry)
-		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
+	list_for_each_entry(sub, &event->sibling_list, group_entry) {
+		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
 			sub->tstamp_enabled =
 				ctx->time - sub->total_time_enabled;
+		}
+	}
 }
 
 /*
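This hunk is the second kind of change: the loop body is itself a multi-line if, so braces are added at both levels even though each level holds a single statement. A standalone sketch of the same shape (hypothetical struct and names, not kernel code):

/* Hypothetical sketch: a body that wraps across lines reads more
 * clearly fully braced, mirroring the hunk above. */
struct item {
	int	state;
	long	stamp;
};

static void mark_all(struct item *items, int n, long now)
{
	int i;

	for (i = 0; i < n; i++) {
		if (items[i].state >= 0) {
			items[i].stamp =
				now - items[i].stamp;
		}
	}
}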
@@ -991,7 +993,7 @@ static void __perf_event_enable(void *info)
 		}
 	}
 
- unlock:
+unlock:
 	raw_spin_unlock(&ctx->lock);
 }
 
@@ -1032,7 +1034,7 @@ void perf_event_enable(struct perf_event *event)
 	if (event->state == PERF_EVENT_STATE_ERROR)
 		event->state = PERF_EVENT_STATE_OFF;
 
- retry:
+retry:
 	raw_spin_unlock_irq(&ctx->lock);
 	task_oncpu_function_call(task, __perf_event_enable, event);
 
@@ -1052,7 +1054,7 @@ void perf_event_enable(struct perf_event *event)
 	if (event->state == PERF_EVENT_STATE_OFF)
 		__perf_event_mark_enabled(event, ctx);
 
- out:
+out:
 	raw_spin_unlock_irq(&ctx->lock);
 }
 
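The retry: label being unindented here marks a common locking pattern in this file: drop ctx->lock, run the handler on the event's CPU via task_oncpu_function_call(), re-take the lock, and loop if the state changed in between. A rough user-space analogy using pthreads (hypothetical, not the kernel API):

#include <pthread.h>

/* Hypothetical sketch of the retry-under-lock shape: the side effect
 * runs unlocked, then we re-take the lock and recheck the state. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int state;	/* 0 = off, 1 = pending, 2 = on */

static void poke_target(void)
{
	state = 2;	/* stand-in for the cross-CPU handler */
}

static void enable_state(void)
{
	pthread_mutex_lock(&lock);
	state = 1;
retry:
	pthread_mutex_unlock(&lock);
	poke_target();			/* must run without the lock held */
	pthread_mutex_lock(&lock);
	if (state == 1)			/* raced: still pending, try again */
		goto retry;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	enable_state();
	return state == 2 ? 0 : 1;
}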
@@ -1092,17 +1094,19 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 	if (!ctx->nr_active)
 		goto out_enable;
 
-	if (event_type & EVENT_PINNED)
+	if (event_type & EVENT_PINNED) {
 		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
 			group_sched_out(event, cpuctx, ctx);
+	}
 
-	if (event_type & EVENT_FLEXIBLE)
+	if (event_type & EVENT_FLEXIBLE) {
 		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
 			group_sched_out(event, cpuctx, ctx);
+	}
 
  out_enable:
 	perf_enable();
- out:
+out:
 	raw_spin_unlock(&ctx->lock);
 }
 
@@ -1341,9 +1345,10 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
 		if (event->cpu != -1 && event->cpu != smp_processor_id())
 			continue;
 
-		if (group_can_go_on(event, cpuctx, can_add_hw))
+		if (group_can_go_on(event, cpuctx, can_add_hw)) {
 			if (group_sched_in(event, cpuctx, ctx))
 				can_add_hw = 0;
+		}
 	}
 }
 
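Bracing the outer if here also sidesteps the classic dangling-else hazard: with two unbraced nested ifs, an else added later would silently bind to the inner one. A minimal illustration (hypothetical names, not kernel code):

/* Hypothetical sketch: the outer braces pin any future else clause
 * to the outer condition instead of the inner if. */
static int try_add(int can_go_on, int sched_in_failed, int can_add_hw)
{
	if (can_go_on) {
		if (sched_in_failed)
			can_add_hw = 0;
	}
	return can_add_hw;
}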
@@ -1373,7 +1378,7 @@ ctx_sched_in(struct perf_event_context *ctx,
 	ctx_flexible_sched_in(ctx, cpuctx);
 
 	perf_enable();
- out:
+out:
 	raw_spin_unlock(&ctx->lock);
 }
 
@@ -1714,7 +1719,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
 	raw_spin_unlock(&ctx->lock);
 
 	perf_event_task_sched_in(task);
- out:
+out:
 	local_irq_restore(flags);
 }
 
@@ -2053,7 +2058,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
 	if (!ptrace_may_access(task, PTRACE_MODE_READ))
 		goto errout;
 
- retry:
+retry:
 	ctx = perf_lock_task_context(task, &flags);
 	if (ctx) {
 		unclone_ctx(ctx);
@@ -2081,7 +2086,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
 	put_task_struct(task);
 	return ctx;
 
- errout:
+errout:
 	put_task_struct(task);
 	return ERR_PTR(err);
 }
@@ -3264,7 +3269,7 @@ again:
 	if (handle->wakeup != local_read(&buffer->wakeup))
 		perf_output_wakeup(handle);
 
- out:
+out:
 	preempt_enable();
 }
 
@@ -4562,7 +4567,7 @@ static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
 		rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
 	}
 	cpuctx->hlist_refcount++;
- exit:
+exit:
 	mutex_unlock(&cpuctx->hlist_mutex);
 
 	return err;
@@ -4587,7 +4592,7 @@ static int swevent_hlist_get(struct perf_event *event)
 	put_online_cpus();
 
 	return 0;
- fail:
+fail:
 	for_each_possible_cpu(cpu) {
 		if (cpu == failed_cpu)
 			break;