author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-10-17 15:46:10 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-10-18 12:41:56 -0400
commit	4924627423d5e286136ad2520f5be536345ae590 (patch)
tree	0ad0151d0a359edb89d99fb5c5c1dfebd64e5a14
parent	864616ee6785d9fac7a2cd80c01a2da89579f2e4 (diff)
sched: Unindent labels
Labels should be on column 0.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
 kernel/sched.c    | 12 ++++++------
 kernel/sched_rt.c |  6 +++---
 2 files changed, 9 insertions(+), 9 deletions(-)
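The rule being applied is simply that goto labels start at column 0 instead of being indented by one space. As a standalone, illustrative sketch of the style (the function and names below are hypothetical, not taken from the patch):

#include <stdlib.h>

/* Hypothetical example, only to illustrate the label placement. */
static int setup(void)
{
	void *buf = malloc(64);

	if (!buf)
		goto err;	/* error path jumps to the cleanup label */

	free(buf);
	return 0;

err:	/* the label itself starts at column 0 */
	return -1;
}

int main(void)
{
	return setup();
}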
diff --git a/kernel/sched.c b/kernel/sched.c
index 2111491f6424..7f522832250c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4891,7 +4891,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 
 	cpuset_cpus_allowed(p, cpus_allowed);
 	cpumask_and(new_mask, in_mask, cpus_allowed);
- again:
+again:
 	retval = set_cpus_allowed_ptr(p, new_mask);
 
 	if (!retval) {
@@ -8141,9 +8141,9 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 
 	return 1;
 
- err_free_rq:
+err_free_rq:
 	kfree(cfs_rq);
- err:
+err:
 	return 0;
 }
 
@@ -8231,9 +8231,9 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 
 	return 1;
 
- err_free_rq:
+err_free_rq:
 	kfree(rt_rq);
- err:
+err:
 	return 0;
 }
 
@@ -8591,7 +8591,7 @@ static int tg_set_bandwidth(struct task_group *tg,
 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 	}
 	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
- unlock:
+unlock:
 	read_unlock(&tasklist_lock);
 	mutex_unlock(&rt_constraints_mutex);
 
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index baef30f08405..ab77aa00b7b1 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1140,7 +1140,7 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 	for_each_leaf_rt_rq(rt_rq, rq) {
 		array = &rt_rq->active;
 		idx = sched_find_first_bit(array->bitmap);
- next_idx:
+next_idx:
 		if (idx >= MAX_RT_PRIO)
 			continue;
 		if (next && next->prio < idx)
@@ -1316,7 +1316,7 @@ static int push_rt_task(struct rq *rq)
 	if (!next_task)
 		return 0;
 
- retry:
+retry:
 	if (unlikely(next_task == rq->curr)) {
 		WARN_ON(1);
 		return 0;
@@ -1464,7 +1464,7 @@ static int pull_rt_task(struct rq *this_rq)
 			 * but possible)
 			 */
 		}
- skip:
+skip:
 		double_unlock_balance(this_rq, src_rq);
 	}
 