From 0763a660a84220cc3900fd32abdd7ad109e2278d Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 14 Sep 2009 19:37:39 +0200
Subject: sched: Rename select_task_rq() argument

In order to be able to rename the sync argument, we need to rename the
current flag argument.
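
For reference, the renamed argument carries one of the SD_BALANCE_*
sched-domain flags, so a wakeup-path caller ends up doing something
along these lines (illustrative sketch only, not part of this patch):

	/* wakeup: ask p's sched class for a target runqueue */
	cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, sync);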

Signed-off-by: Peter Zijlstra
LKML-Reference:
Signed-off-by: Ingo Molnar
---
 include/linux/sched.h   |  2 +-
 kernel/sched_fair.c     | 14 +++++++-------
 kernel/sched_idletask.c |  2 +-
 kernel/sched_rt.c       |  4 ++--
 4 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index fc4c0f9393d2..5c116f03d74c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1037,7 +1037,7 @@ struct sched_class {
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
-	int (*select_task_rq)(struct task_struct *p, int flag, int sync);
+	int (*select_task_rq)(struct task_struct *p, int sd_flag, int sync);
 
 	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
 			struct rq *busiest, unsigned long max_load_move,

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 19593568031a..b554e63c521a 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1331,7 +1331,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
  *
  * preempt must be disabled.
  */
-static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
+static int select_task_rq_fair(struct task_struct *p, int sd_flag, int sync)
 {
 	struct sched_domain *tmp, *sd = NULL;
 	int cpu = smp_processor_id();
@@ -1339,7 +1339,7 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
 	int new_cpu = cpu;
 	int want_affine = 0;
 
-	if (flag & SD_BALANCE_WAKE) {
+	if (sd_flag & SD_BALANCE_WAKE) {
 		if (sched_feat(AFFINE_WAKEUPS))
 			want_affine = 1;
 		new_cpu = prev_cpu;
@@ -1368,7 +1368,7 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
 			break;
 		}
 
-		switch (flag) {
+		switch (sd_flag) {
 		case SD_BALANCE_WAKE:
 			if (!sched_feat(LB_WAKEUP_UPDATE))
 				break;
@@ -1392,7 +1392,7 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
 			want_affine = 0;
 		}
 
-		if (!(tmp->flags & flag))
+		if (!(tmp->flags & sd_flag))
 			continue;
 
 		sd = tmp;
@@ -1402,12 +1402,12 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
 		struct sched_group *group;
 		int weight;
 
-		if (!(sd->flags & flag)) {
+		if (!(sd->flags & sd_flag)) {
 			sd = sd->child;
 			continue;
 		}
 
-		group = find_idlest_group(sd, p, cpu, flag);
+		group = find_idlest_group(sd, p, cpu, sd_flag);
 		if (!group) {
 			sd = sd->child;
 			continue;
@@ -1427,7 +1427,7 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
 		for_each_domain(cpu, tmp) {
 			if (weight <= cpumask_weight(sched_domain_span(tmp)))
 				break;
-			if (tmp->flags & flag)
+			if (tmp->flags & sd_flag)
 				sd = tmp;
 		}
 		/* while loop will break here if sd == NULL */

diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 99b2f0337609..9ff7697e5dc4 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -6,7 +6,7 @@
  */
 
 #ifdef CONFIG_SMP
-static int select_task_rq_idle(struct task_struct *p, int flag, int sync)
+static int select_task_rq_idle(struct task_struct *p, int sd_flag, int sync)
 {
 	return task_cpu(p); /* IDLE tasks as never migrated */
 }

diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 438380810ac4..97c53f3f51a7 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -938,11 +938,11 @@ static void yield_task_rt(struct rq *rq)
 #ifdef CONFIG_SMP
 static int find_lowest_rq(struct task_struct *task);
 
-static int select_task_rq_rt(struct task_struct *p, int flag, int sync)
+static int select_task_rq_rt(struct task_struct *p, int sd_flag, int sync)
 {
 	struct rq *rq = task_rq(p);
 
-	if (flag != SD_BALANCE_WAKE)
+	if (sd_flag != SD_BALANCE_WAKE)
 		return smp_processor_id();
 
 	/*