author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-09-10 07:42:00 -0400
committer Ingo Molnar <mingo@elte.hu>              2009-09-15 10:01:04 -0400
commit    5f3edc1b1ead6d9bd45a85c551f44eff8fe76b9f (patch)
tree      46f987010ca017be945831b76d8ea846f1ba8fc9
parent    aaee1203ca52b9db799433c33c9bffc33cdf8909 (diff)
sched: Hook sched_balance_self() into sched_class::select_task_rq()
Rather ugly patch to fully place the sched_balance_self() code
inside the fair class.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
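
In short: the placement policy that used to live in sched_balance_self() moves behind the per-class select_task_rq() hook, which gains a flag argument naming the balance occasion (SD_BALANCE_WAKE / SD_BALANCE_FORK / SD_BALANCE_EXEC). Below is a minimal user-space sketch of that dispatch pattern, not kernel code: the struct layouts, the stub bodies, and the SD_BALANCE_FORK/SD_BALANCE_EXEC values are illustrative assumptions; only SD_BALANCE_WAKE (0x2000) comes from this patch.

#include <stdio.h>

#define SD_BALANCE_EXEC	0x0004	/* illustrative stand-in value */
#define SD_BALANCE_FORK	0x0008	/* illustrative stand-in value */
#define SD_BALANCE_WAKE	0x2000	/* the flag this patch adds */

struct sched_class;

struct task_struct {
	int cpu;
	const struct sched_class *sched_class;
};

struct sched_class {
	/* the hook now receives the balance occasion as 'flag' */
	int (*select_task_rq)(struct task_struct *p, int flag, int sync);
};

/* stand-in for the fair class's domain-walking balancer */
static int sched_balance_self(int cpu, int flag)
{
	(void)flag;
	return cpu;	/* pretend the given CPU is the best choice */
}

static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
{
	(void)sync;
	if (flag != SD_BALANCE_WAKE)	/* fork/exec fall back to the */
		return sched_balance_self(p->cpu, flag);	/* old balancer */
	return p->cpu;			/* wakeups keep their affine logic */
}

static const struct sched_class fair_sched_class = {
	.select_task_rq = select_task_rq_fair,
};

int main(void)
{
	struct task_struct p = { .cpu = 2, .sched_class = &fair_sched_class };

	/* callers now pass the occasion, as try_to_wake_up(), sched_fork()
	 * and sched_exec() do after this patch */
	printf("wake -> cpu %d\n",
	       p.sched_class->select_task_rq(&p, SD_BALANCE_WAKE, 0));
	printf("fork -> cpu %d\n",
	       p.sched_class->select_task_rq(&p, SD_BALANCE_FORK, 0));
	return 0;
}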
-rw-r--r--  include/linux/sched.h   |  3
-rw-r--r--  kernel/sched.c          | 14
-rw-r--r--  kernel/sched_fair.c     |  7
-rw-r--r--  kernel/sched_idletask.c |  2
-rw-r--r--  kernel/sched_rt.c       |  5
5 files changed, 20 insertions(+), 11 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f3d74bd04d18..5d3c9900943e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -811,6 +811,7 @@ enum cpu_idle_type {
 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
 #define SD_WAKE_IDLE_FAR	0x0800	/* Gain latency sacrificing cache hit */
 #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
+#define SD_BALANCE_WAKE		0x2000	/* Balance on wakeup */
 
 enum powersavings_balance_level {
 	POWERSAVINGS_BALANCE_NONE = 0,	/* No power saving load balance */
@@ -1032,7 +1033,7 @@ struct sched_class {
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
-	int (*select_task_rq)(struct task_struct *p, int sync);
+	int (*select_task_rq)(struct task_struct *p, int flag, int sync);
 
 	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
 			struct rq *busiest, unsigned long max_load_move,
diff --git a/kernel/sched.c b/kernel/sched.c
index 60400a22401f..32b7a81230c2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2350,7 +2350,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 	if (unlikely(task_running(rq, p)))
 		goto out_activate;
 
-	cpu = p->sched_class->select_task_rq(p, sync);
+	cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, sync);
 	if (cpu != orig_cpu) {
 		set_task_cpu(p, cpu);
 		task_rq_unlock(rq, &flags);
@@ -2525,11 +2525,6 @@ void sched_fork(struct task_struct *p, int clone_flags)
 
 	__sched_fork(p);
 
-#ifdef CONFIG_SMP
-	cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
-#endif
-	set_task_cpu(p, cpu);
-
 	/*
 	 * Make sure we do not leak PI boosting priority to the child.
 	 */
@@ -2560,6 +2555,11 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	if (!rt_prio(p->prio))
 		p->sched_class = &fair_sched_class;
 
+#ifdef CONFIG_SMP
+	cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0);
+#endif
+	set_task_cpu(p, cpu);
+
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	if (likely(sched_info_on()))
 		memset(&p->sched_info, 0, sizeof(p->sched_info));
@@ -3114,7 +3114,7 @@ out:
 void sched_exec(void)
 {
 	int new_cpu, this_cpu = get_cpu();
-	new_cpu = sched_balance_self(this_cpu, SD_BALANCE_EXEC);
+	new_cpu = current->sched_class->select_task_rq(current, SD_BALANCE_EXEC, 0);
 	put_cpu();
 	if (new_cpu != this_cpu)
 		sched_migrate_task(current, new_cpu);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a82d71d3afed..f2eb5b934715 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1300,7 +1300,9 @@ wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
 	return 0;
 }
 
-static int select_task_rq_fair(struct task_struct *p, int sync)
+static int sched_balance_self(int cpu, int flag);
+
+static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
 {
 	struct sched_domain *sd, *this_sd = NULL;
 	int prev_cpu, this_cpu, new_cpu;
@@ -1314,6 +1316,9 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 	this_rq = cpu_rq(this_cpu);
 	new_cpu = prev_cpu;
 
+	if (flag != SD_BALANCE_WAKE)
+		return sched_balance_self(this_cpu, flag);
+
 	/*
 	 * 'this_sd' is the first domain that both
 	 * this_cpu and prev_cpu are present in:
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 499672c10cbd..99b2f0337609 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -6,7 +6,7 @@
  */
 
 #ifdef CONFIG_SMP
-static int select_task_rq_idle(struct task_struct *p, int sync)
+static int select_task_rq_idle(struct task_struct *p, int flag, int sync)
 {
 	return task_cpu(p); /* IDLE tasks as never migrated */
 }
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 2eb4bd6a526c..438380810ac4 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -938,10 +938,13 @@ static void yield_task_rt(struct rq *rq)
 #ifdef CONFIG_SMP
 static int find_lowest_rq(struct task_struct *task);
 
-static int select_task_rq_rt(struct task_struct *p, int sync)
+static int select_task_rq_rt(struct task_struct *p, int flag, int sync)
 {
 	struct rq *rq = task_rq(p);
 
+	if (flag != SD_BALANCE_WAKE)
+		return smp_processor_id();
+
 	/*
 	 * If the current task is an RT task, then
 	 * try to see if we can wake this RT task up on another
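
Taken together: wakeup placement keeps its per-class logic, while the fork and exec occasions are short-circuited at the top of each hook. The fair class forwards them to sched_balance_self() through a forward declaration, and the RT and idle classes simply pin the task locally (smp_processor_id() and task_cpu(p), respectively). That forward declaration is presumably what makes the patch "rather ugly", but it keeps every placement decision behind a single class method.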