author     Peter Williams <pwil3058@bigpond.net.au>   2007-10-24 12:23:51 -0400
committer  Ingo Molnar <mingo@elte.hu>                2007-10-24 12:23:51 -0400
commit     681f3e68541d6f03e3e05d21fe15093578b8b539 (patch)
tree       82e13a7b15a2bb15233ba964dd1eb352b007cae4 /kernel
parent     e1d1484f72127a5580d37c379f6a5b2c2786434c (diff)
sched: isolate SMP balancing code a bit more
At the moment, a lot of load balancing code that is irrelevant to
non-SMP systems gets included in non-SMP builds.

This patch addresses the issue and reduces the binary size on non-SMP
systems:

   text    data     bss     dec     hex filename
  10983      28    1192   12203    2fab sched.o.before
  10739      28    1192   11959    2eb7 sched.o.after

Signed-off-by: Peter Williams <pwil3058@bigpond.net.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c           17
-rw-r--r--  kernel/sched_fair.c       4
-rw-r--r--  kernel/sched_idletask.c   4
-rw-r--r--  kernel/sched_rt.c         4
4 files changed, 12 insertions, 17 deletions
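
The mechanism is plain conditional compilation: the !SMP dummy stubs in sched.c are dropped, and the load-balancing methods plus the .load_balance/.move_one_task hooks of each scheduling class are wrapped in #ifdef CONFIG_SMP, so a uniprocessor build compiles none of them. Below is a minimal, self-contained sketch of the same pattern (hypothetical struct and function names, not the kernel's own definitions); building it with and without -DCONFIG_SMP and comparing `size` output reproduces the effect in miniature.

#include <stdio.h>

/* A toy "scheduling class": the balancing hooks exist only on SMP. */
struct toy_sched_class {
#ifdef CONFIG_SMP
	unsigned long (*load_balance)(unsigned long max_load_move);
	int (*move_one_task)(void);
#endif
	void (*task_tick)(void);
};

#ifdef CONFIG_SMP
/* Compiled only when CONFIG_SMP is defined; a UP build carries neither
 * this code nor the corresponding function pointers. */
static unsigned long toy_load_balance(unsigned long max_load_move)
{
	return max_load_move;		/* pretend the full load was moved */
}

static int toy_move_one_task(void)
{
	return 0;			/* nothing migrated in this sketch */
}
#endif

static void toy_task_tick(void)
{
	puts("tick");
}

static const struct toy_sched_class toy_class = {
#ifdef CONFIG_SMP
	.load_balance	= toy_load_balance,
	.move_one_task	= toy_move_one_task,
#endif
	.task_tick	= toy_task_tick,
};

int main(void)
{
	/* Build with `cc -DCONFIG_SMP demo.c` and plain `cc demo.c`, then
	 * compare `size a.out`: the UP binary omits the hooks entirely. */
	toy_class.task_tick();
	return 0;
}
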
diff --git a/kernel/sched.c b/kernel/sched.c
index 8607795fad69..b4fbbc440453 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -849,23 +849,6 @@ static int
 iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		   struct sched_domain *sd, enum cpu_idle_type idle,
 		   struct rq_iterator *iterator);
-#else
-static inline unsigned long
-balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
-	      unsigned long max_load_move, struct sched_domain *sd,
-	      enum cpu_idle_type idle, int *all_pinned,
-	      int *this_best_prio, struct rq_iterator *iterator)
-{
-	return 0;
-}
-
-static inline int
-iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		   struct sched_domain *sd, enum cpu_idle_type idle,
-		   struct rq_iterator *iterator)
-{
-	return 0;
-}
 #endif
 
 #include "sched_stats.h"
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a90d0457d603..9971831b560e 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -876,6 +876,7 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
 	}
 }
 
+#ifdef CONFIG_SMP
 /**************************************************
  * Fair scheduling class load-balancing methods:
  */
@@ -1008,6 +1009,7 @@ move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 
 	return 0;
 }
+#endif
 
 /*
  * scheduler tick hitting a task of our scheduling class:
@@ -1084,8 +1086,10 @@ static const struct sched_class fair_sched_class = {
 	.pick_next_task		= pick_next_task_fair,
 	.put_prev_task		= put_prev_task_fair,
 
+#ifdef CONFIG_SMP
 	.load_balance		= load_balance_fair,
 	.move_one_task		= move_one_task_fair,
+#endif
 
 	.set_curr_task		= set_curr_task_fair,
 	.task_tick		= task_tick_fair,
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 586b06ca30aa..bf9c25c15b8b 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -37,6 +37,7 @@ static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
 {
 }
 
+#ifdef CONFIG_SMP
 static unsigned long
 load_balance_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		  unsigned long max_load_move,
@@ -52,6 +53,7 @@ move_one_task_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
 {
 	return 0;
 }
+#endif
 
 static void task_tick_idle(struct rq *rq, struct task_struct *curr)
 {
@@ -76,8 +78,10 @@ const struct sched_class idle_sched_class = {
 	.pick_next_task		= pick_next_task_idle,
 	.put_prev_task		= put_prev_task_idle,
 
+#ifdef CONFIG_SMP
 	.load_balance		= load_balance_idle,
 	.move_one_task		= move_one_task_idle,
+#endif
 
 	.set_curr_task		= set_curr_task_idle,
 	.task_tick		= task_tick_idle,
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index e9395b7119e6..8abd752a0ebd 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -98,6 +98,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 	p->se.exec_start = 0;
 }
 
+#ifdef CONFIG_SMP
 /*
  * Load-balancing iterator. Note: while the runqueue stays locked
  * during the whole iteration, the current task might be
@@ -202,6 +203,7 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	return iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
 				  &rt_rq_iterator);
 }
+#endif
 
 static void task_tick_rt(struct rq *rq, struct task_struct *p)
 {
@@ -245,8 +247,10 @@ const struct sched_class rt_sched_class = {
 	.pick_next_task		= pick_next_task_rt,
 	.put_prev_task		= put_prev_task_rt,
 
+#ifdef CONFIG_SMP
 	.load_balance		= load_balance_rt,
 	.move_one_task		= move_one_task_rt,
+#endif
 
 	.set_curr_task		= set_curr_task_rt,
 	.task_tick		= task_tick_rt,