author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-12-17 11:12:46 -0500
committer Ingo Molnar <mingo@elte.hu>                2010-01-21 07:40:09 -0500
commit    3d45fd804a95055ecab5b3eed81f5ab2dbb047a2 (patch)
tree      3ac7d2dbe75c4e406864b7d8c895f9318f308575
parent    1e3c88bdeb1260edc341e45c9fb8efd182a5c511 (diff)
sched: Remove the sched_class load_balance methods
Take out the sched_class methods for load-balancing.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  include/linux/sched.h     8
-rw-r--r--  kernel/sched.c           26
-rw-r--r--  kernel/sched_fair.c      66
-rw-r--r--  kernel/sched_idletask.c  21
-rw-r--r--  kernel/sched_rt.c        20
5 files changed, 37 insertions, 104 deletions
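
The heart of the change is in move_tasks(): it used to walk every scheduling class through the ->load_balance() method, but since the idle and RT implementations always returned 0, it can call load_balance_fair() directly and loop until no more load moves. Below is a minimal standalone sketch of the new control flow; load_balance_fair_stub and the constants are invented for illustration, not kernel code.

#include <stdio.h>

/* Toy stand-in for load_balance_fair(): pretend each pass can move at
 * most 32 units of weighted load toward the requested amount. */
static unsigned long load_balance_fair_stub(unsigned long rem)
{
	return rem > 32 ? 32 : rem;
}

int main(void)
{
	unsigned long max_load_move = 100, total_load_moved = 0, load_moved;

	/* Same loop shape as the rewritten move_tasks(): keep asking the
	 * fair class until it moves nothing or the target is reached. */
	do {
		load_moved = load_balance_fair_stub(max_load_move - total_load_moved);
		total_load_moved += load_moved;
	} while (load_moved && max_load_move > total_load_moved);

	printf("moved %lu of %lu\n", total_load_moved, max_load_move);
	return 0;
}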
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f2f842db03ce..50d685cde70e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1087,14 +1087,6 @@ struct sched_class {
 #ifdef CONFIG_SMP
 	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
 
-	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
-			struct rq *busiest, unsigned long max_load_move,
-			struct sched_domain *sd, enum cpu_idle_type idle,
-			int *all_pinned, int *this_best_prio);
-
-	int (*move_one_task) (struct rq *this_rq, int this_cpu,
-			      struct rq *busiest, struct sched_domain *sd,
-			      enum cpu_idle_type idle);
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
 	void (*task_waking) (struct rq *this_rq, struct task_struct *task);
diff --git a/kernel/sched.c b/kernel/sched.c
index 13a2acf18b2d..c0be07932a8d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1390,32 +1390,6 @@ static const u32 prio_to_wmult[40] = {
  /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
 };
 
-static void activate_task(struct rq *rq, struct task_struct *p, int wakeup);
-
-/*
- * runqueue iterator, to support SMP load-balancing between different
- * scheduling classes, without having to expose their internal data
- * structures to the load-balancing proper:
- */
-struct rq_iterator {
-	void *arg;
-	struct task_struct *(*start)(void *);
-	struct task_struct *(*next)(void *);
-};
-
-#ifdef CONFIG_SMP
-static unsigned long
-balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
-	      unsigned long max_load_move, struct sched_domain *sd,
-	      enum cpu_idle_type idle, int *all_pinned,
-	      int *this_best_prio, struct rq_iterator *iterator);
-
-static int
-iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		   struct sched_domain *sd, enum cpu_idle_type idle,
-		   struct rq_iterator *iterator);
-#endif
-
 /* Time spent by the tasks of the cpu accounting group executing in ... */
 enum cpuacct_stat_index {
 	CPUACCT_STAT_USER, /* ... user mode */
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5116b81d7727..faf9a2f099ab 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1851,6 +1851,24 @@ static struct task_struct *load_balance_next_fair(void *arg)
 	return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
 }
 
+/*
+ * runqueue iterator, to support SMP load-balancing between different
+ * scheduling classes, without having to expose their internal data
+ * structures to the load-balancing proper:
+ */
+struct rq_iterator {
+	void *arg;
+	struct task_struct *(*start)(void *);
+	struct task_struct *(*next)(void *);
+};
+
+static unsigned long
+balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
+	      unsigned long max_load_move, struct sched_domain *sd,
+	      enum cpu_idle_type idle, int *all_pinned,
+	      int *this_best_prio, struct rq_iterator *iterator);
+
+
 static unsigned long
 __load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		unsigned long max_load_move, struct sched_domain *sd,
@@ -1929,8 +1947,20 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 #endif
 
 static int
-move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		   struct sched_domain *sd, enum cpu_idle_type idle)
+iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
+		   struct sched_domain *sd, enum cpu_idle_type idle,
+		   struct rq_iterator *iterator);
+
+/*
+ * move_one_task tries to move exactly one task from busiest to this_rq, as
+ * part of active balancing operations within "domain".
+ * Returns 1 if successful and 0 otherwise.
+ *
+ * Called with both runqueues locked.
+ */
+static int
+move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
+	      struct sched_domain *sd, enum cpu_idle_type idle)
 {
 	struct cfs_rq *busy_cfs_rq;
 	struct rq_iterator cfs_rq_iterator;
@@ -2094,16 +2124,15 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		      struct sched_domain *sd, enum cpu_idle_type idle,
 		      int *all_pinned)
 {
-	const struct sched_class *class = sched_class_highest;
-	unsigned long total_load_moved = 0;
+	unsigned long total_load_moved = 0, load_moved;
 	int this_best_prio = this_rq->curr->prio;
 
 	do {
-		total_load_moved +=
-			class->load_balance(this_rq, this_cpu, busiest,
+		load_moved = load_balance_fair(this_rq, this_cpu, busiest,
 				max_load_move - total_load_moved,
 				sd, idle, all_pinned, &this_best_prio);
-		class = class->next;
+
+		total_load_moved += load_moved;
 
 #ifdef CONFIG_PREEMPT
 	/*
@@ -2114,7 +2143,7 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
 			break;
 #endif
-	} while (class && max_load_move > total_load_moved);
+	} while (load_moved && max_load_move > total_load_moved);
 
 	return total_load_moved > 0;
 }
@@ -2145,25 +2174,6 @@ iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	return 0;
 }
 
-/*
- * move_one_task tries to move exactly one task from busiest to this_rq, as
- * part of active balancing operations within "domain".
- * Returns 1 if successful and 0 otherwise.
- *
- * Called with both runqueues locked.
- */
-static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
-			 struct sched_domain *sd, enum cpu_idle_type idle)
-{
-	const struct sched_class *class;
-
-	for_each_class(class) {
-		if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle))
-			return 1;
-	}
-
-	return 0;
-}
 /********** Helpers for find_busiest_group ************************/
 /*
  * sd_lb_stats - Structure to store the statistics of a sched_domain
@@ -3873,8 +3883,6 @@ static const struct sched_class fair_sched_class = {
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_fair,
 
-	.load_balance		= load_balance_fair,
-	.move_one_task		= move_one_task_fair,
 	.rq_online		= rq_online_fair,
 	.rq_offline		= rq_offline_fair,
 
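
With struct rq_iterator now private to kernel/sched_fair.c, its start/next callback protocol is consumed only by balance_tasks() and iter_move_one_task(). The following standalone toy model shows how such an iterator decouples the walker from the runqueue internals; struct toy_rq and the toy_* helpers are invented for illustration, and only the shape of rq_iterator matches the kernel's.

#include <stddef.h>
#include <stdio.h>

struct task { const char *comm; };

/* Same shape as the kernel's struct rq_iterator. */
struct rq_iterator {
	void *arg;
	struct task *(*start)(void *);
	struct task *(*next)(void *);
};

/* Toy backing store standing in for a cfs_rq. */
struct toy_rq { struct task *tasks; size_t n, pos; };

static struct task *toy_start(void *arg)
{
	struct toy_rq *rq = arg;
	rq->pos = 0;
	return rq->n ? &rq->tasks[0] : NULL;
}

static struct task *toy_next(void *arg)
{
	struct toy_rq *rq = arg;
	return ++rq->pos < rq->n ? &rq->tasks[rq->pos] : NULL;
}

int main(void)
{
	struct task tasks[] = { { "a" }, { "b" }, { "c" } };
	struct toy_rq rq = { tasks, 3, 0 };
	struct rq_iterator it = { &rq, toy_start, toy_next };

	/* A balance_tasks()-style consumer only ever calls start()/next(),
	 * never looks inside the runqueue itself. */
	for (struct task *p = it.start(it.arg); p; p = it.next(it.arg))
		printf("considering %s\n", p->comm);
	return 0;
}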
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 01332bfc61a7..a8a6d8a50947 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -44,24 +44,6 @@ static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
 {
 }
 
-#ifdef CONFIG_SMP
-static unsigned long
-load_balance_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		  unsigned long max_load_move,
-		  struct sched_domain *sd, enum cpu_idle_type idle,
-		  int *all_pinned, int *this_best_prio)
-{
-	return 0;
-}
-
-static int
-move_one_task_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		   struct sched_domain *sd, enum cpu_idle_type idle)
-{
-	return 0;
-}
-#endif
-
 static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
 {
 }
@@ -119,9 +101,6 @@ static const struct sched_class idle_sched_class = {
 
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_idle,
-
-	.load_balance		= load_balance_idle,
-	.move_one_task		= move_one_task_idle,
 #endif
 
 	.set_curr_task          = set_curr_task_idle,
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 072b3fcee8d8..502bb614e40a 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1481,24 +1481,6 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
 		push_rt_tasks(rq);
 }
 
-static unsigned long
-load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		unsigned long max_load_move,
-		struct sched_domain *sd, enum cpu_idle_type idle,
-		int *all_pinned, int *this_best_prio)
-{
-	/* don't touch RT tasks */
-	return 0;
-}
-
-static int
-move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		 struct sched_domain *sd, enum cpu_idle_type idle)
-{
-	/* don't touch RT tasks */
-	return 0;
-}
-
 static void set_cpus_allowed_rt(struct task_struct *p,
 				const struct cpumask *new_mask)
 {
@@ -1746,8 +1728,6 @@ static const struct sched_class rt_sched_class = {
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_rt,
 
-	.load_balance		= load_balance_rt,
-	.move_one_task		= move_one_task_rt,
 	.set_cpus_allowed       = set_cpus_allowed_rt,
 	.rq_online              = rq_online_rt,
 	.rq_offline             = rq_offline_rt,
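
The deleted load_balance_rt()/load_balance_idle() bodies show why the methods could go: every class except fair answered 0 through the ops table, so the indirection did no work. A standalone sketch of that pattern follows; struct ops and the fair_lb/stub_lb names are invented for illustration, not kernel code.

#include <stdio.h>

/* Toy ops table in the style of struct sched_class. */
struct ops {
	const char *name;
	unsigned long (*load_balance)(unsigned long want);
};

static unsigned long fair_lb(unsigned long want) { return want; }
static unsigned long stub_lb(unsigned long want) { (void)want; return 0; }

int main(void)
{
	/* Before the commit, every class had to fill in the hook. */
	struct ops classes[] = {
		{ "rt",   stub_lb },	/* "don't touch RT tasks" */
		{ "fair", fair_lb },
		{ "idle", stub_lb },
	};
	unsigned long total = 0;
	unsigned int i;

	for (i = 0; i < sizeof(classes) / sizeof(classes[0]); i++)
		total += classes[i].load_balance(10);

	/* Only fair ever contributes, so the per-class dispatch buys nothing. */
	printf("moved %lu (all from the fair class)\n", total);
	return 0;
}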