author     Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-12-17 11:12:46 -0500
committer  Ingo Molnar <mingo@elte.hu>              2010-01-21 07:40:09 -0500
commit     3d45fd804a95055ecab5b3eed81f5ab2dbb047a2 (patch)
tree       3ac7d2dbe75c4e406864b7d8c895f9318f308575 /kernel/sched_fair.c
parent     1e3c88bdeb1260edc341e45c9fb8efd182a5c511 (diff)
sched: Remove the sched_class load_balance methods
Take out the sched_class methods for load-balancing.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
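In other words, the generic balancer no longer walks the sched_class list
through per-class load_balance/move_one_task function pointers; it calls
load_balance_fair() directly, and the retry loop now stops when no progress
is made (load_moved == 0) instead of when the class list runs out. A
standalone userspace sketch of that before/after dispatch shape follows --
illustrative only, with made-up demo_* names and numbers, not kernel code:

/*
 * Illustrative userspace sketch only -- not kernel code.  It contrasts
 * the old indirect dispatch (walk a class list via function pointers)
 * with the new direct call into the fair class.
 */
#include <stdio.h>

struct demo_class {
	const struct demo_class *next;
	unsigned long (*load_balance)(unsigned long max_move);
};

/* Stand-in for load_balance_fair(): pretend half the request moves. */
static unsigned long fair_load_balance(unsigned long max_move)
{
	return max_move / 2;
}

static const struct demo_class fair_class = {
	.next		= NULL,
	.load_balance	= fair_load_balance,
};

/* Before: iterate every class, stop when the list is exhausted. */
static unsigned long move_tasks_indirect(unsigned long max_move)
{
	const struct demo_class *class = &fair_class;
	unsigned long total = 0;

	do {
		total += class->load_balance(max_move - total);
		class = class->next;
	} while (class && max_move > total);

	return total;
}

/* After: call the fair class directly, stop when progress stalls. */
static unsigned long move_tasks_direct(unsigned long max_move)
{
	unsigned long total = 0, moved;

	do {
		moved = fair_load_balance(max_move - total);
		total += moved;
	} while (moved && max_move > total);

	return total;
}

int main(void)
{
	printf("indirect: %lu, direct: %lu\n",
	       move_tasks_indirect(128), move_tasks_direct(128));
	return 0;
}

Note how the loop condition change in the patch below (class && ... becomes
load_moved && ...) is exactly what lets the direct version keep retrying
while the single remaining balancer still makes progress.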
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c   66
1 file changed, 37 insertions(+), 29 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5116b81d7727..faf9a2f099ab 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1851,6 +1851,24 @@ static struct task_struct *load_balance_next_fair(void *arg)
 	return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
 }
 
+/*
+ * runqueue iterator, to support SMP load-balancing between different
+ * scheduling classes, without having to expose their internal data
+ * structures to the load-balancing proper:
+ */
+struct rq_iterator {
+	void *arg;
+	struct task_struct *(*start)(void *);
+	struct task_struct *(*next)(void *);
+};
+
+static unsigned long
+balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
+	      unsigned long max_load_move, struct sched_domain *sd,
+	      enum cpu_idle_type idle, int *all_pinned,
+	      int *this_best_prio, struct rq_iterator *iterator);
+
+
 static unsigned long
 __load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		unsigned long max_load_move, struct sched_domain *sd,
@@ -1929,8 +1947,20 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 #endif
 
 static int
-move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		struct sched_domain *sd, enum cpu_idle_type idle)
+iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
+		   struct sched_domain *sd, enum cpu_idle_type idle,
+		   struct rq_iterator *iterator);
+
+/*
+ * move_one_task tries to move exactly one task from busiest to this_rq, as
+ * part of active balancing operations within "domain".
+ * Returns 1 if successful and 0 otherwise.
+ *
+ * Called with both runqueues locked.
+ */
+static int
+move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
+	      struct sched_domain *sd, enum cpu_idle_type idle)
 {
 	struct cfs_rq *busy_cfs_rq;
 	struct rq_iterator cfs_rq_iterator;
@@ -2094,16 +2124,15 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		      struct sched_domain *sd, enum cpu_idle_type idle,
 		      int *all_pinned)
 {
-	const struct sched_class *class = sched_class_highest;
-	unsigned long total_load_moved = 0;
+	unsigned long total_load_moved = 0, load_moved;
 	int this_best_prio = this_rq->curr->prio;
 
 	do {
-		total_load_moved +=
-			class->load_balance(this_rq, this_cpu, busiest,
+		load_moved = load_balance_fair(this_rq, this_cpu, busiest,
 				max_load_move - total_load_moved,
 				sd, idle, all_pinned, &this_best_prio);
-		class = class->next;
+
+		total_load_moved += load_moved;
 
 #ifdef CONFIG_PREEMPT
 		/*
@@ -2114,7 +2143,7 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
 			break;
 #endif
-	} while (class && max_load_move > total_load_moved);
+	} while (load_moved && max_load_move > total_load_moved);
 
 	return total_load_moved > 0;
 }
@@ -2145,25 +2174,6 @@ iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	return 0;
 }
 
-/*
- * move_one_task tries to move exactly one task from busiest to this_rq, as
- * part of active balancing operations within "domain".
- * Returns 1 if successful and 0 otherwise.
- *
- * Called with both runqueues locked.
- */
-static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
-			 struct sched_domain *sd, enum cpu_idle_type idle)
-{
-	const struct sched_class *class;
-
-	for_each_class(class) {
-		if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle))
-			return 1;
-	}
-
-	return 0;
-}
 /********** Helpers for find_busiest_group ************************/
 /*
  * sd_lb_stats - Structure to store the statistics of a sched_domain
@@ -3873,8 +3883,6 @@ static const struct sched_class fair_sched_class = {
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_fair,
 
-	.load_balance		= load_balance_fair,
-	.move_one_task		= move_one_task_fair,
 	.rq_online		= rq_online_fair,
 	.rq_offline		= rq_offline_fair,
 
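The struct rq_iterator that this patch moves into sched_fair.c is the
decoupling device named in its comment: generic balancing code only ever
sees a start()/next() callback pair plus an opaque arg, never the class's
runqueue internals. A minimal standalone sketch of that pattern follows --
the demo_* names are invented and only the callback shape mirrors the
kernel struct above; this is not the kernel's balance_tasks():

/*
 * Illustrative userspace sketch of the rq_iterator pattern -- not
 * kernel code.
 */
#include <stdio.h>

struct demo_task {
	int load;
	struct demo_task *next;
};

/* Opaque backing store: here a singly linked list with a cursor. */
struct demo_list {
	struct demo_task *head;
	struct demo_task *cursor;
};

struct demo_iterator {
	void *arg;				/* private container */
	struct demo_task *(*start)(void *);	/* first candidate */
	struct demo_task *(*next)(void *);	/* following candidates */
};

static struct demo_task *list_next(void *arg)
{
	struct demo_list *l = arg;
	struct demo_task *t = l->cursor;

	if (t)
		l->cursor = t->next;
	return t;
}

static struct demo_task *list_start(void *arg)
{
	struct demo_list *l = arg;

	l->cursor = l->head;
	return list_next(arg);
}

/*
 * Generic "balancer": walks tasks purely through the callbacks and
 * pulls whatever fits in the load budget, never touching demo_list.
 */
static unsigned long pull_tasks(unsigned long max_load,
				struct demo_iterator *it)
{
	unsigned long moved = 0;
	struct demo_task *t;

	for (t = it->start(it->arg); t; t = it->next(it->arg)) {
		if (moved + t->load > max_load)
			continue;	/* too heavy, try the next one */
		moved += t->load;	/* "migrate" this task */
	}
	return moved;
}

int main(void)
{
	struct demo_task c = { 3, NULL }, b = { 5, &c }, a = { 2, &b };
	struct demo_list rq = { &a, NULL };
	struct demo_iterator it = { &rq, list_start, list_next };

	printf("moved %lu units of load\n", pull_tasks(7, &it));
	return 0;
}

In the patch itself the same shape appears as balance_tasks() driving
per-cfs_rq iterators; what the commit removes is only the sched_class
function-pointer layer that used to sit on top of it.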