author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-12-17 11:25:20 -0500
committer	Ingo Molnar <mingo@elte.hu>	2010-01-21 07:40:10 -0500
commit	ee00e66ffff250fb0d3a789e5565462f71c7c9a7 (patch)
tree	2118b3ef38cec2ad3beb4e958c172307c22ac564	/kernel/sched_fair.c
parent	3d45fd804a95055ecab5b3eed81f5ab2dbb047a2 (diff)
sched: Remove rq_iterator usage from load_balance_fair
Since we only ever iterate the fair class, do away with this abstraction.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
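The change is mechanical: the start()/next() callback pair behind struct rq_iterator is replaced with a direct list_for_each_entry_safe() walk over busiest_cfs_rq->tasks. The _safe variant matters because pull_task() unlinks the current task from busiest's list mid-walk. Below is a minimal userspace sketch of that list-walk pattern, with simplified stand-ins for the kernel's list helpers and task_struct; it illustrates the technique, it is not the scheduler code itself.

/*
 * Sketch: why the loop may delete the entry it is standing on.
 * The macro caches the next node (n) before the body runs, mirroring
 * the kernel's list_for_each_entry_safe(). Uses the GCC/Clang typeof
 * extension, as the kernel macro does.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry_safe(pos, n, head, member)			    \
	for (pos = container_of((head)->next, typeof(*pos), member),	    \
	     n = container_of(pos->member.next, typeof(*pos), member);	    \
	     &pos->member != (head);					    \
	     pos = n, n = container_of(n->member.next, typeof(*n), member))

static void list_add_tail(struct list_head *e, struct list_head *head)
{
	e->prev = head->prev;
	e->next = head;
	head->prev->next = e;
	head->prev = e;
}

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

/* Stand-in for a task queued on cfs_rq->tasks via se.group_node. */
struct task { int weight; struct list_head group_node; };

int main(void)
{
	struct list_head tasks = { &tasks, &tasks };
	struct task t[3] = { { 1 }, { 2 }, { 4 } };
	struct task *p, *n;
	int i;

	for (i = 0; i < 3; i++)
		list_add_tail(&t[i].group_node, &tasks);

	/* "Pull" every task: unlinking p is safe because n was saved. */
	list_for_each_entry_safe(p, n, &tasks, group_node) {
		list_del(&p->group_node);
		printf("pulled task of weight %d\n", p->weight);
	}
	return 0;
}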
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	80
1 file changed, 29 insertions(+), 51 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index faf9a2f099ab..709deb33708a 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1866,26 +1866,9 @@ static unsigned long
 balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	      unsigned long max_load_move, struct sched_domain *sd,
 	      enum cpu_idle_type idle, int *all_pinned,
-	      int *this_best_prio, struct rq_iterator *iterator);
+	      int *this_best_prio, struct cfs_rq *busiest_cfs_rq);
 
 
-static unsigned long
-__load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		unsigned long max_load_move, struct sched_domain *sd,
-		enum cpu_idle_type idle, int *all_pinned, int *this_best_prio,
-		struct cfs_rq *cfs_rq)
-{
-	struct rq_iterator cfs_rq_iterator;
-
-	cfs_rq_iterator.start = load_balance_start_fair;
-	cfs_rq_iterator.next = load_balance_next_fair;
-	cfs_rq_iterator.arg = cfs_rq;
-
-	return balance_tasks(this_rq, this_cpu, busiest,
-			max_load_move, sd, idle, all_pinned,
-			this_best_prio, &cfs_rq_iterator);
-}
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
@@ -1915,9 +1898,9 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		rem_load = (u64)rem_load_move * busiest_weight;
 		rem_load = div_u64(rem_load, busiest_h_load + 1);
 
-		moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
+		moved_load = balance_tasks(this_rq, this_cpu, busiest,
 				rem_load, sd, idle, all_pinned, this_best_prio,
-				tg->cfs_rq[busiest_cpu]);
+				busiest_cfs_rq);
 
 		if (!moved_load)
 			continue;
@@ -1940,7 +1923,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		  struct sched_domain *sd, enum cpu_idle_type idle,
 		  int *all_pinned, int *this_best_prio)
 {
-	return __load_balance_fair(this_rq, this_cpu, busiest,
+	return balance_tasks(this_rq, this_cpu, busiest,
 			max_load_move, sd, idle, all_pinned,
 			this_best_prio, &busiest->cfs);
 }
@@ -2050,53 +2033,48 @@ static unsigned long
 balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	      unsigned long max_load_move, struct sched_domain *sd,
 	      enum cpu_idle_type idle, int *all_pinned,
-	      int *this_best_prio, struct rq_iterator *iterator)
+	      int *this_best_prio, struct cfs_rq *busiest_cfs_rq)
 {
 	int loops = 0, pulled = 0, pinned = 0;
-	struct task_struct *p;
 	long rem_load_move = max_load_move;
+	struct task_struct *p, *n;
 
 	if (max_load_move == 0)
 		goto out;
 
 	pinned = 1;
 
-	/*
-	 * Start the load-balancing iterator:
-	 */
-	p = iterator->start(iterator->arg);
-next:
-	if (!p || loops++ > sysctl_sched_nr_migrate)
-		goto out;
+	list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
+		if (loops++ > sysctl_sched_nr_migrate)
+			break;
 
-	if ((p->se.load.weight >> 1) > rem_load_move ||
-	    !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
-		p = iterator->next(iterator->arg);
-		goto next;
-	}
+		if ((p->se.load.weight >> 1) > rem_load_move ||
+		    !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned))
+			continue;
 
-	pull_task(busiest, p, this_rq, this_cpu);
-	pulled++;
-	rem_load_move -= p->se.load.weight;
+		pull_task(busiest, p, this_rq, this_cpu);
+		pulled++;
+		rem_load_move -= p->se.load.weight;
 
 #ifdef CONFIG_PREEMPT
-	/*
-	 * NEWIDLE balancing is a source of latency, so preemptible kernels
-	 * will stop after the first task is pulled to minimize the critical
-	 * section.
-	 */
-	if (idle == CPU_NEWLY_IDLE)
-		goto out;
+		/*
+		 * NEWIDLE balancing is a source of latency, so preemptible
+		 * kernels will stop after the first task is pulled to minimize
+		 * the critical section.
+		 */
+		if (idle == CPU_NEWLY_IDLE)
+			break;
 #endif
 
-	/*
-	 * We only want to steal up to the prescribed amount of weighted load.
-	 */
-	if (rem_load_move > 0) {
+		/*
+		 * We only want to steal up to the prescribed amount of
+		 * weighted load.
+		 */
+		if (rem_load_move <= 0)
+			break;
+
 		if (p->prio < *this_best_prio)
 			*this_best_prio = p->prio;
-		p = iterator->next(iterator->arg);
-		goto next;
 	}
 out:
 	/*