author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-12-17 11:47:12 -0500
committer	Ingo Molnar <mingo@elte.hu>	2010-01-21 07:40:12 -0500
commit		230059de77a4e0f6afba98073e73bc9fd471506e (patch)
tree		3b76b4e7797f4b66878f42bc45dec6610a14a1f8 /kernel
parent		897c395f4c94ae19302f92393a0b8304e414ee06 (diff)
sched: Remove from fwd decls
Move code around to get rid of fwd declarations.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
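
The pattern the patch applies is worth spelling out: C requires a function to be declared before its first use, so a file that defines a caller above its callee needs a forward declaration of the callee. Defining the callee first makes that declaration redundant, which is what moving load_balance_fair() below balance_tasks() achieves here. A minimal sketch of the same reordering, using hypothetical names (caller/callee are illustrative, not from the patch):

/* Before: caller precedes its callee, so a fwd decl is required. */
static int callee(int x);	/* fwd decl */

static int caller(int x)
{
	return callee(x) + 1;
}

static int callee(int x)
{
	return x * 2;
}

/* After: define callee first and the fwd decl can simply be dropped. */
static int callee(int x)
{
	return x * 2;
}

static int caller(int x)
{
	return callee(x) + 1;
}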
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched_fair.c	127
1 file changed, 60 insertions(+), 67 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index e48e459da98d..93fccbadde23 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1814,73 +1814,6 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
  * Fair scheduling class load-balancing methods:
  */
 
-static unsigned long
-balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
-	      unsigned long max_load_move, struct sched_domain *sd,
-	      enum cpu_idle_type idle, int *all_pinned,
-	      int *this_best_prio, struct cfs_rq *busiest_cfs_rq);
-
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
-static unsigned long
-load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		  unsigned long max_load_move,
-		  struct sched_domain *sd, enum cpu_idle_type idle,
-		  int *all_pinned, int *this_best_prio)
-{
-	long rem_load_move = max_load_move;
-	int busiest_cpu = cpu_of(busiest);
-	struct task_group *tg;
-
-	rcu_read_lock();
-	update_h_load(busiest_cpu);
-
-	list_for_each_entry_rcu(tg, &task_groups, list) {
-		struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
-		unsigned long busiest_h_load = busiest_cfs_rq->h_load;
-		unsigned long busiest_weight = busiest_cfs_rq->load.weight;
-		u64 rem_load, moved_load;
-
-		/*
-		 * empty group
-		 */
-		if (!busiest_cfs_rq->task_weight)
-			continue;
-
-		rem_load = (u64)rem_load_move * busiest_weight;
-		rem_load = div_u64(rem_load, busiest_h_load + 1);
-
-		moved_load = balance_tasks(this_rq, this_cpu, busiest,
-				rem_load, sd, idle, all_pinned, this_best_prio,
-				busiest_cfs_rq);
-
-		if (!moved_load)
-			continue;
-
-		moved_load *= busiest_h_load;
-		moved_load = div_u64(moved_load, busiest_weight + 1);
-
-		rem_load_move -= moved_load;
-		if (rem_load_move < 0)
-			break;
-	}
-	rcu_read_unlock();
-
-	return max_load_move - rem_load_move;
-}
-#else
-static unsigned long
-load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		  unsigned long max_load_move,
-		  struct sched_domain *sd, enum cpu_idle_type idle,
-		  int *all_pinned, int *this_best_prio)
-{
-	return balance_tasks(this_rq, this_cpu, busiest,
-			max_load_move, sd, idle, all_pinned,
-			this_best_prio, &busiest->cfs);
-}
-#endif
-
 /*
  * pull_task - move a task from a remote runqueue to the local runqueue.
  * Both runqueues must be locked.
@@ -2042,6 +1975,66 @@ out:
 	return max_load_move - rem_load_move;
 }
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static unsigned long
+load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
+		  unsigned long max_load_move,
+		  struct sched_domain *sd, enum cpu_idle_type idle,
+		  int *all_pinned, int *this_best_prio)
+{
+	long rem_load_move = max_load_move;
+	int busiest_cpu = cpu_of(busiest);
+	struct task_group *tg;
+
+	rcu_read_lock();
+	update_h_load(busiest_cpu);
+
+	list_for_each_entry_rcu(tg, &task_groups, list) {
+		struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
+		unsigned long busiest_h_load = busiest_cfs_rq->h_load;
+		unsigned long busiest_weight = busiest_cfs_rq->load.weight;
+		u64 rem_load, moved_load;
+
+		/*
+		 * empty group
+		 */
+		if (!busiest_cfs_rq->task_weight)
+			continue;
+
+		rem_load = (u64)rem_load_move * busiest_weight;
+		rem_load = div_u64(rem_load, busiest_h_load + 1);
+
+		moved_load = balance_tasks(this_rq, this_cpu, busiest,
+				rem_load, sd, idle, all_pinned, this_best_prio,
+				busiest_cfs_rq);
+
+		if (!moved_load)
+			continue;
+
+		moved_load *= busiest_h_load;
+		moved_load = div_u64(moved_load, busiest_weight + 1);
+
+		rem_load_move -= moved_load;
+		if (rem_load_move < 0)
+			break;
+	}
+	rcu_read_unlock();
+
+	return max_load_move - rem_load_move;
+}
+#else
+static unsigned long
+load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
+		  unsigned long max_load_move,
+		  struct sched_domain *sd, enum cpu_idle_type idle,
+		  int *all_pinned, int *this_best_prio)
+{
+	return balance_tasks(this_rq, this_cpu, busiest,
+			max_load_move, sd, idle, all_pinned,
+			this_best_prio, &busiest->cfs);
+}
+#endif
+
 /*
  * move_tasks tries to move up to max_load_move weighted load from busiest to
  * this_rq, as part of a balancing operation within domain "sd".
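
For context on the moved function (whose logic this patch leaves untouched): load_balance_fair() converts the remaining load to move from the global scale into each group's local scale via the group's hierarchical load (h_load), asks balance_tasks() to move that much, then converts the amount actually moved back to the global scale; the +1 in each divisor guards against dividing by zero for unweighted groups. A standalone userspace sketch of that arithmetic, with made-up numbers and a plain stand-in for the kernel's div_u64():

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for the kernel's div_u64() helper. */
static uint64_t div_u64(uint64_t dividend, uint64_t divisor)
{
	return dividend / divisor;
}

int main(void)
{
	/* Illustrative values, not taken from a real run. */
	uint64_t rem_load_move  = 2048;	/* load left to move, global scale */
	uint64_t busiest_weight = 1024;	/* group's local load.weight */
	uint64_t busiest_h_load = 512;	/* group's hierarchical load */

	/* Global -> group-local scale, as in load_balance_fair(). */
	uint64_t rem_load = div_u64(rem_load_move * busiest_weight,
				    busiest_h_load + 1);

	/* Pretend balance_tasks() moved half of that (group scale)... */
	uint64_t moved_load = rem_load / 2;

	/* ...and convert the moved amount back to the global scale. */
	moved_load = div_u64(moved_load * busiest_h_load,
			     busiest_weight + 1);

	printf("rem_load=%llu moved_load=%llu\n",
	       (unsigned long long)rem_load,
	       (unsigned long long)moved_load);
	return 0;
}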