author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-12-17 11:45:42 -0500
committer  Ingo Molnar <mingo@elte.hu>                2010-01-21 07:40:11 -0500
commit     897c395f4c94ae19302f92393a0b8304e414ee06 (patch)
tree       6126d2e5ab28a78af2fa2ba1e397b326baa35f1a /kernel/sched_fair.c
parent     ee00e66ffff250fb0d3a789e5565462f71c7c9a7 (diff)
sched: Remove rq_iterator from move_one_task
Again, since we only iterate the fair class, remove the abstraction.
Since this is the last user of the rq_iterator, remove all that too.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c  146
1 file changed, 36 insertions(+), 110 deletions(-)
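
In short: the callback-based rq_iterator indirection is replaced by a direct, deletion-safe walk of cfs_rq->tasks. Below is a condensed userspace sketch of that pattern change; the names here (struct task, struct iterator, move_one, migratable) are illustrative stand-ins, not the kernel's:

#include <stdio.h>

struct task {
        int pid;
        int migratable;
        struct task *next;
};

/* Before: the balancer walked tasks through an opaque start()/next()
 * callback pair, analogous to the removed struct rq_iterator. */
struct iterator {
        void *arg;
        struct task *(*start)(void *);
        struct task *(*next)(void *);
};

/* After: the caller traverses the list itself.  The successor is
 * cached before the loop body runs, mirroring what
 * list_for_each_entry_safe() provides in the kernel, so the current
 * node may be unlinked mid-walk. */
static struct task *move_one(struct task **head)
{
        struct task *p, *n, **link = head;

        for (p = *head; p; p = n) {
                n = p->next;                    /* cache successor first */
                if (!p->migratable) {           /* can_migrate_task() analogue */
                        link = &p->next;
                        continue;
                }
                *link = n;                      /* "pull" p off this queue */
                p->next = NULL;
                return p;                       /* moved exactly one task */
        }
        return NULL;
}

int main(void)
{
        struct task c = { 3, 1, NULL }, b = { 2, 0, &c }, a = { 1, 0, &b };
        struct task *head = &a;
        struct task *p = move_one(&head);

        printf("moved pid %d\n", p ? p->pid : -1);      /* moved pid 3 */
        return 0;
}
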
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 709deb33708a..e48e459da98d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1814,54 +1814,6 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
  * Fair scheduling class load-balancing methods:
  */
 
-/*
- * Load-balancing iterator. Note: while the runqueue stays locked
- * during the whole iteration, the current task might be
- * dequeued so the iterator has to be dequeue-safe. Here we
- * achieve that by always pre-iterating before returning
- * the current task:
- */
-static struct task_struct *
-__load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
-{
-        struct task_struct *p = NULL;
-        struct sched_entity *se;
-
-        if (next == &cfs_rq->tasks)
-                return NULL;
-
-        se = list_entry(next, struct sched_entity, group_node);
-        p = task_of(se);
-        cfs_rq->balance_iterator = next->next;
-
-        return p;
-}
-
-static struct task_struct *load_balance_start_fair(void *arg)
-{
-        struct cfs_rq *cfs_rq = arg;
-
-        return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next);
-}
-
-static struct task_struct *load_balance_next_fair(void *arg)
-{
-        struct cfs_rq *cfs_rq = arg;
-
-        return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
-}
-
-/*
- * runqueue iterator, to support SMP load-balancing between different
- * scheduling classes, without having to expose their internal data
- * structures to the load-balancing proper:
- */
-struct rq_iterator {
-        void *arg;
-        struct task_struct *(*start)(void *);
-        struct task_struct *(*next)(void *);
-};
-
 static unsigned long
 balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
               unsigned long max_load_move, struct sched_domain *sd,
@@ -1929,42 +1881,6 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 }
 #endif
 
-static int
-iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
-                   struct sched_domain *sd, enum cpu_idle_type idle,
-                   struct rq_iterator *iterator);
-
-/*
- * move_one_task tries to move exactly one task from busiest to this_rq, as
- * part of active balancing operations within "domain".
- * Returns 1 if successful and 0 otherwise.
- *
- * Called with both runqueues locked.
- */
-static int
-move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
-              struct sched_domain *sd, enum cpu_idle_type idle)
-{
-        struct cfs_rq *busy_cfs_rq;
-        struct rq_iterator cfs_rq_iterator;
-
-        cfs_rq_iterator.start = load_balance_start_fair;
-        cfs_rq_iterator.next = load_balance_next_fair;
-
-        for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
-                /*
-                 * pass busy_cfs_rq argument into
-                 * load_balance_[start|next]_fair iterators
-                 */
-                cfs_rq_iterator.arg = busy_cfs_rq;
-                if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
-                                       &cfs_rq_iterator))
-                        return 1;
-        }
-
-        return 0;
-}
-
 /*
  * pull_task - move a task from a remote runqueue to the local runqueue.
  * Both runqueues must be locked.
@@ -2029,6 +1945,42 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
         return 1;
 }
 
+/*
+ * move_one_task tries to move exactly one task from busiest to this_rq, as
+ * part of active balancing operations within "domain".
+ * Returns 1 if successful and 0 otherwise.
+ *
+ * Called with both runqueues locked.
+ */
+static int
+move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
+              struct sched_domain *sd, enum cpu_idle_type idle)
+{
+        struct task_struct *p, *n;
+        struct cfs_rq *cfs_rq;
+        int pinned = 0;
+
+        for_each_leaf_cfs_rq(busiest, cfs_rq) {
+                list_for_each_entry_safe(p, n, &cfs_rq->tasks, se.group_node) {
+
+                        if (!can_migrate_task(p, busiest, this_cpu,
+                                              sd, idle, &pinned))
+                                continue;
+
+                        pull_task(busiest, p, this_rq, this_cpu);
+                        /*
+                         * Right now, this is only the second place pull_task()
+                         * is called, so we can safely collect pull_task()
+                         * stats here rather than inside pull_task().
+                         */
+                        schedstat_inc(sd, lb_gained[idle]);
+                        return 1;
+                }
+        }
+
+        return 0;
+}
+
 static unsigned long
 balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
               unsigned long max_load_move, struct sched_domain *sd,
@@ -2126,32 +2078,6 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
         return total_load_moved > 0;
 }
 
-static int
-iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
-                   struct sched_domain *sd, enum cpu_idle_type idle,
-                   struct rq_iterator *iterator)
-{
-        struct task_struct *p = iterator->start(iterator->arg);
-        int pinned = 0;
-
-        while (p) {
-                if (can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
-                        pull_task(busiest, p, this_rq, this_cpu);
-                        /*
-                         * Right now, this is only the second place pull_task()
-                         * is called, so we can safely collect pull_task()
-                         * stats here rather than inside pull_task().
-                         */
-                        schedstat_inc(sd, lb_gained[idle]);
-
-                        return 1;
-                }
-                p = iterator->next(iterator->arg);
-        }
-
-        return 0;
-}
-
 /********** Helpers for find_busiest_group ************************/
 /*
  * sd_lb_stats - Structure to store the statistics of a sched_domain
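
Note that the replacement loop relies on list_for_each_entry_safe(), which caches the next entry before the loop body runs. That preserves the dequeue-safety the removed __load_balance_iterator() provided by pre-iterating: pull_task() dequeues the chosen task from cfs_rq->tasks, so a plain list walk could not safely step past it.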