author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-12-17 11:12:46 -0500
committer  Ingo Molnar <mingo@elte.hu>                2010-01-21 07:40:09 -0500
commit     3d45fd804a95055ecab5b3eed81f5ab2dbb047a2
tree       3ac7d2dbe75c4e406864b7d8c895f9318f308575 /kernel
parent     1e3c88bdeb1260edc341e45c9fb8efd182a5c511
sched: Remove the sched_class load_balance methods
Take out the sched_class methods for load-balancing.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
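
The effect of the patch is easiest to see in the move_tasks() hunk below: the per-class ->load_balance() indirection collapses into a direct call to load_balance_fair(), since the rt and idle implementations were stubs that always returned 0. The following is a minimal userspace sketch of the two loop shapes; all names and types here (class_model, balance_stub, move_tasks_old/new) are illustrative models, not the kernel's own code:

#include <stdio.h>

/* Toy model of a scheduling class; only the load-balance hook matters here. */
struct class_model {
	const struct class_model *next;
	unsigned long (*load_balance)(unsigned long max_load_move);
};

/* The rt and idle hooks were stubs: they never moved any load. */
static unsigned long balance_stub(unsigned long max) { (void)max; return 0; }

/* The fair hook did all the work; model it as moving whatever is asked. */
static unsigned long balance_fair(unsigned long max) { return max; }

static const struct class_model idle_class = { NULL,        balance_stub };
static const struct class_model fair_class = { &idle_class, balance_fair };
static const struct class_model rt_class   = { &fair_class, balance_stub };

/* Old shape: walk every class through the hook (highest class first). */
static unsigned long move_tasks_old(unsigned long max_load_move)
{
	const struct class_model *class = &rt_class;
	unsigned long total = 0;

	do {
		total += class->load_balance(max_load_move - total);
		class = class->next;
	} while (class && max_load_move > total);

	return total;
}

/* New shape: call the fair balancer directly until it stops making progress. */
static unsigned long move_tasks_new(unsigned long max_load_move)
{
	unsigned long total = 0, moved;

	do {
		moved = balance_fair(max_load_move - total);
		total += moved;
	} while (moved && max_load_move > total);

	return total;
}

int main(void)
{
	printf("old: %lu  new: %lu\n", move_tasks_old(10), move_tasks_new(10));
	return 0;
}

Both versions move the same load; the new one simply drops the walk over classes that never contribute any.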
Diffstat (limited to 'kernel')

 kernel/sched.c          | 26
 kernel/sched_fair.c     | 66
 kernel/sched_idletask.c | 21
 kernel/sched_rt.c       | 20

 4 files changed, 37 insertions(+), 96 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 13a2acf18b2d..c0be07932a8d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1390,32 +1390,6 @@ static const u32 prio_to_wmult[40] = {
 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
 };
 
-static void activate_task(struct rq *rq, struct task_struct *p, int wakeup);
-
-/*
- * runqueue iterator, to support SMP load-balancing between different
- * scheduling classes, without having to expose their internal data
- * structures to the load-balancing proper:
- */
-struct rq_iterator {
-	void *arg;
-	struct task_struct *(*start)(void *);
-	struct task_struct *(*next)(void *);
-};
-
-#ifdef CONFIG_SMP
-static unsigned long
-balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
-	      unsigned long max_load_move, struct sched_domain *sd,
-	      enum cpu_idle_type idle, int *all_pinned,
-	      int *this_best_prio, struct rq_iterator *iterator);
-
-static int
-iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		   struct sched_domain *sd, enum cpu_idle_type idle,
-		   struct rq_iterator *iterator);
-#endif
-
 /* Time spent by the tasks of the cpu accounting group executing in ... */
 enum cpuacct_stat_index {
 	CPUACCT_STAT_USER,	/* ... user mode */
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5116b81d7727..faf9a2f099ab 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1851,6 +1851,24 @@ static struct task_struct *load_balance_next_fair(void *arg)
 	return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
 }
 
+/*
+ * runqueue iterator, to support SMP load-balancing between different
+ * scheduling classes, without having to expose their internal data
+ * structures to the load-balancing proper:
+ */
+struct rq_iterator {
+	void *arg;
+	struct task_struct *(*start)(void *);
+	struct task_struct *(*next)(void *);
+};
+
+static unsigned long
+balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
+	      unsigned long max_load_move, struct sched_domain *sd,
+	      enum cpu_idle_type idle, int *all_pinned,
+	      int *this_best_prio, struct rq_iterator *iterator);
+
+
 static unsigned long
 __load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		    unsigned long max_load_move, struct sched_domain *sd,
@@ -1929,8 +1947,20 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 #endif
 
 static int
-move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		   struct sched_domain *sd, enum cpu_idle_type idle)
+iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
+		   struct sched_domain *sd, enum cpu_idle_type idle,
+		   struct rq_iterator *iterator);
+
+/*
+ * move_one_task tries to move exactly one task from busiest to this_rq, as
+ * part of active balancing operations within "domain".
+ * Returns 1 if successful and 0 otherwise.
+ *
+ * Called with both runqueues locked.
+ */
+static int
+move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
+	      struct sched_domain *sd, enum cpu_idle_type idle)
 {
 	struct cfs_rq *busy_cfs_rq;
 	struct rq_iterator cfs_rq_iterator;
@@ -2094,16 +2124,15 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		      struct sched_domain *sd, enum cpu_idle_type idle,
 		      int *all_pinned)
 {
-	const struct sched_class *class = sched_class_highest;
-	unsigned long total_load_moved = 0;
+	unsigned long total_load_moved = 0, load_moved;
 	int this_best_prio = this_rq->curr->prio;
 
 	do {
-		total_load_moved +=
-			class->load_balance(this_rq, this_cpu, busiest,
+		load_moved = load_balance_fair(this_rq, this_cpu, busiest,
 				max_load_move - total_load_moved,
 				sd, idle, all_pinned, &this_best_prio);
-		class = class->next;
+
+		total_load_moved += load_moved;
 
 #ifdef CONFIG_PREEMPT
 	/*
@@ -2114,7 +2143,7 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
 			break;
 #endif
-	} while (class && max_load_move > total_load_moved);
+	} while (load_moved && max_load_move > total_load_moved);
 
 	return total_load_moved > 0;
 }
@@ -2145,25 +2174,6 @@ iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	return 0;
 }
 
-/*
- * move_one_task tries to move exactly one task from busiest to this_rq, as
- * part of active balancing operations within "domain".
- * Returns 1 if successful and 0 otherwise.
- *
- * Called with both runqueues locked.
- */
-static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
-			 struct sched_domain *sd, enum cpu_idle_type idle)
-{
-	const struct sched_class *class;
-
-	for_each_class(class) {
-		if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle))
-			return 1;
-	}
-
-	return 0;
-}
 /********** Helpers for find_busiest_group ************************/
 /*
  * sd_lb_stats - Structure to store the statistics of a sched_domain
@@ -3873,8 +3883,6 @@ static const struct sched_class fair_sched_class = {
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_fair,
 
-	.load_balance		= load_balance_fair,
-	.move_one_task		= move_one_task_fair,
 	.rq_online		= rq_online_fair,
 	.rq_offline		= rq_offline_fair,
 
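
The struct rq_iterator that the first sched_fair.c hunk moves out of kernel/sched.c is a simple start/next callback pattern: the generic walker pulls tasks through two function pointers without seeing the container behind them. Below is a self-contained sketch of that pattern; the names (task_model, list_pos, pull_load) are illustrative, not the kernel's:

#include <stdio.h>

struct task_model { int load; struct task_model *next; };

/* Mirror of struct rq_iterator: opaque state plus start()/next() hooks. */
struct iter_model {
	void *arg;
	struct task_model *(*start)(void *);
	struct task_model *(*next)(void *);
};

/* One possible backing container: a singly linked list with a cursor. */
struct list_pos { struct task_model *head, *cur; };

static struct task_model *list_start(void *arg)
{
	struct list_pos *pos = arg;
	pos->cur = pos->head;
	return pos->cur;
}

static struct task_model *list_next(void *arg)
{
	struct list_pos *pos = arg;
	pos->cur = pos->cur ? pos->cur->next : NULL;
	return pos->cur;
}

/* Generic walker in the spirit of balance_tasks(): it only sees the
 * iterator, never the list, so any container could sit behind it. */
static int pull_load(struct iter_model *it, int max_load)
{
	struct task_model *p;
	int moved = 0;

	for (p = it->start(it->arg); p && moved < max_load; p = it->next(it->arg))
		moved += p->load;

	return moved;
}

int main(void)
{
	struct task_model c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct list_pos pos = { &a, NULL };
	struct iter_model it = { &pos, list_start, list_next };

	printf("moved %d\n", pull_load(&it, 10));	/* prints "moved 6" */
	return 0;
}

Before this patch the iterator type lived in kernel/sched.c so that any class could supply one; now that only the fair class balances, the type and its walkers stay private to sched_fair.c.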
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 01332bfc61a7..a8a6d8a50947 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -44,24 +44,6 @@ static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
 {
 }
 
-#ifdef CONFIG_SMP
-static unsigned long
-load_balance_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		  unsigned long max_load_move,
-		  struct sched_domain *sd, enum cpu_idle_type idle,
-		  int *all_pinned, int *this_best_prio)
-{
-	return 0;
-}
-
-static int
-move_one_task_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		   struct sched_domain *sd, enum cpu_idle_type idle)
-{
-	return 0;
-}
-#endif
-
 static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
 {
 }
@@ -119,9 +101,6 @@ static const struct sched_class idle_sched_class = {
 
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_idle,
-
-	.load_balance		= load_balance_idle,
-	.move_one_task		= move_one_task_idle,
 #endif
 
 	.set_curr_task		= set_curr_task_idle,
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 072b3fcee8d8..502bb614e40a 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1481,24 +1481,6 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
 		push_rt_tasks(rq);
 }
 
-static unsigned long
-load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		unsigned long max_load_move,
-		struct sched_domain *sd, enum cpu_idle_type idle,
-		int *all_pinned, int *this_best_prio)
-{
-	/* don't touch RT tasks */
-	return 0;
-}
-
-static int
-move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		 struct sched_domain *sd, enum cpu_idle_type idle)
-{
-	/* don't touch RT tasks */
-	return 0;
-}
-
 static void set_cpus_allowed_rt(struct task_struct *p,
 				const struct cpumask *new_mask)
 {
@@ -1746,8 +1728,6 @@ static const struct sched_class rt_sched_class = {
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_rt,
 
-	.load_balance		= load_balance_rt,
-	.move_one_task		= move_one_task_rt,
 	.set_cpus_allowed	= set_cpus_allowed_rt,
 	.rq_online		= rq_online_rt,
 	.rq_offline		= rq_offline_rt,