Diffstat (limited to 'kernel/sched_fair.c')
 -rw-r--r--  kernel/sched_fair.c  | 1689
 1 file changed, 1634 insertions(+), 55 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 8fe7ee81c552..ff7692ccda89 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1053,7 +1053,8 @@ static inline void hrtick_update(struct rq *rq)
  * increased. Here we update the fair scheduling stats and
  * then put the task into the rbtree:
  */
-static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
+static void
+enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
@@ -1815,57 +1816,164 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
  */
 
 /*
- * Load-balancing iterator. Note: while the runqueue stays locked
- * during the whole iteration, the current task might be
- * dequeued so the iterator has to be dequeue-safe. Here we
- * achieve that by always pre-iterating before returning
- * the current task:
+ * pull_task - move a task from a remote runqueue to the local runqueue.
+ * Both runqueues must be locked.
  */
-static struct task_struct *
-__load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
+static void pull_task(struct rq *src_rq, struct task_struct *p,
+		      struct rq *this_rq, int this_cpu)
 {
-	struct task_struct *p = NULL;
-	struct sched_entity *se;
+	deactivate_task(src_rq, p, 0);
+	set_task_cpu(p, this_cpu);
+	activate_task(this_rq, p, 0);
+	check_preempt_curr(this_rq, p, 0);
+}
 
-	if (next == &cfs_rq->tasks)
-		return NULL;
+/*
+ * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
+ */
+static
+int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
+		     struct sched_domain *sd, enum cpu_idle_type idle,
+		     int *all_pinned)
+{
+	int tsk_cache_hot = 0;
+	/*
+	 * We do not migrate tasks that are:
+	 * 1) running (obviously), or
+	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
+	 * 3) are cache-hot on their current CPU.
+	 */
+	if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
+		schedstat_inc(p, se.nr_failed_migrations_affine);
+		return 0;
+	}
+	*all_pinned = 0;
 
-	se = list_entry(next, struct sched_entity, group_node);
-	p = task_of(se);
-	cfs_rq->balance_iterator = next->next;
+	if (task_running(rq, p)) {
+		schedstat_inc(p, se.nr_failed_migrations_running);
+		return 0;
+	}
 
-	return p;
-}
+	/*
+	 * Aggressive migration if:
+	 * 1) task is cache cold, or
+	 * 2) too many balance attempts have failed.
+	 */
 
-static struct task_struct *load_balance_start_fair(void *arg)
-{
-	struct cfs_rq *cfs_rq = arg;
+	tsk_cache_hot = task_hot(p, rq->clock, sd);
+	if (!tsk_cache_hot ||
+		sd->nr_balance_failed > sd->cache_nice_tries) {
+#ifdef CONFIG_SCHEDSTATS
+	if (tsk_cache_hot) {
+		schedstat_inc(sd, lb_hot_gained[idle]);
+		schedstat_inc(p, se.nr_forced_migrations);
+	}
+#endif
+		return 1;
+	}
 
-	return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next);
+	if (tsk_cache_hot) {
+		schedstat_inc(p, se.nr_failed_migrations_hot);
+		return 0;
+	}
+	return 1;
 }
 
-static struct task_struct *load_balance_next_fair(void *arg)
+/*
+ * move_one_task tries to move exactly one task from busiest to this_rq, as
+ * part of active balancing operations within "domain".
+ * Returns 1 if successful and 0 otherwise.
+ *
+ * Called with both runqueues locked.
+ */
+static int
+move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
+	      struct sched_domain *sd, enum cpu_idle_type idle)
 {
-	struct cfs_rq *cfs_rq = arg;
+	struct task_struct *p, *n;
+	struct cfs_rq *cfs_rq;
+	int pinned = 0;
+
+	for_each_leaf_cfs_rq(busiest, cfs_rq) {
+		list_for_each_entry_safe(p, n, &cfs_rq->tasks, se.group_node) {
+
+			if (!can_migrate_task(p, busiest, this_cpu,
+						sd, idle, &pinned))
+				continue;
 
-	return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
+			pull_task(busiest, p, this_rq, this_cpu);
+			/*
+			 * Right now, this is only the second place pull_task()
+			 * is called, so we can safely collect pull_task()
+			 * stats here rather than inside pull_task().
+			 */
+			schedstat_inc(sd, lb_gained[idle]);
+			return 1;
+		}
+	}
+
+	return 0;
 }
 
 static unsigned long
-__load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
+balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		unsigned long max_load_move, struct sched_domain *sd,
-		enum cpu_idle_type idle, int *all_pinned, int *this_best_prio,
-		struct cfs_rq *cfs_rq)
+		enum cpu_idle_type idle, int *all_pinned,
+		int *this_best_prio, struct cfs_rq *busiest_cfs_rq)
 {
-	struct rq_iterator cfs_rq_iterator;
+	int loops = 0, pulled = 0, pinned = 0;
+	long rem_load_move = max_load_move;
+	struct task_struct *p, *n;
 
-	cfs_rq_iterator.start = load_balance_start_fair;
-	cfs_rq_iterator.next = load_balance_next_fair;
-	cfs_rq_iterator.arg = cfs_rq;
+	if (max_load_move == 0)
+		goto out;
 
-	return balance_tasks(this_rq, this_cpu, busiest,
-			max_load_move, sd, idle, all_pinned,
-			this_best_prio, &cfs_rq_iterator);
+	pinned = 1;
+
+	list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
+		if (loops++ > sysctl_sched_nr_migrate)
+			break;
+
+		if ((p->se.load.weight >> 1) > rem_load_move ||
+		    !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned))
+			continue;
+
+		pull_task(busiest, p, this_rq, this_cpu);
+		pulled++;
+		rem_load_move -= p->se.load.weight;
+
+#ifdef CONFIG_PREEMPT
+		/*
+		 * NEWIDLE balancing is a source of latency, so preemptible
+		 * kernels will stop after the first task is pulled to minimize
+		 * the critical section.
+		 */
+		if (idle == CPU_NEWLY_IDLE)
+			break;
+#endif
+
+		/*
+		 * We only want to steal up to the prescribed amount of
+		 * weighted load.
+		 */
+		if (rem_load_move <= 0)
+			break;
+
+		if (p->prio < *this_best_prio)
+			*this_best_prio = p->prio;
+	}
+out:
+	/*
+	 * Right now, this is one of only two places pull_task() is called,
+	 * so we can safely collect pull_task() stats here rather than
+	 * inside pull_task().
+	 */
+	schedstat_add(sd, lb_gained[idle], pulled);
+
+	if (all_pinned)
+		*all_pinned = pinned;
+
+	return max_load_move - rem_load_move;
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -1897,9 +2005,9 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		rem_load = (u64)rem_load_move * busiest_weight;
 		rem_load = div_u64(rem_load, busiest_h_load + 1);
 
-		moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
+		moved_load = balance_tasks(this_rq, this_cpu, busiest,
 				rem_load, sd, idle, all_pinned, this_best_prio,
-				tg->cfs_rq[busiest_cpu]);
+				busiest_cfs_rq);
 
 		if (!moved_load)
 			continue;
@@ -1922,35 +2030,1499 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		struct sched_domain *sd, enum cpu_idle_type idle,
 		int *all_pinned, int *this_best_prio)
 {
-	return __load_balance_fair(this_rq, this_cpu, busiest,
+	return balance_tasks(this_rq, this_cpu, busiest,
 			max_load_move, sd, idle, all_pinned,
 			this_best_prio, &busiest->cfs);
 }
 #endif
 
-static int
-move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		   struct sched_domain *sd, enum cpu_idle_type idle)
+/*
+ * move_tasks tries to move up to max_load_move weighted load from busiest to
+ * this_rq, as part of a balancing operation within domain "sd".
2042 * Returns 1 if successful and 0 otherwise.
2043 *
2044 * Called with both runqueues locked.
2045 */
2046static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
2047 unsigned long max_load_move,
2048 struct sched_domain *sd, enum cpu_idle_type idle,
2049 int *all_pinned)
2050{
2051 unsigned long total_load_moved = 0, load_moved;
2052 int this_best_prio = this_rq->curr->prio;
2053
2054 do {
2055 load_moved = load_balance_fair(this_rq, this_cpu, busiest,
2056 max_load_move - total_load_moved,
2057 sd, idle, all_pinned, &this_best_prio);
2058
2059 total_load_moved += load_moved;
2060
2061#ifdef CONFIG_PREEMPT
2062 /*
2063 * NEWIDLE balancing is a source of latency, so preemptible
2064 * kernels will stop after the first task is pulled to minimize
2065 * the critical section.
2066 */
2067 if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
2068 break;
2069
2070 if (raw_spin_is_contended(&this_rq->lock) ||
2071 raw_spin_is_contended(&busiest->lock))
2072 break;
2073#endif
2074 } while (load_moved && max_load_move > total_load_moved);
2075
2076 return total_load_moved > 0;
2077}
2078
2079/********** Helpers for find_busiest_group ************************/
2080/*
2081 * sd_lb_stats - Structure to store the statistics of a sched_domain
2082 * during load balancing.
2083 */
2084struct sd_lb_stats {
2085 struct sched_group *busiest; /* Busiest group in this sd */
2086 struct sched_group *this; /* Local group in this sd */
2087 unsigned long total_load; /* Total load of all groups in sd */
2088 unsigned long total_pwr; /* Total power of all groups in sd */
2089 unsigned long avg_load; /* Average load across all groups in sd */
2090
2091 /** Statistics of this group */
2092 unsigned long this_load;
2093 unsigned long this_load_per_task;
2094 unsigned long this_nr_running;
2095
2096 /* Statistics of the busiest group */
2097 unsigned long max_load;
2098 unsigned long busiest_load_per_task;
2099 unsigned long busiest_nr_running;
2100
2101 int group_imb; /* Is there imbalance in this sd */
2102#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
2103 int power_savings_balance; /* Is powersave balance needed for this sd */
2104 struct sched_group *group_min; /* Least loaded group in sd */
2105 struct sched_group *group_leader; /* Group which relieves group_min */
2106 unsigned long min_load_per_task; /* load_per_task in group_min */
2107 unsigned long leader_nr_running; /* Nr running of group_leader */
2108 unsigned long min_nr_running; /* Nr running of group_min */
2109#endif
2110};
2111
2112/*
2113 * sg_lb_stats - stats of a sched_group required for load_balancing
2114 */
2115struct sg_lb_stats {
2116 unsigned long avg_load; /*Avg load across the CPUs of the group */
2117 unsigned long group_load; /* Total load over the CPUs of the group */
2118 unsigned long sum_nr_running; /* Nr tasks running in the group */
2119 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
2120 unsigned long group_capacity;
2121 int group_imb; /* Is there an imbalance in the group ? */
2122};
2123
2124/**
2125 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
2126 * @group: The group whose first cpu is to be returned.
2127 */
2128static inline unsigned int group_first_cpu(struct sched_group *group)
2129{
2130 return cpumask_first(sched_group_cpus(group));
2131}
2132
2133/**
2134 * get_sd_load_idx - Obtain the load index for a given sched domain.
2135 * @sd: The sched_domain whose load_idx is to be obtained.
2136 * @idle: The idle status of the CPU for whose sd load_idx is obtained.
2137 */
2138static inline int get_sd_load_idx(struct sched_domain *sd,
2139 enum cpu_idle_type idle)
2140{
2141 int load_idx;
2142
2143 switch (idle) {
2144 case CPU_NOT_IDLE:
2145 load_idx = sd->busy_idx;
2146 break;
2147
2148 case CPU_NEWLY_IDLE:
2149 load_idx = sd->newidle_idx;
2150 break;
2151 default:
2152 load_idx = sd->idle_idx;
2153 break;
2154 }
2155
2156 return load_idx;
2157}
2158
2159
2160#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
2161/**
2162 * init_sd_power_savings_stats - Initialize power savings statistics for
2163 * the given sched_domain, during load balancing.
2164 *
2165 * @sd: Sched domain whose power-savings statistics are to be initialized.
2166 * @sds: Variable containing the statistics for sd.
2167 * @idle: Idle status of the CPU at which we're performing load-balancing.
2168 */
2169static inline void init_sd_power_savings_stats(struct sched_domain *sd,
2170 struct sd_lb_stats *sds, enum cpu_idle_type idle)
2171{
2172 /*
2173 * Busy processors will not participate in power savings
2174 * balance.
2175 */
2176 if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
2177 sds->power_savings_balance = 0;
2178 else {
2179 sds->power_savings_balance = 1;
2180 sds->min_nr_running = ULONG_MAX;
2181 sds->leader_nr_running = 0;
2182 }
2183}
2184
2185/**
2186 * update_sd_power_savings_stats - Update the power saving stats for a
2187 * sched_domain while performing load balancing.
2188 *
2189 * @group: sched_group belonging to the sched_domain under consideration.
2190 * @sds: Variable containing the statistics of the sched_domain
2191 * @local_group: Does group contain the CPU for which we're performing
2192 * load balancing ?
2193 * @sgs: Variable containing the statistics of the group.
2194 */
2195static inline void update_sd_power_savings_stats(struct sched_group *group,
2196 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
2197{
2198
2199 if (!sds->power_savings_balance)
2200 return;
2201
2202 /*
2203 * If the local group is idle or completely loaded
2204 * no need to do power savings balance at this domain
2205 */
2206 if (local_group && (sds->this_nr_running >= sgs->group_capacity ||
2207 !sds->this_nr_running))
2208 sds->power_savings_balance = 0;
2209
2210 /*
2211 * If a group is already running at full capacity or idle,
2212 * don't include that group in power savings calculations
2213 */
2214 if (!sds->power_savings_balance ||
2215 sgs->sum_nr_running >= sgs->group_capacity ||
2216 !sgs->sum_nr_running)
2217 return;
2218
2219 /*
2220 * Calculate the group which has the least non-idle load.
2221 * This is the group from where we need to pick up the load
2222 * for saving power
2223 */
2224 if ((sgs->sum_nr_running < sds->min_nr_running) ||
2225 (sgs->sum_nr_running == sds->min_nr_running &&
2226 group_first_cpu(group) > group_first_cpu(sds->group_min))) {
2227 sds->group_min = group;
2228 sds->min_nr_running = sgs->sum_nr_running;
2229 sds->min_load_per_task = sgs->sum_weighted_load /
2230 sgs->sum_nr_running;
2231 }
2232
2233 /*
2234 * Calculate the group which is almost near its
2235 * capacity but still has some space to pick up some load
2236 * from other group and save more power
2237 */
2238 if (sgs->sum_nr_running + 1 > sgs->group_capacity)
2239 return;
2240
2241 if (sgs->sum_nr_running > sds->leader_nr_running ||
2242 (sgs->sum_nr_running == sds->leader_nr_running &&
2243 group_first_cpu(group) < group_first_cpu(sds->group_leader))) {
2244 sds->group_leader = group;
2245 sds->leader_nr_running = sgs->sum_nr_running;
2246 }
2247}
2248
2249/**
2250 * check_power_save_busiest_group - see if there is potential for some power-savings balance
2251 * @sds: Variable containing the statistics of the sched_domain
2252 * under consideration.
2253 * @this_cpu: Cpu at which we're currently performing load-balancing.
2254 * @imbalance: Variable to store the imbalance.
2255 *
2256 * Description:
2257 * Check if we have potential to perform some power-savings balance.
2258 * If yes, set the busiest group to be the least loaded group in the
2259 * sched_domain, so that its CPUs can be put to idle.
2260 *
2261 * Returns 1 if there is potential to perform power-savings balance.
2262 * Else returns 0.
2263 */
2264static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
2265 int this_cpu, unsigned long *imbalance)
2266{
2267 if (!sds->power_savings_balance)
2268 return 0;
2269
2270 if (sds->this != sds->group_leader ||
2271 sds->group_leader == sds->group_min)
2272 return 0;
2273
2274 *imbalance = sds->min_load_per_task;
2275 sds->busiest = sds->group_min;
2276
2277 return 1;
2278
2279}
2280#else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
2281static inline void init_sd_power_savings_stats(struct sched_domain *sd,
2282 struct sd_lb_stats *sds, enum cpu_idle_type idle)
2283{
2284 return;
2285}
2286
2287static inline void update_sd_power_savings_stats(struct sched_group *group,
2288 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
2289{
2290 return;
2291}
2292
2293static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
2294 int this_cpu, unsigned long *imbalance)
2295{
2296 return 0;
2297}
2298#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
2299
2300
2301unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
2302{
2303 return SCHED_LOAD_SCALE;
2304}
2305
2306unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
2307{
2308 return default_scale_freq_power(sd, cpu);
2309}
2310
2311unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
2312{
2313 unsigned long weight = cpumask_weight(sched_domain_span(sd));
2314 unsigned long smt_gain = sd->smt_gain;
2315
2316 smt_gain /= weight;
2317
2318 return smt_gain;
2319}
2320
2321unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
2322{
2323 return default_scale_smt_power(sd, cpu);
2324}
2325
2326unsigned long scale_rt_power(int cpu)
2327{
2328 struct rq *rq = cpu_rq(cpu);
2329 u64 total, available;
2330
2331 sched_avg_update(rq);
2332
2333 total = sched_avg_period() + (rq->clock - rq->age_stamp);
2334 available = total - rq->rt_avg;
2335
2336 if (unlikely((s64)total < SCHED_LOAD_SCALE))
2337 total = SCHED_LOAD_SCALE;
2338
2339 total >>= SCHED_LOAD_SHIFT;
2340
2341 return div_u64(available, total);
2342}
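
To make the scaling above concrete, here is a small self-contained sketch that mirrors the arithmetic with assumed inputs (the helper name and the numbers are illustrative, not part of the kernel):

	#include <stdio.h>
	#include <stdint.h>

	#define DEMO_LOAD_SHIFT 10	/* assuming the usual SCHED_LOAD_SCALE of 1024 */

	/* Mirrors scale_rt_power(): RT-free time over the period, scaled to ~1024. */
	static unsigned long rt_scale_demo(uint64_t total_ns, uint64_t rt_avg_ns)
	{
		uint64_t available = total_ns - rt_avg_ns;

		if ((int64_t)total_ns < (1 << DEMO_LOAD_SHIFT))
			total_ns = 1 << DEMO_LOAD_SHIFT;
		total_ns >>= DEMO_LOAD_SHIFT;

		return (unsigned long)(available / total_ns);
	}

	int main(void)
	{
		/* RT work consumed half of a 1s averaging period: prints 512, i.e. half power. */
		printf("%lu\n", rt_scale_demo(1000000000ULL, 500000000ULL));
		return 0;
	}
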
2343
2344static void update_cpu_power(struct sched_domain *sd, int cpu)
2345{
2346 unsigned long weight = cpumask_weight(sched_domain_span(sd));
2347 unsigned long power = SCHED_LOAD_SCALE;
2348 struct sched_group *sdg = sd->groups;
2349
2350 if (sched_feat(ARCH_POWER))
2351 power *= arch_scale_freq_power(sd, cpu);
2352 else
2353 power *= default_scale_freq_power(sd, cpu);
2354
2355 power >>= SCHED_LOAD_SHIFT;
2356
2357 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
2358 if (sched_feat(ARCH_POWER))
2359 power *= arch_scale_smt_power(sd, cpu);
2360 else
2361 power *= default_scale_smt_power(sd, cpu);
2362
2363 power >>= SCHED_LOAD_SHIFT;
2364 }
2365
2366 power *= scale_rt_power(cpu);
2367 power >>= SCHED_LOAD_SHIFT;
2368
2369 if (!power)
2370 power = 1;
2371
2372 sdg->cpu_power = power;
2373}
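
A worked example with assumed values (SCHED_LOAD_SCALE of 1024, no architecture frequency scaling, an SMT pair with a hypothetical smt_gain of 1178, and scale_rt_power() returning 512): the frequency step leaves power at 1024; the SMT step uses default_scale_smt_power() = 1178 / 2 = 589, so power becomes (1024 * 589) >> 10 = 589; the RT step then gives (589 * 512) >> 10 = 294, and that is the value stored in sdg->cpu_power.
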
2374
2375static void update_group_power(struct sched_domain *sd, int cpu)
2376{
2377 struct sched_domain *child = sd->child;
2378 struct sched_group *group, *sdg = sd->groups;
2379 unsigned long power;
2380
2381 if (!child) {
2382 update_cpu_power(sd, cpu);
2383 return;
2384 }
2385
2386 power = 0;
2387
2388 group = child->groups;
2389 do {
2390 power += group->cpu_power;
2391 group = group->next;
2392 } while (group != child->groups);
2393
2394 sdg->cpu_power = power;
2395}
2396
2397/**
2398 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
2399 * @sd: The sched_domain whose statistics are to be updated.
2400 * @group: sched_group whose statistics are to be updated.
2401 * @this_cpu: Cpu for which load balance is currently performed.
2402 * @idle: Idle status of this_cpu
2403 * @load_idx: Load index of sched_domain of this_cpu for load calc.
2404 * @sd_idle: Idle status of the sched_domain containing group.
2405 * @local_group: Does group contain this_cpu.
2406 * @cpus: Set of cpus considered for load balancing.
2407 * @balance: Should we balance.
2408 * @sgs: variable to hold the statistics for this group.
2409 */
2410static inline void update_sg_lb_stats(struct sched_domain *sd,
2411 struct sched_group *group, int this_cpu,
2412 enum cpu_idle_type idle, int load_idx, int *sd_idle,
2413 int local_group, const struct cpumask *cpus,
2414 int *balance, struct sg_lb_stats *sgs)
2415{
2416 unsigned long load, max_cpu_load, min_cpu_load;
2417 int i;
2418 unsigned int balance_cpu = -1, first_idle_cpu = 0;
2419 unsigned long sum_avg_load_per_task;
2420 unsigned long avg_load_per_task;
2421
2422 if (local_group)
2423 balance_cpu = group_first_cpu(group);
2424
2425 /* Tally up the load of all CPUs in the group */
2426 sum_avg_load_per_task = avg_load_per_task = 0;
2427 max_cpu_load = 0;
2428 min_cpu_load = ~0UL;
2429
2430 for_each_cpu_and(i, sched_group_cpus(group), cpus) {
2431 struct rq *rq = cpu_rq(i);
2432
2433 if (*sd_idle && rq->nr_running)
2434 *sd_idle = 0;
2435
2436 /* Bias balancing toward cpus of our domain */
2437 if (local_group) {
2438 if (idle_cpu(i) && !first_idle_cpu) {
2439 first_idle_cpu = 1;
2440 balance_cpu = i;
2441 }
2442
2443 load = target_load(i, load_idx);
2444 } else {
2445 load = source_load(i, load_idx);
2446 if (load > max_cpu_load)
2447 max_cpu_load = load;
2448 if (min_cpu_load > load)
2449 min_cpu_load = load;
2450 }
2451
2452 sgs->group_load += load;
2453 sgs->sum_nr_running += rq->nr_running;
2454 sgs->sum_weighted_load += weighted_cpuload(i);
2455
2456 sum_avg_load_per_task += cpu_avg_load_per_task(i);
2457 }
2458
2459 /*
2460 * First idle cpu or the first cpu(busiest) in this sched group
2461 * is eligible for doing load balancing at this and above
2462 * domains. In the newly idle case, we will allow all the cpu's
2463 * to do the newly idle load balance.
2464 */
2465 if (idle != CPU_NEWLY_IDLE && local_group &&
2466 balance_cpu != this_cpu) {
2467 *balance = 0;
2468 return;
2469 }
2470
2471 update_group_power(sd, this_cpu);
2472
2473 /* Adjust by relative CPU power of the group */
2474 sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power;
2475
2476
2477 /*
2478 * Consider the group unbalanced when the imbalance is larger
2479 * than the average weight of two tasks.
2480 *
2481 * APZ: with cgroup the avg task weight can vary wildly and
2482 * might not be a suitable number - should we keep a
2483 * normalized nr_running number somewhere that negates
2484 * the hierarchy?
2485 */
2486 avg_load_per_task = (sum_avg_load_per_task * SCHED_LOAD_SCALE) /
2487 group->cpu_power;
2488
2489 if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
2490 sgs->group_imb = 1;
2491
2492 sgs->group_capacity =
2493 DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
2494}
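
For the capacity rounding at the end, with the usual SCHED_LOAD_SCALE of 1024 (illustrative figures only): a group of two full cores with cpu_power 1024 each (2048 total) gets group_capacity = DIV_ROUND_CLOSEST(2048, 1024) = 2, while an SMT pair whose combined cpu_power is, say, 1178 rounds to a capacity of just one task.
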
2495
2496/**
2497 * update_sd_lb_stats - Update sched_group's statistics for load balancing.
2498 * @sd: sched_domain whose statistics are to be updated.
2499 * @this_cpu: Cpu for which load balance is currently performed.
2500 * @idle: Idle status of this_cpu
2501 * @sd_idle: Idle status of the sched_domain containing group.
2502 * @cpus: Set of cpus considered for load balancing.
2503 * @balance: Should we balance.
2504 * @sds: variable to hold the statistics for this sched_domain.
2505 */
2506static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
2507 enum cpu_idle_type idle, int *sd_idle,
2508 const struct cpumask *cpus, int *balance,
2509 struct sd_lb_stats *sds)
2510{
2511 struct sched_domain *child = sd->child;
2512 struct sched_group *group = sd->groups;
2513 struct sg_lb_stats sgs;
2514 int load_idx, prefer_sibling = 0;
2515
2516 if (child && child->flags & SD_PREFER_SIBLING)
2517 prefer_sibling = 1;
2518
2519 init_sd_power_savings_stats(sd, sds, idle);
2520 load_idx = get_sd_load_idx(sd, idle);
2521
2522 do {
2523 int local_group;
2524
2525 local_group = cpumask_test_cpu(this_cpu,
2526 sched_group_cpus(group));
2527 memset(&sgs, 0, sizeof(sgs));
2528 update_sg_lb_stats(sd, group, this_cpu, idle, load_idx, sd_idle,
2529 local_group, cpus, balance, &sgs);
2530
2531 if (local_group && !(*balance))
2532 return;
2533
2534 sds->total_load += sgs.group_load;
2535 sds->total_pwr += group->cpu_power;
2536
2537 /*
2538 * In case the child domain prefers tasks go to siblings
2539 * first, lower the group capacity to one so that we'll try
2540 * and move all the excess tasks away.
2541 */
2542 if (prefer_sibling)
2543 sgs.group_capacity = min(sgs.group_capacity, 1UL);
2544
2545 if (local_group) {
2546 sds->this_load = sgs.avg_load;
2547 sds->this = group;
2548 sds->this_nr_running = sgs.sum_nr_running;
2549 sds->this_load_per_task = sgs.sum_weighted_load;
2550 } else if (sgs.avg_load > sds->max_load &&
2551 (sgs.sum_nr_running > sgs.group_capacity ||
2552 sgs.group_imb)) {
2553 sds->max_load = sgs.avg_load;
2554 sds->busiest = group;
2555 sds->busiest_nr_running = sgs.sum_nr_running;
2556 sds->busiest_load_per_task = sgs.sum_weighted_load;
2557 sds->group_imb = sgs.group_imb;
2558 }
2559
2560 update_sd_power_savings_stats(group, sds, local_group, &sgs);
2561 group = group->next;
2562 } while (group != sd->groups);
2563}
2564
2565/**
2566 * fix_small_imbalance - Calculate the minor imbalance that exists
2567 * amongst the groups of a sched_domain, during
2568 * load balancing.
2569 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
2570 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
2571 * @imbalance: Variable to store the imbalance.
2572 */
2573static inline void fix_small_imbalance(struct sd_lb_stats *sds,
2574 int this_cpu, unsigned long *imbalance)
2575{
2576 unsigned long tmp, pwr_now = 0, pwr_move = 0;
2577 unsigned int imbn = 2;
2578
2579 if (sds->this_nr_running) {
2580 sds->this_load_per_task /= sds->this_nr_running;
2581 if (sds->busiest_load_per_task >
2582 sds->this_load_per_task)
2583 imbn = 1;
2584 } else
2585 sds->this_load_per_task =
2586 cpu_avg_load_per_task(this_cpu);
2587
2588 if (sds->max_load - sds->this_load + sds->busiest_load_per_task >=
2589 sds->busiest_load_per_task * imbn) {
2590 *imbalance = sds->busiest_load_per_task;
2591 return;
2592 }
2593
2594 /*
2595 * OK, we don't have enough imbalance to justify moving tasks,
2596 * however we may be able to increase total CPU power used by
2597 * moving them.
2598 */
2599
2600 pwr_now += sds->busiest->cpu_power *
2601 min(sds->busiest_load_per_task, sds->max_load);
2602 pwr_now += sds->this->cpu_power *
2603 min(sds->this_load_per_task, sds->this_load);
2604 pwr_now /= SCHED_LOAD_SCALE;
2605
2606 /* Amount of load we'd subtract */
2607 tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) /
2608 sds->busiest->cpu_power;
2609 if (sds->max_load > tmp)
2610 pwr_move += sds->busiest->cpu_power *
2611 min(sds->busiest_load_per_task, sds->max_load - tmp);
2612
2613 /* Amount of load we'd add */
2614 if (sds->max_load * sds->busiest->cpu_power <
2615 sds->busiest_load_per_task * SCHED_LOAD_SCALE)
2616 tmp = (sds->max_load * sds->busiest->cpu_power) /
2617 sds->this->cpu_power;
2618 else
2619 tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) /
2620 sds->this->cpu_power;
2621 pwr_move += sds->this->cpu_power *
2622 min(sds->this_load_per_task, sds->this_load + tmp);
2623 pwr_move /= SCHED_LOAD_SCALE;
2624
2625 /* Move if we gain throughput */
2626 if (pwr_move > pwr_now)
2627 *imbalance = sds->busiest_load_per_task;
2628}
2629
2630/**
2631 * calculate_imbalance - Calculate the amount of imbalance present within the
2632 * groups of a given sched_domain during load balance.
2633 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
2634 * @this_cpu: Cpu for which currently load balance is being performed.
2635 * @imbalance: The variable to store the imbalance.
2636 */
2637static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
2638 unsigned long *imbalance)
2639{
2640 unsigned long max_pull;
2641 /*
2642 * In the presence of smp nice balancing, certain scenarios can have
2643 * max load less than avg load(as we skip the groups at or below
2644 * its cpu_power, while calculating max_load..)
2645 */
2646 if (sds->max_load < sds->avg_load) {
2647 *imbalance = 0;
2648 return fix_small_imbalance(sds, this_cpu, imbalance);
2649 }
2650
2651 /* Don't want to pull so many tasks that a group would go idle */
2652 max_pull = min(sds->max_load - sds->avg_load,
2653 sds->max_load - sds->busiest_load_per_task);
2654
2655 /* How much load to actually move to equalise the imbalance */
2656 *imbalance = min(max_pull * sds->busiest->cpu_power,
2657 (sds->avg_load - sds->this_load) * sds->this->cpu_power)
2658 / SCHED_LOAD_SCALE;
2659
2660 /*
2661 * if *imbalance is less than the average load per runnable task
2662 * there is no guarantee that any tasks will be moved so we'll have
2663 * a think about bumping its value to force at least one task to be
2664 * moved
2665 */
2666 if (*imbalance < sds->busiest_load_per_task)
2667 return fix_small_imbalance(sds, this_cpu, imbalance);
2668
2669}
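
A worked example with made-up numbers: if avg_load = 1500, max_load = 2000, this_load = 1000, busiest_load_per_task = 300 and both cpu_power values are 1024, then max_pull = min(2000 - 1500, 2000 - 300) = 500 and *imbalance = min(500 * 1024, (1500 - 1000) * 1024) / 1024 = 500; since that is at least one task's worth of load (300), fix_small_imbalance() is not consulted.
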
2670/******* find_busiest_group() helpers end here *********************/
2671
2672/**
2673 * find_busiest_group - Returns the busiest group within the sched_domain
2674 * if there is an imbalance. If there isn't an imbalance, and
2675 * the user has opted for power-savings, it returns a group whose
2676 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
2677 * such a group exists.
2678 *
2679 * Also calculates the amount of weighted load which should be moved
2680 * to restore balance.
2681 *
2682 * @sd: The sched_domain whose busiest group is to be returned.
2683 * @this_cpu: The cpu for which load balancing is currently being performed.
2684 * @imbalance: Variable which stores amount of weighted load which should
2685 * be moved to restore balance/put a group to idle.
2686 * @idle: The idle status of this_cpu.
2687 * @sd_idle: The idleness of sd
2688 * @cpus: The set of CPUs under consideration for load-balancing.
2689 * @balance: Pointer to a variable indicating if this_cpu
2690 * is the appropriate cpu to perform load balancing at this_level.
2691 *
2692 * Returns: - the busiest group if imbalance exists.
2693 * - If no imbalance and user has opted for power-savings balance,
2694 * return the least loaded group whose CPUs can be
2695 * put to idle by rebalancing its tasks onto our group.
2696 */
2697static struct sched_group *
2698find_busiest_group(struct sched_domain *sd, int this_cpu,
2699 unsigned long *imbalance, enum cpu_idle_type idle,
2700 int *sd_idle, const struct cpumask *cpus, int *balance)
2701{
2702 struct sd_lb_stats sds;
2703
2704 memset(&sds, 0, sizeof(sds));
2705
2706 /*
2707 * Compute the various statistics relevant for load balancing at
2708 * this level.
2709 */
2710 update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
2711 balance, &sds);
2712
2713 /* Cases where imbalance does not exist from POV of this_cpu */
2714 /* 1) this_cpu is not the appropriate cpu to perform load balancing
2715 * at this level.
2716 * 2) There is no busy sibling group to pull from.
2717 * 3) This group is the busiest group.
2718 * 4) This group is busier than the average busyness at this
2719 * sched_domain.
2720 * 5) The imbalance is within the specified limit.
2721 * 6) Any rebalance would lead to ping-pong
2722 */
2723 if (!(*balance))
2724 goto ret;
2725
2726 if (!sds.busiest || sds.busiest_nr_running == 0)
2727 goto out_balanced;
2728
2729 if (sds.this_load >= sds.max_load)
2730 goto out_balanced;
2731
2732 sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
2733
2734 if (sds.this_load >= sds.avg_load)
2735 goto out_balanced;
2736
2737 if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
2738 goto out_balanced;
2739
2740 sds.busiest_load_per_task /= sds.busiest_nr_running;
2741 if (sds.group_imb)
2742 sds.busiest_load_per_task =
2743 min(sds.busiest_load_per_task, sds.avg_load);
2744
2745 /*
2746 * We're trying to get all the cpus to the average_load, so we don't
2747 * want to push ourselves above the average load, nor do we wish to
2748 * reduce the max loaded cpu below the average load, as either of these
2749 * actions would just result in more rebalancing later, and ping-pong
2750 * tasks around. Thus we look for the minimum possible imbalance.
2751 * Negative imbalances (*we* are more loaded than anyone else) will
2752 * be counted as no imbalance for these purposes -- we can't fix that
2753 * by pulling tasks to us. Be careful of negative numbers as they'll
2754 * appear as very large values with unsigned longs.
2755 */
2756 if (sds.max_load <= sds.busiest_load_per_task)
2757 goto out_balanced;
2758
2759 /* Looks like there is an imbalance. Compute it */
2760 calculate_imbalance(&sds, this_cpu, imbalance);
2761 return sds.busiest;
2762
2763out_balanced:
2764 /*
2765 * There is no obvious imbalance. But check if we can do some balancing
2766 * to save power.
2767 */
2768 if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
2769 return sds.busiest;
2770ret:
2771 *imbalance = 0;
2772 return NULL;
2773}
2774
2775/*
2776 * find_busiest_queue - find the busiest runqueue among the cpus in group.
2777 */
2778static struct rq *
2779find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
2780 unsigned long imbalance, const struct cpumask *cpus)
 {
-	struct cfs_rq *busy_cfs_rq;
-	struct rq_iterator cfs_rq_iterator;
+	struct rq *busiest = NULL, *rq;
+	unsigned long max_load = 0;
+	int i;
+
+	for_each_cpu(i, sched_group_cpus(group)) {
+		unsigned long power = power_of(i);
+		unsigned long capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);
+		unsigned long wl;
+
+		if (!cpumask_test_cpu(i, cpus))
+			continue;
 
-	cfs_rq_iterator.start = load_balance_start_fair;
-	cfs_rq_iterator.next = load_balance_next_fair;
+		rq = cpu_rq(i);
+		wl = weighted_cpuload(i);
 
-	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
 		/*
-		 * pass busy_cfs_rq argument into
-		 * load_balance_[start|next]_fair iterators
+		 * When comparing with imbalance, use weighted_cpuload()
+		 * which is not scaled with the cpu power.
 		 */
-		cfs_rq_iterator.arg = busy_cfs_rq;
-		if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
-				       &cfs_rq_iterator))
-			return 1;
+		if (capacity && rq->nr_running == 1 && wl > imbalance)
+			continue;
+
+		/*
+		 * For the load comparisons with the other cpu's, consider
+		 * the weighted_cpuload() scaled with the cpu power, so that
+		 * the load can be moved away from the cpu that is potentially
+		 * running at a lower capacity.
+		 */
+		wl = (wl * SCHED_LOAD_SCALE) / power;
+
+		if (wl > max_load) {
+			max_load = wl;
+			busiest = rq;
+		}
 	}
 
2818 return busiest;
2819}
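
The effect of scaling the load by cpu power can be seen with a tiny stand-alone sketch (assumed numbers, 1024 standing in for SCHED_LOAD_SCALE):

	#include <stdio.h>

	/* Illustrative only: rescale a raw weighted load by cpu power. */
	static unsigned long scaled_load(unsigned long wl, unsigned long power)
	{
		return (wl * 1024UL) / power;
	}

	int main(void)
	{
		printf("full-power cpu: %lu\n", scaled_load(2048, 1024));	/* 2048 */
		printf("half-power cpu: %lu\n", scaled_load(1536, 512));	/* 3072 */
		return 0;
	}

The half-power cpu ends up with the larger scaled value and is therefore chosen as busiest even though its raw load is lower.
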
2820
2821/*
2822 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
2823 * so long as it is large enough.
2824 */
2825#define MAX_PINNED_INTERVAL 512
2826
2827/* Working cpumask for load_balance and load_balance_newidle. */
2828static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
2829
2830static int need_active_balance(struct sched_domain *sd, int sd_idle, int idle)
2831{
2832 if (idle == CPU_NEWLY_IDLE) {
2833 /*
2834 * The only task running in a non-idle cpu can be moved to this
2835 * cpu in an attempt to completely freeup the other CPU
2836 * package.
2837 *
2838 * The package power saving logic comes from
2839 * find_busiest_group(). If there is no imbalance, then
2840 * f_b_g() will return NULL. However when sched_mc={1,2} then
2841 * f_b_g() will select a group from which a running task may be
2842 * pulled to this cpu in order to make the other package idle.
2843 * If there is no opportunity to make a package idle and if
2844 * there is no imbalance, then f_b_g() will return NULL and no
2845 * action will be taken in load_balance_newidle().
2846 *
2847 * Under normal task pull operation due to imbalance, there
2848 * will be more than one task in the source run queue and
2849 * move_tasks() will succeed. ld_moved will be true and this
2850 * active balance code will not be triggered.
2851 */
2852 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
2853 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
2854 return 0;
2855
2856 if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
2857 return 0;
2858 }
2859
2860 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
2861}
2862
2863/*
2864 * Check this_cpu to ensure it is balanced within domain. Attempt to move
2865 * tasks if there is an imbalance.
2866 */
2867static int load_balance(int this_cpu, struct rq *this_rq,
2868 struct sched_domain *sd, enum cpu_idle_type idle,
2869 int *balance)
2870{
2871 int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
2872 struct sched_group *group;
2873 unsigned long imbalance;
2874 struct rq *busiest;
2875 unsigned long flags;
2876 struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
2877
2878 cpumask_copy(cpus, cpu_active_mask);
2879
2880 /*
2881 * When power savings policy is enabled for the parent domain, idle
2882 * sibling can pick up load irrespective of busy siblings. In this case,
2883 * let the state of idle sibling percolate up as CPU_IDLE, instead of
2884 * portraying it as CPU_NOT_IDLE.
2885 */
2886 if (idle != CPU_NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
2887 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
2888 sd_idle = 1;
2889
2890 schedstat_inc(sd, lb_count[idle]);
2891
2892redo:
2893 update_shares(sd);
2894 group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
2895 cpus, balance);
2896
2897 if (*balance == 0)
2898 goto out_balanced;
2899
2900 if (!group) {
2901 schedstat_inc(sd, lb_nobusyg[idle]);
2902 goto out_balanced;
2903 }
2904
2905 busiest = find_busiest_queue(group, idle, imbalance, cpus);
2906 if (!busiest) {
2907 schedstat_inc(sd, lb_nobusyq[idle]);
2908 goto out_balanced;
2909 }
2910
2911 BUG_ON(busiest == this_rq);
2912
2913 schedstat_add(sd, lb_imbalance[idle], imbalance);
2914
2915 ld_moved = 0;
2916 if (busiest->nr_running > 1) {
2917 /*
2918 * Attempt to move tasks. If find_busiest_group has found
2919 * an imbalance but busiest->nr_running <= 1, the group is
2920 * still unbalanced. ld_moved simply stays zero, so it is
2921 * correctly treated as an imbalance.
2922 */
2923 local_irq_save(flags);
2924 double_rq_lock(this_rq, busiest);
2925 ld_moved = move_tasks(this_rq, this_cpu, busiest,
2926 imbalance, sd, idle, &all_pinned);
2927 double_rq_unlock(this_rq, busiest);
2928 local_irq_restore(flags);
2929
2930 /*
2931 * some other cpu did the load balance for us.
2932 */
2933 if (ld_moved && this_cpu != smp_processor_id())
2934 resched_cpu(this_cpu);
2935
2936 /* All tasks on this runqueue were pinned by CPU affinity */
2937 if (unlikely(all_pinned)) {
2938 cpumask_clear_cpu(cpu_of(busiest), cpus);
2939 if (!cpumask_empty(cpus))
2940 goto redo;
2941 goto out_balanced;
2942 }
2943 }
2944
2945 if (!ld_moved) {
2946 schedstat_inc(sd, lb_failed[idle]);
2947 sd->nr_balance_failed++;
2948
2949 if (need_active_balance(sd, sd_idle, idle)) {
2950 raw_spin_lock_irqsave(&busiest->lock, flags);
2951
2952 /* don't kick the migration_thread, if the curr
2953 * task on busiest cpu can't be moved to this_cpu
2954 */
2955 if (!cpumask_test_cpu(this_cpu,
2956 &busiest->curr->cpus_allowed)) {
2957 raw_spin_unlock_irqrestore(&busiest->lock,
2958 flags);
2959 all_pinned = 1;
2960 goto out_one_pinned;
2961 }
2962
2963 if (!busiest->active_balance) {
2964 busiest->active_balance = 1;
2965 busiest->push_cpu = this_cpu;
2966 active_balance = 1;
2967 }
2968 raw_spin_unlock_irqrestore(&busiest->lock, flags);
2969 if (active_balance)
2970 wake_up_process(busiest->migration_thread);
2971
2972 /*
2973 * We've kicked active balancing, reset the failure
2974 * counter.
2975 */
2976 sd->nr_balance_failed = sd->cache_nice_tries+1;
2977 }
2978 } else
2979 sd->nr_balance_failed = 0;
2980
2981 if (likely(!active_balance)) {
2982 /* We were unbalanced, so reset the balancing interval */
2983 sd->balance_interval = sd->min_interval;
2984 } else {
2985 /*
2986 * If we've begun active balancing, start to back off. This
2987 * case may not be covered by the all_pinned logic if there
2988 * is only 1 task on the busy runqueue (because we don't call
2989 * move_tasks).
2990 */
2991 if (sd->balance_interval < sd->max_interval)
2992 sd->balance_interval *= 2;
2993 }
2994
2995 if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
2996 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
2997 ld_moved = -1;
2998
2999 goto out;
3000
3001out_balanced:
3002 schedstat_inc(sd, lb_balanced[idle]);
3003
3004 sd->nr_balance_failed = 0;
3005
3006out_one_pinned:
3007 /* tune up the balancing interval */
3008 if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
3009 (sd->balance_interval < sd->max_interval))
3010 sd->balance_interval *= 2;
3011
3012 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
3013 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
3014 ld_moved = -1;
3015 else
3016 ld_moved = 0;
3017out:
3018 if (ld_moved)
3019 update_shares(sd);
3020 return ld_moved;
3021}
3022
3023/*
3024 * idle_balance is called by schedule() if this_cpu is about to become
3025 * idle. Attempts to pull tasks from other CPUs.
3026 */
3027static void idle_balance(int this_cpu, struct rq *this_rq)
3028{
3029 struct sched_domain *sd;
3030 int pulled_task = 0;
3031 unsigned long next_balance = jiffies + HZ;
3032
3033 this_rq->idle_stamp = this_rq->clock;
3034
3035 if (this_rq->avg_idle < sysctl_sched_migration_cost)
3036 return;
3037
3038 /*
3039 * Drop the rq->lock, but keep IRQ/preempt disabled.
3040 */
3041 raw_spin_unlock(&this_rq->lock);
3042
3043 for_each_domain(this_cpu, sd) {
3044 unsigned long interval;
3045 int balance = 1;
3046
3047 if (!(sd->flags & SD_LOAD_BALANCE))
3048 continue;
3049
3050 if (sd->flags & SD_BALANCE_NEWIDLE) {
3051 /* If we've pulled tasks over stop searching: */
3052 pulled_task = load_balance(this_cpu, this_rq,
3053 sd, CPU_NEWLY_IDLE, &balance);
3054 }
3055
3056 interval = msecs_to_jiffies(sd->balance_interval);
3057 if (time_after(next_balance, sd->last_balance + interval))
3058 next_balance = sd->last_balance + interval;
3059 if (pulled_task) {
3060 this_rq->idle_stamp = 0;
3061 break;
3062 }
3063 }
3064
3065 raw_spin_lock(&this_rq->lock);
3066
3067 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
3068 /*
3069 * We are going idle. next_balance may be set based on
3070 * a busy processor. So reset next_balance.
3071 */
3072 this_rq->next_balance = next_balance;
3073 }
3074}
3075
3076/*
3077 * active_load_balance is run by migration threads. It pushes running tasks
3078 * off the busiest CPU onto idle CPUs. It requires at least 1 task to be
3079 * running on each physical CPU where possible, and avoids physical /
3080 * logical imbalances.
3081 *
3082 * Called with busiest_rq locked.
3083 */
3084static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
3085{
3086 int target_cpu = busiest_rq->push_cpu;
3087 struct sched_domain *sd;
3088 struct rq *target_rq;
3089
3090 /* Is there any task to move? */
3091 if (busiest_rq->nr_running <= 1)
3092 return;
3093
3094 target_rq = cpu_rq(target_cpu);
3095
3096 /*
3097 * This condition is "impossible", if it occurs
3098 * we need to fix it. Originally reported by
3099 * Bjorn Helgaas on a 128-cpu setup.
3100 */
3101 BUG_ON(busiest_rq == target_rq);
3102
3103 /* move a task from busiest_rq to target_rq */
3104 double_lock_balance(busiest_rq, target_rq);
3105 update_rq_clock(busiest_rq);
3106 update_rq_clock(target_rq);
3107
3108 /* Search for an sd spanning us and the target CPU. */
3109 for_each_domain(target_cpu, sd) {
3110 if ((sd->flags & SD_LOAD_BALANCE) &&
3111 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
3112 break;
3113 }
3114
3115 if (likely(sd)) {
3116 schedstat_inc(sd, alb_count);
3117
3118 if (move_one_task(target_rq, target_cpu, busiest_rq,
3119 sd, CPU_IDLE))
3120 schedstat_inc(sd, alb_pushed);
3121 else
3122 schedstat_inc(sd, alb_failed);
3123 }
3124 double_unlock_balance(busiest_rq, target_rq);
3125}
3126
3127#ifdef CONFIG_NO_HZ
3128static struct {
3129 atomic_t load_balancer;
3130 cpumask_var_t cpu_mask;
3131 cpumask_var_t ilb_grp_nohz_mask;
3132} nohz ____cacheline_aligned = {
3133 .load_balancer = ATOMIC_INIT(-1),
3134};
3135
3136int get_nohz_load_balancer(void)
3137{
3138 return atomic_read(&nohz.load_balancer);
3139}
3140
3141#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3142/**
3143 * lowest_flag_domain - Return lowest sched_domain containing flag.
3144 * @cpu: The cpu whose lowest level of sched domain is to
3145 * be returned.
3146 * @flag: The flag to check for the lowest sched_domain
3147 * for the given cpu.
3148 *
3149 * Returns the lowest sched_domain of a cpu which contains the given flag.
3150 */
3151static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
3152{
3153 struct sched_domain *sd;
3154
3155 for_each_domain(cpu, sd)
3156 if (sd && (sd->flags & flag))
3157 break;
3158
3159 return sd;
3160}
3161
3162/**
3163 * for_each_flag_domain - Iterates over sched_domains containing the flag.
3164 * @cpu: The cpu whose domains we're iterating over.
3165 * @sd: variable holding the value of the power_savings_sd
3166 * for cpu.
3167 * @flag: The flag to filter the sched_domains to be iterated.
3168 *
3169 * Iterates over all the scheduler domains for a given cpu that has the 'flag'
3170 * set, starting from the lowest sched_domain to the highest.
3171 */
3172#define for_each_flag_domain(cpu, sd, flag) \
3173 for (sd = lowest_flag_domain(cpu, flag); \
3174 (sd && (sd->flags & flag)); sd = sd->parent)
3175
3176/**
3177 * is_semi_idle_group - Checks if the given sched_group is semi-idle.
3178 * @ilb_group: group to be checked for semi-idleness
3179 *
3180 * Returns: 1 if the group is semi-idle. 0 otherwise.
3181 *
3182 * We define a sched_group to be semi-idle if it has at least one idle CPU
3183 * and at least one non-idle CPU. This helper function checks if the given
3184 * sched_group is semi-idle or not.
3185 */
3186static inline int is_semi_idle_group(struct sched_group *ilb_group)
3187{
3188 cpumask_and(nohz.ilb_grp_nohz_mask, nohz.cpu_mask,
3189 sched_group_cpus(ilb_group));
3190
3191 /*
3192 * A sched_group is semi-idle when it has at least one busy cpu
3193 * and at least one idle cpu.
3194 */
3195 if (cpumask_empty(nohz.ilb_grp_nohz_mask))
3196 return 0;
3197
3198 if (cpumask_equal(nohz.ilb_grp_nohz_mask, sched_group_cpus(ilb_group)))
3199 return 0;
3200
3201 return 1;
3202}
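
For example, with hypothetical cpus: if a sched_group spans cpus 2-3 and only cpu 3 has stopped its tick, the intersection with nohz.cpu_mask is {3}, which is neither empty nor the whole group, so the group counts as semi-idle; if both cpus were busy, or both had stopped their ticks, it would not.
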
3203/**
3204 * find_new_ilb - Finds the optimum idle load balancer for nomination.
3205 * @cpu: The cpu which is nominating a new idle_load_balancer.
3206 *
3207 * Returns: Returns the id of the idle load balancer if it exists,
3208 * Else, returns >= nr_cpu_ids.
3209 *
3210 * This algorithm picks the idle load balancer such that it belongs to a
3211 * semi-idle powersavings sched_domain. The idea is to try and avoid
3212 * completely idle packages/cores just for the purpose of idle load balancing
3213 * when there are other idle cpu's which are better suited for that job.
3214 */
3215static int find_new_ilb(int cpu)
3216{
3217 struct sched_domain *sd;
3218 struct sched_group *ilb_group;
3219
3220 /*
3221 * Have idle load balancer selection from semi-idle packages only
3222 * when power-aware load balancing is enabled
3223 */
3224 if (!(sched_smt_power_savings || sched_mc_power_savings))
3225 goto out_done;
3226
3227 /*
3228 * Optimize for the case when we have no idle CPUs or only one
3229 * idle CPU. Don't walk the sched_domain hierarchy in such cases
3230 */
3231 if (cpumask_weight(nohz.cpu_mask) < 2)
3232 goto out_done;
3233
3234 for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
3235 ilb_group = sd->groups;
3236
3237 do {
3238 if (is_semi_idle_group(ilb_group))
3239 return cpumask_first(nohz.ilb_grp_nohz_mask);
3240
3241 ilb_group = ilb_group->next;
3242
3243 } while (ilb_group != sd->groups);
3244 }
3245
3246out_done:
3247 return cpumask_first(nohz.cpu_mask);
3248}
3249#else /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
3250static inline int find_new_ilb(int call_cpu)
3251{
3252 return cpumask_first(nohz.cpu_mask);
3253}
3254#endif
3255
3256/*
3257 * This routine will try to nominate the ilb (idle load balancing)
3258 * owner among the cpus whose ticks are stopped. ilb owner will do the idle
3259 * load balancing on behalf of all those cpus. If all the cpus in the system
3260 * go into this tickless mode, then there will be no ilb owner (as there is
3261 * no need for one) and all the cpus will sleep till the next wakeup event
3262 * arrives...
3263 *
3264 * For the ilb owner, tick is not stopped. And this tick will be used
3265 * for idle load balancing. ilb owner will still be part of
3266 * nohz.cpu_mask..
3267 *
3268 * While stopping the tick, this cpu will become the ilb owner if there
3269 * is no other owner. And will be the owner till that cpu becomes busy
3270 * or if all cpus in the system stop their ticks at which point
3271 * there is no need for ilb owner.
3272 *
3273 * When the ilb owner becomes busy, it nominates another owner, during the
3274 * next busy scheduler_tick()
3275 */
3276int select_nohz_load_balancer(int stop_tick)
3277{
3278 int cpu = smp_processor_id();
3279
3280 if (stop_tick) {
3281 cpu_rq(cpu)->in_nohz_recently = 1;
3282
3283 if (!cpu_active(cpu)) {
3284 if (atomic_read(&nohz.load_balancer) != cpu)
3285 return 0;
3286
3287 /*
3288 * If we are going offline and still the leader,
3289 * give up!
3290 */
3291 if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
3292 BUG();
3293
3294 return 0;
3295 }
3296
3297 cpumask_set_cpu(cpu, nohz.cpu_mask);
3298
3299 /* time for ilb owner also to sleep */
3300 if (cpumask_weight(nohz.cpu_mask) == num_active_cpus()) {
3301 if (atomic_read(&nohz.load_balancer) == cpu)
3302 atomic_set(&nohz.load_balancer, -1);
3303 return 0;
3304 }
3305
3306 if (atomic_read(&nohz.load_balancer) == -1) {
3307 /* make me the ilb owner */
3308 if (atomic_cmpxchg(&nohz.load_balancer, -1, cpu) == -1)
3309 return 1;
3310 } else if (atomic_read(&nohz.load_balancer) == cpu) {
3311 int new_ilb;
3312
3313 if (!(sched_smt_power_savings ||
3314 sched_mc_power_savings))
3315 return 1;
3316 /*
3317 * Check to see if there is a more power-efficient
3318 * ilb.
3319 */
3320 new_ilb = find_new_ilb(cpu);
3321 if (new_ilb < nr_cpu_ids && new_ilb != cpu) {
3322 atomic_set(&nohz.load_balancer, -1);
3323 resched_cpu(new_ilb);
3324 return 0;
3325 }
3326 return 1;
3327 }
3328 } else {
3329 if (!cpumask_test_cpu(cpu, nohz.cpu_mask))
3330 return 0;
3331
3332 cpumask_clear_cpu(cpu, nohz.cpu_mask);
3333
3334 if (atomic_read(&nohz.load_balancer) == cpu)
3335 if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
3336 BUG();
3337 }
3338	return 0;
3339}
3340#endif
3341
3342static DEFINE_SPINLOCK(balancing);
3343
3344/*
3345 * It checks each scheduling domain to see if it is due to be balanced,
3346 * and initiates a balancing operation if so.
3347 *
3348 * Balancing parameters are set up in arch_init_sched_domains.
3349 */
3350static void rebalance_domains(int cpu, enum cpu_idle_type idle)
3351{
3352 int balance = 1;
3353 struct rq *rq = cpu_rq(cpu);
3354 unsigned long interval;
3355 struct sched_domain *sd;
3356 /* Earliest time when we have to do rebalance again */
3357 unsigned long next_balance = jiffies + 60*HZ;
3358 int update_next_balance = 0;
3359 int need_serialize;
3360
3361 for_each_domain(cpu, sd) {
3362 if (!(sd->flags & SD_LOAD_BALANCE))
3363 continue;
3364
3365 interval = sd->balance_interval;
3366 if (idle != CPU_IDLE)
3367 interval *= sd->busy_factor;
3368
3369 /* scale ms to jiffies */
3370 interval = msecs_to_jiffies(interval);
3371 if (unlikely(!interval))
3372 interval = 1;
3373 if (interval > HZ*NR_CPUS/10)
3374 interval = HZ*NR_CPUS/10;
3375
3376 need_serialize = sd->flags & SD_SERIALIZE;
3377
3378 if (need_serialize) {
3379 if (!spin_trylock(&balancing))
3380 goto out;
3381 }
3382
3383 if (time_after_eq(jiffies, sd->last_balance + interval)) {
3384 if (load_balance(cpu, rq, sd, idle, &balance)) {
3385 /*
3386 * We've pulled tasks over so either we're no
3387 * longer idle, or one of our SMT siblings is
3388 * not idle.
3389 */
3390 idle = CPU_NOT_IDLE;
3391 }
3392 sd->last_balance = jiffies;
3393 }
3394 if (need_serialize)
3395 spin_unlock(&balancing);
3396out:
3397 if (time_after(next_balance, sd->last_balance + interval)) {
3398 next_balance = sd->last_balance + interval;
3399 update_next_balance = 1;
3400 }
3401
3402 /*
3403 * Stop the load balance at this level. There is another
3404 * CPU in our sched group which is doing load balancing more
3405 * actively.
3406 */
3407 if (!balance)
3408 break;
3409 }
3410
3411 /*
3412 * next_balance will be updated only when there is a need.
3413 * When the cpu is attached to null domain for ex, it will not be
3414 * updated.
3415 */
3416 if (likely(update_next_balance))
3417 rq->next_balance = next_balance;
3418}
3419
3420/*
3421 * run_rebalance_domains is triggered when needed from the scheduler tick.
3422 * In CONFIG_NO_HZ case, the idle load balance owner will do the
3423 * rebalancing for all the cpus for whom scheduler ticks are stopped.
3424 */
3425static void run_rebalance_domains(struct softirq_action *h)
3426{
3427 int this_cpu = smp_processor_id();
3428 struct rq *this_rq = cpu_rq(this_cpu);
3429 enum cpu_idle_type idle = this_rq->idle_at_tick ?
3430 CPU_IDLE : CPU_NOT_IDLE;
3431
3432 rebalance_domains(this_cpu, idle);
3433
3434#ifdef CONFIG_NO_HZ
3435 /*
3436 * If this cpu is the owner for idle load balancing, then do the
3437 * balancing on behalf of the other idle cpus whose ticks are
3438 * stopped.
3439 */
3440 if (this_rq->idle_at_tick &&
3441 atomic_read(&nohz.load_balancer) == this_cpu) {
3442 struct rq *rq;
3443 int balance_cpu;
3444
3445 for_each_cpu(balance_cpu, nohz.cpu_mask) {
3446 if (balance_cpu == this_cpu)
3447 continue;
3448
3449 /*
3450 * If this cpu gets work to do, stop the load balancing
3451 * work being done for other cpus. Next load
3452 * balancing owner will pick it up.
3453 */
3454 if (need_resched())
3455 break;
3456
3457 rebalance_domains(balance_cpu, CPU_IDLE);
3458
3459 rq = cpu_rq(balance_cpu);
3460 if (time_after(this_rq->next_balance, rq->next_balance))
3461 this_rq->next_balance = rq->next_balance;
3462 }
3463 }
3464#endif
3465}
3466
3467static inline int on_null_domain(int cpu)
3468{
3469 return !rcu_dereference(cpu_rq(cpu)->sd);
3470}
3471
3472/*
3473 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
3474 *
3475 * In case of CONFIG_NO_HZ, this is the place where we nominate a new
3476 * idle load balancing owner or decide to stop the periodic load balancing,
3477 * if the whole system is idle.
3478 */
3479static inline void trigger_load_balance(struct rq *rq, int cpu)
3480{
3481#ifdef CONFIG_NO_HZ
3482 /*
3483 * If we were in the nohz mode recently and busy at the current
3484 * scheduler tick, then check if we need to nominate new idle
3485 * load balancer.
3486 */
3487 if (rq->in_nohz_recently && !rq->idle_at_tick) {
3488 rq->in_nohz_recently = 0;
3489
3490 if (atomic_read(&nohz.load_balancer) == cpu) {
3491 cpumask_clear_cpu(cpu, nohz.cpu_mask);
3492 atomic_set(&nohz.load_balancer, -1);
3493 }
3494
3495 if (atomic_read(&nohz.load_balancer) == -1) {
3496 int ilb = find_new_ilb(cpu);
3497
3498 if (ilb < nr_cpu_ids)
3499 resched_cpu(ilb);
3500 }
3501 }
3502
3503 /*
3504 * If this cpu is idle and doing idle load balancing for all the
3505 * cpus with ticks stopped, is it time for that to stop?
3506 */
3507 if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu &&
3508 cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
3509 resched_cpu(cpu);
3510 return;
3511 }
3512
3513 /*
3514 * If this cpu is idle and the idle load balancing is done by
3515 * someone else, then no need raise the SCHED_SOFTIRQ
3516 */
3517 if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu &&
3518 cpumask_test_cpu(cpu, nohz.cpu_mask))
3519 return;
3520#endif
3521 /* Don't need to rebalance while attached to NULL domain */
3522 if (time_after_eq(jiffies, rq->next_balance) &&
3523 likely(!on_null_domain(cpu)))
3524 raise_softirq(SCHED_SOFTIRQ);
3525}
3526
3527static void rq_online_fair(struct rq *rq)
3528{
@@ -1962,6 +3534,15 @@ static void rq_offline_fair(struct rq *rq)
 	update_sysctl();
 }
 
+#else /* CONFIG_SMP */
+
+/*
+ * on UP we do not need to balance between CPUs:
+ */
+static inline void idle_balance(int cpu, struct rq *rq)
+{
+}
+
 #endif /* CONFIG_SMP */
 
 /*
@@ -2076,7 +3657,7 @@ static void moved_group_fair(struct task_struct *p, int on_rq)
 }
 #endif
 
-unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
+static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
 {
 	struct sched_entity *se = &task->se;
 	unsigned int rr_interval = 0;
@@ -2108,8 +3689,6 @@ static const struct sched_class fair_sched_class = {
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_fair,
 
-	.load_balance		= load_balance_fair,
-	.move_one_task		= move_one_task_fair,
 	.rq_online		= rq_online_fair,
 	.rq_offline		= rq_offline_fair,
 