Diffstat (limited to 'kernel/sched/core.c')

 -rw-r--r--  kernel/sched/core.c | 660
 1 file changed, 51 insertions(+), 609 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 58453b8272fd..9b1f2e533b95 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -633,7 +633,19 @@ void wake_up_nohz_cpu(int cpu)
 static inline bool got_nohz_idle_kick(void)
 {
         int cpu = smp_processor_id();
-        return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
+
+        if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
+                return false;
+
+        if (idle_cpu(cpu) && !need_resched())
+                return true;
+
+        /*
+         * We can't run Idle Load Balance on this CPU for this time so we
+         * cancel it and clear NOHZ_BALANCE_KICK
+         */
+        clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
+        return false;
 }
 
 #else /* CONFIG_NO_HZ_COMMON */
@@ -667,7 +679,7 @@ void sched_avg_update(struct rq *rq)
 {
         s64 period = sched_avg_period();
 
-        while ((s64)(rq->clock - rq->age_stamp) > period) {
+        while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
                 /*
                  * Inline assembly required to prevent the compiler
                  * optimising this loop into a divmod call.
@@ -1328,7 +1340,7 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
                 p->sched_class->task_woken(rq, p);
 
         if (rq->idle_stamp) {
-                u64 delta = rq->clock - rq->idle_stamp;
+                u64 delta = rq_clock(rq) - rq->idle_stamp;
                 u64 max = 2*sysctl_sched_migration_cost;
 
                 if (delta > max)
@@ -1365,6 +1377,8 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
 
         rq = __task_rq_lock(p);
         if (p->on_rq) {
+                /* check_preempt_curr() may use rq clock */
+                update_rq_clock(rq);
                 ttwu_do_wakeup(rq, p, wake_flags);
                 ret = 1;
         }
@@ -1393,8 +1407,9 @@ static void sched_ttwu_pending(void)
 
 void scheduler_ipi(void)
 {
-        if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick()
-                        && !tick_nohz_full_cpu(smp_processor_id()))
+        if (llist_empty(&this_rq()->wake_list)
+                        && !tick_nohz_full_cpu(smp_processor_id())
+                        && !got_nohz_idle_kick())
                 return;
 
         /*
@@ -1417,7 +1432,7 @@ void scheduler_ipi(void)
         /*
          * Check if someone kicked us for doing the nohz idle load balance.
          */
-        if (unlikely(got_nohz_idle_kick() && !need_resched())) {
+        if (unlikely(got_nohz_idle_kick())) {
                 this_rq()->idle_balance = 1;
                 raise_softirq_irqoff(SCHED_SOFTIRQ);
         }
@@ -1596,15 +1611,6 @@ static void __sched_fork(struct task_struct *p)
         p->se.vruntime = 0;
         INIT_LIST_HEAD(&p->se.group_node);
 
-        /*
-         * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
-         * removed when useful for applications beyond shares distribution (e.g.
-         * load-balance).
-         */
-#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
-        p->se.avg.runnable_avg_period = 0;
-        p->se.avg.runnable_avg_sum = 0;
-#endif
 #ifdef CONFIG_SCHEDSTATS
         memset(&p->se.statistics, 0, sizeof(p->se.statistics));
 #endif
@@ -1748,6 +1754,8 @@ void wake_up_new_task(struct task_struct *p)
         set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
 #endif
 
+        /* Initialize new task's runnable average */
+        init_task_runnable_average(p);
         rq = __task_rq_lock(p);
         activate_task(rq, p, 0);
         p->on_rq = 1;
@@ -2056,575 +2064,6 @@ unsigned long nr_iowait_cpu(int cpu)
         return atomic_read(&this->nr_iowait);
 }
 
-unsigned long this_cpu_load(void)
-{
-        struct rq *this = this_rq();
-        return this->cpu_load[0];
-}
-
-
-/*
- * Global load-average calculations
- *
- * We take a distributed and async approach to calculating the global load-avg
- * in order to minimize overhead.
- *
- * The global load average is an exponentially decaying average of nr_running +
- * nr_uninterruptible.
- *
- * Once every LOAD_FREQ:
- *
- *   nr_active = 0;
- *   for_each_possible_cpu(cpu)
- *      nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
- *
- *   avenrun[n] = avenrun[0] * exp_n + nr_active * (1 - exp_n)
- *
- * Due to a number of reasons the above turns in the mess below:
- *
- *  - for_each_possible_cpu() is prohibitively expensive on machines with
- *    serious number of cpus, therefore we need to take a distributed approach
- *    to calculating nr_active.
- *
- *        \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
- *                      = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
- *
- *    So assuming nr_active := 0 when we start out -- true per definition, we
- *    can simply take per-cpu deltas and fold those into a global accumulate
- *    to obtain the same result. See calc_load_fold_active().
- *
- *    Furthermore, in order to avoid synchronizing all per-cpu delta folding
- *    across the machine, we assume 10 ticks is sufficient time for every
- *    cpu to have completed this task.
- *
- *    This places an upper-bound on the IRQ-off latency of the machine. Then
- *    again, being late doesn't loose the delta, just wrecks the sample.
- *
- *  - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because
- *    this would add another cross-cpu cacheline miss and atomic operation
- *    to the wakeup path. Instead we increment on whatever cpu the task ran
- *    when it went into uninterruptible state and decrement on whatever cpu
- *    did the wakeup. This means that only the sum of nr_uninterruptible over
- *    all cpus yields the correct result.
- *
- *  This covers the NO_HZ=n code, for extra head-aches, see the comment below.
- */
-
-/* Variables and functions for calc_load */
-static atomic_long_t calc_load_tasks;
-static unsigned long calc_load_update;
-unsigned long avenrun[3];
-EXPORT_SYMBOL(avenrun); /* should be removed */
-
-/**
- * get_avenrun - get the load average array
- * @loads:  pointer to dest load array
- * @offset: offset to add
- * @shift:  shift count to shift the result left
- *
- * These values are estimates at best, so no need for locking.
- */
-void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
-{
-        loads[0] = (avenrun[0] + offset) << shift;
-        loads[1] = (avenrun[1] + offset) << shift;
-        loads[2] = (avenrun[2] + offset) << shift;
-}
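[Editorial aside, not part of the patch] get_avenrun() above hands out the raw avenrun[] words, which are 11-bit fixed-point values (FSHIFT bits of fraction); the offset/shift arguments only let a caller round and rescale them. The sketch below is a hypothetical, self-contained userspace rendering of how such a reader turns the words into the familiar two-decimal load figures. LOAD_INT()/LOAD_FRAC() are local stand-ins written for this example (modelled on, but not copied from, the kernel's helpers), and the sample values are invented.

#include <stdio.h>

#define FSHIFT   11                    /* bits of fixed-point fraction */
#define FIXED_1  (1 << FSHIFT)         /* 1.0 == 2048 */

/* local stand-ins modelled on the kernel's LOAD_INT()/LOAD_FRAC() helpers */
#define LOAD_INT(x)  ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

int main(void)
{
        /* pretend get_avenrun(loads, FIXED_1/200, 0) filled these in:
         * 0.75, 0.50 and 0.25 plus a small rounding offset */
        unsigned long loads[3] = {
                1536 + FIXED_1 / 200,
                1024 + FIXED_1 / 200,
                 512 + FIXED_1 / 200,
        };

        printf("%lu.%02lu %lu.%02lu %lu.%02lu\n",
               LOAD_INT(loads[0]), LOAD_FRAC(loads[0]),
               LOAD_INT(loads[1]), LOAD_FRAC(loads[1]),
               LOAD_INT(loads[2]), LOAD_FRAC(loads[2]));   /* 0.75 0.50 0.25 */
        return 0;
}

An offset of FIXED_1/200 rounds to the nearest hundredth before the integer/fraction split, which is the conventional choice for a two-decimal display.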
-
-static long calc_load_fold_active(struct rq *this_rq)
-{
-        long nr_active, delta = 0;
-
-        nr_active = this_rq->nr_running;
-        nr_active += (long) this_rq->nr_uninterruptible;
-
-        if (nr_active != this_rq->calc_load_active) {
-                delta = nr_active - this_rq->calc_load_active;
-                this_rq->calc_load_active = nr_active;
-        }
-
-        return delta;
-}
-
-/*
- * a1 = a0 * e + a * (1 - e)
- */
-static unsigned long
-calc_load(unsigned long load, unsigned long exp, unsigned long active)
-{
-        load *= exp;
-        load += active * (FIXED_1 - exp);
-        load += 1UL << (FSHIFT - 1);
-        return load >> FSHIFT;
-}
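[Editorial aside, not part of the patch] calc_load() above is one step of the exponentially decaying average a1 = a0*e + a*(1 - e), done entirely in 11-bit fixed point. Below is a self-contained sketch of how the 1-minute figure creeps toward the number of runnable tasks over successive 5-second LOAD_FREQ windows; the constants are the standard kernel values and the function body mirrors the code being removed here, but the driver loop and sample numbers are invented for illustration.

#include <stdio.h>

#define FSHIFT  11
#define FIXED_1 (1 << FSHIFT)          /* 2048 == 1.0 */
#define EXP_1   1884                   /* ~2048 * exp(-5s/1min): 1-minute decay */

/* a1 = a0 * e + a * (1 - e), rounded to nearest, all in fixed point */
static unsigned long calc_load(unsigned long load, unsigned long exp,
                               unsigned long active)
{
        load *= exp;
        load += active * (FIXED_1 - exp);
        load += 1UL << (FSHIFT - 1);
        return load >> FSHIFT;
}

int main(void)
{
        unsigned long avg = 0;                  /* machine was idle: 0.00 */
        unsigned long active = 2 * FIXED_1;     /* two runnable tasks     */

        for (int i = 1; i <= 3; i++) {
                avg = calc_load(avg, EXP_1, active);
                printf("after %d windows: %lu.%02lu\n", i,
                       avg >> FSHIFT, ((avg & (FIXED_1 - 1)) * 100) >> FSHIFT);
        }
        /* prints roughly 0.16, 0.30, 0.44: slowly chasing 2.00 */
        return 0;
}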
-
-#ifdef CONFIG_NO_HZ_COMMON
-/*
- * Handle NO_HZ for the global load-average.
- *
- * Since the above described distributed algorithm to compute the global
- * load-average relies on per-cpu sampling from the tick, it is affected by
- * NO_HZ.
- *
- * The basic idea is to fold the nr_active delta into a global idle-delta upon
- * entering NO_HZ state such that we can include this as an 'extra' cpu delta
- * when we read the global state.
- *
- * Obviously reality has to ruin such a delightfully simple scheme:
- *
- *  - When we go NO_HZ idle during the window, we can negate our sample
- *    contribution, causing under-accounting.
- *
- *    We avoid this by keeping two idle-delta counters and flipping them
- *    when the window starts, thus separating old and new NO_HZ load.
- *
- *    The only trick is the slight shift in index flip for read vs write.
- *
- *        0s            5s            10s           15s
- *          +10           +10           +10           +10
- *        |-|-----------|-|-----------|-|-----------|-|
- *    r:0 0 1           1 0           0 1           1 0
- *    w:0 1 1           0 0           1 1           0 0
- *
- *    This ensures we'll fold the old idle contribution in this window while
- *    accumlating the new one.
- *
- *  - When we wake up from NO_HZ idle during the window, we push up our
- *    contribution, since we effectively move our sample point to a known
- *    busy state.
- *
- *    This is solved by pushing the window forward, and thus skipping the
- *    sample, for this cpu (effectively using the idle-delta for this cpu which
- *    was in effect at the time the window opened). This also solves the issue
- *    of having to deal with a cpu having been in NOHZ idle for multiple
- *    LOAD_FREQ intervals.
- *
- *    When making the ILB scale, we should try to pull this in as well.
- */
-static atomic_long_t calc_load_idle[2];
-static int calc_load_idx;
-
-static inline int calc_load_write_idx(void)
-{
-        int idx = calc_load_idx;
-
-        /*
-         * See calc_global_nohz(), if we observe the new index, we also
-         * need to observe the new update time.
-         */
-        smp_rmb();
-
-        /*
-         * If the folding window started, make sure we start writing in the
-         * next idle-delta.
-         */
-        if (!time_before(jiffies, calc_load_update))
-                idx++;
-
-        return idx & 1;
-}
-
-static inline int calc_load_read_idx(void)
-{
-        return calc_load_idx & 1;
-}
-
-void calc_load_enter_idle(void)
-{
-        struct rq *this_rq = this_rq();
-        long delta;
-
-        /*
-         * We're going into NOHZ mode, if there's any pending delta, fold it
-         * into the pending idle delta.
-         */
-        delta = calc_load_fold_active(this_rq);
-        if (delta) {
-                int idx = calc_load_write_idx();
-                atomic_long_add(delta, &calc_load_idle[idx]);
-        }
-}
-
-void calc_load_exit_idle(void)
-{
-        struct rq *this_rq = this_rq();
-
-        /*
-         * If we're still before the sample window, we're done.
-         */
-        if (time_before(jiffies, this_rq->calc_load_update))
-                return;
-
-        /*
-         * We woke inside or after the sample window, this means we're already
-         * accounted through the nohz accounting, so skip the entire deal and
-         * sync up for the next window.
-         */
-        this_rq->calc_load_update = calc_load_update;
-        if (time_before(jiffies, this_rq->calc_load_update + 10))
-                this_rq->calc_load_update += LOAD_FREQ;
-}
-
-static long calc_load_fold_idle(void)
-{
-        int idx = calc_load_read_idx();
-        long delta = 0;
-
-        if (atomic_long_read(&calc_load_idle[idx]))
-                delta = atomic_long_xchg(&calc_load_idle[idx], 0);
-
-        return delta;
-}
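[Editorial aside, not part of the patch] The read/write index split above is the delicate part: once the fold window has opened, a CPU entering NO_HZ must bank its delta for the next sample, not the one about to be read, otherwise it would cancel a contribution that has already been counted. The toy, single-threaded model below only illustrates that indexing; the names echo the kernel's for readability, and the atomics, memory barriers and jiffies arithmetic are deliberately left out.

#include <stdio.h>

static long idle_delta[2];      /* stands in for calc_load_idle[]           */
static int  load_idx;           /* stands in for calc_load_idx              */
static int  window_open;        /* stands in for !time_before(jiffies, ...) */

static int write_idx(void) { return (load_idx + window_open) & 1; }
static int read_idx(void)  { return load_idx & 1; }

static void enter_idle(long delta)  /* like calc_load_enter_idle() */
{
        idle_delta[write_idx()] += delta;
}

static long fold_idle(void)         /* like calc_load_fold_idle() */
{
        long d = idle_delta[read_idx()];

        idle_delta[read_idx()] = 0;
        return d;
}

int main(void)
{
        enter_idle(-2);         /* two tasks went idle before the fold point */
        window_open = 1;        /* the 10-tick fold window has started       */
        enter_idle(-1);         /* a late idler: lands in the other slot     */

        printf("folded this window: %ld\n", fold_idle());      /* -2 */
        load_idx++;             /* calc_global_nohz() flips the index        */
        window_open = 0;        /* and a fresh window begins                 */
        printf("folded next window: %ld\n", fold_idle());      /* -1 */
        return 0;
}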
-
-/**
- * fixed_power_int - compute: x^n, in O(log n) time
- *
- * @x:         base of the power
- * @frac_bits: fractional bits of @x
- * @n:         power to raise @x to.
- *
- * By exploiting the relation between the definition of the natural power
- * function: x^n := x*x*...*x (x multiplied by itself for n times), and
- * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
- * (where: n_i \elem {0, 1}, the binary vector representing n),
- * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
- * of course trivially computable in O(log_2 n), the length of our binary
- * vector.
- */
-static unsigned long
-fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
-{
-        unsigned long result = 1UL << frac_bits;
-
-        if (n) for (;;) {
-                if (n & 1) {
-                        result *= x;
-                        result += 1UL << (frac_bits - 1);
-                        result >>= frac_bits;
-                }
-                n >>= 1;
-                if (!n)
-                        break;
-                x *= x;
-                x += 1UL << (frac_bits - 1);
-                x >>= frac_bits;
-        }
-
-        return result;
-}
-
-/*
- * a1 = a0 * e + a * (1 - e)
- *
- * a2 = a1 * e + a * (1 - e)
- *    = (a0 * e + a * (1 - e)) * e + a * (1 - e)
- *    = a0 * e^2 + a * (1 - e) * (1 + e)
- *
- * a3 = a2 * e + a * (1 - e)
- *    = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
- *    = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
- *
- *  ...
- *
- * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
- *    = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
- *    = a0 * e^n + a * (1 - e^n)
- *
- * [1] application of the geometric series:
- *
- *              n         1 - x^(n+1)
- *     S_n := \Sum x^i = -------------
- *             i=0          1 - x
- */
-static unsigned long
-calc_load_n(unsigned long load, unsigned long exp,
-            unsigned long active, unsigned int n)
-{
-
-        return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
-}
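[Editorial aside, not part of the patch] calc_load_n() leans on the closed form derived in the comment above: folding n missed windows at once with e^n gives the same answer as n single steps when the active count did not change in between. The standalone check below demonstrates that; fixed_power_int() is transcribed from the removed code so the example compiles on its own, and the two results are expected to differ by at most a least-significant bit or two because each path rounds at different points.

#include <stdio.h>

#define FSHIFT  11
#define FIXED_1 (1 << FSHIFT)
#define EXP_1   1884

static unsigned long calc_load(unsigned long load, unsigned long exp,
                               unsigned long active)
{
        load *= exp;
        load += active * (FIXED_1 - exp);
        load += 1UL << (FSHIFT - 1);
        return load >> FSHIFT;
}

/* x^n in O(log n) multiplies: square x for every bit of n, multiply it in
 * whenever that bit is set (transcribed from the removed kernel helper) */
static unsigned long fixed_power_int(unsigned long x, unsigned int frac_bits,
                                     unsigned int n)
{
        unsigned long result = 1UL << frac_bits;

        if (n) for (;;) {
                if (n & 1) {
                        result *= x;
                        result += 1UL << (frac_bits - 1);
                        result >>= frac_bits;
                }
                n >>= 1;
                if (!n)
                        break;
                x *= x;
                x += 1UL << (frac_bits - 1);
                x >>= frac_bits;
        }
        return result;
}

int main(void)
{
        unsigned long step = FIXED_1 / 2;       /* both start at 0.50      */
        unsigned long bulk = FIXED_1 / 2;
        unsigned long active = FIXED_1;         /* one task the whole time */
        unsigned int n = 4;                     /* four missed windows     */

        for (unsigned int i = 0; i < n; i++)
                step = calc_load(step, EXP_1, active);
        bulk = calc_load(bulk, fixed_power_int(EXP_1, FSHIFT, n), active);

        /* both land around 1314-1315 in fixed point, i.e. ~0.64 */
        printf("step-by-step: %lu   bulk catch-up: %lu\n", step, bulk);
        return 0;
}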
-
-/*
- * NO_HZ can leave us missing all per-cpu ticks calling
- * calc_load_account_active(), but since an idle CPU folds its delta into
- * calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold
- * in the pending idle delta if our idle period crossed a load cycle boundary.
- *
- * Once we've updated the global active value, we need to apply the exponential
- * weights adjusted to the number of cycles missed.
- */
-static void calc_global_nohz(void)
-{
-        long delta, active, n;
-
-        if (!time_before(jiffies, calc_load_update + 10)) {
-                /*
-                 * Catch-up, fold however many we are behind still
-                 */
-                delta = jiffies - calc_load_update - 10;
-                n = 1 + (delta / LOAD_FREQ);
-
-                active = atomic_long_read(&calc_load_tasks);
-                active = active > 0 ? active * FIXED_1 : 0;
-
-                avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
-                avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
-                avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
-
-                calc_load_update += n * LOAD_FREQ;
-        }
-
-        /*
-         * Flip the idle index...
-         *
-         * Make sure we first write the new time then flip the index, so that
-         * calc_load_write_idx() will see the new time when it reads the new
-         * index, this avoids a double flip messing things up.
-         */
-        smp_wmb();
-        calc_load_idx++;
-}
-#else /* !CONFIG_NO_HZ_COMMON */
-
-static inline long calc_load_fold_idle(void) { return 0; }
-static inline void calc_global_nohz(void) { }
-
-#endif /* CONFIG_NO_HZ_COMMON */
-
-/*
- * calc_load - update the avenrun load estimates 10 ticks after the
- * CPUs have updated calc_load_tasks.
- */
-void calc_global_load(unsigned long ticks)
-{
-        long active, delta;
-
-        if (time_before(jiffies, calc_load_update + 10))
-                return;
-
-        /*
-         * Fold the 'old' idle-delta to include all NO_HZ cpus.
-         */
-        delta = calc_load_fold_idle();
-        if (delta)
-                atomic_long_add(delta, &calc_load_tasks);
-
-        active = atomic_long_read(&calc_load_tasks);
-        active = active > 0 ? active * FIXED_1 : 0;
-
-        avenrun[0] = calc_load(avenrun[0], EXP_1, active);
-        avenrun[1] = calc_load(avenrun[1], EXP_5, active);
-        avenrun[2] = calc_load(avenrun[2], EXP_15, active);
-
-        calc_load_update += LOAD_FREQ;
-
-        /*
-         * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
-         */
-        calc_global_nohz();
-}
-
-/*
- * Called from update_cpu_load() to periodically update this CPU's
- * active count.
- */
-static void calc_load_account_active(struct rq *this_rq)
-{
-        long delta;
-
-        if (time_before(jiffies, this_rq->calc_load_update))
-                return;
-
-        delta = calc_load_fold_active(this_rq);
-        if (delta)
-                atomic_long_add(delta, &calc_load_tasks);
-
-        this_rq->calc_load_update += LOAD_FREQ;
-}
-
-/*
- * End of global load-average stuff
- */
-
-/*
- * The exact cpuload at various idx values, calculated at every tick would be
- * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
- *
- * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
- * on nth tick when cpu may be busy, then we have:
- * load = ((2^idx - 1) / 2^idx)^(n-1) * load
- * load = (2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
- *
- * decay_load_missed() below does efficient calculation of
- * load = ((2^idx - 1) / 2^idx)^(n-1) * load
- * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
- *
- * The calculation is approximated on a 128 point scale.
- * degrade_zero_ticks is the number of ticks after which load at any
- * particular idx is approximated to be zero.
- * degrade_factor is a precomputed table, a row for each load idx.
- * Each column corresponds to degradation factor for a power of two ticks,
- * based on 128 point scale.
- * Example:
- * row 2, col 3 (=12) says that the degradation at load idx 2 after
- * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
- *
- * With this power of 2 load factors, we can degrade the load n times
- * by looking at 1 bits in n and doing as many mult/shift instead of
- * n mult/shifts needed by the exact degradation.
- */
-#define DEGRADE_SHIFT           7
-static const unsigned char
-                degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
-static const unsigned char
-                degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
-                                        {0, 0, 0, 0, 0, 0, 0, 0},
-                                        {64, 32, 8, 0, 0, 0, 0, 0},
-                                        {96, 72, 40, 12, 1, 0, 0},
-                                        {112, 98, 75, 43, 15, 1, 0},
-                                        {120, 112, 98, 76, 45, 16, 2} };
-
-/*
- * Update cpu_load for any missed ticks, due to tickless idle. The backlog
- * would be when CPU is idle and so we just decay the old load without
- * adding any new load.
- */
-static unsigned long
-decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
-{
-        int j = 0;
-
-        if (!missed_updates)
-                return load;
-
-        if (missed_updates >= degrade_zero_ticks[idx])
-                return 0;
-
-        if (idx == 1)
-                return load >> missed_updates;
-
-        while (missed_updates) {
-                if (missed_updates % 2)
-                        load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
-
-                missed_updates >>= 1;
-                j++;
-        }
-        return load;
-}
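[Editorial aside, not part of the patch] The degrade_factor[] rows approximate ((2^idx - 1)/2^idx) raised to a power-of-two number of ticks on a 128-point scale, and decay_load_missed() multiplies in one table entry per set bit of the missed-tick count. The standalone check below reproduces the example from the comment (idx 2, 8 missed ticks: a single lookup of column 3 = 12/128), with the table and helper transcribed from the removed code and the exact floating-point value printed for comparison.

#include <stdio.h>

#define DEGRADE_SHIFT    7
#define CPU_LOAD_IDX_MAX 5

static const unsigned char degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
static const unsigned char degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
        {0, 0, 0, 0, 0, 0, 0, 0},
        {64, 32, 8, 0, 0, 0, 0, 0},
        {96, 72, 40, 12, 1, 0, 0},
        {112, 98, 75, 43, 15, 1, 0},
        {120, 112, 98, 76, 45, 16, 2} };

/* same binary-decomposition walk as the kernel's decay_load_missed() */
static unsigned long
decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
{
        int j = 0;

        if (!missed_updates)
                return load;
        if (missed_updates >= degrade_zero_ticks[idx])
                return 0;
        if (idx == 1)
                return load >> missed_updates;

        while (missed_updates) {
                if (missed_updates % 2)
                        load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
                missed_updates >>= 1;
                j++;
        }
        return load;
}

int main(void)
{
        /* 8 missed ticks at idx 2: bit 3 is set, so column 3 (12/128) applies */
        printf("table approximation: %lu\n", decay_load_missed(1000, 8, 2)); /* 93 */
        /* exact (3/4)^8 * 1000 for comparison */
        printf("exact value:         %.0f\n",
               1000.0 * 0.75 * 0.75 * 0.75 * 0.75
                      * 0.75 * 0.75 * 0.75 * 0.75);                          /* 100 */
        return 0;
}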
-
-/*
- * Update rq->cpu_load[] statistics. This function is usually called every
- * scheduler tick (TICK_NSEC). With tickless idle this will not be called
- * every tick. We fix it up based on jiffies.
- */
-static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
-                              unsigned long pending_updates)
-{
-        int i, scale;
-
-        this_rq->nr_load_updates++;
-
-        /* Update our load: */
-        this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
-        for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
-                unsigned long old_load, new_load;
-
-                /* scale is effectively 1 << i now, and >> i divides by scale */
-
-                old_load = this_rq->cpu_load[i];
-                old_load = decay_load_missed(old_load, pending_updates - 1, i);
-                new_load = this_load;
-                /*
-                 * Round up the averaging division if load is increasing. This
-                 * prevents us from getting stuck on 9 if the load is 10, for
-                 * example.
-                 */
-                if (new_load > old_load)
-                        new_load += scale - 1;
-
-                this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
-        }
-
-        sched_avg_update(this_rq);
-}
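[Editorial aside, not part of the patch] The inner loop above computes cpu_load[i] = (old * (2^i - 1) + new) / 2^i, so each successive index is a slower-moving average of the same instantaneous load. Below is a standalone sketch of a few ticks of that smoothing after the load jumps from 0 to 1024; the rq plumbing and decay_load_missed() handling are dropped, while the round-up of a rising load is kept because the removed code's comment calls it out.

#include <stdio.h>

#define CPU_LOAD_IDX_MAX 5

int main(void)
{
        unsigned long cpu_load[CPU_LOAD_IDX_MAX] = { 0 };
        unsigned long this_load = 1024;         /* load steps up from 0 */

        for (int tick = 1; tick <= 3; tick++) {
                cpu_load[0] = this_load;        /* fast-track for idx 0 */
                for (int i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
                        unsigned long old_load = cpu_load[i];
                        unsigned long new_load = this_load;

                        if (new_load > old_load)        /* round up while rising */
                                new_load += scale - 1;
                        cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
                }

                printf("tick %d:", tick);
                for (int i = 0; i < CPU_LOAD_IDX_MAX; i++)
                        printf(" %4lu", cpu_load[i]);
                printf("\n");   /* idx 0 tracks instantly, idx 4 lags well behind */
        }
        return 0;
}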
-
-#ifdef CONFIG_NO_HZ_COMMON
-/*
- * There is no sane way to deal with nohz on smp when using jiffies because the
- * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
- * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
- *
- * Therefore we cannot use the delta approach from the regular tick since that
- * would seriously skew the load calculation. However we'll make do for those
- * updates happening while idle (nohz_idle_balance) or coming out of idle
- * (tick_nohz_idle_exit).
- *
- * This means we might still be one tick off for nohz periods.
- */
-
-/*
- * Called from nohz_idle_balance() to update the load ratings before doing the
- * idle balance.
- */
-void update_idle_cpu_load(struct rq *this_rq)
-{
-        unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
-        unsigned long load = this_rq->load.weight;
-        unsigned long pending_updates;
-
-        /*
-         * bail if there's load or we're actually up-to-date.
-         */
-        if (load || curr_jiffies == this_rq->last_load_update_tick)
-                return;
-
-        pending_updates = curr_jiffies - this_rq->last_load_update_tick;
-        this_rq->last_load_update_tick = curr_jiffies;
-
-        __update_cpu_load(this_rq, load, pending_updates);
-}
-
-/*
- * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
- */
-void update_cpu_load_nohz(void)
-{
-        struct rq *this_rq = this_rq();
-        unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
-        unsigned long pending_updates;
-
-        if (curr_jiffies == this_rq->last_load_update_tick)
-                return;
-
-        raw_spin_lock(&this_rq->lock);
-        pending_updates = curr_jiffies - this_rq->last_load_update_tick;
-        if (pending_updates) {
-                this_rq->last_load_update_tick = curr_jiffies;
-                /*
-                 * We were idle, this means load 0, the current load might be
-                 * !0 due to remote wakeups and the sort.
-                 */
-                __update_cpu_load(this_rq, 0, pending_updates);
-        }
-        raw_spin_unlock(&this_rq->lock);
-}
-#endif /* CONFIG_NO_HZ_COMMON */
-
-/*
- * Called from scheduler_tick()
- */
-static void update_cpu_load_active(struct rq *this_rq)
-{
-        /*
-         * See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
-         */
-        this_rq->last_load_update_tick = jiffies;
-        __update_cpu_load(this_rq, this_rq->load.weight, 1);
-
-        calc_load_account_active(this_rq);
-}
-
 #ifdef CONFIG_SMP
 
 /*
@@ -2673,7 +2112,7 @@ static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
 
         if (task_current(rq, p)) {
                 update_rq_clock(rq);
-                ns = rq->clock_task - p->se.exec_start;
+                ns = rq_clock_task(rq) - p->se.exec_start;
                 if ((s64)ns < 0)
                         ns = 0;
         }
@@ -2726,8 +2165,8 @@ void scheduler_tick(void)
 
         raw_spin_lock(&rq->lock);
         update_rq_clock(rq);
-        update_cpu_load_active(rq);
         curr->sched_class->task_tick(rq, curr, 0);
+        update_cpu_load_active(rq);
         raw_spin_unlock(&rq->lock);
 
         perf_event_task_tick();
@@ -4745,7 +4184,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
          */
         idle->sched_class = &idle_sched_class;
         ftrace_graph_init_idle_task(idle, cpu);
-        vtime_init_idle(idle);
+        vtime_init_idle(idle, cpu);
 #if defined(CONFIG_SMP)
         sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
 #endif
@@ -4947,6 +4386,13 @@ static void migrate_tasks(unsigned int dead_cpu)
          */
         rq->stop = NULL;
 
+        /*
+         * put_prev_task() and pick_next_task() sched
+         * class method both need to have an up-to-date
+         * value of rq->clock[_task]
+         */
+        update_rq_clock(rq);
+
         for ( ; ; ) {
                 /*
                  * There's this thread running, bail when that's the only
@@ -5080,7 +4526,7 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
         return table;
 }
 
-static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
+static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
 {
         struct ctl_table *entry, *table;
         struct sched_domain *sd;
@@ -5894,7 +5340,7 @@ build_sched_groups(struct sched_domain *sd, int cpu)
         get_group(cpu, sdd, &sd->groups);
         atomic_inc(&sd->groups->ref);
 
-        if (cpu != cpumask_first(sched_domain_span(sd)))
+        if (cpu != cpumask_first(span))
                 return 0;
 
         lockdep_assert_held(&sched_domains_mutex);
@@ -5904,12 +5350,12 @@ build_sched_groups(struct sched_domain *sd, int cpu)
 
         for_each_cpu(i, span) {
                 struct sched_group *sg;
-                int group = get_group(i, sdd, &sg);
-                int j;
+                int group, j;
 
                 if (cpumask_test_cpu(i, covered))
                         continue;
 
+                group = get_group(i, sdd, &sg);
                 cpumask_clear(sched_group_cpus(sg));
                 sg->sgp->power = 0;
                 cpumask_setall(sched_group_mask(sg));
@@ -5947,7 +5393,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 {
         struct sched_group *sg = sd->groups;
 
-        WARN_ON(!sd || !sg);
+        WARN_ON(!sg);
 
         do {
                 sg->group_weight = cpumask_weight(sched_group_cpus(sg));
@@ -6112,6 +5558,9 @@ static struct sched_domain_topology_level default_topology[] = {
 
 static struct sched_domain_topology_level *sched_domain_topology = default_topology;
 
+#define for_each_sd_topology(tl)                        \
+        for (tl = sched_domain_topology; tl->init; tl++)
+
 #ifdef CONFIG_NUMA
 
 static int sched_domains_numa_levels;
@@ -6409,7 +5858,7 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
         struct sched_domain_topology_level *tl;
         int j;
 
-        for (tl = sched_domain_topology; tl->init; tl++) {
+        for_each_sd_topology(tl) {
                 struct sd_data *sdd = &tl->data;
 
                 sdd->sd = alloc_percpu(struct sched_domain *);
@@ -6462,7 +5911,7 @@ static void __sdt_free(const struct cpumask *cpu_map)
         struct sched_domain_topology_level *tl;
         int j;
 
-        for (tl = sched_domain_topology; tl->init; tl++) {
+        for_each_sd_topology(tl) {
                 struct sd_data *sdd = &tl->data;
 
                 for_each_cpu(j, cpu_map) {
@@ -6490,9 +5939,8 @@ static void __sdt_free(const struct cpumask *cpu_map)
 }
 
 struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
-                struct s_data *d, const struct cpumask *cpu_map,
-                struct sched_domain_attr *attr, struct sched_domain *child,
-                int cpu)
+                const struct cpumask *cpu_map, struct sched_domain_attr *attr,
+                struct sched_domain *child, int cpu)
 {
         struct sched_domain *sd = tl->init(tl, cpu);
         if (!sd)
@@ -6503,8 +5951,8 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
                 sd->level = child->level + 1;
                 sched_domain_level_max = max(sched_domain_level_max, sd->level);
                 child->parent = sd;
+                sd->child = child;
         }
-        sd->child = child;
         set_domain_attribute(sd, attr);
 
         return sd;
@@ -6517,7 +5965,7 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
 static int build_sched_domains(const struct cpumask *cpu_map,
                                struct sched_domain_attr *attr)
 {
-        enum s_alloc alloc_state = sa_none;
+        enum s_alloc alloc_state;
         struct sched_domain *sd;
         struct s_data d;
         int i, ret = -ENOMEM;
@@ -6531,18 +5979,15 @@ static int build_sched_domains(const struct cpumask *cpu_map,
                 struct sched_domain_topology_level *tl;
 
                 sd = NULL;
-                for (tl = sched_domain_topology; tl->init; tl++) {
-                        sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
+                for_each_sd_topology(tl) {
+                        sd = build_sched_domain(tl, cpu_map, attr, sd, i);
+                        if (tl == sched_domain_topology)
+                                *per_cpu_ptr(d.sd, i) = sd;
                         if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
                                 sd->flags |= SD_OVERLAP;
                         if (cpumask_equal(cpu_map, sched_domain_span(sd)))
                                 break;
                 }
-
-                while (sd->child)
-                        sd = sd->child;
-
-                *per_cpu_ptr(d.sd, i) = sd;
         }
 
         /* Build the groups for the domains */
@@ -6854,9 +6299,6 @@ void __init sched_init_smp(void)
         hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
         hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
 
-        /* RT runtime code needs to handle some hotplug events */
-        hotcpu_notifier(update_runtime, 0);
-
         init_hrtick();
 
         /* Move init over to a non-isolated CPU */