author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2012-06-22 09:52:09 -0400
committer  Ingo Molnar <mingo@kernel.org>             2012-07-05 14:58:13 -0400
commit     5167e8d5417bf5c322a703d2927daec727ea40dd (patch)
tree       b919aac933c104e7c7abc1730da810f60ba3229d
parent     164c33c6adee609b8b9062cce4c10f764d0dce13 (diff)
sched/nohz: Rewrite and fix load-avg computation -- again
Thanks to Charles Wang for spotting the defects in the current code:

 - If we go idle during the sample window -- after sampling, we get a
   negative bias because we can negate our own sample.

 - If we wake up during the sample window we get a positive bias
   because we push the sample to a known active period.

So rewrite the entire nohz load-avg muck once again, now adding
copious documentation to the code.

Reported-and-tested-by: Doug Smythies <dsmythies@telus.net>
Reported-and-tested-by: Charles Wang <muming.wq@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: stable@kernel.org
Link: http://lkml.kernel.org/r/1340373782.18025.74.camel@twins
[ minor edits ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
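For readers who want the arithmetic in isolation: below is a small user-space sketch (illustrative only, not part of this patch; the constants mirror the kernel's FSHIFT/FIXED_1/EXP_1 and the update matches the calc_load() helper touched in the diff) of the fixed-point decay a1 = a0 * e + a * (1 - e) that runs once per LOAD_FREQ (5 second) window:

#include <stdio.h>

/* Fixed-point constants as used by the kernel's load-average code. */
#define FSHIFT   11                 /* nr of bits of precision */
#define FIXED_1  (1 << FSHIFT)      /* 1.0 in fixed-point */
#define EXP_1    1884               /* fixed-point exp(-5sec/1min) */

/* a1 = a0 * e + a * (1 - e), everything in FSHIFT fixed-point */
static unsigned long calc_load(unsigned long load, unsigned long exp,
                               unsigned long active)
{
    load *= exp;
    load += active * (FIXED_1 - exp);
    return load >> FSHIFT;
}

int main(void)
{
    unsigned long avenrun = 0;                 /* 1-minute average, fixed-point */
    unsigned long nr_active = 3 * FIXED_1;     /* pretend 3 tasks are runnable */

    /* Ten 5-second windows with a constant active count. */
    for (int i = 0; i < 10; i++) {
        avenrun = calc_load(avenrun, EXP_1, nr_active);
        printf("loadavg ~ %lu.%02lu\n", avenrun >> FSHIFT,
               ((avenrun & (FIXED_1 - 1)) * 100) >> FSHIFT);
    }
    return 0;
}

Running it shows the 1-minute average ramping towards 3.00 over successive windows; the per-cpu folding reworked below exists only to feed that update with unbiased nr_active samples.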
-rw-r--r--  include/linux/sched.h     |   8
-rw-r--r--  kernel/sched/core.c       | 275
-rw-r--r--  kernel/sched/idle_task.c  |   1
-rw-r--r--  kernel/sched/sched.h      |   2
-rw-r--r--  kernel/time/tick-sched.c  |   2
5 files changed, 213 insertions(+), 75 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4059c0f33f07..20cb7497c59c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1909,6 +1909,14 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
 }
 #endif
 
+#ifdef CONFIG_NO_HZ
+void calc_load_enter_idle(void);
+void calc_load_exit_idle(void);
+#else
+static inline void calc_load_enter_idle(void) { }
+static inline void calc_load_exit_idle(void) { }
+#endif /* CONFIG_NO_HZ */
+
 #ifndef CONFIG_CPUMASK_OFFSTACK
 static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 {
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d5594a4268d4..bb840405335d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2161,11 +2161,73 @@ unsigned long this_cpu_load(void)
 }
 
 
+/*
+ * Global load-average calculations
+ *
+ * We take a distributed and async approach to calculating the global load-avg
+ * in order to minimize overhead.
+ *
+ * The global load average is an exponentially decaying average of nr_running +
+ * nr_uninterruptible.
+ *
+ * Once every LOAD_FREQ:
+ *
+ *   nr_active = 0;
+ *   for_each_possible_cpu(cpu)
+ *      nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
+ *
+ *   avenrun[n] = avenrun[0] * exp_n + nr_active * (1 - exp_n)
+ *
+ * Due to a number of reasons the above turns in the mess below:
+ *
+ *  - for_each_possible_cpu() is prohibitively expensive on machines with
+ *    serious number of cpus, therefore we need to take a distributed approach
+ *    to calculating nr_active.
+ *
+ *        \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
+ *                      = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
+ *
+ *    So assuming nr_active := 0 when we start out -- true per definition, we
+ *    can simply take per-cpu deltas and fold those into a global accumulate
+ *    to obtain the same result. See calc_load_fold_active().
+ *
+ *    Furthermore, in order to avoid synchronizing all per-cpu delta folding
+ *    across the machine, we assume 10 ticks is sufficient time for every
+ *    cpu to have completed this task.
+ *
+ *    This places an upper-bound on the IRQ-off latency of the machine. Then
+ *    again, being late doesn't loose the delta, just wrecks the sample.
+ *
+ *  - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because
+ *    this would add another cross-cpu cacheline miss and atomic operation
+ *    to the wakeup path. Instead we increment on whatever cpu the task ran
+ *    when it went into uninterruptible state and decrement on whatever cpu
+ *    did the wakeup. This means that only the sum of nr_uninterruptible over
+ *    all cpus yields the correct result.
+ *
+ *  This covers the NO_HZ=n code, for extra head-aches, see the comment below.
+ */
+
 /* Variables and functions for calc_load */
 static atomic_long_t calc_load_tasks;
 static unsigned long calc_load_update;
 unsigned long avenrun[3];
-EXPORT_SYMBOL(avenrun);
+EXPORT_SYMBOL(avenrun); /* should be removed */
+
+/**
+ * get_avenrun - get the load average array
+ * @loads:  pointer to dest load array
+ * @offset: offset to add
+ * @shift:  shift count to shift the result left
+ *
+ * These values are estimates at best, so no need for locking.
+ */
+void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+{
+        loads[0] = (avenrun[0] + offset) << shift;
+        loads[1] = (avenrun[1] + offset) << shift;
+        loads[2] = (avenrun[2] + offset) << shift;
+}
 
 static long calc_load_fold_active(struct rq *this_rq)
 {
@@ -2182,6 +2244,9 @@ static long calc_load_fold_active(struct rq *this_rq)
         return delta;
 }
 
+/*
+ * a1 = a0 * e + a * (1 - e)
+ */
 static unsigned long
 calc_load(unsigned long load, unsigned long exp, unsigned long active)
 {
@@ -2193,30 +2258,118 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
 
 #ifdef CONFIG_NO_HZ
 /*
- * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
+ * Handle NO_HZ for the global load-average.
+ *
+ * Since the above described distributed algorithm to compute the global
+ * load-average relies on per-cpu sampling from the tick, it is affected by
+ * NO_HZ.
+ *
+ * The basic idea is to fold the nr_active delta into a global idle-delta upon
+ * entering NO_HZ state such that we can include this as an 'extra' cpu delta
+ * when we read the global state.
+ *
+ * Obviously reality has to ruin such a delightfully simple scheme:
+ *
+ *  - When we go NO_HZ idle during the window, we can negate our sample
+ *    contribution, causing under-accounting.
+ *
+ *    We avoid this by keeping two idle-delta counters and flipping them
+ *    when the window starts, thus separating old and new NO_HZ load.
+ *
+ *    The only trick is the slight shift in index flip for read vs write.
+ *
+ *        0s            5s            10s           15s
+ *          +10           +10           +10           +10
+ *        |-|-----------|-|-----------|-|-----------|-|
+ *    r:0 0 1           1 0           0 1           1 0
+ *    w:0 1 1           0 0           1 1           0 0
+ *
+ *    This ensures we'll fold the old idle contribution in this window while
+ *    accumlating the new one.
+ *
+ *  - When we wake up from NO_HZ idle during the window, we push up our
+ *    contribution, since we effectively move our sample point to a known
+ *    busy state.
+ *
+ *    This is solved by pushing the window forward, and thus skipping the
+ *    sample, for this cpu (effectively using the idle-delta for this cpu which
+ *    was in effect at the time the window opened). This also solves the issue
+ *    of having to deal with a cpu having been in NOHZ idle for multiple
+ *    LOAD_FREQ intervals.
  *
  * When making the ILB scale, we should try to pull this in as well.
  */
-static atomic_long_t calc_load_tasks_idle;
+static atomic_long_t calc_load_idle[2];
+static int calc_load_idx;
 
-void calc_load_account_idle(struct rq *this_rq)
+static inline int calc_load_write_idx(void)
 {
+        int idx = calc_load_idx;
+
+        /*
+         * See calc_global_nohz(), if we observe the new index, we also
+         * need to observe the new update time.
+         */
+        smp_rmb();
+
+        /*
+         * If the folding window started, make sure we start writing in the
+         * next idle-delta.
+         */
+        if (!time_before(jiffies, calc_load_update))
+                idx++;
+
+        return idx & 1;
+}
+
+static inline int calc_load_read_idx(void)
+{
+        return calc_load_idx & 1;
+}
+
+void calc_load_enter_idle(void)
+{
+        struct rq *this_rq = this_rq();
         long delta;
 
+        /*
+         * We're going into NOHZ mode, if there's any pending delta, fold it
+         * into the pending idle delta.
+         */
         delta = calc_load_fold_active(this_rq);
-        if (delta)
-                atomic_long_add(delta, &calc_load_tasks_idle);
+        if (delta) {
+                int idx = calc_load_write_idx();
+                atomic_long_add(delta, &calc_load_idle[idx]);
+        }
 }
 
-static long calc_load_fold_idle(void)
+void calc_load_exit_idle(void)
 {
-        long delta = 0;
+        struct rq *this_rq = this_rq();
+
+        /*
+         * If we're still before the sample window, we're done.
+         */
+        if (time_before(jiffies, this_rq->calc_load_update))
+                return;
 
         /*
-         * Its got a race, we don't care...
+         * We woke inside or after the sample window, this means we're already
+         * accounted through the nohz accounting, so skip the entire deal and
+         * sync up for the next window.
          */
-        if (atomic_long_read(&calc_load_tasks_idle))
-                delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
+        this_rq->calc_load_update = calc_load_update;
+        if (time_before(jiffies, this_rq->calc_load_update + 10))
+                this_rq->calc_load_update += LOAD_FREQ;
+}
+
+static long calc_load_fold_idle(void)
+{
+        int idx = calc_load_read_idx();
+        long delta = 0;
+
+        if (atomic_long_read(&calc_load_idle[idx]))
+                delta = atomic_long_xchg(&calc_load_idle[idx], 0);
 
         return delta;
 }
@@ -2302,66 +2455,39 @@ static void calc_global_nohz(void)
 {
         long delta, active, n;
 
-        /*
-         * If we crossed a calc_load_update boundary, make sure to fold
-         * any pending idle changes, the respective CPUs might have
-         * missed the tick driven calc_load_account_active() update
-         * due to NO_HZ.
-         */
-        delta = calc_load_fold_idle();
-        if (delta)
-                atomic_long_add(delta, &calc_load_tasks);
-
-        /*
-         * It could be the one fold was all it took, we done!
-         */
-        if (time_before(jiffies, calc_load_update + 10))
-                return;
-
-        /*
-         * Catch-up, fold however many we are behind still
-         */
-        delta = jiffies - calc_load_update - 10;
-        n = 1 + (delta / LOAD_FREQ);
+        if (!time_before(jiffies, calc_load_update + 10)) {
+                /*
+                 * Catch-up, fold however many we are behind still
+                 */
+                delta = jiffies - calc_load_update - 10;
+                n = 1 + (delta / LOAD_FREQ);
 
         active = atomic_long_read(&calc_load_tasks);
         active = active > 0 ? active * FIXED_1 : 0;
 
         avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
         avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
         avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
 
         calc_load_update += n * LOAD_FREQ;
-}
-#else
-void calc_load_account_idle(struct rq *this_rq)
-{
-}
+        }
 
-static inline long calc_load_fold_idle(void)
-{
-        return 0;
+        /*
+         * Flip the idle index...
+         *
+         * Make sure we first write the new time then flip the index, so that
+         * calc_load_write_idx() will see the new time when it reads the new
+         * index, this avoids a double flip messing things up.
+         */
+        smp_wmb();
+        calc_load_idx++;
 }
+#else /* !CONFIG_NO_HZ */
 
-static void calc_global_nohz(void)
-{
-}
-#endif
+static inline long calc_load_fold_idle(void) { return 0; }
+static inline void calc_global_nohz(void) { }
 
-/**
- * get_avenrun - get the load average array
- * @loads:  pointer to dest load array
- * @offset: offset to add
- * @shift:  shift count to shift the result left
- *
- * These values are estimates at best, so no need for locking.
- */
-void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
-{
-        loads[0] = (avenrun[0] + offset) << shift;
-        loads[1] = (avenrun[1] + offset) << shift;
-        loads[2] = (avenrun[2] + offset) << shift;
-}
+#endif /* CONFIG_NO_HZ */
 
 /*
  * calc_load - update the avenrun load estimates 10 ticks after the
@@ -2369,11 +2495,18 @@ void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
  */
 void calc_global_load(unsigned long ticks)
 {
-        long active;
+        long active, delta;
 
         if (time_before(jiffies, calc_load_update + 10))
                 return;
 
+        /*
+         * Fold the 'old' idle-delta to include all NO_HZ cpus.
+         */
+        delta = calc_load_fold_idle();
+        if (delta)
+                atomic_long_add(delta, &calc_load_tasks);
+
         active = atomic_long_read(&calc_load_tasks);
         active = active > 0 ? active * FIXED_1 : 0;
 
@@ -2384,12 +2517,7 @@ void calc_global_load(unsigned long ticks)
         calc_load_update += LOAD_FREQ;
 
         /*
-         * Account one period with whatever state we found before
-         * folding in the nohz state and ageing the entire idle period.
-         *
-         * This avoids loosing a sample when we go idle between
-         * calc_load_account_active() (10 ticks ago) and now and thus
-         * under-accounting.
+         * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
          */
         calc_global_nohz();
 }
@@ -2406,7 +2534,6 @@ static void calc_load_account_active(struct rq *this_rq)
                 return;
 
         delta = calc_load_fold_active(this_rq);
-        delta += calc_load_fold_idle();
         if (delta)
                 atomic_long_add(delta, &calc_load_tasks);
 
@@ -2414,6 +2541,10 @@ static void calc_load_account_active(struct rq *this_rq)
 }
 
 /*
+ * End of global load-average stuff
+ */
+
+/*
  * The exact cpuload at various idx values, calculated at every tick would be
  * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
  *
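An aside on the catch-up path in calc_global_nohz() above: folding n missed LOAD_FREQ windows amounts to applying the decay n times with the same active count. A naive user-space stand-in (illustrative only; the kernel's calc_load_n() computes exp^n with a fixed-point power helper rather than looping) looks like this:

#include <stdio.h>

#define FSHIFT   11
#define FIXED_1  (1 << FSHIFT)
#define EXP_1    1884          /* fixed-point exp(-5sec/1min) */

static unsigned long calc_load(unsigned long load, unsigned long exp,
                               unsigned long active)
{
    load *= exp;
    load += active * (FIXED_1 - exp);
    return load >> FSHIFT;
}

/* Naive stand-in for calc_load_n(): decay over n missed windows by
 * iterating the single-window update. */
static unsigned long calc_load_n(unsigned long load, unsigned long exp,
                                 unsigned long active, unsigned int n)
{
    while (n--)
        load = calc_load(load, exp, active);
    return load;
}

int main(void)
{
    unsigned long avg = 2 * FIXED_1;    /* load was ~2.00 ... */

    /* ... then the machine sat in NOHZ idle for 6 windows (30 s). */
    avg = calc_load_n(avg, EXP_1, 0, 6);
    printf("1-min load after catch-up ~ %lu.%02lu\n",
           avg >> FSHIFT, ((avg & (FIXED_1 - 1)) * 100) >> FSHIFT);
    return 0;
}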
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index b44d604b35d1..b6baf370cae9 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -25,7 +25,6 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
 static struct task_struct *pick_next_task_idle(struct rq *rq)
 {
         schedstat_inc(rq, sched_goidle);
-        calc_load_account_idle(rq);
         return rq->idle;
 }
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 6d52cea7f33d..55844f24435a 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -942,8 +942,6 @@ static inline u64 sched_avg_period(void)
         return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
 }
 
-void calc_load_account_idle(struct rq *this_rq);
-
 #ifdef CONFIG_SCHED_HRTICK
 
 /*
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 869997833928..4a08472c3ca7 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -406,6 +406,7 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
          */
         if (!ts->tick_stopped) {
                 select_nohz_load_balancer(1);
+                calc_load_enter_idle();
 
                 ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
                 ts->tick_stopped = 1;
@@ -597,6 +598,7 @@ void tick_nohz_idle_exit(void)
         account_idle_ticks(ticks);
 #endif
 
+        calc_load_exit_idle();
         touch_softlockup_watchdog();
         /*
          * Cancel the scheduled timer and restore the tick
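To make the NO_HZ bookkeeping in kernel/sched/core.c above easier to follow, here is a toy, single-threaded model (illustrative only, not kernel code: jiffies, the per-rq window handling and the smp_rmb()/smp_wmb() ordering are all elided, and the helper names are made up) of the two-slot idle-delta scheme: once the fold window opens, writers park their delta in the next slot, the reader drains only the current slot, and the index flips after the fold.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Two idle-delta accumulators plus an index, mirroring calc_load_idle[2]
 * and calc_load_idx in the patch (toy version, no memory barriers). */
static atomic_long idle_delta[2];
static int idle_idx;

/* Has the current fold window already opened? Stands in for
 * !time_before(jiffies, calc_load_update) in the kernel. */
static bool window_open;

static int write_idx(void)
{
    /* Once the window has opened, new idle deltas go into the *next*
     * slot so they are not folded into the window being sampled. */
    return (idle_idx + window_open) & 1;
}

static int read_idx(void)
{
    return idle_idx & 1;
}

/* A cpu going NOHZ-idle parks its pending delta. */
static void enter_idle(long delta)
{
    atomic_fetch_add(&idle_delta[write_idx()], delta);
}

/* The global fold drains only the old slot, then flips the index. */
static long fold_idle(void)
{
    long delta = atomic_exchange(&idle_delta[read_idx()], 0);
    idle_idx++;
    return delta;
}

int main(void)
{
    enter_idle(2);          /* idle before the window opens: old slot */
    window_open = true;
    enter_idle(5);          /* idle after it opened: lands in the new slot */

    printf("folded now:  %ld\n", fold_idle());  /* 2 */
    window_open = false;
    printf("folded next: %ld\n", fold_idle());  /* 5 */
    return 0;
}

The read/write index skew is the same trick the patch documents with its 0s/5s/10s diagram: the delta accumulated after a window opens is deliberately kept out of that window's fold and only shows up in the next one.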