author     Ingo Molnar <mingo@elte.hu>   2010-08-31 03:45:21 -0400
committer  Ingo Molnar <mingo@elte.hu>   2010-08-31 03:45:46 -0400
commit     daab7fc734a53fdeaf844b7c03053118ad1769da (patch)
tree       575deb3cdcc6dda562acaed6f7c29bc81ae01cf2 /kernel/sched.c
parent     774ea0bcb27f57b6fd521b3b6c43237782fed4b9 (diff)
parent     2bfc96a127bc1cc94d26bfaa40159966064f9c8c (diff)
Merge commit 'v2.6.36-rc3' into x86/memblock
Conflicts:
	arch/x86/kernel/trampoline.c
	mm/memblock.c
Merge reason: Resolve the conflicts, update to latest upstream.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--   kernel/sched.c | 407
1 file changed, 305 insertions(+), 102 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index f52a8801b7a2..09b574e7f4df 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -77,6 +77,7 @@
 #include <asm/irq_regs.h>
 
 #include "sched_cpupri.h"
+#include "workqueue_sched.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
@@ -456,9 +457,10 @@ struct rq {
 	unsigned long nr_running;
 	#define CPU_LOAD_IDX_MAX 5
 	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
+	unsigned long last_load_update_tick;
 #ifdef CONFIG_NO_HZ
 	u64 nohz_stamp;
-	unsigned char in_nohz_recently;
+	unsigned char nohz_balance_kick;
 #endif
 	unsigned int skip_clock_update;
 
@@ -1193,6 +1195,27 @@ static void resched_cpu(int cpu)
 
 #ifdef CONFIG_NO_HZ
 /*
+ * In the semi idle case, use the nearest busy cpu for migrating timers
+ * from an idle cpu. This is good for power-savings.
+ *
+ * We don't do similar optimization for completely idle system, as
+ * selecting an idle cpu will add more delays to the timers than intended
+ * (as that cpu's timer base may not be uptodate wrt jiffies etc).
+ */
+int get_nohz_timer_target(void)
+{
+	int cpu = smp_processor_id();
+	int i;
+	struct sched_domain *sd;
+
+	for_each_domain(cpu, sd) {
+		for_each_cpu(i, sched_domain_span(sd))
+			if (!idle_cpu(i))
+				return i;
+	}
+	return cpu;
+}
+/*
  * When add_timer_on() enqueues a timer into the timer wheel of an
  * idle CPU then this timer might expire before the next timer event
  * which is scheduled to wake up that CPU. In case of a completely
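The new get_nohz_timer_target() walks the calling CPU's sched domains from the smallest span outward and returns the first non-idle CPU it finds, falling back to the caller itself; timers armed from an idle CPU are then queued on a nearby busy one instead. A minimal user-space sketch of that selection policy (the topology, idle flags and helper names below are invented for illustration; this is not kernel code):

```c
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* Invented example state: which CPUs are idle right now. */
static bool cpu_idle[NR_CPUS] = { true, false, true, true };

/*
 * Invented domain spans for CPU 0, smallest first: its sibling pair,
 * then the whole package. -1 terminates a span.
 */
static const int spans[2][NR_CPUS + 1] = {
	{ 0, 1, -1 },
	{ 0, 1, 2, 3, -1 },
};

/* Same policy as get_nohz_timer_target(): first non-idle CPU in the
 * smallest domain wins; fall back to the requesting CPU itself. */
static int toy_nohz_timer_target(int cpu)
{
	for (int level = 0; level < 2; level++)
		for (int i = 0; spans[level][i] >= 0; i++)
			if (!cpu_idle[spans[level][i]])
				return spans[level][i];
	return cpu;
}

int main(void)
{
	/* CPU 0 is idle but its sibling CPU 1 is busy, so CPU 1 is picked. */
	printf("timer target for cpu 0: %d\n", toy_nohz_timer_target(0));
	return 0;
}
```

Checking the smallest domain first keeps the migrated timer as close as possible to the CPU that armed it, which is the power-saving behaviour the comment describes.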
@@ -1232,16 +1255,6 @@ void wake_up_idle_cpu(int cpu)
 		smp_send_reschedule(cpu);
 }
 
-int nohz_ratelimit(int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-	u64 diff = rq->clock - rq->nohz_stamp;
-
-	rq->nohz_stamp = rq->clock;
-
-	return diff < (NSEC_PER_SEC / HZ) >> 1;
-}
-
 #endif /* CONFIG_NO_HZ */
 
 static u64 sched_avg_period(void)
@@ -1652,7 +1665,7 @@ static void update_shares(struct sched_domain *sd)
 	if (root_task_group_empty())
 		return;
 
-	now = cpu_clock(raw_smp_processor_id());
+	now = local_clock();
 	elapsed = now - sd->last_update;
 
 	if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) {
@@ -1805,6 +1818,7 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
 static void calc_load_account_idle(struct rq *this_rq);
 static void update_sysctl(void);
 static int get_update_sysctl_factor(void);
+static void update_cpu_load(struct rq *this_rq);
 
 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 {
@@ -2267,11 +2281,55 @@ static void update_avg(u64 *avg, u64 sample)
 }
 #endif
 
-/***
+static inline void ttwu_activate(struct task_struct *p, struct rq *rq,
+				 bool is_sync, bool is_migrate, bool is_local,
+				 unsigned long en_flags)
+{
+	schedstat_inc(p, se.statistics.nr_wakeups);
+	if (is_sync)
+		schedstat_inc(p, se.statistics.nr_wakeups_sync);
+	if (is_migrate)
+		schedstat_inc(p, se.statistics.nr_wakeups_migrate);
+	if (is_local)
+		schedstat_inc(p, se.statistics.nr_wakeups_local);
+	else
+		schedstat_inc(p, se.statistics.nr_wakeups_remote);
+
+	activate_task(rq, p, en_flags);
+}
+
+static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq,
+					int wake_flags, bool success)
+{
+	trace_sched_wakeup(p, success);
+	check_preempt_curr(rq, p, wake_flags);
+
+	p->state = TASK_RUNNING;
+#ifdef CONFIG_SMP
+	if (p->sched_class->task_woken)
+		p->sched_class->task_woken(rq, p);
+
+	if (unlikely(rq->idle_stamp)) {
+		u64 delta = rq->clock - rq->idle_stamp;
+		u64 max = 2*sysctl_sched_migration_cost;
+
+		if (delta > max)
+			rq->avg_idle = max;
+		else
+			update_avg(&rq->avg_idle, delta);
+		rq->idle_stamp = 0;
+	}
+#endif
+	/* if a worker is waking up, notify workqueue */
+	if ((p->flags & PF_WQ_WORKER) && success)
+		wq_worker_waking_up(p, cpu_of(rq));
+}
+
+/**
  * try_to_wake_up - wake up a thread
- * @p: the to-be-woken-up thread
+ * @p: the thread to be awakened
  * @state: the mask of task states that can be woken
- * @sync: do a synchronous wakeup?
+ * @wake_flags: wake modifier flags (WF_*)
  *
  * Put it on the run-queue if it's not already there. The "current"
  * thread is always on the run-queue (except when the actual
@@ -2279,7 +2337,8 @@ static void update_avg(u64 *avg, u64 sample)
  * the simpler "current->state = TASK_RUNNING" to mark yourself
  * runnable without the overhead of this.
  *
- * returns failure only if the task is already active.
+ * Returns %true if @p was woken up, %false if it was already running
+ * or @state didn't match @p's state.
  */
 static int try_to_wake_up(struct task_struct *p, unsigned int state,
 			  int wake_flags)
@@ -2359,38 +2418,11 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 
 out_activate:
 #endif /* CONFIG_SMP */
-	schedstat_inc(p, se.statistics.nr_wakeups);
-	if (wake_flags & WF_SYNC)
-		schedstat_inc(p, se.statistics.nr_wakeups_sync);
-	if (orig_cpu != cpu)
-		schedstat_inc(p, se.statistics.nr_wakeups_migrate);
-	if (cpu == this_cpu)
-		schedstat_inc(p, se.statistics.nr_wakeups_local);
-	else
-		schedstat_inc(p, se.statistics.nr_wakeups_remote);
-	activate_task(rq, p, en_flags);
+	ttwu_activate(p, rq, wake_flags & WF_SYNC, orig_cpu != cpu,
+		      cpu == this_cpu, en_flags);
 	success = 1;
-
 out_running:
-	trace_sched_wakeup(p, success);
-	check_preempt_curr(rq, p, wake_flags);
-
-	p->state = TASK_RUNNING;
-#ifdef CONFIG_SMP
-	if (p->sched_class->task_woken)
-		p->sched_class->task_woken(rq, p);
-
-	if (unlikely(rq->idle_stamp)) {
-		u64 delta = rq->clock - rq->idle_stamp;
-		u64 max = 2*sysctl_sched_migration_cost;
-
-		if (delta > max)
-			rq->avg_idle = max;
-		else
-			update_avg(&rq->avg_idle, delta);
-		rq->idle_stamp = 0;
-	}
-#endif
+	ttwu_post_activation(p, rq, wake_flags, success);
 out:
 	task_rq_unlock(rq, &flags);
 	put_cpu();
@@ -2399,6 +2431,37 @@ out:
 }
 
 /**
+ * try_to_wake_up_local - try to wake up a local task with rq lock held
+ * @p: the thread to be awakened
+ *
+ * Put @p on the run-queue if it's not alredy there. The caller must
+ * ensure that this_rq() is locked, @p is bound to this_rq() and not
+ * the current task. this_rq() stays locked over invocation.
+ */
+static void try_to_wake_up_local(struct task_struct *p)
+{
+	struct rq *rq = task_rq(p);
+	bool success = false;
+
+	BUG_ON(rq != this_rq());
+	BUG_ON(p == current);
+	lockdep_assert_held(&rq->lock);
+
+	if (!(p->state & TASK_NORMAL))
+		return;
+
+	if (!p->se.on_rq) {
+		if (likely(!task_running(rq, p))) {
+			schedstat_inc(rq, ttwu_count);
+			schedstat_inc(rq, ttwu_local);
+		}
+		ttwu_activate(p, rq, false, false, true, ENQUEUE_WAKEUP);
+		success = true;
+	}
+	ttwu_post_activation(p, rq, 0, success);
+}
+
+/**
  * wake_up_process - Wake up a specific process
  * @p: The process to be woken up.
  *
@@ -3012,23 +3075,102 @@ static void calc_load_account_active(struct rq *this_rq)
 }
 
 /*
+ * The exact cpuload at various idx values, calculated at every tick would be
+ * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
+ *
+ * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
+ * on nth tick when cpu may be busy, then we have:
+ * load = ((2^idx - 1) / 2^idx)^(n-1) * load
+ * load = (2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
+ *
+ * decay_load_missed() below does efficient calculation of
+ * load = ((2^idx - 1) / 2^idx)^(n-1) * load
+ * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
+ *
+ * The calculation is approximated on a 128 point scale.
+ * degrade_zero_ticks is the number of ticks after which load at any
+ * particular idx is approximated to be zero.
+ * degrade_factor is a precomputed table, a row for each load idx.
+ * Each column corresponds to degradation factor for a power of two ticks,
+ * based on 128 point scale.
+ * Example:
+ * row 2, col 3 (=12) says that the degradation at load idx 2 after
+ * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
+ *
+ * With this power of 2 load factors, we can degrade the load n times
+ * by looking at 1 bits in n and doing as many mult/shift instead of
+ * n mult/shifts needed by the exact degradation.
+ */
+#define DEGRADE_SHIFT		7
+static const unsigned char
+		degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
+static const unsigned char
+		degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
+					{0, 0, 0, 0, 0, 0, 0, 0},
+					{64, 32, 8, 0, 0, 0, 0, 0},
+					{96, 72, 40, 12, 1, 0, 0},
+					{112, 98, 75, 43, 15, 1, 0},
+					{120, 112, 98, 76, 45, 16, 2} };
+
+/*
+ * Update cpu_load for any missed ticks, due to tickless idle. The backlog
+ * would be when CPU is idle and so we just decay the old load without
+ * adding any new load.
+ */
+static unsigned long
+decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
+{
+	int j = 0;
+
+	if (!missed_updates)
+		return load;
+
+	if (missed_updates >= degrade_zero_ticks[idx])
+		return 0;
+
+	if (idx == 1)
+		return load >> missed_updates;
+
+	while (missed_updates) {
+		if (missed_updates % 2)
+			load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
+
+		missed_updates >>= 1;
+		j++;
+	}
+	return load;
+}
+
+/*
  * Update rq->cpu_load[] statistics. This function is usually called every
- * scheduler tick (TICK_NSEC).
+ * scheduler tick (TICK_NSEC). With tickless idle this will not be called
+ * every tick. We fix it up based on jiffies.
  */
 static void update_cpu_load(struct rq *this_rq)
 {
 	unsigned long this_load = this_rq->load.weight;
+	unsigned long curr_jiffies = jiffies;
+	unsigned long pending_updates;
 	int i, scale;
 
 	this_rq->nr_load_updates++;
 
+	/* Avoid repeated calls on same jiffy, when moving in and out of idle */
+	if (curr_jiffies == this_rq->last_load_update_tick)
+		return;
+
+	pending_updates = curr_jiffies - this_rq->last_load_update_tick;
+	this_rq->last_load_update_tick = curr_jiffies;
+
 	/* Update our load: */
-	for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
+	this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
+	for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
 		unsigned long old_load, new_load;
 
 		/* scale is effectively 1 << i now, and >> i divides by scale */
 
 		old_load = this_rq->cpu_load[i];
+		old_load = decay_load_missed(old_load, pending_updates - 1, i);
 		new_load = this_load;
 		/*
 		 * Round up the averaging division if load is increasing. This
@@ -3036,9 +3178,15 @@ static void update_cpu_load(struct rq *this_rq)
 		 * example.
 		 */
 		if (new_load > old_load)
-			new_load += scale-1;
-		this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
+			new_load += scale - 1;
+
+		this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
 	}
+}
+
+static void update_cpu_load_active(struct rq *this_rq)
+{
+	update_cpu_load(this_rq);
 
 	calc_load_account_active(this_rq);
 }
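The added decay_load_missed() can be sanity-checked on its own: per the comment above, entry [2][3] of degrade_factor (the value 12) approximates (3/4)^8 on a 128-point scale, so a load that went unsampled for 8 ticks at idx 2 should come back scaled by roughly 12/128. A user-space copy of the table and loop (a verification sketch only, not part of the kernel build) shows exactly that:

```c
#include <stdio.h>

#define CPU_LOAD_IDX_MAX 5
#define DEGRADE_SHIFT 7

/* Copied from the hunk above; omitted trailing entries default to 0. */
static const unsigned char degrade_zero_ticks[CPU_LOAD_IDX_MAX] =
	{0, 8, 32, 64, 128};
static const unsigned char degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
	{0, 0, 0, 0, 0, 0, 0, 0},
	{64, 32, 8, 0, 0, 0, 0, 0},
	{96, 72, 40, 12, 1, 0, 0},
	{112, 98, 75, 43, 15, 1, 0},
	{120, 112, 98, 76, 45, 16, 2} };

/* Same logic as the kernel helper: one multiply/shift per set bit in
 * missed_updates, using the precomputed factors for 1, 2, 4, ... ticks. */
static unsigned long decay_load_missed(unsigned long load,
				       unsigned long missed_updates, int idx)
{
	int j = 0;

	if (!missed_updates)
		return load;
	if (missed_updates >= degrade_zero_ticks[idx])
		return 0;
	if (idx == 1)
		return load >> missed_updates;

	while (missed_updates) {
		if (missed_updates % 2)
			load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
		missed_updates >>= 1;
		j++;
	}
	return load;
}

int main(void)
{
	/* 8 missed ticks at idx 2: only bit 3 is set, so a single
	 * load * 12 >> 7 step turns 1024 into 96. */
	printf("%lu\n", decay_load_missed(1024, 8, 2));
	return 0;
}
```

Starting from a load of 1024 the sketch prints 96, against an exact value of 1024 * (3/4)^8 ≈ 102 — one multiply and one shift instead of eight.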
@@ -3426,7 +3574,7 @@ void scheduler_tick(void)
 
 	raw_spin_lock(&rq->lock);
 	update_rq_clock(rq);
-	update_cpu_load(rq);
+	update_cpu_load_active(rq);
 	curr->sched_class->task_tick(rq, curr, 0);
 	raw_spin_unlock(&rq->lock);
 
@@ -3598,7 +3746,6 @@ need_resched:
 	rq = cpu_rq(cpu);
 	rcu_note_context_switch(cpu);
 	prev = rq->curr;
-	switch_count = &prev->nivcsw;
 
 	release_kernel_lock(prev);
 need_resched_nonpreemptible:
@@ -3611,11 +3758,26 @@ need_resched_nonpreemptible:
 	raw_spin_lock_irq(&rq->lock);
 	clear_tsk_need_resched(prev);
 
+	switch_count = &prev->nivcsw;
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
-		if (unlikely(signal_pending_state(prev->state, prev)))
+		if (unlikely(signal_pending_state(prev->state, prev))) {
 			prev->state = TASK_RUNNING;
-		else
+		} else {
+			/*
+			 * If a worker is going to sleep, notify and
+			 * ask workqueue whether it wants to wake up a
+			 * task to maintain concurrency. If so, wake
+			 * up the task.
+			 */
+			if (prev->flags & PF_WQ_WORKER) {
+				struct task_struct *to_wakeup;
+
+				to_wakeup = wq_worker_sleeping(prev, cpu);
+				if (to_wakeup)
+					try_to_wake_up_local(to_wakeup);
+			}
 			deactivate_task(rq, prev, DEQUEUE_SLEEP);
+		}
 		switch_count = &prev->nvcsw;
 	}
 
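The hook added to schedule() above is the scheduler half of the workqueue concurrency management: when a worker task blocks, wq_worker_sleeping() may hand back another worker that should be woken — via try_to_wake_up_local(), since rq->lock is already held at this point — so that the pool keeps a runnable worker while work is pending. A toy model of that bookkeeping (the pool structure and field names are invented for illustration; this is not the workqueue implementation):

```c
#include <stdio.h>

/* Toy worker pool: counts runnable workers and remembers one idle worker. */
struct toy_pool {
	int nr_running;		/* workers currently runnable */
	int pending_work;	/* queued work items */
	int idle_worker;	/* id of a parked idle worker, or -1 */
};

/* Analogue of wq_worker_sleeping(): called when a runnable worker blocks.
 * Returns the id of a worker to wake, or -1 if none is needed. */
static int toy_worker_sleeping(struct toy_pool *pool)
{
	pool->nr_running--;
	if (pool->nr_running == 0 && pool->pending_work && pool->idle_worker >= 0) {
		int id = pool->idle_worker;

		pool->idle_worker = -1;
		pool->nr_running++;	/* the woken worker becomes runnable */
		return id;
	}
	return -1;
}

int main(void)
{
	struct toy_pool pool = { .nr_running = 1, .pending_work = 3, .idle_worker = 7 };

	/* The last runnable worker blocks on I/O: wake worker 7 to keep going. */
	printf("wake worker: %d\n", toy_worker_sleeping(&pool));
	return 0;
}
```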
@@ -3637,8 +3799,10 @@ need_resched_nonpreemptible:
 
 	context_switch(rq, prev, next); /* unlocks the rq */
 	/*
-	 * the context switch might have flipped the stack from under
-	 * us, hence refresh the local variables.
+	 * The context switch have flipped the stack from under us
+	 * and restored the local variables which were saved when
+	 * this task called schedule() in the past. prev == current
+	 * is still correct, but it can be moved to another cpu/rq.
 	 */
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
@@ -3647,11 +3811,8 @@ need_resched_nonpreemptible:
 
 	post_schedule(rq);
 
-	if (unlikely(reacquire_kernel_lock(current) < 0)) {
-		prev = rq->curr;
-		switch_count = &prev->nivcsw;
+	if (unlikely(reacquire_kernel_lock(prev)))
 		goto need_resched_nonpreemptible;
-	}
 
 	preempt_enable_no_resched();
 	if (need_resched())
@@ -3704,8 +3865,16 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 		/*
 		 * Owner changed, break to re-assess state.
 		 */
-		if (lock->owner != owner)
+		if (lock->owner != owner) {
+			/*
+			 * If the lock has switched to a different owner,
+			 * we likely have heavy contention. Return 0 to quit
+			 * optimistic spinning and not contend further:
+			 */
+			if (lock->owner)
+				return 0;
 			break;
+		}
 
 		/*
 		 * Is that owner really running on that cpu?
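The mutex_spin_on_owner() change adds an early bail-out: if the owner the spinner was watching is replaced by another non-NULL owner, contention is heavy and the spinner gives up rather than competing further, while an owner that drops to NULL still means the lock was released and is worth retrying. The decision rule in isolation (a simplified sketch with a plain pointer standing in for the owner field; not the kernel mutex code):

```c
#include <stdbool.h>
#include <stdio.h>

struct toy_task { int id; };
struct toy_lock { struct toy_task *owner; };	/* NULL when unlocked */

/* Decide whether to keep optimistically spinning after noticing that the
 * owner we were watching is no longer the owner. Mirrors the added check:
 * a new non-NULL owner means heavy contention, so stop spinning. */
static bool keep_spinning(const struct toy_lock *lock, const struct toy_task *watched)
{
	if (lock->owner != watched) {
		if (lock->owner)	/* taken over by someone else */
			return false;
		/* owner became NULL: the lock was released, worth retrying */
	}
	return true;
}

int main(void)
{
	struct toy_task a = { 1 }, b = { 2 };
	struct toy_lock lock = { .owner = &b };

	printf("%d\n", keep_spinning(&lock, &a));	/* 0: new owner, back off */
	lock.owner = NULL;
	printf("%d\n", keep_spinning(&lock, &a));	/* 1: released, keep trying */
	return 0;
}
```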
@@ -3726,7 +3895,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
  * off of preempt_enable. Kernel preemptions off return from interrupt
  * occur there and call schedule directly.
  */
-asmlinkage void __sched preempt_schedule(void)
+asmlinkage void __sched notrace preempt_schedule(void)
 {
 	struct thread_info *ti = current_thread_info();
 
@@ -3738,9 +3907,9 @@ asmlinkage void __sched preempt_schedule(void)
 		return;
 
 	do {
-		add_preempt_count(PREEMPT_ACTIVE);
+		add_preempt_count_notrace(PREEMPT_ACTIVE);
 		schedule();
-		sub_preempt_count(PREEMPT_ACTIVE);
+		sub_preempt_count_notrace(PREEMPT_ACTIVE);
 
 		/*
 		 * Check again in case we missed a preemption opportunity
@@ -4441,12 +4610,8 @@ recheck:
 	 */
 	if (user && !capable(CAP_SYS_NICE)) {
 		if (rt_policy(policy)) {
-			unsigned long rlim_rtprio;
-
-			if (!lock_task_sighand(p, &flags))
-				return -ESRCH;
-			rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
-			unlock_task_sighand(p, &flags);
+			unsigned long rlim_rtprio =
+					task_rlimit(p, RLIMIT_RTPRIO);
 
 			/* can't set/change the rt policy */
 			if (policy != p->policy && !rlim_rtprio)
@@ -5816,20 +5981,49 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
  */
 static struct notifier_block __cpuinitdata migration_notifier = {
 	.notifier_call = migration_call,
-	.priority = 10
+	.priority = CPU_PRI_MIGRATION,
 };
 
+static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
+				      unsigned long action, void *hcpu)
+{
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_ONLINE:
+	case CPU_DOWN_FAILED:
+		set_cpu_active((long)hcpu, true);
+		return NOTIFY_OK;
+	default:
+		return NOTIFY_DONE;
+	}
+}
+
+static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
+					unsigned long action, void *hcpu)
+{
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_DOWN_PREPARE:
+		set_cpu_active((long)hcpu, false);
+		return NOTIFY_OK;
+	default:
+		return NOTIFY_DONE;
+	}
+}
+
 static int __init migration_init(void)
 {
 	void *cpu = (void *)(long)smp_processor_id();
 	int err;
 
-	/* Start one for the boot CPU: */
+	/* Initialize migration for the boot CPU */
 	err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
 	BUG_ON(err == NOTIFY_BAD);
 	migration_call(&migration_notifier, CPU_ONLINE, cpu);
 	register_cpu_notifier(&migration_notifier);
 
+	/* Register cpu active notifiers */
+	cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
+	cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
+
 	return 0;
 }
 early_initcall(migration_init);
@@ -6064,23 +6258,18 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 	free_rootdomain(old_rd);
 }
 
-static int init_rootdomain(struct root_domain *rd, bool bootmem)
+static int init_rootdomain(struct root_domain *rd)
 {
-	gfp_t gfp = GFP_KERNEL;
-
 	memset(rd, 0, sizeof(*rd));
 
-	if (bootmem)
-		gfp = GFP_NOWAIT;
-
-	if (!alloc_cpumask_var(&rd->span, gfp))
+	if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
 		goto out;
-	if (!alloc_cpumask_var(&rd->online, gfp))
+	if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
 		goto free_span;
-	if (!alloc_cpumask_var(&rd->rto_mask, gfp))
+	if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
 		goto free_online;
 
-	if (cpupri_init(&rd->cpupri, bootmem) != 0)
+	if (cpupri_init(&rd->cpupri) != 0)
 		goto free_rto_mask;
 	return 0;
 
@@ -6096,7 +6285,7 @@ out:
 
 static void init_defrootdomain(void)
 {
-	init_rootdomain(&def_root_domain, true);
+	init_rootdomain(&def_root_domain);
 
 	atomic_set(&def_root_domain.refcount, 1);
 }
@@ -6109,7 +6298,7 @@ static struct root_domain *alloc_rootdomain(void)
 	if (!rd)
 		return NULL;
 
-	if (init_rootdomain(rd, false) != 0) {
+	if (init_rootdomain(rd) != 0) {
 		kfree(rd);
 		return NULL;
 	}
@@ -7288,29 +7477,35 @@ int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
 }
 #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
 
-#ifndef CONFIG_CPUSETS
 /*
- * Add online and remove offline CPUs from the scheduler domains.
- * When cpusets are enabled they take over this function.
+ * Update cpusets according to cpu_active mask. If cpusets are
+ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
+ * around partition_sched_domains().
  */
-static int update_sched_domains(struct notifier_block *nfb,
-				unsigned long action, void *hcpu)
+static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
+			     void *hcpu)
 {
-	switch (action) {
+	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-	case CPU_DOWN_PREPARE:
-	case CPU_DOWN_PREPARE_FROZEN:
 	case CPU_DOWN_FAILED:
-	case CPU_DOWN_FAILED_FROZEN:
-		partition_sched_domains(1, NULL, NULL);
+		cpuset_update_active_cpus();
 		return NOTIFY_OK;
+	default:
+		return NOTIFY_DONE;
+	}
+}
 
+static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
+			       void *hcpu)
+{
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_DOWN_PREPARE:
+		cpuset_update_active_cpus();
+		return NOTIFY_OK;
 	default:
 		return NOTIFY_DONE;
 	}
 }
-#endif
 
 static int update_runtime(struct notifier_block *nfb,
 			  unsigned long action, void *hcpu)
@@ -7356,10 +7551,8 @@ void __init sched_init_smp(void)
 	mutex_unlock(&sched_domains_mutex);
 	put_online_cpus();
 
-#ifndef CONFIG_CPUSETS
-	/* XXX: Theoretical race here - CPU may be hotplugged now */
-	hotcpu_notifier(update_sched_domains, 0);
-#endif
+	hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
+	hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
 
 	/* RT runtime code needs to handle some hotplug events */
 	hotcpu_notifier(update_runtime, 0);
@@ -7604,6 +7797,9 @@ void __init sched_init(void)
 
 		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
 			rq->cpu_load[j] = 0;
+
+		rq->last_load_update_tick = jiffies;
+
 #ifdef CONFIG_SMP
 		rq->sd = NULL;
 		rq->rd = NULL;
@@ -7617,6 +7813,10 @@ void __init sched_init(void)
 		rq->idle_stamp = 0;
 		rq->avg_idle = 2*sysctl_sched_migration_cost;
 		rq_attach_root(rq, &def_root_domain);
+#ifdef CONFIG_NO_HZ
+		rq->nohz_balance_kick = 0;
+		init_sched_softirq_csd(&per_cpu(remote_sched_softirq_cb, i));
+#endif
 #endif
 		init_rq_hrtick(rq);
 		atomic_set(&rq->nr_iowait, 0);
@@ -7661,8 +7861,11 @@ void __init sched_init(void)
 	zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
 #ifdef CONFIG_SMP
 #ifdef CONFIG_NO_HZ
-	zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
-	alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
+	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
+	alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT);
+	atomic_set(&nohz.load_balancer, nr_cpu_ids);
+	atomic_set(&nohz.first_pick_cpu, nr_cpu_ids);
+	atomic_set(&nohz.second_pick_cpu, nr_cpu_ids);
 #endif
 	/* May be allocated at isolcpus cmdline parse time */
 	if (cpu_isolated_map == NULL)
