author:    Ingo Molnar <mingo@elte.hu>  2010-06-08 17:20:59 -0400
committer: Ingo Molnar <mingo@elte.hu>  2010-06-08 17:20:59 -0400
commit:    95ae3c59fa8ad616c73745e21154b5af0fb10168
tree:      c08f1bcad8f619c5097208cbfa85170411033095 /kernel/sched.c
parent:    dc61b1d65e353d638b2445f71fb8e5b5630f2415
parent:    21aa9af03d06cb1d19a3738e5cf12acff984e69b
Merge branch 'sched-wq' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq into sched/core
Diffstat (limited to 'kernel/sched.c')
 -rw-r--r--   kernel/sched.c | 205
 1 file changed, 151 insertions(+), 54 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 2aaceebd484c..8f351c56567f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -77,6 +77,7 @@
 #include <asm/irq_regs.h>
 
 #include "sched_cpupri.h"
+#include "workqueue_sched.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
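The header pulled in here, kernel/workqueue_sched.h, is added elsewhere in the merged series and is not part of this file's diff. Judging from the call sites below (wq_worker_waking_up() in ttwu_post_activation() and wq_worker_sleeping() in schedule()), it declares roughly the following — a sketch inferred from usage, not the verbatim header:

/*
 * Scheduler hooks for concurrency-managed workqueues.  Meant to be
 * included only from sched.c and workqueue.c.  Signatures inferred
 * from the call sites in this diff.
 */
void wq_worker_waking_up(struct task_struct *task, unsigned int cpu);
struct task_struct *wq_worker_sleeping(struct task_struct *task,
                                       unsigned int cpu);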
@@ -2264,11 +2265,55 @@ static void update_avg(u64 *avg, u64 sample)
 }
 #endif
 
-/***
+static inline void ttwu_activate(struct task_struct *p, struct rq *rq,
+                                 bool is_sync, bool is_migrate, bool is_local,
+                                 unsigned long en_flags)
+{
+        schedstat_inc(p, se.statistics.nr_wakeups);
+        if (is_sync)
+                schedstat_inc(p, se.statistics.nr_wakeups_sync);
+        if (is_migrate)
+                schedstat_inc(p, se.statistics.nr_wakeups_migrate);
+        if (is_local)
+                schedstat_inc(p, se.statistics.nr_wakeups_local);
+        else
+                schedstat_inc(p, se.statistics.nr_wakeups_remote);
+
+        activate_task(rq, p, en_flags);
+}
+
+static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq,
+                                        int wake_flags, bool success)
+{
+        trace_sched_wakeup(p, success);
+        check_preempt_curr(rq, p, wake_flags);
+
+        p->state = TASK_RUNNING;
+#ifdef CONFIG_SMP
+        if (p->sched_class->task_woken)
+                p->sched_class->task_woken(rq, p);
+
+        if (unlikely(rq->idle_stamp)) {
+                u64 delta = rq->clock - rq->idle_stamp;
+                u64 max = 2*sysctl_sched_migration_cost;
+
+                if (delta > max)
+                        rq->avg_idle = max;
+                else
+                        update_avg(&rq->avg_idle, delta);
+                rq->idle_stamp = 0;
+        }
+#endif
+        /* if a worker is waking up, notify workqueue */
+        if ((p->flags & PF_WQ_WORKER) && success)
+                wq_worker_waking_up(p, cpu_of(rq));
+}
+
+/**
  * try_to_wake_up - wake up a thread
- * @p: the to-be-woken-up thread
+ * @p: the thread to be awakened
  * @state: the mask of task states that can be woken
- * @sync: do a synchronous wakeup?
+ * @wake_flags: wake modifier flags (WF_*)
  *
  * Put it on the run-queue if it's not already there. The "current"
  * thread is always on the run-queue (except when the actual
@@ -2276,7 +2321,8 @@ static void update_avg(u64 *avg, u64 sample)
  * the simpler "current->state = TASK_RUNNING" to mark yourself
  * runnable without the overhead of this.
  *
- * returns failure only if the task is already active.
+ * Returns %true if @p was woken up, %false if it was already running
+ * or @state didn't match @p's state.
  */
 static int try_to_wake_up(struct task_struct *p, unsigned int state,
                           int wake_flags)
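The reworked kernel-doc above refers to wake modifier flags (WF_*). Those definitions live in include/linux/sched.h, outside this diff; a sketch of them as an assumption for reference:

#define WF_SYNC         0x01    /* waker goes to sleep after wakeup */
#define WF_FORK         0x02    /* child wakeup after fork */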
@@ -2356,38 +2402,11 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 
 out_activate:
 #endif /* CONFIG_SMP */
-        schedstat_inc(p, se.statistics.nr_wakeups);
-        if (wake_flags & WF_SYNC)
-                schedstat_inc(p, se.statistics.nr_wakeups_sync);
-        if (orig_cpu != cpu)
-                schedstat_inc(p, se.statistics.nr_wakeups_migrate);
-        if (cpu == this_cpu)
-                schedstat_inc(p, se.statistics.nr_wakeups_local);
-        else
-                schedstat_inc(p, se.statistics.nr_wakeups_remote);
-        activate_task(rq, p, en_flags);
+        ttwu_activate(p, rq, wake_flags & WF_SYNC, orig_cpu != cpu,
+                      cpu == this_cpu, en_flags);
         success = 1;
-
 out_running:
-        trace_sched_wakeup(p, success);
-        check_preempt_curr(rq, p, wake_flags);
-
-        p->state = TASK_RUNNING;
-#ifdef CONFIG_SMP
-        if (p->sched_class->task_woken)
-                p->sched_class->task_woken(rq, p);
-
-        if (unlikely(rq->idle_stamp)) {
-                u64 delta = rq->clock - rq->idle_stamp;
-                u64 max = 2*sysctl_sched_migration_cost;
-
-                if (delta > max)
-                        rq->avg_idle = max;
-                else
-                        update_avg(&rq->avg_idle, delta);
-                rq->idle_stamp = 0;
-        }
-#endif
+        ttwu_post_activation(p, rq, wake_flags, success);
 out:
         task_rq_unlock(rq, &flags);
         put_cpu();
@@ -2396,6 +2415,37 @@ out:
 }
 
 /**
+ * try_to_wake_up_local - try to wake up a local task with rq lock held
+ * @p: the thread to be awakened
+ *
+ * Put @p on the run-queue if it's not already there.  The caller must
+ * ensure that this_rq() is locked, @p is bound to this_rq() and not
+ * the current task.  this_rq() stays locked over invocation.
+ */
+static void try_to_wake_up_local(struct task_struct *p)
+{
+        struct rq *rq = task_rq(p);
+        bool success = false;
+
+        BUG_ON(rq != this_rq());
+        BUG_ON(p == current);
+        lockdep_assert_held(&rq->lock);
+
+        if (!(p->state & TASK_NORMAL))
+                return;
+
+        if (!p->se.on_rq) {
+                if (likely(!task_running(rq, p))) {
+                        schedstat_inc(rq, ttwu_count);
+                        schedstat_inc(rq, ttwu_local);
+                }
+                ttwu_activate(p, rq, false, false, true, ENQUEUE_WAKEUP);
+                success = true;
+        }
+        ttwu_post_activation(p, rq, 0, success);
+}
+
+/**
  * wake_up_process - Wake up a specific process
  * @p: The process to be woken up.
  *
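try_to_wake_up_local() only acts on ordinary sleepers: the early return keys off TASK_NORMAL, which include/linux/sched.h defines as the union of the two common sleep states:

#define TASK_NORMAL     (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)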
@@ -3600,10 +3650,24 @@ need_resched_nonpreemptible:
         clear_tsk_need_resched(prev);
 
         if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
-                if (unlikely(signal_pending_state(prev->state, prev)))
+                if (unlikely(signal_pending_state(prev->state, prev))) {
                         prev->state = TASK_RUNNING;
-                else
+                } else {
+                        /*
+                         * If a worker is going to sleep, notify and
+                         * ask workqueue whether it wants to wake up a
+                         * task to maintain concurrency.  If so, wake
+                         * up the task.
+                         */
+                        if (prev->flags & PF_WQ_WORKER) {
+                                struct task_struct *to_wakeup;
+
+                                to_wakeup = wq_worker_sleeping(prev, cpu);
+                                if (to_wakeup)
+                                        try_to_wake_up_local(to_wakeup);
+                        }
                         deactivate_task(rq, prev, DEQUEUE_SLEEP);
+                }
                 switch_count = &prev->nvcsw;
         }
 
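Together, wq_worker_sleeping() and wq_worker_waking_up() let the workqueue code keep a per-CPU count of runnable workers and request a replacement wakeup the moment the last one blocks, without the rq lock ever being dropped. A minimal userspace model of that bookkeeping (an illustration of the idea, not the kernel/workqueue.c implementation):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int nr_running;   /* runnable workers on this CPU */

/* cf. wq_worker_waking_up(): a worker became runnable */
static void model_worker_waking_up(void)
{
        atomic_fetch_add(&nr_running, 1);
}

/*
 * cf. wq_worker_sleeping(): a worker is about to block; returns true
 * when the caller should wake another worker to keep the CPU's
 * worker pool from going idle with work still pending.
 */
static bool model_worker_sleeping(void)
{
        return atomic_fetch_sub(&nr_running, 1) == 1;
}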
@@ -5804,20 +5868,49 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
  */
 static struct notifier_block __cpuinitdata migration_notifier = {
         .notifier_call = migration_call,
-        .priority = 10
+        .priority = CPU_PRI_MIGRATION,
 };
 
+static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
+                                      unsigned long action, void *hcpu)
+{
+        switch (action & ~CPU_TASKS_FROZEN) {
+        case CPU_ONLINE:
+        case CPU_DOWN_FAILED:
+                set_cpu_active((long)hcpu, true);
+                return NOTIFY_OK;
+        default:
+                return NOTIFY_DONE;
+        }
+}
+
+static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
+                                        unsigned long action, void *hcpu)
+{
+        switch (action & ~CPU_TASKS_FROZEN) {
+        case CPU_DOWN_PREPARE:
+                set_cpu_active((long)hcpu, false);
+                return NOTIFY_OK;
+        default:
+                return NOTIFY_DONE;
+        }
+}
+
 static int __init migration_init(void)
 {
         void *cpu = (void *)(long)smp_processor_id();
         int err;
 
-        /* Start one for the boot CPU: */
+        /* Initialize migration for the boot CPU */
         err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
         BUG_ON(err == NOTIFY_BAD);
         migration_call(&migration_notifier, CPU_ONLINE, cpu);
         register_cpu_notifier(&migration_notifier);
 
+        /* Register cpu active notifiers */
+        cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
+        cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
+
         return 0;
 }
 early_initcall(migration_init);
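CPU_PRI_MIGRATION, CPU_PRI_SCHED_ACTIVE and CPU_PRI_SCHED_INACTIVE come from a cpu-notifier priority enum the merged series adds to include/linux/cpu.h; the ordering guarantees the scheduler sees cpu_active changes before everyone else on bring-up and after everyone else on teardown. A sketch of those definitions, with the exact values taken as an assumption since the header is outside this diff:

enum {
        /* sched's active-mask updates must bracket everything else */
        CPU_PRI_SCHED_ACTIVE    = INT_MAX,
        CPU_PRI_CPUSET_ACTIVE   = INT_MAX - 1,
        CPU_PRI_SCHED_INACTIVE  = INT_MIN + 1,
        CPU_PRI_CPUSET_INACTIVE = INT_MIN,

        /* migration should happen early */
        CPU_PRI_MIGRATION       = 10,
};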
@@ -7276,29 +7369,35 @@ int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
 }
 #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
 
-#ifndef CONFIG_CPUSETS
 /*
- * Add online and remove offline CPUs from the scheduler domains.
- * When cpusets are enabled they take over this function.
+ * Update cpusets according to cpu_active mask.  If cpusets are
+ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
+ * around partition_sched_domains().
  */
-static int update_sched_domains(struct notifier_block *nfb,
-                                unsigned long action, void *hcpu)
+static int __cpuexit cpuset_cpu_active(struct notifier_block *nfb,
+                                       unsigned long action, void *hcpu)
 {
-        switch (action) {
+        switch (action & ~CPU_TASKS_FROZEN) {
         case CPU_ONLINE:
-        case CPU_ONLINE_FROZEN:
-        case CPU_DOWN_PREPARE:
-        case CPU_DOWN_PREPARE_FROZEN:
         case CPU_DOWN_FAILED:
-        case CPU_DOWN_FAILED_FROZEN:
-                partition_sched_domains(1, NULL, NULL);
+                cpuset_update_active_cpus();
                 return NOTIFY_OK;
+        default:
+                return NOTIFY_DONE;
+        }
+}
 
+static int __cpuexit cpuset_cpu_inactive(struct notifier_block *nfb,
+                                         unsigned long action, void *hcpu)
+{
+        switch (action & ~CPU_TASKS_FROZEN) {
+        case CPU_DOWN_PREPARE:
+                cpuset_update_active_cpus();
+                return NOTIFY_OK;
         default:
                 return NOTIFY_DONE;
         }
 }
-#endif
 
 static int update_runtime(struct notifier_block *nfb,
                           unsigned long action, void *hcpu)
@@ -7344,10 +7443,8 @@ void __init sched_init_smp(void)
         mutex_unlock(&sched_domains_mutex);
         put_online_cpus();
 
-#ifndef CONFIG_CPUSETS
-        /* XXX: Theoretical race here - CPU may be hotplugged now */
-        hotcpu_notifier(update_sched_domains, 0);
-#endif
+        hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
+        hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
 
         /* RT runtime code needs to handle some hotplug events */
         hotcpu_notifier(update_runtime, 0);
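The comment introduced above promises that with CONFIG_CPUSETS disabled, cpuset_update_active_cpus() collapses to a plain partition_sched_domains() call, so the new notifiers subsume the old update_sched_domains() path. A sketch of that stub, assuming the include/linux/cpuset.h change from the same series:

#ifndef CONFIG_CPUSETS
static inline void cpuset_update_active_cpus(void)
{
        partition_sched_domains(1, NULL, NULL);
}
#endif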