author    Ingo Molnar <mingo@elte.hu>    2009-09-07 02:19:51 -0400
committer Ingo Molnar <mingo@elte.hu>    2009-09-07 02:19:51 -0400
commit    a1922ed661ab2c1637d0b10cde933bd9cd33d965
tree      0f1777542b385ebefd30b3586d830fd8ed6fda5b /kernel/sched.c
parent    75e33751ca8bbb72dd6f1a74d2810ddc8cbe4bdf
parent    d28daf923ac5e4a0d7cecebae56f3e339189366b
Merge branch 'tracing/core' into tracing/hw-breakpoints
Conflicts:
        arch/Kconfig
        kernel/trace/trace.h

Merge reason: resolve the conflicts, plus adapt to the new ring-buffer APIs.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 80 +++++++++++++++++++++++++++++++++++++++++++++++++++-----------------------------
 1 file changed, 51 insertions(+), 29 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 8fb88a906aaa..1b59e265273b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -493,6 +493,7 @@ struct rt_rq {
 #endif
 #ifdef CONFIG_SMP
        unsigned long rt_nr_migratory;
+       unsigned long rt_nr_total;
        int overloaded;
        struct plist_head pushable_tasks;
 #endif
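The new rt_nr_total counter feeds the RT-overload test: with RT group scheduling, a single rt_rq's rt_nr_running no longer tells you how many RT tasks the CPU carries in total. Below is a minimal userspace sketch of the rule the counter serves, assuming the "more than one RT task, at least one of them migratory" condition from the companion kernel/sched_rt.c change that is not part of this file's diff; all types and names are stand-ins, not kernel interfaces.

#include <stdio.h>

/*
 * Illustrative userspace sketch -- not kernel code. A runqueue is
 * considered RT-overloaded only when it holds more than one RT task
 * in total and at least one of them is allowed to migrate.
 */
struct rt_rq_sketch {
        unsigned long rt_nr_migratory;
        unsigned long rt_nr_total;      /* the field added above */
        int overloaded;
};

static void update_overload(struct rt_rq_sketch *rt_rq)
{
        rt_rq->overloaded = rt_rq->rt_nr_total > 1 &&
                            rt_rq->rt_nr_migratory > 0;
}

int main(void)
{
        struct rt_rq_sketch rq = { .rt_nr_migratory = 1, .rt_nr_total = 2 };

        update_overload(&rq);
        printf("overloaded: %d\n", rq.overloaded);      /* prints 1 */
        return 0;
}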
@@ -1978,7 +1979,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
                if (task_hot(p, old_rq->clock, NULL))
                        schedstat_inc(p, se.nr_forced2_migrations);
 #endif
-               perf_counter_task_migration(p, new_cpu);
+               perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS,
+                                    1, 1, NULL, 0);
        }
        p->se.vruntime -= old_cfsrq->min_vruntime -
                                         new_cfsrq->min_vruntime;
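The scheduler-private perf_counter_task_migration() hook is replaced by a direct perf_swcounter_event() call with the generic PERF_COUNT_SW_CPU_MIGRATIONS id. For context, here is a hedged userspace sketch of the consumer side of that counter; it uses the later perf_event_open() name (in this era's tree the syscall was still called perf_counter_open()) and is illustrative, not part of the patch.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include <sched.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
        struct perf_event_attr attr;
        uint64_t count;
        int fd, cpu;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_SOFTWARE;
        attr.size = sizeof(attr);
        attr.config = PERF_COUNT_SW_CPU_MIGRATIONS;

        /* count migrations of this task, on any CPU */
        fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        /* bounce between CPUs to trigger some migrations */
        for (cpu = 0; cpu < 2; cpu++) {
                cpu_set_t set;

                CPU_ZERO(&set);
                CPU_SET(cpu, &set);
                sched_setaffinity(0, sizeof(set), &set);
        }

        if (read(fd, &count, sizeof(count)) != sizeof(count))
                return 1;
        printf("CPU migrations: %llu\n", (unsigned long long)count);
        close(fd);
        return 0;
}

On a multi-CPU machine the affinity bounce should report at least one migration.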
@@ -2570,15 +2572,37 @@ static void __sched_fork(struct task_struct *p)
        p->se.avg_wakeup = sysctl_sched_wakeup_granularity;
 
 #ifdef CONFIG_SCHEDSTATS
        p->se.wait_start = 0;
-       p->se.sum_sleep_runtime = 0;
-       p->se.sleep_start = 0;
-       p->se.block_start = 0;
-       p->se.sleep_max = 0;
-       p->se.block_max = 0;
-       p->se.exec_max = 0;
-       p->se.slice_max = 0;
-       p->se.wait_max = 0;
+       p->se.wait_max = 0;
+       p->se.wait_count = 0;
+       p->se.wait_sum = 0;
+
+       p->se.sleep_start = 0;
+       p->se.sleep_max = 0;
+       p->se.sum_sleep_runtime = 0;
+
+       p->se.block_start = 0;
+       p->se.block_max = 0;
+       p->se.exec_max = 0;
+       p->se.slice_max = 0;
+
+       p->se.nr_migrations_cold = 0;
+       p->se.nr_failed_migrations_affine = 0;
+       p->se.nr_failed_migrations_running = 0;
+       p->se.nr_failed_migrations_hot = 0;
+       p->se.nr_forced_migrations = 0;
+       p->se.nr_forced2_migrations = 0;
+
+       p->se.nr_wakeups = 0;
+       p->se.nr_wakeups_sync = 0;
+       p->se.nr_wakeups_migrate = 0;
+       p->se.nr_wakeups_local = 0;
+       p->se.nr_wakeups_remote = 0;
+       p->se.nr_wakeups_affine = 0;
+       p->se.nr_wakeups_affine_attempts = 0;
+       p->se.nr_wakeups_passive = 0;
+       p->se.nr_wakeups_idle = 0;
+
 #endif
 
        INIT_LIST_HEAD(&p->rt.run_list);
@@ -6540,6 +6564,11 @@ SYSCALL_DEFINE0(sched_yield)
        return 0;
 }
 
+static inline int should_resched(void)
+{
+       return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
+}
+
 static void __cond_resched(void)
 {
 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
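should_resched() consolidates a test that _cond_resched(), cond_resched_lock() and cond_resched_softirq() each open-coded below, and drops their system_state == SYSTEM_RUNNING guard along the way. A self-contained sketch of the predicate, with the kernel's need_resched() and preempt_count() stubbed as plain variables (the PREEMPT_ACTIVE value is only a stand-in):

#include <stdio.h>

#define PREEMPT_ACTIVE  0x10000000      /* stub; real value is per-arch */

static int need_resched_flag;           /* stands in for need_resched() */
static int preempt_count_value;         /* stands in for preempt_count() */

static inline int should_resched(void)
{
        return need_resched_flag && !(preempt_count_value & PREEMPT_ACTIVE);
}

int main(void)
{
        need_resched_flag = 1;
        printf("normal context: %d\n", should_resched());       /* 1 */

        preempt_count_value |= PREEMPT_ACTIVE;
        printf("under PREEMPT_ACTIVE: %d\n", should_resched()); /* 0 */
        return 0;
}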
@@ -6559,8 +6588,7 @@ static void __cond_resched(void)
 
 int __sched _cond_resched(void)
 {
-       if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
-                       system_state == SYSTEM_RUNNING) {
+       if (should_resched()) {
                __cond_resched();
                return 1;
        }
@@ -6578,12 +6606,12 @@ EXPORT_SYMBOL(_cond_resched);
  */
 int cond_resched_lock(spinlock_t *lock)
 {
-       int resched = need_resched() && system_state == SYSTEM_RUNNING;
+       int resched = should_resched();
        int ret = 0;
 
        if (spin_needbreak(lock) || resched) {
                spin_unlock(lock);
-               if (resched && need_resched())
+               if (resched)
                        __cond_resched();
                else
                        cpu_relax();
@@ -6598,7 +6626,7 @@ int __sched cond_resched_softirq(void)
 {
        BUG_ON(!in_softirq());
 
-       if (need_resched() && system_state == SYSTEM_RUNNING) {
+       if (should_resched()) {
                local_bh_enable();
                __cond_resched();
                local_bh_disable();
@@ -7045,7 +7073,7 @@ static int migration_thread(void *data)
 
                if (cpu_is_offline(cpu)) {
                        spin_unlock_irq(&rq->lock);
-                       goto wait_to_die;
+                       break;
                }
 
                if (rq->active_balance) {
@@ -7071,16 +7099,7 @@ static int migration_thread(void *data)
                complete(&req->done);
        }
        __set_current_state(TASK_RUNNING);
-       return 0;
 
-wait_to_die:
-       /* Wait for kthread_stop */
-       set_current_state(TASK_INTERRUPTIBLE);
-       while (!kthread_should_stop()) {
-               schedule();
-               set_current_state(TASK_INTERRUPTIBLE);
-       }
-       __set_current_state(TASK_RUNNING);
        return 0;
 }
 
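The thread now simply breaks out of its loop and returns once its CPU is offline. That is safe because kthread_stop() blocks until the kthread exits anyway, and the get_task_struct()/put_task_struct() pair added further down keeps the task_struct alive for that final kthread_stop() even if the thread exited long before. A pthread analogy of the simplified shutdown, purely illustrative:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/*
 * pthread analogy: the worker just breaks out and returns; the
 * joiner, like kthread_stop(), waits for the exit on its own.
 */
static atomic_int cpu_offline;

static void *migration_thread(void *data)
{
        (void)data;
        for (;;) {
                if (atomic_load(&cpu_offline))
                        break;          /* was: goto wait_to_die */
                /* ... push/pull tasks, service migration requests ... */
        }
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, migration_thread, NULL);
        atomic_store(&cpu_offline, 1);  /* simulate CPU_DEAD */
        pthread_join(&t, NULL);         /* plays the kthread_stop() role */
        puts("thread reaped");
        return 0;
}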
@@ -7270,6 +7289,7 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
 static void calc_global_load_remove(struct rq *rq)
 {
        atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
+       rq->calc_load_active = 0;
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
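Zeroing calc_load_active after subtracting it makes the removal idempotent: if this path runs again before the CPU accumulates a new contribution, the global calc_load_tasks sum cannot be decremented twice. A toy model of the accounting, where the global and the struct are stand-ins:

#include <stdio.h>

static long calc_load_tasks;            /* stand-in for the global */

struct rq_sketch {
        long calc_load_active;          /* this CPU's contribution */
};

static void calc_global_load_remove(struct rq_sketch *rq)
{
        calc_load_tasks -= rq->calc_load_active;
        rq->calc_load_active = 0;       /* the reset added above */
}

int main(void)
{
        struct rq_sketch rq = { .calc_load_active = 3 };

        calc_load_tasks = 3;
        calc_global_load_remove(&rq);
        calc_global_load_remove(&rq);   /* harmless second removal */
        printf("calc_load_tasks = %ld\n", calc_load_tasks);    /* 0 */
        return 0;
}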
@@ -7494,7 +7514,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                rq = task_rq_lock(p, &flags);
                __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
                task_rq_unlock(rq, &flags);
+               get_task_struct(p);
                cpu_rq(cpu)->migration_thread = p;
+               rq->calc_load_update = calc_load_update;
                break;
 
        case CPU_ONLINE:
@@ -7505,8 +7527,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                /* Update our root-domain */
                rq = cpu_rq(cpu);
                spin_lock_irqsave(&rq->lock, flags);
-               rq->calc_load_update = calc_load_update;
-               rq->calc_load_active = 0;
                if (rq->rd) {
                        BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 
@@ -7524,6 +7544,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                kthread_bind(cpu_rq(cpu)->migration_thread,
                             cpumask_any(cpu_online_mask));
                kthread_stop(cpu_rq(cpu)->migration_thread);
+               put_task_struct(cpu_rq(cpu)->migration_thread);
                cpu_rq(cpu)->migration_thread = NULL;
                break;
 
@@ -7533,6 +7554,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                migrate_live_tasks(cpu);
                rq = cpu_rq(cpu);
                kthread_stop(rq->migration_thread);
+               put_task_struct(rq->migration_thread);
                rq->migration_thread = NULL;
                /* Idle task back to normal (off runqueue, low prio) */
                spin_lock_irq(&rq->lock);
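Both put_task_struct() calls pair with the get_task_struct() taken at CPU_UP_PREPARE above: since the migration thread can now exit on its own, the notifier needs its own reference so the task_struct it hands to kthread_stop() cannot be freed under it. The same pattern in a stand-alone sketch with a toy reference count (types and messages are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct task_sketch {
        int usage;      /* toy refcount */
};

static void get_task(struct task_sketch *t)
{
        t->usage++;
}

static void put_task(struct task_sketch *t)
{
        if (--t->usage == 0) {
                puts("task_struct freed");
                free(t);
        }
}

int main(void)
{
        struct task_sketch *t = calloc(1, sizeof(*t));

        t->usage = 1;   /* reference held by the running thread */
        get_task(t);    /* notifier's reference (get_task_struct) */

        put_task(t);    /* thread exits, drops its own reference */
        put_task(t);    /* notifier done with it (put_task_struct) */
        return 0;
}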
@@ -7828,7 +7850,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
        free_rootdomain(old_rd);
 }
 
-static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem)
+static int init_rootdomain(struct root_domain *rd, bool bootmem)
 {
        gfp_t gfp = GFP_KERNEL;
 
@@ -9075,7 +9097,7 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 #ifdef CONFIG_SMP
        rt_rq->rt_nr_migratory = 0;
        rt_rq->overloaded = 0;
-       plist_head_init(&rq->rt.pushable_tasks, &rq->lock);
+       plist_head_init(&rt_rq->pushable_tasks, &rq->lock);
 #endif
 
        rt_rq->rt_time = 0;
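This one-liner matters for RT group scheduling: init_rt_rq() runs for every rt_rq, but the old code always initialized the root runqueue's rq->rt.pushable_tasks rather than the rt_rq it was handed, leaving group rt_rqs with an uninitialized plist. A compact, stub-typed illustration of initializing the object actually passed in:

#include <assert.h>
#include <stddef.h>

/* Stub list: just enough structure to show init-the-wrong-object. */
struct plist_head_sketch {
        struct plist_head_sketch *next;
};

struct rt_rq_sketch {
        struct plist_head_sketch pushable_tasks;
};

static void init_rt_rq(struct rt_rq_sketch *rt_rq)
{
        /* fixed form: initialize the rt_rq we were handed */
        rt_rq->pushable_tasks.next = &rt_rq->pushable_tasks;
}

int main(void)
{
        struct rt_rq_sketch group_rt_rq = { { NULL } };

        init_rt_rq(&group_rt_rq);
        assert(group_rt_rq.pushable_tasks.next ==
               &group_rt_rq.pushable_tasks);
        return 0;
}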