author    Linus Torvalds <torvalds@linux-foundation.org>  2011-06-07 22:20:28 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-06-07 22:20:28 -0400
commit    6715a52a581c891e9a2034abe1c81ddb482d70b3
tree      03bac8851232d11382af718c2d10559cc16e4f0f
parent    ef2398019b305827ea7130ebaf7bf521b444530e
parent    6c6c54e1807faf116724451ef2bd14993780470a
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched: Fix/clarify set_task_cpu() locking rules
  lockdep: Fix lock_is_held() on recursion
  sched: Fix schedstat.nr_wakeups_migrate
  sched: Fix cross-cpu clock sync on remote wakeups
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/lockdep.c |  2
-rw-r--r--  kernel/sched.c   | 33
2 files changed, 25 insertions(+), 10 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 63437d065ac..298c9276dfd 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3426,7 +3426,7 @@ int lock_is_held(struct lockdep_map *lock)
 	int ret = 0;
 
 	if (unlikely(current->lockdep_recursion))
-		return ret;
+		return 1; /* avoid false negative lockdep_assert_held() */
 
 	raw_local_irq_save(flags);
 	check_flags(flags);
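
Note: lockdep_assert_held() is defined as WARN_ON(debug_locks && !lockdep_is_held(l)), so returning 0 from lock_is_held() while current->lockdep_recursion is set made the assert fire even when the lock was genuinely held. A minimal userspace model of that interaction (illustrative names and simplified logic, not kernel code):

	#include <stdio.h>

	static int debug_locks = 1;
	static int lockdep_recursion;		/* stands in for current->lockdep_recursion */
	static int lock_really_held = 1;	/* ground truth: the lock is held */

	static int lock_is_held(int fixed)
	{
		if (lockdep_recursion)
			return fixed ? 1 : 0;	/* old code returned 0, i.e. "not held" */
		return lock_really_held;
	}

	#define lockdep_assert_held(fixed) \
		do { if (debug_locks && !lock_is_held(fixed)) \
			printf("bogus WARN on line %d\n", __LINE__); } while (0)

	int main(void)
	{
		lockdep_recursion = 1;		/* lockdep busy with its own bookkeeping */
		lockdep_assert_held(0);		/* old behaviour: false warning */
		lockdep_assert_held(1);		/* fixed behaviour: silent */
		return 0;
	}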
diff --git a/kernel/sched.c b/kernel/sched.c
index cbb3a0eee58..3f2e502d609 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -605,10 +605,10 @@ static inline int cpu_of(struct rq *rq)
 /*
  * Return the group to which this tasks belongs.
  *
- * We use task_subsys_state_check() and extend the RCU verification
- * with lockdep_is_held(&p->pi_lock) because cpu_cgroup_attach()
- * holds that lock for each task it moves into the cgroup. Therefore
- * by holding that lock, we pin the task to the current cgroup.
+ * We use task_subsys_state_check() and extend the RCU verification with
+ * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
+ * task it moves into the cgroup. Therefore by holding either of those locks,
+ * we pin the task to the current cgroup.
  */
 static inline struct task_group *task_group(struct task_struct *p)
 {
@@ -616,7 +616,8 @@ static inline struct task_group *task_group(struct task_struct *p)
 	struct cgroup_subsys_state *css;
 
 	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
-			lockdep_is_held(&p->pi_lock));
+			lockdep_is_held(&p->pi_lock) ||
+			lockdep_is_held(&task_rq(p)->lock));
 	tg = container_of(css, struct task_group, css);
 
 	return autogroup_task_group(p, tg);
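
Note: the widened check works because cpu_cgroup_attach() (via sched_move_task()) takes both p->pi_lock and rq->lock for every task it moves, so a reader holding either one excludes a concurrent cgroup move. A sketch of a hypothetical caller relying on that (not part of the patch):

	struct task_group *tg;
	unsigned long flags;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	tg = task_group(p);	/* stable: a concurrent move would need pi_lock too */
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);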
@@ -2200,6 +2201,16 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 			!(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
 
 #ifdef CONFIG_LOCKDEP
+	/*
+	 * The caller should hold either p->pi_lock or rq->lock, when changing
+	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
+	 *
+	 * sched_move_task() holds both and thus holding either pins the cgroup,
+	 * see set_task_rq().
+	 *
+	 * Furthermore, all task_rq users should acquire both locks, see
+	 * task_rq_lock().
+	 */
 	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
 				      lockdep_is_held(&task_rq(p)->lock)));
 #endif
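
Note: the last paragraph of the new comment points at task_rq_lock(); condensed, that function in this same sched.c looks roughly like the following (the real version also carries __acquires() annotations). It loops because the task may migrate between reading task_rq(p) and acquiring the rq lock:

	static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
	{
		struct rq *rq;

		for (;;) {
			raw_spin_lock_irqsave(&p->pi_lock, *flags);
			rq = task_rq(p);
			raw_spin_lock(&rq->lock);
			if (likely(rq == task_rq(p)))
				return rq;	/* both locks held, task pinned to this rq */
			raw_spin_unlock(&rq->lock);
			raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
		}
	}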
@@ -2447,6 +2458,10 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 		}
 		rcu_read_unlock();
 	}
+
+	if (wake_flags & WF_MIGRATED)
+		schedstat_inc(p, se.statistics.nr_wakeups_migrate);
+
 #endif /* CONFIG_SMP */
 
 	schedstat_inc(rq, ttwu_count);
@@ -2455,9 +2470,6 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 	if (wake_flags & WF_SYNC)
 		schedstat_inc(p, se.statistics.nr_wakeups_sync);
 
-	if (cpu != task_cpu(p))
-		schedstat_inc(p, se.statistics.nr_wakeups_migrate);
-
 #endif /* CONFIG_SCHEDSTATS */
 }
 
@@ -2600,6 +2612,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
 
 #if defined(CONFIG_SMP)
 	if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) {
+		sched_clock_cpu(cpu); /* sync clocks x-cpu */
 		ttwu_queue_remote(p, cpu);
 		return;
 	}
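
Note: with TTWU_QUEUE the second half of the wakeup runs on the remote CPU out of an IPI, so the waking CPU never takes the remote rq->lock; previously that lock acquisition was where the two CPUs' sched clocks were brought into sync. The explicit sched_clock_cpu(cpu) restores the sync before timestamps are taken on the remote runqueue. An assumed, condensed shape of the remote path (list_add_wake_entry() is a hypothetical stand-in for the real queueing):

	static void ttwu_queue_remote_sketch(struct task_struct *p, int cpu)
	{
		struct rq *rq = cpu_rq(cpu);

		if (list_add_wake_entry(p, rq))		/* hypothetical helper: queue p */
			smp_send_reschedule(cpu);	/* IPI; remote CPU finishes the wakeup */
	}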
@@ -2674,8 +2687,10 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 		p->sched_class->task_waking(p);
 
 	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
-	if (task_cpu(p) != cpu)
+	if (task_cpu(p) != cpu) {
+		wake_flags |= WF_MIGRATED;
 		set_task_cpu(p, cpu);
+	}
 #endif /* CONFIG_SMP */
 
 	ttwu_queue(p, cpu);
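
Note: this hunk and the two ttwu_stat() hunks above belong together. ttwu_stat() now runs after set_task_cpu(), so the old test cpu != task_cpu(p) compared equal values and never counted a migration; the decision is therefore latched into wake_flags at the moment it is made. A condensed model of the corrected flow (illustrative, not the full wakeup path):

	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
	if (task_cpu(p) != cpu) {
		wake_flags |= WF_MIGRATED;	/* remember the migration now */
		set_task_cpu(p, cpu);		/* afterwards the CPUs compare equal */
	}
	/* ... later, in ttwu_stat(): */
	if (wake_flags & WF_MIGRATED)
		schedstat_inc(p, se.statistics.nr_wakeups_migrate);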