author		Linus Torvalds <torvalds@linux-foundation.org>	2009-09-11 16:20:18 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-11 16:20:18 -0400
commit		eee2775d9924b22643bd89b2e568cc5eed7e8a04 (patch)
tree		095ad7851895c5d39596f3ff7ee1e078235a2501 /kernel/sched.c
parent		53e16fbd30005905168d9b75555fdc7e0a2eac58 (diff)
parent		7db905e636f08ea5bc9825c1f73d77802e8ccad5 (diff)
Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (28 commits)
rcu: Move end of special early-boot RCU operation earlier
rcu: Changes from reviews: avoid casts, fix/add warnings, improve comments
rcu: Create rcutree plugins to handle hotplug CPU for multi-level trees
rcu: Remove lockdep annotations from RCU's _notrace() API members
rcu: Add #ifdef to suppress __rcu_offline_cpu() warning in !HOTPLUG_CPU builds
rcu: Add CPU-offline processing for single-node configurations
rcu: Add "notrace" to RCU function headers used by ftrace
rcu: Remove CONFIG_PREEMPT_RCU
rcu: Merge preemptable-RCU functionality into hierarchical RCU
rcu: Simplify rcu_pending()/rcu_check_callbacks() API
rcu: Use debugfs_remove_recursive() to simplify code.
rcu: Merge per-RCU-flavor initialization into pre-existing macro
rcu: Fix online/offline indication for rcudata.csv trace file
rcu: Consolidate sparse and lockdep declarations in include/linux/rcupdate.h
rcu: Renamings to increase RCU clarity
rcu: Move private definitions from include/linux/rcutree.h to kernel/rcutree.h
rcu: Expunge lingering references to CONFIG_CLASSIC_RCU, optimize on !SMP
rcu: Delay rcu_barrier() wait until beginning of next CPU-hotunplug operation.
rcu: Fix typo in rcu_irq_exit() comment header
rcu: Make rcupreempt_trace.c look at offline CPUs
...
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	131
1 file changed, 128 insertions, 3 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 2c75f7daa439..4066241ae9f4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5325,7 +5325,7 @@ need_resched:
 	preempt_disable();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
-	rcu_qsctr_inc(cpu);
+	rcu_sched_qs(cpu);
 	prev = rq->curr;
 	switch_count = &prev->nivcsw;
 
@@ -7053,6 +7053,11 @@ fail:
 	return ret;
 }
 
+#define RCU_MIGRATION_IDLE	0
+#define RCU_MIGRATION_NEED_QS	1
+#define RCU_MIGRATION_GOT_QS	2
+#define RCU_MIGRATION_MUST_SYNC	3
+
 /*
  * migration_thread - this is a highprio system thread that performs
  * thread migration by bumping thread off CPU then 'pushing' onto
@@ -7060,6 +7065,7 @@ fail:
  */
 static int migration_thread(void *data)
 {
+	int badcpu;
 	int cpu = (long)data;
 	struct rq *rq;
 
@@ -7094,8 +7100,17 @@ static int migration_thread(void *data)
 		req = list_entry(head->next, struct migration_req, list);
 		list_del_init(head->next);
 
-		spin_unlock(&rq->lock);
-		__migrate_task(req->task, cpu, req->dest_cpu);
+		if (req->task != NULL) {
+			spin_unlock(&rq->lock);
+			__migrate_task(req->task, cpu, req->dest_cpu);
+		} else if (likely(cpu == (badcpu = smp_processor_id()))) {
+			req->dest_cpu = RCU_MIGRATION_GOT_QS;
+			spin_unlock(&rq->lock);
+		} else {
+			req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
+			spin_unlock(&rq->lock);
+			WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
+		}
 		local_irq_enable();
 
 		complete(&req->done);
@@ -10583,3 +10598,113 @@ struct cgroup_subsys cpuacct_subsys = {
 	.subsys_id = cpuacct_subsys_id,
 };
 #endif	/* CONFIG_CGROUP_CPUACCT */
+
+#ifndef CONFIG_SMP
+
+int rcu_expedited_torture_stats(char *page)
+{
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
+
+void synchronize_sched_expedited(void)
+{
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#else /* #ifndef CONFIG_SMP */
+
+static DEFINE_PER_CPU(struct migration_req, rcu_migration_req);
+static DEFINE_MUTEX(rcu_sched_expedited_mutex);
+
+#define RCU_EXPEDITED_STATE_POST	-2
+#define RCU_EXPEDITED_STATE_IDLE	-1
+
+static int rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
+
+int rcu_expedited_torture_stats(char *page)
+{
+	int cnt = 0;
+	int cpu;
+
+	cnt += sprintf(&page[cnt], "state: %d /", rcu_expedited_state);
+	for_each_online_cpu(cpu) {
+		cnt += sprintf(&page[cnt], " %d:%d",
+			       cpu, per_cpu(rcu_migration_req, cpu).dest_cpu);
+	}
+	cnt += sprintf(&page[cnt], "\n");
+	return cnt;
+}
+EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
+
+static long synchronize_sched_expedited_count;
+
+/*
+ * Wait for an rcu-sched grace period to elapse, but use "big hammer"
+ * approach to force grace period to end quickly.  This consumes
+ * significant time on all CPUs, and is thus not recommended for
+ * any sort of common-case code.
+ *
+ * Note that it is illegal to call this function while holding any
+ * lock that is acquired by a CPU-hotplug notifier.  Failing to
+ * observe this restriction will result in deadlock.
+ */
+void synchronize_sched_expedited(void)
+{
+	int cpu;
+	unsigned long flags;
+	bool need_full_sync = 0;
+	struct rq *rq;
+	struct migration_req *req;
+	long snap;
+	int trycount = 0;
+
+	smp_mb();  /* ensure prior mod happens before capturing snap. */
+	snap = ACCESS_ONCE(synchronize_sched_expedited_count) + 1;
+	get_online_cpus();
+	while (!mutex_trylock(&rcu_sched_expedited_mutex)) {
+		put_online_cpus();
+		if (trycount++ < 10)
+			udelay(trycount * num_online_cpus());
+		else {
+			synchronize_sched();
+			return;
+		}
+		if (ACCESS_ONCE(synchronize_sched_expedited_count) - snap > 0) {
+			smp_mb(); /* ensure test happens before caller kfree */
+			return;
+		}
+		get_online_cpus();
+	}
+	rcu_expedited_state = RCU_EXPEDITED_STATE_POST;
+	for_each_online_cpu(cpu) {
+		rq = cpu_rq(cpu);
+		req = &per_cpu(rcu_migration_req, cpu);
+		init_completion(&req->done);
+		req->task = NULL;
+		req->dest_cpu = RCU_MIGRATION_NEED_QS;
+		spin_lock_irqsave(&rq->lock, flags);
+		list_add(&req->list, &rq->migration_queue);
+		spin_unlock_irqrestore(&rq->lock, flags);
+		wake_up_process(rq->migration_thread);
+	}
+	for_each_online_cpu(cpu) {
+		rcu_expedited_state = cpu;
+		req = &per_cpu(rcu_migration_req, cpu);
+		rq = cpu_rq(cpu);
+		wait_for_completion(&req->done);
+		spin_lock_irqsave(&rq->lock, flags);
+		if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
+			need_full_sync = 1;
+		req->dest_cpu = RCU_MIGRATION_IDLE;
+		spin_unlock_irqrestore(&rq->lock, flags);
+	}
+	rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
+	mutex_unlock(&rcu_sched_expedited_mutex);
+	put_online_cpus();
+	if (need_full_sync)
+		synchronize_sched();
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#endif /* #else #ifndef CONFIG_SMP */
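Usage note: as the comment block in the final hunk states, synchronize_sched_expedited() is intended as a drop-in (but far more CPU-hungry) substitute for synchronize_sched() on update-side paths that cannot tolerate a normal grace-period latency. The sketch below shows a minimal, hypothetical caller under that assumption; the foo structure, foo_lock, and foo_del() are illustrative only and are not part of this patch.

/* Hypothetical update-side deletion path (illustrative, not from this patch).
 * Assumes readers traverse the list under rcu_read_lock_sched() or with
 * preemption disabled, which is what the rcu-sched flavor waits for.
 * Needs <linux/rculist.h>, <linux/spinlock.h>, <linux/slab.h>.
 */
struct foo {
	struct list_head list;
	int data;
};

static DEFINE_SPINLOCK(foo_lock);	/* illustrative update-side lock */

static void foo_del(struct foo *fp)
{
	spin_lock(&foo_lock);
	list_del_rcu(&fp->list);	/* unlink from the RCU-protected list */
	spin_unlock(&foo_lock);

	/* Expedited wait: prods every online CPU through a quiescent state
	 * via its migration thread instead of waiting out a normal grace
	 * period.  Must not be called while holding any lock taken by a
	 * CPU-hotplug notifier (see the comment header above). */
	synchronize_sched_expedited();

	kfree(fp);			/* no rcu-sched reader can still see fp */
}

The empty !CONFIG_SMP stubs earlier in the hunk rely on the fact that, with a single CPU and non-preemptible rcu-sched read-side critical sections, merely reaching the caller already implies a quiescent state, so there is nothing to wait for.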