Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 131 ++++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 128 insertions(+), 3 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 1b59e265273b..c9beca67a53e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5325,7 +5325,7 @@ need_resched:
 	preempt_disable();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
-	rcu_qsctr_inc(cpu);
+	rcu_sched_qs(cpu);
 	prev = rq->curr;
 	switch_count = &prev->nivcsw;
 
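(Annotation, not part of the patch.) This hunk renames the per-CPU quiescent-state hook from rcu_qsctr_inc() to rcu_sched_qs(): every pass through schedule() reports an rcu-sched quiescent state for the current CPU. A minimal sketch of the reader side that this guarantee protects, assuming a hypothetical RCU-protected pointer gp and helper do_something():

	struct foo *p;	/* "struct foo" and "gp" are illustrative names */

	/*
	 * While preemption is disabled, this CPU cannot pass through
	 * schedule(), so it cannot report an rcu-sched quiescent state;
	 * any sched grace period must wait for this region to end.
	 */
	preempt_disable();
	p = rcu_dereference(gp);	/* fetch RCU-protected pointer */
	if (p)
		do_something(p);	/* hypothetical reader work */
	preempt_enable();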
@@ -7051,6 +7051,11 @@ fail:
 	return ret;
 }
 
+#define RCU_MIGRATION_IDLE	0
+#define RCU_MIGRATION_NEED_QS	1
+#define RCU_MIGRATION_GOT_QS	2
+#define RCU_MIGRATION_MUST_SYNC	3
+
 /*
  * migration_thread - this is a highprio system thread that performs
  * thread migration by bumping thread off CPU then 'pushing' onto
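The four RCU_MIGRATION_* values added above turn the dest_cpu field of a queued migration_req into a small state word for expedited-RCU probes. As an annotation (not part of the patch), the intended transitions are:

	/*
	 * IDLE      -- no expedited probe outstanding on this CPU.
	 * NEED_QS   -- synchronize_sched_expedited() queued a probe and
	 *              is waiting for this CPU's migration thread to run.
	 * GOT_QS    -- the migration thread ran on the expected CPU; the
	 *              mere fact that it ran means the CPU context-switched
	 *              and therefore passed a quiescent state.
	 * MUST_SYNC -- the thread found itself on some other CPU, so the
	 *              caller must fall back to a full synchronize_sched().
	 */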
@@ -7058,6 +7063,7 @@ fail:
  */
 static int migration_thread(void *data)
 {
+	int badcpu;
 	int cpu = (long)data;
 	struct rq *rq;
 
@@ -7092,8 +7098,17 @@ static int migration_thread(void *data)
 		req = list_entry(head->next, struct migration_req, list);
 		list_del_init(head->next);
 
-		spin_unlock(&rq->lock);
-		__migrate_task(req->task, cpu, req->dest_cpu);
+		if (req->task != NULL) {
+			spin_unlock(&rq->lock);
+			__migrate_task(req->task, cpu, req->dest_cpu);
+		} else if (likely(cpu == (badcpu = smp_processor_id()))) {
+			req->dest_cpu = RCU_MIGRATION_GOT_QS;
+			spin_unlock(&rq->lock);
+		} else {
+			req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
+			spin_unlock(&rq->lock);
+			WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
+		}
 		local_irq_enable();
 
 		complete(&req->done);
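With this hunk, struct migration_req does double duty: req->task != NULL is an ordinary task migration, while req->task == NULL is an expedited-RCU quiescent-state probe that the thread answers through req->dest_cpu. For contrast, a sketch of how the normal migration path fills in the same structure (paraphrased and simplified; p, dest_cpu, and rq stand for the usual scheduler variables):

	struct migration_req req;

	init_completion(&req.done);
	req.task = p;			/* task to move; NULL would mean "RCU probe" */
	req.dest_cpu = dest_cpu;	/* target CPU; reused as state word for probes */
	list_add(&req.list, &rq->migration_queue);
	wake_up_process(rq->migration_thread);
	wait_for_completion(&req.done);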
@@ -10581,3 +10596,113 @@ struct cgroup_subsys cpuacct_subsys = {
 	.subsys_id = cpuacct_subsys_id,
 };
 #endif	/* CONFIG_CGROUP_CPUACCT */
+
+#ifndef CONFIG_SMP
+
+int rcu_expedited_torture_stats(char *page)
+{
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
+
+void synchronize_sched_expedited(void)
+{
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#else /* #ifndef CONFIG_SMP */
+
+static DEFINE_PER_CPU(struct migration_req, rcu_migration_req);
+static DEFINE_MUTEX(rcu_sched_expedited_mutex);
+
+#define RCU_EXPEDITED_STATE_POST	-2
+#define RCU_EXPEDITED_STATE_IDLE	-1
+
+static int rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
+
+int rcu_expedited_torture_stats(char *page)
+{
+	int cnt = 0;
+	int cpu;
+
+	cnt += sprintf(&page[cnt], "state: %d /", rcu_expedited_state);
+	for_each_online_cpu(cpu) {
+		cnt += sprintf(&page[cnt], " %d:%d",
+			       cpu, per_cpu(rcu_migration_req, cpu).dest_cpu);
+	}
+	cnt += sprintf(&page[cnt], "\n");
+	return cnt;
+}
+EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
+
+static long synchronize_sched_expedited_count;
+
+/*
+ * Wait for an rcu-sched grace period to elapse, but use "big hammer"
+ * approach to force grace period to end quickly.  This consumes
+ * significant time on all CPUs, and is thus not recommended for
+ * any sort of common-case code.
+ *
+ * Note that it is illegal to call this function while holding any
+ * lock that is acquired by a CPU-hotplug notifier.  Failing to
+ * observe this restriction will result in deadlock.
+ */
+void synchronize_sched_expedited(void)
+{
+	int cpu;
+	unsigned long flags;
+	bool need_full_sync = 0;
+	struct rq *rq;
+	struct migration_req *req;
+	long snap;
+	int trycount = 0;
+
+	smp_mb();  /* ensure prior mod happens before capturing snap. */
+	snap = ACCESS_ONCE(synchronize_sched_expedited_count) + 1;
+	get_online_cpus();
+	while (!mutex_trylock(&rcu_sched_expedited_mutex)) {
+		put_online_cpus();
+		if (trycount++ < 10)
+			udelay(trycount * num_online_cpus());
+		else {
+			synchronize_sched();
+			return;
+		}
+		if (ACCESS_ONCE(synchronize_sched_expedited_count) - snap > 0) {
+			smp_mb();  /* ensure test happens before caller kfree */
+			return;
+		}
+		get_online_cpus();
+	}
+	rcu_expedited_state = RCU_EXPEDITED_STATE_POST;
+	for_each_online_cpu(cpu) {
+		rq = cpu_rq(cpu);
+		req = &per_cpu(rcu_migration_req, cpu);
+		init_completion(&req->done);
+		req->task = NULL;
+		req->dest_cpu = RCU_MIGRATION_NEED_QS;
+		spin_lock_irqsave(&rq->lock, flags);
+		list_add(&req->list, &rq->migration_queue);
+		spin_unlock_irqrestore(&rq->lock, flags);
+		wake_up_process(rq->migration_thread);
+	}
+	for_each_online_cpu(cpu) {
+		rcu_expedited_state = cpu;
+		req = &per_cpu(rcu_migration_req, cpu);
+		rq = cpu_rq(cpu);
+		wait_for_completion(&req->done);
+		spin_lock_irqsave(&rq->lock, flags);
+		if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
+			need_full_sync = 1;
+		req->dest_cpu = RCU_MIGRATION_IDLE;
+		spin_unlock_irqrestore(&rq->lock, flags);
+	}
+	rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
+	mutex_unlock(&rcu_sched_expedited_mutex);
+	put_online_cpus();
+	if (need_full_sync)
+		synchronize_sched();
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#endif /* #else #ifndef CONFIG_SMP */
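(Annotation, not part of the patch.) A hypothetical updater using the new primitive; struct foo, gp, old, and new_p are illustrative names. Readers access gp with preemption disabled, so once synchronize_sched_expedited() returns, no reader can still hold a reference to the old element:

	struct foo *old = gp;

	rcu_assign_pointer(gp, new_p);	/* publish the replacement */
	synchronize_sched_expedited();	/* expedited wait for all rcu-sched readers */
	kfree(old);			/* no reader can reach the old element now */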