Diffstat (limited to 'kernel/sched.c'):
 kernel/sched.c | 133 ++++++++++++++-
 1 file changed, 130 insertions(+), 3 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 1b59e265273b..4066241ae9f4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5325,7 +5325,7 @@ need_resched:
 	preempt_disable();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
-	rcu_qsctr_inc(cpu);
+	rcu_sched_qs(cpu);
 	prev = rq->curr;
 	switch_count = &prev->nivcsw;
 
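
Note: rcu_qsctr_inc() becomes rcu_sched_qs() here; schedule() reports a
quiescent state for the rcu-sched flavor, since a context switch means any
preempt-disabled read-side section on this CPU has ended. A minimal sketch
of the idea only (the per-CPU flag and names below are made up, not the
tree-RCU implementation):

	static DEFINE_PER_CPU(int, sketch_sched_qs);	/* made-up flag */

	static inline void sketch_rcu_sched_qs(int cpu)
	{
		/* Record that 'cpu' passed through a quiescent state;
		 * the grace-period machinery polls this per-CPU flag. */
		per_cpu(sketch_sched_qs, cpu) = 1;
	}
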
@@ -6609,6 +6609,8 @@ int cond_resched_lock(spinlock_t *lock)
 	int resched = should_resched();
 	int ret = 0;
 
+	lockdep_assert_held(lock);
+
 	if (spin_needbreak(lock) || resched) {
 		spin_unlock(lock);
 		if (resched)
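
Note: the added lockdep_assert_held() enforces the contract that
cond_resched_lock() is entered with 'lock' held; the function may drop and
retake the lock to allow rescheduling or to break lock contention. A hedged
caller sketch; item_lock, item_list, struct item, and process_and_unlink()
are made-up names:

	static void drain_items(void)
	{
		spin_lock(&item_lock);
		while (!list_empty(&item_list)) {
			/* process_and_unlink() removes the entry from item_list */
			process_and_unlink(list_first_entry(&item_list,
							    struct item, node));
			/* May drop/retake item_lock; lockdep now checks it is held */
			cond_resched_lock(&item_lock);
		}
		spin_unlock(&item_lock);
	}
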
@@ -7051,6 +7053,11 @@ fail:
 	return ret;
 }
 
+#define RCU_MIGRATION_IDLE	0
+#define RCU_MIGRATION_NEED_QS	1
+#define RCU_MIGRATION_GOT_QS	2
+#define RCU_MIGRATION_MUST_SYNC	3
+
 /*
  * migration_thread - this is a highprio system thread that performs
  * thread migration by bumping thread off CPU then 'pushing' onto
@@ -7058,6 +7065,7 @@ fail:
  */
 static int migration_thread(void *data)
 {
+	int badcpu;
 	int cpu = (long)data;
 	struct rq *rq;
 
@@ -7092,8 +7100,17 @@ static int migration_thread(void *data)
 		req = list_entry(head->next, struct migration_req, list);
 		list_del_init(head->next);
 
-		spin_unlock(&rq->lock);
-		__migrate_task(req->task, cpu, req->dest_cpu);
+		if (req->task != NULL) {
+			spin_unlock(&rq->lock);
+			__migrate_task(req->task, cpu, req->dest_cpu);
+		} else if (likely(cpu == (badcpu = smp_processor_id()))) {
+			req->dest_cpu = RCU_MIGRATION_GOT_QS;
+			spin_unlock(&rq->lock);
+		} else {
+			req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
+			spin_unlock(&rq->lock);
+			WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
+		}
 		local_irq_enable();
 
 		complete(&req->done);
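
Note: a request with task == NULL is the new second use of the migration
queue: a quiescent-state probe rather than a task push. Because the
migration thread is a high-priority per-CPU kthread, merely running it on
the expected CPU proves that CPU performed a context switch, i.e. passed
through an rcu-sched quiescent state (RCU_MIGRATION_GOT_QS); running on the
wrong CPU forces the caller to fall back to a full grace period
(RCU_MIGRATION_MUST_SYNC). The dest_cpu field is reused to carry this
state. A sketch of the fields the diff relies on (illustrative layout; the
real type is struct migration_req in kernel/sched.c):

	struct migration_req_sketch {
		struct list_head	list;		/* on rq->migration_queue */
		struct task_struct	*task;		/* NULL => quiescent-state probe */
		int			dest_cpu;	/* target CPU, or RCU_MIGRATION_* state */
		struct completion	done;		/* completed by migration_thread() */
	};
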
@@ -10581,3 +10598,113 @@ struct cgroup_subsys cpuacct_subsys = {
 	.subsys_id = cpuacct_subsys_id,
 };
 #endif /* CONFIG_CGROUP_CPUACCT */
+
+#ifndef CONFIG_SMP
+
+int rcu_expedited_torture_stats(char *page)
+{
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
+
+void synchronize_sched_expedited(void)
+{
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#else /* #ifndef CONFIG_SMP */
+
+static DEFINE_PER_CPU(struct migration_req, rcu_migration_req);
+static DEFINE_MUTEX(rcu_sched_expedited_mutex);
+
+#define RCU_EXPEDITED_STATE_POST	-2
+#define RCU_EXPEDITED_STATE_IDLE	-1
+
+static int rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
+
+int rcu_expedited_torture_stats(char *page)
+{
+	int cnt = 0;
+	int cpu;
+
+	cnt += sprintf(&page[cnt], "state: %d /", rcu_expedited_state);
+	for_each_online_cpu(cpu) {
+		cnt += sprintf(&page[cnt], " %d:%d",
+			       cpu, per_cpu(rcu_migration_req, cpu).dest_cpu);
+	}
+	cnt += sprintf(&page[cnt], "\n");
+	return cnt;
+}
+EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
+
+static long synchronize_sched_expedited_count;
+
+/*
+ * Wait for an rcu-sched grace period to elapse, but use "big hammer"
+ * approach to force grace period to end quickly.  This consumes
+ * significant time on all CPUs, and is thus not recommended for
+ * any sort of common-case code.
+ *
+ * Note that it is illegal to call this function while holding any
+ * lock that is acquired by a CPU-hotplug notifier.  Failing to
+ * observe this restriction will result in deadlock.
+ */
+void synchronize_sched_expedited(void)
+{
+	int cpu;
+	unsigned long flags;
+	bool need_full_sync = 0;
+	struct rq *rq;
+	struct migration_req *req;
+	long snap;
+	int trycount = 0;
+
+	smp_mb();  /* ensure prior mod happens before capturing snap. */
+	snap = ACCESS_ONCE(synchronize_sched_expedited_count) + 1;
+	get_online_cpus();
+	while (!mutex_trylock(&rcu_sched_expedited_mutex)) {
+		put_online_cpus();
+		if (trycount++ < 10)
+			udelay(trycount * num_online_cpus());
+		else {
+			synchronize_sched();
+			return;
+		}
+		if (ACCESS_ONCE(synchronize_sched_expedited_count) - snap > 0) {
+			smp_mb(); /* ensure test happens before caller kfree */
+			return;
+		}
+		get_online_cpus();
+	}
+	rcu_expedited_state = RCU_EXPEDITED_STATE_POST;
+	for_each_online_cpu(cpu) {
+		rq = cpu_rq(cpu);
+		req = &per_cpu(rcu_migration_req, cpu);
+		init_completion(&req->done);
+		req->task = NULL;
+		req->dest_cpu = RCU_MIGRATION_NEED_QS;
+		spin_lock_irqsave(&rq->lock, flags);
+		list_add(&req->list, &rq->migration_queue);
+		spin_unlock_irqrestore(&rq->lock, flags);
+		wake_up_process(rq->migration_thread);
+	}
+	for_each_online_cpu(cpu) {
+		rcu_expedited_state = cpu;
+		req = &per_cpu(rcu_migration_req, cpu);
+		rq = cpu_rq(cpu);
+		wait_for_completion(&req->done);
+		spin_lock_irqsave(&rq->lock, flags);
+		if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
+			need_full_sync = 1;
+		req->dest_cpu = RCU_MIGRATION_IDLE;
+		spin_unlock_irqrestore(&rq->lock, flags);
+	}
+	rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
+	mutex_unlock(&rcu_sched_expedited_mutex);
+	put_online_cpus();
+	if (need_full_sync)
+		synchronize_sched();
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#endif /* #else #ifndef CONFIG_SMP */
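
Note: from the caller's perspective, synchronize_sched_expedited() is a
drop-in but deliberately heavyweight substitute for synchronize_sched():
it queues a NULL-task probe on every CPU's migration queue and waits for
each migration thread to run, which guarantees a context switch on every
CPU. A hedged usage sketch; global_cfg, struct cfg, and set_cfg() are
made-up names:

	/* Publish a new config and free the old one once no rcu-sched
	 * (preempt-disabled) reader can still hold a reference to it. */
	void set_cfg(struct cfg *new_cfg)
	{
		struct cfg *old = global_cfg;

		rcu_assign_pointer(global_cfg, new_cfg);
		synchronize_sched_expedited();	/* expedited grace period */
		kfree(old);
	}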