path: root/kernel/rcu/tree.c
author     Linus Torvalds <torvalds@linux-foundation.org>    2019-09-16 20:25:49 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2019-09-16 20:25:49 -0400
commit     7e67a859997aad47727aff9c5a32e160da079ce3 (patch)
tree       96f53425c2834de5b3276d7598782ab6412e4d5e /kernel/rcu/tree.c
parent     772c1d06bd402f7ee72c61a18c2db74cd74b6758 (diff)
parent     563c4f85f9f0d63b712081d5b4522152cdcb8b6b (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:

 - MAINTAINERS: Add Mark Rutland as perf submaintainer, Juri Lelli and
   Vincent Guittot as scheduler submaintainers. Add Dietmar Eggemann,
   Steven Rostedt, Ben Segall and Mel Gorman as scheduler reviewers.

   As perf and the scheduler is getting bigger and more complex,
   document the status quo of current responsibilities and interests,
   and spread the review pain^H^H^H^H fun via an increase in the Cc:
   linecount generated by scripts/get_maintainer.pl. :-)

 - Add another series of patches that brings the -rt (PREEMPT_RT) tree
   closer to mainline: split the monolithic CONFIG_PREEMPT dependencies
   into a new CONFIG_PREEMPTION category that will allow the eventual
   introduction of CONFIG_PREEMPT_RT. Still a few more hundred patches
   to go though.

 - Extend the CPU cgroup controller with uclamp.min and uclamp.max to
   allow the finer shaping of CPU bandwidth usage.

 - Micro-optimize energy-aware wake-ups from O(CPUS^2) to O(CPUS).

 - Improve the behavior of high CPU count, high thread count
   applications running under cpu.cfs_quota_us constraints.

 - Improve balancing with SCHED_IDLE (SCHED_BATCH) tasks present.

 - Improve CPU isolation housekeeping CPU allocation NUMA locality.

 - Fix deadline scheduler bandwidth calculations and logic when cpusets
   rebuilds the topology, or when it gets deadline-throttled while it's
   being offlined.

 - Convert the cpuset_mutex to percpu_rwsem, to allow it to be used
   from setscheduler() system calls without creating global
   serialization. Add new synchronization between cpuset
   topology-changing events and the deadline acceptance tests in
   setscheduler(), which were broken before.

 - Rework the active_mm state machine to be less confusing and more
   optimal.

 - Rework (simplify) the pick_next_task() slowpath.

 - Improve load-balancing on AMD EPYC systems.

 - ... and misc cleanups, smaller fixes and improvements - please see
   the Git log for more details.

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (53 commits)
  sched/psi: Correct overly pessimistic size calculation
  sched/fair: Speed-up energy-aware wake-ups
  sched/uclamp: Always use 'enum uclamp_id' for clamp_id values
  sched/uclamp: Update CPU's refcount on TG's clamp changes
  sched/uclamp: Use TG's clamps to restrict TASK's clamps
  sched/uclamp: Propagate system defaults to the root group
  sched/uclamp: Propagate parent clamps
  sched/uclamp: Extend CPU's cgroup controller
  sched/topology: Improve load balancing on AMD EPYC systems
  arch, ia64: Make NUMA select SMP
  sched, perf: MAINTAINERS update, add submaintainers and reviewers
  sched/fair: Use rq_lock/unlock in online_fair_sched_group
  cpufreq: schedutil: fix equation in comment
  sched: Rework pick_next_task() slow-path
  sched: Allow put_prev_task() to drop rq->lock
  sched/fair: Expose newidle_balance()
  sched: Add task_struct pointer to sched_class::set_curr_task
  sched: Rework CPU hotplug task selection
  sched/{rt,deadline}: Fix set_next_task vs pick_next_task
  sched: Fix kerneldoc comment for ia64_set_curr_task
  ...
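The CONFIG_PREEMPT -> CONFIG_PREEMPTION split described in the pull message
above is exactly what the kernel/rcu/tree.c hunks below apply. The convention
is simple: code that only asks "is this kernel preemptible?" now tests
CONFIG_PREEMPTION, which CONFIG_PREEMPT selects (and which CONFIG_PREEMPT_RT
is intended to select later). A minimal sketch of the pattern follows; the
kernel_is_preemptible() wrapper is hypothetical, and only the IS_ENABLED()
macro from <linux/kconfig.h> is existing kernel API:

#include <linux/kconfig.h>	/* IS_ENABLED() */
#include <linux/types.h>	/* bool */

/*
 * Hypothetical wrapper illustrating the new convention: tests that mean
 * "the kernel is preemptible" key off CONFIG_PREEMPTION instead of
 * CONFIG_PREEMPT, so a future CONFIG_PREEMPT_RT can satisfy them too.
 */
static inline bool kernel_is_preemptible(void)
{
	/* Before this series: return IS_ENABLED(CONFIG_PREEMPT); */
	return IS_ENABLED(CONFIG_PREEMPTION);
}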
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--   kernel/rcu/tree.c   12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 71395e91b876..81105141b6a8 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1912,7 +1912,7 @@ rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
 	struct rcu_node *rnp_p;
 
 	raw_lockdep_assert_held_rcu_node(rnp);
-	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT)) ||
+	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPTION)) ||
 	    WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
 	    rnp->qsmask != 0) {
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -2266,7 +2266,7 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
 		mask = 0;
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		if (rnp->qsmask == 0) {
-			if (!IS_ENABLED(CONFIG_PREEMPT) ||
+			if (!IS_ENABLED(CONFIG_PREEMPTION) ||
 			    rcu_preempt_blocked_readers_cgp(rnp)) {
 				/*
 				 * No point in scanning bits because they
@@ -2681,7 +2681,7 @@ static int rcu_blocking_is_gp(void)
 {
 	int ret;
 
-	if (IS_ENABLED(CONFIG_PREEMPT))
+	if (IS_ENABLED(CONFIG_PREEMPTION))
 		return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
 	might_sleep();  /* Check for RCU read-side critical section. */
 	preempt_disable();
@@ -3297,13 +3297,13 @@ static int __init rcu_spawn_gp_kthread(void)
 	t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
 	if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
 		return 0;
-	rnp = rcu_get_root();
-	raw_spin_lock_irqsave_rcu_node(rnp, flags);
-	rcu_state.gp_kthread = t;
 	if (kthread_prio) {
 		sp.sched_priority = kthread_prio;
 		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 	}
+	rnp = rcu_get_root();
+	raw_spin_lock_irqsave_rcu_node(rnp, flags);
+	rcu_state.gp_kthread = t;
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	wake_up_process(t);
 	rcu_spawn_nocb_kthreads();
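Beyond the CONFIG_PREEMPTION rename, the last hunk reorders
rcu_spawn_gp_kthread(): the optional SCHED_FIFO priority boost now happens
before the grace-period kthread is published under the root rcu_node lock,
which lines up with the cpuset_mutex -> percpu_rwsem conversion in this merge
(sched_setscheduler_nocheck() should not run under a raw spinlock if
setscheduler can now block). A rough post-patch reading of the function,
reconstructed only from the hunk above; the local declarations and the
trailing return are assumptions, and code outside the hunk is omitted:

/*
 * Sketch only: rcu_spawn_gp_kthread() as it reads after this merge,
 * reconstructed from the hunk above.  Declarations and the trailing
 * return are assumed; anything outside the hunk is not shown.
 */
static int __init rcu_spawn_gp_kthread(void)
{
	unsigned long flags;
	struct rcu_node *rnp;
	struct sched_param sp;
	struct task_struct *t;

	t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
		return 0;
	if (kthread_prio) {		/* Boost priority first, with no locks held... */
		sp.sched_priority = kthread_prio;
		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	}
	rnp = rcu_get_root();		/* ...then publish the kthread under the root rcu_node lock. */
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rcu_state.gp_kthread = t;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	wake_up_process(t);
	rcu_spawn_nocb_kthreads();
	return 0;			/* Assumed; not visible in the hunk. */
}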