summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMathieu Poirier <mathieu.poirier@linaro.org>2019-07-19 09:59:54 -0400
committerIngo Molnar <mingo@kernel.org>2019-07-25 09:51:57 -0400
commit4b211f2b129dd1f6a6956bbc76e2f232c1ec3ad8 (patch)
tree7b4fbe72ad4673c01c5a3159dcac0232abade07a
parentc22645f4c8f021fb1c5e7189eb1f968132cc0844 (diff)
sched/core: Streamline calls to task_rq_unlock()
Calls to task_rq_unlock() are done several times in the __sched_setscheduler() function. This is fine when only the rq lock needs to be handled but not so much when other locks come into play. This patch streamlines the release of the rq lock so that only one location needs to be modified when dealing with more than one lock. No change of functionality is introduced by this patch. Tested-by: Dietmar Eggemann <dietmar.eggemann@arm.com> Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org> Acked-by: Tejun Heo <tj@kernel.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: bristot@redhat.com Cc: claudio@evidence.eu.com Cc: lizefan@huawei.com Cc: longman@redhat.com Cc: luca.abeni@santannapisa.it Cc: tommaso.cucinotta@santannapisa.it Link: https://lkml.kernel.org/r/20190719140000.31694-3-juri.lelli@redhat.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--kernel/sched/core.c24
1 files changed, 14 insertions, 10 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0b22e55cebe8..1af3d2dc6b29 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4712,8 +4712,8 @@ recheck:
4712 * Changing the policy of the stop threads its a very bad idea: 4712 * Changing the policy of the stop threads its a very bad idea:
4713 */ 4713 */
4714 if (p == rq->stop) { 4714 if (p == rq->stop) {
4715 task_rq_unlock(rq, p, &rf); 4715 retval = -EINVAL;
4716 return -EINVAL; 4716 goto unlock;
4717 } 4717 }
4718 4718
4719 /* 4719 /*
@@ -4731,8 +4731,8 @@ recheck:
4731 goto change; 4731 goto change;
4732 4732
4733 p->sched_reset_on_fork = reset_on_fork; 4733 p->sched_reset_on_fork = reset_on_fork;
4734 task_rq_unlock(rq, p, &rf); 4734 retval = 0;
4735 return 0; 4735 goto unlock;
4736 } 4736 }
4737change: 4737change:
4738 4738
@@ -4745,8 +4745,8 @@ change:
4745 if (rt_bandwidth_enabled() && rt_policy(policy) && 4745 if (rt_bandwidth_enabled() && rt_policy(policy) &&
4746 task_group(p)->rt_bandwidth.rt_runtime == 0 && 4746 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
4747 !task_group_is_autogroup(task_group(p))) { 4747 !task_group_is_autogroup(task_group(p))) {
4748 task_rq_unlock(rq, p, &rf); 4748 retval = -EPERM;
4749 return -EPERM; 4749 goto unlock;
4750 } 4750 }
4751#endif 4751#endif
4752#ifdef CONFIG_SMP 4752#ifdef CONFIG_SMP
@@ -4761,8 +4761,8 @@ change:
4761 */ 4761 */
4762 if (!cpumask_subset(span, p->cpus_ptr) || 4762 if (!cpumask_subset(span, p->cpus_ptr) ||
4763 rq->rd->dl_bw.bw == 0) { 4763 rq->rd->dl_bw.bw == 0) {
4764 task_rq_unlock(rq, p, &rf); 4764 retval = -EPERM;
4765 return -EPERM; 4765 goto unlock;
4766 } 4766 }
4767 } 4767 }
4768#endif 4768#endif
@@ -4781,8 +4781,8 @@ change:
4781 * is available. 4781 * is available.
4782 */ 4782 */
4783 if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) { 4783 if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
4784 task_rq_unlock(rq, p, &rf); 4784 retval = -EBUSY;
4785 return -EBUSY; 4785 goto unlock;
4786 } 4786 }
4787 4787
4788 p->sched_reset_on_fork = reset_on_fork; 4788 p->sched_reset_on_fork = reset_on_fork;
@@ -4840,6 +4840,10 @@ change:
4840 preempt_enable(); 4840 preempt_enable();
4841 4841
4842 return 0; 4842 return 0;
4843
4844unlock:
4845 task_rq_unlock(rq, p, &rf);
4846 return retval;
4843} 4847}
4844 4848
4845static int _sched_setscheduler(struct task_struct *p, int policy, 4849static int _sched_setscheduler(struct task_struct *p, int policy,