author     Peter Zijlstra <peterz@infradead.org>    2015-07-31 15:28:18 -0400
committer  Ingo Molnar <mingo@kernel.org>           2016-05-05 03:23:59 -0400
commit     eb58075149b7f0300ff19142e6245fe75db2a081 (patch)
tree       05d413ba23958f62067ccd079859e81c7f822663 /kernel/sched
parent     3e71a462dd483ce508a723356b293731e7d788ea (diff)
sched/core: Introduce 'struct rq_flags'
In order to be able to pass around more than just the IRQ flags in the
future, add a rq_flags structure.

No difference in code generation for the x86_64-defconfig build I
tested.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
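For illustration, the calling convention after this change bundles the saved
IRQ state into the rq_flags structure instead of a bare unsigned long. A
minimal sketch of a caller, simplified from the hunks below:

        struct rq_flags rf;
        struct rq *rq;

        rq = task_rq_lock(p, &rf);      /* takes p->pi_lock and rq->lock, saves IRQ state in rf.flags */
        /* ... inspect or modify p's runqueue state with both locks held ... */
        task_rq_unlock(rq, p, &rf);     /* drops both locks, restores IRQ state from rf.flags */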
Diffstat (limited to 'kernel/sched')
-rw-r--r--    kernel/sched/core.c        98
-rw-r--r--    kernel/sched/deadline.c     6
-rw-r--r--    kernel/sched/sched.h       14
3 files changed, 62 insertions, 56 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 1b609a886795..7c7db60115b4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -173,7 +173,7 @@ static struct rq *this_rq_lock(void)
 /*
  * __task_rq_lock - lock the rq @p resides on.
  */
-struct rq *__task_rq_lock(struct task_struct *p)
+struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
        __acquires(rq->lock)
 {
        struct rq *rq;
@@ -197,14 +197,14 @@ struct rq *__task_rq_lock(struct task_struct *p)
 /*
  * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
  */
-struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
+struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
        __acquires(p->pi_lock)
        __acquires(rq->lock)
 {
        struct rq *rq;

        for (;;) {
-               raw_spin_lock_irqsave(&p->pi_lock, *flags);
+               raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
                rq = task_rq(p);
                raw_spin_lock(&rq->lock);
                /*
@@ -228,7 +228,7 @@ struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
                        return rq;
                }
                raw_spin_unlock(&rq->lock);
-               raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+               raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);

                while (unlikely(task_on_rq_migrating(p)))
                        cpu_relax();
@@ -1150,12 +1150,12 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 static int __set_cpus_allowed_ptr(struct task_struct *p,
                                   const struct cpumask *new_mask, bool check)
 {
-       unsigned long flags;
-       struct rq *rq;
        unsigned int dest_cpu;
+       struct rq_flags rf;
+       struct rq *rq;
        int ret = 0;

-       rq = task_rq_lock(p, &flags);
+       rq = task_rq_lock(p, &rf);

        /*
         * Must re-check here, to close a race against __kthread_bind(),
@@ -1184,7 +1184,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
        if (task_running(rq, p) || p->state == TASK_WAKING) {
                struct migration_arg arg = { p, dest_cpu };
                /* Need help from migration thread: drop lock and wait. */
-               task_rq_unlock(rq, p, &flags);
+               task_rq_unlock(rq, p, &rf);
                stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
                tlb_migrate_finish(p->mm);
                return 0;
@@ -1198,7 +1198,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
                lockdep_pin_lock(&rq->lock);
        }
 out:
-       task_rq_unlock(rq, p, &flags);
+       task_rq_unlock(rq, p, &rf);

        return ret;
 }
@@ -1382,8 +1382,8 @@ out:
  */
 unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 {
-       unsigned long flags;
        int running, queued;
+       struct rq_flags rf;
        unsigned long ncsw;
        struct rq *rq;

@@ -1418,14 +1418,14 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
                 * lock now, to be *sure*. If we're wrong, we'll
                 * just go back and repeat.
                 */
-               rq = task_rq_lock(p, &flags);
+               rq = task_rq_lock(p, &rf);
                trace_sched_wait_task(p);
                running = task_running(rq, p);
                queued = task_on_rq_queued(p);
                ncsw = 0;
                if (!match_state || p->state == match_state)
                        ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
-               task_rq_unlock(rq, p, &flags);
+               task_rq_unlock(rq, p, &rf);

                /*
                 * If it changed from the expected state, bail out now.
@@ -1723,17 +1723,18 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
  */
 static int ttwu_remote(struct task_struct *p, int wake_flags)
 {
+       struct rq_flags rf;
        struct rq *rq;
        int ret = 0;

-       rq = __task_rq_lock(p);
+       rq = __task_rq_lock(p, &rf);
        if (task_on_rq_queued(p)) {
                /* check_preempt_curr() may use rq clock */
                update_rq_clock(rq);
                ttwu_do_wakeup(rq, p, wake_flags);
                ret = 1;
        }
-       __task_rq_unlock(rq);
+       __task_rq_unlock(rq, &rf);

        return ret;
 }
@@ -2486,12 +2487,12 @@ extern void init_dl_bw(struct dl_bw *dl_b);
  */
 void wake_up_new_task(struct task_struct *p)
 {
-       unsigned long flags;
+       struct rq_flags rf;
        struct rq *rq;

-       raw_spin_lock_irqsave(&p->pi_lock, flags);
        /* Initialize new task's runnable average */
        init_entity_runnable_average(&p->se);
+       raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
 #ifdef CONFIG_SMP
        /*
         * Fork balancing, do it here and not earlier because:
@@ -2503,7 +2504,7 @@ void wake_up_new_task(struct task_struct *p)
        /* Post initialize new task's util average when its cfs_rq is set */
        post_init_entity_util_avg(&p->se);

-       rq = __task_rq_lock(p);
+       rq = __task_rq_lock(p, &rf);
        activate_task(rq, p, 0);
        p->on_rq = TASK_ON_RQ_QUEUED;
        trace_sched_wakeup_new(p);
@@ -2519,7 +2520,7 @@ void wake_up_new_task(struct task_struct *p)
                lockdep_pin_lock(&rq->lock);
        }
 #endif
-       task_rq_unlock(rq, p, &flags);
+       task_rq_unlock(rq, p, &rf);
 }

 #ifdef CONFIG_PREEMPT_NOTIFIERS
@@ -2935,7 +2936,7 @@ EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
  */
 unsigned long long task_sched_runtime(struct task_struct *p)
 {
-       unsigned long flags;
+       struct rq_flags rf;
        struct rq *rq;
        u64 ns;

@@ -2955,7 +2956,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
                return p->se.sum_exec_runtime;
 #endif

-       rq = task_rq_lock(p, &flags);
+       rq = task_rq_lock(p, &rf);
        /*
         * Must be ->curr _and_ ->on_rq. If dequeued, we would
         * project cycles that may never be accounted to this
@@ -2966,7 +2967,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
                p->sched_class->update_curr(rq);
        }
        ns = p->se.sum_exec_runtime;
-       task_rq_unlock(rq, p, &flags);
+       task_rq_unlock(rq, p, &rf);

        return ns;
 }
@@ -3524,12 +3525,13 @@ EXPORT_SYMBOL(default_wake_function);
 void rt_mutex_setprio(struct task_struct *p, int prio)
 {
        int oldprio, queued, running, queue_flag = DEQUEUE_SAVE | DEQUEUE_MOVE;
-       struct rq *rq;
        const struct sched_class *prev_class;
+       struct rq_flags rf;
+       struct rq *rq;

        BUG_ON(prio > MAX_PRIO);

-       rq = __task_rq_lock(p);
+       rq = __task_rq_lock(p, &rf);

        /*
         * Idle task boosting is a nono in general. There is one
@@ -3605,7 +3607,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
        check_class_changed(rq, p, prev_class, oldprio);
 out_unlock:
        preempt_disable(); /* avoid rq from going away on us */
-       __task_rq_unlock(rq);
+       __task_rq_unlock(rq, &rf);

        balance_callback(rq);
        preempt_enable();
@@ -3615,7 +3617,7 @@ out_unlock:
 void set_user_nice(struct task_struct *p, long nice)
 {
        int old_prio, delta, queued;
-       unsigned long flags;
+       struct rq_flags rf;
        struct rq *rq;

        if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
@@ -3624,7 +3626,7 @@ void set_user_nice(struct task_struct *p, long nice)
         * We have to be careful, if called from sys_setpriority(),
         * the task might be in the middle of scheduling on another CPU.
         */
-       rq = task_rq_lock(p, &flags);
+       rq = task_rq_lock(p, &rf);
        /*
         * The RT priorities are set via sched_setscheduler(), but we still
         * allow the 'normal' nice value to be set - but as expected
@@ -3655,7 +3657,7 @@ void set_user_nice(struct task_struct *p, long nice)
                resched_curr(rq);
        }
 out_unlock:
-       task_rq_unlock(rq, p, &flags);
+       task_rq_unlock(rq, p, &rf);
 }
 EXPORT_SYMBOL(set_user_nice);

@@ -3952,11 +3954,11 @@ static int __sched_setscheduler(struct task_struct *p,
                      MAX_RT_PRIO - 1 - attr->sched_priority;
        int retval, oldprio, oldpolicy = -1, queued, running;
        int new_effective_prio, policy = attr->sched_policy;
-       unsigned long flags;
        const struct sched_class *prev_class;
-       struct rq *rq;
+       struct rq_flags rf;
        int reset_on_fork;
        int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;
+       struct rq *rq;

        /* may grab non-irq protected spin_locks */
        BUG_ON(in_interrupt());
@@ -4051,13 +4053,13 @@ recheck:
         * To be able to change p->policy safely, the appropriate
         * runqueue lock must be held.
         */
-       rq = task_rq_lock(p, &flags);
+       rq = task_rq_lock(p, &rf);

        /*
         * Changing the policy of the stop threads its a very bad idea
         */
        if (p == rq->stop) {
-               task_rq_unlock(rq, p, &flags);
+               task_rq_unlock(rq, p, &rf);
                return -EINVAL;
        }

@@ -4074,7 +4076,7 @@ recheck:
                        goto change;

                p->sched_reset_on_fork = reset_on_fork;
-               task_rq_unlock(rq, p, &flags);
+               task_rq_unlock(rq, p, &rf);
                return 0;
        }
 change:
@@ -4088,7 +4090,7 @@ change:
                if (rt_bandwidth_enabled() && rt_policy(policy) &&
                                task_group(p)->rt_bandwidth.rt_runtime == 0 &&
                                !task_group_is_autogroup(task_group(p))) {
-                       task_rq_unlock(rq, p, &flags);
+                       task_rq_unlock(rq, p, &rf);
                        return -EPERM;
                }
 #endif
@@ -4103,7 +4105,7 @@ change:
                 */
                if (!cpumask_subset(span, &p->cpus_allowed) ||
                    rq->rd->dl_bw.bw == 0) {
-                       task_rq_unlock(rq, p, &flags);
+                       task_rq_unlock(rq, p, &rf);
                        return -EPERM;
                }
        }
@@ -4113,7 +4115,7 @@ change:
        /* recheck policy now with rq lock held */
        if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
                policy = oldpolicy = -1;
-               task_rq_unlock(rq, p, &flags);
+               task_rq_unlock(rq, p, &rf);
                goto recheck;
        }

@@ -4123,7 +4125,7 @@ change:
         * is available.
         */
        if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) {
-               task_rq_unlock(rq, p, &flags);
+               task_rq_unlock(rq, p, &rf);
                return -EBUSY;
        }

@@ -4168,7 +4170,7 @@ change:

        check_class_changed(rq, p, prev_class, oldprio);
        preempt_disable(); /* avoid rq from going away on us */
-       task_rq_unlock(rq, p, &flags);
+       task_rq_unlock(rq, p, &rf);

        if (pi)
                rt_mutex_adjust_pi(p);
@@ -5021,10 +5023,10 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
 {
        struct task_struct *p;
        unsigned int time_slice;
-       unsigned long flags;
+       struct rq_flags rf;
+       struct timespec t;
        struct rq *rq;
        int retval;
-       struct timespec t;

        if (pid < 0)
                return -EINVAL;
@@ -5039,11 +5041,11 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
        if (retval)
                goto out_unlock;

-       rq = task_rq_lock(p, &flags);
+       rq = task_rq_lock(p, &rf);
        time_slice = 0;
        if (p->sched_class->get_rr_interval)
                time_slice = p->sched_class->get_rr_interval(rq, p);
-       task_rq_unlock(rq, p, &flags);
+       task_rq_unlock(rq, p, &rf);

        rcu_read_unlock();
        jiffies_to_timespec(time_slice, &t);
@@ -5307,11 +5309,11 @@ int migrate_task_to(struct task_struct *p, int target_cpu)
  */
 void sched_setnuma(struct task_struct *p, int nid)
 {
-       struct rq *rq;
-       unsigned long flags;
        bool queued, running;
+       struct rq_flags rf;
+       struct rq *rq;

-       rq = task_rq_lock(p, &flags);
+       rq = task_rq_lock(p, &rf);
        queued = task_on_rq_queued(p);
        running = task_current(rq, p);

@@ -5326,7 +5328,7 @@ void sched_setnuma(struct task_struct *p, int nid)
                p->sched_class->set_curr_task(rq);
        if (queued)
                enqueue_task(rq, p, ENQUEUE_RESTORE);
-       task_rq_unlock(rq, p, &flags);
+       task_rq_unlock(rq, p, &rf);
 }
 #endif /* CONFIG_NUMA_BALANCING */

@@ -7757,10 +7759,10 @@ void sched_move_task(struct task_struct *tsk)
 {
        struct task_group *tg;
        int queued, running;
-       unsigned long flags;
+       struct rq_flags rf;
        struct rq *rq;

-       rq = task_rq_lock(tsk, &flags);
+       rq = task_rq_lock(tsk, &rf);

        running = task_current(rq, tsk);
        queued = task_on_rq_queued(tsk);
@@ -7792,7 +7794,7 @@ void sched_move_task(struct task_struct *tsk)
        if (queued)
                enqueue_task(rq, tsk, ENQUEUE_RESTORE | ENQUEUE_MOVE);

-       task_rq_unlock(rq, tsk, &flags);
+       task_rq_unlock(rq, tsk, &rf);
 }
 #endif /* CONFIG_CGROUP_SCHED */

diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 8f9b5af4e857..738e3c84dfe1 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -591,10 +591,10 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
                                                     struct sched_dl_entity,
                                                     dl_timer);
        struct task_struct *p = dl_task_of(dl_se);
-       unsigned long flags;
+       struct rq_flags rf;
        struct rq *rq;

-       rq = task_rq_lock(p, &flags);
+       rq = task_rq_lock(p, &rf);

        /*
         * The task might have changed its scheduling policy to something
@@ -677,7 +677,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 #endif

 unlock:
-       task_rq_unlock(rq, p, &flags);
+       task_rq_unlock(rq, p, &rf);

        /*
         * This can free the task_struct, including this hrtimer, do not touch
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index aab4cf05d48a..a5eecb1e5e4b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1451,13 +1451,17 @@ static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
 static inline void sched_avg_update(struct rq *rq) { }
 #endif

-struct rq *__task_rq_lock(struct task_struct *p)
+struct rq_flags {
+       unsigned long flags;
+};
+
+struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
        __acquires(rq->lock);
-struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
+struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
        __acquires(p->pi_lock)
        __acquires(rq->lock);

-static inline void __task_rq_unlock(struct rq *rq)
+static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
        __releases(rq->lock)
 {
        lockdep_unpin_lock(&rq->lock);
@@ -1465,13 +1469,13 @@ static inline void __task_rq_unlock(struct rq *rq)
 }

 static inline void
-task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
+task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
        __releases(rq->lock)
        __releases(p->pi_lock)
 {
        lockdep_unpin_lock(&rq->lock);
        raw_spin_unlock(&rq->lock);
-       raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+       raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
 }

 #ifdef CONFIG_SMP