about summary refs log tree commit diff stats
path: root/kernel/sched.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  20
1 file changed, 17 insertions, 3 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index f592ce6f8616..48013633d792 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2309,7 +2309,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
2309 * Cause a process which is running on another CPU to enter 2309 * Cause a process which is running on another CPU to enter
2310 * kernel-mode, without any delay. (to get signals handled.) 2310 * kernel-mode, without any delay. (to get signals handled.)
2311 * 2311 *
2312 * NOTE: this function doesnt have to take the runqueue lock, 2312 * NOTE: this function doesn't have to take the runqueue lock,
2313 * because all it wants to ensure is that the remote task enters 2313 * because all it wants to ensure is that the remote task enters
2314 * the kernel. If the IPI races and the task has been migrated 2314 * the kernel. If the IPI races and the task has been migrated
2315 * to another CPU then no harm is done and the purpose has been 2315 * to another CPU then no harm is done and the purpose has been
@@ -4997,7 +4997,7 @@ recheck:
4997 */ 4997 */
4998 raw_spin_lock_irqsave(&p->pi_lock, flags); 4998 raw_spin_lock_irqsave(&p->pi_lock, flags);
4999 /* 4999 /*
5000 * To be able to change p->policy safely, the apropriate 5000 * To be able to change p->policy safely, the appropriate
5001 * runqueue lock must be held. 5001 * runqueue lock must be held.
5002 */ 5002 */
5003 rq = __task_rq_lock(p); 5003 rq = __task_rq_lock(p);
@@ -5011,6 +5011,17 @@ recheck:
5011 return -EINVAL; 5011 return -EINVAL;
5012 } 5012 }
5013 5013
5014 /*
5015 * If not changing anything there's no need to proceed further:
5016 */
5017 if (unlikely(policy == p->policy && (!rt_policy(policy) ||
5018 param->sched_priority == p->rt_priority))) {
5019
5020 __task_rq_unlock(rq);
5021 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5022 return 0;
5023 }
5024
5014#ifdef CONFIG_RT_GROUP_SCHED 5025#ifdef CONFIG_RT_GROUP_SCHED
5015 if (user) { 5026 if (user) {
5016 /* 5027 /*
@@ -5705,7 +5716,7 @@ void show_state_filter(unsigned long state_filter)
5705 do_each_thread(g, p) { 5716 do_each_thread(g, p) {
5706 /* 5717 /*
5707 * reset the NMI-timeout, listing all files on a slow 5718 * reset the NMI-timeout, listing all files on a slow
5708 * console might take alot of time: 5719 * console might take a lot of time:
5709 */ 5720 */
5710 touch_nmi_watchdog(); 5721 touch_nmi_watchdog();
5711 if (!state_filter || (p->state & state_filter)) 5722 if (!state_filter || (p->state & state_filter))
@@ -6320,6 +6331,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
6320 break; 6331 break;
6321#endif 6332#endif
6322 } 6333 }
6334
6335 update_max_interval();
6336
6323 return NOTIFY_OK; 6337 return NOTIFY_OK;
6324} 6338}
6325 6339