Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  22
1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index b44b9a43b0fc..a2be2d055299 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4456,9 +4456,9 @@ asmlinkage long sys_sched_yield(void)
 	return 0;
 }
 
-static inline int __resched_legal(void)
+static inline int __resched_legal(int expected_preempt_count)
 {
-	if (unlikely(preempt_count()))
+	if (unlikely(preempt_count() != expected_preempt_count))
 		return 0;
 	if (unlikely(system_state != SYSTEM_RUNNING))
 		return 0;
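__resched_legal() is generalized here to take the preempt count that is legal at its call site instead of hard-coding zero; each caller below passes the count its context implies.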
@@ -4484,7 +4484,7 @@ static void __cond_resched(void)
 
 int __sched cond_resched(void)
 {
-	if (need_resched() && __resched_legal()) {
+	if (need_resched() && __resched_legal(0)) {
 		__cond_resched();
 		return 1;
 	}
@@ -4510,7 +4510,7 @@ int cond_resched_lock(spinlock_t *lock)
 		ret = 1;
 		spin_lock(lock);
 	}
-	if (need_resched() && __resched_legal()) {
+	if (need_resched() && __resched_legal(1)) {
 		spin_release(&lock->dep_map, 1, _THIS_IP_);
 		_raw_spin_unlock(lock);
 		preempt_enable_no_resched();
@@ -4526,7 +4526,7 @@ int __sched cond_resched_softirq(void)
 {
 	BUG_ON(!in_softirq());
 
-	if (need_resched() && __resched_legal()) {
+	if (need_resched() && __resched_legal(0)) {
 		raw_local_irq_disable();
 		_local_bh_enable();
 		raw_local_irq_enable();
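Taken together, the call sites encode their context: cond_resched() and cond_resched_softirq() run with no spinlocks held and pass 0, while cond_resched_lock() is entered with the lock held, where preempt_count() is 1 on preemptible kernels. A minimal, hypothetical sketch of the cond_resched_lock() pattern (my_lock, my_table, MY_TABLE_SIZE, and clear_table() are invented for illustration, not part of this patch):

#include <linux/spinlock.h>
#include <linux/sched.h>

#define MY_TABLE_SIZE 1024			/* hypothetical */

static DEFINE_SPINLOCK(my_lock);		/* hypothetical */
static void *my_table[MY_TABLE_SIZE];		/* hypothetical */

/* A long scan that holds my_lock but yields with cond_resched_lock();
 * while the lock is held, preempt_count() is 1, which is what
 * __resched_legal(1) now checks for inside cond_resched_lock(). */
static void clear_table(void)
{
	int i;

	spin_lock(&my_lock);
	for (i = 0; i < MY_TABLE_SIZE; i++) {
		my_table[i] = NULL;		/* stand-in per-slot work */
		cond_resched_lock(&my_lock);	/* may drop and retake my_lock */
	}
	spin_unlock(&my_lock);
}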
@@ -6494,7 +6494,12 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 	for (i = 0; i < MAX_NUMNODES; i++)
 		init_numa_sched_groups_power(sched_group_nodes[i]);
 
-	init_numa_sched_groups_power(sched_group_allnodes);
+	if (sched_group_allnodes) {
+		int group = cpu_to_allnodes_group(first_cpu(*cpu_map));
+		struct sched_group *sg = &sched_group_allnodes[group];
+
+		init_numa_sched_groups_power(sg);
+	}
 #endif
 
 	/* Attach the domains */
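The old allnodes line handed init_numa_sched_groups_power() the sched_group_allnodes pointer itself, which is NULL whenever no allnodes domain was built for this cpu_map and otherwise points at the base of the group array rather than at a particular group. The new code guards on the pointer and initializes the specific group covering the map's first CPU.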
@@ -6761,6 +6766,11 @@ void __init sched_init(void)
 	}
 
 	set_load_weight(&init_task);
+
+#ifdef CONFIG_RT_MUTEXES
+	plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
+#endif
+
 	/*
 	 * The boot idle thread does lazy MMU switching as well:
 	 */
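The final hunk initializes the boot task's priority-inheritance state in sched_init(): init_task.pi_waiters is a plist (priority-sorted list) of rt-mutex waiters, and in this kernel plist_head_init() also takes the protecting lock for CONFIG_DEBUG_PI_LIST consistency checks. A minimal sketch of the same idiom on an invented structure (demo_lock, demo_waiters, and demo_init() are hypothetical):

#include <linux/plist.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical */
static struct plist_head demo_waiters;	/* hypothetical */

static void demo_init(void)
{
	/* Same call as in sched_init() above: start with an empty
	 * priority list; the lock is only recorded for debug checks
	 * when CONFIG_DEBUG_PI_LIST is enabled. */
	plist_head_init(&demo_waiters, &demo_lock);
}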