Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 26 +++++++++++++++++---------
1 file changed, 17 insertions(+), 9 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index b44b9a43b0fc..a234fbee1238 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4162,10 +4162,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
                 read_unlock_irq(&tasklist_lock);
                 return -ESRCH;
         }
-        get_task_struct(p);
-        read_unlock_irq(&tasklist_lock);
         retval = sched_setscheduler(p, policy, &lparam);
-        put_task_struct(p);
+        read_unlock_irq(&tasklist_lock);
 
         return retval;
 }
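
This hunk drops the get_task_struct()/put_task_struct() reference pair and instead keeps tasklist_lock read-held across the sched_setscheduler() call, so the target task cannot be freed while its policy and priority are being changed. A minimal sketch of the resulting function, assuming the usual copy-in and find_process_by_pid() lookup from the unchanged context above the hunk:

static int
do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
{
        struct sched_param lparam;
        struct task_struct *p;
        int retval;

        if (!param || pid < 0)
                return -EINVAL;
        if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
                return -EFAULT;

        read_lock_irq(&tasklist_lock);
        p = find_process_by_pid(pid);
        if (!p) {
                read_unlock_irq(&tasklist_lock);
                return -ESRCH;
        }
        /* tasklist_lock pins p across the call instead of a refcount. */
        retval = sched_setscheduler(p, policy, &lparam);
        read_unlock_irq(&tasklist_lock);

        return retval;
}
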
@@ -4456,9 +4454,9 @@ asmlinkage long sys_sched_yield(void)
         return 0;
 }
 
-static inline int __resched_legal(void)
+static inline int __resched_legal(int expected_preempt_count)
 {
-        if (unlikely(preempt_count()))
+        if (unlikely(preempt_count() != expected_preempt_count))
                 return 0;
         if (unlikely(system_state != SYSTEM_RUNNING))
                 return 0;
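
__resched_legal() used to require a preempt count of exactly zero; it now takes the count the call site expects, so callers that legitimately run with preemption disabled (for example while holding one spinlock) can still pass the check. Assuming the unchanged tail below the hunk simply returns 1, the whole helper reads:

static inline int __resched_legal(int expected_preempt_count)
{
        /* Only reschedule from the context the caller claims to be in. */
        if (unlikely(preempt_count() != expected_preempt_count))
                return 0;
        /* No voluntary rescheduling while booting or shutting down. */
        if (unlikely(system_state != SYSTEM_RUNNING))
                return 0;
        return 1;
}
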
@@ -4484,7 +4482,7 @@ static void __cond_resched(void)
 
 int __sched cond_resched(void)
 {
-        if (need_resched() && __resched_legal()) {
+        if (need_resched() && __resched_legal(0)) {
                 __cond_resched();
                 return 1;
         }
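
For plain cond_resched() the effect is unchanged: it is called from unlocked process context, so the expected preempt count is 0.
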
@@ -4510,7 +4508,7 @@ int cond_resched_lock(spinlock_t *lock)
                 ret = 1;
                 spin_lock(lock);
         }
-        if (need_resched() && __resched_legal()) {
+        if (need_resched() && __resched_legal(1)) {
                 spin_release(&lock->dep_map, 1, _THIS_IP_);
                 _raw_spin_unlock(lock);
                 preempt_enable_no_resched();
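
cond_resched_lock() is the caller the new parameter exists for: it still holds the spinlock at the check, and on a preemptible kernel spin_lock() raises the preempt count by one, so the old zero-only test could never succeed here. With __resched_legal(1) it can release the lock (notifying lockdep via spin_release()), reschedule, and re-acquire the lock below this hunk.
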
@@ -4526,7 +4524,7 @@ int __sched cond_resched_softirq(void)
 {
         BUG_ON(!in_softirq());
 
-        if (need_resched() && __resched_legal()) {
+        if (need_resched() && __resched_legal(0)) {
                 raw_local_irq_disable();
                 _local_bh_enable();
                 raw_local_irq_enable();
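
cond_resched_softirq() runs with softirqs disabled, as the BUG_ON() asserts. Around the reschedule it re-enables them via _local_bh_enable(), which unlike local_bh_enable() does not run pending softirqs, hence the hard-irq-disabled bracket around it; softirqs are disabled again after __cond_resched(), below this hunk.
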
@@ -6494,7 +6492,12 @@ static int build_sched_domains(const cpumask_t *cpu_map)
         for (i = 0; i < MAX_NUMNODES; i++)
                 init_numa_sched_groups_power(sched_group_nodes[i]);
 
-        init_numa_sched_groups_power(sched_group_allnodes);
+        if (sched_group_allnodes) {
+                int group = cpu_to_allnodes_group(first_cpu(*cpu_map));
+                struct sched_group *sg = &sched_group_allnodes[group];
+
+                init_numa_sched_groups_power(sg);
+        }
 #endif
 
         /* Attach the domains */
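
init_numa_sched_groups_power() takes a single sched_group and walks its circular list. Handing it the raw sched_group_allnodes pointer was wrong in two ways: the pointer is NULL whenever build_sched_domains() set up no allnodes level, and the first array slot need not correspond to the groups actually built for cpu_map. The replacement guards against NULL and starts from the group of the map's first CPU, via cpu_to_allnodes_group().
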
@@ -6761,6 +6764,11 @@ void __init sched_init(void)
         }
 
         set_load_weight(&init_task);
+
+#ifdef CONFIG_RT_MUTEXES
+        plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
+#endif
+
         /*
          * The boot idle thread does lazy MMU switching as well:
          */
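
With CONFIG_RT_MUTEXES every task_struct carries pi_waiters, a priority-sorted list (plist) of the top waiters blocked on rt-mutexes the task holds. Forked tasks get it set up in the fork path, but the boot thread's init_task is hand-built, so sched_init() has to initialize its plist explicitly. An illustrative plist sketch, with demo_lock/demo_waiters as made-up names (the lock argument matches the era's two-argument plist_head_init() seen in the hunk and is only used for list debugging):

#include <linux/plist.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);      /* hypothetical */
static struct plist_head demo_waiters;  /* hypothetical */

static void demo(void)
{
        struct plist_node waiter;

        plist_head_init(&demo_waiters, &demo_lock);
        plist_node_init(&waiter, 10);           /* node with priority 10 */
        plist_add(&waiter, &demo_waiters);      /* inserted in prio order */
        plist_del(&waiter, &demo_waiters);      /* remove before it goes out of scope */
}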