diff options
 kernel/fork.c  | 1 +
 kernel/sched.c | 6 +++---
 2 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 3b159c5991b7..5447dc7defa9 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -273,6 +273,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 
 	setup_thread_stack(tsk, orig);
 	clear_user_return_notifier(tsk);
+	clear_tsk_need_resched(tsk);
 	stackend = end_of_stack(tsk);
 	*stackend = STACK_END_MAGIC;	/* for overflow detection */
 
diff --git a/kernel/sched.c b/kernel/sched.c
index dc91a4d09ac3..651c899a9b74 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -652,6 +652,7 @@ inline void update_rq_clock(struct rq *rq)
 
 		sched_irq_time_avg_update(rq, irq_time);
 	}
+	rq->skip_clock_update = 0;
 }
 
 /*
@@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 * A queue event has occurred, and we're going to schedule.  In
 	 * this case, we can save a useless back to back clock update.
 	 */
-	if (test_tsk_need_resched(rq->curr))
+	if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr))
 		rq->skip_clock_update = 1;
 }
 
@@ -3845,7 +3846,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
 	if (prev->se.on_rq)
 		update_rq_clock(rq);
-	rq->skip_clock_update = 0;
 	prev->sched_class->put_prev_task(rq, prev);
 }
 
@@ -3903,7 +3903,6 @@ need_resched_nonpreemptible:
 	hrtick_clear(rq);
 
 	raw_spin_lock_irq(&rq->lock);
-	clear_tsk_need_resched(prev);
 
 	switch_count = &prev->nivcsw;
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
@@ -3933,6 +3932,7 @@ need_resched_nonpreemptible:
 	if (unlikely(!rq->nr_running))
 		idle_balance(cpu, rq);
 
+	clear_tsk_need_resched(prev);
 	put_prev_task(rq, prev);
 	next = pick_next_task(rq);
 