Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  58
1 file changed, 32 insertions(+), 26 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 52b98675acb2..3f7c5eb254e2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -301,7 +301,7 @@ struct cfs_rq {
 	/* 'curr' points to currently running entity on this cfs_rq.
 	 * It is set to NULL otherwise (i.e when none are currently running).
 	 */
-	struct sched_entity *curr;
+	struct sched_entity *curr, *next;
 
 	unsigned long nr_spread_over;
 
@@ -1084,7 +1084,7 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight,
 	u64 tmp;
 
 	if (unlikely(!lw->inv_weight))
-		lw->inv_weight = (WMULT_CONST - lw->weight/2) / lw->weight + 1;
+		lw->inv_weight = (WMULT_CONST-lw->weight/2) / (lw->weight+1);
 
 	tmp = (u64)delta_exec * weight;
 	/*
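The expression on the + side precomputes a rounded fixed-point reciprocal of the queue weight, so the hot path can replace a 64-bit division with a multiply and a shift. The userspace sketch below works through one case; it assumes the 32-bit definitions WMULT_CONST == ~0UL and WMULT_SHIFT == 32 from sched.c of this era, and the sample weights are made up:

#include <stdint.h>
#include <stdio.h>

#define WMULT_CONST	0xffffffffUL	/* assumption: the 32-bit (~0UL) case */
#define WMULT_SHIFT	32

int main(void)
{
	unsigned long weight = 1024;		/* one nice-0 task (NICE_0_LOAD) */
	unsigned long lw_weight = 3072;		/* queue load: three nice-0 tasks */
	unsigned long delta_exec = 4000000;	/* 4 ms of wall clock, in ns */

	/* the rounded reciprocal computed on the + side of the hunk */
	unsigned long inv_weight = (WMULT_CONST - lw_weight/2) / (lw_weight + 1);

	/* delta_exec * weight / lw_weight, without the division: */
	uint64_t tmp = (uint64_t)delta_exec * weight;
	uint64_t approx = (tmp * inv_weight) >> WMULT_SHIFT;

	printf("approx %llu vs exact %llu\n",
	       (unsigned long long)approx,
	       (unsigned long long)(tmp / lw_weight));
	return 0;
}

With these numbers the approximation yields about 1332900 ns against an exact 1333333 ns, i.e. one third of the 4 ms slice, off by well under 0.1%.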
@@ -1108,11 +1108,13 @@ calc_delta_fair(unsigned long delta_exec, struct load_weight *lw)
 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
 {
 	lw->weight += inc;
+	lw->inv_weight = 0;
 }
 
 static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
 {
 	lw->weight -= dec;
+	lw->inv_weight = 0;
 }
 
 /*
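inv_weight is a cache of the reciprocal computed above; once the weight itself changes, that cache is stale, so both update paths now zero it and let calc_delta_mine() recompute it lazily on next use. A minimal model of the invalidate-then-recompute contract, with simplified userspace types:

struct load_weight {
	unsigned long weight;
	unsigned long inv_weight;	/* 0 means: stale, recompute on use */
};

static void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;		/* invalidate the cached reciprocal */
}

static unsigned long inv_weight_of(struct load_weight *lw)
{
	if (!lw->inv_weight)		/* recompute only when invalidated */
		lw->inv_weight = (0xffffffffUL - lw->weight/2)
				 / (lw->weight + 1);
	return lw->inv_weight;
}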
@@ -1394,6 +1396,12 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 {
 	s64 delta;
 
+	/*
+	 * Buddy candidates are cache hot:
+	 */
+	if (&p->se == cfs_rq_of(&p->se)->next)
+		return 1;
+
 	if (p->sched_class != &fair_sched_class)
 		return 0;
 
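cfs_rq->next, the field added in the first hunk, marks the wakeup "buddy" the fair class wants to run next; declaring it cache hot here keeps the load balancer from migrating it to another CPU just before it gets to run, which would throw away the cache affinity it shares with its waker. A toy model of that decision, with made-up userspace types:

#include <stdio.h>

struct sched_entity { const char *name; };
struct cfs_rq { struct sched_entity *curr, *next; };

static int task_hot(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/* Buddy candidates are cache hot: */
	if (se == cfs_rq->next)
		return 1;
	/* (the real task_hot() also compares the entity's runtime
	 * against the scheduling domain's migration cost) */
	return 0;
}

int main(void)
{
	struct sched_entity waker = { "waker" }, wakee = { "wakee" };
	struct cfs_rq rq = { .curr = &waker, .next = &wakee };

	printf("%s hot? %d\n", waker.name, task_hot(&rq, &waker));	/* 0 */
	printf("%s hot? %d\n", wakee.name, task_hot(&rq, &wakee));	/* 1 */
	return 0;
}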
@@ -1853,10 +1861,11 @@ out_activate:
 	schedstat_inc(p, se.nr_wakeups_remote);
 	update_rq_clock(rq);
 	activate_task(rq, p, 1);
-	check_preempt_curr(rq, p);
 	success = 1;
 
 out_running:
+	check_preempt_curr(rq, p);
+
 	p->state = TASK_RUNNING;
 #ifdef CONFIG_SMP
 	if (p->sched_class->task_wake_up)
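Hoisting check_preempt_curr() under the out_running label means the preemption test now runs on every successful wakeup, including the early-exit path taken when the task is already on the runqueue, not only after a fresh activation. A runnable model of that control flow, with all helpers stubbed and names borrowed from sched.c:

#include <stdio.h>

static void activate_task(void)      { puts("  activate_task"); }
static void check_preempt_curr(void) { puts("  check_preempt_curr"); }

static void try_to_wake_up(int already_on_rq)
{
	if (already_on_rq)
		goto out_running;	/* skip activation entirely */

	activate_task();

out_running:
	check_preempt_curr();		/* now reached on both paths */
	puts("  TASK_RUNNING");
}

int main(void)
{
	puts("fresh wakeup:");
	try_to_wake_up(0);
	puts("already queued:");
	try_to_wake_up(1);
	return 0;
}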
@@ -1890,6 +1899,8 @@ static void __sched_fork(struct task_struct *p)
 	p->se.exec_start = 0;
 	p->se.sum_exec_runtime = 0;
 	p->se.prev_sum_exec_runtime = 0;
+	p->se.last_wakeup = 0;
+	p->se.avg_overlap = 0;
 
 #ifdef CONFIG_SCHEDSTATS
 	p->se.wait_start = 0;
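last_wakeup and avg_overlap are the two new per-entity fields behind the buddy logic: last_wakeup timestamps the most recent wakeup, and avg_overlap is a smoothed estimate of how long the task keeps running after being woken. __sched_fork() zeroes them so a child starts with no inherited history. One plausible way to maintain such a running average is sketched below; the actual update code lives elsewhere in sched.c, and the 1/8 weighting here is an assumption:

#include <stdint.h>

struct se_model {
	uint64_t last_wakeup;	/* set at wakeup time */
	uint64_t avg_overlap;	/* smoothed post-wakeup runtime */
};

/* Assumed exponential average: move 1/8 of the way to each sample. */
static void update_avg(uint64_t *avg, uint64_t sample)
{
	int64_t diff = (int64_t)(sample - *avg);

	*avg += diff >> 3;
}

static void on_sleep(struct se_model *se, uint64_t now)
{
	update_avg(&se->avg_overlap, now - se->last_wakeup);
}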
@@ -4268,11 +4279,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	oldprio = p->prio;
 	on_rq = p->se.on_rq;
 	running = task_current(rq, p);
-	if (on_rq) {
+	if (on_rq)
 		dequeue_task(rq, p, 0);
-		if (running)
-			p->sched_class->put_prev_task(rq, p);
-	}
+	if (running)
+		p->sched_class->put_prev_task(rq, p);
 
 	if (rt_prio(prio))
 		p->sched_class = &rt_sched_class;
@@ -4281,10 +4291,9 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
 	p->prio = prio;
 
+	if (running)
+		p->sched_class->set_curr_task(rq);
 	if (on_rq) {
-		if (running)
-			p->sched_class->set_curr_task(rq);
-
 		enqueue_task(rq, p, 0);
 
 		check_class_changed(rq, p, prev_class, oldprio, running);
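These two hunks, and their twins in sched_setscheduler() and sched_move_task() below, untangle the running test from the on_rq test: a task can be current without being queued, so put_prev_task()/set_curr_task() now bracket the attribute change independently of dequeue_task()/enqueue_task(). The recurring shape, as a compilable sketch with stubbed helpers:

#include <stdio.h>

struct task { int on_rq, running; };

static void dequeue_task(struct task *p)  { (void)p; puts("dequeue_task"); }
static void enqueue_task(struct task *p)  { (void)p; puts("enqueue_task"); }
static void put_prev_task(struct task *p) { (void)p; puts("put_prev_task"); }
static void set_curr_task(struct task *p) { (void)p; puts("set_curr_task"); }

static void change_task(struct task *p, void (*change)(struct task *))
{
	int on_rq = p->on_rq, running = p->running;

	if (on_rq)
		dequeue_task(p);
	if (running)
		put_prev_task(p);	/* no longer nested under on_rq */

	change(p);			/* prio / class / group update */

	if (running)
		set_curr_task(p);	/* reinstall as curr first */
	if (on_rq)
		enqueue_task(p);	/* then requeue */
}

static void set_prio(struct task *p) { (void)p; puts("  <change prio>"); }

int main(void)
{
	struct task p = { .on_rq = 1, .running = 1 };

	change_task(&p, set_prio);
	return 0;
}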
@@ -4581,19 +4590,17 @@ recheck:
 	update_rq_clock(rq);
 	on_rq = p->se.on_rq;
 	running = task_current(rq, p);
-	if (on_rq) {
+	if (on_rq)
 		deactivate_task(rq, p, 0);
-		if (running)
-			p->sched_class->put_prev_task(rq, p);
-	}
+	if (running)
+		p->sched_class->put_prev_task(rq, p);
 
 	oldprio = p->prio;
 	__setscheduler(rq, p, policy, param->sched_priority);
 
+	if (running)
+		p->sched_class->set_curr_task(rq);
 	if (on_rq) {
-		if (running)
-			p->sched_class->set_curr_task(rq);
-
 		activate_task(rq, p, 0);
 
 		check_class_changed(rq, p, prev_class, oldprio, running);
@@ -5881,7 +5888,8 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		spin_unlock_irq(&rq->lock);
 		break;
 
-	case CPU_DOWN_PREPARE:
+	case CPU_DYING:
+	case CPU_DYING_FROZEN:
 		/* Update our root-domain */
 		rq = cpu_rq(cpu);
 		spin_lock_irqsave(&rq->lock, flags);
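The root-domain update moves from CPU_DOWN_PREPARE, which fires while the CPU is still running and the offline can still be aborted, to CPU_DYING, which runs once the offline is committed; the _FROZEN variant is the same event on the suspend/resume path and must be handled identically. The shape of such a notifier, with illustrative values rather than the kernel's actual constants:

#include <stdio.h>

enum { CPU_DOWN_PREPARE, CPU_DYING, CPU_DYING_FROZEN };

static int migration_call(unsigned long action, int cpu)
{
	switch (action) {
	case CPU_DYING:
	case CPU_DYING_FROZEN:		/* same work on the suspend path */
		printf("cpu %d: update root domain\n", cpu);
		break;
	default:
		break;
	}
	return 0;
}

int main(void)
{
	migration_call(CPU_DYING, 1);
	migration_call(CPU_DOWN_PREPARE, 1);	/* no root-domain work here now */
	return 0;
}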
@@ -7617,11 +7625,10 @@ void sched_move_task(struct task_struct *tsk)
 	running = task_current(rq, tsk);
 	on_rq = tsk->se.on_rq;
 
-	if (on_rq) {
+	if (on_rq)
 		dequeue_task(rq, tsk, 0);
-		if (unlikely(running))
-			tsk->sched_class->put_prev_task(rq, tsk);
-	}
+	if (unlikely(running))
+		tsk->sched_class->put_prev_task(rq, tsk);
 
 	set_task_rq(tsk, task_cpu(tsk));
 
@@ -7630,11 +7637,10 @@ void sched_move_task(struct task_struct *tsk)
 		tsk->sched_class->moved_group(tsk);
 #endif
 
-	if (on_rq) {
-		if (unlikely(running))
-			tsk->sched_class->set_curr_task(rq);
+	if (unlikely(running))
+		tsk->sched_class->set_curr_task(rq);
+	if (on_rq)
 		enqueue_task(rq, tsk, 0);
-	}
 
 	task_rq_unlock(rq, &flags);
 }