author    Ingo Molnar <mingo@elte.hu>    2008-06-25 11:49:08 -0400
committer Ingo Molnar <mingo@elte.hu>    2008-06-25 11:49:08 -0400
commit    c7e745c6de92a757ec525fbc9a74891651a5f1c6 (patch)
tree      c91b5dbb44adea2b2353e1fc0195582328561151 /kernel
parent    688c91755dc3d3c03d8c67c1df13c02be258768e (diff)
parent    543cf4cb3fe6f6cae3651ba918b9c56200b257d0 (diff)
Merge commit 'v2.6.26-rc8' into core/softlockup
Diffstat (limited to 'kernel')
-rw-r--r--    kernel/cpuset.c         10
-rw-r--r--    kernel/futex.c          93
-rw-r--r--    kernel/kgdb.c            3
-rw-r--r--    kernel/rcupreempt.c      2
-rw-r--r--    kernel/sched.c          48
-rw-r--r--    kernel/sched_rt.c       66
-rw-r--r--    kernel/sched_stats.h     6
7 files changed, 164 insertions, 64 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 039baa4cd90c..9fceb97e989c 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1037,8 +1037,8 @@ int current_cpuset_is_being_rebound(void)
 
 static int update_relax_domain_level(struct cpuset *cs, s64 val)
 {
-        if ((int)val < 0)
-                val = -1;
+        if (val < -1 || val >= SD_LV_MAX)
+                return -EINVAL;
 
         if (val != cs->relax_domain_level) {
                 cs->relax_domain_level = val;
@@ -1890,6 +1890,12 @@ static void common_cpu_mem_hotplug_unplug(void)
         top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
         scan_for_empty_cpusets(&top_cpuset);
 
+        /*
+         * Scheduler destroys domains on hotplug events.
+         * Rebuild them based on the current settings.
+         */
+        rebuild_sched_domains();
+
         cgroup_unlock();
 }
 
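Note on the cpuset.c hunks above: update_relax_domain_level() now rejects out-of-range values with -EINVAL instead of silently clamping them, and the hotplug path rebuilds scheduler domains because the scheduler tears them down on hotplug. A hedged user-space sketch of the visible effect (the /dev/cpuset mount point and the error handling are assumptions of this example, not taken from the patch):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* assumes a cpuset hierarchy mounted at /dev/cpuset */
        int fd = open("/dev/cpuset/sched_relax_domain_level", O_WRONLY);

        if (fd < 0)
                return 1;
        /* values outside [-1, SD_LV_MAX) are now rejected, not clamped */
        if (write(fd, "99", 2) < 0 && errno == EINVAL)
                printf("out-of-range level rejected: %s\n", strerror(errno));
        close(fd);
        return 0;
}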
diff --git a/kernel/futex.c b/kernel/futex.c
index 449def8074fe..7d1136e97c14 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1096,21 +1096,64 @@ static void unqueue_me_pi(struct futex_q *q)
  * private futexes.
  */
 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
-                                struct task_struct *newowner)
+                                struct task_struct *newowner,
+                                struct rw_semaphore *fshared)
 {
         u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
         struct futex_pi_state *pi_state = q->pi_state;
+        struct task_struct *oldowner = pi_state->owner;
         u32 uval, curval, newval;
-        int ret;
+        int ret, attempt = 0;
 
         /* Owner died? */
+        if (!pi_state->owner)
+                newtid |= FUTEX_OWNER_DIED;
+
+        /*
+         * We are here either because we stole the rtmutex from the
+         * pending owner or we are the pending owner which failed to
+         * get the rtmutex. We have to replace the pending owner TID
+         * in the user space variable. This must be atomic as we have
+         * to preserve the owner died bit here.
+         *
+         * Note: We write the user space value _before_ changing the
+         * pi_state because we can fault here. Imagine swapped out
+         * pages or a fork, which was running right before we acquired
+         * mmap_sem, that marked all the anonymous memory readonly for
+         * cow.
+         *
+         * Modifying pi_state _before_ the user space value would
+         * leave the pi_state in an inconsistent state when we fault
+         * here, because we need to drop the hash bucket lock to
+         * handle the fault. This might be observed in the PID check
+         * in lookup_pi_state.
+         */
+retry:
+        if (get_futex_value_locked(&uval, uaddr))
+                goto handle_fault;
+
+        while (1) {
+                newval = (uval & FUTEX_OWNER_DIED) | newtid;
+
+                curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
+
+                if (curval == -EFAULT)
+                        goto handle_fault;
+                if (curval == uval)
+                        break;
+                uval = curval;
+        }
+
+        /*
+         * We fixed up user space. Now we need to fix the pi_state
+         * itself.
+         */
         if (pi_state->owner != NULL) {
                 spin_lock_irq(&pi_state->owner->pi_lock);
                 WARN_ON(list_empty(&pi_state->list));
                 list_del_init(&pi_state->list);
                 spin_unlock_irq(&pi_state->owner->pi_lock);
-        } else
-                newtid |= FUTEX_OWNER_DIED;
+        }
 
         pi_state->owner = newowner;
 
@@ -1118,26 +1161,35 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
         WARN_ON(!list_empty(&pi_state->list));
         list_add(&pi_state->list, &newowner->pi_state_list);
         spin_unlock_irq(&newowner->pi_lock);
+        return 0;
 
         /*
-         * We own it, so we have to replace the pending owner
-         * TID. This must be atomic as we have preserve the
-         * owner died bit here.
+         * To handle the page fault we need to drop the hash bucket
+         * lock here. That gives the other task (either the pending
+         * owner itself or the task which stole the rtmutex) the
+         * chance to try the fixup of the pi_state. So once we are
+         * back from handling the fault we need to check the pi_state
+         * after reacquiring the hash bucket lock and before trying to
+         * do another fixup. When the fixup has been done already we
+         * simply return.
          */
-        ret = get_futex_value_locked(&uval, uaddr);
+handle_fault:
+        spin_unlock(q->lock_ptr);
 
-        while (!ret) {
-                newval = (uval & FUTEX_OWNER_DIED) | newtid;
+        ret = futex_handle_fault((unsigned long)uaddr, fshared, attempt++);
 
-                curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
+        spin_lock(q->lock_ptr);
 
-                if (curval == -EFAULT)
-                        ret = -EFAULT;
-                if (curval == uval)
-                        break;
-                uval = curval;
-        }
-        return ret;
+        /*
+         * Check if someone else fixed it for us:
+         */
+        if (pi_state->owner != oldowner)
+                return 0;
+
+        if (ret)
+                return ret;
+
+        goto retry;
 }
 
 /*
@@ -1507,7 +1559,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
                          * that case:
                          */
                         if (q.pi_state->owner != curr)
-                                ret = fixup_pi_state_owner(uaddr, &q, curr);
+                                ret = fixup_pi_state_owner(uaddr, &q, curr, fshared);
                 } else {
                         /*
                          * Catch the rare case, where the lock was released
@@ -1539,7 +1591,8 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
                         int res;
 
                         owner = rt_mutex_owner(&q.pi_state->pi_mutex);
-                        res = fixup_pi_state_owner(uaddr, &q, owner);
+                        res = fixup_pi_state_owner(uaddr, &q, owner,
+                                                   fshared);
 
                         /* propagate -EFAULT, if the fixup failed */
                         if (res)
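The futex.c hunks above reorder the PI-state fixup: the user-space TID is rewritten first, and a fault drops the hash-bucket lock, lets futex_handle_fault() resolve it, then rechecks whether another task already completed the fixup before retrying. A minimal sketch of that retry discipline, using hypothetical stand-ins (write_user_value(), resolve_fault(), struct pi_state_sketch) rather than the real futex helpers:

struct pi_state_sketch { void *owner; };

/* Stubs standing in for the cmpxchg of the user-space word and for
 * futex_handle_fault(); both are illustrative only. */
static int write_user_value(void)       { return 0; }
static int resolve_fault(void)          { return 0; }

static int fixup_owner_sketch(struct pi_state_sketch *ps,
                              void *oldowner, void *newowner)
{
        int err;
retry:
        if (write_user_value()) {
                /* the real code drops the hash-bucket lock around this call */
                err = resolve_fault();
                if (ps->owner != oldowner)
                        return 0;       /* another task finished the fixup */
                if (err)
                        return err;
                goto retry;
        }
        ps->owner = newowner;           /* update kernel state only after user space */
        return 0;
}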
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index 79e3c90113c2..3ec23c3ec97f 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -1499,7 +1499,8 @@ int kgdb_nmicallback(int cpu, void *regs)
         return 1;
 }
 
-void kgdb_console_write(struct console *co, const char *s, unsigned count)
+static void kgdb_console_write(struct console *co, const char *s,
+                                unsigned count)
 {
         unsigned long flags;
 
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index e1cdf196a515..5e02b7740702 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -217,8 +217,6 @@ long rcu_batches_completed(void)
 }
 EXPORT_SYMBOL_GPL(rcu_batches_completed);
 
-EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
-
 void __rcu_read_lock(void)
 {
         int idx;
diff --git a/kernel/sched.c b/kernel/sched.c
index eaf6751e7612..3aaa5c8cb421 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1127,6 +1127,7 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
         return HRTIMER_NORESTART;
 }
 
+#ifdef CONFIG_SMP
 static void hotplug_hrtick_disable(int cpu)
 {
         struct rq *rq = cpu_rq(cpu);
@@ -1182,6 +1183,7 @@ static void init_hrtick(void)
 {
         hotcpu_notifier(hotplug_hrtick, 0);
 }
+#endif /* CONFIG_SMP */
 
 static void init_rq_hrtick(struct rq *rq)
 {
@@ -4396,22 +4398,20 @@ do_wait_for_common(struct completion *x, long timeout, int state)
                              signal_pending(current)) ||
                             (state == TASK_KILLABLE &&
                              fatal_signal_pending(current))) {
-                                __remove_wait_queue(&x->wait, &wait);
-                                return -ERESTARTSYS;
+                                timeout = -ERESTARTSYS;
+                                break;
                         }
                         __set_current_state(state);
                         spin_unlock_irq(&x->wait.lock);
                         timeout = schedule_timeout(timeout);
                         spin_lock_irq(&x->wait.lock);
-                        if (!timeout) {
-                                __remove_wait_queue(&x->wait, &wait);
-                                return timeout;
-                        }
-                } while (!x->done);
+                } while (!x->done && timeout);
                 __remove_wait_queue(&x->wait, &wait);
+                if (!x->done)
+                        return timeout;
         }
         x->done--;
-        return timeout;
+        return timeout ?: 1;
 }
 
 static long __sched
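The do_wait_for_common() rework above folds the early-return paths into the loop condition and returns `timeout ?: 1`, so a timed wait that completes just as the timeout expires still reports success. A hedged caller sketch (wait_for_device_ready() and the one-second timeout are illustrative, not from this patch):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int wait_for_device_ready(struct completion *done)
{
        /* 0 now means "really timed out"; a completed wait yields >= 1 */
        unsigned long left = wait_for_completion_timeout(done, HZ);

        if (!left)
                return -ETIMEDOUT;
        return 0;       /* completed with 'left' jiffies to spare */
}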
@@ -6877,7 +6877,12 @@ static int default_relax_domain_level = -1;
 
 static int __init setup_relax_domain_level(char *str)
 {
-        default_relax_domain_level = simple_strtoul(str, NULL, 0);
+        unsigned long val;
+
+        val = simple_strtoul(str, NULL, 0);
+        if (val < SD_LV_MAX)
+                default_relax_domain_level = val;
+
         return 1;
 }
 __setup("relax_domain_level=", setup_relax_domain_level);
@@ -7236,6 +7241,18 @@ void __attribute__((weak)) arch_update_cpu_topology(void)
 }
 
 /*
+ * Free current domain masks.
+ * Called after all cpus are attached to NULL domain.
+ */
+static void free_sched_domains(void)
+{
+        ndoms_cur = 0;
+        if (doms_cur != &fallback_doms)
+                kfree(doms_cur);
+        doms_cur = &fallback_doms;
+}
+
+/*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
  * exclude other special cases in the future.
@@ -7382,6 +7399,7 @@ int arch_reinit_sched_domains(void)
         get_online_cpus();
         mutex_lock(&sched_domains_mutex);
         detach_destroy_domains(&cpu_online_map);
+        free_sched_domains();
         err = arch_init_sched_domains(&cpu_online_map);
         mutex_unlock(&sched_domains_mutex);
         put_online_cpus();
@@ -7467,6 +7485,7 @@ static int update_sched_domains(struct notifier_block *nfb,
         case CPU_DOWN_PREPARE:
         case CPU_DOWN_PREPARE_FROZEN:
                 detach_destroy_domains(&cpu_online_map);
+                free_sched_domains();
                 return NOTIFY_OK;
 
         case CPU_UP_CANCELED:
@@ -7485,8 +7504,16 @@ static int update_sched_domains(struct notifier_block *nfb,
                 return NOTIFY_DONE;
         }
 
+#ifndef CONFIG_CPUSETS
+        /*
+         * Create default domain partitioning if cpusets are disabled.
+         * Otherwise we let cpusets rebuild the domains based on the
+         * current setup.
+         */
+
         /* The hotplug lock is already held by cpu_up/cpu_down */
         arch_init_sched_domains(&cpu_online_map);
+#endif
 
         return NOTIFY_OK;
 }
@@ -7626,7 +7653,6 @@ static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
         else
                 rt_se->rt_rq = parent->my_q;
 
-        rt_se->rt_rq = &rq->rt;
         rt_se->my_q = rt_rq;
         rt_se->parent = parent;
         INIT_LIST_HEAD(&rt_se->run_list);
@@ -8348,7 +8374,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
 #ifdef CONFIG_CGROUP_SCHED
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
 {
-        struct task_group *tgi, *parent = tg->parent;
+        struct task_group *tgi, *parent = tg ? tg->parent : NULL;
         unsigned long total = 0;
 
         if (!parent) {
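One detail worth spelling out from the sched.c hunks above: doms_cur points either at the static fallback_doms or at a kmalloc()ed array, and free_sched_domains() must only kfree() the latter while always leaving the fallback in place. A small sketch of that ownership pattern (plain C with illustrative names, not the kernel's types):

#include <stdlib.h>

static int fallback_mask;                       /* stands in for fallback_doms */
static int *cur_masks = &fallback_mask;         /* stands in for doms_cur */
static int nr_masks;                            /* stands in for ndoms_cur */

static void free_masks(void)
{
        nr_masks = 0;
        if (cur_masks != &fallback_mask)
                free(cur_masks);                /* only free what was dynamically allocated */
        cur_masks = &fallback_mask;             /* always fall back to the static array */
}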
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 3432d573205d..0f3c19197fa4 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -250,7 +250,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
                         if (rt_rq->rt_time || rt_rq->rt_nr_running)
                                 idle = 0;
                         spin_unlock(&rt_rq->rt_runtime_lock);
-                }
+                } else if (rt_rq->rt_nr_running)
+                        idle = 0;
 
                 if (enqueue)
                         sched_rt_rq_enqueue(rt_rq);
@@ -449,13 +450,19 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 #endif
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 {
         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
         struct rt_prio_array *array = &rt_rq->active;
         struct rt_rq *group_rq = group_rt_rq(rt_se);
 
-        if (group_rq && rt_rq_throttled(group_rq))
+        /*
+         * Don't enqueue the group if its throttled, or when empty.
+         * The latter is a consequence of the former when a child group
+         * get throttled and the current group doesn't have any other
+         * active members.
+         */
+        if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
                 return;
 
         list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
@@ -464,7 +471,7 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
         inc_rt_tasks(rt_se, rt_rq);
 }
 
-static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
 {
         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
         struct rt_prio_array *array = &rt_rq->active;
@@ -480,11 +487,10 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
  * Because the prio of an upper entry depends on the lower
  * entries, we must remove entries top - down.
  */
-static void dequeue_rt_stack(struct task_struct *p)
+static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 {
-        struct sched_rt_entity *rt_se, *back = NULL;
+        struct sched_rt_entity *back = NULL;
 
-        rt_se = &p->rt;
         for_each_sched_rt_entity(rt_se) {
                 rt_se->back = back;
                 back = rt_se;
@@ -492,7 +498,26 @@ static void dequeue_rt_stack(struct task_struct *p)
 
         for (rt_se = back; rt_se; rt_se = rt_se->back) {
                 if (on_rt_rq(rt_se))
-                        dequeue_rt_entity(rt_se);
+                        __dequeue_rt_entity(rt_se);
+        }
+}
+
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+{
+        dequeue_rt_stack(rt_se);
+        for_each_sched_rt_entity(rt_se)
+                __enqueue_rt_entity(rt_se);
+}
+
+static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+{
+        dequeue_rt_stack(rt_se);
+
+        for_each_sched_rt_entity(rt_se) {
+                struct rt_rq *rt_rq = group_rt_rq(rt_se);
+
+                if (rt_rq && rt_rq->rt_nr_running)
+                        __enqueue_rt_entity(rt_se);
         }
 }
 
@@ -506,32 +531,15 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
         if (wakeup)
                 rt_se->timeout = 0;
 
-        dequeue_rt_stack(p);
-
-        /*
-         * enqueue everybody, bottom - up.
-         */
-        for_each_sched_rt_entity(rt_se)
-                enqueue_rt_entity(rt_se);
+        enqueue_rt_entity(rt_se);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 {
         struct sched_rt_entity *rt_se = &p->rt;
-        struct rt_rq *rt_rq;
 
         update_curr_rt(rq);
-
-        dequeue_rt_stack(p);
-
-        /*
-         * re-enqueue all non-empty rt_rq entities.
-         */
-        for_each_sched_rt_entity(rt_se) {
-                rt_rq = group_rt_rq(rt_se);
-                if (rt_rq && rt_rq->rt_nr_running)
-                        enqueue_rt_entity(rt_se);
-        }
+        dequeue_rt_entity(rt_se);
 }
 
 /*
@@ -542,8 +550,10 @@ static
 void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
 {
         struct rt_prio_array *array = &rt_rq->active;
+        struct list_head *queue = array->queue + rt_se_prio(rt_se);
 
-        list_move_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
+        if (on_rt_rq(rt_se))
+                list_move_tail(&rt_se->run_list, queue);
 }
 
 static void requeue_task_rt(struct rq *rq, struct task_struct *p)
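The sched_rt.c hunks above split enqueue/dequeue into per-level __enqueue_rt_entity()/__dequeue_rt_entity() helpers: the whole entity stack is dequeued top-down first, then re-enqueued bottom-up (for an enqueue), or only at levels whose group runqueues still have runnable tasks (for a dequeue). A simplified sketch of that walk, using an illustrative struct node rather than sched_rt_entity:

struct node {
        struct node *parent;            /* next level up, NULL at the top */
        struct node *back;              /* temporary link for the top-down pass */
        int on_queue;                   /* is this level currently queued? */
        int group_has_tasks;            /* does its group rq still have tasks? */
};

/* Remove every level, top-down, so upper-level priorities stay valid. */
static void dequeue_stack_sketch(struct node *se)
{
        struct node *back = NULL;

        for (; se; se = se->parent) {
                se->back = back;        /* reverse the bottom-up chain */
                back = se;
        }
        for (se = back; se; se = se->back)
                se->on_queue = 0;       /* __dequeue at this level */
}

/* Enqueue: clear the stack, then re-add every level bottom-up. */
static void enqueue_sketch(struct node *se)
{
        dequeue_stack_sketch(se);
        for (; se; se = se->parent)
                se->on_queue = 1;       /* __enqueue at this level */
}

/* Dequeue: clear the stack, re-add only levels whose groups stay busy. */
static void dequeue_sketch(struct node *se)
{
        dequeue_stack_sketch(se);
        for (; se; se = se->parent)
                if (se->group_has_tasks)
                        se->on_queue = 1;
}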
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index a38878e0e49d..80179ef7450e 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -198,6 +198,9 @@ static inline void sched_info_queued(struct task_struct *t)
 /*
  * Called when a process ceases being the active-running process, either
  * voluntarily or involuntarily. Now we can calculate how long we ran.
+ * Also, if the process is still in the TASK_RUNNING state, call
+ * sched_info_queued() to mark that it has now again started waiting on
+ * the runqueue.
  */
 static inline void sched_info_depart(struct task_struct *t)
 {
@@ -206,6 +209,9 @@ static inline void sched_info_depart(struct task_struct *t)
 
         t->sched_info.cpu_time += delta;
         rq_sched_info_depart(task_rq(t), delta);
+
+        if (t->state == TASK_RUNNING)
+                sched_info_queued(t);
 }
 
 /*
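The sched_stats.h change above closes an accounting gap: a task that is preempted but stays TASK_RUNNING should have its wait clock restarted the moment it leaves the CPU. A toy sketch of that bookkeeping (struct sched_info_sketch and depart_sketch() are illustrative, not the kernel's structures):

struct sched_info_sketch {
        unsigned long long last_arrival;        /* when the task last got the CPU */
        unsigned long long last_queued;         /* when it last started waiting */
        unsigned long long cpu_time;            /* total time spent on the CPU */
};

static void depart_sketch(struct sched_info_sketch *si,
                          unsigned long long now, int still_runnable)
{
        si->cpu_time += now - si->last_arrival;
        if (still_runnable)
                si->last_queued = now;          /* restart the wait clock immediately */
}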