author      Ingo Molnar <mingo@elte.hu>     2008-06-25 06:29:02 -0400
committer   Ingo Molnar <mingo@elte.hu>     2008-06-25 06:29:02 -0400
commit      37f5d732f34fa099676b2c52b8a57bab01a011de (patch)
tree        cca207ab2009e8d5d2097602133d1a20de2b2e5c /kernel
parent      ed9e4996d9a123b7550e63713d563f524fa9d9f0 (diff)
parent      543cf4cb3fe6f6cae3651ba918b9c56200b257d0 (diff)
Merge branch 'linus' into tracing/sysprof
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpuset.c       |  10
-rw-r--r--  kernel/futex.c        |  93
-rw-r--r--  kernel/kgdb.c         |   3
-rw-r--r--  kernel/rcupreempt.c   |   2
-rw-r--r--  kernel/sched.c        |  48
-rw-r--r--  kernel/sched_rt.c     |  66
-rw-r--r--  kernel/sched_stats.h  |   6
-rw-r--r--  kernel/softlockup.c   |  15
8 files changed, 174 insertions(+), 69 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 039baa4cd90c..9fceb97e989c 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1037,8 +1037,8 @@ int current_cpuset_is_being_rebound(void)
 
 static int update_relax_domain_level(struct cpuset *cs, s64 val)
 {
-	if ((int)val < 0)
-		val = -1;
+	if (val < -1 || val >= SD_LV_MAX)
+		return -EINVAL;
 
 	if (val != cs->relax_domain_level) {
 		cs->relax_domain_level = val;
@@ -1890,6 +1890,12 @@ static void common_cpu_mem_hotplug_unplug(void)
 	top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
 	scan_for_empty_cpusets(&top_cpuset);
 
+	/*
+	 * Scheduler destroys domains on hotplug events.
+	 * Rebuild them based on the current settings.
+	 */
+	rebuild_sched_domains();
+
 	cgroup_unlock();
 }
 
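The first hunk switches update_relax_domain_level() from silently clamping bad input to rejecting it. A minimal user-space sketch of that validation pattern; SD_LV_MAX and struct cpuset are stubbed out for the example and are assumptions here, not the kernel definitions:

/*
 * Standalone sketch (not kernel code): reject out-of-range values with
 * -EINVAL instead of silently clamping them.
 */
#include <errno.h>

#define SD_LV_MAX 9	/* assumed stand-in for the real enum limit */

struct cpuset {
	long long relax_domain_level;
};

static int update_relax_domain_level(struct cpuset *cs, long long val)
{
	if (val < -1 || val >= SD_LV_MAX)
		return -EINVAL;	/* report the error instead of clamping */

	if (val != cs->relax_domain_level)
		cs->relax_domain_level = val;

	return 0;
}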
diff --git a/kernel/futex.c b/kernel/futex.c
index 449def8074fe..7d1136e97c14 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1096,21 +1096,64 @@ static void unqueue_me_pi(struct futex_q *q)
  * private futexes.
  */
 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
-				struct task_struct *newowner)
+				struct task_struct *newowner,
+				struct rw_semaphore *fshared)
 {
 	u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
 	struct futex_pi_state *pi_state = q->pi_state;
+	struct task_struct *oldowner = pi_state->owner;
 	u32 uval, curval, newval;
-	int ret;
+	int ret, attempt = 0;
 
 	/* Owner died? */
+	if (!pi_state->owner)
+		newtid |= FUTEX_OWNER_DIED;
+
+	/*
+	 * We are here either because we stole the rtmutex from the
+	 * pending owner or we are the pending owner which failed to
+	 * get the rtmutex. We have to replace the pending owner TID
+	 * in the user space variable. This must be atomic as we have
+	 * to preserve the owner died bit here.
+	 *
+	 * Note: We write the user space value _before_ changing the
+	 * pi_state because we can fault here. Imagine swapped out
+	 * pages or a fork, which was running right before we acquired
+	 * mmap_sem, that marked all the anonymous memory readonly for
+	 * cow.
+	 *
+	 * Modifying pi_state _before_ the user space value would
+	 * leave the pi_state in an inconsistent state when we fault
+	 * here, because we need to drop the hash bucket lock to
+	 * handle the fault. This might be observed in the PID check
+	 * in lookup_pi_state.
+	 */
+retry:
+	if (get_futex_value_locked(&uval, uaddr))
+		goto handle_fault;
+
+	while (1) {
+		newval = (uval & FUTEX_OWNER_DIED) | newtid;
+
+		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
+
+		if (curval == -EFAULT)
+			goto handle_fault;
+		if (curval == uval)
+			break;
+		uval = curval;
+	}
+
+	/*
+	 * We fixed up user space. Now we need to fix the pi_state
+	 * itself.
+	 */
 	if (pi_state->owner != NULL) {
 		spin_lock_irq(&pi_state->owner->pi_lock);
 		WARN_ON(list_empty(&pi_state->list));
 		list_del_init(&pi_state->list);
 		spin_unlock_irq(&pi_state->owner->pi_lock);
-	} else
-		newtid |= FUTEX_OWNER_DIED;
+	}
 
 	pi_state->owner = newowner;
 
@@ -1118,26 +1161,35 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
 	WARN_ON(!list_empty(&pi_state->list));
 	list_add(&pi_state->list, &newowner->pi_state_list);
 	spin_unlock_irq(&newowner->pi_lock);
+	return 0;
 
 	/*
-	 * We own it, so we have to replace the pending owner
-	 * TID. This must be atomic as we have preserve the
-	 * owner died bit here.
+	 * To handle the page fault we need to drop the hash bucket
+	 * lock here. That gives the other task (either the pending
+	 * owner itself or the task which stole the rtmutex) the
+	 * chance to try the fixup of the pi_state. So once we are
+	 * back from handling the fault we need to check the pi_state
+	 * after reacquiring the hash bucket lock and before trying to
+	 * do another fixup. When the fixup has been done already we
+	 * simply return.
 	 */
-	ret = get_futex_value_locked(&uval, uaddr);
+handle_fault:
+	spin_unlock(q->lock_ptr);
 
-	while (!ret) {
-		newval = (uval & FUTEX_OWNER_DIED) | newtid;
+	ret = futex_handle_fault((unsigned long)uaddr, fshared, attempt++);
 
-		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
+	spin_lock(q->lock_ptr);
 
-		if (curval == -EFAULT)
-			ret = -EFAULT;
-		if (curval == uval)
-			break;
-		uval = curval;
-	}
-	return ret;
+	/*
+	 * Check if someone else fixed it for us:
+	 */
+	if (pi_state->owner != oldowner)
+		return 0;
+
+	if (ret)
+		return ret;
+
+	goto retry;
 }
 
 /*
@@ -1507,7 +1559,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 		 * that case:
 		 */
 		if (q.pi_state->owner != curr)
-			ret = fixup_pi_state_owner(uaddr, &q, curr);
+			ret = fixup_pi_state_owner(uaddr, &q, curr, fshared);
 	} else {
 		/*
 		 * Catch the rare case, where the lock was released
@@ -1539,7 +1591,8 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 			int res;
 
 			owner = rt_mutex_owner(&q.pi_state->pi_mutex);
-			res = fixup_pi_state_owner(uaddr, &q, owner);
+			res = fixup_pi_state_owner(uaddr, &q, owner,
+						   fshared);
 
 			/* propagate -EFAULT, if the fixup failed */
 			if (res)
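The core of the new fixup path is a compare-and-exchange retry loop that rewrites the owner TID in the user-space futex word while preserving the FUTEX_OWNER_DIED bit. A standalone user-space sketch of just that loop, using the GCC __atomic_compare_exchange_n builtin as a stand-in for cmpxchg_futex_value_locked() and omitting the handle_fault/retry path:

/*
 * User-space sketch only; not the kernel implementation.
 */
#include <stdint.h>

#define FUTEX_WAITERS		0x80000000
#define FUTEX_OWNER_DIED	0x40000000

static void fixup_owner_tid(uint32_t *uaddr, uint32_t tid)
{
	uint32_t newtid = tid | FUTEX_WAITERS;
	uint32_t uval = *uaddr;

	for (;;) {
		/* keep the owner-died bit, replace TID and waiter bits */
		uint32_t newval = (uval & FUTEX_OWNER_DIED) | newtid;

		/* on failure, uval is refreshed with the current value */
		if (__atomic_compare_exchange_n(uaddr, &uval, newval, 0,
						__ATOMIC_SEQ_CST,
						__ATOMIC_SEQ_CST))
			break;
	}
}

As the in-tree comment above explains, the important property is that the user-space word is fixed up before pi_state is touched, so a fault can be handled (by dropping and retaking the hash bucket lock) without leaving the two views inconsistent.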
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index 79e3c90113c2..3ec23c3ec97f 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -1499,7 +1499,8 @@ int kgdb_nmicallback(int cpu, void *regs)
 	return 1;
 }
 
-void kgdb_console_write(struct console *co, const char *s, unsigned count)
+static void kgdb_console_write(struct console *co, const char *s,
+   unsigned count)
 {
 	unsigned long flags;
 
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index e1cdf196a515..5e02b7740702 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -217,8 +217,6 @@ long rcu_batches_completed(void)
 }
 EXPORT_SYMBOL_GPL(rcu_batches_completed);
 
-EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
-
 void __rcu_read_lock(void)
 {
 	int idx;
diff --git a/kernel/sched.c b/kernel/sched.c
index c994d12abbf6..2a7ad35ea79b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1146,6 +1146,7 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
+#ifdef CONFIG_SMP
 static void hotplug_hrtick_disable(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -1201,6 +1202,7 @@ static void init_hrtick(void)
 {
 	hotcpu_notifier(hotplug_hrtick, 0);
 }
+#endif /* CONFIG_SMP */
 
 static void init_rq_hrtick(struct rq *rq)
 {
@@ -4447,22 +4449,20 @@ do_wait_for_common(struct completion *x, long timeout, int state)
 			     signal_pending(current)) ||
 			    (state == TASK_KILLABLE &&
 			     fatal_signal_pending(current))) {
-				__remove_wait_queue(&x->wait, &wait);
-				return -ERESTARTSYS;
+				timeout = -ERESTARTSYS;
+				break;
 			}
 			__set_current_state(state);
 			spin_unlock_irq(&x->wait.lock);
 			timeout = schedule_timeout(timeout);
 			spin_lock_irq(&x->wait.lock);
-			if (!timeout) {
-				__remove_wait_queue(&x->wait, &wait);
-				return timeout;
-			}
-		} while (!x->done);
+		} while (!x->done && timeout);
 		__remove_wait_queue(&x->wait, &wait);
+		if (!x->done)
+			return timeout;
 	}
 	x->done--;
-	return timeout;
+	return timeout ?: 1;
 }
 
 static long __sched
@@ -6928,7 +6928,12 @@ static int default_relax_domain_level = -1;
 
 static int __init setup_relax_domain_level(char *str)
 {
-	default_relax_domain_level = simple_strtoul(str, NULL, 0);
+	unsigned long val;
+
+	val = simple_strtoul(str, NULL, 0);
+	if (val < SD_LV_MAX)
+		default_relax_domain_level = val;
+
 	return 1;
 }
 __setup("relax_domain_level=", setup_relax_domain_level);
@@ -7287,6 +7292,18 @@ void __attribute__((weak)) arch_update_cpu_topology(void)
 }
 
 /*
+ * Free current domain masks.
+ * Called after all cpus are attached to NULL domain.
+ */
+static void free_sched_domains(void)
+{
+	ndoms_cur = 0;
+	if (doms_cur != &fallback_doms)
+		kfree(doms_cur);
+	doms_cur = &fallback_doms;
+}
+
+/*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
  * exclude other special cases in the future.
@@ -7433,6 +7450,7 @@ int arch_reinit_sched_domains(void)
 	get_online_cpus();
 	mutex_lock(&sched_domains_mutex);
 	detach_destroy_domains(&cpu_online_map);
+	free_sched_domains();
 	err = arch_init_sched_domains(&cpu_online_map);
 	mutex_unlock(&sched_domains_mutex);
 	put_online_cpus();
@@ -7518,6 +7536,7 @@ static int update_sched_domains(struct notifier_block *nfb,
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		detach_destroy_domains(&cpu_online_map);
+		free_sched_domains();
 		return NOTIFY_OK;
 
 	case CPU_UP_CANCELED:
@@ -7536,8 +7555,16 @@ static int update_sched_domains(struct notifier_block *nfb,
 		return NOTIFY_DONE;
 	}
 
+#ifndef CONFIG_CPUSETS
+	/*
+	 * Create default domain partitioning if cpusets are disabled.
+	 * Otherwise we let cpusets rebuild the domains based on the
+	 * current setup.
+	 */
+
 	/* The hotplug lock is already held by cpu_up/cpu_down */
 	arch_init_sched_domains(&cpu_online_map);
+#endif
 
 	return NOTIFY_OK;
 }
@@ -7677,7 +7704,6 @@ static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 	else
 		rt_se->rt_rq = parent->my_q;
 
-	rt_se->rt_rq = &rq->rt;
 	rt_se->my_q = rt_rq;
 	rt_se->parent = parent;
 	INIT_LIST_HEAD(&rt_se->run_list);
@@ -8399,7 +8425,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
 #ifdef CONFIG_CGROUP_SCHED
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
 {
-	struct task_group *tgi, *parent = tg->parent;
+	struct task_group *tgi, *parent = tg ? tg->parent : NULL;
 	unsigned long total = 0;
 
 	if (!parent) {
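The do_wait_for_common() hunk replaces two early returns with break/fall-through so the wait-queue entry is removed at a single point, and the success path returns "timeout ?: 1" so a caller can tell success apart from an expired timeout even when the remaining time is 0. A simplified, self-contained sketch of that control flow; every name and helper below is invented for the example and stands in for the completion machinery:

/*
 * Sketch only. "timeout ?: 1" is the GNU C conditional-with-omitted-middle
 * extension, as used in the hunk above.
 */
#include <stdbool.h>

static bool done;			/* stands in for x->done */
static bool signal_pending_stub;	/* stands in for signal_pending() */

static long sleep_for(long timeout)
{
	/* pretend the wait consumed the whole timeout unless done */
	return done ? timeout : 0;
}

static void remove_waiter(void)
{
	/* the single point where the wait-queue entry is removed */
}

static long wait_for_it(long timeout)
{
	do {
		if (signal_pending_stub) {
			timeout = -512;	/* -ERESTARTSYS */
			break;		/* fall through to the cleanup */
		}
		timeout = sleep_for(timeout);
	} while (!done && timeout);

	remove_waiter();

	if (!done)
		return timeout;		/* interrupted or timed out */

	return timeout ?: 1;		/* success: never return 0 */
}

Collapsing the exit paths means the waiter removal can no longer be skipped on any return, which is the point of the restructuring.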
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 3432d573205d..0f3c19197fa4 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -250,7 +250,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			if (rt_rq->rt_time || rt_rq->rt_nr_running)
 				idle = 0;
 			spin_unlock(&rt_rq->rt_runtime_lock);
-		}
+		} else if (rt_rq->rt_nr_running)
+			idle = 0;
 
 		if (enqueue)
 			sched_rt_rq_enqueue(rt_rq);
@@ -449,13 +450,19 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 #endif
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
 	struct rt_rq *group_rq = group_rt_rq(rt_se);
 
-	if (group_rq && rt_rq_throttled(group_rq))
+	/*
+	 * Don't enqueue the group if its throttled, or when empty.
+	 * The latter is a consequence of the former when a child group
+	 * get throttled and the current group doesn't have any other
+	 * active members.
+	 */
+	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
 		return;
 
 	list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
@@ -464,7 +471,7 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
 	inc_rt_tasks(rt_se, rt_rq);
 }
 
-static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
@@ -480,11 +487,10 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
  * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top - down.
 */
-static void dequeue_rt_stack(struct task_struct *p)
+static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 {
-	struct sched_rt_entity *rt_se, *back = NULL;
+	struct sched_rt_entity *back = NULL;
 
-	rt_se = &p->rt;
 	for_each_sched_rt_entity(rt_se) {
 		rt_se->back = back;
 		back = rt_se;
@@ -492,7 +498,26 @@
 
 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
 		if (on_rt_rq(rt_se))
-			dequeue_rt_entity(rt_se);
+			__dequeue_rt_entity(rt_se);
+	}
+}
+
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+{
+	dequeue_rt_stack(rt_se);
+	for_each_sched_rt_entity(rt_se)
+		__enqueue_rt_entity(rt_se);
+}
+
+static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+{
+	dequeue_rt_stack(rt_se);
+
+	for_each_sched_rt_entity(rt_se) {
+		struct rt_rq *rt_rq = group_rt_rq(rt_se);
+
+		if (rt_rq && rt_rq->rt_nr_running)
+			__enqueue_rt_entity(rt_se);
 	}
 }
 
@@ -506,32 +531,15 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 	if (wakeup)
 		rt_se->timeout = 0;
 
-	dequeue_rt_stack(p);
-
-	/*
-	 * enqueue everybody, bottom - up.
-	 */
-	for_each_sched_rt_entity(rt_se)
-		enqueue_rt_entity(rt_se);
+	enqueue_rt_entity(rt_se);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
-	struct rt_rq *rt_rq;
 
 	update_curr_rt(rq);
-
-	dequeue_rt_stack(p);
-
-	/*
-	 * re-enqueue all non-empty rt_rq entities.
-	 */
-	for_each_sched_rt_entity(rt_se) {
-		rt_rq = group_rt_rq(rt_se);
-		if (rt_rq && rt_rq->rt_nr_running)
-			enqueue_rt_entity(rt_se);
-	}
+	dequeue_rt_entity(rt_se);
 }
 
 /*
@@ -542,8 +550,10 @@ static
 void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
 {
 	struct rt_prio_array *array = &rt_rq->active;
+	struct list_head *queue = array->queue + rt_se_prio(rt_se);
 
-	list_move_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
+	if (on_rt_rq(rt_se))
+		list_move_tail(&rt_se->run_list, queue);
 }
 
 static void requeue_task_rt(struct rq *rq, struct task_struct *p)
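The sched_rt.c changes split the raw operations into __enqueue_rt_entity()/__dequeue_rt_entity() and add wrappers that first flush the whole entity hierarchy top-down (dequeue_rt_stack()) and then re-add only the levels that should stay queued. A much-simplified sketch of that walk over a plain parent chain; struct entity and its fields are invented stand-ins for sched_rt_entity and its group run-queue state, not the kernel types:

/*
 * Sketch only: the group/task distinction and priority arrays are
 * collapsed into two flags per level.
 */
#include <stddef.h>

struct entity {
	struct entity *parent;	/* next level up, NULL at the top */
	struct entity *back;	/* filled in by dequeue_stack() */
	int on_queue;		/* stands in for on_rt_rq() */
	int nr_running;		/* stands in for group_rq->rt_nr_running */
};

static void dequeue_stack(struct entity *e)
{
	struct entity *back = NULL;

	/* record back-pointers while walking bottom-up */
	for (; e; e = e->parent) {
		e->back = back;
		back = e;
	}
	/* then remove entries top-down */
	for (e = back; e; e = e->back) {
		if (e->on_queue)
			e->on_queue = 0;
	}
}

static void enqueue_entity(struct entity *e)
{
	dequeue_stack(e);
	for (; e; e = e->parent)
		e->on_queue = 1;	/* re-add every level, bottom-up */
}

static void dequeue_entity(struct entity *e)
{
	dequeue_stack(e);
	for (; e; e = e->parent) {
		if (e->nr_running)	/* keep only levels that still have work */
			e->on_queue = 1;
	}
}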
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index a38878e0e49d..80179ef7450e 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -198,6 +198,9 @@ static inline void sched_info_queued(struct task_struct *t)
 /*
  * Called when a process ceases being the active-running process, either
  * voluntarily or involuntarily. Now we can calculate how long we ran.
+ * Also, if the process is still in the TASK_RUNNING state, call
+ * sched_info_queued() to mark that it has now again started waiting on
+ * the runqueue.
  */
 static inline void sched_info_depart(struct task_struct *t)
 {
@@ -206,6 +209,9 @@ static inline void sched_info_depart(struct task_struct *t)
 
 	t->sched_info.cpu_time += delta;
 	rq_sched_info_depart(task_rq(t), delta);
+
+	if (t->state == TASK_RUNNING)
+		sched_info_queued(t);
 }
 
 /*
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 01b6522fd92b..c828c2339cc9 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -49,12 +49,17 @@ static unsigned long get_timestamp(int this_cpu)
 	return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */
 }
 
-void touch_softlockup_watchdog(void)
+static void __touch_softlockup_watchdog(void)
 {
 	int this_cpu = raw_smp_processor_id();
 
 	__raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
 }
+
+void touch_softlockup_watchdog(void)
+{
+	__raw_get_cpu_var(touch_timestamp) = 0;
+}
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
 void touch_all_softlockup_watchdogs(void)
@@ -80,7 +85,7 @@ void softlockup_tick(void)
 	unsigned long now;
 
 	if (touch_timestamp == 0) {
-		touch_softlockup_watchdog();
+		__touch_softlockup_watchdog();
 		return;
 	}
 
@@ -95,7 +100,7 @@ void softlockup_tick(void)
 
 	/* do not print during early bootup: */
 	if (unlikely(system_state != SYSTEM_RUNNING)) {
-		touch_softlockup_watchdog();
+		__touch_softlockup_watchdog();
 		return;
 	}
 
@@ -214,7 +219,7 @@ static int watchdog(void *__bind_cpu)
 	sched_setscheduler(current, SCHED_FIFO, &param);
 
 	/* initialize timestamp */
-	touch_softlockup_watchdog();
+	__touch_softlockup_watchdog();
 
 	set_current_state(TASK_INTERRUPTIBLE);
 	/*
@@ -223,7 +228,7 @@ static int watchdog(void *__bind_cpu)
 	 * debug-printout triggers in softlockup_tick().
 	 */
 	while (!kthread_should_stop()) {
-		touch_softlockup_watchdog();
+		__touch_softlockup_watchdog();
 		schedule();
 
 		if (kthread_should_stop())
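The softlockup.c change keeps the old behaviour (stamp the per-CPU timestamp with the current time) in an internal helper and makes the exported touch_softlockup_watchdog() write 0, which softlockup_tick() treats as a request to re-stamp on the owning CPU at the next tick. A condensed, single-CPU sketch of that split; the single variable, the function names, and the use of time() are stand-ins for the per-CPU data and cpu_clock(), not the kernel API:

/*
 * Sketch only.
 */
#include <time.h>

static unsigned long touch_timestamp;	/* stands in for the per-CPU variable */

static unsigned long get_timestamp(void)
{
	return (unsigned long)time(NULL);
}

/* internal: stamp "now"; only meaningful on the CPU that owns the data */
static void __touch_watchdog(void)
{
	touch_timestamp = get_timestamp();
}

/* exported: defer the stamping to the watchdog tick itself */
void touch_watchdog(void)
{
	touch_timestamp = 0;
}

void watchdog_tick(void)
{
	if (touch_timestamp == 0) {
		/* someone touched us: re-stamp here and skip this check */
		__touch_watchdog();
		return;
	}
	/* ... lockup detection against touch_timestamp would go here ... */
}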