author    Oleg Nesterov <oleg@tv-sign.ru>    2006-03-28 19:11:19 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>    2006-03-28 21:36:43 -0500
commit    35f5cad8c4bab94ecc5acdc4055df5ea12dc76f8 (patch)
tree      aad78b130b697c3c4b594483c63a9b1a9c4ed009
parent    6a14c5c9da0b4c34b5be783403c54f0396fcfe77 (diff)
[PATCH] revert "Optimize sys_times for a single thread process"
This patch reverts the 'CONFIG_SMP && thread_group_empty()' optimization
in sys_times(). The reason is that the next patch breaks the memory
ordering that the optimization depends on. The tasklist_lock in
sys_times() will be eliminated completely by a further patch.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--    kernel/exit.c     6
-rw-r--r--    kernel/sys.c     86
2 files changed, 27 insertions, 65 deletions
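For context: sys_times() is the kernel side of the POSIX times(2) call, which
reports a process's own and waited-for-children CPU time in clock ticks. A
minimal userspace sketch of the interface this syscall serves (standard POSIX
only, nothing kernel-specific assumed):

/* Minimal demo of times(2), the interface sys_times() implements.
 * Burns a little user time, then prints the four struct tms fields
 * that the kernel code below computes from [us]time and c[us]time.
 */
#include <stdio.h>
#include <sys/times.h>
#include <unistd.h>

int main(void)
{
        struct tms t;
        long hz = sysconf(_SC_CLK_TCK);         /* clock ticks per second */
        volatile unsigned long spin;

        for (spin = 0; spin < 100000000UL; spin++)
                ;                               /* consume some user time */

        if (times(&t) == (clock_t)-1) {
                perror("times");
                return 1;
        }
        printf("utime=%.2fs stime=%.2fs cutime=%.2fs cstime=%.2fs\n",
               (double)t.tms_utime / hz, (double)t.tms_stime / hz,
               (double)t.tms_cutime / hz, (double)t.tms_cstime / hz);
        return 0;
}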
diff --git a/kernel/exit.c b/kernel/exit.c
index 3823ec89d7b8..6b2e4cf3e140 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -139,11 +139,7 @@ repeat:
 	ptrace_unlink(p);
 	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
 	__exit_signal(p);
-	/*
-	 * Note that the fastpath in sys_times depends on __exit_signal having
-	 * updated the counters before a task is removed from the tasklist of
-	 * the process by __unhash_process.
-	 */
+
 	__unhash_process(p);
 
 	/*
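The comment deleted above described the ordering that the (about to be removed)
sys_times() fastpath relied on: an exiting thread must fold its counters into
signal->[us]time (__exit_signal) before it disappears from the thread list
(__unhash_process), so a lockless reader never loses a contribution. A rough
userspace analogue of that publish-then-unlink pattern, using C11 atomics in
place of the siglock's implied ordering (purely illustrative, not kernel code;
all names below are invented for the sketch):

#include <stdatomic.h>
#include <stdio.h>

static unsigned long group_utime;       /* plays signal->utime       */
static atomic_int on_list = 1;          /* plays "still hashed" flag */

/* Exiting side: publish counters first, then unlink with release
 * semantics (the __exit_signal -> __unhash_process order). */
static void exit_side(unsigned long my_utime)
{
        group_utime += my_utime;                     /* __exit_signal     */
        atomic_store_explicit(&on_list, 0,
                              memory_order_release); /* __unhash_process  */
}

/* Reading side: the acquire load pairs with the release above, so a
 * reader that sees the thread gone also sees its counters folded in. */
static unsigned long reader_side(unsigned long my_live_utime)
{
        if (atomic_load_explicit(&on_list, memory_order_acquire))
                return group_utime + my_live_utime;  /* still live        */
        return group_utime;                          /* already folded in */
}

int main(void)
{
        exit_side(5);
        printf("total utime: %lu ticks\n", reader_side(0));
        return 0;
}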
diff --git a/kernel/sys.c b/kernel/sys.c
index c93d37f71aef..84371fdc660b 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1202,69 +1202,35 @@ asmlinkage long sys_times(struct tms __user * tbuf)
 	 */
 	if (tbuf) {
 		struct tms tmp;
+		struct task_struct *tsk = current;
+		struct task_struct *t;
 		cputime_t utime, stime, cutime, cstime;
 
-#ifdef CONFIG_SMP
-		if (thread_group_empty(current)) {
-			/*
-			 * Single thread case without the use of any locks.
-			 *
-			 * We may race with release_task if two threads are
-			 * executing. However, release task first adds up the
-			 * counters (__exit_signal) before removing the task
-			 * from the process tasklist (__unhash_process).
-			 * __exit_signal also acquires and releases the
-			 * siglock which results in the proper memory ordering
-			 * so that the list modifications are always visible
-			 * after the counters have been updated.
-			 *
-			 * If the counters have been updated by the second thread
-			 * but the thread has not yet been removed from the list
-			 * then the other branch will be executing which will
-			 * block on tasklist_lock until the exit handling of the
-			 * other task is finished.
-			 *
-			 * This also implies that the sighand->siglock cannot
-			 * be held by another processor. So we can also
-			 * skip acquiring that lock.
-			 */
-			utime = cputime_add(current->signal->utime, current->utime);
-			stime = cputime_add(current->signal->utime, current->stime);
-			cutime = current->signal->cutime;
-			cstime = current->signal->cstime;
-		} else
-#endif
-		{
-
-			/* Process with multiple threads */
-			struct task_struct *tsk = current;
-			struct task_struct *t;
-
-			read_lock(&tasklist_lock);
-			utime = tsk->signal->utime;
-			stime = tsk->signal->stime;
-			t = tsk;
-			do {
-				utime = cputime_add(utime, t->utime);
-				stime = cputime_add(stime, t->stime);
-				t = next_thread(t);
-			} while (t != tsk);
-
-			/*
-			 * While we have tasklist_lock read-locked, no dying thread
-			 * can be updating current->signal->[us]time. Instead,
-			 * we got their counts included in the live thread loop.
-			 * However, another thread can come in right now and
-			 * do a wait call that updates current->signal->c[us]time.
-			 * To make sure we always see that pair updated atomically,
-			 * we take the siglock around fetching them.
-			 */
-			spin_lock_irq(&tsk->sighand->siglock);
-			cutime = tsk->signal->cutime;
-			cstime = tsk->signal->cstime;
-			spin_unlock_irq(&tsk->sighand->siglock);
-			read_unlock(&tasklist_lock);
-		}
+		read_lock(&tasklist_lock);
+		utime = tsk->signal->utime;
+		stime = tsk->signal->stime;
+		t = tsk;
+		do {
+			utime = cputime_add(utime, t->utime);
+			stime = cputime_add(stime, t->stime);
+			t = next_thread(t);
+		} while (t != tsk);
+
+		/*
+		 * While we have tasklist_lock read-locked, no dying thread
+		 * can be updating current->signal->[us]time. Instead,
+		 * we got their counts included in the live thread loop.
+		 * However, another thread can come in right now and
+		 * do a wait call that updates current->signal->c[us]time.
+		 * To make sure we always see that pair updated atomically,
+		 * we take the siglock around fetching them.
+		 */
+		spin_lock_irq(&tsk->sighand->siglock);
+		cutime = tsk->signal->cutime;
+		cstime = tsk->signal->cstime;
+		spin_unlock_irq(&tsk->sighand->siglock);
+		read_unlock(&tasklist_lock);
 		tmp.tms_utime = cputime_to_clock_t(utime);
 		tmp.tms_stime = cputime_to_clock_t(stime);
 		tmp.tms_cutime = cputime_to_clock_t(cutime);
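The restored slow path above makes two separate guarantees: tasklist_lock keeps
the thread list stable while the per-thread [us]time loop runs, and siglock
ensures cutime/cstime are observed only as a consistent pair, since a concurrent
wait() caller updates both together. A small userspace analogue of the
pair-under-a-lock part (again illustrative only; the names mirror the kernel
fields but everything here is invented):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t siglock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long cutime, cstime;    /* play signal->c[us]time */

/* Writer: what a wait() caller does when folding in a dead child --
 * both fields are only ever updated together under the lock. */
static void account_child(unsigned long u, unsigned long s)
{
        pthread_mutex_lock(&siglock);
        cutime += u;
        cstime += s;
        pthread_mutex_unlock(&siglock);
}

/* Reader: what the sys_times() slow path does -- take the same lock,
 * so the snapshot is never half of an update. */
static void snapshot_pair(unsigned long *u, unsigned long *s)
{
        pthread_mutex_lock(&siglock);
        *u = cutime;
        *s = cstime;
        pthread_mutex_unlock(&siglock);
}

int main(void)
{
        unsigned long u, s;

        account_child(3, 1);
        snapshot_pair(&u, &s);
        printf("cutime=%lu cstime=%lu\n", u, s);
        return 0;
}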