author     Ingo Molnar <mingo@elte.hu>   2009-02-13 03:44:22 -0500
committer  Ingo Molnar <mingo@elte.hu>   2009-02-13 03:44:22 -0500
commit     f8a6b2b9cee298a9663cbe38ce1eb5240987cb62 (patch)
tree       b356490269c9e77d164dcc1477792b882fbb8bdb /kernel
parent     ba1511bf7fbda452138e4096bf10d5a382710f4f (diff)
parent     071a0bc2ceace31266836801510879407a3701fa (diff)

Merge branch 'linus' into x86/apic

Conflicts:
        arch/x86/kernel/acpi/boot.c
        arch/x86/mm/fault.c

Diffstat (limited to 'kernel')
-rw-r--r--  kernel/async.c              94
-rw-r--r--  kernel/cgroup.c              3
-rw-r--r--  kernel/exit.c                3
-rw-r--r--  kernel/fork.c                6
-rw-r--r--  kernel/itimer.c              4
-rw-r--r--  kernel/posix-cpu-timers.c  117
-rw-r--r--  kernel/power/main.c         26
-rw-r--r--  kernel/profile.c             3
-rw-r--r--  kernel/sched.c              31
-rw-r--r--  kernel/sched_fair.c         11
-rw-r--r--  kernel/sched_stats.h        45
-rw-r--r--  kernel/signal.c              8
-rw-r--r--  kernel/sys.c                16
-rw-r--r--  kernel/sysctl.c              5
-rw-r--r--  kernel/trace/ftrace.c        5
-rw-r--r--  kernel/wait.c               59
16 files changed, 332 insertions(+), 104 deletions(-)
diff --git a/kernel/async.c b/kernel/async.c
index 608b32b42812..f565891f2c9b 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -54,6 +54,7 @@ asynchronous and synchronous parts of the kernel.
 #include <linux/sched.h>
 #include <linux/init.h>
 #include <linux/kthread.h>
+#include <linux/delay.h>
 #include <asm/atomic.h>
 
 static async_cookie_t next_cookie = 1;
@@ -132,21 +133,23 @@ static void run_one_entry(void)
        entry = list_first_entry(&async_pending, struct async_entry, list);
 
        /* 2) move it to the running queue */
-       list_del(&entry->list);
-       list_add_tail(&entry->list, &async_running);
+       list_move_tail(&entry->list, entry->running);
        spin_unlock_irqrestore(&async_lock, flags);
 
        /* 3) run it (and print duration)*/
        if (initcall_debug && system_state == SYSTEM_BOOTING) {
-               printk("calling %lli_%pF @ %i\n", entry->cookie, entry->func, task_pid_nr(current));
+               printk("calling %lli_%pF @ %i\n", (long long)entry->cookie,
+                       entry->func, task_pid_nr(current));
                calltime = ktime_get();
        }
        entry->func(entry->data, entry->cookie);
        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
-               printk("initcall %lli_%pF returned 0 after %lld usecs\n", entry->cookie,
-                       entry->func, ktime_to_ns(delta) >> 10);
+               printk("initcall %lli_%pF returned 0 after %lld usecs\n",
+                       (long long)entry->cookie,
+                       entry->func,
+                       (long long)ktime_to_ns(delta) >> 10);
        }
 
        /* 4) remove it from the running queue */
@@ -205,18 +208,44 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct l
        return newcookie;
 }
 
+/**
+ * async_schedule - schedule a function for asynchronous execution
+ * @ptr: function to execute asynchronously
+ * @data: data pointer to pass to the function
+ *
+ * Returns an async_cookie_t that may be used for checkpointing later.
+ * Note: This function may be called from atomic or non-atomic contexts.
+ */
 async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
 {
-       return __async_schedule(ptr, data, &async_pending);
+       return __async_schedule(ptr, data, &async_running);
 }
 EXPORT_SYMBOL_GPL(async_schedule);
 
-async_cookie_t async_schedule_special(async_func_ptr *ptr, void *data, struct list_head *running)
+/**
+ * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
+ * @ptr: function to execute asynchronously
+ * @data: data pointer to pass to the function
+ * @running: running list for the domain
+ *
+ * Returns an async_cookie_t that may be used for checkpointing later.
+ * @running may be used in the async_synchronize_*_domain() functions
+ * to wait within a certain synchronization domain rather than globally.
+ * A synchronization domain is specified via the running queue @running to use.
+ * Note: This function may be called from atomic or non-atomic contexts.
+ */
+async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
+                                     struct list_head *running)
 {
        return __async_schedule(ptr, data, running);
 }
-EXPORT_SYMBOL_GPL(async_schedule_special);
+EXPORT_SYMBOL_GPL(async_schedule_domain);
 
+/**
+ * async_synchronize_full - synchronize all asynchronous function calls
+ *
+ * This function waits until all asynchronous function calls have been done.
+ */
 void async_synchronize_full(void)
 {
        do {
@@ -225,13 +254,30 @@ void async_synchronize_full(void)
 }
 EXPORT_SYMBOL_GPL(async_synchronize_full);
 
-void async_synchronize_full_special(struct list_head *list)
+/**
+ * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain
+ * @list: running list to synchronize on
+ *
+ * This function waits until all asynchronous function calls for the
+ * synchronization domain specified by the running list @list have been done.
+ */
+void async_synchronize_full_domain(struct list_head *list)
 {
-       async_synchronize_cookie_special(next_cookie, list);
+       async_synchronize_cookie_domain(next_cookie, list);
 }
-EXPORT_SYMBOL_GPL(async_synchronize_full_special);
+EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
 
-void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *running)
+/**
+ * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
+ * @cookie: async_cookie_t to use as checkpoint
+ * @running: running list to synchronize on
+ *
+ * This function waits until all asynchronous function calls for the
+ * synchronization domain specified by the running list @list submitted
+ * prior to @cookie have been done.
+ */
+void async_synchronize_cookie_domain(async_cookie_t cookie,
+                                     struct list_head *running)
 {
        ktime_t starttime, delta, endtime;
 
@@ -247,14 +293,22 @@ void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *r
                delta = ktime_sub(endtime, starttime);
 
                printk("async_continuing @ %i after %lli usec\n",
-                       task_pid_nr(current), ktime_to_ns(delta) >> 10);
+                       task_pid_nr(current),
+                       (long long)ktime_to_ns(delta) >> 10);
        }
 }
-EXPORT_SYMBOL_GPL(async_synchronize_cookie_special);
+EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);
 
+/**
+ * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
+ * @cookie: async_cookie_t to use as checkpoint
+ *
+ * This function waits until all asynchronous function calls prior to @cookie
+ * have been done.
+ */
 void async_synchronize_cookie(async_cookie_t cookie)
 {
-       async_synchronize_cookie_special(cookie, &async_running);
+       async_synchronize_cookie_domain(cookie, &async_running);
 }
 EXPORT_SYMBOL_GPL(async_synchronize_cookie);
 
@@ -315,7 +369,11 @@ static int async_manager_thread(void *unused)
                ec = atomic_read(&entry_count);
 
                while (tc < ec && tc < MAX_THREADS) {
-                       kthread_run(async_thread, NULL, "async/%i", tc);
+                       if (IS_ERR(kthread_run(async_thread, NULL, "async/%i",
+                                              tc))) {
+                               msleep(100);
+                               continue;
+                       }
                        atomic_inc(&thread_count);
                        tc++;
                }
@@ -330,7 +388,9 @@ static int async_manager_thread(void *unused)
 static int __init async_init(void)
 {
        if (async_enabled)
-               kthread_run(async_manager_thread, NULL, "async/mgr");
+               if (IS_ERR(kthread_run(async_manager_thread, NULL,
+                                      "async/mgr")))
+                       async_enabled = 0;
        return 0;
 }
 
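The async.c hunks above rename the "special" entry points to "domain" terminology, add kernel-doc, and make kthread creation failures non-fatal. As a rough usage sketch of the renamed interface (not taken from this patch; the probe_one() callback and my_driver_domain list are made-up names), a caller with its own synchronization domain might do:

    #include <linux/async.h>
    #include <linux/list.h>

    static LIST_HEAD(my_driver_domain);     /* private synchronization domain */

    /* async_func_ptr callback: receives the data pointer and its cookie */
    static void probe_one(void *data, async_cookie_t cookie)
    {
            /* slow, independent initialization work goes here */
    }

    static int __init my_driver_init(void)
    {
            long i;

            for (i = 0; i < 4; i++)
                    async_schedule_domain(probe_one, (void *)i,
                                          &my_driver_domain);

            /* Wait only for this driver's work, not for all async work. */
            async_synchronize_full_domain(&my_driver_domain);
            return 0;
    }
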
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 5a54ff42874e..e14db9c089b9 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2351,7 +2351,7 @@ static void cgroup_lock_hierarchy(struct cgroupfs_root *root)
        for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                struct cgroup_subsys *ss = subsys[i];
                if (ss->root == root)
-                       mutex_lock_nested(&ss->hierarchy_mutex, i);
+                       mutex_lock(&ss->hierarchy_mutex);
        }
 }
 
@@ -2637,6 +2637,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
        BUG_ON(!list_empty(&init_task.tasks));
 
        mutex_init(&ss->hierarchy_mutex);
+       lockdep_set_class(&ss->hierarchy_mutex, &ss->subsys_key);
        ss->active = 1;
 }
 
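The cgroup.c change drops the per-index mutex_lock_nested() call and instead assigns each subsystem's hierarchy_mutex its own lockdep class when the subsystem is initialized. A minimal sketch of that idiom, with illustrative names only (my_subsys is not a real kernel structure):

    #include <linux/mutex.h>
    #include <linux/lockdep.h>

    struct my_subsys {
            struct mutex lock;
            struct lock_class_key key;      /* one lockdep class per instance */
    };

    static void my_subsys_init(struct my_subsys *ss)
    {
            mutex_init(&ss->lock);
            /* Distinct classes let lockdep tell the instances apart, so
             * taking several of them in a loop is not flagged as recursion. */
            lockdep_set_class(&ss->lock, &ss->key);
    }
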
diff --git a/kernel/exit.c b/kernel/exit.c
index 70612c19ac96..167e1e3ad7c6 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -118,6 +118,8 @@ static void __exit_signal(struct task_struct *tsk)
                 * We won't ever get here for the group leader, since it
                 * will have been the last reference on the signal_struct.
                 */
+               sig->utime = cputime_add(sig->utime, task_utime(tsk));
+               sig->stime = cputime_add(sig->stime, task_stime(tsk));
                sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
                sig->min_flt += tsk->min_flt;
                sig->maj_flt += tsk->maj_flt;
@@ -126,6 +128,7 @@ static void __exit_signal(struct task_struct *tsk)
                sig->inblock += task_io_get_inblock(tsk);
                sig->oublock += task_io_get_oublock(tsk);
                task_io_accounting_add(&sig->ioac, &tsk->ioac);
+               sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
                sig = NULL; /* Marker for below. */
        }
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 99309df985bf..8de303bdd4e5 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -856,13 +856,14 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
        sig->tty_old_pgrp = NULL;
        sig->tty = NULL;
 
-       sig->cutime = sig->cstime = cputime_zero;
+       sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
        sig->gtime = cputime_zero;
        sig->cgtime = cputime_zero;
        sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
        sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
        sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
        task_io_accounting_init(&sig->ioac);
+       sig->sum_sched_runtime = 0;
        taskstats_tgid_init(sig);
 
        task_lock(current->group_leader);
@@ -1010,6 +1011,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
         * triggers too late. This doesn't hurt, the check is only there
         * to stop root fork bombs.
         */
+       retval = -EAGAIN;
        if (nr_threads >= max_threads)
                goto bad_fork_cleanup_count;
 
@@ -1098,7 +1100,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #ifdef CONFIG_DEBUG_MUTEXES
        p->blocked_on = NULL; /* not blocked yet */
 #endif
-       if (unlikely(ptrace_reparented(current)))
+       if (unlikely(current->ptrace))
                ptrace_fork(p, clone_flags);
 
        /* Perform scheduler related setup. Assign this task to a CPU. */
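The second fork.c hunk sets retval = -EAGAIN explicitly before the max_threads check, so the fork-bomb path no longer returns whatever value an earlier step happened to leave in retval. The general error-path idiom, sketched with placeholder helpers (step_one()/limit_hit()/step_two() are hypothetical):

    static int do_setup(void)
    {
            int retval;

            retval = step_one();
            if (retval)
                    goto out;

            retval = -EAGAIN;       /* set the code before the conditional goto */
            if (limit_hit())
                    goto out;

            retval = step_two();
    out:
            return retval;
    }
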
diff --git a/kernel/itimer.c b/kernel/itimer.c
index 6a5fe93dd8bd..58762f7077ec 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -62,7 +62,7 @@ int do_getitimer(int which, struct itimerval *value)
                struct task_cputime cputime;
                cputime_t utime;
 
-               thread_group_cputime(tsk, &cputime);
+               thread_group_cputimer(tsk, &cputime);
                utime = cputime.utime;
                if (cputime_le(cval, utime)) { /* about to fire */
                        cval = jiffies_to_cputime(1);
@@ -82,7 +82,7 @@ int do_getitimer(int which, struct itimerval *value)
                struct task_cputime times;
                cputime_t ptime;
 
-               thread_group_cputime(tsk, &times);
+               thread_group_cputimer(tsk, &times);
                ptime = cputime_add(times.utime, times.stime);
                if (cputime_le(cval, ptime)) { /* about to fire */
                        cval = jiffies_to_cputime(1);
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index fa07da94d7be..2313a4cc14ea 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -230,6 +230,71 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
        return 0;
 }
 
+void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
+{
+       struct sighand_struct *sighand;
+       struct signal_struct *sig;
+       struct task_struct *t;
+
+       *times = INIT_CPUTIME;
+
+       rcu_read_lock();
+       sighand = rcu_dereference(tsk->sighand);
+       if (!sighand)
+               goto out;
+
+       sig = tsk->signal;
+
+       t = tsk;
+       do {
+               times->utime = cputime_add(times->utime, t->utime);
+               times->stime = cputime_add(times->stime, t->stime);
+               times->sum_exec_runtime += t->se.sum_exec_runtime;
+
+               t = next_thread(t);
+       } while (t != tsk);
+
+       times->utime = cputime_add(times->utime, sig->utime);
+       times->stime = cputime_add(times->stime, sig->stime);
+       times->sum_exec_runtime += sig->sum_sched_runtime;
+out:
+       rcu_read_unlock();
+}
+
+static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
+{
+       if (cputime_gt(b->utime, a->utime))
+               a->utime = b->utime;
+
+       if (cputime_gt(b->stime, a->stime))
+               a->stime = b->stime;
+
+       if (b->sum_exec_runtime > a->sum_exec_runtime)
+               a->sum_exec_runtime = b->sum_exec_runtime;
+}
+
+void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
+{
+       struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+       struct task_cputime sum;
+       unsigned long flags;
+
+       spin_lock_irqsave(&cputimer->lock, flags);
+       if (!cputimer->running) {
+               cputimer->running = 1;
+               /*
+                * The POSIX timer interface allows for absolute time expiry
+                * values through the TIMER_ABSTIME flag, therefore we have
+                * to synchronize the timer to the clock every time we start
+                * it.
+                */
+               thread_group_cputime(tsk, &sum);
+               update_gt_cputime(&cputimer->cputime, &sum);
+       }
+       *times = cputimer->cputime;
+       spin_unlock_irqrestore(&cputimer->lock, flags);
+}
+
 /*
  * Sample a process (thread group) clock for the given group_leader task.
  * Must be called with tasklist_lock held for reading.
@@ -457,7 +522,7 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk)
 {
        struct task_cputime cputime;
 
-       thread_group_cputime(tsk, &cputime);
+       thread_group_cputimer(tsk, &cputime);
        cleanup_timers(tsk->signal->cpu_timers,
                       cputime.utime, cputime.stime, cputime.sum_exec_runtime);
 }
@@ -964,6 +1029,19 @@ static void check_thread_timers(struct task_struct *tsk,
        }
 }
 
+static void stop_process_timers(struct task_struct *tsk)
+{
+       struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+       unsigned long flags;
+
+       if (!cputimer->running)
+               return;
+
+       spin_lock_irqsave(&cputimer->lock, flags);
+       cputimer->running = 0;
+       spin_unlock_irqrestore(&cputimer->lock, flags);
+}
+
 /*
  * Check for any per-thread CPU timers that have fired and move them
  * off the tsk->*_timers list onto the firing list. Per-thread timers
@@ -987,13 +1065,15 @@ static void check_process_timers(struct task_struct *tsk,
            sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
            list_empty(&timers[CPUCLOCK_VIRT]) &&
            cputime_eq(sig->it_virt_expires, cputime_zero) &&
-           list_empty(&timers[CPUCLOCK_SCHED]))
+           list_empty(&timers[CPUCLOCK_SCHED])) {
+               stop_process_timers(tsk);
                return;
+       }
 
        /*
         * Collect the current process totals.
         */
-       thread_group_cputime(tsk, &cputime);
+       thread_group_cputimer(tsk, &cputime);
        utime = cputime.utime;
        ptime = cputime_add(utime, cputime.stime);
        sum_sched_runtime = cputime.sum_exec_runtime;
@@ -1259,7 +1339,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
        if (!task_cputime_zero(&sig->cputime_expires)) {
                struct task_cputime group_sample;
 
-               thread_group_cputime(tsk, &group_sample);
+               thread_group_cputimer(tsk, &group_sample);
                if (task_cputime_expired(&group_sample, &sig->cputime_expires))
                        return 1;
        }
@@ -1329,6 +1409,33 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 }
 
 /*
+ * Sample a process (thread group) timer for the given group_leader task.
+ * Must be called with tasklist_lock held for reading.
+ */
+static int cpu_timer_sample_group(const clockid_t which_clock,
+                                 struct task_struct *p,
+                                 union cpu_time_count *cpu)
+{
+       struct task_cputime cputime;
+
+       thread_group_cputimer(p, &cputime);
+       switch (CPUCLOCK_WHICH(which_clock)) {
+       default:
+               return -EINVAL;
+       case CPUCLOCK_PROF:
+               cpu->cpu = cputime_add(cputime.utime, cputime.stime);
+               break;
+       case CPUCLOCK_VIRT:
+               cpu->cpu = cputime.utime;
+               break;
+       case CPUCLOCK_SCHED:
+               cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
+               break;
+       }
+       return 0;
+}
+
+/*
  * Set one of the process-wide special case CPU timers.
  * The tsk->sighand->siglock must be held by the caller.
  * The *newval argument is relative and we update it to be absolute, *oldval
@@ -1341,7 +1448,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
        struct list_head *head;
 
        BUG_ON(clock_idx == CPUCLOCK_SCHED);
-       cpu_clock_sample_group(clock_idx, tsk, &now);
+       cpu_timer_sample_group(clock_idx, tsk, &now);
 
        if (oldval) {
                if (!cputime_eq(*oldval, cputime_zero)) {
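posix-cpu-timers.c gains thread_group_cputime(), a slow path that sums utime/stime/sum_exec_runtime over all live threads under RCU plus the exited-thread totals in signal_struct, and thread_group_cputimer(), which starts and reads the cached signal->cputimer under its spinlock. A hedged sketch of a sampling site using the cached front end (the wrapper function name is hypothetical, not part of this patch):

    static void sample_group_prof_time(struct task_struct *tsk, cputime_t *prof)
    {
            struct task_cputime times;

            /*
             * thread_group_cputimer() switches the cached accounting on at
             * the first call (cputimer->running) and afterwards returns the
             * running totals without walking every thread on each sample.
             */
            thread_group_cputimer(tsk, &times);
            *prof = cputime_add(times.utime, times.stime);
    }
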
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 239988873971..b4d219016b6c 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -57,16 +57,6 @@ int pm_notifier_call_chain(unsigned long val)
 #ifdef CONFIG_PM_DEBUG
 int pm_test_level = TEST_NONE;
 
-static int suspend_test(int level)
-{
-       if (pm_test_level == level) {
-               printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
-               mdelay(5000);
-               return 1;
-       }
-       return 0;
-}
-
 static const char * const pm_tests[__TEST_AFTER_LAST] = {
        [TEST_NONE] = "none",
        [TEST_CORE] = "core",
@@ -125,14 +115,24 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
 }
 
 power_attr(pm_test);
-#else /* !CONFIG_PM_DEBUG */
-static inline int suspend_test(int level) { return 0; }
-#endif /* !CONFIG_PM_DEBUG */
+#endif /* CONFIG_PM_DEBUG */
 
 #endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_SUSPEND
 
+static int suspend_test(int level)
+{
+#ifdef CONFIG_PM_DEBUG
+       if (pm_test_level == level) {
+               printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
+               mdelay(5000);
+               return 1;
+       }
+#endif /* !CONFIG_PM_DEBUG */
+       return 0;
+}
+
 #ifdef CONFIG_PM_TEST_SUSPEND
 
 /*
diff --git a/kernel/profile.c b/kernel/profile.c
index 784933acf5b8..7724e0409bae 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -114,12 +114,15 @@ int __ref profile_init(void)
        if (!slab_is_available()) {
                prof_buffer = alloc_bootmem(buffer_bytes);
                alloc_bootmem_cpumask_var(&prof_cpu_mask);
+               cpumask_copy(prof_cpu_mask, cpu_possible_mask);
                return 0;
        }
 
        if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
                return -ENOMEM;
 
+       cpumask_copy(prof_cpu_mask, cpu_possible_mask);
+
        prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL);
        if (prof_buffer)
                return 0;
diff --git a/kernel/sched.c b/kernel/sched.c
index 400756169aa7..61245b8d0f16 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2266,16 +2266,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
        if (!sched_feat(SYNC_WAKEUPS))
                sync = 0;
 
-       if (!sync) {
-               if (current->se.avg_overlap < sysctl_sched_migration_cost &&
-                   p->se.avg_overlap < sysctl_sched_migration_cost)
-                       sync = 1;
-       } else {
-               if (current->se.avg_overlap >= sysctl_sched_migration_cost ||
-                   p->se.avg_overlap >= sysctl_sched_migration_cost)
-                       sync = 0;
-       }
-
 #ifdef CONFIG_SMP
        if (sched_feat(LB_WAKEUP_UPDATE)) {
                struct sched_domain *sd;
@@ -3890,19 +3880,24 @@ int select_nohz_load_balancer(int stop_tick)
        int cpu = smp_processor_id();
 
        if (stop_tick) {
-               cpumask_set_cpu(cpu, nohz.cpu_mask);
                cpu_rq(cpu)->in_nohz_recently = 1;
 
-               /*
-                * If we are going offline and still the leader, give up!
-                */
-               if (!cpu_active(cpu) &&
-                   atomic_read(&nohz.load_balancer) == cpu) {
+               if (!cpu_active(cpu)) {
+                       if (atomic_read(&nohz.load_balancer) != cpu)
+                               return 0;
+
+                       /*
+                        * If we are going offline and still the leader,
+                        * give up!
+                        */
                        if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
                                BUG();
+
                        return 0;
                }
 
+               cpumask_set_cpu(cpu, nohz.cpu_mask);
+
                /* time for ilb owner also to sleep */
                if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
                        if (atomic_read(&nohz.load_balancer) == cpu)
@@ -4697,8 +4692,8 @@ EXPORT_SYMBOL(default_wake_function);
  * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
-static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
-                            int nr_exclusive, int sync, void *key)
+void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+                       int nr_exclusive, int sync, void *key)
 {
        wait_queue_t *curr, *next;
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a7e50ba185ac..0566f2a03c42 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1191,15 +1191,20 @@ wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
            int idx, unsigned long load, unsigned long this_load,
            unsigned int imbalance)
 {
+       struct task_struct *curr = this_rq->curr;
+       struct task_group *tg;
        unsigned long tl = this_load;
        unsigned long tl_per_task;
-       struct task_group *tg;
        unsigned long weight;
        int balanced;
 
        if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
                return 0;
 
+       if (sync && (curr->se.avg_overlap > sysctl_sched_migration_cost ||
+                       p->se.avg_overlap > sysctl_sched_migration_cost))
+               sync = 0;
+
        /*
         * If sync wakeup then subtract the (maximum possible)
         * effect of the currently running task from the load
@@ -1426,7 +1431,9 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
        if (!sched_feat(WAKEUP_PREEMPT))
                return;
 
-       if (sched_feat(WAKEUP_OVERLAP) && sync) {
+       if (sched_feat(WAKEUP_OVERLAP) && (sync ||
+                       (se->avg_overlap < sysctl_sched_migration_cost &&
+                        pse->avg_overlap < sysctl_sched_migration_cost))) {
                resched_task(curr);
                return;
        }
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 8ab0cef8ecab..a8f93dd374e1 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -296,19 +296,21 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
 static inline void account_group_user_time(struct task_struct *tsk,
                                           cputime_t cputime)
 {
-       struct task_cputime *times;
-       struct signal_struct *sig;
+       struct thread_group_cputimer *cputimer;
 
        /* tsk == current, ensure it is safe to use ->signal */
        if (unlikely(tsk->exit_state))
                return;
 
-       sig = tsk->signal;
-       times = &sig->cputime.totals;
+       cputimer = &tsk->signal->cputimer;
 
-       spin_lock(&times->lock);
-       times->utime = cputime_add(times->utime, cputime);
-       spin_unlock(&times->lock);
+       if (!cputimer->running)
+               return;
+
+       spin_lock(&cputimer->lock);
+       cputimer->cputime.utime =
+               cputime_add(cputimer->cputime.utime, cputime);
+       spin_unlock(&cputimer->lock);
 }
 
 /**
@@ -324,19 +326,21 @@ static inline void account_group_user_time(struct task_struct *tsk,
 static inline void account_group_system_time(struct task_struct *tsk,
                                             cputime_t cputime)
 {
-       struct task_cputime *times;
-       struct signal_struct *sig;
+       struct thread_group_cputimer *cputimer;
 
        /* tsk == current, ensure it is safe to use ->signal */
        if (unlikely(tsk->exit_state))
                return;
 
-       sig = tsk->signal;
-       times = &sig->cputime.totals;
+       cputimer = &tsk->signal->cputimer;
+
+       if (!cputimer->running)
+               return;
 
-       spin_lock(&times->lock);
-       times->stime = cputime_add(times->stime, cputime);
-       spin_unlock(&times->lock);
+       spin_lock(&cputimer->lock);
+       cputimer->cputime.stime =
+               cputime_add(cputimer->cputime.stime, cputime);
+       spin_unlock(&cputimer->lock);
 }
 
 /**
@@ -352,7 +356,7 @@ static inline void account_group_system_time(struct task_struct *tsk,
 static inline void account_group_exec_runtime(struct task_struct *tsk,
                                              unsigned long long ns)
 {
-       struct task_cputime *times;
+       struct thread_group_cputimer *cputimer;
        struct signal_struct *sig;
 
        sig = tsk->signal;
@@ -361,9 +365,12 @@ static inline void account_group_exec_runtime(struct task_struct *tsk,
        if (unlikely(!sig))
                return;
 
-       times = &sig->cputime.totals;
+       cputimer = &sig->cputimer;
+
+       if (!cputimer->running)
+               return;
 
-       spin_lock(&times->lock);
-       times->sum_exec_runtime += ns;
-       spin_unlock(&times->lock);
+       spin_lock(&cputimer->lock);
+       cputimer->cputime.sum_exec_runtime += ns;
+       spin_unlock(&cputimer->lock);
 }
diff --git a/kernel/signal.c b/kernel/signal.c
index b6b36768b758..2a74fe87c0dd 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1367,7 +1367,6 @@ int do_notify_parent(struct task_struct *tsk, int sig)
        struct siginfo info;
        unsigned long flags;
        struct sighand_struct *psig;
-       struct task_cputime cputime;
        int ret = sig;
 
        BUG_ON(sig == -1);
@@ -1397,9 +1396,10 @@ int do_notify_parent(struct task_struct *tsk, int sig)
        info.si_uid = __task_cred(tsk)->uid;
        rcu_read_unlock();
 
-       thread_group_cputime(tsk, &cputime);
-       info.si_utime = cputime_to_jiffies(cputime.utime);
-       info.si_stime = cputime_to_jiffies(cputime.stime);
+       info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
+                               tsk->signal->utime));
+       info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
+                               tsk->signal->stime));
 
        info.si_status = tsk->exit_code & 0x7f;
        if (tsk->exit_code & 0x80)
diff --git a/kernel/sys.c b/kernel/sys.c
index e7dc0e10a485..f145c415bc16 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1525,22 +1525,14 @@ SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
                return -EINVAL;
        if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
                return -EFAULT;
+       if (new_rlim.rlim_cur > new_rlim.rlim_max)
+               return -EINVAL;
        old_rlim = current->signal->rlim + resource;
        if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
            !capable(CAP_SYS_RESOURCE))
                return -EPERM;
-
-       if (resource == RLIMIT_NOFILE) {
-               if (new_rlim.rlim_max == RLIM_INFINITY)
-                       new_rlim.rlim_max = sysctl_nr_open;
-               if (new_rlim.rlim_cur == RLIM_INFINITY)
-                       new_rlim.rlim_cur = sysctl_nr_open;
-               if (new_rlim.rlim_max > sysctl_nr_open)
-                       return -EPERM;
-       }
-
-       if (new_rlim.rlim_cur > new_rlim.rlim_max)
-               return -EINVAL;
+       if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open)
+               return -EPERM;
 
        retval = security_task_setrlimit(resource, &new_rlim);
        if (retval)
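The setrlimit() hunk validates rlim_cur <= rlim_max up front and, for RLIMIT_NOFILE, rejects a requested hard limit above sysctl_nr_open with -EPERM instead of silently clamping RLIM_INFINITY down to nr_open. Seen from userspace, the visible difference is roughly this (a hedged example; whether the call succeeds depends on the system's fs.nr_open and the caller's privileges):

    #include <sys/resource.h>
    #include <stdio.h>

    int main(void)
    {
            struct rlimit rl = { .rlim_cur = 1024, .rlim_max = RLIM_INFINITY };

            /* Previously an infinite max was quietly clamped to fs.nr_open;
             * with this change a max above fs.nr_open fails with EPERM. */
            if (setrlimit(RLIMIT_NOFILE, &rl) != 0)
                    perror("setrlimit(RLIMIT_NOFILE)");
            return 0;
    }
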
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 790f9d785663..c5ef44ff850f 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -101,6 +101,7 @@ static int two = 2;
 
 static int zero;
 static int one = 1;
+static unsigned long one_ul = 1;
 static int one_hundred = 100;
 
 /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
@@ -974,7 +975,7 @@ static struct ctl_table vm_table[] = {
                .mode = 0644,
                .proc_handler = &dirty_background_bytes_handler,
                .strategy = &sysctl_intvec,
-               .extra1 = &one,
+               .extra1 = &one_ul,
        },
        {
                .ctl_name = VM_DIRTY_RATIO,
@@ -995,7 +996,7 @@ static struct ctl_table vm_table[] = {
                .mode = 0644,
                .proc_handler = &dirty_bytes_handler,
                .strategy = &sysctl_intvec,
-               .extra1 = &one,
+               .extra1 = &one_ul,
        },
        {
                .procname = "dirty_writeback_centisecs",
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 7dcf6e9f2b04..9a236ffe2aa4 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1737,9 +1737,12 @@ static void clear_ftrace_pid(struct pid *pid)
 {
        struct task_struct *p;
 
+       rcu_read_lock();
        do_each_pid_task(pid, PIDTYPE_PID, p) {
                clear_tsk_trace_trace(p);
        } while_each_pid_task(pid, PIDTYPE_PID, p);
+       rcu_read_unlock();
+
        put_pid(pid);
 }
 
@@ -1747,9 +1750,11 @@ static void set_ftrace_pid(struct pid *pid)
 {
        struct task_struct *p;
 
+       rcu_read_lock();
        do_each_pid_task(pid, PIDTYPE_PID, p) {
                set_tsk_trace_trace(p);
        } while_each_pid_task(pid, PIDTYPE_PID, p);
+       rcu_read_unlock();
 }
 
 static void clear_ftrace_pid_task(struct pid **pid)
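Both ftrace pid helpers now bracket the pid-task walk with rcu_read_lock()/rcu_read_unlock(), since do_each_pid_task() traverses an RCU-protected list. The shape of the pattern, with the per-task operation left as a placeholder (the wrapper and its op() callback are illustrative, not kernel APIs):

    static void for_each_task_of_pid(struct pid *pid,
                                     void (*op)(struct task_struct *))
    {
            struct task_struct *p;

            rcu_read_lock();
            do_each_pid_task(pid, PIDTYPE_PID, p) {
                    op(p);
            } while_each_pid_task(pid, PIDTYPE_PID, p);
            rcu_read_unlock();
    }
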
diff --git a/kernel/wait.c b/kernel/wait.c
index cd87131f2fc2..42a2dbc181c8 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -91,6 +91,15 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
 }
 EXPORT_SYMBOL(prepare_to_wait_exclusive);
 
+/*
+ * finish_wait - clean up after waiting in a queue
+ * @q: waitqueue waited on
+ * @wait: wait descriptor
+ *
+ * Sets current thread back to running state and removes
+ * the wait descriptor from the given waitqueue if still
+ * queued.
+ */
 void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
 {
        unsigned long flags;
@@ -117,6 +126,39 @@ void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
 }
 EXPORT_SYMBOL(finish_wait);
 
+/*
+ * abort_exclusive_wait - abort exclusive waiting in a queue
+ * @q: waitqueue waited on
+ * @wait: wait descriptor
+ * @state: runstate of the waiter to be woken
+ * @key: key to identify a wait bit queue or %NULL
+ *
+ * Sets current thread back to running state and removes
+ * the wait descriptor from the given waitqueue if still
+ * queued.
+ *
+ * Wakes up the next waiter if the caller is concurrently
+ * woken up through the queue.
+ *
+ * This prevents waiter starvation where an exclusive waiter
+ * aborts and is woken up concurrently and noone wakes up
+ * the next waiter.
+ */
+void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
+                       unsigned int mode, void *key)
+{
+       unsigned long flags;
+
+       __set_current_state(TASK_RUNNING);
+       spin_lock_irqsave(&q->lock, flags);
+       if (!list_empty(&wait->task_list))
+               list_del_init(&wait->task_list);
+       else if (waitqueue_active(q))
+               __wake_up_common(q, mode, 1, 0, key);
+       spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(abort_exclusive_wait);
+
 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
 {
        int ret = default_wake_function(wait, mode, sync, key);
@@ -177,17 +219,20 @@ int __sched
 __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
                        int (*action)(void *), unsigned mode)
 {
-       int ret = 0;
-
        do {
+               int ret;
+
                prepare_to_wait_exclusive(wq, &q->wait, mode);
-               if (test_bit(q->key.bit_nr, q->key.flags)) {
-                       if ((ret = (*action)(q->key.flags)))
-                               break;
-               }
+               if (!test_bit(q->key.bit_nr, q->key.flags))
+                       continue;
+               ret = action(q->key.flags);
+               if (!ret)
+                       continue;
+               abort_exclusive_wait(wq, &q->wait, mode, &q->key);
+               return ret;
        } while (test_and_set_bit(q->key.bit_nr, q->key.flags));
        finish_wait(wq, &q->wait);
-       return ret;
+       return 0;
 }
 EXPORT_SYMBOL(__wait_on_bit_lock);
 
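abort_exclusive_wait() exists so that an exclusive waiter which bails out (on a signal, for example) hands a concurrently delivered wakeup on to the next waiter instead of swallowing it; the rewritten __wait_on_bit_lock() above is its first user, and __wake_up_common() was made non-static in sched.c for exactly this call. A hedged sketch of the same pattern in an open-coded exclusive wait loop (condition() and should_abort() are placeholders):

    static int wait_for_condition_exclusive(wait_queue_head_t *wq)
    {
            DEFINE_WAIT(wait);

            for (;;) {
                    prepare_to_wait_exclusive(wq, &wait, TASK_INTERRUPTIBLE);
                    if (condition())
                            break;
                    if (should_abort()) {
                            /* Pass any pending exclusive wakeup on to the
                             * next waiter before leaving the queue. */
                            abort_exclusive_wait(wq, &wait,
                                                 TASK_INTERRUPTIBLE, NULL);
                            return -EINTR;
                    }
                    schedule();
            }
            finish_wait(wq, &wait);
            return 0;
    }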