Diffstat (limited to 'kernel')
-rw-r--r--  kernel/async.c        | 94
-rw-r--r--  kernel/fork.c         |  1
-rw-r--r--  kernel/power/main.c   | 26
-rw-r--r--  kernel/profile.c      |  3
-rw-r--r--  kernel/sched.c        |  4
-rw-r--r--  kernel/sys.c          | 16
-rw-r--r--  kernel/trace/ftrace.c |  5
-rw-r--r--  kernel/wait.c         | 59
8 files changed, 157 insertions(+), 51 deletions(-)
diff --git a/kernel/async.c b/kernel/async.c
index 608b32b42812..f565891f2c9b 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -54,6 +54,7 @@ asynchronous and synchronous parts of the kernel.
 #include <linux/sched.h>
 #include <linux/init.h>
 #include <linux/kthread.h>
+#include <linux/delay.h>
 #include <asm/atomic.h>
 
 static async_cookie_t next_cookie = 1;
@@ -132,21 +133,23 @@ static void run_one_entry(void)
 	entry = list_first_entry(&async_pending, struct async_entry, list);
 
 	/* 2) move it to the running queue */
-	list_del(&entry->list);
-	list_add_tail(&entry->list, &async_running);
+	list_move_tail(&entry->list, entry->running);
 	spin_unlock_irqrestore(&async_lock, flags);
 
 	/* 3) run it (and print duration)*/
 	if (initcall_debug && system_state == SYSTEM_BOOTING) {
-		printk("calling %lli_%pF @ %i\n", entry->cookie, entry->func, task_pid_nr(current));
+		printk("calling %lli_%pF @ %i\n", (long long)entry->cookie,
+			entry->func, task_pid_nr(current));
 		calltime = ktime_get();
 	}
 	entry->func(entry->data, entry->cookie);
 	if (initcall_debug && system_state == SYSTEM_BOOTING) {
 		rettime = ktime_get();
 		delta = ktime_sub(rettime, calltime);
-		printk("initcall %lli_%pF returned 0 after %lld usecs\n", entry->cookie,
-			entry->func, ktime_to_ns(delta) >> 10);
+		printk("initcall %lli_%pF returned 0 after %lld usecs\n",
+			(long long)entry->cookie,
+			entry->func,
+			(long long)ktime_to_ns(delta) >> 10);
 	}
 
 	/* 4) remove it from the running queue */
@@ -205,18 +208,44 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct l
 	return newcookie;
 }
 
+/**
+ * async_schedule - schedule a function for asynchronous execution
+ * @ptr: function to execute asynchronously
+ * @data: data pointer to pass to the function
+ *
+ * Returns an async_cookie_t that may be used for checkpointing later.
+ * Note: This function may be called from atomic or non-atomic contexts.
+ */
 async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
 {
-	return __async_schedule(ptr, data, &async_pending);
+	return __async_schedule(ptr, data, &async_running);
 }
 EXPORT_SYMBOL_GPL(async_schedule);
 
-async_cookie_t async_schedule_special(async_func_ptr *ptr, void *data, struct list_head *running)
+/**
+ * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
+ * @ptr: function to execute asynchronously
+ * @data: data pointer to pass to the function
+ * @running: running list for the domain
+ *
+ * Returns an async_cookie_t that may be used for checkpointing later.
+ * @running may be used in the async_synchronize_*_domain() functions
+ * to wait within a certain synchronization domain rather than globally.
+ * A synchronization domain is specified via the running queue @running to use.
+ * Note: This function may be called from atomic or non-atomic contexts.
+ */
+async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
+				     struct list_head *running)
 {
 	return __async_schedule(ptr, data, running);
 }
-EXPORT_SYMBOL_GPL(async_schedule_special);
+EXPORT_SYMBOL_GPL(async_schedule_domain);
 
+/**
+ * async_synchronize_full - synchronize all asynchronous function calls
+ *
+ * This function waits until all asynchronous function calls have been done.
+ */
 void async_synchronize_full(void)
 {
 	do {
@@ -225,13 +254,30 @@ void async_synchronize_full(void)
 }
 EXPORT_SYMBOL_GPL(async_synchronize_full);
 
-void async_synchronize_full_special(struct list_head *list)
+/**
+ * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
+ * @list: running list to synchronize on
+ *
+ * This function waits until all asynchronous function calls for the
+ * synchronization domain specified by the running list @list have been done.
+ */
+void async_synchronize_full_domain(struct list_head *list)
 {
-	async_synchronize_cookie_special(next_cookie, list);
+	async_synchronize_cookie_domain(next_cookie, list);
 }
-EXPORT_SYMBOL_GPL(async_synchronize_full_special);
+EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
 
-void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *running)
+/**
+ * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
+ * @cookie: async_cookie_t to use as checkpoint
+ * @running: running list to synchronize on
+ *
+ * This function waits until all asynchronous function calls for the
+ * synchronization domain specified by the running list @running submitted
+ * prior to @cookie have been done.
+ */
+void async_synchronize_cookie_domain(async_cookie_t cookie,
+				     struct list_head *running)
 {
 	ktime_t starttime, delta, endtime;
 
@@ -247,14 +293,22 @@ void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *r
 		delta = ktime_sub(endtime, starttime);
 
 		printk("async_continuing @ %i after %lli usec\n",
-			task_pid_nr(current), ktime_to_ns(delta) >> 10);
+			task_pid_nr(current),
+			(long long)ktime_to_ns(delta) >> 10);
 	}
 }
-EXPORT_SYMBOL_GPL(async_synchronize_cookie_special);
+EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);
 
+/**
+ * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
+ * @cookie: async_cookie_t to use as checkpoint
+ *
+ * This function waits until all asynchronous function calls prior to @cookie
+ * have been done.
+ */
 void async_synchronize_cookie(async_cookie_t cookie)
 {
-	async_synchronize_cookie_special(cookie, &async_running);
+	async_synchronize_cookie_domain(cookie, &async_running);
 }
 EXPORT_SYMBOL_GPL(async_synchronize_cookie);
 
@@ -315,7 +369,11 @@ static int async_manager_thread(void *unused)
 		ec = atomic_read(&entry_count);
 
 		while (tc < ec && tc < MAX_THREADS) {
-			kthread_run(async_thread, NULL, "async/%i", tc);
+			if (IS_ERR(kthread_run(async_thread, NULL, "async/%i",
+					       tc))) {
+				msleep(100);
+				continue;
+			}
 			atomic_inc(&thread_count);
 			tc++;
 		}
@@ -330,7 +388,9 @@ static int async_manager_thread(void *unused)
 static int __init async_init(void)
 {
 	if (async_enabled)
-		kthread_run(async_manager_thread, NULL, "async/mgr");
+		if (IS_ERR(kthread_run(async_manager_thread, NULL,
+				       "async/mgr")))
+			async_enabled = 0;
 	return 0;
 }
 
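For context, a minimal usage sketch of the renamed domain API, assuming the post-patch declarations in <linux/async.h>; the names my_domain, my_dev_init and my_driver_init are hypothetical and not part of the patch:

/*
 * Sketch: a per-subsystem async domain. Work queued on my_domain can be
 * synchronized independently of the global async_running list.
 */
#include <linux/async.h>
#include <linux/list.h>

static LIST_HEAD(my_domain);		/* hypothetical synchronization domain */

static void my_dev_init(void *data, async_cookie_t cookie)
{
	/* slow, independent initialization of 'data' runs here */
}

static void my_driver_init(void *dev)
{
	async_cookie_t cookie;

	/* queue work into this driver's domain only */
	cookie = async_schedule_domain(my_dev_init, dev, &my_domain);

	/* wait only for work submitted before 'cookie' in this domain... */
	async_synchronize_cookie_domain(cookie, &my_domain);

	/* ...or for everything queued in this domain, but not globally */
	async_synchronize_full_domain(&my_domain);
}
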
diff --git a/kernel/fork.c b/kernel/fork.c
index 43c039d55e95..d624d50f7729 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1005,6 +1005,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	 * triggers too late. This doesn't hurt, the check is only there
 	 * to stop root fork bombs.
 	 */
+	retval = -EAGAIN;
 	if (nr_threads >= max_threads)
 		goto bad_fork_cleanup_count;
 
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 239988873971..b4d219016b6c 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -57,16 +57,6 @@ int pm_notifier_call_chain(unsigned long val)
 #ifdef CONFIG_PM_DEBUG
 int pm_test_level = TEST_NONE;
 
-static int suspend_test(int level)
-{
-	if (pm_test_level == level) {
-		printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
-		mdelay(5000);
-		return 1;
-	}
-	return 0;
-}
-
 static const char * const pm_tests[__TEST_AFTER_LAST] = {
 	[TEST_NONE] = "none",
 	[TEST_CORE] = "core",
@@ -125,14 +115,24 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
 }
 
 power_attr(pm_test);
-#else /* !CONFIG_PM_DEBUG */
-static inline int suspend_test(int level) { return 0; }
-#endif /* !CONFIG_PM_DEBUG */
+#endif /* CONFIG_PM_DEBUG */
 
 #endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_SUSPEND
 
+static int suspend_test(int level)
+{
+#ifdef CONFIG_PM_DEBUG
+	if (pm_test_level == level) {
+		printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
+		mdelay(5000);
+		return 1;
+	}
+#endif /* CONFIG_PM_DEBUG */
+	return 0;
+}
+
 #ifdef CONFIG_PM_TEST_SUSPEND
 
 /*
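To see why suspend_test() now lives under CONFIG_SUSPEND, here is a hedged sketch of how a suspend sequence consults it; my_enter_state() and the exact step ordering are illustrative placeholders, while suspend_test() and the TEST_* levels come from the code above:

/*
 * Sketch: each stage of a suspend-style sequence asks suspend_test()
 * whether the user-selected pm_test level says "stop here, wait 5
 * seconds, then back out" instead of going one level deeper.
 */
static int my_enter_state(void)
{
	/* ... freeze processes, suspend devices ... */
	if (suspend_test(TEST_DEVICES))
		return 0;	/* test mode: skip the deeper stages */

	/* ... disable nonboot CPUs, arch-specific core suspend ... */
	if (suspend_test(TEST_CORE))
		return 0;

	/* ... actually enter the platform sleep state ... */
	return 0;
}
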
diff --git a/kernel/profile.c b/kernel/profile.c
index 784933acf5b8..7724e0409bae 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -114,12 +114,15 @@ int __ref profile_init(void)
 	if (!slab_is_available()) {
 		prof_buffer = alloc_bootmem(buffer_bytes);
 		alloc_bootmem_cpumask_var(&prof_cpu_mask);
+		cpumask_copy(prof_cpu_mask, cpu_possible_mask);
 		return 0;
 	}
 
 	if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
 		return -ENOMEM;
 
+	cpumask_copy(prof_cpu_mask, cpu_possible_mask);
+
 	prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL);
 	if (prof_buffer)
 		return 0;
diff --git a/kernel/sched.c b/kernel/sched.c
index 242d0d47a70d..8ee437a5ec1d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4697,8 +4697,8 @@ EXPORT_SYMBOL(default_wake_function);
  * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
-static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
-			     int nr_exclusive, int sync, void *key)
+void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+			int nr_exclusive, int sync, void *key)
 {
 	wait_queue_t *curr, *next;
 
diff --git a/kernel/sys.c b/kernel/sys.c
index e7dc0e10a485..f145c415bc16 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1525,22 +1525,14 @@ SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
 		return -EINVAL;
 	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
 		return -EFAULT;
+	if (new_rlim.rlim_cur > new_rlim.rlim_max)
+		return -EINVAL;
 	old_rlim = current->signal->rlim + resource;
 	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
 	    !capable(CAP_SYS_RESOURCE))
 		return -EPERM;
-
-	if (resource == RLIMIT_NOFILE) {
-		if (new_rlim.rlim_max == RLIM_INFINITY)
-			new_rlim.rlim_max = sysctl_nr_open;
-		if (new_rlim.rlim_cur == RLIM_INFINITY)
-			new_rlim.rlim_cur = sysctl_nr_open;
-		if (new_rlim.rlim_max > sysctl_nr_open)
-			return -EPERM;
-	}
-
-	if (new_rlim.rlim_cur > new_rlim.rlim_max)
-		return -EINVAL;
+	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open)
+		return -EPERM;
 
 	retval = security_task_setrlimit(resource, &new_rlim);
 	if (retval)
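From userspace, the reordered checks behave as in this hedged sketch (standard POSIX calls; the errno values follow from the patch above, and if I read the new check right, an RLIMIT_NOFILE hard limit of RLIM_INFINITY is now rejected with EPERM rather than silently clamped to the nr_open sysctl as the deleted branch used to do):

/* Userspace sketch of post-patch setrlimit() behaviour. */
#include <errno.h>
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl = { .rlim_cur = 2048, .rlim_max = 1024 };

	/* rlim_cur > rlim_max now fails up front with EINVAL */
	if (setrlimit(RLIMIT_NOFILE, &rl) == -1)
		perror("soft above hard");	/* Invalid argument */

	/* a hard limit above /proc/sys/fs/nr_open (RLIM_INFINITY
	 * included, since it is no longer clamped) fails with EPERM */
	rl.rlim_cur = rl.rlim_max = RLIM_INFINITY;
	if (setrlimit(RLIMIT_NOFILE, &rl) == -1)
		perror("hard above nr_open");	/* Operation not permitted */

	return 0;
}
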
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 7dcf6e9f2b04..9a236ffe2aa4 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1737,9 +1737,12 @@ static void clear_ftrace_pid(struct pid *pid)
 {
 	struct task_struct *p;
 
+	rcu_read_lock();
 	do_each_pid_task(pid, PIDTYPE_PID, p) {
 		clear_tsk_trace_trace(p);
 	} while_each_pid_task(pid, PIDTYPE_PID, p);
+	rcu_read_unlock();
+
 	put_pid(pid);
 }
 
@@ -1747,9 +1750,11 @@ static void set_ftrace_pid(struct pid *pid)
 {
 	struct task_struct *p;
 
+	rcu_read_lock();
 	do_each_pid_task(pid, PIDTYPE_PID, p) {
 		set_tsk_trace_trace(p);
 	} while_each_pid_task(pid, PIDTYPE_PID, p);
+	rcu_read_unlock();
 }
 
 static void clear_ftrace_pid_task(struct pid **pid)
diff --git a/kernel/wait.c b/kernel/wait.c
index cd87131f2fc2..42a2dbc181c8 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -91,6 +91,15 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
 }
 EXPORT_SYMBOL(prepare_to_wait_exclusive);
 
+/*
+ * finish_wait - clean up after waiting in a queue
+ * @q: waitqueue waited on
+ * @wait: wait descriptor
+ *
+ * Sets the current thread back to running state and removes
+ * the wait descriptor from the given waitqueue if still
+ * queued.
+ */
 void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
 {
 	unsigned long flags;
@@ -117,6 +126,39 @@ void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
 }
 EXPORT_SYMBOL(finish_wait);
 
+/*
+ * abort_exclusive_wait - abort exclusive waiting in a queue
+ * @q: waitqueue waited on
+ * @wait: wait descriptor
+ * @mode: runstate of the waiter to be woken
+ * @key: key to identify a wait bit queue or %NULL
+ *
+ * Sets the current thread back to running state and removes
+ * the wait descriptor from the given waitqueue if still
+ * queued.
+ *
+ * Wakes up the next waiter if the caller is concurrently
+ * woken up through the queue.
+ *
+ * This prevents waiter starvation where an exclusive waiter
+ * aborts and is woken up concurrently and no one wakes up
+ * the next waiter.
+ */
+void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
+			unsigned int mode, void *key)
+{
+	unsigned long flags;
+
+	__set_current_state(TASK_RUNNING);
+	spin_lock_irqsave(&q->lock, flags);
+	if (!list_empty(&wait->task_list))
+		list_del_init(&wait->task_list);
+	else if (waitqueue_active(q))
+		__wake_up_common(q, mode, 1, 0, key);
+	spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(abort_exclusive_wait);
+
 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
 {
 	int ret = default_wake_function(wait, mode, sync, key);
@@ -177,17 +219,20 @@ int __sched
 __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
 			int (*action)(void *), unsigned mode)
 {
-	int ret = 0;
-
 	do {
+		int ret;
+
 		prepare_to_wait_exclusive(wq, &q->wait, mode);
-		if (test_bit(q->key.bit_nr, q->key.flags)) {
-			if ((ret = (*action)(q->key.flags)))
-				break;
-		}
+		if (!test_bit(q->key.bit_nr, q->key.flags))
+			continue;
+		ret = action(q->key.flags);
+		if (!ret)
+			continue;
+		abort_exclusive_wait(wq, &q->wait, mode, &q->key);
+		return ret;
 	} while (test_and_set_bit(q->key.bit_nr, q->key.flags));
 	finish_wait(wq, &q->wait);
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL(__wait_on_bit_lock);
 
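The starvation scenario that abort_exclusive_wait() closes is easiest to see from a caller's side. A hedged sketch, assuming the standard wait_on_bit_lock() wrapper around __wait_on_bit_lock(); the bit number, flag word and my_* names are hypothetical:

/*
 * Sketch: an exclusive bit-lock waiter whose action callback can be
 * interrupted. If a signal arrives after this task has already
 * consumed an exclusive wakeup, __wait_on_bit_lock() now calls
 * abort_exclusive_wait(), which passes the wakeup on to the next
 * waiter instead of dropping it.
 */
#include <linux/wait.h>
#include <linux/sched.h>

#define MY_LOCK_BIT	0		/* hypothetical bit number */
static unsigned long my_flags;		/* hypothetical flag word */

static int my_wait_action(void *word)
{
	schedule();			/* sleep until woken */
	return signal_pending(current) ? -EINTR : 0;
}

static int my_lock_interruptible(void)
{
	/* returns 0 with the bit owned, or -EINTR without starving
	 * the next exclusive waiter */
	return wait_on_bit_lock(&my_flags, MY_LOCK_BIT,
				my_wait_action, TASK_INTERRUPTIBLE);
}
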