aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'kernel')
-rw-r--r--kernel/async.c3
-rw-r--r--kernel/module.c27
-rw-r--r--kernel/trace/trace.c15
3 files changed, 39 insertions, 6 deletions
diff --git a/kernel/async.c b/kernel/async.c
index 9d3118384858..a1d585c351d6 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -196,6 +196,9 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct a
196 atomic_inc(&entry_count); 196 atomic_inc(&entry_count);
197 spin_unlock_irqrestore(&async_lock, flags); 197 spin_unlock_irqrestore(&async_lock, flags);
198 198
199 /* mark that this task has queued an async job, used by module init */
200 current->flags |= PF_USED_ASYNC;
201
199 /* schedule for execution */ 202 /* schedule for execution */
200 queue_work(system_unbound_wq, &entry->work); 203 queue_work(system_unbound_wq, &entry->work);
201 204
diff --git a/kernel/module.c b/kernel/module.c
index 250092c1d57d..b10b048367e1 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3013,6 +3013,12 @@ static int do_init_module(struct module *mod)
3013{ 3013{
3014 int ret = 0; 3014 int ret = 0;
3015 3015
3016 /*
3017 * We want to find out whether @mod uses async during init. Clear
3018 * PF_USED_ASYNC. async_schedule*() will set it.
3019 */
3020 current->flags &= ~PF_USED_ASYNC;
3021
3016 blocking_notifier_call_chain(&module_notify_list, 3022 blocking_notifier_call_chain(&module_notify_list,
3017 MODULE_STATE_COMING, mod); 3023 MODULE_STATE_COMING, mod);
3018 3024
@@ -3058,8 +3064,25 @@ static int do_init_module(struct module *mod)
3058 blocking_notifier_call_chain(&module_notify_list, 3064 blocking_notifier_call_chain(&module_notify_list,
3059 MODULE_STATE_LIVE, mod); 3065 MODULE_STATE_LIVE, mod);
3060 3066
3061 /* We need to finish all async code before the module init sequence is done */ 3067 /*
3062 async_synchronize_full(); 3068 * We need to finish all async code before the module init sequence
3069 * is done. This has potential to deadlock. For example, a newly
3070 * detected block device can trigger request_module() of the
3071 * default iosched from async probing task. Once userland helper
3072 * reaches here, async_synchronize_full() will wait on the async
3073 * task waiting on request_module() and deadlock.
3074 *
3075 * This deadlock is avoided by performing async_synchronize_full()
3076 * iff module init queued any async jobs. This isn't a full
3077 * solution as it will deadlock the same if module loading from
3078 * async jobs nests more than once; however, due to the various
3079 * constraints, this hack seems to be the best option for now.
3080 * Please refer to the following thread for details.
3081 *
3082 * http://thread.gmane.org/gmane.linux.kernel/1420814
3083 */
3084 if (current->flags & PF_USED_ASYNC)
3085 async_synchronize_full();
3063 3086
3064 mutex_lock(&module_mutex); 3087 mutex_lock(&module_mutex);
3065 /* Drop initial reference. */ 3088 /* Drop initial reference. */
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1bbfa0446507..3c13e46d7d24 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3454,7 +3454,7 @@ static int tracing_wait_pipe(struct file *filp)
3454 return -EINTR; 3454 return -EINTR;
3455 3455
3456 /* 3456 /*
3457 * We block until we read something and tracing is enabled. 3457 * We block until we read something and tracing is disabled.
3458 * We still block if tracing is disabled, but we have never 3458 * We still block if tracing is disabled, but we have never
3459 * read anything. This allows a user to cat this file, and 3459 * read anything. This allows a user to cat this file, and
3460 * then enable tracing. But after we have read something, 3460 * then enable tracing. But after we have read something,
@@ -3462,7 +3462,7 @@ static int tracing_wait_pipe(struct file *filp)
3462 * 3462 *
3463 * iter->pos will be 0 if we haven't read anything. 3463 * iter->pos will be 0 if we haven't read anything.
3464 */ 3464 */
3465 if (tracing_is_enabled() && iter->pos) 3465 if (!tracing_is_enabled() && iter->pos)
3466 break; 3466 break;
3467 } 3467 }
3468 3468
@@ -4817,10 +4817,17 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
4817 return ret; 4817 return ret;
4818 4818
4819 if (buffer) { 4819 if (buffer) {
4820 if (val) 4820 mutex_lock(&trace_types_lock);
4821 if (val) {
4821 ring_buffer_record_on(buffer); 4822 ring_buffer_record_on(buffer);
4822 else 4823 if (current_trace->start)
4824 current_trace->start(tr);
4825 } else {
4823 ring_buffer_record_off(buffer); 4826 ring_buffer_record_off(buffer);
4827 if (current_trace->stop)
4828 current_trace->stop(tr);
4829 }
4830 mutex_unlock(&trace_types_lock);
4824 } 4831 }
4825 4832
4826 (*ppos)++; 4833 (*ppos)++;