diff options
author | Peter Zijlstra <peterz@infradead.org> | 2015-02-10 23:31:13 -0500 |
---|---|---|
committer | Rusty Russell <rusty@rustcorp.com.au> | 2015-02-10 23:32:04 -0500 |
commit | 9cc019b8c94fa59e02fd82f15f7b7d689e35c190 (patch) | |
tree | 0d6775b29012dd6a2df95cb16bd190faffd44780 | |
parent | d64810f56147b53e92228c31442e925576314aa2 (diff) |
module: Replace over-engineered nested sleep
Since the introduction of the nested sleep warning, we've established
that the occasional sleep inside a wait_event() is fine.
wait_event() loops are invariant wrt. spurious wakeups, and the
occasional sleep has a similar effect on them. As long as it's occasional,
it's harmless.
Therefore replace the 'correct' but verbose wait_woken() thing with
a simple annotation to shut up the warning.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
-rw-r--r-- | kernel/module.c | 36 |
1 files changed, 8 insertions, 28 deletions
diff --git a/kernel/module.c b/kernel/module.c index d7a92682fba3..82dc1f899e6d 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -2984,6 +2984,12 @@ static bool finished_loading(const char *name) | |||
2984 | struct module *mod; | 2984 | struct module *mod; |
2985 | bool ret; | 2985 | bool ret; |
2986 | 2986 | ||
2987 | /* | ||
2988 | * The module_mutex should not be a heavily contended lock; | ||
2989 | * if we get the occasional sleep here, we'll go an extra iteration | ||
2990 | * in the wait_event_interruptible(), which is harmless. | ||
2991 | */ | ||
2992 | sched_annotate_sleep(); | ||
2987 | mutex_lock(&module_mutex); | 2993 | mutex_lock(&module_mutex); |
2988 | mod = find_module_all(name, strlen(name), true); | 2994 | mod = find_module_all(name, strlen(name), true); |
2989 | ret = !mod || mod->state == MODULE_STATE_LIVE | 2995 | ret = !mod || mod->state == MODULE_STATE_LIVE |
@@ -3126,32 +3132,6 @@ static int may_init_module(void) | |||
3126 | } | 3132 | } |
3127 | 3133 | ||
3128 | /* | 3134 | /* |
3129 | * Can't use wait_event_interruptible() because our condition | ||
3130 | * 'finished_loading()' contains a blocking primitive itself (mutex_lock). | ||
3131 | */ | ||
3132 | static int wait_finished_loading(struct module *mod) | ||
3133 | { | ||
3134 | DEFINE_WAIT_FUNC(wait, woken_wake_function); | ||
3135 | int ret = 0; | ||
3136 | |||
3137 | add_wait_queue(&module_wq, &wait); | ||
3138 | for (;;) { | ||
3139 | if (finished_loading(mod->name)) | ||
3140 | break; | ||
3141 | |||
3142 | if (signal_pending(current)) { | ||
3143 | ret = -ERESTARTSYS; | ||
3144 | break; | ||
3145 | } | ||
3146 | |||
3147 | wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); | ||
3148 | } | ||
3149 | remove_wait_queue(&module_wq, &wait); | ||
3150 | |||
3151 | return ret; | ||
3152 | } | ||
3153 | |||
3154 | /* | ||
3155 | * We try to place it in the list now to make sure it's unique before | 3135 | * We try to place it in the list now to make sure it's unique before |
3156 | * we dedicate too many resources. In particular, temporary percpu | 3136 | * we dedicate too many resources. In particular, temporary percpu |
3157 | * memory exhaustion. | 3137 | * memory exhaustion. |
@@ -3171,8 +3151,8 @@ again: | |||
3171 | || old->state == MODULE_STATE_UNFORMED) { | 3151 | || old->state == MODULE_STATE_UNFORMED) { |
3172 | /* Wait in case it fails to load. */ | 3152 | /* Wait in case it fails to load. */ |
3173 | mutex_unlock(&module_mutex); | 3153 | mutex_unlock(&module_mutex); |
3174 | 3154 | err = wait_event_interruptible(module_wq, | |
3175 | err = wait_finished_loading(mod); | 3155 | finished_loading(mod->name)); |
3176 | if (err) | 3156 | if (err) |
3177 | goto out_unlocked; | 3157 | goto out_unlocked; |
3178 | goto again; | 3158 | goto again; |