about summary refs log tree commit diff stats
path: root/kernel/module.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/module.c')
-rw-r--r--  kernel/module.c  91
1 files changed, 54 insertions, 37 deletions
diff --git a/kernel/module.c b/kernel/module.c
index d856e96a3cce..42a1d2afb217 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -387,9 +387,9 @@ static bool check_symbol(const struct symsearch *syms,
387 pr_warn("Symbol %s is marked as UNUSED, however this module is " 387 pr_warn("Symbol %s is marked as UNUSED, however this module is "
388 "using it.\n", fsa->name); 388 "using it.\n", fsa->name);
389 pr_warn("This symbol will go away in the future.\n"); 389 pr_warn("This symbol will go away in the future.\n");
390 pr_warn("Please evalute if this is the right api to use and if " 390 pr_warn("Please evaluate if this is the right api to use and "
391 "it really is, submit a report the linux kernel " 391 "if it really is, submit a report to the linux kernel "
392 "mailinglist together with submitting your code for " 392 "mailing list together with submitting your code for "
393 "inclusion.\n"); 393 "inclusion.\n");
394 } 394 }
395#endif 395#endif
@@ -1225,6 +1225,12 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod,
1225 const unsigned long *crc; 1225 const unsigned long *crc;
1226 int err; 1226 int err;
1227 1227
1228 /*
1229 * The module_mutex should not be a heavily contended lock;
1230 * if we get the occasional sleep here, we'll go an extra iteration
1231 * in the wait_event_interruptible(), which is harmless.
1232 */
1233 sched_annotate_sleep();
1228 mutex_lock(&module_mutex); 1234 mutex_lock(&module_mutex);
1229 sym = find_symbol(name, &owner, &crc, 1235 sym = find_symbol(name, &owner, &crc,
1230 !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true); 1236 !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
@@ -1859,7 +1865,7 @@ static void free_module(struct module *mod)
1859 kfree(mod->args); 1865 kfree(mod->args);
1860 percpu_modfree(mod); 1866 percpu_modfree(mod);
1861 1867
1862 /* Free lock-classes: */ 1868 /* Free lock-classes; relies on the preceding sync_rcu(). */
1863 lockdep_free_key_range(mod->module_core, mod->core_size); 1869 lockdep_free_key_range(mod->module_core, mod->core_size);
1864 1870
1865 /* Finally, free the core (containing the module structure) */ 1871 /* Finally, free the core (containing the module structure) */
@@ -2305,11 +2311,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
2305 info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1); 2311 info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
2306 info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym); 2312 info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
2307 mod->core_size += strtab_size; 2313 mod->core_size += strtab_size;
2314 mod->core_size = debug_align(mod->core_size);
2308 2315
2309 /* Put string table section at end of init part of module. */ 2316 /* Put string table section at end of init part of module. */
2310 strsect->sh_flags |= SHF_ALLOC; 2317 strsect->sh_flags |= SHF_ALLOC;
2311 strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect, 2318 strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
2312 info->index.str) | INIT_OFFSET_MASK; 2319 info->index.str) | INIT_OFFSET_MASK;
2320 mod->init_size = debug_align(mod->init_size);
2313 pr_debug("\t%s\n", info->secstrings + strsect->sh_name); 2321 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
2314} 2322}
2315 2323
@@ -2471,6 +2479,23 @@ static int elf_header_check(struct load_info *info)
2471 return 0; 2479 return 0;
2472} 2480}
2473 2481
2482#define COPY_CHUNK_SIZE (16*PAGE_SIZE)
2483
2484static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len)
2485{
2486 do {
2487 unsigned long n = min(len, COPY_CHUNK_SIZE);
2488
2489 if (copy_from_user(dst, usrc, n) != 0)
2490 return -EFAULT;
2491 cond_resched();
2492 dst += n;
2493 usrc += n;
2494 len -= n;
2495 } while (len);
2496 return 0;
2497}
2498
2474/* Sets info->hdr and info->len. */ 2499/* Sets info->hdr and info->len. */
2475static int copy_module_from_user(const void __user *umod, unsigned long len, 2500static int copy_module_from_user(const void __user *umod, unsigned long len,
2476 struct load_info *info) 2501 struct load_info *info)
@@ -2486,11 +2511,12 @@ static int copy_module_from_user(const void __user *umod, unsigned long len,
2486 return err; 2511 return err;
2487 2512
2488 /* Suck in entire file: we'll want most of it. */ 2513 /* Suck in entire file: we'll want most of it. */
2489 info->hdr = vmalloc(info->len); 2514 info->hdr = __vmalloc(info->len,
2515 GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN, PAGE_KERNEL);
2490 if (!info->hdr) 2516 if (!info->hdr)
2491 return -ENOMEM; 2517 return -ENOMEM;
2492 2518
2493 if (copy_from_user(info->hdr, umod, info->len) != 0) { 2519 if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) {
2494 vfree(info->hdr); 2520 vfree(info->hdr);
2495 return -EFAULT; 2521 return -EFAULT;
2496 } 2522 }
@@ -2745,6 +2771,9 @@ static int find_module_sections(struct module *mod, struct load_info *info)
2745 mod->trace_events = section_objs(info, "_ftrace_events", 2771 mod->trace_events = section_objs(info, "_ftrace_events",
2746 sizeof(*mod->trace_events), 2772 sizeof(*mod->trace_events),
2747 &mod->num_trace_events); 2773 &mod->num_trace_events);
2774 mod->trace_enums = section_objs(info, "_ftrace_enum_map",
2775 sizeof(*mod->trace_enums),
2776 &mod->num_trace_enums);
2748#endif 2777#endif
2749#ifdef CONFIG_TRACING 2778#ifdef CONFIG_TRACING
2750 mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt", 2779 mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
@@ -2978,6 +3007,12 @@ static bool finished_loading(const char *name)
2978 struct module *mod; 3007 struct module *mod;
2979 bool ret; 3008 bool ret;
2980 3009
3010 /*
3011 * The module_mutex should not be a heavily contended lock;
3012 * if we get the occasional sleep here, we'll go an extra iteration
3013 * in the wait_event_interruptible(), which is harmless.
3014 */
3015 sched_annotate_sleep();
2981 mutex_lock(&module_mutex); 3016 mutex_lock(&module_mutex);
2982 mod = find_module_all(name, strlen(name), true); 3017 mod = find_module_all(name, strlen(name), true);
2983 ret = !mod || mod->state == MODULE_STATE_LIVE 3018 ret = !mod || mod->state == MODULE_STATE_LIVE
@@ -3011,8 +3046,13 @@ static void do_free_init(struct rcu_head *head)
3011 kfree(m); 3046 kfree(m);
3012} 3047}
3013 3048
3014/* This is where the real work happens */ 3049/*
3015static int do_init_module(struct module *mod) 3050 * This is where the real work happens.
3051 *
3052 * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb
3053 * helper command 'lx-symbols'.
3054 */
3055static noinline int do_init_module(struct module *mod)
3016{ 3056{
3017 int ret = 0; 3057 int ret = 0;
3018 struct mod_initfree *freeinit; 3058 struct mod_initfree *freeinit;
@@ -3120,32 +3160,6 @@ static int may_init_module(void)
3120} 3160}
3121 3161
3122/* 3162/*
3123 * Can't use wait_event_interruptible() because our condition
3124 * 'finished_loading()' contains a blocking primitive itself (mutex_lock).
3125 */
3126static int wait_finished_loading(struct module *mod)
3127{
3128 DEFINE_WAIT_FUNC(wait, woken_wake_function);
3129 int ret = 0;
3130
3131 add_wait_queue(&module_wq, &wait);
3132 for (;;) {
3133 if (finished_loading(mod->name))
3134 break;
3135
3136 if (signal_pending(current)) {
3137 ret = -ERESTARTSYS;
3138 break;
3139 }
3140
3141 wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
3142 }
3143 remove_wait_queue(&module_wq, &wait);
3144
3145 return ret;
3146}
3147
3148/*
3149 * We try to place it in the list now to make sure it's unique before 3163 * We try to place it in the list now to make sure it's unique before
3150 * we dedicate too many resources. In particular, temporary percpu 3164 * we dedicate too many resources. In particular, temporary percpu
3151 * memory exhaustion. 3165 * memory exhaustion.
@@ -3165,8 +3179,8 @@ again:
3165 || old->state == MODULE_STATE_UNFORMED) { 3179 || old->state == MODULE_STATE_UNFORMED) {
3166 /* Wait in case it fails to load. */ 3180 /* Wait in case it fails to load. */
3167 mutex_unlock(&module_mutex); 3181 mutex_unlock(&module_mutex);
3168 3182 err = wait_event_interruptible(module_wq,
3169 err = wait_finished_loading(mod); 3183 finished_loading(mod->name));
3170 if (err) 3184 if (err)
3171 goto out_unlocked; 3185 goto out_unlocked;
3172 goto again; 3186 goto again;
@@ -3265,7 +3279,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
3265 mod->sig_ok = info->sig_ok; 3279 mod->sig_ok = info->sig_ok;
3266 if (!mod->sig_ok) { 3280 if (!mod->sig_ok) {
3267 pr_notice_once("%s: module verification failed: signature " 3281 pr_notice_once("%s: module verification failed: signature "
3268 "and/or required key missing - tainting " 3282 "and/or required key missing - tainting "
3269 "kernel\n", mod->name); 3283 "kernel\n", mod->name);
3270 add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK); 3284 add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK);
3271 } 3285 }
@@ -3379,6 +3393,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
3379 synchronize_rcu(); 3393 synchronize_rcu();
3380 mutex_unlock(&module_mutex); 3394 mutex_unlock(&module_mutex);
3381 free_module: 3395 free_module:
3396 /* Free lock-classes; relies on the preceding sync_rcu() */
3397 lockdep_free_key_range(mod->module_core, mod->core_size);
3398
3382 module_deallocate(mod, info); 3399 module_deallocate(mod, info);
3383 free_copy: 3400 free_copy:
3384 free_copy(info); 3401 free_copy(info);