Diffstat (limited to 'kernel/module.c')
-rw-r--r--  kernel/module.c  108
1 file changed, 93 insertions, 15 deletions
diff --git a/kernel/module.c b/kernel/module.c
index e797812a4d95..38928fcaff2b 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -18,6 +18,7 @@
 */
 #include <linux/module.h>
 #include <linux/moduleloader.h>
+#include <linux/ftrace_event.h>
 #include <linux/init.h>
 #include <linux/kallsyms.h>
 #include <linux/fs.h>
@@ -52,6 +53,7 @@
 #include <linux/ftrace.h>
 #include <linux/async.h>
 #include <linux/percpu.h>
+#include <linux/kmemleak.h>
 
 #if 0
 #define DEBUGP printk
@@ -72,6 +74,9 @@ DEFINE_MUTEX(module_mutex);
 EXPORT_SYMBOL_GPL(module_mutex);
 static LIST_HEAD(modules);
 
+/* Block module loading/unloading? */
+int modules_disabled = 0;
+
 /* Waiting for a module to finish initializing? */
 static DECLARE_WAIT_QUEUE_HEAD(module_wq);
 
@@ -429,6 +434,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
 	unsigned long extra;
 	unsigned int i;
 	void *ptr;
+	int cpu;
 
 	if (align > PAGE_SIZE) {
 		printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
@@ -458,6 +464,11 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
 			if (!split_block(i, size))
 				return NULL;
 
+		/* add the per-cpu scanning areas */
+		for_each_possible_cpu(cpu)
+			kmemleak_alloc(ptr + per_cpu_offset(cpu), size, 0,
+				       GFP_KERNEL);
+
 		/* Mark allocated */
 		pcpu_size[i] = -pcpu_size[i];
 		return ptr;
@@ -472,6 +483,7 @@ static void percpu_modfree(void *freeme)
 {
 	unsigned int i;
 	void *ptr = __per_cpu_start + block_size(pcpu_size[0]);
+	int cpu;
 
 	/* First entry is core kernel percpu data. */
 	for (i = 1; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
@@ -483,6 +495,10 @@ static void percpu_modfree(void *freeme)
 	BUG();
 
  free:
+	/* remove the per-cpu scanning areas */
+	for_each_possible_cpu(cpu)
+		kmemleak_free(freeme + per_cpu_offset(cpu));
+
 	/* Merge with previous? */
 	if (pcpu_size[i-1] >= 0) {
 		pcpu_size[i-1] += pcpu_size[i];
@@ -777,7 +793,7 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 	char name[MODULE_NAME_LEN];
 	int ret, forced = 0;
 
-	if (!capable(CAP_SYS_MODULE))
+	if (!capable(CAP_SYS_MODULE) || modules_disabled)
 		return -EPERM;
 
 	if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
@@ -1489,9 +1505,6 @@ static void free_module(struct module *mod)
 	/* Free any allocated parameters. */
 	destroy_params(mod->kp, mod->num_kp);
 
-	/* release any pointers to mcount in this module */
-	ftrace_release(mod->module_core, mod->core_size);
-
 	/* This may be NULL, but that's OK */
 	module_free(mod, mod->module_init);
 	kfree(mod->args);
@@ -1878,6 +1891,36 @@ static void *module_alloc_update_bounds(unsigned long size)
 	return ret;
 }
 
+#ifdef CONFIG_DEBUG_KMEMLEAK
+static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
+				 Elf_Shdr *sechdrs, char *secstrings)
+{
+	unsigned int i;
+
+	/* only scan the sections containing data */
+	kmemleak_scan_area(mod->module_core, (unsigned long)mod -
+			   (unsigned long)mod->module_core,
+			   sizeof(struct module), GFP_KERNEL);
+
+	for (i = 1; i < hdr->e_shnum; i++) {
+		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
+			continue;
+		if (strncmp(secstrings + sechdrs[i].sh_name, ".data", 5) != 0
+		    && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
+			continue;
+
+		kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
+				   (unsigned long)mod->module_core,
+				   sechdrs[i].sh_size, GFP_KERNEL);
+	}
+}
+#else
+static inline void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
+					Elf_Shdr *sechdrs, char *secstrings)
+{
+}
+#endif
+
 /* Allocate and load the module: note that size of section 0 is always
    zero, and we rely on this for optional sections. */
 static noinline struct module *load_module(void __user *umod,
@@ -1892,11 +1935,9 @@ static noinline struct module *load_module(void __user *umod,
 	unsigned int symindex = 0;
 	unsigned int strindex = 0;
 	unsigned int modindex, versindex, infoindex, pcpuindex;
-	unsigned int num_mcount;
 	struct module *mod;
 	long err = 0;
 	void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */
-	unsigned long *mseg;
 	mm_segment_t old_fs;
 
 	DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n",
@@ -2050,6 +2091,12 @@ static noinline struct module *load_module(void __user *umod,
 
 	/* Do the allocs. */
 	ptr = module_alloc_update_bounds(mod->core_size);
+	/*
+	 * The pointer to this block is stored in the module structure
+	 * which is inside the block. Just mark it as not being a
+	 * leak.
+	 */
+	kmemleak_not_leak(ptr);
 	if (!ptr) {
 		err = -ENOMEM;
 		goto free_percpu;
@@ -2058,6 +2105,13 @@ static noinline struct module *load_module(void __user *umod,
 	mod->module_core = ptr;
 
 	ptr = module_alloc_update_bounds(mod->init_size);
+	/*
+	 * The pointer to this block is stored in the module structure
+	 * which is inside the block. This block doesn't need to be
+	 * scanned as it contains data and code that will be freed
+	 * after the module is initialized.
+	 */
+	kmemleak_ignore(ptr);
 	if (!ptr && mod->init_size) {
 		err = -ENOMEM;
 		goto free_core;
@@ -2088,6 +2142,7 @@ static noinline struct module *load_module(void __user *umod,
 	}
 	/* Module has been moved. */
 	mod = (void *)sechdrs[modindex].sh_addr;
+	kmemleak_load_module(mod, hdr, sechdrs, secstrings);
 
 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
 	mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
@@ -2161,6 +2216,10 @@ static noinline struct module *load_module(void __user *umod,
 	mod->unused_gpl_crcs = section_addr(hdr, sechdrs, secstrings,
 					    "__kcrctab_unused_gpl");
 #endif
+#ifdef CONFIG_CONSTRUCTORS
+	mod->ctors = section_objs(hdr, sechdrs, secstrings, ".ctors",
+				  sizeof(*mod->ctors), &mod->num_ctors);
+#endif
 
 #ifdef CONFIG_MARKERS
 	mod->markers = section_objs(hdr, sechdrs, secstrings, "__markers",
@@ -2172,7 +2231,19 @@ static noinline struct module *load_module(void __user *umod,
 					sizeof(*mod->tracepoints),
 					&mod->num_tracepoints);
 #endif
-
+#ifdef CONFIG_EVENT_TRACING
+	mod->trace_events = section_objs(hdr, sechdrs, secstrings,
+					 "_ftrace_events",
+					 sizeof(*mod->trace_events),
+					 &mod->num_trace_events);
+#endif
+#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+	/* sechdrs[0].sh_size is always zero */
+	mod->ftrace_callsites = section_objs(hdr, sechdrs, secstrings,
+					     "__mcount_loc",
+					     sizeof(*mod->ftrace_callsites),
+					     &mod->num_ftrace_callsites);
+#endif
 #ifdef CONFIG_MODVERSIONS
 	if ((mod->num_syms && !mod->crcs)
 	    || (mod->num_gpl_syms && !mod->gpl_crcs)
@@ -2237,11 +2308,6 @@ static noinline struct module *load_module(void __user *umod,
 		dynamic_debug_setup(debug, num_debug);
 	}
 
-	/* sechdrs[0].sh_size is always zero */
-	mseg = section_objs(hdr, sechdrs, secstrings, "__mcount_loc",
-			    sizeof(*mseg), &num_mcount);
-	ftrace_init_module(mod, mseg, mseg + num_mcount);
-
 	err = module_finalize(hdr, sechdrs, mod);
 	if (err < 0)
 		goto cleanup;
@@ -2302,7 +2368,6 @@ static noinline struct module *load_module(void __user *umod,
  cleanup:
 	kobject_del(&mod->mkobj.kobj);
 	kobject_put(&mod->mkobj.kobj);
-	ftrace_release(mod->module_core, mod->core_size);
  free_unload:
 	module_unload_free(mod);
 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
@@ -2328,6 +2393,17 @@ static noinline struct module *load_module(void __user *umod,
 	goto free_hdr;
 }
 
+/* Call module constructors. */
+static void do_mod_ctors(struct module *mod)
+{
+#ifdef CONFIG_CONSTRUCTORS
+	unsigned long i;
+
+	for (i = 0; i < mod->num_ctors; i++)
+		mod->ctors[i]();
+#endif
+}
+
 /* This is where the real work happens */
 SYSCALL_DEFINE3(init_module, void __user *, umod,
 		unsigned long, len, const char __user *, uargs)
@@ -2336,7 +2412,7 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
 	int ret = 0;
 
 	/* Must have permission */
-	if (!capable(CAP_SYS_MODULE))
+	if (!capable(CAP_SYS_MODULE) || modules_disabled)
 		return -EPERM;
 
 	/* Only one module load at a time, please */
@@ -2356,6 +2432,7 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
 	blocking_notifier_call_chain(&module_notify_list,
 			MODULE_STATE_COMING, mod);
 
+	do_mod_ctors(mod);
 	/* Start the module */
 	if (mod->init != NULL)
 		ret = do_one_initcall(mod->init);
@@ -2394,6 +2471,7 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
 	mutex_lock(&module_mutex);
 	/* Drop initial reference. */
 	module_put(mod);
+	trim_init_extable(mod);
 	module_free(mod, mod->module_init);
 	mod->module_init = NULL;
 	mod->init_size = 0;
@@ -2837,7 +2915,7 @@ void print_modules(void)
 	struct module *mod;
 	char buf[8];
 
-	printk("Modules linked in:");
+	printk(KERN_DEFAULT "Modules linked in:");
 	/* Most callers should already have preempt disabled, but make sure */
 	preempt_disable();
 	list_for_each_entry_rcu(mod, &modules, list)