about summary refs log tree commit diff stats
path: root/kernel/module.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/module.c')
-rw-r--r--  kernel/module.c | 66
1 file changed, 63 insertions(+), 3 deletions(-)
diff --git a/kernel/module.c b/kernel/module.c
index 2383e60fcf3f..215aaab09e91 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -53,6 +53,7 @@
53#include <linux/ftrace.h> 53#include <linux/ftrace.h>
54#include <linux/async.h> 54#include <linux/async.h>
55#include <linux/percpu.h> 55#include <linux/percpu.h>
56#include <linux/kmemleak.h>
56 57
57#if 0 58#if 0
58#define DEBUGP printk 59#define DEBUGP printk
@@ -73,6 +74,9 @@ DEFINE_MUTEX(module_mutex);
73EXPORT_SYMBOL_GPL(module_mutex); 74EXPORT_SYMBOL_GPL(module_mutex);
74static LIST_HEAD(modules); 75static LIST_HEAD(modules);
75 76
77/* Block module loading/unloading? */
78int modules_disabled = 0;
79
76/* Waiting for a module to finish initializing? */ 80/* Waiting for a module to finish initializing? */
77static DECLARE_WAIT_QUEUE_HEAD(module_wq); 81static DECLARE_WAIT_QUEUE_HEAD(module_wq);
78 82
@@ -430,6 +434,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
430 unsigned long extra; 434 unsigned long extra;
431 unsigned int i; 435 unsigned int i;
432 void *ptr; 436 void *ptr;
437 int cpu;
433 438
434 if (align > PAGE_SIZE) { 439 if (align > PAGE_SIZE) {
435 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n", 440 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
@@ -459,6 +464,11 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
459 if (!split_block(i, size)) 464 if (!split_block(i, size))
460 return NULL; 465 return NULL;
461 466
467 /* add the per-cpu scanning areas */
468 for_each_possible_cpu(cpu)
469 kmemleak_alloc(ptr + per_cpu_offset(cpu), size, 0,
470 GFP_KERNEL);
471
462 /* Mark allocated */ 472 /* Mark allocated */
463 pcpu_size[i] = -pcpu_size[i]; 473 pcpu_size[i] = -pcpu_size[i];
464 return ptr; 474 return ptr;
@@ -473,6 +483,7 @@ static void percpu_modfree(void *freeme)
473{ 483{
474 unsigned int i; 484 unsigned int i;
475 void *ptr = __per_cpu_start + block_size(pcpu_size[0]); 485 void *ptr = __per_cpu_start + block_size(pcpu_size[0]);
486 int cpu;
476 487
477 /* First entry is core kernel percpu data. */ 488 /* First entry is core kernel percpu data. */
478 for (i = 1; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) { 489 for (i = 1; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
@@ -484,6 +495,10 @@ static void percpu_modfree(void *freeme)
484 BUG(); 495 BUG();
485 496
486 free: 497 free:
498 /* remove the per-cpu scanning areas */
499 for_each_possible_cpu(cpu)
500 kmemleak_free(freeme + per_cpu_offset(cpu));
501
487 /* Merge with previous? */ 502 /* Merge with previous? */
488 if (pcpu_size[i-1] >= 0) { 503 if (pcpu_size[i-1] >= 0) {
489 pcpu_size[i-1] += pcpu_size[i]; 504 pcpu_size[i-1] += pcpu_size[i];
@@ -778,7 +793,7 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
778 char name[MODULE_NAME_LEN]; 793 char name[MODULE_NAME_LEN];
779 int ret, forced = 0; 794 int ret, forced = 0;
780 795
781 if (!capable(CAP_SYS_MODULE)) 796 if (!capable(CAP_SYS_MODULE) || modules_disabled)
782 return -EPERM; 797 return -EPERM;
783 798
784 if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0) 799 if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
@@ -1876,6 +1891,36 @@ static void *module_alloc_update_bounds(unsigned long size)
1876 return ret; 1891 return ret;
1877} 1892}
1878 1893
1894#ifdef CONFIG_DEBUG_KMEMLEAK
1895static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
1896 Elf_Shdr *sechdrs, char *secstrings)
1897{
1898 unsigned int i;
1899
1900 /* only scan the sections containing data */
1901 kmemleak_scan_area(mod->module_core, (unsigned long)mod -
1902 (unsigned long)mod->module_core,
1903 sizeof(struct module), GFP_KERNEL);
1904
1905 for (i = 1; i < hdr->e_shnum; i++) {
1906 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
1907 continue;
1908 if (strncmp(secstrings + sechdrs[i].sh_name, ".data", 5) != 0
1909 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
1910 continue;
1911
1912 kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
1913 (unsigned long)mod->module_core,
1914 sechdrs[i].sh_size, GFP_KERNEL);
1915 }
1916}
1917#else
1918static inline void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
1919 Elf_Shdr *sechdrs, char *secstrings)
1920{
1921}
1922#endif
1923
1879/* Allocate and load the module: note that size of section 0 is always 1924/* Allocate and load the module: note that size of section 0 is always
1880 zero, and we rely on this for optional sections. */ 1925 zero, and we rely on this for optional sections. */
1881static noinline struct module *load_module(void __user *umod, 1926static noinline struct module *load_module(void __user *umod,
@@ -2046,6 +2091,12 @@ static noinline struct module *load_module(void __user *umod,
2046 2091
2047 /* Do the allocs. */ 2092 /* Do the allocs. */
2048 ptr = module_alloc_update_bounds(mod->core_size); 2093 ptr = module_alloc_update_bounds(mod->core_size);
2094 /*
2095 * The pointer to this block is stored in the module structure
2096 * which is inside the block. Just mark it as not being a
2097 * leak.
2098 */
2099 kmemleak_not_leak(ptr);
2049 if (!ptr) { 2100 if (!ptr) {
2050 err = -ENOMEM; 2101 err = -ENOMEM;
2051 goto free_percpu; 2102 goto free_percpu;
@@ -2054,6 +2105,13 @@ static noinline struct module *load_module(void __user *umod,
2054 mod->module_core = ptr; 2105 mod->module_core = ptr;
2055 2106
2056 ptr = module_alloc_update_bounds(mod->init_size); 2107 ptr = module_alloc_update_bounds(mod->init_size);
2108 /*
2109 * The pointer to this block is stored in the module structure
2110 * which is inside the block. This block doesn't need to be
2111 * scanned as it contains data and code that will be freed
2112 * after the module is initialized.
2113 */
2114 kmemleak_ignore(ptr);
2057 if (!ptr && mod->init_size) { 2115 if (!ptr && mod->init_size) {
2058 err = -ENOMEM; 2116 err = -ENOMEM;
2059 goto free_core; 2117 goto free_core;
@@ -2084,6 +2142,7 @@ static noinline struct module *load_module(void __user *umod,
2084 } 2142 }
2085 /* Module has been moved. */ 2143 /* Module has been moved. */
2086 mod = (void *)sechdrs[modindex].sh_addr; 2144 mod = (void *)sechdrs[modindex].sh_addr;
2145 kmemleak_load_module(mod, hdr, sechdrs, secstrings);
2087 2146
2088#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP) 2147#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
2089 mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t), 2148 mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
@@ -2338,7 +2397,7 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
2338 int ret = 0; 2397 int ret = 0;
2339 2398
2340 /* Must have permission */ 2399 /* Must have permission */
2341 if (!capable(CAP_SYS_MODULE)) 2400 if (!capable(CAP_SYS_MODULE) || modules_disabled)
2342 return -EPERM; 2401 return -EPERM;
2343 2402
2344 /* Only one module load at a time, please */ 2403 /* Only one module load at a time, please */
@@ -2396,6 +2455,7 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
2396 mutex_lock(&module_mutex); 2455 mutex_lock(&module_mutex);
2397 /* Drop initial reference. */ 2456 /* Drop initial reference. */
2398 module_put(mod); 2457 module_put(mod);
2458 trim_init_extable(mod);
2399 module_free(mod, mod->module_init); 2459 module_free(mod, mod->module_init);
2400 mod->module_init = NULL; 2460 mod->module_init = NULL;
2401 mod->init_size = 0; 2461 mod->init_size = 0;
@@ -2839,7 +2899,7 @@ void print_modules(void)
2839 struct module *mod; 2899 struct module *mod;
2840 char buf[8]; 2900 char buf[8];
2841 2901
2842 printk("Modules linked in:"); 2902 printk(KERN_DEFAULT "Modules linked in:");
2843 /* Most callers should already have preempt disabled, but make sure */ 2903 /* Most callers should already have preempt disabled, but make sure */
2844 preempt_disable(); 2904 preempt_disable();
2845 list_for_each_entry_rcu(mod, &modules, list) 2905 list_for_each_entry_rcu(mod, &modules, list)