about summary refs log tree commit diff stats
path: root/kernel/module.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/module.c')
-rw-r--r--  kernel/module.c  |  56
1 files changed, 56 insertions, 0 deletions
diff --git a/kernel/module.c b/kernel/module.c
index 2383e60fcf3f..5cd55ab15daf 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -53,6 +53,7 @@
53#include <linux/ftrace.h> 53#include <linux/ftrace.h>
54#include <linux/async.h> 54#include <linux/async.h>
55#include <linux/percpu.h> 55#include <linux/percpu.h>
56#include <linux/kmemleak.h>
56 57
57#if 0 58#if 0
58#define DEBUGP printk 59#define DEBUGP printk
@@ -430,6 +431,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
430 unsigned long extra; 431 unsigned long extra;
431 unsigned int i; 432 unsigned int i;
432 void *ptr; 433 void *ptr;
434 int cpu;
433 435
434 if (align > PAGE_SIZE) { 436 if (align > PAGE_SIZE) {
435 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n", 437 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
@@ -459,6 +461,11 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
459 if (!split_block(i, size)) 461 if (!split_block(i, size))
460 return NULL; 462 return NULL;
461 463
464 /* add the per-cpu scanning areas */
465 for_each_possible_cpu(cpu)
466 kmemleak_alloc(ptr + per_cpu_offset(cpu), size, 0,
467 GFP_KERNEL);
468
462 /* Mark allocated */ 469 /* Mark allocated */
463 pcpu_size[i] = -pcpu_size[i]; 470 pcpu_size[i] = -pcpu_size[i];
464 return ptr; 471 return ptr;
@@ -473,6 +480,7 @@ static void percpu_modfree(void *freeme)
473{ 480{
474 unsigned int i; 481 unsigned int i;
475 void *ptr = __per_cpu_start + block_size(pcpu_size[0]); 482 void *ptr = __per_cpu_start + block_size(pcpu_size[0]);
483 int cpu;
476 484
477 /* First entry is core kernel percpu data. */ 485 /* First entry is core kernel percpu data. */
478 for (i = 1; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) { 486 for (i = 1; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
@@ -484,6 +492,10 @@ static void percpu_modfree(void *freeme)
484 BUG(); 492 BUG();
485 493
486 free: 494 free:
495 /* remove the per-cpu scanning areas */
496 for_each_possible_cpu(cpu)
497 kmemleak_free(freeme + per_cpu_offset(cpu));
498
487 /* Merge with previous? */ 499 /* Merge with previous? */
488 if (pcpu_size[i-1] >= 0) { 500 if (pcpu_size[i-1] >= 0) {
489 pcpu_size[i-1] += pcpu_size[i]; 501 pcpu_size[i-1] += pcpu_size[i];
@@ -1876,6 +1888,36 @@ static void *module_alloc_update_bounds(unsigned long size)
1876 return ret; 1888 return ret;
1877} 1889}
1878 1890
1891#ifdef CONFIG_DEBUG_KMEMLEAK
1892static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
1893 Elf_Shdr *sechdrs, char *secstrings)
1894{
1895 unsigned int i;
1896
1897 /* only scan the sections containing data */
1898 kmemleak_scan_area(mod->module_core, (unsigned long)mod -
1899 (unsigned long)mod->module_core,
1900 sizeof(struct module), GFP_KERNEL);
1901
1902 for (i = 1; i < hdr->e_shnum; i++) {
1903 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
1904 continue;
1905 if (strncmp(secstrings + sechdrs[i].sh_name, ".data", 5) != 0
1906 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
1907 continue;
1908
1909 kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
1910 (unsigned long)mod->module_core,
1911 sechdrs[i].sh_size, GFP_KERNEL);
1912 }
1913}
1914#else
1915static inline void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
1916 Elf_Shdr *sechdrs, char *secstrings)
1917{
1918}
1919#endif
1920
1879/* Allocate and load the module: note that size of section 0 is always 1921/* Allocate and load the module: note that size of section 0 is always
1880 zero, and we rely on this for optional sections. */ 1922 zero, and we rely on this for optional sections. */
1881static noinline struct module *load_module(void __user *umod, 1923static noinline struct module *load_module(void __user *umod,
@@ -2046,6 +2088,12 @@ static noinline struct module *load_module(void __user *umod,
2046 2088
2047 /* Do the allocs. */ 2089 /* Do the allocs. */
2048 ptr = module_alloc_update_bounds(mod->core_size); 2090 ptr = module_alloc_update_bounds(mod->core_size);
2091 /*
2092 * The pointer to this block is stored in the module structure
2093 * which is inside the block. Just mark it as not being a
2094 * leak.
2095 */
2096 kmemleak_not_leak(ptr);
2049 if (!ptr) { 2097 if (!ptr) {
2050 err = -ENOMEM; 2098 err = -ENOMEM;
2051 goto free_percpu; 2099 goto free_percpu;
@@ -2054,6 +2102,13 @@ static noinline struct module *load_module(void __user *umod,
2054 mod->module_core = ptr; 2102 mod->module_core = ptr;
2055 2103
2056 ptr = module_alloc_update_bounds(mod->init_size); 2104 ptr = module_alloc_update_bounds(mod->init_size);
2105 /*
2106 * The pointer to this block is stored in the module structure
2107 * which is inside the block. This block doesn't need to be
2108 * scanned as it contains data and code that will be freed
2109 * after the module is initialized.
2110 */
2111 kmemleak_ignore(ptr);
2057 if (!ptr && mod->init_size) { 2112 if (!ptr && mod->init_size) {
2058 err = -ENOMEM; 2113 err = -ENOMEM;
2059 goto free_core; 2114 goto free_core;
@@ -2084,6 +2139,7 @@ static noinline struct module *load_module(void __user *umod,
2084 } 2139 }
2085 /* Module has been moved. */ 2140 /* Module has been moved. */
2086 mod = (void *)sechdrs[modindex].sh_addr; 2141 mod = (void *)sechdrs[modindex].sh_addr;
2142 kmemleak_load_module(mod, hdr, sechdrs, secstrings);
2087 2143
2088#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP) 2144#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
2089 mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t), 2145 mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),