-rw-r--r--  kernel/pid.c     |  7 -------
-rw-r--r--  mm/page_alloc.c  | 14 +++-----------
2 files changed, 3 insertions, 18 deletions
diff --git a/kernel/pid.c b/kernel/pid.c
index 5fa1db48d8b7..31310b5d3f50 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -36,7 +36,6 @@
 #include <linux/pid_namespace.h>
 #include <linux/init_task.h>
 #include <linux/syscalls.h>
-#include <linux/kmemleak.h>
 
 #define pid_hashfn(nr, ns) \
         hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
@@ -513,12 +512,6 @@ void __init pidhash_init(void)
         pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
         if (!pid_hash)
                 panic("Could not alloc pidhash!\n");
-        /*
-         * pid_hash contains references to allocated struct pid objects and it
-         * must be scanned by kmemleak to avoid false positives.
-         */
-        kmemleak_alloc(pid_hash, pidhash_size * sizeof(*(pid_hash)), 0,
-                       GFP_KERNEL);
         for (i = 0; i < pidhash_size; i++)
                 INIT_HLIST_HEAD(&pid_hash[i]);
 }
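
For context, the pidhash_init() hunk above drops a manual kmemleak annotation of a bootmem allocation. The sketch below shows that annotation pattern in minimal form; it is illustrative only, not part of this commit, and the names example_table and example_init are hypothetical.

/* Illustrative sketch only; example_table and example_init are hypothetical. */
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/kmemleak.h>
#include <linux/list.h>

static struct hlist_head *example_table;

static void __init example_init(unsigned long nents)
{
        size_t size = nents * sizeof(*example_table);

        /* Raw bootmem memory is not seen by kmemleak on its own. */
        example_table = alloc_bootmem(size);

        /*
         * Register the block so kmemleak scans the pointers stored in it;
         * min_count = 0 means the block itself is never reported as a leak.
         */
        kmemleak_alloc(example_table, size, 0, GFP_KERNEL);
}
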
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ad7cd1c56b07..3ef628845f07 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4745,8 +4745,10 @@ void *__init alloc_large_system_hash(const char *tablename,
                          * some pages at the end of hash table which
                          * alloc_pages_exact() automatically does
                          */
-                        if (get_order(size) < MAX_ORDER)
+                        if (get_order(size) < MAX_ORDER) {
                                 table = alloc_pages_exact(size, GFP_ATOMIC);
+                                kmemleak_alloc(table, size, 1, GFP_ATOMIC);
+                        }
                 }
         } while (!table && size > PAGE_SIZE && --log2qty);
 
@@ -4764,16 +4766,6 @@ void *__init alloc_large_system_hash(const char *tablename,
         if (_hash_mask)
                 *_hash_mask = (1 << log2qty) - 1;
 
-        /*
-         * If hashdist is set, the table allocation is done with __vmalloc()
-         * which invokes the kmemleak_alloc() callback. This function may also
-         * be called before the slab and kmemleak are initialised when
-         * kmemleak simply buffers the request to be executed later
-         * (GFP_ATOMIC flag ignored in this case).
-         */
-        if (!hashdist)
-                kmemleak_alloc(table, size, 1, GFP_ATOMIC);
-
         return table;
 }
 
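
Read together with the comment removed in the second hunk, the allocation choice in alloc_large_system_hash() after this change can be summarised by the condensed sketch below. It is a simplification for illustration, not a verbatim copy of the function.

/* Condensed sketch; simplified, not the actual function body. */
if (hashdist) {
        /*
         * __vmalloc() already invokes the kmemleak_alloc() callback
         * internally, so no explicit annotation is needed on this path
         * (this is what the removed comment relied on).
         */
        table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
} else if (get_order(size) < MAX_ORDER) {
        table = alloc_pages_exact(size, GFP_ATOMIC);
        /* Page-allocator memory is annotated explicitly, right at the allocation. */
        kmemleak_alloc(table, size, 1, GFP_ATOMIC);
}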