Diffstat (limited to 'mm')

 mm/bootmem.c     | 24 ++++++++++++++++++++----
 mm/page_alloc.c  |  8 ++++++++
 mm/page_cgroup.c |  7 +++++++
 mm/vmscan.c      | 10 ++++++----
 4 files changed, 41 insertions(+), 8 deletions(-)
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 58c66cc5056a..142c84a54993 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -833,15 +833,24 @@ static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
 void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                                    unsigned long align, unsigned long goal)
 {
+        void *ptr;
+
         if (WARN_ON_ONCE(slab_is_available()))
                 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
 #ifdef CONFIG_NO_BOOTMEM
-        return __alloc_memory_core_early(pgdat->node_id, size, align,
+        ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
+                                         goal, -1ULL);
+        if (ptr)
+                return ptr;
+
+        ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
                                          goal, -1ULL);
 #else
-        return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
+        ptr = ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
 #endif
+
+        return ptr;
 }
 
 void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
@@ -977,14 +986,21 @@ void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
 void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
                                        unsigned long align, unsigned long goal)
 {
+        void *ptr;
+
         if (WARN_ON_ONCE(slab_is_available()))
                 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
 #ifdef CONFIG_NO_BOOTMEM
-        return __alloc_memory_core_early(pgdat->node_id, size, align,
+        ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
+                                         goal, ARCH_LOW_ADDRESS_LIMIT);
+        if (ptr)
+                return ptr;
+        ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
                                          goal, ARCH_LOW_ADDRESS_LIMIT);
 #else
-        return ___alloc_bootmem_node(pgdat->bdata, size, align,
+        ptr = ___alloc_bootmem_node(pgdat->bdata, size, align,
                                      goal, ARCH_LOW_ADDRESS_LIMIT);
 #endif
+        return ptr;
 }
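
Both bootmem hunks follow the same pattern: under CONFIG_NO_BOOTMEM, a node-local early allocation is tried first and, if the requested node has no usable memory (for example a 32-bit NUMA node whose RAM lies entirely above the mapped limit), the allocation is retried across all nodes via MAX_NUMNODES. Below is a minimal sketch of that fallback idiom; early_alloc_on_node() and early_alloc_any_node() are hypothetical stand-ins for __alloc_memory_core_early() called with a specific nid and with MAX_NUMNODES, not kernel APIs.

/*
 * Sketch of the node-preferred-with-fallback idiom introduced above.
 * Both helpers are illustrative assumptions, not kernel functions.
 */
#include <stddef.h>

void *early_alloc_on_node(int nid, size_t size);  /* hypothetical */
void *early_alloc_any_node(size_t size);          /* hypothetical */

void *alloc_prefer_node(int nid, size_t size)
{
        void *ptr = early_alloc_on_node(nid, size);

        if (ptr)
                return ptr;     /* node-local memory was available */

        /* the node owns no suitable memory; take it from any node */
        return early_alloc_any_node(size);
}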
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 431214b941ac..9bd339eb04c6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3634,6 +3634,9 @@ void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
         int i;
         void *ptr;
 
+        if (limit > get_max_mapped())
+                limit = get_max_mapped();
+
         /* need to go over early_node_map to find out good range for node */
         for_each_active_range_index_in_nid(i, nid) {
                 u64 addr;
@@ -3659,6 +3662,11 @@ void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
                 ptr = phys_to_virt(addr);
                 memset(ptr, 0, size);
                 reserve_early_without_check(addr, addr + size, "BOOTMEM");
+                /*
+                 * The min_count is set to 0 so that bootmem allocated blocks
+                 * are never reported as leaks.
+                 */
+                kmemleak_alloc(ptr, size, 0, 0);
                 return ptr;
         }
 
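
The page_alloc.c changes do two things: they clamp the search limit to the currently mapped address range, and they register each bootmem block with kmemleak. The third argument of kmemleak_alloc() is min_count; passing 0 means the block is still scanned for pointers to other objects but is itself never reported as a leak. A sketch of the same annotation around a raw allocator follows; my_early_alloc() is a hypothetical allocator used only for illustration, while kmemleak_alloc() is the real annotation API.

/*
 * Sketch: informing kmemleak about blocks that bypass the slab,
 * mirroring the hunk above. my_early_alloc() is an assumption.
 */
#include <linux/kmemleak.h>

extern void *my_early_alloc(size_t size);       /* hypothetical */

void *tracked_early_alloc(size_t size)
{
        void *ptr = my_early_alloc(size);

        if (ptr)
                /*
                 * min_count == 0: scan the block for pointers to other
                 * objects, but never report the block itself as leaked.
                 */
                kmemleak_alloc(ptr, size, 0, 0);
        return ptr;
}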
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 6c0081441a32..5bffada7cde1 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -9,6 +9,7 @@
 #include <linux/vmalloc.h>
 #include <linux/cgroup.h>
 #include <linux/swapops.h>
+#include <linux/kmemleak.h>
 
 static void __meminit
 __init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
@@ -126,6 +127,12 @@ static int __init_refok init_section_page_cgroup(unsigned long pfn)
                         if (!base)
                                 base = vmalloc(table_size);
                 }
+                /*
+                 * The value stored in section->page_cgroup is (base - pfn)
+                 * and it does not point to the memory block allocated above,
+                 * causing kmemleak false positives.
+                 */
+                kmemleak_not_leak(base);
         } else {
                 /*
                  * We don't have to allocate page_cgroup again, but
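
kmemleak_not_leak() marks an object as a known false positive: since only the biased pointer (base - pfn) is stored in section->page_cgroup, no reference to the start of the allocation survives, and kmemleak would otherwise flag it. Below is a sketch of the same situation with an offset-indexed table; everything except kmemleak_not_leak() and kmalloc() is invented for illustration.

/*
 * Sketch of the false-positive pattern annotated above: the long-term
 * pointer is biased by an offset, so it no longer points at (or into)
 * the allocated block, and kmemleak cannot find a reference to it.
 */
#include <linux/errno.h>
#include <linux/kmemleak.h>
#include <linux/slab.h>

struct entry {
        unsigned long val;
};

struct table {
        struct entry *biased;   /* really (base - first_index) */
};

static int init_table(struct table *t, unsigned long first_index,
                      unsigned long nr_entries)
{
        struct entry *base = kmalloc(nr_entries * sizeof(*base), GFP_KERNEL);

        if (!base)
                return -ENOMEM;

        /* grey the block: still scanned, but never reported as a leak */
        kmemleak_not_leak(base);
        t->biased = base - first_index; /* indexed by absolute index */
        return 0;
}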
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9c7e57cc63a3..b94fe1b3da43 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -213,8 +213,9 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
         list_for_each_entry(shrinker, &shrinker_list, list) {
                 unsigned long long delta;
                 unsigned long total_scan;
-                unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);
+                unsigned long max_pass;
 
+                max_pass = (*shrinker->shrink)(shrinker, 0, gfp_mask);
                 delta = (4 * scanned) / shrinker->seeks;
                 delta *= max_pass;
                 do_div(delta, lru_pages + 1);
@@ -242,8 +243,9 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
                         int shrink_ret;
                         int nr_before;
 
-                        nr_before = (*shrinker->shrink)(0, gfp_mask);
-                        shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
+                        nr_before = (*shrinker->shrink)(shrinker, 0, gfp_mask);
+                        shrink_ret = (*shrinker->shrink)(shrinker, this_scan,
+                                                         gfp_mask);
                         if (shrink_ret == -1)
                                 break;
                         if (shrink_ret < nr_before)
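
These two hunks reflect an API change: the ->shrink() callback now receives the struct shrinker itself as its first argument, while a nr_to_scan of 0 remains the query pass that only reports the current object count. The extra argument lets a callback recover per-instance state with container_of(). A sketch of a shrinker written against that signature follows; the cache structure and its count are illustrative assumptions, not kernel code.

/*
 * Sketch of a shrinker using the new callback signature. Only
 * struct shrinker and container_of() are real kernel interfaces
 * here; my_cache and its bookkeeping are invented.
 */
#include <linux/kernel.h>
#include <linux/mm.h>

struct my_cache {
        struct shrinker shrinker;
        int nr_objects;         /* hypothetical object count */
};

static int my_cache_shrink(struct shrinker *s, int nr_to_scan,
                           gfp_t gfp_mask)
{
        struct my_cache *cache = container_of(s, struct my_cache, shrinker);

        if (nr_to_scan) {
                /* ... drop up to nr_to_scan objects here ... */
        }

        /* nr_to_scan == 0 is the query pass: report the current count */
        return cache->nr_objects;
}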
@@ -296,7 +298,7 @@ static int may_write_to_queue(struct backing_dev_info *bdi)
 static void handle_write_error(struct address_space *mapping,
                                 struct page *page, int error)
 {
-        lock_page(page);
+        lock_page_nosync(page);
         if (page_mapping(page) == mapping)
                 mapping_set_error(mapping, error);
         unlock_page(page);
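
The last hunk swaps in lock_page_nosync() because plain lock_page() may call into the mapping's sync_page() method while waiting for the lock, and after a failed writeback the page can already be detached from its mapping. The non-syncing variant avoids that callback, and the page_mapping() re-check under the lock keeps the error from landing on a stale mapping. A sketch of the resulting pattern, with an illustrative caller name standing in for handle_write_error():

/*
 * Sketch of the safety concern behind the one-line change above.
 * note_write_error() is illustrative, not the kernel function.
 */
#include <linux/mm.h>
#include <linux/pagemap.h>

static void note_write_error(struct address_space *mapping,
                             struct page *page, int error)
{
        lock_page_nosync(page);         /* no sync_page() while waiting */
        /* the page may have been truncated before we got the lock */
        if (page_mapping(page) == mapping)
                mapping_set_error(mapping, error);
        unlock_page(page);
}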