author     Linus Torvalds <torvalds@linux-foundation.org>  2019-09-24 19:10:23 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-09-24 19:10:23 -0400
commit     9c9fa97a8edbc3668dfc7a25de516e80c146e86f (patch)
tree       2dc0e90203796a4b346ce190f9521c3294104058 /mm/sparse.c
parent     5184d449600f501a8688069f35c138c6b3bf8b94 (diff)
parent     2b38d01b4de8b1bbda7f5f7e91252609557635fc (diff)
Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton:
- a few hot fixes
- ocfs2 updates
- almost all of -mm (slab-generic, slab, slub, kmemleak, kasan,
cleanups, debug, pagecache, memcg, gup, pagemap, memory-hotplug,
sparsemem, vmalloc, initialization, z3fold, compaction, mempolicy,
oom-kill, hugetlb, migration, thp, mmap, madvise, shmem, zswap,
zsmalloc)
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (132 commits)
mm/zsmalloc.c: fix a -Wunused-function warning
zswap: do not map same object twice
zswap: use movable memory if zpool support allocate movable memory
zpool: add malloc_support_movable to zpool_driver
shmem: fix obsolete comment in shmem_getpage_gfp()
mm/madvise: reduce code duplication in error handling paths
mm: mmap: increase sockets maximum memory size pgoff for 32bits
mm/mmap.c: refine find_vma_prev() with rb_last()
riscv: make mmap allocation top-down by default
mips: use generic mmap top-down layout and brk randomization
mips: replace arch specific way to determine 32bit task with generic version
mips: adjust brk randomization offset to fit generic version
mips: use STACK_TOP when computing mmap base address
mips: properly account for stack randomization and stack guard gap
arm: use generic mmap top-down layout and brk randomization
arm: use STACK_TOP when computing mmap base address
arm: properly account for stack randomization and stack guard gap
arm64, mm: make randomization selected by generic topdown mmap layout
arm64, mm: move generic mmap layout functions to mm
arm64: consider stack randomization for mmap base only when necessary
...
Diffstat (limited to 'mm/sparse.c')
-rw-r--r--  mm/sparse.c  25
1 file changed, 17 insertions(+), 8 deletions(-)
diff --git a/mm/sparse.c b/mm/sparse.c
index 72f010d9bff5..bf32de9e666b 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -11,6 +11,8 @@
 #include <linux/export.h>
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
 
 #include "internal.h"
 #include <asm/dma.h>
@@ -470,6 +472,12 @@ struct page __init *__populate_section_memmap(unsigned long pfn,
 static void *sparsemap_buf __meminitdata;
 static void *sparsemap_buf_end __meminitdata;
 
+static inline void __meminit sparse_buffer_free(unsigned long size)
+{
+        WARN_ON(!sparsemap_buf || size == 0);
+        memblock_free_early(__pa(sparsemap_buf), size);
+}
+
 static void __init sparse_buffer_init(unsigned long size, int nid)
 {
         phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
@@ -486,7 +494,7 @@ static void __init sparse_buffer_fini(void)
         unsigned long size = sparsemap_buf_end - sparsemap_buf;
 
         if (sparsemap_buf && size > 0)
-                memblock_free_early(__pa(sparsemap_buf), size);
+                sparse_buffer_free(size);
         sparsemap_buf = NULL;
 }
 
@@ -495,11 +503,15 @@ void * __meminit sparse_buffer_alloc(unsigned long size)
         void *ptr = NULL;
 
         if (sparsemap_buf) {
-                ptr = PTR_ALIGN(sparsemap_buf, size);
+                ptr = (void *) roundup((unsigned long)sparsemap_buf, size);
                 if (ptr + size > sparsemap_buf_end)
                         ptr = NULL;
-                else
+                else {
+                        /* Free redundant aligned space */
+                        if ((unsigned long)(ptr - sparsemap_buf) > 0)
+                                sparse_buffer_free((unsigned long)(ptr - sparsemap_buf));
                         sparsemap_buf = ptr + size;
+                }
         }
         return ptr;
 }
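[Note] This hunk is the functional heart of the series as it touches this file. PTR_ALIGN() only handles power-of-two alignments, while sparse_buffer_alloc() is passed the section memmap size, which need not be a power of two; hence the switch to roundup(). And since rounding the cursor up used to leak the skipped bytes, the gap is now handed back via sparse_buffer_free(). Below is a minimal user-space model of the same pattern, not the kernel code: buf, buf_end, release_to_pool(), and buffer_alloc() are made-up stand-ins for sparsemap_buf, sparsemap_buf_end, sparse_buffer_free(), and sparse_buffer_alloc().

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for sparsemap_buf / sparsemap_buf_end. */
static char *buf;
static char *buf_end;

/*
 * Stand-in for sparse_buffer_free(): user space cannot return part of a
 * malloc'd block, so this only reports what the kernel would hand back
 * to memblock via memblock_free_early().
 */
static void release_to_pool(char *start, size_t size)
{
        printf("released %zu leading byte(s) at %p\n", size, (void *)start);
}

/*
 * Model of sparse_buffer_alloc(): round the cursor up to 'size' (the
 * alignment equals the request, and roundup works for any multiple,
 * not just powers of two), fail if the aligned request would overrun
 * the buffer, and otherwise give the skipped gap back instead of
 * leaking it.
 */
static void *buffer_alloc(size_t size)
{
        uintptr_t cur = (uintptr_t)buf;
        char *ptr = (char *)((cur + size - 1) / size * size); /* roundup */

        if (!buf || ptr + size > buf_end)
                return NULL;
        if (ptr > buf) /* free redundant aligned space */
                release_to_pool(buf, (size_t)(ptr - buf));
        buf = ptr + size;
        return ptr;
}

int main(void)
{
        char *arena = malloc(4096);

        if (!arena)
                return 1;
        buf = arena;
        buf_end = arena + 4096;

        /* The second request is usually misaligned after the first, so
         * a leading gap gets released rather than silently leaked. */
        printf("a=%p\n", buffer_alloc(56));
        printf("b=%p\n", buffer_alloc(64));

        free(arena);
        return 0;
}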
@@ -867,7 +879,7 @@ int __meminit sparse_add_section(int nid, unsigned long start_pfn,
          */
         page_init_poison(pfn_to_page(start_pfn), sizeof(struct page) * nr_pages);
 
-        ms = __pfn_to_section(start_pfn);
+        ms = __nr_to_section(section_nr);
         set_section_nid(section_nr, nid);
         section_mark_present(ms);
 
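[Note] A small cleanup: section_nr is already derived from start_pfn earlier in sparse_add_section(), so the section can be looked up by number directly instead of repeating the pfn conversion. In SPARSEMEM headers of this era the two lookups compose as sketched below (paraphrased from include/linux/mmzone.h; treat the exact definition as an assumption, not a quotation), so the result is unchanged:

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
        /* __pfn_to_section() is __nr_to_section() applied to the pfn's
         * section number, so both forms resolve to the same mem_section. */
        return __nr_to_section(pfn_to_section_nr(pfn));
}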
@@ -884,9 +896,6 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
 {
         int i;
 
-        if (!memmap)
-                return;
-
         /*
          * A further optimization is to have per section refcounted
          * num_poisoned_pages. But that would need more space per memmap, so
@@ -898,7 +907,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
 
         for (i = 0; i < nr_pages; i++) {
                 if (PageHWPoison(&memmap[i])) {
-                        atomic_long_sub(1, &num_poisoned_pages);
+                        num_poisoned_pages_dec();
                         ClearPageHWPoison(&memmap[i]);
                 }
         }
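[Note] The final hunk swaps an open-coded atomic_long_sub(1, &num_poisoned_pages) for the existing num_poisoned_pages_dec() helper; the <linux/swap.h> and <linux/swapops.h> includes added in the first hunk are presumably what bring that helper into scope, since in kernels of this vintage it lives behind CONFIG_MEMORY_FAILURE in <linux/swapops.h>. A sketch of the presumed definition, to show the change is behavior-preserving (an assumption, not a quotation):

/* Named wrapper over the raw atomic: same effect as
 * atomic_long_sub(1, ...), but uniform and greppable at call sites. */
extern atomic_long_t num_poisoned_pages;

static inline void num_poisoned_pages_dec(void)
{
        atomic_long_dec(&num_poisoned_pages);
}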