diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2017-11-15 22:42:40 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-11-15 22:42:40 -0500 |
commit | 7c225c69f86c934e3be9be63ecde754e286838d7 (patch) | |
tree | ff2df419b0c4886b37407235f7d21215e4cf45e4 /mm/sparse-vmemmap.c | |
parent | 6363b3f3ac5be096d08c8c504128befa0c033529 (diff) | |
parent | 1b7176aea0a924ac59c6a283129d3e8eb00aa915 (diff) |
Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton:
- a few misc bits
- ocfs2 updates
- almost all of MM
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (131 commits)
memory hotplug: fix comments when adding section
mm: make alloc_node_mem_map a void call if we don't have CONFIG_FLAT_NODE_MEM_MAP
mm: simplify nodemask printing
mm,oom_reaper: remove pointless kthread_run() error check
mm/page_ext.c: check if page_ext is not prepared
writeback: remove unused function parameter
mm: do not rely on preempt_count in print_vma_addr
mm, sparse: do not swamp log with huge vmemmap allocation failures
mm/hmm: remove redundant variable align_end
mm/list_lru.c: mark expected switch fall-through
mm/shmem.c: mark expected switch fall-through
mm/page_alloc.c: broken deferred calculation
mm: don't warn about allocations which stall for too long
fs: fuse: account fuse_inode slab memory as reclaimable
mm, page_alloc: fix potential false positive in __zone_watermark_ok
mm: mlock: remove lru_add_drain_all()
mm, sysctl: make NUMA stats configurable
shmem: convert shmem_init_inodecache() to void
Unify migrate_pages and move_pages access checks
mm, pagevec: rename pagevec drained field
...
Diffstat (limited to 'mm/sparse-vmemmap.c')
 mm/sparse-vmemmap.c | 34 ++++++++++++++++++++++++++--------
 1 file changed, 26 insertions(+), 8 deletions(-)
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 478ce6d4a2c4..17acf01791fa 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -42,7 +42,7 @@ static void * __ref __earlyonly_bootmem_alloc(int node,
 				unsigned long align,
 				unsigned long goal)
 {
-	return memblock_virt_alloc_try_nid(size, align, goal,
+	return memblock_virt_alloc_try_nid_raw(size, align, goal,
 					    BOOTMEM_ALLOC_ACCESSIBLE, node);
 }
 
@@ -53,13 +53,20 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
 {
 	/* If the main allocator is up use that, fallback to bootmem. */
 	if (slab_is_available()) {
+		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
+		int order = get_order(size);
+		static bool warned;
 		struct page *page;
 
-		page = alloc_pages_node(node,
-			GFP_KERNEL | __GFP_ZERO | __GFP_RETRY_MAYFAIL,
-			get_order(size));
+		page = alloc_pages_node(node, gfp_mask, order);
 		if (page)
 			return page_address(page);
+
+		if (!warned) {
+			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
+				   "vmemmap alloc failure: order:%u", order);
+			warned = true;
+		}
 		return NULL;
 	} else
 		return __earlyonly_bootmem_alloc(node, size, size,
@@ -180,11 +187,22 @@ pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
 	return pte;
 }
 
+static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
+{
+	void *p = vmemmap_alloc_block(size, node);
+
+	if (!p)
+		return NULL;
+	memset(p, 0, size);
+
+	return p;
+}
+
 pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
 {
 	pmd_t *pmd = pmd_offset(pud, addr);
 	if (pmd_none(*pmd)) {
-		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
 		if (!p)
 			return NULL;
 		pmd_populate_kernel(&init_mm, pmd, p);
@@ -196,7 +214,7 @@ pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
 {
 	pud_t *pud = pud_offset(p4d, addr);
 	if (pud_none(*pud)) {
-		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
 		if (!p)
 			return NULL;
 		pud_populate(&init_mm, pud, p);
@@ -208,7 +226,7 @@ p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
 {
 	p4d_t *p4d = p4d_offset(pgd, addr);
 	if (p4d_none(*p4d)) {
-		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
 		if (!p)
 			return NULL;
 		p4d_populate(&init_mm, p4d, p);
@@ -220,7 +238,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
 {
 	pgd_t *pgd = pgd_offset_k(addr);
 	if (pgd_none(*pgd)) {
-		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
 		if (!p)
 			return NULL;
 		pgd_populate(&init_mm, pgd, p);