summaryrefslogtreecommitdiffstats
path: root/mm/sparse.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/sparse.c')
-rw-r--r--   mm/sparse.c | 21 +++++++++++++++++----
1 file changed, 17 insertions(+), 4 deletions(-)
diff --git a/mm/sparse.c b/mm/sparse.c
index 77a0554fa5bd..7397fb4e78b4 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -65,11 +65,15 @@ static noinline struct mem_section __ref *sparse_index_alloc(int nid)
 	unsigned long array_size = SECTIONS_PER_ROOT *
 				   sizeof(struct mem_section);
 
-	if (slab_is_available())
+	if (slab_is_available()) {
 		section = kzalloc_node(array_size, GFP_KERNEL, nid);
-	else
+	} else {
 		section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
 					      nid);
+		if (!section)
+			panic("%s: Failed to allocate %lu bytes nid=%d\n",
+			      __func__, array_size, nid);
+	}
 
 	return section;
 }
@@ -218,6 +222,9 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
 		size = sizeof(struct mem_section *) * NR_SECTION_ROOTS;
 		align = 1 << (INTERNODE_CACHE_SHIFT);
 		mem_section = memblock_alloc(size, align);
+		if (!mem_section)
+			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+			      __func__, size, align);
 	}
 #endif
 
@@ -404,13 +411,18 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
 {
 	unsigned long size = section_map_size();
 	struct page *map = sparse_buffer_alloc(size);
+	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
 
 	if (map)
 		return map;
 
 	map = memblock_alloc_try_nid(size,
-					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
+					  PAGE_SIZE, addr,
 					  MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+	if (!map)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
+		      __func__, size, PAGE_SIZE, nid, &addr);
+
 	return map;
 }
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
@@ -420,10 +432,11 @@ static void *sparsemap_buf_end __meminitdata;
 
 static void __init sparse_buffer_init(unsigned long size, int nid)
 {
+	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
 	WARN_ON(sparsemap_buf);	/* forgot to call sparse_buffer_fini()? */
 	sparsemap_buf =
 		memblock_alloc_try_nid_raw(size, PAGE_SIZE,
-					__pa(MAX_DMA_ADDRESS),
+					addr,
 					MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 	sparsemap_buf_end = sparsemap_buf + size;
 }