-rw-r--r--  arch/alpha/kernel/core_cia.c | 3
-rw-r--r--  arch/alpha/kernel/core_marvel.c | 6
-rw-r--r--  arch/alpha/kernel/pci-noop.c | 13
-rw-r--r--  arch/alpha/kernel/pci.c | 11
-rw-r--r--  arch/alpha/kernel/pci_iommu.c | 12
-rw-r--r--  arch/arc/mm/highmem.c | 4
-rw-r--r--  arch/arm/kernel/setup.c | 6
-rw-r--r--  arch/arm/mm/mmu.c | 14
-rw-r--r--  arch/arm64/kernel/setup.c | 8
-rw-r--r--  arch/arm64/mm/kasan_init.c | 10
-rw-r--r--  arch/c6x/mm/dma-coherent.c | 4
-rw-r--r--  arch/c6x/mm/init.c | 3
-rw-r--r--  arch/csky/mm/highmem.c | 5
-rw-r--r--  arch/h8300/mm/init.c | 3
-rw-r--r--  arch/m68k/atari/stram.c | 4
-rw-r--r--  arch/m68k/mm/init.c | 3
-rw-r--r--  arch/m68k/mm/mcfmmu.c | 6
-rw-r--r--  arch/m68k/mm/motorola.c | 9
-rw-r--r--  arch/m68k/mm/sun3mmu.c | 6
-rw-r--r--  arch/m68k/sun3/sun3dvma.c | 3
-rw-r--r--  arch/microblaze/mm/init.c | 8
-rw-r--r--  arch/mips/cavium-octeon/dma-octeon.c | 3
-rw-r--r--  arch/mips/kernel/setup.c | 3
-rw-r--r--  arch/mips/kernel/traps.c | 3
-rw-r--r--  arch/mips/mm/init.c | 5
-rw-r--r--  arch/nds32/mm/init.c | 12
-rw-r--r--  arch/openrisc/mm/ioremap.c | 8
-rw-r--r--  arch/powerpc/kernel/dt_cpu_ftrs.c | 5
-rw-r--r--  arch/powerpc/kernel/pci_32.c | 3
-rw-r--r--  arch/powerpc/kernel/setup-common.c | 3
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 4
-rw-r--r--  arch/powerpc/lib/alloc.c | 3
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 3
-rw-r--r--  arch/powerpc/mm/mmu_context_nohash.c | 9
-rw-r--r--  arch/powerpc/mm/pgtable-book3e.c | 12
-rw-r--r--  arch/powerpc/mm/pgtable-book3s64.c | 3
-rw-r--r--  arch/powerpc/mm/pgtable-radix.c | 9
-rw-r--r--  arch/powerpc/mm/ppc_mmu_32.c | 3
-rw-r--r--  arch/powerpc/platforms/pasemi/iommu.c | 3
-rw-r--r--  arch/powerpc/platforms/powermac/nvram.c | 3
-rw-r--r--  arch/powerpc/platforms/powernv/opal.c | 3
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c | 8
-rw-r--r--  arch/powerpc/platforms/ps3/setup.c | 3
-rw-r--r--  arch/powerpc/sysdev/msi_bitmap.c | 3
-rw-r--r--  arch/s390/kernel/setup.c | 13
-rw-r--r--  arch/s390/kernel/smp.c | 5
-rw-r--r--  arch/s390/kernel/topology.c | 6
-rw-r--r--  arch/s390/numa/mode_emu.c | 3
-rw-r--r--  arch/s390/numa/numa.c | 6
-rw-r--r--  arch/sh/mm/init.c | 6
-rw-r--r--  arch/sh/mm/numa.c | 4
-rw-r--r--  arch/um/drivers/net_kern.c | 3
-rw-r--r--  arch/um/drivers/vector_kern.c | 3
-rw-r--r--  arch/um/kernel/initrd.c | 2
-rw-r--r--  arch/um/kernel/mem.c | 16
-rw-r--r--  arch/unicore32/kernel/setup.c | 4
-rw-r--r--  arch/unicore32/mm/mmu.c | 15
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 3
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 5
-rw-r--r--  arch/x86/kernel/e820.c | 3
-rw-r--r--  arch/x86/platform/olpc/olpc_dt.c | 3
-rw-r--r--  arch/x86/xen/p2m.c | 11
-rw-r--r--  arch/xtensa/mm/kasan_init.c | 4
-rw-r--r--  arch/xtensa/mm/mmu.c | 3
-rw-r--r--  drivers/clk/ti/clk.c | 3
-rw-r--r--  drivers/macintosh/smu.c | 3
-rw-r--r--  drivers/of/fdt.c | 8
-rw-r--r--  drivers/of/unittest.c | 8
-rw-r--r--  drivers/xen/swiotlb-xen.c | 7
-rw-r--r--  kernel/dma/swiotlb.c | 4
-rw-r--r--  kernel/power/snapshot.c | 3
-rw-r--r--  lib/cpumask.c | 3
-rw-r--r--  mm/kasan/init.c | 10
-rw-r--r--  mm/sparse.c | 21
74 files changed, 411 insertions(+), 32 deletions(-)
diff --git a/arch/alpha/kernel/core_cia.c b/arch/alpha/kernel/core_cia.c
index 466cd44d8b36..f489170201c3 100644
--- a/arch/alpha/kernel/core_cia.c
+++ b/arch/alpha/kernel/core_cia.c
@@ -332,6 +332,9 @@ cia_prepare_tbia_workaround(int window)
 
 	/* Use minimal 1K map. */
 	ppte = memblock_alloc(CIA_BROKEN_TBIA_SIZE, 32768);
+	if (!ppte)
+		panic("%s: Failed to allocate %u bytes align=0x%x\n",
+		      __func__, CIA_BROKEN_TBIA_SIZE, 32768);
 	pte = (virt_to_phys(ppte) >> (PAGE_SHIFT - 1)) | 1;
 
 	for (i = 0; i < CIA_BROKEN_TBIA_SIZE / sizeof(unsigned long); ++i)
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index c1d0c18c71ca..1db9d0eb2922 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -83,6 +83,9 @@ mk_resource_name(int pe, int port, char *str)
 
 	sprintf(tmp, "PCI %s PE %d PORT %d", str, pe, port);
 	name = memblock_alloc(strlen(tmp) + 1, SMP_CACHE_BYTES);
+	if (!name)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      strlen(tmp) + 1);
 	strcpy(name, tmp);
 
 	return name;
@@ -118,6 +121,9 @@ alloc_io7(unsigned int pe)
 	}
 
 	io7 = memblock_alloc(sizeof(*io7), SMP_CACHE_BYTES);
+	if (!io7)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      sizeof(*io7));
 	io7->pe = pe;
 	raw_spin_lock_init(&io7->irq_lock);
 
diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c
index 091cff3c68fd..ae82061edae9 100644
--- a/arch/alpha/kernel/pci-noop.c
+++ b/arch/alpha/kernel/pci-noop.c
@@ -34,6 +34,9 @@ alloc_pci_controller(void)
 	struct pci_controller *hose;
 
 	hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES);
+	if (!hose)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      sizeof(*hose));
 
 	*hose_tail = hose;
 	hose_tail = &hose->next;
@@ -44,7 +47,13 @@ alloc_pci_controller(void)
 struct resource * __init
 alloc_resource(void)
 {
-	return memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
+	void *ptr = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
+
+	if (!ptr)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      sizeof(struct resource));
+
+	return ptr;
 }
 
 SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, bus,
@@ -54,7 +63,7 @@ SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, bus,
 
 	/* from hose or from bus.devfn */
 	if (which & IOBASE_FROM_HOSE) {
-		for (hose = hose_head; hose; hose = hose->next) 
+		for (hose = hose_head; hose; hose = hose->next)
 			if (hose->index == bus)
 				break;
 		if (!hose)
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index 97098127df83..64fbfb0763b2 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -393,6 +393,9 @@ alloc_pci_controller(void)
 	struct pci_controller *hose;
 
 	hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES);
+	if (!hose)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      sizeof(*hose));
 
 	*hose_tail = hose;
 	hose_tail = &hose->next;
@@ -403,7 +406,13 @@ alloc_pci_controller(void)
 struct resource * __init
 alloc_resource(void)
 {
-	return memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
+	void *ptr = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
+
+	if (!ptr)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      sizeof(struct resource));
+
+	return ptr;
 }
 
 
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index e4cf77b07742..3034d6d936d2 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -80,6 +80,9 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
 			  " falling back to system-wide allocation\n",
 			  __func__, nid);
 		arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
+		if (!arena)
+			panic("%s: Failed to allocate %zu bytes\n", __func__,
+			      sizeof(*arena));
 	}
 
 	arena->ptes = memblock_alloc_node(sizeof(*arena), align, nid);
@@ -88,12 +91,21 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
 			  " falling back to system-wide allocation\n",
 			  __func__, nid);
 		arena->ptes = memblock_alloc(mem_size, align);
+		if (!arena->ptes)
+			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+			      __func__, mem_size, align);
 	}
 
 #else /* CONFIG_DISCONTIGMEM */
 
 	arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
+	if (!arena)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      sizeof(*arena));
 	arena->ptes = memblock_alloc(mem_size, align);
+	if (!arena->ptes)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, mem_size, align);
 
 #endif /* CONFIG_DISCONTIGMEM */
 
diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c
index 48e700151810..11f57e2ced8a 100644
--- a/arch/arc/mm/highmem.c
+++ b/arch/arc/mm/highmem.c
@@ -124,6 +124,10 @@ static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
 	pmd_k = pmd_offset(pud_k, kvaddr);
 
 	pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+	if (!pte_k)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
+
 	pmd_populate_kernel(&init_mm, pmd_k, pte_k);
 	return pte_k;
 }
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 375b13f7e780..5d78b6ac0429 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -867,6 +867,9 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
 		boot_alias_start = phys_to_idmap(start);
 		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
 			res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
+			if (!res)
+				panic("%s: Failed to allocate %zu bytes\n",
+				      __func__, sizeof(*res));
 			res->name = "System RAM (boot alias)";
 			res->start = boot_alias_start;
 			res->end = phys_to_idmap(end);
@@ -875,6 +878,9 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
 		}
 
 		res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
+		if (!res)
+			panic("%s: Failed to allocate %zu bytes\n", __func__,
+			      sizeof(*res));
 		res->name = "System RAM";
 		res->start = start;
 		res->end = end;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 57de0dde3ae0..f3ce34113f89 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -721,7 +721,13 @@ EXPORT_SYMBOL(phys_mem_access_prot);
 
 static void __init *early_alloc(unsigned long sz)
 {
-	return memblock_alloc(sz, sz);
+	void *ptr = memblock_alloc(sz, sz);
+
+	if (!ptr)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, sz, sz);
+
+	return ptr;
 }
 
 static void *__init late_alloc(unsigned long sz)
@@ -994,6 +1000,9 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 		return;
 
 	svm = memblock_alloc(sizeof(*svm) * nr, __alignof__(*svm));
+	if (!svm)
+		panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
+		      __func__, sizeof(*svm) * nr, __alignof__(*svm));
 
 	for (md = io_desc; nr; md++, nr--) {
 		create_mapping(md);
@@ -1016,6 +1025,9 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
 	struct static_vm *svm;
 
 	svm = memblock_alloc(sizeof(*svm), __alignof__(*svm));
+	if (!svm)
+		panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
+		      __func__, sizeof(*svm), __alignof__(*svm));
 
 	vm = &svm->vm;
 	vm->addr = (void *)addr;
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 834b321a88f8..f8482fe5a190 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -208,6 +208,7 @@ static void __init request_standard_resources(void)
 	struct memblock_region *region;
 	struct resource *res;
 	unsigned long i = 0;
+	size_t res_size;
 
 	kernel_code.start = __pa_symbol(_text);
 	kernel_code.end = __pa_symbol(__init_begin - 1);
@@ -215,9 +216,10 @@ static void __init request_standard_resources(void)
 	kernel_data.end = __pa_symbol(_end - 1);
 
 	num_standard_resources = memblock.memory.cnt;
-	standard_resources = memblock_alloc_low(num_standard_resources *
-						sizeof(*standard_resources),
-						SMP_CACHE_BYTES);
+	res_size = num_standard_resources * sizeof(*standard_resources);
+	standard_resources = memblock_alloc_low(res_size, SMP_CACHE_BYTES);
+	if (!standard_resources)
+		panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);
 
 	for_each_memblock(memory, region) {
 		res = &standard_resources[i++];
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index f37a86d2a69d..296de39ddee5 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -40,6 +40,11 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node)
 	void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
 					      __pa(MAX_DMA_ADDRESS),
 					      MEMBLOCK_ALLOC_KASAN, node);
+	if (!p)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE, node,
+		      __pa(MAX_DMA_ADDRESS));
+
 	return __pa(p);
 }
 
@@ -48,6 +53,11 @@ static phys_addr_t __init kasan_alloc_raw_page(int node)
 	void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
 					      __pa(MAX_DMA_ADDRESS),
 					      MEMBLOCK_ALLOC_KASAN, node);
+	if (!p)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE, node,
+		      __pa(MAX_DMA_ADDRESS));
+
 	return __pa(p);
 }
 
diff --git a/arch/c6x/mm/dma-coherent.c b/arch/c6x/mm/dma-coherent.c
index 0be289839ce0..0d3701bc88f6 100644
--- a/arch/c6x/mm/dma-coherent.c
+++ b/arch/c6x/mm/dma-coherent.c
@@ -138,6 +138,10 @@ void __init coherent_mem_init(phys_addr_t start, u32 size)
 
 	dma_bitmap = memblock_alloc(BITS_TO_LONGS(dma_pages) * sizeof(long),
 				    sizeof(long));
+	if (!dma_bitmap)
+		panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
+		      __func__, BITS_TO_LONGS(dma_pages) * sizeof(long),
+		      sizeof(long));
 }
 
 static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
diff --git a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c
index e83c04654238..fe582c3a1794 100644
--- a/arch/c6x/mm/init.c
+++ b/arch/c6x/mm/init.c
@@ -40,6 +40,9 @@ void __init paging_init(void)
 
 	empty_zero_page = (unsigned long) memblock_alloc(PAGE_SIZE,
 							 PAGE_SIZE);
+	if (!empty_zero_page)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
 
 	/*
 	 * Set up user data space
diff --git a/arch/csky/mm/highmem.c b/arch/csky/mm/highmem.c
index 53b1bfa4c462..3317b774f6dc 100644
--- a/arch/csky/mm/highmem.c
+++ b/arch/csky/mm/highmem.c
@@ -141,6 +141,11 @@ static void __init fixrange_init(unsigned long start, unsigned long end,
 		for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
 			if (pmd_none(*pmd)) {
 				pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+				if (!pte)
+					panic("%s: Failed to allocate %lu bytes align=%lx\n",
+					      __func__, PAGE_SIZE,
+					      PAGE_SIZE);
+
 				set_pmd(pmd, __pmd(__pa(pte)));
 				BUG_ON(pte != pte_offset_kernel(pmd, 0));
 			}
diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c
index a1578904ad4e..0f04a5e9aa4f 100644
--- a/arch/h8300/mm/init.c
+++ b/arch/h8300/mm/init.c
@@ -68,6 +68,9 @@ void __init paging_init(void)
 	 * to a couple of allocated pages.
 	 */
 	empty_zero_page = (unsigned long)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+	if (!empty_zero_page)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
 
 	/*
 	 * Set up SFC/DFC registers (user data space).
diff --git a/arch/m68k/atari/stram.c b/arch/m68k/atari/stram.c
index 6ffc204eb07d..6152f9f631d2 100644
--- a/arch/m68k/atari/stram.c
+++ b/arch/m68k/atari/stram.c
@@ -97,6 +97,10 @@ void __init atari_stram_reserve_pages(void *start_mem)
 	pr_debug("atari_stram pool: kernel in ST-RAM, using alloc_bootmem!\n");
 	stram_pool.start = (resource_size_t)memblock_alloc_low(pool_size,
 								PAGE_SIZE);
+	if (!stram_pool.start)
+		panic("%s: Failed to allocate %lu bytes align=%lx\n",
+		      __func__, pool_size, PAGE_SIZE);
+
 	stram_pool.end = stram_pool.start + pool_size - 1;
 	request_resource(&iomem_resource, &stram_pool);
 	stram_virt_offset = 0;
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 933c33e76a48..8868a4c9adae 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -94,6 +94,9 @@ void __init paging_init(void)
 	high_memory = (void *) end_mem;
 
 	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+	if (!empty_zero_page)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
 
 	/*
 	 * Set up SFC/DFC registers (user data space).
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index 492f953db31b..6cb1e41d58d0 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -44,6 +44,9 @@ void __init paging_init(void)
 	int i;
 
 	empty_zero_page = (void *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+	if (!empty_zero_page)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
 
 	pg_dir = swapper_pg_dir;
 	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
@@ -51,6 +54,9 @@ void __init paging_init(void)
 	size = num_pages * sizeof(pte_t);
 	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
 	next_pgtable = (unsigned long) memblock_alloc(size, PAGE_SIZE);
+	if (!next_pgtable)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, size, PAGE_SIZE);
 
 	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
 	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 3f3d0bf36091..356601bf96d9 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -55,6 +55,9 @@ static pte_t * __init kernel_page_table(void)
 	pte_t *ptablep;
 
 	ptablep = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+	if (!ptablep)
+		panic("%s: Failed to allocate %lu bytes align=%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
 
 	clear_page(ptablep);
 	__flush_page_to_ram(ptablep);
@@ -96,6 +99,9 @@ static pmd_t * __init kernel_ptr_table(void)
 	if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
 		last_pgtable = (pmd_t *)memblock_alloc_low(PAGE_SIZE,
 							   PAGE_SIZE);
+		if (!last_pgtable)
+			panic("%s: Failed to allocate %lu bytes align=%lx\n",
+			      __func__, PAGE_SIZE, PAGE_SIZE);
 
 		clear_page(last_pgtable);
 		__flush_page_to_ram(last_pgtable);
@@ -278,6 +284,9 @@ void __init paging_init(void)
 	 * to a couple of allocated pages
 	 */
 	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+	if (!empty_zero_page)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
 
 	/*
 	 * Set up SFC/DFC registers
diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c
index f736db48a2e1..eca1c46bb90a 100644
--- a/arch/m68k/mm/sun3mmu.c
+++ b/arch/m68k/mm/sun3mmu.c
@@ -46,6 +46,9 @@ void __init paging_init(void)
 	unsigned long size;
 
 	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+	if (!empty_zero_page)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
 
 	address = PAGE_OFFSET;
 	pg_dir = swapper_pg_dir;
@@ -56,6 +59,9 @@ void __init paging_init(void)
 	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
 
 	next_pgtable = (unsigned long)memblock_alloc(size, PAGE_SIZE);
+	if (!next_pgtable)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, size, PAGE_SIZE);
 	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
 
 	/* Map whole memory from PAGE_OFFSET (0x0E000000) */
diff --git a/arch/m68k/sun3/sun3dvma.c b/arch/m68k/sun3/sun3dvma.c
index 4d64711d3d47..399f3d06125f 100644
--- a/arch/m68k/sun3/sun3dvma.c
+++ b/arch/m68k/sun3/sun3dvma.c
@@ -269,6 +269,9 @@ void __init dvma_init(void)
 
 	iommu_use = memblock_alloc(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long),
 				   SMP_CACHE_BYTES);
+	if (!iommu_use)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      IOMMU_TOTAL_ENTRIES * sizeof(unsigned long));
 
 	dvma_unmap_iommu(DVMA_START, DVMA_SIZE);
 
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index bd1cd4bff449..7e97d44f6538 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -374,10 +374,14 @@ void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
 {
 	void *p;
 
-	if (mem_init_done)
+	if (mem_init_done) {
 		p = kzalloc(size, mask);
-	else
+	} else {
 		p = memblock_alloc(size, SMP_CACHE_BYTES);
+		if (!p)
+			panic("%s: Failed to allocate %zu bytes\n",
+			      __func__, size);
+	}
 
 	return p;
 }
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index e8eb60ed99f2..11d5a4e90736 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -245,6 +245,9 @@ void __init plat_swiotlb_setup(void)
 	swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT;
 
 	octeon_swiotlb = memblock_alloc_low(swiotlbsize, PAGE_SIZE);
+	if (!octeon_swiotlb)
+		panic("%s: Failed to allocate %zu bytes align=%lx\n",
+		      __func__, swiotlbsize, PAGE_SIZE);
 
 	if (swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1) == -ENOMEM)
 		panic("Cannot allocate SWIOTLB buffer");
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 5151532ad959..8d1dc6c71173 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -919,6 +919,9 @@ static void __init resource_init(void)
 			end = HIGHMEM_START - 1;
 
 		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
+		if (!res)
+			panic("%s: Failed to allocate %zu bytes\n", __func__,
+			      sizeof(struct resource));
 
 		res->start = start;
 		res->end = end;
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index fc511ecefec6..98ca55d62201 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -2294,6 +2294,9 @@ void __init trap_init(void)
 
 		ebase = (unsigned long)
 			memblock_alloc(size, 1 << fls(size));
+		if (!ebase)
+			panic("%s: Failed to allocate %lu bytes align=0x%x\n",
+			      __func__, size, 1 << fls(size));
 
 		/*
 		 * Try to ensure ebase resides in KSeg0 if possible.
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index c3b45e248806..bbb196ad5f26 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -252,6 +252,11 @@ void __init fixrange_init(unsigned long start, unsigned long end,
 			if (pmd_none(*pmd)) {
 				pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
 								   PAGE_SIZE);
+				if (!pte)
+					panic("%s: Failed to allocate %lu bytes align=%lx\n",
+					      __func__, PAGE_SIZE,
+					      PAGE_SIZE);
+
 				set_pmd(pmd, __pmd((unsigned long)pte));
 				BUG_ON(pte != pte_offset_kernel(pmd, 0));
 			}
diff --git a/arch/nds32/mm/init.c b/arch/nds32/mm/init.c
index d1e521cce317..1d03633f89a9 100644
--- a/arch/nds32/mm/init.c
+++ b/arch/nds32/mm/init.c
@@ -79,6 +79,9 @@ static void __init map_ram(void)
 
 		/* Alloc one page for holding PTE's... */
 		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+		if (!pte)
+			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+			      __func__, PAGE_SIZE, PAGE_SIZE);
 		set_pmd(pme, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE));
 
 		/* Fill the newly allocated page with PTE'S */
@@ -111,6 +114,9 @@ static void __init fixedrange_init(void)
 	pud = pud_offset(pgd, vaddr);
 	pmd = pmd_offset(pud, vaddr);
 	fixmap_pmd_p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+	if (!fixmap_pmd_p)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
 	set_pmd(pmd, __pmd(__pa(fixmap_pmd_p) + _PAGE_KERNEL_TABLE));
 
 #ifdef CONFIG_HIGHMEM
@@ -123,6 +129,9 @@ static void __init fixedrange_init(void)
 	pud = pud_offset(pgd, vaddr);
 	pmd = pmd_offset(pud, vaddr);
 	pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+	if (!pte)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
 	set_pmd(pmd, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE));
 	pkmap_page_table = pte;
 #endif /* CONFIG_HIGHMEM */
@@ -148,6 +157,9 @@ void __init paging_init(void)
 
 	/* allocate space for empty_zero_page */
 	zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+	if (!zero_page)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
 	zone_sizes_init();
 
 	empty_zero_page = virt_to_page(zero_page);
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
index 051bcb4fefd3..a8509950dbbc 100644
--- a/arch/openrisc/mm/ioremap.c
+++ b/arch/openrisc/mm/ioremap.c
@@ -122,10 +122,14 @@ pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm)
 {
 	pte_t *pte;
 
-	if (likely(mem_init_done))
+	if (likely(mem_init_done)) {
 		pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
-	else
+	} else {
 		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+		if (!pte)
+			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+			      __func__, PAGE_SIZE, PAGE_SIZE);
+	}
 
 	return pte;
 }
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 28c076c771de..c66fd3ce6478 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -1005,6 +1005,11 @@ static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
 	of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes,
 						&nr_dt_cpu_features);
 	dt_cpu_features = memblock_alloc(sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, PAGE_SIZE);
+	if (!dt_cpu_features)
+		panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
+		      __func__,
+		      sizeof(struct dt_cpu_feature) * nr_dt_cpu_features,
+		      PAGE_SIZE);
 
 	cpufeatures_setup_start(isa);
 
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index d3f04f2d8249..0417fda13636 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -205,6 +205,9 @@ pci_create_OF_bus_map(void)
 
 	of_prop = memblock_alloc(sizeof(struct property) + 256,
 				 SMP_CACHE_BYTES);
+	if (!of_prop)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      sizeof(struct property) + 256);
 	dn = of_find_node_by_path("/");
 	if (dn) {
 		memset(of_prop, -1, sizeof(struct property) + 256);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index f17868e19e2c..2e5dfb6e0823 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -461,6 +461,9 @@ void __init smp_setup_cpu_maps(void)
 
 	cpu_to_phys_id = memblock_alloc(nr_cpu_ids * sizeof(u32),
 					__alignof__(u32));
+	if (!cpu_to_phys_id)
+		panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
+		      __func__, nr_cpu_ids * sizeof(u32), __alignof__(u32));
 
 	for_each_node_by_type(dn, "cpu") {
 		const __be32 *intserv;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index ff0aac42bb33..ba404dd9ce1d 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -905,6 +905,10 @@ static void __ref init_fallback_flush(void)
 	l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2,
 						l1d_size, MEMBLOCK_LOW_LIMIT,
 						limit, NUMA_NO_NODE);
+	if (!l1d_flush_fallback_area)
+		panic("%s: Failed to allocate %llu bytes align=0x%llx max_addr=%pa\n",
+		      __func__, l1d_size * 2, l1d_size, &limit);
+
 
 	for_each_possible_cpu(cpu) {
 		struct paca_struct *paca = paca_ptrs[cpu];
diff --git a/arch/powerpc/lib/alloc.c b/arch/powerpc/lib/alloc.c
index dedf88a76f58..ce180870bd52 100644
--- a/arch/powerpc/lib/alloc.c
+++ b/arch/powerpc/lib/alloc.c
@@ -15,6 +15,9 @@ void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
 		p = kzalloc(size, mask);
 	else {
 		p = memblock_alloc(size, SMP_CACHE_BYTES);
+		if (!p)
+			panic("%s: Failed to allocate %zu bytes\n", __func__,
+			      size);
 	}
 	return p;
 }
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 880a366c229c..0a4f939a8161 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -915,6 +915,9 @@ static void __init htab_initialize(void)
 		linear_map_hash_slots = memblock_alloc_try_nid(
 				linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT,
 				ppc64_rma_size, NUMA_NO_NODE);
+		if (!linear_map_hash_slots)
+			panic("%s: Failed to allocate %lu bytes max_addr=%pa\n",
+			      __func__, linear_map_hash_count, &ppc64_rma_size);
 	}
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 22d71a58167f..1945c5f19f5e 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -461,10 +461,19 @@ void __init mmu_context_init(void)
 	 * Allocate the maps used by context management
 	 */
 	context_map = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
+	if (!context_map)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      CTX_MAP_SIZE);
 	context_mm = memblock_alloc(sizeof(void *) * (LAST_CONTEXT + 1),
 				    SMP_CACHE_BYTES);
+	if (!context_mm)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      sizeof(void *) * (LAST_CONTEXT + 1));
 #ifdef CONFIG_SMP
 	stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
+	if (!stale_map[boot_cpuid])
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      CTX_MAP_SIZE);
 
 	cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE,
 				  "powerpc/mmu/ctx:prepare",
diff --git a/arch/powerpc/mm/pgtable-book3e.c b/arch/powerpc/mm/pgtable-book3e.c
index 53cbc7dc2df2..1032ef7aaf62 100644
--- a/arch/powerpc/mm/pgtable-book3e.c
+++ b/arch/powerpc/mm/pgtable-book3e.c
@@ -57,8 +57,16 @@ void vmemmap_remove_mapping(unsigned long start,
 
 static __ref void *early_alloc_pgtable(unsigned long size)
 {
-	return memblock_alloc_try_nid(size, size, MEMBLOCK_LOW_LIMIT,
-				      __pa(MAX_DMA_ADDRESS), NUMA_NO_NODE);
+	void *ptr;
+
+	ptr = memblock_alloc_try_nid(size, size, MEMBLOCK_LOW_LIMIT,
+				     __pa(MAX_DMA_ADDRESS), NUMA_NO_NODE);
+
+	if (!ptr)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx max_addr=%lx\n",
+		      __func__, size, size, __pa(MAX_DMA_ADDRESS));
+
+	return ptr;
 }
 
 /*
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index 92a3e4c39540..a4341aba0af4 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -197,6 +197,9 @@ void __init mmu_partition_table_init(void)
 	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
 	/* Initialize the Partition Table with no entries */
 	partition_tb = memblock_alloc(patb_size, patb_size);
+	if (!partition_tb)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, patb_size, patb_size);
 
 	/*
 	 * update partition table control register,
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index e377684ac6ad..154472a28c77 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -53,13 +53,20 @@ static __ref void *early_alloc_pgtable(unsigned long size, int nid,
 {
 	phys_addr_t min_addr = MEMBLOCK_LOW_LIMIT;
 	phys_addr_t max_addr = MEMBLOCK_ALLOC_ANYWHERE;
+	void *ptr;
 
 	if (region_start)
 		min_addr = region_start;
 	if (region_end)
 		max_addr = region_end;
 
-	return memblock_alloc_try_nid(size, size, min_addr, max_addr, nid);
+	ptr = memblock_alloc_try_nid(size, size, min_addr, max_addr, nid);
+
+	if (!ptr)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa max_addr=%pa\n",
+		      __func__, size, size, nid, &min_addr, &max_addr);
+
+	return ptr;
 }
 
 static int early_map_kernel_page(unsigned long ea, unsigned long pa,
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index 6c8a60b1e31d..f29d2f118b44 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -340,6 +340,9 @@ void __init MMU_init_hw(void)
 	 */
 	if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
 	Hash = memblock_alloc(Hash_size, Hash_size);
+	if (!Hash)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, Hash_size, Hash_size);
 	_SDR1 = __pa(Hash) | SDR1_LOW_BITS;
 
 	Hash_end = (struct hash_pte *) ((unsigned long)Hash + Hash_size);
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 86368e238f6e..044c6089462c 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -211,6 +211,9 @@ static int __init iob_init(struct device_node *dn)
 	iob_l2_base = memblock_alloc_try_nid_raw(1UL << 21, 1UL << 21,
 					MEMBLOCK_LOW_LIMIT, 0x80000000,
 					NUMA_NO_NODE);
+	if (!iob_l2_base)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx max_addr=%x\n",
+		      __func__, 1UL << 21, 1UL << 21, 0x80000000);
 
 	pr_info("IOBMAP L2 allocated at: %p\n", iob_l2_base);
 
diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c
index 9360cdc408c1..86989c5779c2 100644
--- a/arch/powerpc/platforms/powermac/nvram.c
+++ b/arch/powerpc/platforms/powermac/nvram.c
@@ -519,6 +519,9 @@ static int __init core99_nvram_setup(struct device_node *dp, unsigned long addr)
 		return -EINVAL;
 	}
 	nvram_image = memblock_alloc(NVRAM_SIZE, SMP_CACHE_BYTES);
+	if (!nvram_image)
+		panic("%s: Failed to allocate %u bytes\n", __func__,
+		      NVRAM_SIZE);
 	nvram_data = ioremap(addr, NVRAM_SIZE*2);
 	nvram_naddrs = 1; /* Make sure we get the correct case */
 
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 727a7de08635..2b0eca104f86 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -171,6 +171,9 @@ int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
 	 * Allocate a buffer to hold the MC recoverable ranges.
 	 */
 	mc_recoverable_range = memblock_alloc(size, __alignof__(u64));
+	if (!mc_recoverable_range)
+		panic("%s: Failed to allocate %u bytes align=0x%lx\n",
+		      __func__, size, __alignof__(u64));
 
 	for (i = 0; i < mc_recoverable_range_len; i++) {
 		mc_recoverable_range[i].start_addr =
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index fa6af52b5219..3ead4c237ed0 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -3657,6 +3657,9 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
 	pr_debug(" PHB-ID : 0x%016llx\n", phb_id);
 
 	phb = memblock_alloc(sizeof(*phb), SMP_CACHE_BYTES);
+	if (!phb)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      sizeof(*phb));
 
 	/* Allocate PCI controller */
 	phb->hose = hose = pcibios_alloc_controller(np);
@@ -3703,6 +3706,9 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
 	phb->diag_data_size = PNV_PCI_DIAG_BUF_SIZE;
 
 	phb->diag_data = memblock_alloc(phb->diag_data_size, SMP_CACHE_BYTES);
+	if (!phb->diag_data)
+		panic("%s: Failed to allocate %u bytes\n", __func__,
+		      phb->diag_data_size);
 
 	/* Parse 32-bit and IO ranges (if any) */
 	pci_process_bridge_OF_ranges(hose, np, !hose->global_number);
@@ -3762,6 +3768,8 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
 	pemap_off = size;
 	size += phb->ioda.total_pe_num * sizeof(struct pnv_ioda_pe);
 	aux = memblock_alloc(size, SMP_CACHE_BYTES);
+	if (!aux)
+		panic("%s: Failed to allocate %lu bytes\n", __func__, size);
 	phb->ioda.pe_alloc = aux;
 	phb->ioda.m64_segmap = aux + m64map_off;
 	phb->ioda.m32_segmap = aux + m32map_off;
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index 658bfab3350b..4ce5458eb0f8 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -127,6 +127,9 @@ static void __init prealloc(struct ps3_prealloc *p)
 		return;
 
 	p->address = memblock_alloc(p->size, p->align);
+	if (!p->address)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, p->size, p->align);
 
 	printk(KERN_INFO "%s: %lu bytes at %p\n", p->name, p->size,
 	       p->address);
diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c
index d45450f6666a..51a679a1c403 100644
--- a/arch/powerpc/sysdev/msi_bitmap.c
+++ b/arch/powerpc/sysdev/msi_bitmap.c
@@ -129,6 +129,9 @@ int __ref msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count,
 		bmp->bitmap = kzalloc(size, GFP_KERNEL);
 	else {
 		bmp->bitmap = memblock_alloc(size, SMP_CACHE_BYTES);
+		if (!bmp->bitmap)
+			panic("%s: Failed to allocate %u bytes\n", __func__,
+			      size);
 		/* the bitmap won't be freed from memblock allocator */
 		kmemleak_not_leak(bmp->bitmap);
 	}
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index d7920f3e76c6..2c642af526ce 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -378,6 +378,10 @@ static void __init setup_lowcore_dat_off(void)
 	 */
 	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
 	lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
+	if (!lc)
+		panic("%s: Failed to allocate %zu bytes align=%zx\n",
+		      __func__, sizeof(*lc), sizeof(*lc));
+
 	lc->restart_psw.mask = PSW_KERNEL_BITS;
 	lc->restart_psw.addr = (unsigned long) restart_int_handler;
 	lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
@@ -419,6 +423,9 @@ static void __init setup_lowcore_dat_off(void)
 	 * all CPUs in cast *one* of them does a PSW restart.
 	 */
 	restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
+	if (!restart_stack)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, THREAD_SIZE, THREAD_SIZE);
 	restart_stack += STACK_INIT_OFFSET;
 
 	/*
@@ -495,6 +502,9 @@ static void __init setup_resources(void)
 
 	for_each_memblock(memory, reg) {
 		res = memblock_alloc(sizeof(*res), 8);
+		if (!res)
+			panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+			      __func__, sizeof(*res), 8);
 		res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
 
 		res->name = "System RAM";
@@ -509,6 +519,9 @@ static void __init setup_resources(void)
 				continue;
 			if (std_res->end > res->end) {
 				sub_res = memblock_alloc(sizeof(*sub_res), 8);
+				if (!sub_res)
+					panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+					      __func__, sizeof(*sub_res), 8);
 				*sub_res = *std_res;
 				sub_res->end = res->end;
 				std_res->start = res->end + 1;
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 5e3cccc408b8..3fe1c77c361b 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -658,7 +658,7 @@ void __init smp_save_dump_cpus(void)
 	/* Allocate a page as dumping area for the store status sigps */
 	page = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0, 1UL << 31);
 	if (!page)
-		panic("ERROR: Failed to allocate %x bytes below %lx\n",
+		panic("ERROR: Failed to allocate %lx bytes below %lx\n",
 		      PAGE_SIZE, 1UL << 31);
 
 	/* Set multi-threading state to the previous system. */
@@ -770,6 +770,9 @@ void __init smp_detect_cpus(void)
 
 	/* Get CPU information */
 	info = memblock_alloc(sizeof(*info), 8);
+	if (!info)
+		panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+		      __func__, sizeof(*info), 8);
 	smp_get_core_info(info, 1);
 	/* Find boot CPU type */
 	if (sclp.has_core_type) {
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 8992b04c0ade..8964a3f60aad 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -520,6 +520,9 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info,
 	nr_masks = max(nr_masks, 1);
 	for (i = 0; i < nr_masks; i++) {
 		mask->next = memblock_alloc(sizeof(*mask->next), 8);
+		if (!mask->next)
+			panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+			      __func__, sizeof(*mask->next), 8);
 		mask = mask->next;
 	}
 }
@@ -538,6 +541,9 @@ void __init topology_init_early(void)
 	if (!MACHINE_HAS_TOPOLOGY)
 		goto out;
 	tl_info = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+	if (!tl_info)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
 	info = tl_info;
 	store_topology(info);
 	pr_info("The CPU configuration topology of the machine is: %d %d %d %d %d %d / %d\n",
diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c
index bfba273c32c0..71a12a4f4906 100644
--- a/arch/s390/numa/mode_emu.c
+++ b/arch/s390/numa/mode_emu.c
@@ -313,6 +313,9 @@ static void __ref create_core_to_node_map(void)
 	int i;
 
 	emu_cores = memblock_alloc(sizeof(*emu_cores), 8);
+	if (!emu_cores)
+		panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+		      __func__, sizeof(*emu_cores), 8);
 	for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++)
 		emu_cores->to_node_id[i] = NODE_ID_FREE;
 }
diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
index 2d1271e2a70d..8eb9e9743f5d 100644
--- a/arch/s390/numa/numa.c
+++ b/arch/s390/numa/numa.c
@@ -92,8 +92,12 @@ static void __init numa_setup_memory(void)
 	} while (cur_base < end_of_dram);
 
 	/* Allocate and fill out node_data */
-	for (nid = 0; nid < MAX_NUMNODES; nid++)
+	for (nid = 0; nid < MAX_NUMNODES; nid++) {
 		NODE_DATA(nid) = memblock_alloc(sizeof(pg_data_t), 8);
+		if (!NODE_DATA(nid))
+			panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+			      __func__, sizeof(pg_data_t), 8);
+	}
 
 	for_each_online_node(nid) {
 		unsigned long start_pfn, end_pfn;
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index a0fa4de03dd5..fceefd92016f 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -128,6 +128,9 @@ static pmd_t * __init one_md_table_init(pud_t *pud)
 		pmd_t *pmd;
 
 		pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+		if (!pmd)
+			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+			      __func__, PAGE_SIZE, PAGE_SIZE);
 		pud_populate(&init_mm, pud, pmd);
 		BUG_ON(pmd != pmd_offset(pud, 0));
 	}
@@ -141,6 +144,9 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
 		pte_t *pte;
 
 		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+		if (!pte)
+			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+			      __func__, PAGE_SIZE, PAGE_SIZE);
 		pmd_populate_kernel(&init_mm, pmd, pte);
 		BUG_ON(pte != pte_offset_kernel(pmd, 0));
 	}
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c
index c4bde6148810..f7e4439deb17 100644
--- a/arch/sh/mm/numa.c
+++ b/arch/sh/mm/numa.c
@@ -43,6 +43,10 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
 	/* Node-local pgdat */
 	NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data),
 					     SMP_CACHE_BYTES, nid);
+	if (!NODE_DATA(nid))
+		panic("%s: Failed to allocate %zu bytes align=0x%x nid=%d\n",
+		      __func__, sizeof(struct pglist_data), SMP_CACHE_BYTES,
+		      nid);
 
 	NODE_DATA(nid)->node_start_pfn = start_pfn;
 	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index d80cfb1d9430..6e5be5fb4143 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -649,6 +649,9 @@ static int __init eth_setup(char *str)
 	}
 
 	new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES);
+	if (!new)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      sizeof(*new));
 
 	INIT_LIST_HEAD(&new->list);
 	new->index = n;
diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
index 046fa9ea0ccc..596e7056f376 100644
--- a/arch/um/drivers/vector_kern.c
+++ b/arch/um/drivers/vector_kern.c
@@ -1576,6 +1576,9 @@ static int __init vector_setup(char *str)
1576 return 1; 1576 return 1;
1577 } 1577 }
1578 new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES); 1578 new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES);
1579 if (!new)
1580 panic("%s: Failed to allocate %zu bytes\n", __func__,
1581 sizeof(*new));
1579 INIT_LIST_HEAD(&new->list); 1582 INIT_LIST_HEAD(&new->list);
1580 new->unit = n; 1583 new->unit = n;
1581 new->arguments = str; 1584 new->arguments = str;
diff --git a/arch/um/kernel/initrd.c b/arch/um/kernel/initrd.c
index ce169ea87e61..1dcd310cb34d 100644
--- a/arch/um/kernel/initrd.c
+++ b/arch/um/kernel/initrd.c
@@ -37,6 +37,8 @@ int __init read_initrd(void)
37 } 37 }
38 38
39 area = memblock_alloc(size, SMP_CACHE_BYTES); 39 area = memblock_alloc(size, SMP_CACHE_BYTES);
40 if (!area)
41 panic("%s: Failed to allocate %llu bytes\n", __func__, size);
40 42
41 if (load_initrd(initrd, area, size) == -1) 43 if (load_initrd(initrd, area, size) == -1)
42 return 0; 44 return 0;
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 799b571a8f88..99aa11bf53d1 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -66,6 +66,10 @@ static void __init one_page_table_init(pmd_t *pmd)
66 if (pmd_none(*pmd)) { 66 if (pmd_none(*pmd)) {
67 pte_t *pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, 67 pte_t *pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
68 PAGE_SIZE); 68 PAGE_SIZE);
69 if (!pte)
70 panic("%s: Failed to allocate %lu bytes align=%lx\n",
71 __func__, PAGE_SIZE, PAGE_SIZE);
72
69 set_pmd(pmd, __pmd(_KERNPG_TABLE + 73 set_pmd(pmd, __pmd(_KERNPG_TABLE +
70 (unsigned long) __pa(pte))); 74 (unsigned long) __pa(pte)));
71 if (pte != pte_offset_kernel(pmd, 0)) 75 if (pte != pte_offset_kernel(pmd, 0))
@@ -77,6 +81,10 @@ static void __init one_md_table_init(pud_t *pud)
77{ 81{
78#ifdef CONFIG_3_LEVEL_PGTABLES 82#ifdef CONFIG_3_LEVEL_PGTABLES
79 pmd_t *pmd_table = (pmd_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); 83 pmd_t *pmd_table = (pmd_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
84 if (!pmd_table)
85 panic("%s: Failed to allocate %lu bytes align=%lx\n",
86 __func__, PAGE_SIZE, PAGE_SIZE);
87
80 set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table))); 88 set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
81 if (pmd_table != pmd_offset(pud, 0)) 89 if (pmd_table != pmd_offset(pud, 0))
82 BUG(); 90 BUG();
@@ -126,6 +134,10 @@ static void __init fixaddr_user_init( void)
126 134
127 fixrange_init( FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir); 135 fixrange_init( FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
128 v = (unsigned long) memblock_alloc_low(size, PAGE_SIZE); 136 v = (unsigned long) memblock_alloc_low(size, PAGE_SIZE);
137 if (!v)
138 panic("%s: Failed to allocate %lu bytes align=%lx\n",
139 __func__, size, PAGE_SIZE);
140
129 memcpy((void *) v , (void *) FIXADDR_USER_START, size); 141 memcpy((void *) v , (void *) FIXADDR_USER_START, size);
130 p = __pa(v); 142 p = __pa(v);
131 for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE, 143 for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
@@ -146,6 +158,10 @@ void __init paging_init(void)
146 158
147 empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE, 159 empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
148 PAGE_SIZE); 160 PAGE_SIZE);
161 if (!empty_zero_page)
162 panic("%s: Failed to allocate %lu bytes align=%lx\n",
163 __func__, PAGE_SIZE, PAGE_SIZE);
164
149 for (i = 0; i < ARRAY_SIZE(zones_size); i++) 165 for (i = 0; i < ARRAY_SIZE(zones_size); i++)
150 zones_size[i] = 0; 166 zones_size[i] = 0;
151 167
diff --git a/arch/unicore32/kernel/setup.c b/arch/unicore32/kernel/setup.c
index 4b0cb68c355a..d3239cf2e837 100644
--- a/arch/unicore32/kernel/setup.c
+++ b/arch/unicore32/kernel/setup.c
@@ -207,6 +207,10 @@ request_standard_resources(struct meminfo *mi)
207 continue; 207 continue;
208 208
209 res = memblock_alloc_low(sizeof(*res), SMP_CACHE_BYTES); 209 res = memblock_alloc_low(sizeof(*res), SMP_CACHE_BYTES);
210 if (!res)
211 panic("%s: Failed to allocate %zu bytes align=%x\n",
212 __func__, sizeof(*res), SMP_CACHE_BYTES);
213
210 res->name = "System RAM"; 214 res->name = "System RAM";
211 res->start = mi->bank[i].start; 215 res->start = mi->bank[i].start;
212 res->end = mi->bank[i].start + mi->bank[i].size - 1; 216 res->end = mi->bank[i].start + mi->bank[i].size - 1;
diff --git a/arch/unicore32/mm/mmu.c b/arch/unicore32/mm/mmu.c
index a40219291965..aa2060beb408 100644
--- a/arch/unicore32/mm/mmu.c
+++ b/arch/unicore32/mm/mmu.c
@@ -145,8 +145,13 @@ static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr,
145 unsigned long prot) 145 unsigned long prot)
146{ 146{
147 if (pmd_none(*pmd)) { 147 if (pmd_none(*pmd)) {
148 pte_t *pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), 148 size_t size = PTRS_PER_PTE * sizeof(pte_t);
149 PTRS_PER_PTE * sizeof(pte_t)); 149 pte_t *pte = memblock_alloc(size, size);
150
151 if (!pte)
152 panic("%s: Failed to allocate %zu bytes align=%zx\n",
153 __func__, size, size);
154
150 __pmd_populate(pmd, __pa(pte) | prot); 155 __pmd_populate(pmd, __pa(pte) | prot);
151 } 156 }
152 BUG_ON(pmd_bad(*pmd)); 157 BUG_ON(pmd_bad(*pmd));
@@ -349,6 +354,9 @@ static void __init devicemaps_init(void)
349 * Allocate the vector page early. 354 * Allocate the vector page early.
350 */ 355 */
351 vectors = memblock_alloc(PAGE_SIZE, PAGE_SIZE); 356 vectors = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
357 if (!vectors)
358 panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
359 __func__, PAGE_SIZE, PAGE_SIZE);
352 360
353 for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE) 361 for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
354 pmd_clear(pmd_off_k(addr)); 362 pmd_clear(pmd_off_k(addr));
@@ -426,6 +434,9 @@ void __init paging_init(void)
426 434
427 /* allocate the zero page. */ 435 /* allocate the zero page. */
428 zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE); 436 zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
437 if (!zero_page)
438 panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
439 __func__, PAGE_SIZE, PAGE_SIZE);
429 440
430 bootmem_init(); 441 bootmem_init();
431 442
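The unicore32 hunk just above also hoists the table size into a local so the allocation, the NULL check, and the panic message all refer to the same value. A rough, hedged sketch of that inline check-and-panic shape (the function name example_early_table() and the 1 KiB size are illustrative only, not taken from the patch) might look like:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>

/* Sketch only: inline check right at the call site, as in early_pte_alloc(). */
static unsigned long * __init example_early_table(void)
{
	size_t size = 1024;	/* arbitrary example size, hoisted once */
	unsigned long *table = memblock_alloc(size, size);

	if (!table)
		panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
		      __func__, size, size);

	return table;	/* memblock_alloc() returns zeroed memory */
}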
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 2624de16cd7a..8dcbf6890714 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -935,6 +935,9 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
935#define HPET_RESOURCE_NAME_SIZE 9 935#define HPET_RESOURCE_NAME_SIZE 9
936 hpet_res = memblock_alloc(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE, 936 hpet_res = memblock_alloc(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE,
937 SMP_CACHE_BYTES); 937 SMP_CACHE_BYTES);
938 if (!hpet_res)
939 panic("%s: Failed to allocate %zu bytes\n", __func__,
940 sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE);
938 941
939 hpet_res->name = (void *)&hpet_res[1]; 942 hpet_res->name = (void *)&hpet_res[1];
940 hpet_res->flags = IORESOURCE_MEM; 943 hpet_res->flags = IORESOURCE_MEM;
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 264e3221d923..53aa234a6803 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2581,6 +2581,8 @@ static struct resource * __init ioapic_setup_resources(void)
2581 n *= nr_ioapics; 2581 n *= nr_ioapics;
2582 2582
2583 mem = memblock_alloc(n, SMP_CACHE_BYTES); 2583 mem = memblock_alloc(n, SMP_CACHE_BYTES);
2584 if (!mem)
2585 panic("%s: Failed to allocate %lu bytes\n", __func__, n);
2584 res = (void *)mem; 2586 res = (void *)mem;
2585 2587
2586 mem += sizeof(struct resource) * nr_ioapics; 2588 mem += sizeof(struct resource) * nr_ioapics;
@@ -2625,6 +2627,9 @@ fake_ioapic_page:
2625#endif 2627#endif
2626 ioapic_phys = (unsigned long)memblock_alloc(PAGE_SIZE, 2628 ioapic_phys = (unsigned long)memblock_alloc(PAGE_SIZE,
2627 PAGE_SIZE); 2629 PAGE_SIZE);
2630 if (!ioapic_phys)
2631 panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
2632 __func__, PAGE_SIZE, PAGE_SIZE);
2628 ioapic_phys = __pa(ioapic_phys); 2633 ioapic_phys = __pa(ioapic_phys);
2629 } 2634 }
2630 set_fixmap_nocache(idx, ioapic_phys); 2635 set_fixmap_nocache(idx, ioapic_phys);
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 5203ee4e6435..6831c8437951 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1092,6 +1092,9 @@ void __init e820__reserve_resources(void)
1092 1092
1093 res = memblock_alloc(sizeof(*res) * e820_table->nr_entries, 1093 res = memblock_alloc(sizeof(*res) * e820_table->nr_entries,
1094 SMP_CACHE_BYTES); 1094 SMP_CACHE_BYTES);
1095 if (!res)
1096 panic("%s: Failed to allocate %zu bytes\n", __func__,
1097 sizeof(*res) * e820_table->nr_entries);
1095 e820_res = res; 1098 e820_res = res;
1096 1099
1097 for (i = 0; i < e820_table->nr_entries; i++) { 1100 for (i = 0; i < e820_table->nr_entries; i++) {
diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
index b4ab779f1d47..ac9e7bf49b66 100644
--- a/arch/x86/platform/olpc/olpc_dt.c
+++ b/arch/x86/platform/olpc/olpc_dt.c
@@ -141,6 +141,9 @@ void * __init prom_early_alloc(unsigned long size)
141 * wasted bootmem) and hand off chunks of it to callers. 141 * wasted bootmem) and hand off chunks of it to callers.
142 */ 142 */
143 res = memblock_alloc(chunk_size, SMP_CACHE_BYTES); 143 res = memblock_alloc(chunk_size, SMP_CACHE_BYTES);
144 if (!res)
145 panic("%s: Failed to allocate %zu bytes\n", __func__,
146 chunk_size);
144 BUG_ON(!res); 147 BUG_ON(!res);
145 prom_early_allocated += chunk_size; 148 prom_early_allocated += chunk_size;
146 memset(res, 0, chunk_size); 149 memset(res, 0, chunk_size);
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 055e37e43541..95ce9b5be411 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -181,8 +181,15 @@ static void p2m_init_identity(unsigned long *p2m, unsigned long pfn)
181 181
182static void * __ref alloc_p2m_page(void) 182static void * __ref alloc_p2m_page(void)
183{ 183{
184 if (unlikely(!slab_is_available())) 184 if (unlikely(!slab_is_available())) {
185 return memblock_alloc(PAGE_SIZE, PAGE_SIZE); 185 void *ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
186
187 if (!ptr)
188 panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
189 __func__, PAGE_SIZE, PAGE_SIZE);
190
191 return ptr;
192 }
186 193
187 return (void *)__get_free_page(GFP_KERNEL); 194 return (void *)__get_free_page(GFP_KERNEL);
188} 195}
diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c
index 4852848a0c28..af7152560bc3 100644
--- a/arch/xtensa/mm/kasan_init.c
+++ b/arch/xtensa/mm/kasan_init.c
@@ -45,6 +45,10 @@ static void __init populate(void *start, void *end)
45 pmd_t *pmd = pmd_offset(pgd, vaddr); 45 pmd_t *pmd = pmd_offset(pgd, vaddr);
46 pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE); 46 pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
47 47
48 if (!pte)
49 panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
50 __func__, n_pages * sizeof(pte_t), PAGE_SIZE);
51
48 pr_debug("%s: %p - %p\n", __func__, start, end); 52 pr_debug("%s: %p - %p\n", __func__, start, end);
49 53
50 for (i = j = 0; i < n_pmds; ++i) { 54 for (i = j = 0; i < n_pmds; ++i) {
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index a4dcfd39bc5c..2fb7d1172228 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -32,6 +32,9 @@ static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
32 __func__, vaddr, n_pages); 32 __func__, vaddr, n_pages);
33 33
34 pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE); 34 pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE);
35 if (!pte)
36 panic("%s: Failed to allocate %zu bytes align=%lx\n",
37 __func__, n_pages * sizeof(pte_t), PAGE_SIZE);
35 38
36 for (i = 0; i < n_pages; ++i) 39 for (i = 0; i < n_pages; ++i)
37 pte_clear(NULL, 0, pte + i); 40 pte_clear(NULL, 0, pte + i);
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index d0cd58534781..5d7fb2eecce4 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -351,6 +351,9 @@ void __init omap2_clk_legacy_provider_init(int index, void __iomem *mem)
351 struct clk_iomap *io; 351 struct clk_iomap *io;
352 352
353 io = memblock_alloc(sizeof(*io), SMP_CACHE_BYTES); 353 io = memblock_alloc(sizeof(*io), SMP_CACHE_BYTES);
354 if (!io)
355 panic("%s: Failed to allocate %zu bytes\n", __func__,
356 sizeof(*io));
354 357
355 io->mem = mem; 358 io->mem = mem;
356 359
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 42cf68d15da3..6a844125cf2d 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -493,6 +493,9 @@ int __init smu_init (void)
493 } 493 }
494 494
495 smu = memblock_alloc(sizeof(struct smu_device), SMP_CACHE_BYTES); 495 smu = memblock_alloc(sizeof(struct smu_device), SMP_CACHE_BYTES);
496 if (!smu)
497 panic("%s: Failed to allocate %zu bytes\n", __func__,
498 sizeof(struct smu_device));
496 499
497 spin_lock_init(&smu->lock); 500 spin_lock_init(&smu->lock);
498 INIT_LIST_HEAD(&smu->cmd_list); 501 INIT_LIST_HEAD(&smu->cmd_list);
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 9cc1461aac7d..4734223ab702 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -1181,7 +1181,13 @@ int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
1181 1181
1182static void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) 1182static void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
1183{ 1183{
1184 return memblock_alloc(size, align); 1184 void *ptr = memblock_alloc(size, align);
1185
1186 if (!ptr)
1187 panic("%s: Failed to allocate %llu bytes align=0x%llx\n",
1188 __func__, size, align);
1189
1190 return ptr;
1185} 1191}
1186 1192
1187bool __init early_init_dt_verify(void *params) 1193bool __init early_init_dt_verify(void *params)
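The of/fdt.c change above uses the other recurring shape in this patch: the allocation lives in a small helper, the helper panics on failure, and every caller keeps treating the returned pointer as valid. A hedged sketch of that wrapper form (dt_early_alloc() is an illustrative name, not one introduced by the patch):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>

/* Sketch only: wrapper that never lets a NULL pointer reach its callers. */
static void * __init dt_early_alloc(u64 size, u64 align)
{
	void *ptr = memblock_alloc(size, align);

	if (!ptr)
		panic("%s: Failed to allocate %llu bytes align=0x%llx\n",
		      __func__, size, align);

	return ptr;
}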
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 66037511f2d7..cccde756b510 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -2241,7 +2241,13 @@ static struct device_node *overlay_base_root;
2241 2241
2242static void * __init dt_alloc_memory(u64 size, u64 align) 2242static void * __init dt_alloc_memory(u64 size, u64 align)
2243{ 2243{
2244 return memblock_alloc(size, align); 2244 void *ptr = memblock_alloc(size, align);
2245
2246 if (!ptr)
2247 panic("%s: Failed to allocate %llu bytes align=0x%llx\n",
2248 __func__, size, align);
2249
2250 return ptr;
2245} 2251}
2246 2252
2247/* 2253/*
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index bb7888429be6..877baf2a94f4 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -214,10 +214,13 @@ retry:
214 /* 214 /*
215 * Get IO TLB memory from any location. 215 * Get IO TLB memory from any location.
216 */ 216 */
217 if (early) 217 if (early) {
218 xen_io_tlb_start = memblock_alloc(PAGE_ALIGN(bytes), 218 xen_io_tlb_start = memblock_alloc(PAGE_ALIGN(bytes),
219 PAGE_SIZE); 219 PAGE_SIZE);
220 else { 220 if (!xen_io_tlb_start)
221 panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
222 __func__, PAGE_ALIGN(bytes), PAGE_SIZE);
223 } else {
221#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT)) 224#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
222#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) 225#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
223 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { 226 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index dd6a8e2d53a7..56ac77a80b1f 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -215,13 +215,13 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
215 alloc_size = PAGE_ALIGN(io_tlb_nslabs * sizeof(int)); 215 alloc_size = PAGE_ALIGN(io_tlb_nslabs * sizeof(int));
216 io_tlb_list = memblock_alloc(alloc_size, PAGE_SIZE); 216 io_tlb_list = memblock_alloc(alloc_size, PAGE_SIZE);
217 if (!io_tlb_list) 217 if (!io_tlb_list)
218 panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 218 panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
219 __func__, alloc_size, PAGE_SIZE); 219 __func__, alloc_size, PAGE_SIZE);
220 220
221 alloc_size = PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)); 221 alloc_size = PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t));
222 io_tlb_orig_addr = memblock_alloc(alloc_size, PAGE_SIZE); 222 io_tlb_orig_addr = memblock_alloc(alloc_size, PAGE_SIZE);
223 if (!io_tlb_orig_addr) 223 if (!io_tlb_orig_addr)
224 panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 224 panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
225 __func__, alloc_size, PAGE_SIZE); 225 __func__, alloc_size, PAGE_SIZE);
226 226
227 for (i = 0; i < io_tlb_nslabs; i++) { 227 for (i = 0; i < io_tlb_nslabs; i++) {
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 4802b039b89f..f08a1e4ee1d4 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -965,6 +965,9 @@ void __init __register_nosave_region(unsigned long start_pfn,
965 /* This allocation cannot fail */ 965 /* This allocation cannot fail */
966 region = memblock_alloc(sizeof(struct nosave_region), 966 region = memblock_alloc(sizeof(struct nosave_region),
967 SMP_CACHE_BYTES); 967 SMP_CACHE_BYTES);
968 if (!region)
969 panic("%s: Failed to allocate %zu bytes\n", __func__,
970 sizeof(struct nosave_region));
968 } 971 }
969 region->start_pfn = start_pfn; 972 region->start_pfn = start_pfn;
970 region->end_pfn = end_pfn; 973 region->end_pfn = end_pfn;
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 087a3e9a0202..0cb672eb107c 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -165,6 +165,9 @@ EXPORT_SYMBOL(zalloc_cpumask_var);
165void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask) 165void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
166{ 166{
167 *mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES); 167 *mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
168 if (!*mask)
169 panic("%s: Failed to allocate %u bytes\n", __func__,
170 cpumask_size());
168} 171}
169 172
170/** 173/**
diff --git a/mm/kasan/init.c b/mm/kasan/init.c
index fcaa1ca03175..ce45c491ebcd 100644
--- a/mm/kasan/init.c
+++ b/mm/kasan/init.c
@@ -83,8 +83,14 @@ static inline bool kasan_early_shadow_page_entry(pte_t pte)
83 83
84static __init void *early_alloc(size_t size, int node) 84static __init void *early_alloc(size_t size, int node)
85{ 85{
86 return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS), 86 void *ptr = memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
87 MEMBLOCK_ALLOC_ACCESSIBLE, node); 87 MEMBLOCK_ALLOC_ACCESSIBLE, node);
88
89 if (!ptr)
90 panic("%s: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n",
91 __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));
92
93 return ptr;
88} 94}
89 95
90static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr, 96static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
diff --git a/mm/sparse.c b/mm/sparse.c
index 77a0554fa5bd..7397fb4e78b4 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -65,11 +65,15 @@ static noinline struct mem_section __ref *sparse_index_alloc(int nid)
65 unsigned long array_size = SECTIONS_PER_ROOT * 65 unsigned long array_size = SECTIONS_PER_ROOT *
66 sizeof(struct mem_section); 66 sizeof(struct mem_section);
67 67
68 if (slab_is_available()) 68 if (slab_is_available()) {
69 section = kzalloc_node(array_size, GFP_KERNEL, nid); 69 section = kzalloc_node(array_size, GFP_KERNEL, nid);
70 else 70 } else {
71 section = memblock_alloc_node(array_size, SMP_CACHE_BYTES, 71 section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
72 nid); 72 nid);
73 if (!section)
74 panic("%s: Failed to allocate %lu bytes nid=%d\n",
75 __func__, array_size, nid);
76 }
73 77
74 return section; 78 return section;
75} 79}
@@ -218,6 +222,9 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
218 size = sizeof(struct mem_section*) * NR_SECTION_ROOTS; 222 size = sizeof(struct mem_section*) * NR_SECTION_ROOTS;
219 align = 1 << (INTERNODE_CACHE_SHIFT); 223 align = 1 << (INTERNODE_CACHE_SHIFT);
220 mem_section = memblock_alloc(size, align); 224 mem_section = memblock_alloc(size, align);
225 if (!mem_section)
226 panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
227 __func__, size, align);
221 } 228 }
222#endif 229#endif
223 230
@@ -404,13 +411,18 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
404{ 411{
405 unsigned long size = section_map_size(); 412 unsigned long size = section_map_size();
406 struct page *map = sparse_buffer_alloc(size); 413 struct page *map = sparse_buffer_alloc(size);
414 phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
407 415
408 if (map) 416 if (map)
409 return map; 417 return map;
410 418
411 map = memblock_alloc_try_nid(size, 419 map = memblock_alloc_try_nid(size,
412 PAGE_SIZE, __pa(MAX_DMA_ADDRESS), 420 PAGE_SIZE, addr,
413 MEMBLOCK_ALLOC_ACCESSIBLE, nid); 421 MEMBLOCK_ALLOC_ACCESSIBLE, nid);
422 if (!map)
423 panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
424 __func__, size, PAGE_SIZE, nid, &addr);
425
414 return map; 426 return map;
415} 427}
416#endif /* !CONFIG_SPARSEMEM_VMEMMAP */ 428#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
@@ -420,10 +432,11 @@ static void *sparsemap_buf_end __meminitdata;
420 432
421static void __init sparse_buffer_init(unsigned long size, int nid) 433static void __init sparse_buffer_init(unsigned long size, int nid)
422{ 434{
435 phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
423 WARN_ON(sparsemap_buf); /* forgot to call sparse_buffer_fini()? */ 436 WARN_ON(sparsemap_buf); /* forgot to call sparse_buffer_fini()? */
424 sparsemap_buf = 437 sparsemap_buf =
425 memblock_alloc_try_nid_raw(size, PAGE_SIZE, 438 memblock_alloc_try_nid_raw(size, PAGE_SIZE,
426 __pa(MAX_DMA_ADDRESS), 439 addr,
427 MEMBLOCK_ALLOC_ACCESSIBLE, nid); 440 MEMBLOCK_ALLOC_ACCESSIBLE, nid);
428 sparsemap_buf_end = sparsemap_buf + size; 441 sparsemap_buf_end = sparsemap_buf + size;
429} 442}