author		Mike Rapoport <rppt@linux.vnet.ibm.com>	2018-10-30 18:10:01 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-10-31 11:54:17 -0400
commit		2f770806fd2c3db9616965e57ba60d80f43c827d
tree		b5b7687c0a32a69f46fde6b649f6105925e06b1f
parent		7e1c4e27928e5f87b9b1eaf06dc31773b2f1e7f1
mm/memblock.c: warn if zero alignment was requested
After updating all memblock users to explicitly specify SMP_CACHE_BYTES
alignment rather than 0, it is still possible that uncovered users may
sneak in.  Add a WARN_ON_ONCE for such cases.
[sfr@canb.auug.org.au: use dump_stack() instead of WARN_ON_ONCE for the alignment checks]
Link: http://lkml.kernel.org/r/20181016131927.6ceba6ab@canb.auug.org.au
[akpm@linux-foundation.org: add apologetic comment]
Link: http://lkml.kernel.org/r/20181011060850.GA19822@rapoport-lnx
Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 mm/memblock.c | 11 +++++++++++
 1 file changed, 11 insertions(+), 0 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index 839531133816..7df468c8ebc8 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1247,6 +1247,12 @@ static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
 {
 	phys_addr_t found;
 
+	if (!align) {
+		/* Can't use WARNs this early in boot on powerpc */
+		dump_stack();
+		align = SMP_CACHE_BYTES;
+	}
+
 	found = memblock_find_in_range_node(size, align, start, end, nid,
 					    flags);
 	if (found && !memblock_reserve(found, size)) {
@@ -1369,6 +1375,11 @@ static void * __init memblock_alloc_internal(
 	if (WARN_ON_ONCE(slab_is_available()))
 		return kzalloc_node(size, GFP_NOWAIT, nid);
 
+	if (!align) {
+		dump_stack();
+		align = SMP_CACHE_BYTES;
+	}
+
 	if (max_addr > memblock.current_limit)
 		max_addr = memblock.current_limit;
 again:
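
For readers who want to see the fallback behaviour in isolation, the sketch below is a minimal userspace model of the check this patch adds, not kernel code: SMP_CACHE_BYTES is given an illustrative value, dump_stack() is a printing stand-in for the kernel helper, and normalize_align() is a hypothetical wrapper; in the actual patch the check is written inline at the top of memblock_alloc_range_nid() and memblock_alloc_internal().

	#include <stdio.h>
	#include <stdint.h>

	#define SMP_CACHE_BYTES 64UL	/* illustrative value; the real constant is per-architecture */

	/* Stand-in for the kernel's dump_stack(): here it only flags the offending call. */
	static void dump_stack(void)
	{
		fprintf(stderr, "memblock: zero alignment requested, falling back to SMP_CACHE_BYTES\n");
	}

	/* Models the zero-alignment check added to the memblock allocators. */
	static uint64_t normalize_align(uint64_t align)
	{
		if (!align) {
			/* Can't use WARNs this early in boot on powerpc */
			dump_stack();
			align = SMP_CACHE_BYTES;
		}
		return align;
	}

	int main(void)
	{
		/* align == 0 triggers the trace and is replaced; a real alignment passes through. */
		printf("align=0   -> %llu\n", (unsigned long long)normalize_align(0));
		printf("align=128 -> %llu\n", (unsigned long long)normalize_align(128));
		return 0;
	}

The point of the pattern is that a caller passing 0 still gets a usable allocation (aligned to SMP_CACHE_BYTES) while leaving a stack trace in the log that identifies the call site to be fixed; dump_stack() is used rather than WARN_ON_ONCE() because, per the notes above, WARNs cannot be used this early in boot on powerpc.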