aboutsummaryrefslogtreecommitdiffstats
path: root/mm/memblock.c
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2011-07-12 05:15:54 -0400
committerH. Peter Anvin <hpa@linux.intel.com>2011-07-14 14:47:40 -0400
commited7b56a799cade11f458cd83e1150af54a66b7e8 (patch)
treef61cd8c07445d6a61e4cbf323c797038904db4c3 /mm/memblock.c
parenteb40c4c27f1722f058e4713ccfedebac577d5190 (diff)
memblock: Remove memblock_memory_can_coalesce()
Arch could implement memblock_memory_can_coalesce() to veto merging of adjacent or overlapping memblock regions; however, no arch did and any vetoing would trigger WARN_ON(). Memblock regions are supposed to deal with proper memory anyway. Remove the unused hook. Signed-off-by: Tejun Heo <tj@kernel.org> Link: http://lkml.kernel.org/r/1310462166-31469-2-git-send-email-tj@kernel.org Cc: Yinghai Lu <yinghai@kernel.org> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'mm/memblock.c')
-rw-r--r--mm/memblock.c29
1 file changed, 0 insertions, 29 deletions
diff --git a/mm/memblock.c b/mm/memblock.c
index a8edb422795b..bd3a3a9591d4 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -251,12 +251,6 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
251 return 0; 251 return 0;
252} 252}
253 253
254extern int __init_memblock __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
255 phys_addr_t addr2, phys_addr_t size2)
256{
257 return 1;
258}
259
260static long __init_memblock memblock_add_region(struct memblock_type *type, 254static long __init_memblock memblock_add_region(struct memblock_type *type,
261 phys_addr_t base, phys_addr_t size) 255 phys_addr_t base, phys_addr_t size)
262{ 256{
@@ -282,17 +276,6 @@ static long __init_memblock memblock_add_region(struct memblock_type *type,
282 * of a block. 276 * of a block.
283 */ 277 */
284 if (base < rgn->base && end >= rgn->base) { 278 if (base < rgn->base && end >= rgn->base) {
285 /* If we can't coalesce, create a new block */
286 if (!memblock_memory_can_coalesce(base, size,
287 rgn->base,
288 rgn->size)) {
289 /* Overlap & can't coalesce are mutually
290 * exclusive, if you do that, be prepared
291 * for trouble
292 */
293 WARN_ON(end != rgn->base);
294 goto new_block;
295 }
296 /* We extend the bottom of the block down to our 279 /* We extend the bottom of the block down to our
297 * base 280 * base
298 */ 281 */
@@ -316,17 +299,6 @@ static long __init_memblock memblock_add_region(struct memblock_type *type,
316 * top of a block 299 * top of a block
317 */ 300 */
318 if (base <= rend && end >= rend) { 301 if (base <= rend && end >= rend) {
319 /* If we can't coalesce, create a new block */
320 if (!memblock_memory_can_coalesce(rgn->base,
321 rgn->size,
322 base, size)) {
323 /* Overlap & can't coalesce are mutually
324 * exclusive, if you do that, be prepared
325 * for trouble
326 */
327 WARN_ON(rend != base);
328 goto new_block;
329 }
330 /* We adjust our base down to enclose the 302 /* We adjust our base down to enclose the
331 * original block and destroy it. It will be 303 * original block and destroy it. It will be
332 * part of our new allocation. Since we've 304 * part of our new allocation. Since we've
@@ -349,7 +321,6 @@ static long __init_memblock memblock_add_region(struct memblock_type *type,
349 return 0; 321 return 0;
350 } 322 }
351 323
352 new_block:
353 /* If we are out of space, we fail. It's too late to resize the array 324 /* If we are out of space, we fail. It's too late to resize the array
354 * but then this shouldn't have happened in the first place. 325 * but then this shouldn't have happened in the first place.
355 */ 326 */