aboutsummaryrefslogtreecommitdiffstats
path: root/mm/memory_hotplug.c
diff options
context:
space:
mode:
authorCody P Schafer <cody@linux.vnet.ibm.com>2013-02-22 19:35:23 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2013-02-23 20:50:20 -0500
commit108bcc96ef7047c02cad4d229f04da38186a3f3f (patch)
treee11d82074cae54dcf0fa8eea12750c661a16b02d /mm/memory_hotplug.c
parent9127ab4ff92f0ecd7b4671efa9d0edb21c691e9f (diff)
mm: add & use zone_end_pfn() and zone_spans_pfn()
Add 2 helpers (zone_end_pfn() and zone_spans_pfn()) to reduce code duplication. This also switches to using them in compaction (where an additional variable needed to be renamed), page_alloc, vmstat, memory_hotplug, and kmemleak. Note that in compaction.c I avoid calling zone_end_pfn() repeatedly because I expect at some point the synchronization issues with start_pfn & spanned_pages will need fixing, either by actually using the seqlock or clever memory barrier usage. Signed-off-by: Cody P Schafer <cody@linux.vnet.ibm.com> Cc: David Hansen <dave@linux.vnet.ibm.com> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Mel Gorman <mel@csn.ul.ie> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memory_hotplug.c')
-rw-r--r--mm/memory_hotplug.c10
1 file changed, 5 insertions, 5 deletions
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index dda1ca695a08..8b3235eedf3d 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -299,7 +299,7 @@ static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
299 pgdat_resize_lock(z1->zone_pgdat, &flags); 299 pgdat_resize_lock(z1->zone_pgdat, &flags);
300 300
301 /* can't move pfns which are higher than @z2 */ 301 /* can't move pfns which are higher than @z2 */
302 if (end_pfn > z2->zone_start_pfn + z2->spanned_pages) 302 if (end_pfn > zone_end_pfn(z2))
303 goto out_fail; 303 goto out_fail;
304 /* the move out part mast at the left most of @z2 */ 304 /* the move out part mast at the left most of @z2 */
305 if (start_pfn > z2->zone_start_pfn) 305 if (start_pfn > z2->zone_start_pfn)
@@ -315,7 +315,7 @@ static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
315 z1_start_pfn = start_pfn; 315 z1_start_pfn = start_pfn;
316 316
317 resize_zone(z1, z1_start_pfn, end_pfn); 317 resize_zone(z1, z1_start_pfn, end_pfn);
318 resize_zone(z2, end_pfn, z2->zone_start_pfn + z2->spanned_pages); 318 resize_zone(z2, end_pfn, zone_end_pfn(z2));
319 319
320 pgdat_resize_unlock(z1->zone_pgdat, &flags); 320 pgdat_resize_unlock(z1->zone_pgdat, &flags);
321 321
@@ -347,15 +347,15 @@ static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2,
347 if (z1->zone_start_pfn > start_pfn) 347 if (z1->zone_start_pfn > start_pfn)
348 goto out_fail; 348 goto out_fail;
349 /* the move out part mast at the right most of @z1 */ 349 /* the move out part mast at the right most of @z1 */
350 if (z1->zone_start_pfn + z1->spanned_pages > end_pfn) 350 if (zone_end_pfn(z1) > end_pfn)
351 goto out_fail; 351 goto out_fail;
352 /* must included/overlap */ 352 /* must included/overlap */
353 if (start_pfn >= z1->zone_start_pfn + z1->spanned_pages) 353 if (start_pfn >= zone_end_pfn(z1))
354 goto out_fail; 354 goto out_fail;
355 355
356 /* use end_pfn for z2's end_pfn if z2 is empty */ 356 /* use end_pfn for z2's end_pfn if z2 is empty */
357 if (z2->spanned_pages) 357 if (z2->spanned_pages)
358 z2_end_pfn = z2->zone_start_pfn + z2->spanned_pages; 358 z2_end_pfn = zone_end_pfn(z2);
359 else 359 else
360 z2_end_pfn = end_pfn; 360 z2_end_pfn = end_pfn;
361 361