author     Cody P Schafer <cody@linux.vnet.ibm.com>        2013-02-22 19:35:23 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-02-23 20:50:20 -0500
commit     108bcc96ef7047c02cad4d229f04da38186a3f3f
tree       e11d82074cae54dcf0fa8eea12750c661a16b02d
parent     9127ab4ff92f0ecd7b4671efa9d0edb21c691e9f
mm: add & use zone_end_pfn() and zone_spans_pfn()
Add 2 helpers (zone_end_pfn() and zone_spans_pfn()) to reduce code
duplication.

This also switches to using them in compaction (where an additional
variable needed to be renamed), page_alloc, vmstat, memory_hotplug, and
kmemleak.

Note that in compaction.c I avoid calling zone_end_pfn() repeatedly
because I expect at some point the synchronization issues with start_pfn
& spanned_pages will need fixing, either by actually using the seqlock or
clever memory barrier usage.

Signed-off-by: Cody P Schafer <cody@linux.vnet.ibm.com>
Cc: David Hansen <dave@linux.vnet.ibm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--   include/linux/mmzone.h | 10
-rw-r--r--   mm/compaction.c        | 10
-rw-r--r--   mm/kmemleak.c          |  5
-rw-r--r--   mm/memory_hotplug.c    | 10
-rw-r--r--   mm/page_alloc.c        | 22
-rw-r--r--   mm/vmstat.c            |  2
6 files changed, 32 insertions(+), 27 deletions(-)
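For readers skimming the diff, here is a minimal userspace sketch (not kernel code) of what the two new helpers compute, using a pared-down struct zone with only the fields they touch. The real definitions are in the include/linux/mmzone.h hunk below; note that the sketch gives zone_end_pfn() an unsigned long return type, while the hunk declares it as plain unsigned.

/*
 * Standalone illustration only -- not part of this patch. It shows the
 * half-open [zone_start_pfn, zone_end_pfn) semantics of the helpers.
 */
#include <stdbool.h>
#include <stdio.h>

struct zone {
        unsigned long zone_start_pfn;   /* first pfn the zone covers */
        unsigned long spanned_pages;    /* pfns spanned, including holes */
};

/* One past the last pfn spanned by the zone. */
static inline unsigned long zone_end_pfn(const struct zone *zone)
{
        return zone->zone_start_pfn + zone->spanned_pages;
}

/* True if pfn falls inside [zone_start_pfn, zone_end_pfn). */
static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
        return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

int main(void)
{
        struct zone z = { .zone_start_pfn = 0x1000, .spanned_pages = 0x800 };

        /* Before this patch, callers open-coded both halves of the check. */
        printf("end_pfn = %#lx\n", zone_end_pfn(&z));             /* 0x1800 */
        printf("spans 0x17ff? %d\n", zone_spans_pfn(&z, 0x17ff)); /* 1 */
        printf("spans 0x1800? %d\n", zone_spans_pfn(&z, 0x1800)); /* 0 */
        return 0;
}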
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 6c80d0ac14dd..34343f51e211 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -527,6 +527,16 @@ static inline int zone_is_oom_locked(const struct zone *zone)
 	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
 }
 
+static inline unsigned zone_end_pfn(const struct zone *zone)
+{
+	return zone->zone_start_pfn + zone->spanned_pages;
+}
+
+static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
+{
+	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
+}
+
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
diff --git a/mm/compaction.c b/mm/compaction.c
index 25e75e3e2ac6..05ccb4cc0bdb 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -86,7 +86,7 @@ static inline bool isolation_suitable(struct compact_control *cc,
 static void __reset_isolation_suitable(struct zone *zone)
 {
 	unsigned long start_pfn = zone->zone_start_pfn;
-	unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	unsigned long end_pfn = zone_end_pfn(zone);
 	unsigned long pfn;
 
 	zone->compact_cached_migrate_pfn = start_pfn;
@@ -647,7 +647,7 @@ static void isolate_freepages(struct zone *zone,
 				struct compact_control *cc)
 {
 	struct page *page;
-	unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
+	unsigned long high_pfn, low_pfn, pfn, z_end_pfn, end_pfn;
 	int nr_freepages = cc->nr_freepages;
 	struct list_head *freelist = &cc->freepages;
 
@@ -666,7 +666,7 @@ static void isolate_freepages(struct zone *zone,
 	 */
 	high_pfn = min(low_pfn, pfn);
 
-	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	z_end_pfn = zone_end_pfn(zone);
 
 	/*
 	 * Isolate free pages until enough are available to migrate the
@@ -709,7 +709,7 @@ static void isolate_freepages(struct zone *zone,
 		 * only scans within a pageblock
 		 */
 		end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
-		end_pfn = min(end_pfn, zone_end_pfn);
+		end_pfn = min(end_pfn, z_end_pfn);
 		isolated = isolate_freepages_block(cc, pfn, end_pfn,
 						   freelist, false);
 		nr_freepages += isolated;
@@ -923,7 +923,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 {
 	int ret;
 	unsigned long start_pfn = zone->zone_start_pfn;
-	unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	unsigned long end_pfn = zone_end_pfn(zone);
 
 	ret = compaction_suitable(zone, cc->order);
 	switch (ret) {
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 752a705c77c2..83dd5fbf5e60 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1300,9 +1300,8 @@ static void kmemleak_scan(void)
 	 */
 	lock_memory_hotplug();
 	for_each_online_node(i) {
-		pg_data_t *pgdat = NODE_DATA(i);
-		unsigned long start_pfn = pgdat->node_start_pfn;
-		unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
+		unsigned long start_pfn = node_start_pfn(i);
+		unsigned long end_pfn = node_end_pfn(i);
 		unsigned long pfn;
 
 		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index dda1ca695a08..8b3235eedf3d 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -299,7 +299,7 @@ static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
 	pgdat_resize_lock(z1->zone_pgdat, &flags);
 
 	/* can't move pfns which are higher than @z2 */
-	if (end_pfn > z2->zone_start_pfn + z2->spanned_pages)
+	if (end_pfn > zone_end_pfn(z2))
 		goto out_fail;
 	/* the move out part mast at the left most of @z2 */
 	if (start_pfn > z2->zone_start_pfn)
@@ -315,7 +315,7 @@ static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
 	z1_start_pfn = start_pfn;
 
 	resize_zone(z1, z1_start_pfn, end_pfn);
-	resize_zone(z2, end_pfn, z2->zone_start_pfn + z2->spanned_pages);
+	resize_zone(z2, end_pfn, zone_end_pfn(z2));
 
 	pgdat_resize_unlock(z1->zone_pgdat, &flags);
 
@@ -347,15 +347,15 @@ static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2,
 	if (z1->zone_start_pfn > start_pfn)
 		goto out_fail;
 	/* the move out part mast at the right most of @z1 */
-	if (z1->zone_start_pfn + z1->spanned_pages > end_pfn)
+	if (zone_end_pfn(z1) > end_pfn)
 		goto out_fail;
 	/* must included/overlap */
-	if (start_pfn >= z1->zone_start_pfn + z1->spanned_pages)
+	if (start_pfn >= zone_end_pfn(z1))
 		goto out_fail;
 
 	/* use end_pfn for z2's end_pfn if z2 is empty */
 	if (z2->spanned_pages)
-		z2_end_pfn = z2->zone_start_pfn + z2->spanned_pages;
+		z2_end_pfn = zone_end_pfn(z2);
 	else
 		z2_end_pfn = end_pfn;
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 64c83a8c3220..a3687afc5917 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -250,9 +250,7 @@ static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 
 	do {
 		seq = zone_span_seqbegin(zone);
-		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
-			ret = 1;
-		else if (pfn < zone->zone_start_pfn)
+		if (!zone_spans_pfn(zone, pfn))
 			ret = 1;
 	} while (zone_span_seqretry(zone, seq));
 
@@ -990,9 +988,9 @@ int move_freepages_block(struct zone *zone, struct page *page,
 	end_pfn = start_pfn + pageblock_nr_pages - 1;
 
 	/* Do not cross zone boundaries */
-	if (start_pfn < zone->zone_start_pfn)
+	if (!zone_spans_pfn(zone, start_pfn))
 		start_page = page;
-	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
+	if (!zone_spans_pfn(zone, end_pfn))
 		return 0;
 
 	return move_freepages(zone, start_page, end_page, migratetype);
@@ -1286,7 +1284,7 @@ void mark_free_pages(struct zone *zone)
 
 	spin_lock_irqsave(&zone->lock, flags);
 
-	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	max_zone_pfn = zone_end_pfn(zone);
 	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
 		if (pfn_valid(pfn)) {
 			struct page *page = pfn_to_page(pfn);
@@ -3798,7 +3796,7 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 	 * the block.
 	 */
 	start_pfn = zone->zone_start_pfn;
-	end_pfn = start_pfn + zone->spanned_pages;
+	end_pfn = zone_end_pfn(zone);
 	start_pfn = roundup(start_pfn, pageblock_nr_pages);
 	reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
 							pageblock_order;
@@ -3912,7 +3910,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		 * pfn out of zone.
 		 */
 		if ((z->zone_start_pfn <= pfn)
-		    && (pfn < z->zone_start_pfn + z->spanned_pages)
+		    && (pfn < zone_end_pfn(z))
 		    && !(pfn & (pageblock_nr_pages - 1)))
 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
 
@@ -4713,7 +4711,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
 	 * for the buddy allocator to function correctly.
 	 */
 	start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
-	end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
+	end = pgdat_end_pfn(pgdat);
 	end = ALIGN(end, MAX_ORDER_NR_PAGES);
 	size = (end - start) * sizeof(struct page);
 	map = alloc_remap(pgdat->node_id, size);
@@ -5928,8 +5926,7 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
 	pfn = page_to_pfn(page);
 	bitmap = get_pageblock_bitmap(zone, pfn);
 	bitidx = pfn_to_bitidx(zone, pfn);
-	VM_BUG_ON(pfn < zone->zone_start_pfn);
-	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
+	VM_BUG_ON(!zone_spans_pfn(zone, pfn));
 
 	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
 		if (flags & value)
@@ -6027,8 +6024,7 @@ bool is_pageblock_removable_nolock(struct page *page)
 
 	zone = page_zone(page);
 	pfn = page_to_pfn(page);
-	if (zone->zone_start_pfn > pfn ||
-			zone->zone_start_pfn + zone->spanned_pages <= pfn)
+	if (!zone_spans_pfn(zone, pfn))
 		return false;
 
 	return !has_unmovable_pages(zone, page, 0, true);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 57f02fd1768b..e1d8ed172c42 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -891,7 +891,7 @@ static void pagetypeinfo_showblockcount_print(struct seq_file *m,
 	int mtype;
 	unsigned long pfn;
 	unsigned long start_pfn = zone->zone_start_pfn;
-	unsigned long end_pfn = start_pfn + zone->spanned_pages;
+	unsigned long end_pfn = zone_end_pfn(zone);
 	unsigned long count[MIGRATE_TYPES] = { 0, };
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
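On the synchronization caveat in the commit message: the page_outside_zone_boundaries() hunk above shows the existing zone-span seqlock retry loop. A hypothetical helper built on the same pattern could look like the sketch below; zone_snapshot_span() is an illustrative name, not something this patch adds, while zone_span_seqbegin(), zone_span_seqretry(), and zone_end_pfn() are the calls already visible in the diff.

/*
 * Illustrative only -- not part of this patch. A caller that needs a
 * consistent view of a zone's span could wrap the same seqlock retry
 * loop used in page_outside_zone_boundaries() above.
 */
static void zone_snapshot_span(struct zone *zone,
			       unsigned long *start_pfn,
			       unsigned long *end_pfn)
{
	unsigned seq;

	do {
		/* Begin a read-side pass over the zone span seqlock. */
		seq = zone_span_seqbegin(zone);
		*start_pfn = zone->zone_start_pfn;
		*end_pfn = zone_end_pfn(zone);
		/* Retry if a writer resized the zone while we were reading. */
	} while (zone_span_seqretry(zone, seq));
}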