aboutsummaryrefslogtreecommitdiffstats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
authorCody P Schafer <cody@linux.vnet.ibm.com>2013-02-22 19:35:23 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2013-02-23 20:50:20 -0500
commit108bcc96ef7047c02cad4d229f04da38186a3f3f (patch)
treee11d82074cae54dcf0fa8eea12750c661a16b02d /mm/page_alloc.c
parent9127ab4ff92f0ecd7b4671efa9d0edb21c691e9f (diff)
mm: add & use zone_end_pfn() and zone_spans_pfn()
Add 2 helpers (zone_end_pfn() and zone_spans_pfn()) to reduce code duplication. This also switches to using them in compaction (where an additional variable needed to be renamed), page_alloc, vmstat, memory_hotplug, and kmemleak. Note that in compaction.c I avoid calling zone_end_pfn() repeatedly because I expect at some point the synchronization issues with start_pfn & spanned_pages will need fixing, either by actually using the seqlock or clever memory barrier usage. Signed-off-by: Cody P Schafer <cody@linux.vnet.ibm.com> Cc: David Hansen <dave@linux.vnet.ibm.com> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Mel Gorman <mel@csn.ul.ie> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--mm/page_alloc.c22
1 file changed, 9 insertions, 13 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 64c83a8c3220..a3687afc5917 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -250,9 +250,7 @@ static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
250 250
251 do { 251 do {
252 seq = zone_span_seqbegin(zone); 252 seq = zone_span_seqbegin(zone);
253 if (pfn >= zone->zone_start_pfn + zone->spanned_pages) 253 if (!zone_spans_pfn(zone, pfn))
254 ret = 1;
255 else if (pfn < zone->zone_start_pfn)
256 ret = 1; 254 ret = 1;
257 } while (zone_span_seqretry(zone, seq)); 255 } while (zone_span_seqretry(zone, seq));
258 256
@@ -990,9 +988,9 @@ int move_freepages_block(struct zone *zone, struct page *page,
990 end_pfn = start_pfn + pageblock_nr_pages - 1; 988 end_pfn = start_pfn + pageblock_nr_pages - 1;
991 989
992 /* Do not cross zone boundaries */ 990 /* Do not cross zone boundaries */
993 if (start_pfn < zone->zone_start_pfn) 991 if (!zone_spans_pfn(zone, start_pfn))
994 start_page = page; 992 start_page = page;
995 if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages) 993 if (!zone_spans_pfn(zone, end_pfn))
996 return 0; 994 return 0;
997 995
998 return move_freepages(zone, start_page, end_page, migratetype); 996 return move_freepages(zone, start_page, end_page, migratetype);
@@ -1286,7 +1284,7 @@ void mark_free_pages(struct zone *zone)
1286 1284
1287 spin_lock_irqsave(&zone->lock, flags); 1285 spin_lock_irqsave(&zone->lock, flags);
1288 1286
1289 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; 1287 max_zone_pfn = zone_end_pfn(zone);
1290 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 1288 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1291 if (pfn_valid(pfn)) { 1289 if (pfn_valid(pfn)) {
1292 struct page *page = pfn_to_page(pfn); 1290 struct page *page = pfn_to_page(pfn);
@@ -3798,7 +3796,7 @@ static void setup_zone_migrate_reserve(struct zone *zone)
3798 * the block. 3796 * the block.
3799 */ 3797 */
3800 start_pfn = zone->zone_start_pfn; 3798 start_pfn = zone->zone_start_pfn;
3801 end_pfn = start_pfn + zone->spanned_pages; 3799 end_pfn = zone_end_pfn(zone);
3802 start_pfn = roundup(start_pfn, pageblock_nr_pages); 3800 start_pfn = roundup(start_pfn, pageblock_nr_pages);
3803 reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >> 3801 reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
3804 pageblock_order; 3802 pageblock_order;
@@ -3912,7 +3910,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
3912 * pfn out of zone. 3910 * pfn out of zone.
3913 */ 3911 */
3914 if ((z->zone_start_pfn <= pfn) 3912 if ((z->zone_start_pfn <= pfn)
3915 && (pfn < z->zone_start_pfn + z->spanned_pages) 3913 && (pfn < zone_end_pfn(z))
3916 && !(pfn & (pageblock_nr_pages - 1))) 3914 && !(pfn & (pageblock_nr_pages - 1)))
3917 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 3915 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3918 3916
@@ -4713,7 +4711,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
4713 * for the buddy allocator to function correctly. 4711 * for the buddy allocator to function correctly.
4714 */ 4712 */
4715 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 4713 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
4716 end = pgdat->node_start_pfn + pgdat->node_spanned_pages; 4714 end = pgdat_end_pfn(pgdat);
4717 end = ALIGN(end, MAX_ORDER_NR_PAGES); 4715 end = ALIGN(end, MAX_ORDER_NR_PAGES);
4718 size = (end - start) * sizeof(struct page); 4716 size = (end - start) * sizeof(struct page);
4719 map = alloc_remap(pgdat->node_id, size); 4717 map = alloc_remap(pgdat->node_id, size);
@@ -5928,8 +5926,7 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
5928 pfn = page_to_pfn(page); 5926 pfn = page_to_pfn(page);
5929 bitmap = get_pageblock_bitmap(zone, pfn); 5927 bitmap = get_pageblock_bitmap(zone, pfn);
5930 bitidx = pfn_to_bitidx(zone, pfn); 5928 bitidx = pfn_to_bitidx(zone, pfn);
5931 VM_BUG_ON(pfn < zone->zone_start_pfn); 5929 VM_BUG_ON(!zone_spans_pfn(zone, pfn));
5932 VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
5933 5930
5934 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) 5931 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5935 if (flags & value) 5932 if (flags & value)
@@ -6027,8 +6024,7 @@ bool is_pageblock_removable_nolock(struct page *page)
6027 6024
6028 zone = page_zone(page); 6025 zone = page_zone(page);
6029 pfn = page_to_pfn(page); 6026 pfn = page_to_pfn(page);
6030 if (zone->zone_start_pfn > pfn || 6027 if (!zone_spans_pfn(zone, pfn))
6031 zone->zone_start_pfn + zone->spanned_pages <= pfn)
6032 return false; 6028 return false;
6033 6029
6034 return !has_unmovable_pages(zone, page, 0, true); 6030 return !has_unmovable_pages(zone, page, 0, true);