about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
authorMel Gorman <mgorman@suse.de>2013-02-22 19:34:59 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2013-02-23 20:50:18 -0500
commit22b751c3d0376e86a377e3a0aa2ddbbe9d2eefc1 (patch)
treefe946d7d4350b2454d65f88377d264fbc93958ce /mm
parente4715f01be697a3730c78f8ffffb595591d6a88c (diff)
mm: rename page struct field helpers
The function names page_xchg_last_nid(), page_last_nid() and reset_page_last_nid() were judged to be inconsistent so rename them to a struct_field_op style pattern. As it looked jarring to have reset_page_mapcount() and page_nid_reset_last() beside each other in memmap_init_zone(), this patch also renames reset_page_mapcount() to page_mapcount_reset(). There are others like init_page_count() but as it is used throughout the arch code a rename would likely cause more conflicts than it is worth.

[akpm@linux-foundation.org: fix zcache]

Signed-off-by: Mel Gorman <mgorman@suse.de>
Suggested-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/huge_memory.c2
-rw-r--r--mm/mempolicy.c2
-rw-r--r--mm/migrate.c4
-rw-r--r--mm/mmzone.c4
-rw-r--r--mm/page_alloc.c10
-rw-r--r--mm/slob.c2
-rw-r--r--mm/slub.c2
7 files changed, 13 insertions, 13 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c63a21d0e991..6049376c7226 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1639,7 +1639,7 @@ static void __split_huge_page_refcount(struct page *page)
1639 page_tail->mapping = page->mapping; 1639 page_tail->mapping = page->mapping;
1640 1640
1641 page_tail->index = page->index + i; 1641 page_tail->index = page->index + i;
1642 page_xchg_last_nid(page_tail, page_last_nid(page)); 1642 page_nid_xchg_last(page_tail, page_nid_last(page));
1643 1643
1644 BUG_ON(!PageAnon(page_tail)); 1644 BUG_ON(!PageAnon(page_tail));
1645 BUG_ON(!PageUptodate(page_tail)); 1645 BUG_ON(!PageUptodate(page_tail));
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 6f7979c566d9..2ae78e255e08 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2316,7 +2316,7 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
2316 * it less likely we act on an unlikely task<->page 2316 * it less likely we act on an unlikely task<->page
2317 * relation. 2317 * relation.
2318 */ 2318 */
2319 last_nid = page_xchg_last_nid(page, polnid); 2319 last_nid = page_nid_xchg_last(page, polnid);
2320 if (last_nid != polnid) 2320 if (last_nid != polnid)
2321 goto out; 2321 goto out;
2322 } 2322 }
diff --git a/mm/migrate.c b/mm/migrate.c
index f560071e89c5..de5c371a7969 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1497,7 +1497,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
1497 __GFP_NOWARN) & 1497 __GFP_NOWARN) &
1498 ~GFP_IOFS, 0); 1498 ~GFP_IOFS, 0);
1499 if (newpage) 1499 if (newpage)
1500 page_xchg_last_nid(newpage, page_last_nid(page)); 1500 page_nid_xchg_last(newpage, page_nid_last(page));
1501 1501
1502 return newpage; 1502 return newpage;
1503} 1503}
@@ -1681,7 +1681,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1681 if (!new_page) 1681 if (!new_page)
1682 goto out_fail; 1682 goto out_fail;
1683 1683
1684 page_xchg_last_nid(new_page, page_last_nid(page)); 1684 page_nid_xchg_last(new_page, page_nid_last(page));
1685 1685
1686 isolated = numamigrate_isolate_page(pgdat, page); 1686 isolated = numamigrate_isolate_page(pgdat, page);
1687 if (!isolated) { 1687 if (!isolated) {
diff --git a/mm/mmzone.c b/mm/mmzone.c
index bce796e8487f..2ac0afbd68f3 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -98,14 +98,14 @@ void lruvec_init(struct lruvec *lruvec)
98} 98}
99 99
100#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_NID_NOT_IN_PAGE_FLAGS) 100#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_NID_NOT_IN_PAGE_FLAGS)
101int page_xchg_last_nid(struct page *page, int nid) 101int page_nid_xchg_last(struct page *page, int nid)
102{ 102{
103 unsigned long old_flags, flags; 103 unsigned long old_flags, flags;
104 int last_nid; 104 int last_nid;
105 105
106 do { 106 do {
107 old_flags = flags = page->flags; 107 old_flags = flags = page->flags;
108 last_nid = page_last_nid(page); 108 last_nid = page_nid_last(page);
109 109
110 flags &= ~(LAST_NID_MASK << LAST_NID_PGSHIFT); 110 flags &= ~(LAST_NID_MASK << LAST_NID_PGSHIFT);
111 flags |= (nid & LAST_NID_MASK) << LAST_NID_PGSHIFT; 111 flags |= (nid & LAST_NID_MASK) << LAST_NID_PGSHIFT;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3ede25e6686e..445718b328b6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -295,7 +295,7 @@ static void bad_page(struct page *page)
295 295
296 /* Don't complain about poisoned pages */ 296 /* Don't complain about poisoned pages */
297 if (PageHWPoison(page)) { 297 if (PageHWPoison(page)) {
298 reset_page_mapcount(page); /* remove PageBuddy */ 298 page_mapcount_reset(page); /* remove PageBuddy */
299 return; 299 return;
300 } 300 }
301 301
@@ -327,7 +327,7 @@ static void bad_page(struct page *page)
327 dump_stack(); 327 dump_stack();
328out: 328out:
329 /* Leave bad fields for debug, except PageBuddy could make trouble */ 329 /* Leave bad fields for debug, except PageBuddy could make trouble */
330 reset_page_mapcount(page); /* remove PageBuddy */ 330 page_mapcount_reset(page); /* remove PageBuddy */
331 add_taint(TAINT_BAD_PAGE); 331 add_taint(TAINT_BAD_PAGE);
332} 332}
333 333
@@ -613,7 +613,7 @@ static inline int free_pages_check(struct page *page)
613 bad_page(page); 613 bad_page(page);
614 return 1; 614 return 1;
615 } 615 }
616 reset_page_last_nid(page); 616 page_nid_reset_last(page);
617 if (page->flags & PAGE_FLAGS_CHECK_AT_PREP) 617 if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
618 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 618 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
619 return 0; 619 return 0;
@@ -3894,8 +3894,8 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
3894 set_page_links(page, zone, nid, pfn); 3894 set_page_links(page, zone, nid, pfn);
3895 mminit_verify_page_links(page, zone, nid, pfn); 3895 mminit_verify_page_links(page, zone, nid, pfn);
3896 init_page_count(page); 3896 init_page_count(page);
3897 reset_page_mapcount(page); 3897 page_mapcount_reset(page);
3898 reset_page_last_nid(page); 3898 page_nid_reset_last(page);
3899 SetPageReserved(page); 3899 SetPageReserved(page);
3900 /* 3900 /*
3901 * Mark the block movable so that blocks are reserved for 3901 * Mark the block movable so that blocks are reserved for
diff --git a/mm/slob.c b/mm/slob.c
index a99fdf7a0907..eeed4a05a2ef 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -360,7 +360,7 @@ static void slob_free(void *block, int size)
360 clear_slob_page_free(sp); 360 clear_slob_page_free(sp);
361 spin_unlock_irqrestore(&slob_lock, flags); 361 spin_unlock_irqrestore(&slob_lock, flags);
362 __ClearPageSlab(sp); 362 __ClearPageSlab(sp);
363 reset_page_mapcount(sp); 363 page_mapcount_reset(sp);
364 slob_free_pages(b, 0); 364 slob_free_pages(b, 0);
365 return; 365 return;
366 } 366 }
diff --git a/mm/slub.c b/mm/slub.c
index ba2ca53f6c3a..ebcc44eb43b9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1408,7 +1408,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
1408 __ClearPageSlab(page); 1408 __ClearPageSlab(page);
1409 1409
1410 memcg_release_pages(s, order); 1410 memcg_release_pages(s, order);
1411 reset_page_mapcount(page); 1411 page_mapcount_reset(page);
1412 if (current->reclaim_state) 1412 if (current->reclaim_state)
1413 current->reclaim_state->reclaimed_slab += pages; 1413 current->reclaim_state->reclaimed_slab += pages;
1414 __free_memcg_kmem_pages(page, order); 1414 __free_memcg_kmem_pages(page, order);