author		Peter Zijlstra <peterz@infradead.org>	2013-10-07 06:29:20 -0400
committer	Ingo Molnar <mingo@kernel.org>		2013-10-09 08:47:45 -0400
commit		90572890d202527c366aa9489b32404e88a7c020
tree		0577f3b043e312f6d53e50105b236514f7df2455 /mm
parent		e1dda8a797b59d7ec4b17e393152ec3273a552d5
mm: numa: Change page last {nid,pid} into {cpu,pid}
Change the per page last fault tracking to use cpu,pid instead of
nid,pid. This will allow us to look up the alternate task more
easily. Note that even though it is the cpu that is stored in the
page flags, the mpol_misplaced decision is still based on the node.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/1381141781-10992-43-git-send-email-mgorman@suse.de
[ Fixed build failure on 32-bit systems. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
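For reference, the new encoding packs the faulting cpu and a low-order
slice of the pid into one integer, where the old code packed nid and
pid. The helpers named in the hunks below (cpu_pid_to_cpupid(),
cpupid_to_cpu(), cpupid_to_pid()) live in headers outside this 'mm'
diffstat; the following is a minimal userspace sketch of the idea,
where the 8-bit pid slice and the cpu width are illustrative
assumptions, not the kernel's exact constants.

/*
 * Minimal userspace sketch of the cpupid encoding. Assumption: an
 * 8-bit pid slice and a 10-bit cpu field; the real helpers size the
 * cpu field from the number of possible CPUs.
 */
#include <assert.h>
#include <stdio.h>

#define PID_BITS	8			/* low-order pid slice */
#define PID_MASK	((1 << PID_BITS) - 1)
#define CPU_BITS	10			/* illustrative cpu width */
#define CPU_MASK	((1 << CPU_BITS) - 1)

static int cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((cpu & CPU_MASK) << PID_BITS) | (pid & PID_MASK);
}

static int cpupid_to_cpu(int cpupid)
{
	return (cpupid >> PID_BITS) & CPU_MASK;
}

static int cpupid_to_pid(int cpupid)
{
	return cpupid & PID_MASK;
}

int main(void)
{
	int cpupid = cpu_pid_to_cpupid(3, 4242);

	/* Only the low 8 pid bits survive: 4242 & 0xff == 146. */
	assert(cpupid_to_cpu(cpupid) == 3);
	assert(cpupid_to_pid(cpupid) == (4242 & PID_MASK));
	printf("cpupid=%#x cpu=%d pid=%d\n",
	       cpupid, cpupid_to_cpu(cpupid), cpupid_to_pid(cpupid));
	return 0;
}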
Diffstat (limited to 'mm')
-rw-r--r--  mm/huge_memory.c  |  8
-rw-r--r--  mm/memory.c       | 16
-rw-r--r--  mm/mempolicy.c    | 16
-rw-r--r--  mm/migrate.c      |  4
-rw-r--r--  mm/mm_init.c      | 18
-rw-r--r--  mm/mmzone.c       | 14
-rw-r--r--  mm/mprotect.c     | 28
-rw-r--r--  mm/page_alloc.c   |  4
8 files changed, 55 insertions(+), 53 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0baf0e4d5203..becf92ca54f3 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1282,7 +1282,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *page;
 	unsigned long haddr = addr & HPAGE_PMD_MASK;
 	int page_nid = -1, this_nid = numa_node_id();
-	int target_nid, last_nidpid = -1;
+	int target_nid, last_cpupid = -1;
 	bool page_locked;
 	bool migrated = false;
 
@@ -1293,7 +1293,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	page = pmd_page(pmd);
 	BUG_ON(is_huge_zero_page(page));
 	page_nid = page_to_nid(page);
-	last_nidpid = page_nidpid_last(page);
+	last_cpupid = page_cpupid_last(page);
 	count_vm_numa_event(NUMA_HINT_FAULTS);
 	if (page_nid == this_nid)
 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
@@ -1362,7 +1362,7 @@ out:
 		page_unlock_anon_vma_read(anon_vma);
 
 	if (page_nid != -1)
-		task_numa_fault(last_nidpid, page_nid, HPAGE_PMD_NR, migrated);
+		task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, migrated);
 
 	return 0;
 }
@@ -1682,7 +1682,7 @@ static void __split_huge_page_refcount(struct page *page,
 		page_tail->mapping = page->mapping;
 
 		page_tail->index = page->index + i;
-		page_nidpid_xchg_last(page_tail, page_nidpid_last(page));
+		page_cpupid_xchg_last(page_tail, page_cpupid_last(page));
 
 		BUG_ON(!PageAnon(page_tail));
 		BUG_ON(!PageUptodate(page_tail));
diff --git a/mm/memory.c b/mm/memory.c
index cc7f20691c82..5162e6d0d652 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -69,8 +69,8 @@
 
 #include "internal.h"
 
-#ifdef LAST_NIDPID_NOT_IN_PAGE_FLAGS
-#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_nidpid.
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
 #endif
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
@@ -3536,7 +3536,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *page = NULL;
 	spinlock_t *ptl;
 	int page_nid = -1;
-	int last_nidpid;
+	int last_cpupid;
 	int target_nid;
 	bool migrated = false;
 
@@ -3567,7 +3567,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 	BUG_ON(is_zero_pfn(page_to_pfn(page)));
 
-	last_nidpid = page_nidpid_last(page);
+	last_cpupid = page_cpupid_last(page);
 	page_nid = page_to_nid(page);
 	target_nid = numa_migrate_prep(page, vma, addr, page_nid);
 	pte_unmap_unlock(ptep, ptl);
@@ -3583,7 +3583,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 out:
 	if (page_nid != -1)
-		task_numa_fault(last_nidpid, page_nid, 1, migrated);
+		task_numa_fault(last_cpupid, page_nid, 1, migrated);
 	return 0;
 }
 
@@ -3598,7 +3598,7 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	unsigned long offset;
 	spinlock_t *ptl;
 	bool numa = false;
-	int last_nidpid;
+	int last_cpupid;
 
 	spin_lock(&mm->page_table_lock);
 	pmd = *pmdp;
@@ -3643,7 +3643,7 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		if (unlikely(!page))
 			continue;
 
-		last_nidpid = page_nidpid_last(page);
+		last_cpupid = page_cpupid_last(page);
 		page_nid = page_to_nid(page);
 		target_nid = numa_migrate_prep(page, vma, addr, page_nid);
 		pte_unmap_unlock(pte, ptl);
@@ -3656,7 +3656,7 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		}
 
 		if (page_nid != -1)
-			task_numa_fault(last_nidpid, page_nid, 1, migrated);
+			task_numa_fault(last_cpupid, page_nid, 1, migrated);
 
 		pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
 	}
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 0e895a2eed5f..a5867ef24bda 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2324,6 +2324,8 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
 	struct zone *zone;
 	int curnid = page_to_nid(page);
 	unsigned long pgoff;
+	int thiscpu = raw_smp_processor_id();
+	int thisnid = cpu_to_node(thiscpu);
 	int polnid = -1;
 	int ret = -1;
 
@@ -2372,11 +2374,11 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
 
 	/* Migrate the page towards the node whose CPU is referencing it */
 	if (pol->flags & MPOL_F_MORON) {
-		int last_nidpid;
-		int this_nidpid;
+		int last_cpupid;
+		int this_cpupid;
 
-		polnid = numa_node_id();
-		this_nidpid = nid_pid_to_nidpid(polnid, current->pid);
+		polnid = thisnid;
+		this_cpupid = cpu_pid_to_cpupid(thiscpu, current->pid);
 
 		/*
 		 * Multi-stage node selection is used in conjunction
@@ -2399,8 +2401,8 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
 		 * it less likely we act on an unlikely task<->page
 		 * relation.
 		 */
-		last_nidpid = page_nidpid_xchg_last(page, this_nidpid);
-		if (!nidpid_pid_unset(last_nidpid) && nidpid_to_nid(last_nidpid) != polnid)
+		last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
+		if (!cpupid_pid_unset(last_cpupid) && cpupid_to_nid(last_cpupid) != thisnid)
 			goto out;
 
 #ifdef CONFIG_NUMA_BALANCING
@@ -2410,7 +2412,7 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
 		 * This way a short and temporary process migration will
 		 * not cause excessive memory migration.
 		 */
-		if (polnid != current->numa_preferred_nid &&
+		if (thisnid != current->numa_preferred_nid &&
 				!current->numa_migrate_seq)
 			goto out;
 #endif
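The filter above only acts when the previous fault recorded on the
page already agrees with the current node; a first fault merely stamps
the page. A hedged sketch of that two-stage decision, using
hypothetical stub names (cpu_to_node_stub() and friends are
illustrations, not the kernel API):

/*
 * Two-stage migrate filter sketch: stamp the page with the current
 * fault, and only approve a migration when the previous fault either
 * does not exist or already came from this node.
 */
#include <stdbool.h>
#include <stdio.h>

#define CPUPID_UNSET	(-1)

struct page_stub {
	int last_cpupid;	/* stand-in for the page-flags field */
};

/* Illustrative topology: two cpus per node. */
static int cpu_to_node_stub(int cpu)
{
	return cpu / 2;
}

static bool should_migrate_here(struct page_stub *page, int thiscpu)
{
	int thisnid = cpu_to_node_stub(thiscpu);
	int last_cpupid = page->last_cpupid;

	/* Stamp the page with this fault (cf. page_cpupid_xchg_last()). */
	page->last_cpupid = thiscpu;	/* pid slice omitted for brevity */

	if (last_cpupid != CPUPID_UNSET &&
	    cpu_to_node_stub(last_cpupid) != thisnid)
		return false;	/* prior fault disagrees: record only */

	return true;		/* no prior stamp, or same node twice */
}

int main(void)
{
	struct page_stub page = { .last_cpupid = CPUPID_UNSET };

	printf("fault from cpu 0: migrate=%d\n", should_migrate_here(&page, 0));
	printf("fault from cpu 5: migrate=%d\n", should_migrate_here(&page, 5));
	return 0;
}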
diff --git a/mm/migrate.c b/mm/migrate.c
index 025d1e3d2ad2..ff537749d3b4 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1498,7 +1498,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
 					  __GFP_NOWARN) &
 					 ~GFP_IOFS, 0);
 	if (newpage)
-		page_nidpid_xchg_last(newpage, page_nidpid_last(page));
+		page_cpupid_xchg_last(newpage, page_cpupid_last(page));
 
 	return newpage;
 }
@@ -1675,7 +1675,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	if (!new_page)
 		goto out_fail;
 
-	page_nidpid_xchg_last(new_page, page_nidpid_last(page));
+	page_cpupid_xchg_last(new_page, page_cpupid_last(page));
 
 	isolated = numamigrate_isolate_page(pgdat, page);
 	if (!isolated) {
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 467de579784b..68562e92d50c 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -71,26 +71,26 @@ void __init mminit_verify_pageflags_layout(void)
 	unsigned long or_mask, add_mask;
 
 	shift = 8 * sizeof(unsigned long);
-	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH - LAST_NIDPID_SHIFT;
+	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH - LAST_CPUPID_SHIFT;
 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
-		"Section %d Node %d Zone %d Lastnidpid %d Flags %d\n",
+		"Section %d Node %d Zone %d Lastcpupid %d Flags %d\n",
 		SECTIONS_WIDTH,
 		NODES_WIDTH,
 		ZONES_WIDTH,
-		LAST_NIDPID_WIDTH,
+		LAST_CPUPID_WIDTH,
 		NR_PAGEFLAGS);
 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
-		"Section %d Node %d Zone %d Lastnidpid %d\n",
+		"Section %d Node %d Zone %d Lastcpupid %d\n",
 		SECTIONS_SHIFT,
 		NODES_SHIFT,
 		ZONES_SHIFT,
-		LAST_NIDPID_SHIFT);
+		LAST_CPUPID_SHIFT);
 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
-		"Section %lu Node %lu Zone %lu Lastnidpid %lu\n",
+		"Section %lu Node %lu Zone %lu Lastcpupid %lu\n",
 		(unsigned long)SECTIONS_PGSHIFT,
 		(unsigned long)NODES_PGSHIFT,
 		(unsigned long)ZONES_PGSHIFT,
-		(unsigned long)LAST_NIDPID_PGSHIFT);
+		(unsigned long)LAST_CPUPID_PGSHIFT);
 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
 		"Node/Zone ID: %lu -> %lu\n",
 		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
@@ -102,9 +102,9 @@ void __init mminit_verify_pageflags_layout(void)
 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
 		"Node not in page flags");
 #endif
-#ifdef LAST_NIDPID_NOT_IN_PAGE_FLAGS
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
-		"Last nidpid not in page flags");
+		"Last cpupid not in page flags");
 #endif
 
 	if (SECTIONS_WIDTH) {
diff --git a/mm/mmzone.c b/mm/mmzone.c
index 25bb477deb26..bf34fb8556db 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -97,20 +97,20 @@ void lruvec_init(struct lruvec *lruvec)
 		INIT_LIST_HEAD(&lruvec->lists[lru]);
 }
 
-#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_NIDPID_NOT_IN_PAGE_FLAGS)
-int page_nidpid_xchg_last(struct page *page, int nidpid)
+#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
+int page_cpupid_xchg_last(struct page *page, int cpupid)
 {
 	unsigned long old_flags, flags;
-	int last_nidpid;
+	int last_cpupid;
 
 	do {
 		old_flags = flags = page->flags;
-		last_nidpid = page_nidpid_last(page);
+		last_cpupid = page_cpupid_last(page);
 
-		flags &= ~(LAST_NIDPID_MASK << LAST_NIDPID_PGSHIFT);
-		flags |= (nidpid & LAST_NIDPID_MASK) << LAST_NIDPID_PGSHIFT;
+		flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
+		flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
 	} while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));
 
-	return last_nidpid;
+	return last_cpupid;
 }
 #endif
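page_cpupid_xchg_last() above is a lock-free read-modify-write: it
recomputes the cpupid bits inside page->flags and retries the
cmpxchg() until no other CPU has changed the word in between. A hedged
userspace analogue using C11 atomics (the field shift and mask are
illustrative, not the kernel's page-flags layout):

/*
 * Compare-and-swap retry loop that replaces one bitfield inside a
 * shared flags word without taking a lock, returning the old field.
 */
#include <stdatomic.h>
#include <stdio.h>

#define CPUPID_SHIFT	8
#define CPUPID_MASK	0xffffUL

static _Atomic unsigned long flags;

static unsigned long cpupid_xchg_last(unsigned long cpupid)
{
	unsigned long old_flags, new_flags, last_cpupid;

	do {
		old_flags = atomic_load(&flags);
		last_cpupid = (old_flags >> CPUPID_SHIFT) & CPUPID_MASK;

		new_flags = old_flags & ~(CPUPID_MASK << CPUPID_SHIFT);
		new_flags |= (cpupid & CPUPID_MASK) << CPUPID_SHIFT;
		/* Retry if another thread changed flags in between. */
	} while (!atomic_compare_exchange_weak(&flags, &old_flags, new_flags));

	return last_cpupid;
}

int main(void)
{
	cpupid_xchg_last(0x1234);
	printf("previous cpupid: %#lx\n", cpupid_xchg_last(0x5678));
	return 0;
}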
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 5aae39017d6d..9a74855f1241 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -37,14 +37,14 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 
 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, unsigned long end, pgprot_t newprot,
-		int dirty_accountable, int prot_numa, bool *ret_all_same_nidpid)
+		int dirty_accountable, int prot_numa, bool *ret_all_same_cpupid)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *pte, oldpte;
 	spinlock_t *ptl;
 	unsigned long pages = 0;
-	bool all_same_nidpid = true;
-	int last_nid = -1;
+	bool all_same_cpupid = true;
+	int last_cpu = -1;
 	int last_pid = -1;
 
 	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
@@ -64,17 +64,17 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 
 			page = vm_normal_page(vma, addr, oldpte);
 			if (page) {
-				int nidpid = page_nidpid_last(page);
-				int this_nid = nidpid_to_nid(nidpid);
-				int this_pid = nidpid_to_pid(nidpid);
+				int cpupid = page_cpupid_last(page);
+				int this_cpu = cpupid_to_cpu(cpupid);
+				int this_pid = cpupid_to_pid(cpupid);
 
-				if (last_nid == -1)
-					last_nid = this_nid;
+				if (last_cpu == -1)
+					last_cpu = this_cpu;
 				if (last_pid == -1)
 					last_pid = this_pid;
-				if (last_nid != this_nid ||
+				if (last_cpu != this_cpu ||
 				    last_pid != this_pid) {
-					all_same_nidpid = false;
+					all_same_cpupid = false;
 				}
 
 				if (!pte_numa(oldpte)) {
@@ -115,7 +115,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(pte - 1, ptl);
 
-	*ret_all_same_nidpid = all_same_nidpid;
+	*ret_all_same_cpupid = all_same_cpupid;
 	return pages;
 }
 
@@ -142,7 +142,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 	pmd_t *pmd;
 	unsigned long next;
 	unsigned long pages = 0;
-	bool all_same_nidpid;
+	bool all_same_cpupid;
 
 	pmd = pmd_offset(pud, addr);
 	do {
@@ -168,7 +168,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
 		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
-				 dirty_accountable, prot_numa, &all_same_nidpid);
+				 dirty_accountable, prot_numa, &all_same_cpupid);
 		pages += this_pages;
 
 		/*
@@ -177,7 +177,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		 * node. This allows a regular PMD to be handled as one fault
 		 * and effectively batches the taking of the PTL
 		 */
-		if (prot_numa && this_pages && all_same_nidpid)
+		if (prot_numa && this_pages && all_same_cpupid)
 			change_pmd_protnuma(vma->vm_mm, addr, pmd);
 	} while (pmd++, addr = next, addr != end);
 
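The batching decision above folds a whole PMD back into a single NUMA
hinting fault only when every examined pte recorded the same cpu,pid
pair. A hedged sketch of that same-stamp scan, with an int array
standing in for the ptes of one pmd (-1 marks a hole with no page):

/*
 * Scan a range of last-fault stamps and report whether every present
 * entry names the same cpu,pid pair, so the caller can treat the
 * whole range as one unit.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool all_same_cpupid(const int *stamps, size_t n)
{
	int last = -1;
	size_t i;

	for (i = 0; i < n; i++) {
		if (stamps[i] < 0)
			continue;		/* hole: nothing to compare */
		if (last == -1)
			last = stamps[i];	/* first stamp seen */
		else if (stamps[i] != last)
			return false;		/* mixed access pattern */
	}
	return true;
}

int main(void)
{
	int one_task[]  = { 0x103, -1, 0x103, 0x103 };	/* single faulter */
	int two_tasks[] = { 0x103, 0x207, 0x103, -1 };	/* mixed faulters */

	printf("one task: %d, two tasks: %d\n",
	       all_same_cpupid(one_task, 4), all_same_cpupid(two_tasks, 4));
	return 0;
}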
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 89bedd0e4cad..73d812f16dde 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -626,7 +626,7 @@ static inline int free_pages_check(struct page *page)
 		bad_page(page);
 		return 1;
 	}
-	page_nidpid_reset_last(page);
+	page_cpupid_reset_last(page);
 	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
 		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
 	return 0;
@@ -4015,7 +4015,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		mminit_verify_page_links(page, zone, nid, pfn);
 		init_page_count(page);
 		page_mapcount_reset(page);
-		page_nidpid_reset_last(page);
+		page_cpupid_reset_last(page);
 		SetPageReserved(page);
 		/*
 		 * Mark the block movable so that blocks are reserved for