about summary refs log tree commit diff stats
path: root/mm/mempolicy.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r--  mm/mempolicy.c  21
1 files changed, 14 insertions, 7 deletions
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index c94e58b192c3..36f42573a335 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -93,6 +93,8 @@
93#include <asm/tlbflush.h> 93#include <asm/tlbflush.h>
94#include <asm/uaccess.h> 94#include <asm/uaccess.h>
95 95
96#include "internal.h"
97
96/* Internal flags */ 98/* Internal flags */
97#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */ 99#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
98#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */ 100#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
@@ -762,8 +764,11 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
762 /* 764 /*
763 * Avoid migrating a page that is shared with others. 765 * Avoid migrating a page that is shared with others.
764 */ 766 */
765 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) 767 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
766 isolate_lru_page(page, pagelist); 768 if (!isolate_lru_page(page)) {
769 list_add_tail(&page->lru, pagelist);
770 }
771 }
767} 772}
768 773
769static struct page *new_node_page(struct page *page, unsigned long node, int **x) 774static struct page *new_node_page(struct page *page, unsigned long node, int **x)
@@ -803,7 +808,6 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
803int do_migrate_pages(struct mm_struct *mm, 808int do_migrate_pages(struct mm_struct *mm,
804 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags) 809 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
805{ 810{
806 LIST_HEAD(pagelist);
807 int busy = 0; 811 int busy = 0;
808 int err = 0; 812 int err = 0;
809 nodemask_t tmp; 813 nodemask_t tmp;
@@ -1481,7 +1485,7 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1481 1485
1482 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) { 1486 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1483 zl = node_zonelist(interleave_nid(*mpol, vma, addr, 1487 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1484 HPAGE_SHIFT), gfp_flags); 1488 huge_page_shift(hstate_vma(vma))), gfp_flags);
1485 } else { 1489 } else {
1486 zl = policy_zonelist(gfp_flags, *mpol); 1490 zl = policy_zonelist(gfp_flags, *mpol);
1487 if ((*mpol)->mode == MPOL_BIND) 1491 if ((*mpol)->mode == MPOL_BIND)
@@ -2198,7 +2202,7 @@ static void gather_stats(struct page *page, void *private, int pte_dirty)
2198 if (PageSwapCache(page)) 2202 if (PageSwapCache(page))
2199 md->swapcache++; 2203 md->swapcache++;
2200 2204
2201 if (PageActive(page)) 2205 if (PageActive(page) || PageUnevictable(page))
2202 md->active++; 2206 md->active++;
2203 2207
2204 if (PageWriteback(page)) 2208 if (PageWriteback(page))
@@ -2220,9 +2224,12 @@ static void check_huge_range(struct vm_area_struct *vma,
2220{ 2224{
2221 unsigned long addr; 2225 unsigned long addr;
2222 struct page *page; 2226 struct page *page;
2227 struct hstate *h = hstate_vma(vma);
2228 unsigned long sz = huge_page_size(h);
2223 2229
2224 for (addr = start; addr < end; addr += HPAGE_SIZE) { 2230 for (addr = start; addr < end; addr += sz) {
2225 pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK); 2231 pte_t *ptep = huge_pte_offset(vma->vm_mm,
2232 addr & huge_page_mask(h));
2226 pte_t pte; 2233 pte_t pte;
2227 2234
2228 if (!ptep) 2235 if (!ptep)