author		Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2013-12-05 13:38:22 -0500
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2013-12-08 19:40:23 -0500
commit		5877231f646bbd6d1d545e7af83aaa6e6b746013
tree		d0ca877cb929b73087aafa19ca884409391f0ebc
parent		2c49195b6aedd21ff1cd1e095fab9866fba3411b
mm: Move change_prot_numa outside CONFIG_ARCH_USES_NUMA_PROT_NONE
change_prot_numa should work even if _PAGE_NUMA != _PAGE_PROTNONE.
On archs like ppc64 that don't use _PAGE_PROTNONE and also have a
separate hardware page table outside the Linux page table, we just
need to make sure that calling change_prot_numa flushes the hardware
page table entry, so that the next page access results in a NUMA
fault.
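As a rough illustration (not part of this commit), the per-PTE effect of
the prot_numa path looks something like the sketch below. pte_numa() and
pte_mknuma() are the 3.13-era generic helpers, and flush_tlb_page() here
merely stands in for whatever hardware page table invalidation a
ppc64-style arch needs:

	/*
	 * Sketch only: what change_protection(..., prot_numa = 1) boils
	 * down to per PTE.  On _PAGE_PROTNONE archs pte_mknuma() flips a
	 * bit in the Linux PTE; on ppc64 the stale copy in the hardware
	 * hash table must also be invalidated so the next access faults.
	 */
	static void mark_pte_numa(struct vm_area_struct *vma,
				  unsigned long addr, pte_t *ptep)
	{
		pte_t pte = *ptep;

		if (!pte_numa(pte)) {
			set_pte_at(vma->vm_mm, addr, ptep, pte_mknuma(pte));
			/* drop any hardware PTE/TLB copy (arch-specific) */
			flush_tlb_page(vma, addr);
		}
	}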
We still need to make sure we use the NUMA faulting logic only when
CONFIG_NUMA_BALANCING is set. This implies that migrate-on-fault
(lazy migration) via mbind will only work if CONFIG_NUMA_BALANCING
is set.
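For reference, a hedged userspace sketch of the migrate-on-fault usage
this enables: bind_lazy, the buffer, and the target node are made up for
illustration, and MPOL_MF_LAZY comes from the 3.13-era uapi headers:

	#include <numaif.h>		/* mbind(), MPOL_* (libnuma) */

	#ifndef MPOL_MF_LAZY
	#define MPOL_MF_LAZY	(1 << 3)	/* uapi/linux/mempolicy.h */
	#endif

	/*
	 * Mark [buf, buf + len) for lazy migration to node 1: the kernel
	 * calls change_prot_numa() on the range, and pages move later,
	 * one NUMA hinting fault at a time; this only works on kernels
	 * built with CONFIG_NUMA_BALANCING.
	 */
	static int bind_lazy(void *buf, unsigned long len)
	{
		unsigned long nodemask = 1UL << 1;	/* target node 1 */

		return mbind(buf, len, MPOL_BIND, &nodemask,
			     sizeof(nodemask) * 8,
			     MPOL_MF_MOVE | MPOL_MF_LAZY);
	}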
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
 include/linux/mm.h | 2 +-
 mm/mempolicy.c     | 5 ++---
 2 files changed, 3 insertions(+), 4 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1cedd000cf29..a7b4e310bf42 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1842,7 +1842,7 @@ static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
 }
 #endif

-#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
+#ifdef CONFIG_NUMA_BALANCING
 unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
 #endif
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index eca4a3129129..9f73b29d304d 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -613,7 +613,7 @@ static inline int queue_pages_pgd_range(struct vm_area_struct *vma,
	return 0;
 }

-#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
+#ifdef CONFIG_NUMA_BALANCING
 /*
  * This is used to mark a range of virtual addresses to be inaccessible.
  * These are later cleared by a NUMA hinting fault. Depending on these
@@ -627,7 +627,6 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
 {
	int nr_updated;
-	BUILD_BUG_ON(_PAGE_NUMA != _PAGE_PROTNONE);

	nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
	if (nr_updated)
@@ -641,7 +640,7 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma,
 {
	return 0;
 }
-#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
+#endif /* CONFIG_NUMA_BALANCING */

 /*
  * Walk through page tables and collect pages to be migrated.