author     Mel Gorman <mgorman@suse.de>                     2015-02-12 17:58:35 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-02-12 21:54:08 -0500
commit     e944fd67b625c02bda4a78ddf85e413c5e401474
tree       73430e339d1d27b315b3789bc0a271882ecf9174 /mm/huge_memory.c
parent     21d9ee3eda7792c45880b2f11bff8e95c9a061fb
mm: numa: do not trap faults on the huge zero page
Faults on the huge zero page are pointless and there is a BUG_ON to catch
them during fault time. This patch reintroduces a check that avoids
marking the zero page PAGE_NONE.
Signed-off-by: Mel Gorman <mgorman@suse.de>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Dave Jones <davej@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Kirill Shutemov <kirill.shutemov@linux.intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
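The changelog above refers to a BUG_ON that catches huge-zero-page faults at fault time. For context only, that check looks roughly like the sketch below; it is not part of this patch, and the handler name and surrounding details (do_huge_pmd_numa_page, pmd_page, is_huge_zero_page) are assumed from the mm/huge_memory.c of this period.

	/*
	 * Sketch only, not part of this diff: the NUMA hinting fault handler
	 * is never expected to see the huge zero page, hence the BUG_ON the
	 * changelog mentions. Migration handling is elided.
	 */
	int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
				  unsigned long addr, pmd_t pmd, pmd_t *pmdp)
	{
		struct page *page = pmd_page(pmd);

		/* Faults on the huge zero page are pointless; catch them hard. */
		BUG_ON(is_huge_zero_page(page));

		/* ... NUMA hinting fault / migrate-on-fault handling elided ... */
		return 0;
	}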
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--  mm/huge_memory.c  13
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index cb9b3e847dac..8e791a3db6b6 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1471,7 +1471,7 @@ out:
  * - HPAGE_PMD_NR is protections changed and TLB flush necessary
  */
 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-		unsigned long addr, pgprot_t newprot)
+		unsigned long addr, pgprot_t newprot, int prot_numa)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	spinlock_t *ptl;
@@ -1479,6 +1479,17 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 
 	if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
 		pmd_t entry;
+
+		/*
+		 * Avoid trapping faults against the zero page. The read-only
+		 * data is likely to be read-cached on the local CPU and
+		 * local/remote hits to the zero page are not interesting.
+		 */
+		if (prot_numa && is_huge_zero_pmd(*pmd)) {
+			spin_unlock(ptl);
+			return 0;
+		}
+
 		ret = 1;
 		entry = pmdp_get_and_clear_notify(mm, addr, pmd);
 		entry = pmd_modify(entry, newprot);
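With the extra prot_numa argument, a caller can tell the NUMA hinting pass apart from an ordinary mprotect() and react to the return value, which per the comment above is HPAGE_PMD_NR when protections changed and a TLB flush is necessary. As an illustration only (the caller is outside this diff, and the exact shape of change_pmd_range() in mm/mprotect.c shown here is an assumption), the call site would look roughly like:

	if (pmd_trans_huge(*pmd)) {
		if (next - addr != HPAGE_PMD_SIZE) {
			split_huge_page_pmd(vma, addr, pmd);
		} else {
			int nr_ptes = change_huge_pmd(vma, pmd, addr,
						      newprot, prot_numa);

			if (nr_ptes) {
				/*
				 * HPAGE_PMD_NR means protections were changed
				 * and a TLB flush is necessary.
				 */
				if (nr_ptes == HPAGE_PMD_NR)
					pages += HPAGE_PMD_NR;
				/* The huge pmd was handled above. */
				continue;
			}
		}
	}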