author     Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>    2010-05-27 20:29:21 -0400
committer  Andi Kleen <ak@linux.intel.com>                2010-08-11 03:23:01 -0400
commit     fd6a03edd271cf2d69a61aa8df98dd05fa6b9afd (patch)
tree       32d3e89f1f66a3d9b68bccc88fec548acc361bf5 /mm/hugetlb.c
parent     93f70f900da36fbc19c13c2aa04b2e468c8d00fb (diff)
HWPOISON, hugetlb: detect hwpoison in hugetlb code
This patch blocks access to a hwpoisoned hugepage and also blocks
unmapping of it. (An illustrative userspace sketch of the visible effect
follows the sign-off tags below.)
Dependency:
"HWPOISON, hugetlb: enable error handling path for hugepage"
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Fengguang Wu <fengguang.wu@intel.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
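
As an aside (not part of the commit), the visible effect of this change can be
exercised from userspace: once a hugetlbfs-backed page has been poisoned, the
next touch should be answered with SIGBUS rather than silently reusing the bad
memory. The sketch below is a hypothetical test, assuming a 2MB hugepage size,
root privileges (MADV_HWPOISON needs CAP_SYS_ADMIN), CONFIG_MEMORY_FAILURE=y,
and at least one hugepage reserved via /proc/sys/vm/nr_hugepages; the fallback
constants for MAP_HUGETLB and MADV_HWPOISON are assumptions for older headers.

/*
 * Hypothetical test, not part of this patch: poison the first base page
 * of a hugetlbfs-backed mapping and check that the next access is blocked.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <sys/mman.h>

#ifndef MAP_HUGETLB
#define MAP_HUGETLB	0x40000		/* assumed value for x86 */
#endif
#ifndef MADV_HWPOISON
#define MADV_HWPOISON	100
#endif

#define HPAGE_SIZE	(2UL << 20)	/* assumes 2MB hugepages */

static void sigbus_handler(int sig)
{
	/* VM_FAULT_HWPOISON from the hugetlb fault path is delivered as SIGBUS. */
	static const char msg[] = "SIGBUS: access to poisoned hugepage was blocked\n";
	(void)sig;
	write(STDOUT_FILENO, msg, sizeof(msg) - 1);
	_exit(0);
}

int main(void)
{
	char *p;

	signal(SIGBUS, sigbus_handler);

	p = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return 1;
	}
	memset(p, 0, HPAGE_SIZE);		/* fault the hugepage in */

	/* Hardware-poison the page backing the start of the mapping. */
	if (madvise(p, getpagesize(), MADV_HWPOISON)) {
		perror("madvise(MADV_HWPOISON)");
		return 1;
	}

	p[0] = 1;	/* expected to raise SIGBUS on a patched kernel */
	printf("unexpected: poisoned hugepage is still accessible\n");
	return 1;
}

On a kernel carrying this patch the final write is expected to trap into the
SIGBUS handler, because hugetlb_fault()/hugetlb_no_page() now return
VM_FAULT_HWPOISON for the poisoned page.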
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--   mm/hugetlb.c   40
1 file changed, 40 insertions, 0 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 8c163f64cf10..4c2efc0f3919 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -19,6 +19,8 @@
 #include <linux/sysfs.h>
 #include <linux/slab.h>
 #include <linux/rmap.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -2149,6 +2151,19 @@ nomem:
 	return -ENOMEM;
 }
 
+static int is_hugetlb_entry_hwpoisoned(pte_t pte)
+{
+	swp_entry_t swp;
+
+	if (huge_pte_none(pte) || pte_present(pte))
+		return 0;
+	swp = pte_to_swp_entry(pte);
+	if (non_swap_entry(swp) && is_hwpoison_entry(swp)) {
+		return 1;
+	} else
+		return 0;
+}
+
 void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			    unsigned long end, struct page *ref_page)
 {
@@ -2207,6 +2222,12 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 		if (huge_pte_none(pte))
 			continue;
 
+		/*
+		 * HWPoisoned hugepage is already unmapped and dropped reference
+		 */
+		if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
+			continue;
+
 		page = pte_page(pte);
 		if (pte_dirty(pte))
 			set_page_dirty(page);
@@ -2491,6 +2512,18 @@ retry:
 	}
 
 	/*
+	 * Since memory error handler replaces pte into hwpoison swap entry
+	 * at the time of error handling, a process which reserved but not have
+	 * the mapping to the error hugepage does not have hwpoison swap entry.
+	 * So we need to block accesses from such a process by checking
+	 * PG_hwpoison bit here.
+	 */
+	if (unlikely(PageHWPoison(page))) {
+		ret = VM_FAULT_HWPOISON;
+		goto backout_unlocked;
+	}
+
+	/*
 	 * If we are going to COW a private mapping later, we examine the
 	 * pending reservations for this page now. This will ensure that
 	 * any allocations necessary to record that reservation occur outside
@@ -2544,6 +2577,13 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	static DEFINE_MUTEX(hugetlb_instantiation_mutex);
 	struct hstate *h = hstate_vma(vma);
 
+	ptep = huge_pte_offset(mm, address);
+	if (ptep) {
+		entry = huge_ptep_get(ptep);
+		if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
+			return VM_FAULT_HWPOISON;
+	}
+
 	ptep = huge_pte_alloc(mm, address, huge_page_size(h));
 	if (!ptep)
 		return VM_FAULT_OOM;
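
The new is_hugetlb_entry_hwpoisoned() helper (added above at mm/hugetlb.c
line 2154 and used in both the unmap and fault paths) relies on the swap-entry
encoding: a huge pte that is neither none nor present holds a swp_entry_t, and
the memory-failure handler installs one whose type field is the special
hwpoison type, which non_swap_entry() and is_hwpoison_entry() then recognise.
The stand-alone sketch below models only that idea; the MODEL_* names, field
widths and values are invented for illustration and are not the kernel's
definitions (those live in <linux/swapops.h> and <linux/swap.h>).

/*
 * Simplified stand-alone model of the hwpoison swap-entry check.
 * The layout and the MODEL_* constants are invented for illustration;
 * they are not the kernel's real encoding.
 */
#include <stdio.h>

#define MODEL_TYPE_SHIFT	58	/* invented: type field in the top bits */
#define MODEL_MAX_SWAPFILES	29	/* invented: real swap devices use types below this */
#define MODEL_TYPE_HWPOISON	29	/* invented: special type reserved for poison */

typedef struct { unsigned long val; } model_swp_entry_t;

static model_swp_entry_t model_make_entry(unsigned long type, unsigned long offset)
{
	model_swp_entry_t e = { (type << MODEL_TYPE_SHIFT) | offset };
	return e;
}

static unsigned long model_type(model_swp_entry_t e)
{
	return e.val >> MODEL_TYPE_SHIFT;
}

/* Mirrors non_swap_entry(): a type at or above MAX_SWAPFILES is special. */
static int model_non_swap_entry(model_swp_entry_t e)
{
	return model_type(e) >= MODEL_MAX_SWAPFILES;
}

/* Mirrors is_hwpoison_entry(): the special type reserved for poison. */
static int model_is_hwpoison_entry(model_swp_entry_t e)
{
	return model_type(e) == MODEL_TYPE_HWPOISON;
}

int main(void)
{
	/* What the memory-failure handler conceptually installs for a bad pfn. */
	model_swp_entry_t poison = model_make_entry(MODEL_TYPE_HWPOISON, 0x1234);
	/* An ordinary swapped-out entry on swap device 0. */
	model_swp_entry_t swapped = model_make_entry(0, 0x1234);

	printf("poison:  non_swap=%d hwpoison=%d\n",
	       model_non_swap_entry(poison), model_is_hwpoison_entry(poison));
	printf("swapped: non_swap=%d hwpoison=%d\n",
	       model_non_swap_entry(swapped), model_is_hwpoison_entry(swapped));
	return 0;
}

Ordinary swapped-out entries use type values below MAX_SWAPFILES, so only the
reserved special types such as the hwpoison one satisfy both checks; that is
what lets the unmap path skip the already-handled hugepage and the fault path
refuse it with VM_FAULT_HWPOISON.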