author	Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>	2014-06-23 16:22:03 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-23 19:47:43 -0400
commit	4a705fef986231a3e7a6b1a6d3c37025f021f49f (patch)
tree	540d2a129fa06a8441050071a2a2fa53f953bc41 /mm
parent	13ace4d0d9db40e10ecd66dfda14e297571be813 (diff)
hugetlb: fix copy_hugetlb_page_range() to handle migration/hwpoisoned entry
There's a race between fork() and hugepage migration; as a result we try to
"dereference" a swap entry as a normal pte, causing a kernel panic. The cause
of the problem is that copy_hugetlb_page_range() can't handle the "swap entry"
family (migration entry and hwpoisoned entry), so let's fix it.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: <stable@vger.kernel.org>	[2.6.37+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
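In short, the old copy loop treated every non-empty huge pte as a present page, so a migration or hwpoisoned (swap-family) entry left behind by concurrent hugepage migration was handed to pte_page() as if it held a real pfn. The standalone sketch below models that failure mode and the shape of the fix; it is an illustrative userspace model, not kernel code, and the enum, struct, and function names (model_pte, copy_pte_old, copy_pte_new, etc.) are invented for this example.

/* Standalone model of the bug: a "pte" is either empty, a present mapping,
 * or a swap-family entry (migration/hwpoison) that must never be treated
 * as a present page. The old loop only tested for "none", so a migration
 * entry fell through to the present-page path. */
#include <stdio.h>
#include <stdbool.h>

enum pte_kind { PTE_NONE, PTE_PRESENT, PTE_MIGRATION };

struct model_pte { enum pte_kind kind; unsigned long pfn_or_offset; };

static bool pte_none_m(struct model_pte p)    { return p.kind == PTE_NONE; }
static bool pte_present_m(struct model_pte p) { return p.kind == PTE_PRESENT; }

/* Old logic: anything that is not "none" is assumed to be present. */
static void copy_pte_old(struct model_pte src)
{
	if (!pte_none_m(src)) {
		/* The kernel would call pte_page() here; on a migration
		 * entry the "pfn" is really a swap offset -> bogus page. */
		printf("old: dereferencing pfn %lu (%s)\n", src.pfn_or_offset,
		       pte_present_m(src) ? "ok" : "BUG: swap-family entry");
	}
}

/* Fixed logic: recognize the swap-entry family and copy it verbatim. */
static void copy_pte_new(struct model_pte src)
{
	if (pte_none_m(src)) {
		;	/* skip none entry */
	} else if (!pte_present_m(src)) {
		printf("new: copying swap-family entry as-is\n");
	} else {
		printf("new: dereferencing pfn %lu (ok)\n", src.pfn_or_offset);
	}
}

int main(void)
{
	struct model_pte migrating = { PTE_MIGRATION, 42 };

	copy_pte_old(migrating);	/* reproduces the mistaken dereference */
	copy_pte_new(migrating);	/* handles it the way the patch does */
	return 0;
}

Under these assumptions, copy_pte_old() takes the present-page path for the migration entry, while copy_pte_new() copies it verbatim, which is the behaviour the patch gives copy_hugetlb_page_range().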
Diffstat (limited to 'mm')
-rw-r--r--	mm/hugetlb.c	71
1 file changed, 43 insertions(+), 28 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 226910cb7c9b..2024bbd573d2 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2520,6 +2520,31 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
 	update_mmu_cache(vma, address, ptep);
 }
 
+static int is_hugetlb_entry_migration(pte_t pte)
+{
+	swp_entry_t swp;
+
+	if (huge_pte_none(pte) || pte_present(pte))
+		return 0;
+	swp = pte_to_swp_entry(pte);
+	if (non_swap_entry(swp) && is_migration_entry(swp))
+		return 1;
+	else
+		return 0;
+}
+
+static int is_hugetlb_entry_hwpoisoned(pte_t pte)
+{
+	swp_entry_t swp;
+
+	if (huge_pte_none(pte) || pte_present(pte))
+		return 0;
+	swp = pte_to_swp_entry(pte);
+	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
+		return 1;
+	else
+		return 0;
+}
 
 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 			    struct vm_area_struct *vma)
@@ -2559,10 +2584,26 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 		dst_ptl = huge_pte_lock(h, dst, dst_pte);
 		src_ptl = huge_pte_lockptr(h, src, src_pte);
 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
-		if (!huge_pte_none(huge_ptep_get(src_pte))) {
+		entry = huge_ptep_get(src_pte);
+		if (huge_pte_none(entry)) { /* skip none entry */
+			;
+		} else if (unlikely(is_hugetlb_entry_migration(entry) ||
+				    is_hugetlb_entry_hwpoisoned(entry))) {
+			swp_entry_t swp_entry = pte_to_swp_entry(entry);
+
+			if (is_write_migration_entry(swp_entry) && cow) {
+				/*
+				 * COW mappings require pages in both
+				 * parent and child to be set to read.
+				 */
+				make_migration_entry_read(&swp_entry);
+				entry = swp_entry_to_pte(swp_entry);
+				set_huge_pte_at(src, addr, src_pte, entry);
+			}
+			set_huge_pte_at(dst, addr, dst_pte, entry);
+		} else {
 			if (cow)
 				huge_ptep_set_wrprotect(src, addr, src_pte);
-			entry = huge_ptep_get(src_pte);
 			ptepage = pte_page(entry);
 			get_page(ptepage);
 			page_dup_rmap(ptepage);
@@ -2578,32 +2619,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 	return ret;
 }
 
-static int is_hugetlb_entry_migration(pte_t pte)
-{
-	swp_entry_t swp;
-
-	if (huge_pte_none(pte) || pte_present(pte))
-		return 0;
-	swp = pte_to_swp_entry(pte);
-	if (non_swap_entry(swp) && is_migration_entry(swp))
-		return 1;
-	else
-		return 0;
-}
-
-static int is_hugetlb_entry_hwpoisoned(pte_t pte)
-{
-	swp_entry_t swp;
-
-	if (huge_pte_none(pte) || pte_present(pte))
-		return 0;
-	swp = pte_to_swp_entry(pte);
-	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
-		return 1;
-	else
-		return 0;
-}
-
 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			    unsigned long start, unsigned long end,
 			    struct page *ref_page)