author     Hugh Dickins <hugh@veritas.com>                  2009-01-06 17:40:10 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-01-06 18:59:07 -0500
commit     2509ef26db4699a5d9fa876e90ddfc107afcab84 (patch)
tree       09e65185142c60b5d766d8b75f3cbc8a65de6a39 /mm/memory.c
parent     22b31eec63e5f2e219a3ee15f456897272bc73e8 (diff)
badpage: zap print_bad_pte on swap and file
Complete zap_pte_range()'s coverage of bad pagetable entries by calling
print_bad_pte() on a pte_file in a linear vma and on a bad swap entry.
That needs free_swap_and_cache() to tell it, which will also have shown
one of those "swap_free" errors (but with much less information).

Similar checks in fork's copy_one_pte()? No, that would be more noisy
than helpful: we'll see them when parent and child exec or exit.

Where do_nonlinear_fault() calls print_bad_pte(): omit the !VM_CAN_NONLINEAR
case, that could only be a bug in sys_remap_file_pages(), not a bad pte.

VM_FAULT_OOM rather than VM_FAULT_SIGBUS? Well, okay, that is consistent
with what happens if do_swap_page() operates on a bad swap entry; but don't
we have patches to be more careful about killing when VM_FAULT_OOM?
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
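The first hunk of the diff below changes what zap_pte_range() does with a
non-present pte: a pte_file entry found in a vma that was never made
nonlinear, or a swap entry that free_swap_and_cache() cannot make sense of,
is now reported through print_bad_pte(). The following standalone C sketch
models only that decision and is not kernel code; fake_pte, VMA_NONLINEAR,
report_bad_pte() and the swap_entry_freed field are hypothetical stand-ins
for pte_t, VM_NONLINEAR, print_bad_pte() and the return value of
free_swap_and_cache().

#include <stdbool.h>
#include <stdio.h>

#define VMA_NONLINEAR 0x1u              /* stand-in for VM_NONLINEAR */

struct fake_pte {                       /* stand-in for a non-present pte_t */
        bool is_file;                   /* what pte_file(ptent) would report */
        bool swap_entry_freed;          /* what free_swap_and_cache() would return */
};

static void report_bad_pte(const char *why)   /* stand-in for print_bad_pte() */
{
        printf("BUG: Bad page map: %s\n", why);
}

/* The decision the patched zap_pte_range() makes for one non-present pte. */
static void zap_nonpresent_pte(struct fake_pte pte, unsigned int vm_flags)
{
        if (pte.is_file) {
                /* file pte in a vma never touched by remap_file_pages(): corrupt */
                if (!(vm_flags & VMA_NONLINEAR))
                        report_bad_pte("pte_file in linear vma");
        } else if (!pte.swap_entry_freed) {
                /* free_swap_and_cache() could not resolve the swap entry */
                report_bad_pte("bad swap entry");
        }
        /* ...either way the entry is then cleared, as before the patch */
}

int main(void)
{
        struct fake_pte file_pte = { .is_file = true,  .swap_entry_freed = true  };
        struct fake_pte bad_swap = { .is_file = false, .swap_entry_freed = false };

        zap_nonpresent_pte(file_pte, 0);              /* reported: file pte, linear vma */
        zap_nonpresent_pte(bad_swap, VMA_NONLINEAR);  /* reported: bad swap entry */
        return 0;
}

Only the two failure cases print anything: a healthy swap entry is freed
silently and the pte is cleared either way, which is exactly the behaviour
the hunk preserves.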
Diffstat (limited to 'mm/memory.c')
-rw-r--r-- | mm/memory.c | 11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 890095f5f36d..b273cc12b15d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -810,8 +810,12 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 		 */
 		if (unlikely(details))
 			continue;
-		if (!pte_file(ptent))
-			free_swap_and_cache(pte_to_swp_entry(ptent));
+		if (pte_file(ptent)) {
+			if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
+				print_bad_pte(vma, addr, ptent, NULL);
+		} else if
+		  (unlikely(!free_swap_and_cache(pte_to_swp_entry(ptent))))
+			print_bad_pte(vma, addr, ptent, NULL);
 		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
 	} while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
 
@@ -2707,8 +2711,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
 		return 0;
 
-	if (unlikely(!(vma->vm_flags & VM_NONLINEAR) ||
-			!(vma->vm_flags & VM_CAN_NONLINEAR))) {
+	if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
 		/*
 		 * Page table corrupted: show pte and kill process.
 		 */
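The second hunk above drops the !VM_CAN_NONLINEAR half of
do_nonlinear_fault()'s sanity check, leaving only the VM_NONLINEAR test as
the sign of a corrupted page table. The sketch below models just that
surviving test and its OOM-style result, again as standalone C rather than
kernel code; handle_nonlinear_fault(), FAULT_OK, FAULT_OOM and
VMA_NONLINEAR are hypothetical stand-ins for the real fault handler,
VM_FAULT_OOM and VM_NONLINEAR.

#include <stdio.h>

#define VMA_NONLINEAR 0x1u                    /* stand-in for VM_NONLINEAR */

enum fault_result { FAULT_OK, FAULT_OOM };    /* FAULT_OOM mirrors VM_FAULT_OOM */

/* Only the corruption check kept by the patch; the rest of the path is elided. */
static enum fault_result handle_nonlinear_fault(unsigned int vm_flags)
{
        if (!(vm_flags & VMA_NONLINEAR)) {
                /*
                 * Page table corrupted: show pte and kill process.
                 */
                printf("BUG: Bad page map: file pte in linear vma\n");
                return FAULT_OOM;
        }
        /* ...normal nonlinear fault handling would continue here... */
        return FAULT_OK;
}

int main(void)
{
        handle_nonlinear_fault(0);              /* corrupted: reported, OOM-style return */
        handle_nonlinear_fault(VMA_NONLINEAR);  /* sane nonlinear vma: handled normally */
        return 0;
}

Whether returning the OOM-style value is the right choice here is exactly
the question the commit message leaves open.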