path: root/mm/swapfile.c
author    Hugh Dickins <hugh@veritas.com>    2009-01-06 17:40:10 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2009-01-06 18:59:07 -0500
commit    2509ef26db4699a5d9fa876e90ddfc107afcab84 (patch)
tree      09e65185142c60b5d766d8b75f3cbc8a65de6a39 /mm/swapfile.c
parent    22b31eec63e5f2e219a3ee15f456897272bc73e8 (diff)
badpage: zap print_bad_pte on swap and file
Complete zap_pte_range()'s coverage of bad pagetable entries by calling
print_bad_pte() on a pte_file in a linear vma and on a bad swap entry.
That needs free_swap_and_cache() to tell it, which will also have shown
one of those "swap_free" errors (but with much less information).

Similar checks in fork's copy_one_pte()?  No, that would be more noisy
than helpful: we'll see them when parent and child exec or exit.

Where do_nonlinear_fault() calls print_bad_pte(): omit the !VM_CAN_NONLINEAR
case, that could only be a bug in sys_remap_file_pages(), not a bad pte.
VM_FAULT_OOM rather than VM_FAULT_SIGBUS?  Well, okay, that is consistent
with what happens if do_swap_page() operates on a bad swap entry; but don't
we have patches to be more careful about killing when VM_FAULT_OOM?

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
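For context, a minimal sketch of the caller side the message describes: zap_pte_range()
in mm/memory.c (not part of the hunks below) can now test free_swap_and_cache()'s return
value and report a corrupt entry via print_bad_pte(). The surrounding code here is an
assumption for illustration, not taken from this patch:

	} else {
		/*
		 * Sketch only -- assumed caller-side usage in zap_pte_range(),
		 * not part of this diff.  A pte_file entry in a linear vma, or
		 * a swap entry that free_swap_and_cache() rejects, indicates a
		 * corrupt pagetable entry worth reporting.
		 */
		if (pte_file(ptent)) {
			if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
				print_bad_pte(vma, addr, ptent, NULL);
		} else if (!free_swap_and_cache(pte_to_swp_entry(ptent)))
			print_bad_pte(vma, addr, ptent, NULL);
	}

Note that free_swap_and_cache() returns 1 for a migration entry, so such entries are
deliberately not reported as bad by a caller written this way.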
Diffstat (limited to 'mm/swapfile.c')
-rw-r--r--    mm/swapfile.c    7
1 file changed, 4 insertions, 3 deletions
diff --git a/mm/swapfile.c b/mm/swapfile.c
index d00523601913..f28745855772 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -571,13 +571,13 @@ int try_to_free_swap(struct page *page)
  * Free the swap entry like above, but also try to
  * free the page cache entry if it is the last user.
  */
-void free_swap_and_cache(swp_entry_t entry)
+int free_swap_and_cache(swp_entry_t entry)
 {
-	struct swap_info_struct * p;
+	struct swap_info_struct *p;
 	struct page *page = NULL;
 
 	if (is_migration_entry(entry))
-		return;
+		return 1;
 
 	p = swap_info_get(entry);
 	if (p) {
@@ -603,6 +603,7 @@ void free_swap_and_cache(swp_entry_t entry)
 		unlock_page(page);
 		page_cache_release(page);
 	}
+	return p != NULL;
 }
 
 #ifdef CONFIG_HIBERNATION