author		Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>	2010-05-27 20:29:17 -0400
committer	Andi Kleen <ak@linux.intel.com>	2010-08-11 03:21:36 -0400
commit		7af446a841a264a1a9675001005b29ce01d1fc57 (patch)
tree		902fec55a889d33771f267fc6242b1de43d9d0c6 /mm/memory-failure.c
parent		0fe6e20b9c4c53b3e97096ee73a0857f60aad43f (diff)
HWPOISON, hugetlb: enable error handling path for hugepage
This patch just enables the error handling path for hugepages. The real
containment and recovery operations will be implemented in following patches.
Dependency:
"hugetlb, rmap: add reverse mapping for hugepage."
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Fengguang Wu <fengguang.wu@intel.com>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
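
The change is mechanical: a hugepage's shared state (reverse mappings, the
page lock, the refcount) lives on the head page of the compound page, while
the PG_hwpoison bit is set on the exact 4kB page that failed. Below is a
minimal sketch of the pattern the diff applies throughout; it is illustrative
only, not part of the patch, and the function name is made up:

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/hugetlb.h>

/* Sketch, not from the patch: how the handler splits its view of a
 * poisoned page once hugepages are allowed through. */
static int sketch_handle_poison(struct page *p)
{
	struct page *hpage = compound_head(p);	/* head of the compound page */

	if (TestSetPageHWPoison(p))		/* poison flag: on the exact page */
		return 0;
	if (!get_page_unless_zero(hpage))	/* refcount: on the head page */
		return 0;

	lock_page_nosync(hpage);		/* page lock: on the head page */
	/* ... unmap via try_to_unmap(hpage, ttu) and kill users, as in
	 * hwpoison_user_mappings() below ... */
	unlock_page(hpage);
	put_page(hpage);
	return 0;
}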
Diffstat (limited to 'mm/memory-failure.c')
-rw-r--r--	mm/memory-failure.c	39
1 file changed, 22 insertions(+), 17 deletions(-)
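
One detail worth calling out in the diff below: hugetlb pages never sit on
the LRU lists, so the patch has to exempt them from the LRU sanity check;
otherwise every hugepage would be shaken and then rejected as unhandleable.
A condensed sketch of that gate (paraphrased, not verbatim kernel code):

	/* The page must either be on the LRU (a regular page) or be a
	 * hugepage; anything else is in an unknown state and is skipped. */
	if (!PageLRU(p) && !PageHuge(p))
		shake_page(p, 0);	/* may drain per-CPU lists onto the LRU */
	if (!PageLRU(p) && !PageHuge(p))
		return -EBUSY;		/* unknown page state: refuse to handle */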
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 620b0b46159..1ec68c80788 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -45,6 +45,7 @@
 #include <linux/page-isolation.h>
 #include <linux/suspend.h>
 #include <linux/slab.h>
+#include <linux/hugetlb.h>
 #include "internal.h"
 
 int sysctl_memory_failure_early_kill __read_mostly = 0;
@@ -837,6 +838,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	int ret;
 	int i;
 	int kill = 1;
+	struct page *hpage = compound_head(p);
 
 	if (PageReserved(p) || PageSlab(p))
 		return SWAP_SUCCESS;
@@ -845,10 +847,10 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 * This check implies we don't kill processes if their pages
 	 * are in the swap cache early. Those are always late kills.
 	 */
-	if (!page_mapped(p))
+	if (!page_mapped(hpage))
 		return SWAP_SUCCESS;
 
-	if (PageCompound(p) || PageKsm(p))
+	if (PageKsm(p))
 		return SWAP_FAIL;
 
 	if (PageSwapCache(p)) {
@@ -863,10 +865,11 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 * XXX: the dirty test could be racy: set_page_dirty() may not always
 	 * be called inside page lock (it's recommended but not enforced).
 	 */
-	mapping = page_mapping(p);
-	if (!PageDirty(p) && mapping && mapping_cap_writeback_dirty(mapping)) {
-		if (page_mkclean(p)) {
-			SetPageDirty(p);
+	mapping = page_mapping(hpage);
+	if (!PageDirty(hpage) && mapping &&
+	    mapping_cap_writeback_dirty(mapping)) {
+		if (page_mkclean(hpage)) {
+			SetPageDirty(hpage);
 		} else {
 			kill = 0;
 			ttu |= TTU_IGNORE_HWPOISON;
@@ -885,14 +888,14 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 * there's nothing that can be done.
 	 */
 	if (kill)
-		collect_procs(p, &tokill);
+		collect_procs(hpage, &tokill);
 
 	/*
 	 * try_to_unmap can fail temporarily due to races.
 	 * Try a few times (RED-PEN better strategy?)
 	 */
 	for (i = 0; i < N_UNMAP_TRIES; i++) {
-		ret = try_to_unmap(p, ttu);
+		ret = try_to_unmap(hpage, ttu);
 		if (ret == SWAP_SUCCESS)
 			break;
 		pr_debug("MCE %#lx: try_to_unmap retry needed %d\n", pfn, ret);
@@ -900,7 +903,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 
 	if (ret != SWAP_SUCCESS)
 		printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
-				pfn, page_mapcount(p));
+				pfn, page_mapcount(hpage));
 
 	/*
 	 * Now that the dirty bit has been propagated to the
@@ -911,7 +914,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 * use a more force-full uncatchable kill to prevent
 	 * any accesses to the poisoned memory.
 	 */
-	kill_procs_ao(&tokill, !!PageDirty(p), trapno,
+	kill_procs_ao(&tokill, !!PageDirty(hpage), trapno,
 		      ret != SWAP_SUCCESS, pfn);
 
 	return ret;
@@ -921,6 +924,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
 {
 	struct page_state *ps;
 	struct page *p;
+	struct page *hpage;
 	int res;
 
 	if (!sysctl_memory_failure_recovery)
@@ -934,6 +938,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
 	}
 
 	p = pfn_to_page(pfn);
+	hpage = compound_head(p);
 	if (TestSetPageHWPoison(p)) {
 		printk(KERN_ERR "MCE %#lx: already hardware poisoned\n", pfn);
 		return 0;
@@ -953,7 +958,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
 	 * that may make page_freeze_refs()/page_unfreeze_refs() mismatch.
 	 */
 	if (!(flags & MF_COUNT_INCREASED) &&
-		!get_page_unless_zero(compound_head(p))) {
+		!get_page_unless_zero(hpage)) {
 		if (is_free_buddy_page(p)) {
 			action_result(pfn, "free buddy", DELAYED);
 			return 0;
@@ -971,9 +976,9 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
 	 * The check (unnecessarily) ignores LRU pages being isolated and
 	 * walked by the page reclaim code, however that's not a big loss.
 	 */
-	if (!PageLRU(p))
+	if (!PageLRU(p) && !PageHuge(p))
 		shake_page(p, 0);
-	if (!PageLRU(p)) {
+	if (!PageLRU(p) && !PageHuge(p)) {
 		/*
 		 * shake_page could have turned it free.
 		 */
@@ -991,7 +996,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
 	 * It's very difficult to mess with pages currently under IO
 	 * and in many cases impossible, so we just avoid it here.
 	 */
-	lock_page_nosync(p);
+	lock_page_nosync(hpage);
 
 	/*
 	 * unpoison always clear PG_hwpoison inside page lock
@@ -1004,8 +1009,8 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
 	if (hwpoison_filter(p)) {
 		if (TestClearPageHWPoison(p))
 			atomic_long_dec(&mce_bad_pages);
-		unlock_page(p);
-		put_page(p);
+		unlock_page(hpage);
+		put_page(hpage);
 		return 0;
 	}
 
@@ -1038,7 +1043,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
 		}
 	}
 out:
-	unlock_page(p);
+	unlock_page(hpage);
 	return res;
 }
 EXPORT_SYMBOL_GPL(__memory_failure);
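
For context, one way to exercise this handler from userspace is
madvise(MADV_HWPOISON) on a hugetlb mapping (requires root and
CONFIG_MEMORY_FAILURE). The test below is hypothetical and assumes 2MB
hugepages reserved via /proc/sys/vm/nr_hugepages; note that at this point in
the series the handler only enters the hugepage path, since containment and
recovery land in the follow-up patches, so a failure result is expected:

#define _GNU_SOURCE
#include <sys/mman.h>
#include <stdio.h>

#ifndef MADV_HWPOISON
#define MADV_HWPOISON 100		/* poison injection, needs CAP_SYS_ADMIN */
#endif
#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0x40000		/* arch-dependent; this is the x86 value */
#endif

#define HPAGE_SIZE (2UL << 20)		/* assumes 2MB hugepages */

int main(void)
{
	/* Map one hugepage; needs pages reserved beforehand, e.g.
	 * echo 1 > /proc/sys/vm/nr_hugepages */
	char *p = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) { perror("mmap"); return 1; }
	p[0] = 1;			/* fault the hugepage in */

	/* Poison one 4kB page inside it: the kernel runs __memory_failure()
	 * on that pfn and takes the hugepage branch enabled above. */
	if (madvise(p, 4096, MADV_HWPOISON))
		perror("madvise(MADV_HWPOISON)");
	return 0;
}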