about summary refs log tree commit diff stats
path: root/mm/memory-failure.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/memory-failure.c')
-rw-r--r--  mm/memory-failure.c  39
1 file changed, 24 insertions, 15 deletions
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index fabe55046c1d..90002ea43638 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -611,7 +611,7 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
611} 611}
612 612
613/* 613/*
614 * Dirty cache page page 614 * Dirty pagecache page
615 * Issues: when the error hit a hole page the error is not properly 615 * Issues: when the error hit a hole page the error is not properly
616 * propagated. 616 * propagated.
617 */ 617 */
@@ -856,14 +856,14 @@ static int page_action(struct page_state *ps, struct page *p,
856 * the pages and send SIGBUS to the processes if the data was dirty. 856 * the pages and send SIGBUS to the processes if the data was dirty.
857 */ 857 */
858static int hwpoison_user_mappings(struct page *p, unsigned long pfn, 858static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
859 int trapno, int flags) 859 int trapno, int flags, struct page **hpagep)
860{ 860{
861 enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS; 861 enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
862 struct address_space *mapping; 862 struct address_space *mapping;
863 LIST_HEAD(tokill); 863 LIST_HEAD(tokill);
864 int ret; 864 int ret;
865 int kill = 1, forcekill; 865 int kill = 1, forcekill;
866 struct page *hpage = compound_head(p); 866 struct page *hpage = *hpagep;
867 struct page *ppage; 867 struct page *ppage;
868 868
869 if (PageReserved(p) || PageSlab(p)) 869 if (PageReserved(p) || PageSlab(p))
@@ -942,11 +942,16 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
942 * We pinned the head page for hwpoison handling, 942 * We pinned the head page for hwpoison handling,
943 * now we split the thp and we are interested in 943 * now we split the thp and we are interested in
944 * the hwpoisoned raw page, so move the refcount 944 * the hwpoisoned raw page, so move the refcount
945 * to it. 945 * to it. Similarly, page lock is shifted.
946 */ 946 */
947 if (hpage != p) { 947 if (hpage != p) {
948 put_page(hpage); 948 if (!(flags & MF_COUNT_INCREASED)) {
949 get_page(p); 949 put_page(hpage);
950 get_page(p);
951 }
952 lock_page(p);
953 unlock_page(hpage);
954 *hpagep = p;
950 } 955 }
951 /* THP is split, so ppage should be the real poisoned page. */ 956 /* THP is split, so ppage should be the real poisoned page. */
952 ppage = p; 957 ppage = p;
@@ -964,17 +969,11 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
964 if (kill) 969 if (kill)
965 collect_procs(ppage, &tokill); 970 collect_procs(ppage, &tokill);
966 971
967 if (hpage != ppage)
968 lock_page(ppage);
969
970 ret = try_to_unmap(ppage, ttu); 972 ret = try_to_unmap(ppage, ttu);
971 if (ret != SWAP_SUCCESS) 973 if (ret != SWAP_SUCCESS)
972 printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n", 974 printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
973 pfn, page_mapcount(ppage)); 975 pfn, page_mapcount(ppage));
974 976
975 if (hpage != ppage)
976 unlock_page(ppage);
977
978 /* 977 /*
979 * Now that the dirty bit has been propagated to the 978 * Now that the dirty bit has been propagated to the
980 * struct page and all unmaps done we can decide if 979 * struct page and all unmaps done we can decide if
@@ -1193,8 +1192,12 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1193 /* 1192 /*
1194 * Now take care of user space mappings. 1193 * Now take care of user space mappings.
1195 * Abort on fail: __delete_from_page_cache() assumes unmapped page. 1194 * Abort on fail: __delete_from_page_cache() assumes unmapped page.
1195 *
1196 * When the raw error page is thp tail page, hpage points to the raw
1197 * page after thp split.
1196 */ 1198 */
1197 if (hwpoison_user_mappings(p, pfn, trapno, flags) != SWAP_SUCCESS) { 1199 if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)
1200 != SWAP_SUCCESS) {
1198 printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn); 1201 printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);
1199 res = -EBUSY; 1202 res = -EBUSY;
1200 goto out; 1203 goto out;
@@ -1585,7 +1588,13 @@ static int __soft_offline_page(struct page *page, int flags)
1585 ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 1588 ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
1586 MIGRATE_SYNC, MR_MEMORY_FAILURE); 1589 MIGRATE_SYNC, MR_MEMORY_FAILURE);
1587 if (ret) { 1590 if (ret) {
1588 putback_lru_pages(&pagelist); 1591 if (!list_empty(&pagelist)) {
1592 list_del(&page->lru);
1593 dec_zone_page_state(page, NR_ISOLATED_ANON +
1594 page_is_file_cache(page));
1595 putback_lru_page(page);
1596 }
1597
1589 pr_info("soft offline: %#lx: migration failed %d, type %lx\n", 1598 pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
1590 pfn, ret, page->flags); 1599 pfn, ret, page->flags);
1591 if (ret > 0) 1600 if (ret > 0)
@@ -1642,7 +1651,7 @@ int soft_offline_page(struct page *page, int flags)
1642{ 1651{
1643 int ret; 1652 int ret;
1644 unsigned long pfn = page_to_pfn(page); 1653 unsigned long pfn = page_to_pfn(page);
1645 struct page *hpage = compound_trans_head(page); 1654 struct page *hpage = compound_head(page);
1646 1655
1647 if (PageHWPoison(page)) { 1656 if (PageHWPoison(page)) {
1648 pr_info("soft offline: %#lx page already poisoned\n", pfn); 1657 pr_info("soft offline: %#lx page already poisoned\n", pfn);