path: root/mm/memory-failure.c
author     Jiri Kosina <jkosina@suse.cz>  2010-12-10 09:19:18 -0500
committer  Jiri Kosina <jkosina@suse.cz>  2010-12-10 09:19:18 -0500
commit     2ade0c1d9d93b7642212657ef76f4a1e30233711 (patch)
tree       63bc720c0ffe5f4760cac4ed617b9870b050175e /mm/memory-failure.c
parent     504499f22c08a03e2e19dc88d31aa0ecd2ac815e (diff)
parent     6313e3c21743cc88bb5bd8aa72948ee1e83937b6 (diff)
Merge branch 'master' into upstream
Diffstat (limited to 'mm/memory-failure.c')
-rw-r--r--  mm/memory-failure.c  184
1 file changed, 130 insertions, 54 deletions
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 757f6b0accfe..46ab2c044b0e 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -7,21 +7,26 @@
  * Free Software Foundation.
  *
  * High level machine check handler. Handles pages reported by the
- * hardware as being corrupted usually due to a 2bit ECC memory or cache
+ * hardware as being corrupted usually due to a multi-bit ECC memory or cache
  * failure.
+ *
+ * In addition there is a "soft offline" entry point that allows stop using
+ * not-yet-corrupted-by-suspicious pages without killing anything.
  *
  * Handles page cache pages in various states. The tricky part
- * here is that we can access any page asynchronous to other VM
- * users, because memory failures could happen anytime and anywhere,
- * possibly violating some of their assumptions. This is why this code
- * has to be extremely careful. Generally it tries to use normal locking
- * rules, as in get the standard locks, even if that means the
- * error handling takes potentially a long time.
+ * here is that we can access any page asynchronously in respect to
+ * other VM users, because memory failures could happen anytime and
+ * anywhere. This could violate some of their assumptions. This is why
+ * this code has to be extremely careful. Generally it tries to use
+ * normal locking rules, as in get the standard locks, even if that means
+ * the error handling takes potentially a long time.
  *
- * The operation to map back from RMAP chains to processes has to walk
- * the complete process list and has non linear complexity with the number
- * mappings. In short it can be quite slow. But since memory corruptions
- * are rare we hope to get away with this.
+ * There are several operations here with exponential complexity because
+ * of unsuitable VM data structures. For example the operation to map back
+ * from RMAP chains to processes has to walk the complete process list and
+ * has non linear complexity with the number. But since memory corruptions
+ * are rare we hope to get away with this. This avoids impacting the core
+ * VM.
  */

 /*
@@ -30,7 +35,6 @@
  * - kcore/oldmem/vmcore/mem/kmem check for hwpoison pages
  * - pass bad pages to kdump next kernel
  */
-#define DEBUG 1 /* remove me in 2.6.34 */
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/page-flags.h>
@@ -47,6 +51,7 @@
 #include <linux/slab.h>
 #include <linux/swapops.h>
 #include <linux/hugetlb.h>
+#include <linux/memory_hotplug.h>
 #include "internal.h"

 int sysctl_memory_failure_early_kill __read_mostly = 0;
@@ -78,7 +83,7 @@ static int hwpoison_filter_dev(struct page *p)
                return 0;

        /*
-        * page_mapping() does not accept slab page
+        * page_mapping() does not accept slab pages.
         */
        if (PageSlab(p))
                return -EINVAL;
@@ -268,7 +273,7 @@ struct to_kill {
        struct list_head nd;
        struct task_struct *tsk;
        unsigned long addr;
-       unsigned addr_valid:1;
+       char addr_valid;
 };

 /*
@@ -309,7 +314,7 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
         * a SIGKILL because the error is not contained anymore.
         */
        if (tk->addr == -EFAULT) {
-               pr_debug("MCE: Unable to find user space address %lx in %s\n",
+               pr_info("MCE: Unable to find user space address %lx in %s\n",
                        page_to_pfn(p), tsk->comm);
                tk->addr_valid = 0;
        }
@@ -577,7 +582,7 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
                        pfn, err);
        } else if (page_has_private(p) &&
                   !try_to_release_page(p, GFP_NOIO)) {
-               pr_debug("MCE %#lx: failed to release buffers\n", pfn);
+               pr_info("MCE %#lx: failed to release buffers\n", pfn);
        } else {
                ret = RECOVERED;
        }
@@ -693,11 +698,10 @@ static int me_swapcache_clean(struct page *p, unsigned long pfn)
  * Issues:
  * - Error on hugepage is contained in hugepage unit (not in raw page unit.)
  *   To narrow down kill region to one page, we need to break up pmd.
- * - To support soft-offlining for hugepage, we need to support hugepage
- *   migration.
  */
 static int me_huge_page(struct page *p, unsigned long pfn)
 {
+       int res = 0;
        struct page *hpage = compound_head(p);
        /*
         * We can safely recover from error on free or reserved (i.e.
@@ -710,8 +714,9 @@ static int me_huge_page(struct page *p, unsigned long pfn)
         * so there is no race between isolation and mapping/unmapping.
         */
        if (!(page_mapping(hpage) || PageAnon(hpage))) {
-               __isolate_hwpoisoned_huge_page(hpage);
-               return RECOVERED;
+               res = dequeue_hwpoisoned_huge_page(hpage);
+               if (!res)
+                       return RECOVERED;
        }
        return DELAYED;
 }
@@ -836,8 +841,6 @@ static int page_action(struct page_state *ps, struct page *p,
        return (result == RECOVERED || result == DELAYED) ? 0 : -EBUSY;
 }

-#define N_UNMAP_TRIES 5
-
 /*
  * Do all that is necessary to remove user space mappings. Unmap
  * the pages and send SIGBUS to the processes if the data was dirty.
@@ -849,7 +852,6 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
        struct address_space *mapping;
        LIST_HEAD(tokill);
        int ret;
-       int i;
        int kill = 1;
        struct page *hpage = compound_head(p);

@@ -903,17 +905,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
        if (kill)
                collect_procs(hpage, &tokill);

-       /*
-        * try_to_unmap can fail temporarily due to races.
-        * Try a few times (RED-PEN better strategy?)
-        */
-       for (i = 0; i < N_UNMAP_TRIES; i++) {
-               ret = try_to_unmap(hpage, ttu);
-               if (ret == SWAP_SUCCESS)
-                       break;
-               pr_debug("MCE %#lx: try_to_unmap retry needed %d\n", pfn, ret);
-       }
-
+       ret = try_to_unmap(hpage, ttu);
        if (ret != SWAP_SUCCESS)
                printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
                        pfn, page_mapcount(hpage));
@@ -981,7 +973,10 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
         * We need/can do nothing about count=0 pages.
         * 1) it's a free page, and therefore in safe hand:
         *    prep_new_page() will be the gate keeper.
-        * 2) it's part of a non-compound high order page.
+        * 2) it's a free hugepage, which is also safe:
+        *    an affected hugepage will be dequeued from hugepage freelist,
+        *    so there's no concern about reusing it ever after.
+        * 3) it's part of a non-compound high order page.
         *    Implies some kernel user: cannot stop them from
         *    R/W the page; let's pray that the page has been
         *    used and will be freed some time later.
@@ -993,6 +988,24 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
                if (is_free_buddy_page(p)) {
                        action_result(pfn, "free buddy", DELAYED);
                        return 0;
+               } else if (PageHuge(hpage)) {
+                       /*
+                        * Check "just unpoisoned", "filter hit", and
+                        * "race with other subpage."
+                        */
+                       lock_page_nosync(hpage);
+                       if (!PageHWPoison(hpage)
+                           || (hwpoison_filter(p) && TestClearPageHWPoison(p))
+                           || (p != hpage && TestSetPageHWPoison(hpage))) {
+                               atomic_long_sub(nr_pages, &mce_bad_pages);
+                               return 0;
+                       }
+                       set_page_hwpoison_huge_page(hpage);
+                       res = dequeue_hwpoisoned_huge_page(hpage);
+                       action_result(pfn, "free huge",
+                                     res ? IGNORED : DELAYED);
+                       unlock_page(hpage);
+                       return res;
                } else {
                        action_result(pfn, "high order kernel", IGNORED);
                        return -EBUSY;
@@ -1147,16 +1160,26 @@ int unpoison_memory(unsigned long pfn)
        page = compound_head(p);

        if (!PageHWPoison(p)) {
-               pr_debug("MCE: Page was already unpoisoned %#lx\n", pfn);
+               pr_info("MCE: Page was already unpoisoned %#lx\n", pfn);
                return 0;
        }

        nr_pages = 1 << compound_order(page);

        if (!get_page_unless_zero(page)) {
+               /*
+                * Since HWPoisoned hugepage should have non-zero refcount,
+                * race between memory failure and unpoison seems to happen.
+                * In such case unpoison fails and memory failure runs
+                * to the end.
+                */
+               if (PageHuge(page)) {
+                       pr_debug("MCE: Memory failure is now running on free hugepage %#lx\n", pfn);
+                       return 0;
+               }
                if (TestClearPageHWPoison(p))
                        atomic_long_sub(nr_pages, &mce_bad_pages);
-               pr_debug("MCE: Software-unpoisoned free page %#lx\n", pfn);
+               pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
                return 0;
        }

@@ -1168,12 +1191,12 @@ int unpoison_memory(unsigned long pfn)
         * the free buddy page pool.
         */
        if (TestClearPageHWPoison(page)) {
-               pr_debug("MCE: Software-unpoisoned page %#lx\n", pfn);
+               pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
                atomic_long_sub(nr_pages, &mce_bad_pages);
                freeit = 1;
+               if (PageHuge(page))
+                       clear_page_hwpoison_huge_page(page);
        }
-       if (PageHuge(p))
-               clear_page_hwpoison_huge_page(page);
        unlock_page(page);

        put_page(page);
@@ -1187,7 +1210,11 @@ EXPORT_SYMBOL(unpoison_memory);
 static struct page *new_page(struct page *p, unsigned long private, int **x)
 {
        int nid = page_to_nid(p);
-       return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0);
+       if (PageHuge(p))
+               return alloc_huge_page_node(page_hstate(compound_head(p)),
+                                           nid);
+       else
+               return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0);
 }

 /*
@@ -1204,25 +1231,31 @@ static int get_any_page(struct page *p, unsigned long pfn, int flags)
                return 1;

        /*
-        * The lock_system_sleep prevents a race with memory hotplug,
-        * because the isolation assumes there's only a single user.
+        * The lock_memory_hotplug prevents a race with memory hotplug.
         * This is a big hammer, a better would be nicer.
         */
-       lock_system_sleep();
+       lock_memory_hotplug();

        /*
         * Isolate the page, so that it doesn't get reallocated if it
         * was free.
         */
        set_migratetype_isolate(p);
+       /*
+        * When the target page is a free hugepage, just remove it
+        * from free hugepage list.
+        */
        if (!get_page_unless_zero(compound_head(p))) {
-               if (is_free_buddy_page(p)) {
-                       pr_debug("get_any_page: %#lx free buddy page\n", pfn);
+               if (PageHuge(p)) {
+                       pr_info("get_any_page: %#lx free huge page\n", pfn);
+                       ret = dequeue_hwpoisoned_huge_page(compound_head(p));
+               } else if (is_free_buddy_page(p)) {
+                       pr_info("get_any_page: %#lx free buddy page\n", pfn);
                        /* Set hwpoison bit while page is still isolated */
                        SetPageHWPoison(p);
                        ret = 0;
                } else {
-                       pr_debug("get_any_page: %#lx: unknown zero refcount page type %lx\n",
+                       pr_info("get_any_page: %#lx: unknown zero refcount page type %lx\n",
                                pfn, p->flags);
                        ret = -EIO;
                }
@@ -1231,7 +1264,47 @@ static int get_any_page(struct page *p, unsigned long pfn, int flags)
                ret = 1;
        }
        unset_migratetype_isolate(p);
-       unlock_system_sleep();
+       unlock_memory_hotplug();
+       return ret;
+}
+
+static int soft_offline_huge_page(struct page *page, int flags)
+{
+       int ret;
+       unsigned long pfn = page_to_pfn(page);
+       struct page *hpage = compound_head(page);
+       LIST_HEAD(pagelist);
+
+       ret = get_any_page(page, pfn, flags);
+       if (ret < 0)
+               return ret;
+       if (ret == 0)
+               goto done;
+
+       if (PageHWPoison(hpage)) {
+               put_page(hpage);
+               pr_debug("soft offline: %#lx hugepage already poisoned\n", pfn);
+               return -EBUSY;
+       }
+
+       /* Keep page count to indicate a given hugepage is isolated. */
+
+       list_add(&hpage->lru, &pagelist);
+       ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0);
+       if (ret) {
+               putback_lru_pages(&pagelist);
+               pr_debug("soft offline: %#lx: migration failed %d, type %lx\n",
+                        pfn, ret, page->flags);
+               if (ret > 0)
+                       ret = -EIO;
+               return ret;
+       }
+done:
+       if (!PageHWPoison(hpage))
+               atomic_long_add(1 << compound_order(hpage), &mce_bad_pages);
+       set_page_hwpoison_huge_page(hpage);
+       dequeue_hwpoisoned_huge_page(hpage);
+       /* keep elevated page count for bad page */
        return ret;
 }

@@ -1262,6 +1335,9 @@ int soft_offline_page(struct page *page, int flags)
        int ret;
        unsigned long pfn = page_to_pfn(page);

+       if (PageHuge(page))
+               return soft_offline_huge_page(page, flags);
+
        ret = get_any_page(page, pfn, flags);
        if (ret < 0)
                return ret;
@@ -1288,7 +1364,7 @@ int soft_offline_page(struct page *page, int flags)
                goto done;
        }
        if (!PageLRU(page)) {
-               pr_debug("soft_offline: %#lx: unknown non LRU page type %lx\n",
+               pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
                        pfn, page->flags);
                return -EIO;
        }
@@ -1302,7 +1378,7 @@ int soft_offline_page(struct page *page, int flags)
        if (PageHWPoison(page)) {
                unlock_page(page);
                put_page(page);
-               pr_debug("soft offline: %#lx page already poisoned\n", pfn);
+               pr_info("soft offline: %#lx page already poisoned\n", pfn);
                return -EBUSY;
        }

@@ -1323,7 +1399,7 @@ int soft_offline_page(struct page *page, int flags)
        put_page(page);
        if (ret == 1) {
                ret = 0;
-               pr_debug("soft_offline: %#lx: invalidated\n", pfn);
+               pr_info("soft_offline: %#lx: invalidated\n", pfn);
                goto done;
        }

@@ -1339,13 +1415,13 @@ int soft_offline_page(struct page *page, int flags)
                list_add(&page->lru, &pagelist);
                ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0);
                if (ret) {
-                       pr_debug("soft offline: %#lx: migration failed %d, type %lx\n",
+                       pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
                                pfn, ret, page->flags);
                        if (ret > 0)
                                ret = -EIO;
                }
        } else {
-               pr_debug("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
+               pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
                        pfn, ret, page_count(page), page->flags);
        }
        if (ret)