diff options
author | Andi Kleen <andi@firstfloor.org> | 2009-12-16 06:19:57 -0500 |
---|---|---|
committer | Andi Kleen <ak@linux.intel.com> | 2009-12-16 06:19:57 -0500 |
commit | 588f9ce6ca61ecb4663ee6ef2f75d2d96c73151e (patch) | |
tree | 4f68c55642c51312bdf6e49818b9889ec37a1ac3 | |
parent | 7bc98b97ed5dfe710025414de771baa674998892 (diff) |
HWPOISON: Be more aggressive at freeing non LRU caches
shake_page handles more types of page caches than lru_add_drain_all()
- per cpu page allocator pages
- per CPU LRU
Stops early when the page becomes free.
Used in follow-on patches.
Signed-off-by: Andi Kleen <ak@linux.intel.com>
-rw-r--r-- | include/linux/mm.h | 1 | ||||
-rw-r--r-- | mm/memory-failure.c | 22 |
2 files changed, 23 insertions, 0 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h index 9d65ae4ba0e0..68c84bb2ad3f 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -1335,6 +1335,7 @@ extern void memory_failure(unsigned long pfn, int trapno); | |||
1335 | extern int __memory_failure(unsigned long pfn, int trapno, int ref); | 1335 | extern int __memory_failure(unsigned long pfn, int trapno, int ref); |
1336 | extern int sysctl_memory_failure_early_kill; | 1336 | extern int sysctl_memory_failure_early_kill; |
1337 | extern int sysctl_memory_failure_recovery; | 1337 | extern int sysctl_memory_failure_recovery; |
1338 | extern void shake_page(struct page *p); | ||
1338 | extern atomic_long_t mce_bad_pages; | 1339 | extern atomic_long_t mce_bad_pages; |
1339 | 1340 | ||
1340 | #endif /* __KERNEL__ */ | 1341 | #endif /* __KERNEL__ */ |
diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 50d4f8d7024a..38fcbb22eab9 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c | |||
@@ -83,6 +83,28 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno, | |||
83 | } | 83 | } |
84 | 84 | ||
85 | /* | 85 | /* |
86 | * When a unknown page type is encountered drain as many buffers as possible | ||
87 | * in the hope to turn the page into a LRU or free page, which we can handle. | ||
88 | */ | ||
89 | void shake_page(struct page *p) | ||
90 | { | ||
91 | if (!PageSlab(p)) { | ||
92 | lru_add_drain_all(); | ||
93 | if (PageLRU(p)) | ||
94 | return; | ||
95 | drain_all_pages(); | ||
96 | if (PageLRU(p) || is_free_buddy_page(p)) | ||
97 | return; | ||
98 | } | ||
99 | /* | ||
100 | * Could call shrink_slab here (which would also | ||
101 | * shrink other caches). Unfortunately that might | ||
102 | * also access the corrupted page, which could be fatal. | ||
103 | */ | ||
104 | } | ||
105 | EXPORT_SYMBOL_GPL(shake_page); | ||
106 | |||
107 | /* | ||
86 | * Kill all processes that have a poisoned page mapped and then isolate | 108 | * Kill all processes that have a poisoned page mapped and then isolate |
87 | * the page. | 109 | * the page. |
88 | * | 110 | * |