author	Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>	2017-05-03 17:56:19 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-05-03 18:52:12 -0400
commit	8bcb74de764aaa261d6af3ce5ac723e435f00ff4 (patch)
tree	614316758c5c70378ce2fcf40d3c40b2c5d871a9
parent	0ccfece6ed507738c0e7e4414c3688b78d4e3756 (diff)
mm: hwpoison: call shake_page() unconditionally
shake_page() is called before going into the core error handling code in order to ensure that the error page is flushed from the lru_cache lists, where pages stay while being transferred among LRU lists.

But currently it is not fully functional: when the page is linked to lru_cache by calling activate_page(), its PageLRU flag is set and shake_page() is skipped. The result is that error handling fails with a "still referenced by 1 users" message. When the page is linked to lru_cache by isolate_lru_page(), its PageLRU is clear, so that case is fine.

This patch calls shake_page() unconditionally to avoid the failure.

Fixes: 23a003bfd23ea9ea0b7756b920e51f64b284b468 ("mm/madvise: pass return code of memory_failure() to userspace")
Link: http://lkml.kernel.org/r/20170417055948.GM31394@yexl-desktop
Link: http://lkml.kernel.org/r/1493197841-23986-2-git-send-email-n-horiguchi@ah.jp.nec.com
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Reported-by: kernel test robot <lkp@intel.com>
Cc: Xiaolong Ye <xiaolong.ye@intel.com>
Cc: Chen Gong <gong.chen@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
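As a side note for readers following the reasoning above, the sketch below is a minimal userspace toy model, not kernel code: the names toy_page, toy_drain_pagevec and handle_poison are invented for illustration, and the per-CPU pagevec is reduced to a single extra reference count. It only shows why a shake gated on the LRU flag can leave the pagevec's reference in place, while an unconditional shake drains it.

/*
 * Toy model (userspace, illustrative only): a page parked in a per-CPU
 * LRU pagevec by activate_page() already has its LRU flag set and the
 * pagevec holds an extra reference.  A "shake only if !PageLRU" guard
 * therefore never drains the pagevec, and the extra reference makes
 * the handler bail out with "still referenced by 1 users".
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_page {
	bool lru;       /* models PageLRU(p) */
	int extra_refs; /* models references other than the handler's own */
};

/* Models lru_add_drain_all(): flushing the pagevec drops its reference. */
static void toy_drain_pagevec(struct toy_page *p)
{
	p->extra_refs = 0;
}

/* Returns true when the handler ends up holding the only reference. */
static bool handle_poison(struct toy_page *p, bool shake_unconditionally)
{
	if (shake_unconditionally || !p->lru)	/* old behaviour gated on !PageLRU */
		toy_drain_pagevec(p);
	return p->extra_refs == 0;
}

int main(void)
{
	/* After activate_page(): LRU flag set, pagevec still holds a reference. */
	struct toy_page before = { .lru = true, .extra_refs = 1 };
	struct toy_page after  = { .lru = true, .extra_refs = 1 };

	printf("conditional shake:   %s\n",
	       handle_poison(&before, false) ? "handled" : "still referenced by 1 users");
	printf("unconditional shake: %s\n",
	       handle_poison(&after, true) ? "handled" : "still referenced by 1 users");
	return 0;
}

Built with any C compiler, the first call prints the failing outcome and the second, mirroring the patched behaviour, reports success.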
-rw-r--r--	mm/hwpoison-inject.c	 3
-rw-r--r--	mm/memory-failure.c	27
2 files changed, 12 insertions, 18 deletions
diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c
index 9d26fd9fefe4..356df057a2a8 100644
--- a/mm/hwpoison-inject.c
+++ b/mm/hwpoison-inject.c
@@ -34,8 +34,7 @@ static int hwpoison_inject(void *data, u64 val)
 	if (!hwpoison_filter_enable)
 		goto inject;
 
-	if (!PageLRU(hpage) && !PageHuge(p))
-		shake_page(hpage, 0);
+	shake_page(hpage, 0);
 	/*
 	 * This implies unable to support non-LRU pages.
 	 */
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 92865bb1816d..9d87fcab96c9 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -220,6 +220,9 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
  */
 void shake_page(struct page *p, int access)
 {
+	if (PageHuge(p))
+		return;
+
 	if (!PageSlab(p)) {
 		lru_add_drain_all();
 		if (PageLRU(p))
@@ -1137,22 +1140,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
 	 * The check (unnecessarily) ignores LRU pages being isolated and
 	 * walked by the page reclaim code, however that's not a big loss.
 	 */
-	if (!PageHuge(p)) {
-		if (!PageLRU(p))
-			shake_page(p, 0);
-		if (!PageLRU(p)) {
-			/*
-			 * shake_page could have turned it free.
-			 */
-			if (is_free_buddy_page(p)) {
-				if (flags & MF_COUNT_INCREASED)
-					action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
-				else
-					action_result(pfn, MF_MSG_BUDDY_2ND,
-						      MF_DELAYED);
-				return 0;
-			}
-		}
+	shake_page(p, 0);
+	/* shake_page could have turned it free. */
+	if (!PageLRU(p) && is_free_buddy_page(p)) {
+		if (flags & MF_COUNT_INCREASED)
+			action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
+		else
+			action_result(pfn, MF_MSG_BUDDY_2ND, MF_DELAYED);
+		return 0;
 	}
 
 	lock_page(hpage);