author	Tony Luck <tony.luck@intel.com>	2014-06-04 19:11:01 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-04 19:54:13 -0400
commit	74614de17db6fb472370c426d4f934d8d616edf2 (patch)
tree	44252cb1eb9034d4d372ee6fa84a23123f5c1549 /mm
parent	a70ffcac741d31a406c1d2b832ae43d658e7e1cf (diff)
mm/memory-failure.c: don't let collect_procs() skip over processes for MF_ACTION_REQUIRED
When Linux sees an "action optional" machine check (where h/w has reported an error that is not in the current execution path) we generally do not want to signal a process, since most processes do not have a SIGBUS handler - we'd just prematurely terminate the process for a problem it might never actually see.

task_early_kill() decides whether to consider a process - it checks whether this specific process has been marked for early signals with "prctl", or whether the system administrator has requested early signals for all processes using /proc/sys/vm/memory_failure_early_kill.

But for the MF_ACTION_REQUIRED case we must not defer.  The error is in the execution path of the current thread so we must send the SIGBUS immediately.

Fix by passing a flag argument through collect_procs*() to task_early_kill() so it knows whether we can defer or must take action.

Signed-off-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Borislav Petkov <bp@suse.de>
Cc: Chen Gong <gong.chen@linux.jf.intel.com>
Cc: <stable@vger.kernel.org>	[3.2+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
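For reference, the per-process opt-in mentioned above is the existing PR_MCE_KILL prctl documented in prctl(2). Below is a minimal, hypothetical userspace sketch (not part of this patch) of a process marking itself for early kills and handling the resulting SIGBUS; BUS_MCEERR_AO and BUS_MCEERR_AR are the si_code values the kernel uses for "action optional" vs. "action required" errors:

/*
 * Hypothetical userspace companion to this patch: opt in to early
 * "action optional" SIGBUS delivery and handle the signal.  The
 * prctl options and si_code constants are the existing kernel ABI
 * referenced in the commit message.
 */
#define _GNU_SOURCE		/* for BUS_MCEERR_AR / BUS_MCEERR_AO */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/prctl.h>
#include <unistd.h>

static void mce_handler(int sig, siginfo_t *si, void *ucontext)
{
	/*
	 * BUS_MCEERR_AR: the error is in our execution path, the kernel
	 * always signals.  BUS_MCEERR_AO: the error is elsewhere in our
	 * address space, delivered early only because we opted in below.
	 */
	const char *msg = (si->si_code == BUS_MCEERR_AR)
		? "SIGBUS: action required\n"
		: "SIGBUS: action optional\n";
	write(STDERR_FILENO, msg, strlen(msg));	/* async-signal-safe */
	_exit(1);
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = mce_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGBUS, &sa, NULL);

	/*
	 * Sets PF_MCE_PROCESS|PF_MCE_EARLY on the task, so that
	 * task_early_kill() returns 1 for us even when the global
	 * /proc/sys/vm/memory_failure_early_kill sysctl is 0.
	 */
	if (prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0))
		perror("prctl(PR_MCE_KILL)");

	pause();	/* a SIGBUS arrives if h/w poisons one of our pages */
	return 0;
}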
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory-failure.c	21 ++++++++++++---------
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 89ad452182bb..ed339c505d55 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -380,10 +380,12 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
 	}
 }
 
-static int task_early_kill(struct task_struct *tsk)
+static int task_early_kill(struct task_struct *tsk, int force_early)
 {
 	if (!tsk->mm)
 		return 0;
+	if (force_early)
+		return 1;
 	if (tsk->flags & PF_MCE_PROCESS)
 		return !!(tsk->flags & PF_MCE_EARLY);
 	return sysctl_memory_failure_early_kill;
@@ -393,7 +395,7 @@ static int task_early_kill(struct task_struct *tsk)
  * Collect processes when the error hit an anonymous page.
  */
 static void collect_procs_anon(struct page *page, struct list_head *to_kill,
-			      struct to_kill **tkc)
+			      struct to_kill **tkc, int force_early)
 {
 	struct vm_area_struct *vma;
 	struct task_struct *tsk;
@@ -409,7 +411,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 	for_each_process (tsk) {
 		struct anon_vma_chain *vmac;
 
-		if (!task_early_kill(tsk))
+		if (!task_early_kill(tsk, force_early))
 			continue;
 		anon_vma_interval_tree_foreach(vmac, &av->rb_root,
 					       pgoff, pgoff) {
@@ -428,7 +430,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
  * Collect processes when the error hit a file mapped page.
  */
 static void collect_procs_file(struct page *page, struct list_head *to_kill,
-			      struct to_kill **tkc)
+			      struct to_kill **tkc, int force_early)
 {
 	struct vm_area_struct *vma;
 	struct task_struct *tsk;
@@ -439,7 +441,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
 	for_each_process(tsk) {
 		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 
-		if (!task_early_kill(tsk))
+		if (!task_early_kill(tsk, force_early))
 			continue;
 
 		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
@@ -465,7 +467,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
  * First preallocate one tokill structure outside the spin locks,
  * so that we can kill at least one process reasonably reliable.
  */
-static void collect_procs(struct page *page, struct list_head *tokill)
+static void collect_procs(struct page *page, struct list_head *tokill,
+				int force_early)
 {
 	struct to_kill *tk;
 
@@ -476,9 +479,9 @@ static void collect_procs(struct page *page, struct list_head *tokill)
 	if (!tk)
 		return;
 	if (PageAnon(page))
-		collect_procs_anon(page, tokill, &tk);
+		collect_procs_anon(page, tokill, &tk, force_early);
 	else
-		collect_procs_file(page, tokill, &tk);
+		collect_procs_file(page, tokill, &tk, force_early);
 	kfree(tk);
 }
 
@@ -963,7 +966,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 * there's nothing that can be done.
 	 */
 	if (kill)
-		collect_procs(ppage, &tokill);
+		collect_procs(ppage, &tokill, flags & MF_ACTION_REQUIRED);
 
 	ret = try_to_unmap(ppage, ttu);
 	if (ret != SWAP_SUCCESS)
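
Net effect, paraphrased as a standalone reading aid (comments added; not verbatim kernel code): hwpoison_user_mappings() now forwards the truth value of flags & MF_ACTION_REQUIRED as force_early, which short-circuits both the per-process prctl check and the global sysctl:

/*
 * Reading aid: the early-kill decision order after this patch,
 * restated from the hunks above with explanatory comments.
 */
static int task_early_kill(struct task_struct *tsk, int force_early)
{
	if (!tsk->mm)				/* no address space: skip   */
		return 0;
	if (force_early)			/* MF_ACTION_REQUIRED path:  */
		return 1;			/* never defer the SIGBUS    */
	if (tsk->flags & PF_MCE_PROCESS)	/* per-process prctl choice  */
		return !!(tsk->flags & PF_MCE_EARLY);
	return sysctl_memory_failure_early_kill; /* global sysctl default   */
}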