path: root/mm/memory-failure.c
Diffstat (limited to 'mm/memory-failure.c')
-rw-r--r--	mm/memory-failure.c	25
1 file changed, 9 insertions(+), 16 deletions(-)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 5c8f7e08928..740c4f52059 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -52,6 +52,7 @@
 #include <linux/swapops.h>
 #include <linux/hugetlb.h>
 #include <linux/memory_hotplug.h>
+#include <linux/mm_inline.h>
 #include "internal.h"
 
 int sysctl_memory_failure_early_kill __read_mostly = 0;
@@ -390,10 +391,11 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 	struct task_struct *tsk;
 	struct anon_vma *av;
 
-	read_lock(&tasklist_lock);
 	av = page_lock_anon_vma(page);
 	if (av == NULL)	/* Not actually mapped anymore */
-		goto out;
+		return;
+
+	read_lock(&tasklist_lock);
 	for_each_process (tsk) {
 		struct anon_vma_chain *vmac;
 
@@ -407,9 +409,8 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 			add_to_kill(tsk, page, vma, to_kill, tkc);
 		}
 	}
-	page_unlock_anon_vma(av);
-out:
 	read_unlock(&tasklist_lock);
+	page_unlock_anon_vma(av);
 }
 
 /*
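
Note: the two hunks above fix a sleep-inside-spinlock inversion in collect_procs_anon(). tasklist_lock is a spinning rwlock, so nothing that can block may run while it is held; page_lock_anon_vma() can sleep in this tree (the anon_vma lock became a mutex in the same series that introduced i_mmap_mutex), so it must be taken first. A minimal sketch of the resulting locking skeleton, with the loop body elided and names as in this file:

	static void collect_procs_anon(struct page *page, struct list_head *to_kill,
				       struct to_kill **tkc)
	{
		struct task_struct *tsk;
		struct anon_vma *av;

		av = page_lock_anon_vma(page);	/* sleeping lock: take it first */
		if (av == NULL)			/* not actually mapped anymore */
			return;

		read_lock(&tasklist_lock);	/* spinlock: strictly nested inside */
		for_each_process(tsk) {
			/* ... match vmas on av->head, add_to_kill() each hit ... */
		}
		read_unlock(&tasklist_lock);
		page_unlock_anon_vma(av);	/* release in reverse order */
	}
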
@@ -423,17 +424,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
 	struct prio_tree_iter iter;
 	struct address_space *mapping = page->mapping;
 
-	/*
-	 * A note on the locking order between the two locks.
-	 * We don't rely on this particular order.
-	 * If you have some other code that needs a different order
-	 * feel free to switch them around. Or add a reverse link
-	 * from mm_struct to task_struct, then this could be all
-	 * done without taking tasklist_lock and looping over all tasks.
-	 */
-
-	read_lock(&tasklist_lock);
 	mutex_lock(&mapping->i_mmap_mutex);
+	read_lock(&tasklist_lock);
 	for_each_process(tsk) {
 		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 
@@ -453,8 +445,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
 			add_to_kill(tsk, page, vma, to_kill, tkc);
 		}
 	}
-	mutex_unlock(&mapping->i_mmap_mutex);
 	read_unlock(&tasklist_lock);
+	mutex_unlock(&mapping->i_mmap_mutex);
 }
 
 /*
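
Note: collect_procs_file() had the same inversion. i_mmap_mutex is a sleeping mutex and may not be acquired inside the tasklist_lock spinlock, so the patch swaps both the lock and the unlock order; the deleted comment claimed the two orders were interchangeable, which stopped being true once i_mmap_lock became a mutex. A hedged sketch of the corrected nesting, loop body elided:

	mutex_lock(&mapping->i_mmap_mutex);	/* sleeping lock, outermost */
	read_lock(&tasklist_lock);		/* spinning rwlock, innermost */
	for_each_process(tsk) {
		/* ... walk the mapping's prio tree, add_to_kill() each hit ... */
	}
	read_unlock(&tasklist_lock);
	mutex_unlock(&mapping->i_mmap_mutex);	/* unlock in reverse order */
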
@@ -1468,7 +1460,8 @@ int soft_offline_page(struct page *page, int flags)
 	put_page(page);
 	if (!ret) {
 		LIST_HEAD(pagelist);
-
+		inc_zone_page_state(page, NR_ISOLATED_ANON +
+					page_is_file_cache(page));
 		list_add(&page->lru, &pagelist);
 		ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
 							0, true);
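
Note: the migration path decrements the per-zone NR_ISOLATED_ANON/NR_ISOLATED_FILE counter for every page it handles, but soft_offline_page() builds its one-page list by hand instead of going through isolate_lru_page(), so the counter was never incremented and could underflow. The added inc_zone_page_state() balances that, and page_is_file_cache() (the reason for the new mm_inline.h include at the top of the file) selects the counter, relying on NR_ISOLATED_FILE directly following NR_ISOLATED_ANON in enum zone_stat_item. The decrement it pairs with, as I recall it from mm/migrate.c of this era:

	/* counterpart in putback_lru_pages() (and, on successful migration,
	 * in unmap_and_move()): page_is_file_cache() returns 1 for
	 * file-backed pages and 0 for anon, picking the right counter */
	dec_zone_page_state(page, NR_ISOLATED_ANON +
			page_is_file_cache(page));
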