Diffstat (limited to 'mm/memory-failure.c')
-rw-r--r--	mm/memory-failure.c	59
1 files changed, 31 insertions, 28 deletions

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 729d4b15b645..dacc64183874 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -35,6 +35,7 @@
 #include <linux/mm.h>
 #include <linux/page-flags.h>
 #include <linux/sched.h>
+#include <linux/ksm.h>
 #include <linux/rmap.h>
 #include <linux/pagemap.h>
 #include <linux/swap.h>
@@ -370,9 +371,6 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
 	int ret = FAILED;
 	struct address_space *mapping;
 
-	if (!isolate_lru_page(p))
-		page_cache_release(p);
-
 	/*
 	 * For anonymous pages we're done the only reference left
 	 * should be the one m_f() holds.
@@ -498,30 +496,18 @@ static int me_pagecache_dirty(struct page *p, unsigned long pfn)
  */
 static int me_swapcache_dirty(struct page *p, unsigned long pfn)
 {
-	int ret = FAILED;
-
 	ClearPageDirty(p);
 	/* Trigger EIO in shmem: */
 	ClearPageUptodate(p);
 
-	if (!isolate_lru_page(p)) {
-		page_cache_release(p);
-		ret = DELAYED;
-	}
-
-	return ret;
+	return DELAYED;
 }
 
 static int me_swapcache_clean(struct page *p, unsigned long pfn)
 {
-	int ret = FAILED;
-
-	if (!isolate_lru_page(p)) {
-		page_cache_release(p);
-		ret = RECOVERED;
-	}
 	delete_from_swap_cache(p);
-	return ret;
+
+	return RECOVERED;
 }
 
 /*
@@ -611,8 +597,6 @@ static struct page_state {
 	{ 0,		0,		"unknown page state",	me_unknown },
 };
 
-#undef lru
-
 static void action_result(unsigned long pfn, char *msg, int result)
 {
 	struct page *page = NULL;
@@ -629,13 +613,16 @@ static int page_action(struct page_state *ps, struct page *p,
 			unsigned long pfn, int ref)
 {
 	int result;
+	int count;
 
 	result = ps->action(p, pfn);
 	action_result(pfn, ps->msg, result);
-	if (page_count(p) != 1 + ref)
+
+	count = page_count(p) - 1 - ref;
+	if (count != 0)
 		printk(KERN_ERR
 		       "MCE %#lx: %s page still referenced by %d users\n",
-		       pfn, ps->msg, page_count(p) - 1);
+		       pfn, ps->msg, count);
 
 	/* Could do more checks here if page looks ok */
 	/*
@@ -661,12 +648,9 @@ static void hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	int i;
 	int kill = 1;
 
-	if (PageReserved(p) || PageCompound(p) || PageSlab(p))
+	if (PageReserved(p) || PageCompound(p) || PageSlab(p) || PageKsm(p))
 		return;
 
-	if (!PageLRU(p))
-		lru_add_drain_all();
-
 	/*
 	 * This check implies we don't kill processes if their pages
 	 * are in the swap cache early. Those are always late kills.
@@ -738,6 +722,7 @@ static void hwpoison_user_mappings(struct page *p, unsigned long pfn,
 
 int __memory_failure(unsigned long pfn, int trapno, int ref)
 {
+	unsigned long lru_flag;
 	struct page_state *ps;
 	struct page *p;
 	int res;
@@ -775,6 +760,24 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
 	}
 
 	/*
+	 * We ignore non-LRU pages for good reasons.
+	 * - PG_locked is only well defined for LRU pages and a few others
+	 * - to avoid races with __set_page_locked()
+	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
+	 * The check (unnecessarily) ignores LRU pages being isolated and
+	 * walked by the page reclaim code, however that's not a big loss.
+	 */
+	if (!PageLRU(p))
+		lru_add_drain_all();
+	lru_flag = p->flags & lru;
+	if (isolate_lru_page(p)) {
+		action_result(pfn, "non LRU", IGNORED);
+		put_page(p);
+		return -EBUSY;
+	}
+	page_cache_release(p);
+
+	/*
 	 * Lock the page and wait for writeback to finish.
 	 * It's very difficult to mess with pages currently under IO
 	 * and in many cases impossible, so we just avoid it here.
@@ -790,7 +793,7 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
 	/*
 	 * Torn down by someone else?
 	 */
-	if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
+	if ((lru_flag & lru) && !PageSwapCache(p) && p->mapping == NULL) {
 		action_result(pfn, "already truncated LRU", IGNORED);
 		res = 0;
 		goto out;
@@ -798,7 +801,7 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
 
 	res = -EBUSY;
 	for (ps = error_states;; ps++) {
-		if (((p->flags | lru_flag)& ps->mask) == ps->res) {
+		if (((p->flags | lru_flag)& ps->mask) == ps->res) {
 			res = page_action(ps, p, pfn, ref);
 			break;
 		}
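Note on the last two hunks: isolate_lru_page() takes the page off the LRU list and clears PG_lru, so __memory_failure() samples the bit into lru_flag before isolating and ORs it back in when the page-state table is scanned. Below is a minimal user-space sketch of just that masking logic; the flag bit values, the table entries, and the trimmed-down page_state fields are made-up stand-ins for illustration, not the kernel's definitions.

#include <stdio.h>

/* Hypothetical flag bits standing in for the kernel's page flags. */
#define PG_lru   (1UL << 0)
#define PG_dirty (1UL << 1)

/* Trimmed-down analogue of the page_state table in memory-failure.c. */
struct page_state {
	unsigned long mask;	/* which flag bits to look at */
	unsigned long res;	/* required value of those bits */
	const char *msg;
};

static const struct page_state error_states[] = {
	{ PG_lru | PG_dirty, PG_lru | PG_dirty, "dirty LRU" },
	{ PG_lru,            PG_lru,            "clean LRU" },
	{ 0,                 0,                 "unknown page state" },	/* catch-all terminator */
};

int main(void)
{
	/* After isolation PG_lru has been cleared from the flags word... */
	unsigned long flags = PG_dirty;
	/* ...but it was sampled into lru_flag beforehand, as in the patch. */
	unsigned long lru_flag = PG_lru;
	const struct page_state *ps;

	/* Same shape as the matching loop in __memory_failure() after the patch:
	 * OR the saved LRU bit back in before comparing against each entry. */
	for (ps = error_states;; ps++) {
		if (((flags | lru_flag) & ps->mask) == ps->res) {
			printf("matched: %s\n", ps->msg);
			break;
		}
	}
	return 0;
}

Run as-is this prints "matched: dirty LRU"; without the saved lru_flag the same page would fall through to the catch-all entry, which is exactly the misclassification the lru_flag bookkeeping in the diff avoids.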
