diff options
author | Kirill A. Shutemov <kirill.shutemov@linux.intel.com> | 2013-07-31 16:53:39 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-07-31 17:41:03 -0400 |
commit | e180cf806a93ea1abbce47b245d25204ff557ce9 (patch) | |
tree | 5572b815fe0925f720624fb71d0d1bea81dd878b /mm/swap.c | |
parent | ef2a2cbdda7e9d084a85846770fcc844958881f6 (diff) |
thp, mm: avoid PageUnevictable on active/inactive lru lists
active/inactive lru lists can contain unevictable pages (i.e. ramfs pages
that have been placed on the LRU lists when first allocated), but these
pages must not have PageUnevictable set - otherwise shrink_[in]active_list
goes crazy:
kernel BUG at /home/space/kas/git/public/linux-next/mm/vmscan.c:1122!
1090 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1091 struct lruvec *lruvec, struct list_head *dst,
1092 unsigned long *nr_scanned, struct scan_control *sc,
1093 isolate_mode_t mode, enum lru_list lru)
1094 {
...
1108 switch (__isolate_lru_page(page, mode)) {
1109 case 0:
...
1116 case -EBUSY:
...
1121 default:
1122 BUG();
1123 }
1124 }
...
1130 }
__isolate_lru_page() returns -EINVAL for PageUnevictable(page).
For lru_add_page_tail(), it means we should not set PageUnevictable()
for tail pages unless we're sure that it will go to LRU_UNEVICTABLE.
Let's just copy PG_active and PG_unevictable from head page in
__split_huge_page_refcount(), it will simplify lru_add_page_tail().
This will fix one more bug in lru_add_page_tail(): if
page_evictable(page_tail) is false and PageLRU(page) is true, page_tail
will go to the same lru as page, but nobody cares to sync page_tail
active/inactive state with page. So we can end up with inactive page on
active lru. The patch will fix it as well since we copy PG_active from
head page.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/swap.c')
-rw-r--r-- | mm/swap.c | 20 |
1 files changed, 2 insertions, 18 deletions
@@ -770,8 +770,6 @@ EXPORT_SYMBOL(__pagevec_release); | |||
770 | void lru_add_page_tail(struct page *page, struct page *page_tail, | 770 | void lru_add_page_tail(struct page *page, struct page *page_tail, |
771 | struct lruvec *lruvec, struct list_head *list) | 771 | struct lruvec *lruvec, struct list_head *list) |
772 | { | 772 | { |
773 | int uninitialized_var(active); | ||
774 | enum lru_list lru; | ||
775 | const int file = 0; | 773 | const int file = 0; |
776 | 774 | ||
777 | VM_BUG_ON(!PageHead(page)); | 775 | VM_BUG_ON(!PageHead(page)); |
@@ -783,20 +781,6 @@ void lru_add_page_tail(struct page *page, struct page *page_tail, | |||
783 | if (!list) | 781 | if (!list) |
784 | SetPageLRU(page_tail); | 782 | SetPageLRU(page_tail); |
785 | 783 | ||
786 | if (page_evictable(page_tail)) { | ||
787 | if (PageActive(page)) { | ||
788 | SetPageActive(page_tail); | ||
789 | active = 1; | ||
790 | lru = LRU_ACTIVE_ANON; | ||
791 | } else { | ||
792 | active = 0; | ||
793 | lru = LRU_INACTIVE_ANON; | ||
794 | } | ||
795 | } else { | ||
796 | SetPageUnevictable(page_tail); | ||
797 | lru = LRU_UNEVICTABLE; | ||
798 | } | ||
799 | |||
800 | if (likely(PageLRU(page))) | 784 | if (likely(PageLRU(page))) |
801 | list_add_tail(&page_tail->lru, &page->lru); | 785 | list_add_tail(&page_tail->lru, &page->lru); |
802 | else if (list) { | 786 | else if (list) { |
@@ -812,13 +796,13 @@ void lru_add_page_tail(struct page *page, struct page *page_tail, | |||
812 | * Use the standard add function to put page_tail on the list, | 796 | * Use the standard add function to put page_tail on the list, |
813 | * but then correct its position so they all end up in order. | 797 | * but then correct its position so they all end up in order. |
814 | */ | 798 | */ |
815 | add_page_to_lru_list(page_tail, lruvec, lru); | 799 | add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail)); |
816 | list_head = page_tail->lru.prev; | 800 | list_head = page_tail->lru.prev; |
817 | list_move_tail(&page_tail->lru, list_head); | 801 | list_move_tail(&page_tail->lru, list_head); |
818 | } | 802 | } |
819 | 803 | ||
820 | if (!PageUnevictable(page)) | 804 | if (!PageUnevictable(page)) |
821 | update_page_reclaim_stat(lruvec, file, active); | 805 | update_page_reclaim_stat(lruvec, file, PageActive(page_tail)); |
822 | } | 806 | } |
823 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | 807 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
824 | 808 | ||