author    Vitaly Wool <vitalywool@gmail.com>  2018-05-11 19:01:46 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-05-11 20:28:45 -0400
commit    6098d7e136692f9c6e23ae362c62ec822343e4d5
tree      5d57adcfc8b45d6455ddd9c0e0526f2b8bb281a5
parent    ae646f0b9ca135b87bc73ff606ef996c3029780a
z3fold: fix reclaim lock-ups
Do not try to optimize in-page object layout while the page is under
reclaim. This fixes lock-ups on reclaim and improves reclaim performance
at the same time.

[akpm@linux-foundation.org: coding-style fixes]
Link: http://lkml.kernel.org/r/20180430125800.444cae9706489f412ad12621@gmail.com
Signed-off-by: Vitaly Wool <vitaly.vul@sony.com>
Reported-by: Guenter Roeck <linux@roeck-us.net>
Tested-by: Guenter Roeck <linux@roeck-us.net>
Cc: <Oleksiy.Avramchenko@sony.com>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/z3fold.c')
 mm/z3fold.c | 42 ++++++++++++++++++++++++++++++------------
 1 file changed, 30 insertions(+), 12 deletions(-)
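
In essence, the patch below introduces a handshake between the free path and the reclaim path through a new page-private bit: z3fold_reclaim_page() sets UNDER_RECLAIM before it starts evicting objects, and z3fold_free() backs off (neither freeing the page nor scheduling compaction) while the bit is set, leaving the final release to reclaim. The following is a minimal userspace model of that handshake, not the kernel code: page_model, model_free() and model_reclaim() are invented names standing in for the kernel's page->private bitops, and the per-page lock that guards the real test is elided.

    #include <stdatomic.h>
    #include <stdio.h>

    enum { UNDER_RECLAIM = 1 << 0, NEEDS_COMPACTING = 1 << 1 };

    struct page_model {
            atomic_uint flags;              /* stands in for page->private */
    };

    /* Free path: back off while reclaim owns the page (in the kernel this
     * test runs under the per-page lock, which this model elides). */
    static void model_free(struct page_model *p)
    {
            if (atomic_load(&p->flags) & UNDER_RECLAIM)
                    return;                 /* reclaim will finish the release */
            if (atomic_fetch_or(&p->flags, NEEDS_COMPACTING) & NEEDS_COMPACTING)
                    return;                 /* compaction already queued */
            puts("queue compaction work");
    }

    /* Reclaim path: claim the page, evict objects, then drop the claim. */
    static void model_reclaim(struct page_model *p)
    {
            atomic_fetch_or(&p->flags, UNDER_RECLAIM);
            puts("evict objects; frees arriving now back off");
            atomic_fetch_and(&p->flags, ~(unsigned int)UNDER_RECLAIM);
    }

    int main(void)
    {
            struct page_model p = { 0 };
            model_reclaim(&p);      /* a free during this window is a no-op */
            model_free(&p);         /* afterwards, compaction may be queued */
            return 0;
    }

Before the patch, a free racing with reclaim could queue compaction work on a page that reclaim was about to release, which is the lock-up the commit message refers to.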
diff --git a/mm/z3fold.c b/mm/z3fold.c
index c0bca6153b95..4b366d181f35 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -144,7 +144,8 @@ enum z3fold_page_flags {
 	PAGE_HEADLESS = 0,
 	MIDDLE_CHUNK_MAPPED,
 	NEEDS_COMPACTING,
-	PAGE_STALE
+	PAGE_STALE,
+	UNDER_RECLAIM
 };
 
 /*****************
@@ -173,6 +174,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page,
 	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
 	clear_bit(NEEDS_COMPACTING, &page->private);
 	clear_bit(PAGE_STALE, &page->private);
+	clear_bit(UNDER_RECLAIM, &page->private);
 
 	spin_lock_init(&zhdr->page_lock);
 	kref_init(&zhdr->refcount);
@@ -756,6 +758,10 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 		atomic64_dec(&pool->pages_nr);
 		return;
 	}
+	if (test_bit(UNDER_RECLAIM, &page->private)) {
+		z3fold_page_unlock(zhdr);
+		return;
+	}
 	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
 		z3fold_page_unlock(zhdr);
 		return;
@@ -840,6 +846,8 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 			kref_get(&zhdr->refcount);
 			list_del_init(&zhdr->buddy);
 			zhdr->cpu = -1;
+			set_bit(UNDER_RECLAIM, &page->private);
+			break;
 		}
 
 		list_del_init(&page->lru);
@@ -887,25 +895,35 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 			goto next;
 		}
 next:
-		spin_lock(&pool->lock);
 		if (test_bit(PAGE_HEADLESS, &page->private)) {
 			if (ret == 0) {
-				spin_unlock(&pool->lock);
 				free_z3fold_page(page);
 				return 0;
 			}
-		} else if (kref_put(&zhdr->refcount, release_z3fold_page)) {
-			atomic64_dec(&pool->pages_nr);
+			spin_lock(&pool->lock);
+			list_add(&page->lru, &pool->lru);
+			spin_unlock(&pool->lock);
+		} else {
+			z3fold_page_lock(zhdr);
+			clear_bit(UNDER_RECLAIM, &page->private);
+			if (kref_put(&zhdr->refcount,
+					release_z3fold_page_locked)) {
+				atomic64_dec(&pool->pages_nr);
+				return 0;
+			}
+			/*
+			 * if we are here, the page is still not completely
+			 * free. Take the global pool lock then to be able
+			 * to add it back to the lru list
+			 */
+			spin_lock(&pool->lock);
+			list_add(&page->lru, &pool->lru);
 			spin_unlock(&pool->lock);
-			return 0;
+			z3fold_page_unlock(zhdr);
 		}
 
-		/*
-		 * Add to the beginning of LRU.
-		 * Pool lock has to be kept here to ensure the page has
-		 * not already been released
-		 */
-		list_add(&page->lru, &pool->lru);
+		/* We started off locked to we need to lock the pool back */
+		spin_lock(&pool->lock);
 	}
 	spin_unlock(&pool->lock);
 	return -EAGAIN;
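
The reworked tail of z3fold_reclaim_page() also changes the locking discipline: pool->lock is no longer held across the final kref_put(). Instead, the page lock is taken first, UNDER_RECLAIM is cleared under it, the last reference is dropped via release_z3fold_page_locked(), and pool->lock is taken only for the brief LRU re-add. A hypothetical condensation of that ordering follows; finish_reclaim() is an invented name, the other identifiers are from mm/z3fold.c, and this is a restatement of the patched flow rather than standalone code.

    /* Hypothetical condensation; finish_reclaim() does not exist upstream. */
    static int finish_reclaim(struct z3fold_pool *pool,
                              struct z3fold_header *zhdr, struct page *page)
    {
            z3fold_page_lock(zhdr);                     /* 1. page lock first */
            clear_bit(UNDER_RECLAIM, &page->private);   /* 2. drop the claim  */
            if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
                    atomic64_dec(&pool->pages_nr);      /* 3. last ref: freed */
                    return 0;
            }

            spin_lock(&pool->lock);                     /* 4. pool lock only  */
            list_add(&page->lru, &pool->lru);           /*    for the LRU add */
            spin_unlock(&pool->lock);
            z3fold_page_unlock(zhdr);
            return -EAGAIN;                             /* page stays pooled  */
    }

Because the release callback is the _locked variant, the page lock held in step 1 is consumed by a successful kref_put(), so the function never unlocks a page it no longer owns.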