author	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2017-10-09 03:02:35 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2017-10-09 03:02:35 -0400
commit	1236d6bb6e19fc72ffc6bbcdeb1bfefe450e54ee (patch)
tree	47da3feee8e263e8c9352c85cf518e624be3c211 /mm/z3fold.c
parent	750b1a6894ecc9b178c6e3d0a1170122971b2036 (diff)
parent	8a5776a5f49812d29fe4b2d0a2d71675c3facf3f (diff)
Merge 4.14-rc4 into staging-next
We want the staging/iio fixes in here as well to handle merge issues.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'mm/z3fold.c')
-rw-r--r--	mm/z3fold.c	10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 486550df32be..b2ba2ba585f3 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -250,6 +250,7 @@ static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
 
 	WARN_ON(!list_empty(&zhdr->buddy));
 	set_bit(PAGE_STALE, &page->private);
+	clear_bit(NEEDS_COMPACTING, &page->private);
 	spin_lock(&pool->lock);
 	if (!list_empty(&page->lru))
 		list_del(&page->lru);
@@ -303,7 +304,6 @@ static void free_pages_work(struct work_struct *w)
 		list_del(&zhdr->buddy);
 		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
 			continue;
-		clear_bit(NEEDS_COMPACTING, &page->private);
 		spin_unlock(&pool->stale_lock);
 		cancel_work_sync(&zhdr->work);
 		free_z3fold_page(page);
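The two hunks above move the clearing of NEEDS_COMPACTING into __release_z3fold_page(), so the flag is dropped once at the single release point instead of by each consumer of the stale list. Below is a minimal userspace sketch of that flag discipline, using C11 atomics as a stand-in for the kernel's set_bit()/clear_bit() on page->private; the names (page_flags, release_page, needs_compacting) are illustrative, not the kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { PAGE_STALE, NEEDS_COMPACTING };

static _Atomic unsigned long page_flags;

/* Release path, mirroring the first hunk: mark the page stale and
 * clear the compaction hint in the same place, so the free worker
 * and the stale-list reuse path no longer clear it themselves. */
static void release_page(void)
{
	atomic_fetch_or(&page_flags, 1UL << PAGE_STALE);
	atomic_fetch_and(&page_flags, ~(1UL << NEEDS_COMPACTING));
}

/* Compaction side: queue work only for pages that still need it. */
static bool needs_compacting(void)
{
	unsigned long f = atomic_load(&page_flags);
	return (f & (1UL << NEEDS_COMPACTING)) && !(f & (1UL << PAGE_STALE));
}

int main(void)
{
	atomic_fetch_or(&page_flags, 1UL << NEEDS_COMPACTING);
	release_page();
	printf("needs compacting after release: %d\n", needs_compacting());
	return 0;
}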
@@ -624,10 +624,8 @@ lookup:
 	 * stale pages list. cancel_work_sync() can sleep so we must make
 	 * sure it won't be called in case we're in atomic context.
 	 */
-	if (zhdr && (can_sleep || !work_pending(&zhdr->work) ||
-	    !unlikely(work_busy(&zhdr->work)))) {
+	if (zhdr && (can_sleep || !work_pending(&zhdr->work))) {
 		list_del(&zhdr->buddy);
-		clear_bit(NEEDS_COMPACTING, &page->private);
 		spin_unlock(&pool->stale_lock);
 		if (can_sleep)
 			cancel_work_sync(&zhdr->work);
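As the retained comment notes, cancel_work_sync() can sleep, so this hunk reduces the reuse test to a single rule: a stale page may be taken in atomic context only if its free work was never queued, and the extra work_busy() test is dropped. A sketch of that rule follows; can_reuse_stale_page is a hypothetical name for illustration, not a kernel function.

#include <stdbool.h>

/* Decide whether a stale page can be reused, assuming the semantics in
 * the comment above: cancel_work_sync() may sleep, so in atomic context
 * we may only take pages whose free work has not been queued at all. */
static bool can_reuse_stale_page(bool can_sleep, bool work_pending)
{
	if (can_sleep)
		return true;	/* safe to block in cancel_work_sync() */
	return !work_pending;	/* atomic: never wait on a queued work item */
}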
@@ -875,16 +873,18 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 			goto next;
 		}
 next:
+		spin_lock(&pool->lock);
 		if (test_bit(PAGE_HEADLESS, &page->private)) {
 			if (ret == 0) {
+				spin_unlock(&pool->lock);
 				free_z3fold_page(page);
 				return 0;
 			}
 		} else if (kref_put(&zhdr->refcount, release_z3fold_page)) {
 			atomic64_dec(&pool->pages_nr);
+			spin_unlock(&pool->lock);
 			return 0;
 		}
-		spin_lock(&pool->lock);
 
 		/*
 		 * Add to the beginning of LRU.
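This last hunk hoists spin_lock(&pool->lock) to before the headless and refcount checks and drops it on each early return, so the page is no longer examined after it may already have been freed, and the lock is already held when the page is put back on the LRU. Below is a compressed model of that control flow using a pthread mutex; reclaim_tail and its parameters are illustrative stand-ins for the z3fold reclaim loop, not kernel code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

/* Tail of the reclaim loop after the patch: enter with pool->lock
 * taken up front and release it on every early return, instead of
 * only acquiring it after the page may already have been freed. */
static int reclaim_tail(bool headless, bool freed, bool last_ref)
{
	pthread_mutex_lock(&pool_lock);		/* moved before the checks */
	if (headless) {
		if (freed) {
			pthread_mutex_unlock(&pool_lock);
			/* free_z3fold_page(page); */
			return 0;
		}
	} else if (last_ref) {
		/* atomic64_dec(&pool->pages_nr); */
		pthread_mutex_unlock(&pool_lock);
		return 0;
	}
	/* lock still held here: safe to re-add the page to the LRU */
	pthread_mutex_unlock(&pool_lock);
	return 1;
}

int main(void)
{
	printf("headless, freed: %d\n", reclaim_tail(true, true, false));
	printf("fall through:    %d\n", reclaim_tail(false, false, false));
	return 0;
}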