author	Vitaly Wool <vitalywool@gmail.com>	2017-11-17 18:26:16 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-11-17 19:10:00 -0500
commit	5d03a6613957785e94af7a4a6212ad4af66aa5c2
tree	973176f96aa7dfc154525c1513dc920a2fee912e /mm/z3fold.c
parent	1334be3657dd02af0591d6d8adf0e6a60a7710a6
mm/z3fold.c: use kref to prevent page free/compact race
There is a race in the current z3fold implementation between do_compact() called in a work queue context and the page release procedure when the page's kref goes to 0. do_compact() may be waiting for the page lock, which is released by release_z3fold_page_locked right before the page is put onto the "stale" list, and then the page may be freed while do_compact() is still modifying its contents.

The mechanism currently implemented to handle that (checking the PAGE_STALE flag) is not reliable enough. Instead, use the page's kref counter to guarantee that the page is not released while its compaction is scheduled. It then becomes the compaction function's responsibility to decrease the counter and quit immediately if the page was actually freed.

Link: http://lkml.kernel.org/r/20171117092032.00ea56f42affbed19f4fcc6c@gmail.com
Signed-off-by: Vitaly Wool <vitaly.wool@sonymobile.com>
Cc: <Oleksiy.Avramchenko@sony.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
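The pattern the patch relies on is plain reference counting: whoever schedules the deferred compaction takes a reference with kref_get(), and do_compact_page() drops that reference first thing, bailing out if it turns out to be the last one. The snippet below is a minimal, self-contained userspace sketch of that idea; it is not kernel code and not part of this commit, and the struct/function names are made up for illustration only.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical object standing in for a z3fold page header. */
struct object {
	atomic_int refcount;
};

static struct object *object_create(void)
{
	struct object *obj = malloc(sizeof(*obj));

	atomic_init(&obj->refcount, 1);		/* creator's reference */
	return obj;
}

/* Analogue of kref_get(): pin the object before handing it to a worker. */
static void object_get(struct object *obj)
{
	atomic_fetch_add(&obj->refcount, 1);
}

/* Analogue of kref_put(): returns 1 (and frees) if this was the last reference. */
static int object_put(struct object *obj)
{
	if (atomic_fetch_sub(&obj->refcount, 1) == 1) {
		free(obj);
		return 1;
	}
	return 0;
}

/*
 * Analogue of do_compact_page(): drop the scheduler's reference first and
 * quit immediately if the object was already released in the meantime.
 */
static void worker(struct object *obj)
{
	if (object_put(obj)) {
		printf("object was freed before the worker ran, nothing to do\n");
		return;
	}
	printf("object still live, safe to work on it\n");
}

int main(void)
{
	struct object *obj = object_create();

	object_get(obj);	/* taken before "queueing" the worker */
	worker(obj);		/* the worker drops that reference itself */

	object_put(obj);	/* drop the creator's reference; frees the object */
	return 0;
}

In the patch itself, z3fold_free() plays the role of main() (the kref_get() calls added before do_compact_page() and queue_work_on()), and do_compact_page() plays the role of worker() (the early kref_put() that returns if the page was already released).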
Diffstat (limited to 'mm/z3fold.c')
-rw-r--r--	mm/z3fold.c	10
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/mm/z3fold.c b/mm/z3fold.c
index b2ba2ba585f3..39e19125d6a0 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -404,8 +404,7 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked)
 		WARN_ON(z3fold_page_trylock(zhdr));
 	else
 		z3fold_page_lock(zhdr);
-	if (test_bit(PAGE_STALE, &page->private) ||
-	    !test_and_clear_bit(NEEDS_COMPACTING, &page->private)) {
+	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
 		z3fold_page_unlock(zhdr);
 		return;
 	}
@@ -413,6 +412,11 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked)
 	list_del_init(&zhdr->buddy);
 	spin_unlock(&pool->lock);
 
+	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
+		atomic64_dec(&pool->pages_nr);
+		return;
+	}
+
 	z3fold_compact_page(zhdr);
 	unbuddied = get_cpu_ptr(pool->unbuddied);
 	fchunks = num_free_chunks(zhdr);
@@ -753,9 +757,11 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 			list_del_init(&zhdr->buddy);
 			spin_unlock(&pool->lock);
 			zhdr->cpu = -1;
+			kref_get(&zhdr->refcount);
 			do_compact_page(zhdr, true);
 			return;
 		}
+		kref_get(&zhdr->refcount);
 		queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
 		z3fold_page_unlock(zhdr);
 }