summaryrefslogtreecommitdiffstats
path: root/mm/z3fold.c
diff options
context:
space:
mode:
authorVitaly Wool <vitalywool@gmail.com>2017-10-03 19:15:06 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2017-10-03 20:54:24 -0400
commit3552935742e0d5f0dafd823736f45bdaa7ba672c (patch)
tree43039ba93ba9302c5ff1dc10716510d104f1d8c0 /mm/z3fold.c
parent6818600ff094ca255a7fe31838ad50c29656c3c5 (diff)
z3fold: fix stale list handling
Fix the situation when clear_bit() is called for page->private before the page pointer is actually assigned. While at it, remove work_busy() check because it is costly and does not give 100% guarantee anyway. Signed-off-by: Vitaly Wool <vitalywool@gmail.com> Cc: Dan Streetman <ddstreet@ieee.org> Cc: <Oleksiy.Avramchenko@sony.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/z3fold.c')
-rw-r--r--mm/z3fold.c6
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/mm/z3fold.c b/mm/z3fold.c
index b04fa3ba1bf2..b2ba2ba585f3 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -250,6 +250,7 @@ static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
250 250
251 WARN_ON(!list_empty(&zhdr->buddy)); 251 WARN_ON(!list_empty(&zhdr->buddy));
252 set_bit(PAGE_STALE, &page->private); 252 set_bit(PAGE_STALE, &page->private);
253 clear_bit(NEEDS_COMPACTING, &page->private);
253 spin_lock(&pool->lock); 254 spin_lock(&pool->lock);
254 if (!list_empty(&page->lru)) 255 if (!list_empty(&page->lru))
255 list_del(&page->lru); 256 list_del(&page->lru);
@@ -303,7 +304,6 @@ static void free_pages_work(struct work_struct *w)
303 list_del(&zhdr->buddy); 304 list_del(&zhdr->buddy);
304 if (WARN_ON(!test_bit(PAGE_STALE, &page->private))) 305 if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
305 continue; 306 continue;
306 clear_bit(NEEDS_COMPACTING, &page->private);
307 spin_unlock(&pool->stale_lock); 307 spin_unlock(&pool->stale_lock);
308 cancel_work_sync(&zhdr->work); 308 cancel_work_sync(&zhdr->work);
309 free_z3fold_page(page); 309 free_z3fold_page(page);
@@ -624,10 +624,8 @@ lookup:
624 * stale pages list. cancel_work_sync() can sleep so we must make 624 * stale pages list. cancel_work_sync() can sleep so we must make
625 * sure it won't be called in case we're in atomic context. 625 * sure it won't be called in case we're in atomic context.
626 */ 626 */
627 if (zhdr && (can_sleep || !work_pending(&zhdr->work) || 627 if (zhdr && (can_sleep || !work_pending(&zhdr->work))) {
628 !unlikely(work_busy(&zhdr->work)))) {
629 list_del(&zhdr->buddy); 628 list_del(&zhdr->buddy);
630 clear_bit(NEEDS_COMPACTING, &page->private);
631 spin_unlock(&pool->stale_lock); 629 spin_unlock(&pool->stale_lock);
632 if (can_sleep) 630 if (can_sleep)
633 cancel_work_sync(&zhdr->work); 631 cancel_work_sync(&zhdr->work);