author		Vitaly Wool <vitalywool@gmail.com>	2017-04-13 17:56:14 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-04-13 21:24:20 -0400
commit		76e32a2a084ed71b48179023cd8fdb3787c8a6ad
tree		d0a2cb8e6addcbdcb946192f27243eef0bb7ee3c /mm/z3fold.c
parent		2760078203a6b46b96307f4b06030ab0b801c97e
z3fold: fix page locking in z3fold_alloc()
Stress testing of the current z3fold implementation on an 8-core system
revealed that a z3fold page deleted from its unbuddied list in
z3fold_alloc() could be put back on another unbuddied list by
z3fold_free() while z3fold_alloc() was still processing it. This was
introduced by commit 5a27aa822 ("z3fold: add kref refcounting"), which
removed the special handling in z3fold_free() of a z3fold page not on
any list.
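The racy interleaving looks roughly like this (an illustrative two-CPU
timeline, not part of the original commit message):

/*
 * CPU 0: z3fold_alloc()                 CPU 1: z3fold_free()
 *
 * spin_lock(&pool->lock);
 * list_del(&zhdr->buddy);
 * spin_unlock(&pool->lock);
 *                                       spin_lock(&pool->lock);
 *                                       list_add(&zhdr->buddy, &pool->unbuddied[i]);
 *                                       spin_unlock(&pool->lock);
 * z3fold_page_lock(zhdr);
 * ... zhdr is already back on an unbuddied list, so another
 *     z3fold_alloc() can pick it up while CPU 0 still modifies it.
 */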
To fix this, take the z3fold page lock in z3fold_alloc() before the
pool lock is released. To avoid deadlock, we just try to lock the page
as soon as we get hold of it; if the trylock fails, we skip this page
and take the next one.
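A minimal userspace sketch of that locking pattern, using pthread
spinlocks and a simplified singly-linked stand-in for the unbuddied
list (grab_unbuddied and all types here are illustrative, not the
kernel code):

#include <pthread.h>
#include <stddef.h>

struct page_hdr {
	pthread_spinlock_t page_lock;
	struct page_hdr *next;		/* stand-in for the kernel list linkage */
};

struct pool {
	pthread_spinlock_t lock;
	struct page_hdr *unbuddied;	/* head of a simplified unbuddied list */
};

/*
 * Take a page off the unbuddied list. The page lock is try-acquired
 * while the pool lock is still held, so a concurrent free cannot
 * requeue the page in the window between list deletion and page
 * locking. Blocking on the page lock here could deadlock against a
 * path that takes the page lock first and the pool lock second,
 * hence trylock; on failure the page is simply skipped.
 */
static struct page_hdr *grab_unbuddied(struct pool *p)
{
	struct page_hdr *zhdr;

	pthread_spin_lock(&p->lock);
	zhdr = p->unbuddied;
	if (!zhdr || pthread_spin_trylock(&zhdr->page_lock) != 0) {
		pthread_spin_unlock(&p->lock);
		return NULL;		/* caller moves on to the next bucket */
	}
	p->unbuddied = zhdr->next;	/* delete from the unbuddied list */
	pthread_spin_unlock(&p->lock);
	return zhdr;			/* returned with its page lock held */
}

The essential ordering mirrors the patch below: the page is trylocked
inside the pool-locked region, so it can never be observed off-list
yet unlocked.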
Signed-off-by: Vitaly Wool <vitalywool@gmail.com>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: <Oleksiy.Avramchenko@sony.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/z3fold.c')
-rw-r--r--	mm/z3fold.c	9
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/mm/z3fold.c b/mm/z3fold.c
index f9492bccfd79..54f63c4a809a 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -185,6 +185,12 @@ static inline void z3fold_page_lock(struct z3fold_header *zhdr)
 	spin_lock(&zhdr->page_lock);
 }
 
+/* Try to lock a z3fold page */
+static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
+{
+	return spin_trylock(&zhdr->page_lock);
+}
+
 /* Unlock a z3fold page */
 static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
 {
@@ -385,7 +391,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
 		spin_lock(&pool->lock);
 		zhdr = list_first_entry_or_null(&pool->unbuddied[i],
 						struct z3fold_header, buddy);
-		if (!zhdr) {
+		if (!zhdr || !z3fold_page_trylock(zhdr)) {
 			spin_unlock(&pool->lock);
 			continue;
 		}
@@ -394,7 +400,6 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
 		spin_unlock(&pool->lock);
 
 		page = virt_to_page(zhdr);
-		z3fold_page_lock(zhdr);
 		if (zhdr->first_chunks == 0) {
 			if (zhdr->middle_chunks != 0 &&
 			    chunks >= zhdr->start_middle)