author    Linus Torvalds <torvalds@linux-foundation.org>  2018-12-31 12:46:39 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-12-31 12:46:39 -0500
commit    2a1a2c1a76cf89aaeb98a89179c2942c7882f68a
tree      07373c743c4f43085d7e3a89b83a92e1fa48c6c8
parent    9ab97aea85cca43a6aedc90e0d1feba91eebe1ad
parent    d8a706414af4827fc0b4b1c0c631c607351938b9
Merge tag 'dax-fix-4.21' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm
Pull dax fix from Dan Williams:

 "Clean up unnecessary usage of prepare_to_wait_exclusive().

  While I feel a bit silly sending a single-commit pull-request there is
  nothing else queued up for dax this cycle. This change has shipped in
  -next for multiple releases"

* tag 'dax-fix-4.21' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  dax: Use non-exclusive wait in wait_entry_unlocked()
 fs/dax.c | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)
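For background, the fix swaps an exclusive waitqueue wait for a non-exclusive one. The sketch below contrasts the two patterns using the generic <linux/wait.h> API. It is illustrative kernel-style code, not an excerpt from fs/dax.c; the waitqueue my_wq, the flag my_cond, and both function names are hypothetical stand-ins.

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);	/* hypothetical waitqueue */
static bool my_cond;			/* hypothetical condition */

static void wait_for_cond(void)
{
	DEFINE_WAIT(wait);

	/*
	 * Non-exclusive wait: wake_up(&my_wq) wakes every waiter on
	 * the queue, so a task that leaves without ever taking the
	 * resource cannot strand the waiters queued behind it.
	 */
	prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
	if (!my_cond)
		schedule();
	finish_wait(&my_wq, &wait);
}

static void wait_for_cond_exclusive(void)
{
	DEFINE_WAIT(wait);

	/*
	 * Exclusive wait: wake_up(&my_wq) wakes at most one exclusive
	 * waiter. A task that bails out without acquiring the resource
	 * (and therefore never issues the unlock-time wake-up) must
	 * pass the wakeup along itself; this hand-off is exactly the
	 * waitqueue_active()/__wake_up() block the patch removes.
	 */
	prepare_to_wait_exclusive(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
	if (!my_cond)
		schedule();
	finish_wait(&my_wq, &wait);
	if (waitqueue_active(&my_wq))
		wake_up(&my_wq);
}

As the new comment in the patch explains, wait_entry_unlocked() has no guarantee of ever retrieving an unlocked entry before the inode dies, so the exclusive pattern risks losing a wakeup entirely; the non-exclusive wait makes the compensating wake-up unnecessary.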
diff --git a/fs/dax.c b/fs/dax.c
index 262e14f29933..6959837cc465 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -246,18 +246,16 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
 	ewait.wait.func = wake_exceptional_entry_func;
 
 	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
-	prepare_to_wait_exclusive(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
+	/*
+	 * Unlike get_unlocked_entry() there is no guarantee that this
+	 * path ever successfully retrieves an unlocked entry before an
+	 * inode dies. Perform a non-exclusive wait in case this path
+	 * never successfully performs its own wake up.
+	 */
+	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
 	xas_unlock_irq(xas);
 	schedule();
 	finish_wait(wq, &ewait.wait);
-
-	/*
-	 * Entry lock waits are exclusive. Wake up the next waiter since
-	 * we aren't sure we will acquire the entry lock and thus wake
-	 * the next waiter up on unlock.
-	 */
-	if (waitqueue_active(wq))
-		__wake_up(wq, TASK_NORMAL, 1, &ewait.key);
 }
 
 static void put_unlocked_entry(struct xa_state *xas, void *entry)