diff options
-rw-r--r-- | fs/dax.c | 16 |
1 file changed, 7 insertions, 9 deletions
@@ -246,18 +246,16 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry) | |||
246 | ewait.wait.func = wake_exceptional_entry_func; | 246 | ewait.wait.func = wake_exceptional_entry_func; |
247 | 247 | ||
248 | wq = dax_entry_waitqueue(xas, entry, &ewait.key); | 248 | wq = dax_entry_waitqueue(xas, entry, &ewait.key); |
249 | prepare_to_wait_exclusive(wq, &ewait.wait, TASK_UNINTERRUPTIBLE); | 249 | /* |
250 | * Unlike get_unlocked_entry() there is no guarantee that this | ||
251 | * path ever successfully retrieves an unlocked entry before an | ||
252 | * inode dies. Perform a non-exclusive wait in case this path | ||
253 | * never successfully performs its own wake up. | ||
254 | */ | ||
255 | prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE); | ||
250 | xas_unlock_irq(xas); | 256 | xas_unlock_irq(xas); |
251 | schedule(); | 257 | schedule(); |
252 | finish_wait(wq, &ewait.wait); | 258 | finish_wait(wq, &ewait.wait); |
253 | |||
254 | /* | ||
255 | * Entry lock waits are exclusive. Wake up the next waiter since | ||
256 | * we aren't sure we will acquire the entry lock and thus wake | ||
257 | * the next waiter up on unlock. | ||
258 | */ | ||
259 | if (waitqueue_active(wq)) | ||
260 | __wake_up(wq, TASK_NORMAL, 1, &ewait.key); | ||
261 | } | 259 | } |
262 | 260 | ||
263 | static void put_unlocked_entry(struct xa_state *xas, void *entry) | 261 | static void put_unlocked_entry(struct xa_state *xas, void *entry) |