Diffstat (limited to 'fs/btrfs/inode.c')
 fs/btrfs/inode.c | 25 +++++++++++++++++++++++--
 1 file changed, 23 insertions(+), 2 deletions(-)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 25dcff71e451..41a5688ffdfe 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7430,7 +7430,26 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
 				 cached_state, GFP_NOFS);
 
 		if (ordered) {
-			btrfs_start_ordered_extent(inode, ordered, 1);
+			/*
+			 * If we are doing a DIO read and the ordered extent we
+			 * found is for a buffered write, we can not wait for it
+			 * to complete and retry, because if we do so we can
+			 * deadlock with concurrent buffered writes on page
+			 * locks. This happens only if our DIO read covers more
+			 * than one extent map, if at this point has already
+			 * created an ordered extent for a previous extent map
+			 * and locked its range in the inode's io tree, and a
+			 * concurrent write against that previous extent map's
+			 * range and this range started (we unlock the ranges
+			 * in the io tree only when the bios complete and
+			 * buffered writes always lock pages before attempting
+			 * to lock range in the io tree).
+			 */
+			if (writing ||
+			    test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
+				btrfs_start_ordered_extent(inode, ordered, 1);
+			else
+				ret = -ENOTBLK;
 			btrfs_put_ordered_extent(ordered);
 		} else {
 			/*
@@ -7447,9 +7466,11 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
 			 * that page.
 			 */
 			ret = -ENOTBLK;
-			break;
 		}
 
+		if (ret)
+			break;
+
 		cond_resched();
 	}
 
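
The deadlock the new comment describes is a circular wait between the io tree range lock held by the DIO read and the page locks held by a concurrent buffered write (completing the ordered extent the reader would wait on transitively needs those page locks). Below is a minimal userspace sketch of that cycle and of the back-off the patch introduces. It is an analogy only: pthread mutexes stand in for the kernel's io tree range lock and page lock, the wait on the ordered extent is collapsed into a direct wait on the "page lock", and the -ENOTBLK fallback (after which btrfs retries the read through the page cache) is modelled as a trylock that gives up instead of blocking. None of the names below are btrfs code.

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for the two lock classes involved in the circular wait. */
static pthread_mutex_t range_lock = PTHREAD_MUTEX_INITIALIZER; /* io tree range lock (analogy) */
static pthread_mutex_t page_lock  = PTHREAD_MUTEX_INITIALIZER; /* page lock held by a buffered write (analogy) */

/*
 * DIO read path: already holds the range lock. Blocking here on anything
 * that needs page_lock would complete the cycle, so it backs off instead,
 * mirroring the patch's "ret = -ENOTBLK" branch.
 */
static void *dio_read(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&range_lock);
	if (pthread_mutex_trylock(&page_lock) != 0) {
		printf("dio read: would deadlock, falling back to buffered read\n");
	} else {
		printf("dio read: no contention, direct IO proceeds\n");
		pthread_mutex_unlock(&page_lock);
	}
	pthread_mutex_unlock(&range_lock);
	return NULL;
}

/*
 * Buffered write path: takes page locks first, then the range lock,
 * which is the ordering the kernel comment points out.
 */
static void *buffered_write(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&page_lock);
	pthread_mutex_lock(&range_lock);
	printf("buffered write: holds page lock and range lock\n");
	pthread_mutex_unlock(&range_lock);
	pthread_mutex_unlock(&page_lock);
	return NULL;
}

int main(void)
{
	pthread_t reader, writer;

	pthread_create(&writer, NULL, buffered_write, NULL);
	pthread_create(&reader, NULL, dio_read, NULL);
	pthread_join(writer, NULL);
	pthread_join(reader, NULL);
	return 0;
}

Built with cc -pthread, the reader either proceeds or backs off, but never blocks while holding range_lock, which is the property the conditional around btrfs_start_ordered_extent() restores in the patch.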