Diffstat (limited to 'fs/xfs/xfs_buf.c')
-rw-r--r--	fs/xfs/xfs_buf.c | 85
1 file changed, 61 insertions, 24 deletions
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 933b7930b863..56d1614760cf 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -175,7 +175,7 @@ xfs_buf_get_maps(
 	bp->b_map_count = map_count;
 
 	if (map_count == 1) {
-		bp->b_maps = &bp->b_map;
+		bp->b_maps = &bp->__b_map;
 		return 0;
 	}
 
@@ -193,7 +193,7 @@ static void
 xfs_buf_free_maps(
 	struct xfs_buf	*bp)
 {
-	if (bp->b_maps != &bp->b_map) {
+	if (bp->b_maps != &bp->__b_map) {
 		kmem_free(bp->b_maps);
 		bp->b_maps = NULL;
 	}
@@ -377,8 +377,8 @@ xfs_buf_allocate_memory(
 	}
 
 use_alloc_page:
-	start = BBTOB(bp->b_map.bm_bn) >> PAGE_SHIFT;
-	end = (BBTOB(bp->b_map.bm_bn + bp->b_length) + PAGE_SIZE - 1)
+	start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
+	end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
 								>> PAGE_SHIFT;
 	page_count = end - start;
 	error = _xfs_buf_get_pages(bp, page_count, flags);
@@ -569,7 +569,9 @@ found:
 	 */
 	if (bp->b_flags & XBF_STALE) {
 		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
+		ASSERT(bp->b_iodone == NULL);
 		bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
+		bp->b_ops = NULL;
 	}
 
 	trace_xfs_buf_find(bp, flags, _RET_IP_);
@@ -638,7 +640,7 @@ _xfs_buf_read(
 	xfs_buf_flags_t		flags)
 {
 	ASSERT(!(flags & XBF_WRITE));
-	ASSERT(bp->b_map.bm_bn != XFS_BUF_DADDR_NULL);
+	ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
 
 	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
 	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
@@ -654,7 +656,8 @@ xfs_buf_read_map(
 	struct xfs_buftarg	*target,
 	struct xfs_buf_map	*map,
 	int			nmaps,
-	xfs_buf_flags_t		flags)
+	xfs_buf_flags_t		flags,
+	const struct xfs_buf_ops *ops)
 {
 	struct xfs_buf		*bp;
 
@@ -666,6 +669,7 @@ xfs_buf_read_map(
 
 	if (!XFS_BUF_ISDONE(bp)) {
 		XFS_STATS_INC(xb_get_read);
+		bp->b_ops = ops;
 		_xfs_buf_read(bp, flags);
 	} else if (flags & XBF_ASYNC) {
 		/*
@@ -691,13 +695,14 @@ void
 xfs_buf_readahead_map(
 	struct xfs_buftarg	*target,
 	struct xfs_buf_map	*map,
-	int			nmaps)
+	int			nmaps,
+	const struct xfs_buf_ops *ops)
 {
 	if (bdi_read_congested(target->bt_bdi))
 		return;
 
 	xfs_buf_read_map(target, map, nmaps,
-		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
+		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
 }
 
 /*
@@ -709,10 +714,10 @@ xfs_buf_read_uncached(
 	struct xfs_buftarg	*target,
 	xfs_daddr_t		daddr,
 	size_t			numblks,
-	int			flags)
+	int			flags,
+	const struct xfs_buf_ops *ops)
 {
-	xfs_buf_t		*bp;
-	int			error;
+	struct xfs_buf		*bp;
 
 	bp = xfs_buf_get_uncached(target, numblks, flags);
 	if (!bp)
@@ -723,13 +728,10 @@ xfs_buf_read_uncached(
 	bp->b_bn = daddr;
 	bp->b_maps[0].bm_bn = daddr;
 	bp->b_flags |= XBF_READ;
+	bp->b_ops = ops;
 
 	xfsbdstrat(target->bt_mount, bp);
-	error = xfs_buf_iowait(bp);
-	if (error) {
-		xfs_buf_relse(bp);
-		return NULL;
-	}
+	xfs_buf_iowait(bp);
 	return bp;
 }
 
@@ -999,27 +1001,37 @@ STATIC void
 xfs_buf_iodone_work(
 	struct work_struct	*work)
 {
-	xfs_buf_t		*bp =
+	struct xfs_buf		*bp =
 		container_of(work, xfs_buf_t, b_iodone_work);
+	bool			read = !!(bp->b_flags & XBF_READ);
+
+	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
+	if (read && bp->b_ops)
+		bp->b_ops->verify_read(bp);
 
 	if (bp->b_iodone)
 		(*(bp->b_iodone))(bp);
 	else if (bp->b_flags & XBF_ASYNC)
 		xfs_buf_relse(bp);
+	else {
+		ASSERT(read && bp->b_ops);
+		complete(&bp->b_iowait);
+	}
 }
 
 void
 xfs_buf_ioend(
-	xfs_buf_t		*bp,
+	struct xfs_buf		*bp,
 	int			schedule)
 {
+	bool			read = !!(bp->b_flags & XBF_READ);
+
 	trace_xfs_buf_iodone(bp, _RET_IP_);
 
-	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
 	if (bp->b_error == 0)
 		bp->b_flags |= XBF_DONE;
 
-	if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
+	if (bp->b_iodone || (read && bp->b_ops) || (bp->b_flags & XBF_ASYNC)) {
 		if (schedule) {
 			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
 			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
@@ -1027,6 +1039,7 @@ xfs_buf_ioend(
 			xfs_buf_iodone_work(&bp->b_iodone_work);
 		}
 	} else {
+		bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
 		complete(&bp->b_iowait);
 	}
 }
@@ -1197,9 +1210,14 @@ xfs_buf_bio_end_io(
 {
 	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;
 
-	xfs_buf_ioerror(bp, -error);
+	/*
+	 * don't overwrite existing errors - otherwise we can lose errors on
+	 * buffers that require multiple bios to complete.
+	 */
+	if (!bp->b_error)
+		xfs_buf_ioerror(bp, -error);
 
-	if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
+	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
 		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
 
 	_xfs_buf_ioend(bp, 1);
@@ -1279,6 +1297,11 @@ next_chunk:
 		if (size)
 			goto next_chunk;
 	} else {
+		/*
+		 * This is guaranteed not to be the last io reference count
+		 * because the caller (xfs_buf_iorequest) holds a count itself.
+		 */
+		atomic_dec(&bp->b_io_remaining);
 		xfs_buf_ioerror(bp, EIO);
 		bio_put(bio);
 	}
@@ -1304,6 +1327,20 @@ _xfs_buf_ioapply(
 			rw |= REQ_FUA;
 		if (bp->b_flags & XBF_FLUSH)
 			rw |= REQ_FLUSH;
+
+		/*
+		 * Run the write verifier callback function if it exists. If
+		 * this function fails it will mark the buffer with an error and
+		 * the IO should not be dispatched.
+		 */
+		if (bp->b_ops) {
+			bp->b_ops->verify_write(bp);
+			if (bp->b_error) {
+				xfs_force_shutdown(bp->b_target->bt_mount,
+						   SHUTDOWN_CORRUPT_INCORE);
+				return;
+			}
+		}
 	} else if (bp->b_flags & XBF_READ_AHEAD) {
 		rw = READA;
 	} else {
@@ -1672,7 +1709,7 @@ xfs_buf_cmp(
 	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
 	xfs_daddr_t		diff;
 
-	diff = ap->b_map.bm_bn - bp->b_map.bm_bn;
+	diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
 	if (diff < 0)
 		return -1;
 	if (diff > 0)