author	Linus Torvalds <torvalds@linux-foundation.org>	2017-07-03 13:34:51 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-07-03 13:34:51 -0400
commit	c6b1e36c8fa04a6680c44fe0321d0370400e90b6 (patch)
tree	5110f0639bfa803baa8d213cb21efe37beeaf742 /fs/btrfs/disk-io.c
parent	81e3e044897b0875a52953b3fb6241a33428e4f9 (diff)
parent	a84ebb837b419787c2ece74efa566c998929cead (diff)
Merge branch 'for-4.13/block' of git://git.kernel.dk/linux-block
Pull core block/IO updates from Jens Axboe:
 "This is the main pull request for the block layer for 4.13. Not a huge
  round in terms of features, but there's a lot of churn related to some
  core cleanups.

  Note this depends on the UUID tree pull request, that Christoph
  already sent out.

  This pull request contains:

   - A series from Christoph, unifying the error/stats codes in the
     block layer. We now use blk_status_t everywhere, instead of using
     different schemes for different places.

   - Also from Christoph, some cleanups around request allocation and
     IO scheduler interactions in blk-mq.

   - And yet another series from Christoph, cleaning up how we handle
     and do bounce buffering in the block layer.

   - A blk-mq debugfs series from Bart, further improving on the
     support we have for exporting internal information to aid
     debugging IO hangs or stalls.

   - Also from Bart, a series that cleans up the request initialization
     differences across types of devices.

   - A series from Goldwyn Rodrigues, allowing the block layer to
     return failure if we will block and the user asked for
     non-blocking.

   - Patch from Hannes for supporting setting loop devices block size
     to that of the underlying device.

   - Two series of patches from Javier, fixing various issues with
     lightnvm, particular around pblk.

   - A series from me, adding support for write hints. This comes with
     NVMe support as well, so applications can help guide data
     placement on flash to improve performance, latencies, and write
     amplification (see the sketch after this message).

   - A series from Ming, improving and hardening blk-mq support for
     stopping/starting and quiescing hardware queues.

   - Two pull requests for NVMe updates. Nothing major on the feature
     side, but lots of cleanups and bug fixes. From the usual crew.

   - A series from Neil Brown, greatly improving the bio rescue set
     support. Most notably, this kills the bio rescue work queues, if
     we don't really need them.

   - Lots of other little bug fixes that are all over the place"

* 'for-4.13/block' of git://git.kernel.dk/linux-block: (217 commits)
  lightnvm: pblk: set line bitmap check under debug
  lightnvm: pblk: verify that cache read is still valid
  lightnvm: pblk: add initialization check
  lightnvm: pblk: remove target using async. I/Os
  lightnvm: pblk: use vmalloc for GC data buffer
  lightnvm: pblk: use right metadata buffer for recovery
  lightnvm: pblk: schedule if data is not ready
  lightnvm: pblk: remove unused return variable
  lightnvm: pblk: fix double-free on pblk init
  lightnvm: pblk: fix bad le64 assignations
  nvme: Makefile: remove dead build rule
  blk-mq: map all HWQ also in hyperthreaded system
  nvmet-rdma: register ib_client to not deadlock in device removal
  nvme_fc: fix error recovery on link down.
  nvmet_fc: fix crashes on bad opcodes
  nvme_fc: Fix crash when nvme controller connection fails.
  nvme_fc: replace ioabort msleep loop with completion
  nvme_fc: fix double calls to nvme_cleanup_cmd()
  nvme-fabrics: verify that a controller returns the correct NQN
  nvme: simplify nvme_dev_attrs_are_visible
  ...
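A minimal userspace sketch of the write-hint interface mentioned in the message above (a hedged illustration, not part of this commit: it assumes the F_SET_RW_HINT fcntl and RWH_WRITE_LIFE_* values from the 4.13 uapi headers, with fallback defines for older libc headers, and the file name is arbitrary):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>

#ifndef F_SET_RW_HINT
#define F_SET_RW_HINT		(1024 + 12)	/* F_LINUX_SPECIFIC_BASE + 12 */
#endif
#ifndef RWH_WRITE_LIFE_SHORT
#define RWH_WRITE_LIFE_SHORT	2
#endif

int main(void)
{
	uint64_t hint = RWH_WRITE_LIFE_SHORT;
	int fd = open("scratch.dat", O_WRONLY | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/*
	 * Advise the kernel that data written to this file is expected to be
	 * short-lived; with NVMe stream support the hint can steer placement.
	 */
	if (fcntl(fd, F_SET_RW_HINT, &hint) < 0)
		perror("F_SET_RW_HINT");
	return 0;
}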
Diffstat (limited to 'fs/btrfs/disk-io.c')
-rw-r--r--	fs/btrfs/disk-io.c	75
1 file changed, 37 insertions(+), 38 deletions(-)
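The hunks below convert btrfs's disk-io paths from int/errno error handling to the block layer's new blk_status_t scheme. As a rough illustration of the pattern (a hedged sketch, not code from this commit: the example_* names are hypothetical, while errno_to_blk_status(), blk_status_to_errno(), bio->bi_status and the BLK_STS_* constants are the 4.13 block-layer interfaces):

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Submission-side helper: internal errnos are translated exactly once. */
static blk_status_t example_csum_bio(struct bio *bio)
{
	int ret = 0;

	/* ... checksum each segment, setting ret to a negative errno on failure ... */

	return errno_to_blk_status(ret);	/* 0 maps to BLK_STS_OK */
}

/* Completion side: handlers now read bio->bi_status instead of bio->bi_error. */
static void example_end_io(struct bio *bio)
{
	if (bio->bi_status)
		pr_warn("I/O failed: %d\n", blk_status_to_errno(bio->bi_status));
}

Allocation failures in submission paths now return BLK_STS_RESOURCE rather than -ENOMEM, which is exactly the substitution visible in the diff below.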
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 5f678dcb20e6..6036d15b47b8 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -87,7 +87,7 @@ struct btrfs_end_io_wq {
 	bio_end_io_t *end_io;
 	void *private;
 	struct btrfs_fs_info *info;
-	int error;
+	blk_status_t status;
 	enum btrfs_wq_endio_type metadata;
 	struct list_head list;
 	struct btrfs_work work;
@@ -131,7 +131,7 @@ struct async_submit_bio {
 	 */
 	u64 bio_offset;
 	struct btrfs_work work;
-	int error;
+	blk_status_t status;
 };
 
 /*
@@ -799,7 +799,7 @@ static void end_workqueue_bio(struct bio *bio)
 	btrfs_work_func_t func;
 
 	fs_info = end_io_wq->info;
-	end_io_wq->error = bio->bi_error;
+	end_io_wq->status = bio->bi_status;
 
 	if (bio_op(bio) == REQ_OP_WRITE) {
 		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
@@ -836,19 +836,19 @@ static void end_workqueue_bio(struct bio *bio)
 	btrfs_queue_work(wq, &end_io_wq->work);
 }
 
-int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
+blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
 			enum btrfs_wq_endio_type metadata)
 {
 	struct btrfs_end_io_wq *end_io_wq;
 
 	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
 	if (!end_io_wq)
-		return -ENOMEM;
+		return BLK_STS_RESOURCE;
 
 	end_io_wq->private = bio->bi_private;
 	end_io_wq->end_io = bio->bi_end_io;
 	end_io_wq->info = info;
-	end_io_wq->error = 0;
+	end_io_wq->status = 0;
 	end_io_wq->bio = bio;
 	end_io_wq->metadata = metadata;
 
@@ -868,14 +868,14 @@ unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
 static void run_one_async_start(struct btrfs_work *work)
 {
 	struct async_submit_bio *async;
-	int ret;
+	blk_status_t ret;
 
 	async = container_of(work, struct async_submit_bio, work);
 	ret = async->submit_bio_start(async->inode, async->bio,
 				      async->mirror_num, async->bio_flags,
 				      async->bio_offset);
 	if (ret)
-		async->error = ret;
+		async->status = ret;
 }
 
 static void run_one_async_done(struct btrfs_work *work)
@@ -898,8 +898,8 @@ static void run_one_async_done(struct btrfs_work *work)
 	wake_up(&fs_info->async_submit_wait);
 
 	/* If an error occurred we just want to clean up the bio and move on */
-	if (async->error) {
-		async->bio->bi_error = async->error;
+	if (async->status) {
+		async->bio->bi_status = async->status;
 		bio_endio(async->bio);
 		return;
 	}
@@ -916,18 +916,17 @@ static void run_one_async_free(struct btrfs_work *work)
 	kfree(async);
 }
 
-int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
-			struct bio *bio, int mirror_num,
-			unsigned long bio_flags,
-			u64 bio_offset,
-			extent_submit_bio_hook_t *submit_bio_start,
-			extent_submit_bio_hook_t *submit_bio_done)
+blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info,
+		struct inode *inode, struct bio *bio, int mirror_num,
+		unsigned long bio_flags, u64 bio_offset,
+		extent_submit_bio_hook_t *submit_bio_start,
+		extent_submit_bio_hook_t *submit_bio_done)
 {
 	struct async_submit_bio *async;
 
 	async = kmalloc(sizeof(*async), GFP_NOFS);
 	if (!async)
-		return -ENOMEM;
+		return BLK_STS_RESOURCE;
 
 	async->inode = inode;
 	async->bio = bio;
@@ -941,7 +940,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 	async->bio_flags = bio_flags;
 	async->bio_offset = bio_offset;
 
-	async->error = 0;
+	async->status = 0;
 
 	atomic_inc(&fs_info->nr_async_submits);
 
@@ -959,7 +958,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 	return 0;
 }
 
-static int btree_csum_one_bio(struct bio *bio)
+static blk_status_t btree_csum_one_bio(struct bio *bio)
 {
 	struct bio_vec *bvec;
 	struct btrfs_root *root;
@@ -972,12 +971,12 @@ static int btree_csum_one_bio(struct bio *bio)
 			break;
 	}
 
-	return ret;
+	return errno_to_blk_status(ret);
 }
 
-static int __btree_submit_bio_start(struct inode *inode, struct bio *bio,
-				    int mirror_num, unsigned long bio_flags,
-				    u64 bio_offset)
+static blk_status_t __btree_submit_bio_start(struct inode *inode,
+		struct bio *bio, int mirror_num, unsigned long bio_flags,
+		u64 bio_offset)
 {
 	/*
 	 * when we're called for a write, we're already in the async
@@ -986,11 +985,11 @@ static int __btree_submit_bio_start(struct inode *inode, struct bio *bio,
 	return btree_csum_one_bio(bio);
 }
 
-static int __btree_submit_bio_done(struct inode *inode, struct bio *bio,
-				   int mirror_num, unsigned long bio_flags,
-				   u64 bio_offset)
+static blk_status_t __btree_submit_bio_done(struct inode *inode,
+		struct bio *bio, int mirror_num, unsigned long bio_flags,
+		u64 bio_offset)
 {
-	int ret;
+	blk_status_t ret;
 
 	/*
 	 * when we're called for a write, we're already in the async
@@ -998,7 +997,7 @@ static int __btree_submit_bio_done(struct inode *inode, struct bio *bio,
 	 */
 	ret = btrfs_map_bio(btrfs_sb(inode->i_sb), bio, mirror_num, 1);
 	if (ret) {
-		bio->bi_error = ret;
+		bio->bi_status = ret;
 		bio_endio(bio);
 	}
 	return ret;
@@ -1015,13 +1014,13 @@ static int check_async_write(unsigned long bio_flags)
 	return 1;
 }
 
-static int btree_submit_bio_hook(struct inode *inode, struct bio *bio,
+static blk_status_t btree_submit_bio_hook(struct inode *inode, struct bio *bio,
 				 int mirror_num, unsigned long bio_flags,
 				 u64 bio_offset)
 {
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	int async = check_async_write(bio_flags);
-	int ret;
+	blk_status_t ret;
 
 	if (bio_op(bio) != REQ_OP_WRITE) {
 		/*
@@ -1054,7 +1053,7 @@ static int btree_submit_bio_hook(struct inode *inode, struct bio *bio,
 	return 0;
 
 out_w_error:
-	bio->bi_error = ret;
+	bio->bi_status = ret;
 	bio_endio(bio);
 	return ret;
 }
@@ -1820,7 +1819,7 @@ static void end_workqueue_fn(struct btrfs_work *work)
 	end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
 	bio = end_io_wq->bio;
 
-	bio->bi_error = end_io_wq->error;
+	bio->bi_status = end_io_wq->status;
 	bio->bi_private = end_io_wq->private;
 	bio->bi_end_io = end_io_wq->end_io;
 	kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
@@ -3497,11 +3496,11 @@ static void btrfs_end_empty_barrier(struct bio *bio)
  * any device where the flush fails with eopnotsupp are flagged as not-barrier
  * capable
  */
-static int write_dev_flush(struct btrfs_device *device, int wait)
+static blk_status_t write_dev_flush(struct btrfs_device *device, int wait)
 {
 	struct request_queue *q = bdev_get_queue(device->bdev);
 	struct bio *bio;
-	int ret = 0;
+	blk_status_t ret = 0;
 
 	if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
 		return 0;
@@ -3513,8 +3512,8 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
 
 		wait_for_completion(&device->flush_wait);
 
-		if (bio->bi_error) {
-			ret = bio->bi_error;
+		if (bio->bi_status) {
+			ret = bio->bi_status;
 			btrfs_dev_stat_inc_and_print(device,
 				BTRFS_DEV_STAT_FLUSH_ERRS);
 		}
@@ -3533,7 +3532,7 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
 	device->flush_bio = NULL;
 	bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
 	if (!bio)
-		return -ENOMEM;
+		return BLK_STS_RESOURCE;
 
 	bio->bi_end_io = btrfs_end_empty_barrier;
 	bio->bi_bdev = device->bdev;
@@ -3558,7 +3557,7 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
 	struct btrfs_device *dev;
 	int errors_send = 0;
 	int errors_wait = 0;
-	int ret;
+	blk_status_t ret;
 
 	/* send down all the barriers */
 	head = &info->fs_devices->devices;