author     Josh Durgin <josh.durgin@inktank.com>    2013-08-26 20:55:38 -0400
committer  Sage Weil <sage@inktank.com>             2013-09-04 01:06:10 -0400
commit     17c1cc1d9293a568a00545469078e29555cc7f39
tree       f20e923740a3b20fb449bff813ca7438ce00b1e5 /drivers/block
parent     7d6e1f5461d0c16eb6aa8d226976995856d85e4e
rbd: fix I/O error propagation for reads
When a request returns an error, the driver needs to report the entire
extent of the request as completed. Writes already did this, since
they always set xferred = length, but reads were skipping that step if
an error other than -ENOENT occurred. Instead, rbd would end up
passing 0 xferred to blk_end_request(), which would always report
needing more data. This resulted in an assert failing when more data
was required by the block layer, but all the object requests were
done:
[ 1868.719077] rbd: obj_request read result -108 xferred 0
[ 1868.719077]
[ 1868.719518] end_request: I/O error, dev rbd1, sector 0
[ 1868.719739]
[ 1868.719739] Assertion failure in rbd_img_obj_callback() at line 1736:
[ 1868.719739]
[ 1868.719739] rbd_assert(more ^ (which == img_request->obj_request_count));
Without this assert, reads that hit errors would hang forever, since
the block layer considered them incomplete.
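For reference, blk_end_request() only retires as many bytes as the caller reports transferred, and it returns true while part of the request is still outstanding. The sketch below (written as if inside drivers/block/rbd.c; rbd_complete_whole_request() is a hypothetical helper, not code from the driver) shows the contract the read callback has to honor:

#include <linux/blkdev.h>

/*
 * Hypothetical helper: on any terminal outcome, success or error, the
 * full extent of the request must be reported. If fewer bytes are
 * reported, blk_end_request() returns true ("more bytes outstanding")
 * and the block layer never retires the request.
 */
static void rbd_complete_whole_request(struct request *rq, int result,
				       unsigned int length)
{
	bool more = blk_end_request(rq, result, length);

	/* Reporting the whole length leaves nothing outstanding. */
	WARN_ON(more);
}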
Fixes: http://tracker.ceph.com/issues/5647
CC: stable@vger.kernel.org # v3.10
Signed-off-by: Josh Durgin <josh.durgin@inktank.com>
Reviewed-by: Alex Elder <alex.elder@linaro.org>
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/rbd.c | 14
1 file changed, 7 insertions, 7 deletions
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 0d669ae80d61..f8fd7d3c13ba 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1557,11 +1557,12 @@ rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
 		obj_request, obj_request->img_request, obj_request->result,
 		xferred, length);
 	/*
-	 * ENOENT means a hole in the image. We zero-fill the
-	 * entire length of the request. A short read also implies
-	 * zero-fill to the end of the request. Either way we
-	 * update the xferred count to indicate the whole request
-	 * was satisfied.
+	 * ENOENT means a hole in the image. We zero-fill the entire
+	 * length of the request. A short read also implies zero-fill
+	 * to the end of the request. An error requires the whole
+	 * length of the request to be reported finished with an error
+	 * to the block layer. In each case we update the xferred
+	 * count to indicate the whole request was satisfied.
 	 */
 	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
 	if (obj_request->result == -ENOENT) {
@@ -1570,14 +1571,13 @@ rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
 		else
 			zero_pages(obj_request->pages, 0, length);
 		obj_request->result = 0;
-		obj_request->xferred = length;
 	} else if (xferred < length && !obj_request->result) {
 		if (obj_request->type == OBJ_REQUEST_BIO)
 			zero_bio_chain(obj_request->bio_list, xferred);
 		else
 			zero_pages(obj_request->pages, xferred, length);
-		obj_request->xferred = length;
 	}
+	obj_request->xferred = length;
 	obj_request_done_set(obj_request);
 }
 
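To see why a zero xferred count trips the assertion quoted in the log, a simplified sketch of the image-request completion loop helps. It is written as if inside drivers/block/rbd.c, with an assumed iteration and assumed field names rather than the exact v3.10 source:

/*
 * Simplified sketch, not the exact source: each finished object request
 * hands its xferred count to blk_end_request(). With xferred left at 0
 * after a read error, "more" stays true even once the last object
 * request has completed, so the XOR assertion below fails exactly as in
 * the log above.
 */
static void rbd_img_obj_callback_sketch(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	u32 which = 0;
	bool more = true;

	list_for_each_entry(obj_request, &img_request->obj_requests, links) {
		more = blk_end_request(img_request->rq, obj_request->result,
				       obj_request->xferred);
		which++;
	}

	/* "more" must be false exactly when every object request is done. */
	rbd_assert(more ^ (which == img_request->obj_request_count));
}

With the fix above, obj_request->xferred equals the request length even on error, so the final blk_end_request() call returns false and the assertion holds.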