 drivers/block/rbd.c | 47 ++++++++++++++++++++---------------------------
 1 file changed, 20 insertions(+), 27 deletions(-)
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 4f5a647dbfd2..22085e86a409 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1274,42 +1274,30 @@ static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
 	complete_all(&obj_request->completion);
 }
 
-static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request,
-					struct ceph_osd_op *op)
+static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
 {
 	dout("%s: obj %p\n", __func__, obj_request);
 	obj_request_done_set(obj_request);
 }
 
-static void rbd_osd_read_callback(struct rbd_obj_request *obj_request,
-					struct ceph_osd_op *op)
+static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
 {
-	u64 xferred;
 
-	/*
-	 * We support a 64-bit length, but ultimately it has to be
-	 * passed to blk_end_request(), which takes an unsigned int.
-	 */
-	xferred = le64_to_cpu(op->extent.length);
-	rbd_assert(xferred < (u64) UINT_MAX);
 	dout("%s: obj %p result %d %llu/%llu\n", __func__, obj_request,
-		obj_request->result, xferred, obj_request->length);
+		obj_request->result, obj_request->xferred, obj_request->length);
 	if (obj_request->result == (s32) -ENOENT) {
 		zero_bio_chain(obj_request->bio_list, 0);
 		obj_request->result = 0;
-	} else if (xferred < obj_request->length && !obj_request->result) {
-		zero_bio_chain(obj_request->bio_list, xferred);
-		xferred = obj_request->length;
+	} else if (obj_request->xferred < obj_request->length &&
+			!obj_request->result) {
+		zero_bio_chain(obj_request->bio_list, obj_request->xferred);
+		obj_request->xferred = obj_request->length;
 	}
-	obj_request->xferred = xferred;
 	obj_request_done_set(obj_request);
 }
 
-static void rbd_osd_write_callback(struct rbd_obj_request *obj_request,
-					struct ceph_osd_op *op)
+static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
 {
-
-	obj_request->xferred = le64_to_cpu(op->extent.length);
 	dout("%s: obj %p result %d %llu/%llu\n", __func__, obj_request,
 		obj_request->result, obj_request->xferred, obj_request->length);
 
@@ -1331,8 +1319,7 @@ static void rbd_osd_write_callback(struct rbd_obj_request *obj_request,
  * For a simple stat call there's nothing to do. We'll do more if
  * this is part of a write sequence for a layered image.
  */
-static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request,
-					struct ceph_osd_op *op)
+static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
 {
 	dout("%s: obj %p\n", __func__, obj_request);
 	obj_request_done_set(obj_request);
@@ -1352,7 +1339,6 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
 	rbd_assert(!!obj_request->img_request ^
 			(obj_request->which == BAD_WHICH));
 
-	obj_request->xferred = le32_to_cpu(msg->hdr.data_len);
 	reply_head = msg->front.iov_base;
 	obj_request->result = (s32) le32_to_cpu(reply_head->result);
 	obj_request->version = le64_to_cpu(osd_req->r_reassert_version.version);
@@ -1360,22 +1346,29 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
 	num_ops = le32_to_cpu(reply_head->num_ops);
 	WARN_ON(num_ops != 1);	/* For now */
 
+	/*
+	 * We support a 64-bit length, but ultimately it has to be
+	 * passed to blk_end_request(), which takes an unsigned int.
+	 */
 	op = &reply_head->ops[0];
+	obj_request->xferred = le64_to_cpu(op->extent.length);
+	rbd_assert(obj_request->xferred < (u64) UINT_MAX);
+
 	opcode = le16_to_cpu(op->op);
 	switch (opcode) {
 	case CEPH_OSD_OP_READ:
-		rbd_osd_read_callback(obj_request, op);
+		rbd_osd_read_callback(obj_request);
 		break;
 	case CEPH_OSD_OP_WRITE:
-		rbd_osd_write_callback(obj_request, op);
+		rbd_osd_write_callback(obj_request);
 		break;
 	case CEPH_OSD_OP_STAT:
-		rbd_osd_stat_callback(obj_request, op);
+		rbd_osd_stat_callback(obj_request);
 		break;
 	case CEPH_OSD_OP_CALL:
 	case CEPH_OSD_OP_NOTIFY_ACK:
 	case CEPH_OSD_OP_WATCH:
-		rbd_osd_trivial_callback(obj_request, op);
+		rbd_osd_trivial_callback(obj_request);
 		break;
 	default:
 		rbd_warn(NULL, "%s: unsupported op %hu\n",