author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>   2007-07-16 02:52:15 -0400
committer  Jens Axboe <jens.axboe@oracle.com>                2007-07-16 02:52:47 -0400
commit     2c9ecdf40af0554ee9a2b1cbbbbdbc77f90a40e1
tree       6cbf0efc9c2da5a4efdab62f78db968eb3555ef0 /block/bsg.c
parent     abae1fde63fcdd2a3abaa0d7930938d8326f83d2
bsg: add bidi support
bsg uses the rq->next_rq pointer for a bidi request.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
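For context (not part of the patch): a minimal userspace sketch of the kind of bidirectional command this enables, sent through bsg's SG_IO ioctl with both a data-out and a data-in buffer filled in. The device node path, CDB bytes, and buffer sizes are placeholders, and the header locations are an assumption (struct sg_io_v4 via <linux/bsg.h>, SG_IO via <scsi/sg.h>); per the bsg_map_hdr() hunk below, the target's queue must have QUEUE_FLAG_BIDI set or the request fails with EOPNOTSUPP.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/bsg.h>          /* struct sg_io_v4 (assumed location) */
#include <scsi/sg.h>            /* SG_IO (assumed location) */

int main(void)
{
        unsigned char cdb[16] = { 0 };          /* placeholder SCSI CDB */
        unsigned char dout[512], din[512];      /* data-out and data-in buffers */
        struct sg_io_v4 hdr;
        int fd;

        fd = open("/dev/bsg/0:0:0:0", O_RDWR);  /* hypothetical bsg node */
        if (fd < 0)
                return 1;

        memset(&hdr, 0, sizeof(hdr));           /* protocol/subprotocol stay 0 */
        hdr.guard = 'Q';
        hdr.request = (unsigned long)cdb;
        hdr.request_len = sizeof(cdb);
        /* filling in both directions is what makes the request bidi */
        hdr.dout_xferp = (unsigned long)dout;
        hdr.dout_xfer_len = sizeof(dout);
        hdr.din_xferp = (unsigned long)din;
        hdr.din_xfer_len = sizeof(din);

        if (ioctl(fd, SG_IO, &hdr) < 0)
                perror("SG_IO");        /* EOPNOTSUPP if the queue is not bidi-capable */

        close(fd);
        return 0;
}

A unidirectional sg_io_v4 (only one of dout/din set) follows the same path as before; the bidi case is the one that now allocates a second request and maps the din buffer onto it, as bsg_map_hdr() below shows.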
Diffstat (limited to 'block/bsg.c')
-rw-r--r--    block/bsg.c    74
1 file changed, 49 insertions(+), 25 deletions(-)
diff --git a/block/bsg.c b/block/bsg.c
index 5f4abc902a00..13ecc951a4c0 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -95,6 +95,7 @@ struct bsg_command {
         struct list_head list;
         struct request *rq;
         struct bio *bio;
+        struct bio *bidi_bio;
         int err;
         struct sg_io_v4 hdr;
         struct sg_io_v4 __user *uhdr;
@@ -243,16 +244,6 @@ bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw)
         if (hdr->protocol || hdr->subprotocol)
                 return -EINVAL;
 
-        /*
-         * looks sane, if no data then it should be fine from our POV
-         */
-        if (!hdr->dout_xfer_len && !hdr->din_xfer_len)
-                return 0;
-
-        /* not supported currently */
-        if (hdr->dout_xfer_len && hdr->din_xfer_len)
-                return -EINVAL;
-
         *rw = hdr->dout_xfer_len ? WRITE : READ;
 
         return 0;
@@ -265,7 +256,7 @@ static struct request *
 bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
 {
         request_queue_t *q = bd->queue;
-        struct request *rq;
+        struct request *rq, *next_rq = NULL;
         int ret, rw = 0;        /* shut up gcc */
         unsigned int dxfer_len;
         void *dxferp = NULL;
@@ -282,11 +273,30 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
          * map scatter-gather elements seperately and string them to request
          */
         rq = blk_get_request(q, rw, GFP_KERNEL);
+        if (!rq)
+                return ERR_PTR(-ENOMEM);
         ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, test_bit(BSG_F_WRITE_PERM,
                                                         &bd->flags));
-        if (ret) {
-                blk_put_request(rq);
-                return ERR_PTR(ret);
+        if (ret)
+                goto out;
+
+        if (rw == WRITE && hdr->din_xfer_len) {
+                if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
+                        ret = -EOPNOTSUPP;
+                        goto out;
+                }
+
+                next_rq = blk_get_request(q, READ, GFP_KERNEL);
+                if (!next_rq) {
+                        ret = -ENOMEM;
+                        goto out;
+                }
+                rq->next_rq = next_rq;
+
+                dxferp = (void*)(unsigned long)hdr->din_xferp;
+                ret = blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len);
+                if (ret)
+                        goto out;
         }
 
         if (hdr->dout_xfer_len) {
@@ -300,14 +310,17 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
 
         if (dxfer_len) {
                 ret = blk_rq_map_user(q, rq, dxferp, dxfer_len);
-                if (ret) {
-                        dprintk("failed map at %d\n", ret);
-                        blk_put_request(rq);
-                        rq = ERR_PTR(ret);
-                }
+                if (ret)
+                        goto out;
         }
-
         return rq;
+out:
+        blk_put_request(rq);
+        if (next_rq) {
+                blk_rq_unmap_user(next_rq->bio);
+                blk_put_request(next_rq);
+        }
+        return ERR_PTR(ret);
 }
 
 /*
@@ -346,6 +359,8 @@ static void bsg_add_command(struct bsg_device *bd, request_queue_t *q,
          */
         bc->rq = rq;
         bc->bio = rq->bio;
+        if (rq->next_rq)
+                bc->bidi_bio = rq->next_rq->bio;
         bc->hdr.duration = jiffies;
         spin_lock_irq(&bd->lock);
         list_add_tail(&bc->list, &bd->busy_list);
@@ -402,7 +417,7 @@ static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
 }
 
 static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
-                                    struct bio *bio)
+                                    struct bio *bio, struct bio *bidi_bio)
 {
         int ret = 0;
 
@@ -431,6 +446,11 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
                         ret = -EFAULT;
         }
 
+        if (rq->next_rq) {
+                blk_rq_unmap_user(bidi_bio);
+                blk_put_request(rq->next_rq);
+        }
+
         blk_rq_unmap_user(bio);
         blk_put_request(rq);
 
@@ -477,7 +497,8 @@ static int bsg_complete_all_commands(struct bsg_device *bd)
                 if (IS_ERR(bc))
                         break;
 
-                tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio);
+                tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
+                                                bc->bidi_bio);
                 if (!ret)
                         ret = tret;
 
@@ -511,7 +532,8 @@ __bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
                  * after completing the request. so do that here,
                  * bsg_complete_work() cannot do that for us
                  */
-                ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio);
+                ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
+                                               bc->bidi_bio);
 
                 if (copy_to_user(buf, (char *) &bc->hdr, sizeof(bc->hdr)))
                         ret = -EFAULT;
@@ -868,7 +890,7 @@ bsg_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
         }
         case SG_IO: {
                 struct request *rq;
-                struct bio *bio;
+                struct bio *bio, *bidi_bio = NULL;
                 struct sg_io_v4 hdr;
 
                 if (copy_from_user(&hdr, uarg, sizeof(hdr)))
@@ -879,8 +901,10 @@ bsg_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
                         return PTR_ERR(rq);
 
                 bio = rq->bio;
+                if (rq->next_rq)
+                        bidi_bio = rq->next_rq->bio;
                 blk_execute_rq(bd->queue, NULL, rq, 0);
-                blk_complete_sgv4_hdr_rq(rq, &hdr, bio);
+                blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);
 
                 if (copy_to_user(uarg, &hdr, sizeof(hdr)))
                         return -EFAULT;