author     FUJITA Tomonori <tomof@acm.org>      2006-12-20 05:20:15 -0500
committer  Jens Axboe <jens.axboe@oracle.com>   2007-07-16 02:52:44 -0400
commit     70e36eceaf897da11aa0b4d82b46ca66e65a05f1
tree       c3d54741408168732e1695e42f03e6ecf8c89743 /block/bsg.c
parent     45977d0e87ac988d04fccfb89221727aaf8d78a4
bsg: replace SG v3 with SG v4
This patch replaces SG v3 in bsg with SG v4 (except for SG_IO).
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
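
For reference, a minimal userspace sketch of the v4 interface this patch switches to: a command is queued by write()ing a struct sg_io_v4 to a bsg node and its completion reaped with read() (SG_IO itself now returns -EINVAL, see the last hunk). This is only a sketch under assumptions: the /dev/bsg path is hypothetical, struct sg_io_v4 is assumed to come from <linux/bsg.h>, and the CDB is a no-data TEST UNIT READY so the dout/din fields can stay zero.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/bsg.h>		/* struct sg_io_v4 (assumed location) */

int main(void)
{
	unsigned char cdb[6] = { 0 };	/* TEST UNIT READY: no data transfer */
	unsigned char sense[32];
	struct sg_io_v4 hdr;
	int fd;

	/* hypothetical bsg node; the real path depends on the setup */
	fd = open("/dev/bsg/0:0:0:0", O_RDWR);
	if (fd < 0)
		return 1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.guard = 'Q';			/* v4 magic, rejected otherwise */
	hdr.protocol = 0;			/* only zero passes validation here */
	hdr.subprotocol = 0;
	hdr.request = (unsigned long) cdb;	/* CDB stays in user memory */
	hdr.request_len = sizeof(cdb);
	hdr.response = (unsigned long) sense;	/* sense buffer for errors */
	hdr.max_response_len = sizeof(sense);
	hdr.timeout = 30000;			/* milliseconds */

	/* queue the command ... */
	if (write(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
		return 1;
	/* ... and reap its completion (blocks on a blocking fd) */
	if (read(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
		return 1;

	printf("device_status %u, response_len %u\n",
	       hdr.device_status, hdr.response_len);
	close(fd);
	return 0;
}

Per bsg_validate_sgv4_hdr() below, guard must be 'Q' and protocol/subprotocol must be zero, or the header is rejected with -EINVAL.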
Diffstat (limited to 'block/bsg.c')
 block/bsg.c | 198 +++++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 121 insertions(+), 77 deletions(-)
diff --git a/block/bsg.c b/block/bsg.c
index 53a09a52d154..6d139d20ec99 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -103,8 +103,8 @@ struct bsg_command {
 	struct request *rq;
 	struct bio *bio;
 	int err;
-	struct sg_io_hdr hdr;
-	struct sg_io_hdr __user *uhdr;
+	struct sg_io_v4 hdr;
+	struct sg_io_v4 __user *uhdr;
 	char sense[SCSI_SENSE_BUFFERSIZE];
 };
 
@@ -235,57 +235,82 @@ static struct bsg_command *bsg_get_command(struct bsg_device *bd)
 	return bc;
 }
 
+static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq,
+				struct sg_io_v4 *hdr, int has_write_perm)
+{
+	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
+
+	if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
+			   hdr->request_len))
+		return -EFAULT;
+	if (blk_verify_command(rq->cmd, has_write_perm))
+		return -EPERM;
+
+	/*
+	 * fill in request structure
+	 */
+	rq->cmd_len = hdr->request_len;
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
+
+	rq->timeout = (hdr->timeout * HZ) / 1000;
+	if (!rq->timeout)
+		rq->timeout = q->sg_timeout;
+	if (!rq->timeout)
+		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
+
+	return 0;
+}
+
 /*
- * Check if sg_io_hdr from user is allowed and valid
+ * Check if sg_io_v4 from user is allowed and valid
  */
 static int
-bsg_validate_sghdr(request_queue_t *q, struct sg_io_hdr *hdr, int *rw)
+bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw)
 {
-	if (hdr->interface_id != 'S')
+	if (hdr->guard != 'Q')
 		return -EINVAL;
-	if (hdr->cmd_len > BLK_MAX_CDB)
+	if (hdr->request_len > BLK_MAX_CDB)
 		return -EINVAL;
-	if (hdr->dxfer_len > (q->max_sectors << 9))
+	if (hdr->dout_xfer_len > (q->max_sectors << 9) ||
+	    hdr->din_xfer_len > (q->max_sectors << 9))
 		return -EIO;
 
+	/* not supported currently */
+	if (hdr->protocol || hdr->subprotocol)
+		return -EINVAL;
+
 	/*
 	 * looks sane, if no data then it should be fine from our POV
 	 */
-	if (!hdr->dxfer_len)
+	if (!hdr->dout_xfer_len && !hdr->din_xfer_len)
 		return 0;
 
-	switch (hdr->dxfer_direction) {
-	case SG_DXFER_TO_FROM_DEV:
-	case SG_DXFER_FROM_DEV:
-		*rw = READ;
-		break;
-	case SG_DXFER_TO_DEV:
-		*rw = WRITE;
-		break;
-	default:
-		return -EINVAL;
-	}
+	/* not supported currently */
+	if (hdr->dout_xfer_len && hdr->din_xfer_len)
+		return -EINVAL;
+
+	*rw = hdr->dout_xfer_len ? WRITE : READ;
 
 	return 0;
 }
 
 /*
- * map sg_io_hdr to a request. for scatter-gather sg_io_hdr, we map
- * each segment to a bio and string multiple bio's to the request
+ * map sg_io_v4 to a request.
  */
 static struct request *
-bsg_map_hdr(struct bsg_device *bd, int rw, struct sg_io_hdr *hdr)
+bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
 {
 	request_queue_t *q = bd->queue;
-	struct sg_iovec iov;
-	struct sg_iovec __user *u_iov;
 	struct request *rq;
-	int ret, i = 0;
+	int ret, rw;
+	unsigned int dxfer_len;
+	void *dxferp = NULL;
 
-	dprintk("map hdr %p/%d/%d\n", hdr->dxferp, hdr->dxfer_len,
-		hdr->iovec_count);
+	dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
+		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
+		hdr->din_xfer_len);
 
-	ret = bsg_validate_sghdr(q, hdr, &rw);
+	ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -293,44 +318,29 @@ bsg_map_hdr(struct bsg_device *bd, int rw, struct sg_io_hdr *hdr)
 	 * map scatter-gather elements seperately and string them to request
 	 */
 	rq = blk_get_request(q, rw, GFP_KERNEL);
-	ret = blk_fill_sghdr_rq(q, rq, hdr, test_bit(BSG_F_WRITE_PERM,
-			       &bd->flags));
+	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, test_bit(BSG_F_WRITE_PERM,
+				   &bd->flags));
 	if (ret) {
 		blk_put_request(rq);
 		return ERR_PTR(ret);
 	}
 
-	if (!hdr->iovec_count) {
-		ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
-		if (ret)
-			goto out;
-	}
-
-	u_iov = hdr->dxferp;
-	for (ret = 0, i = 0; i < hdr->iovec_count; i++, u_iov++) {
-		if (copy_from_user(&iov, u_iov, sizeof(iov))) {
-			ret = -EFAULT;
-			break;
+	if (hdr->dout_xfer_len) {
+		dxfer_len = hdr->dout_xfer_len;
+		dxferp = (void*)(unsigned long)hdr->dout_xferp;
+	} else if (hdr->din_xfer_len) {
+		dxfer_len = hdr->din_xfer_len;
+		dxferp = (void*)(unsigned long)hdr->din_xferp;
+	} else
+		dxfer_len = 0;
+
+	if (dxfer_len) {
+		ret = blk_rq_map_user(q, rq, dxferp, dxfer_len);
+		if (ret) {
+			dprintk("failed map at %d\n", ret);
+			blk_put_request(rq);
+			rq = ERR_PTR(ret);
 		}
-
-		if (!iov.iov_len || !iov.iov_base) {
-			ret = -EINVAL;
-			break;
-		}
-
-		ret = blk_rq_map_user(q, rq, iov.iov_base, iov.iov_len);
-		if (ret)
-			break;
-	}
-
-	/*
-	 * bugger, cleanup
-	 */
-	if (ret) {
-out:
-		dprintk("failed map at %d: %d\n", i, ret);
-		blk_unmap_sghdr_rq(rq, hdr);
-		rq = ERR_PTR(ret);
 	}
 
 	return rq;
@@ -346,7 +356,7 @@ static void bsg_rq_end_io(struct request *rq, int uptodate)
 	struct bsg_device *bd = bc->bd;
 	unsigned long flags;
 
-	dprintk("%s: finished rq %p bc %p, bio %p offset %d stat %d\n",
+	dprintk("%s: finished rq %p bc %p, bio %p offset %Zd stat %d\n",
 		bd->name, rq, bc, bc->bio, bc - bd->cmd_map, uptodate);
 
 	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);
@@ -434,6 +444,42 @@ bsg_get_done_cmd_nosignals(struct bsg_device *bd)
 	return __bsg_get_done_cmd(bd, TASK_UNINTERRUPTIBLE);
 }
 
+static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
+				    struct bio *bio)
+{
+	int ret = 0;
+
+	dprintk("rq %p bio %p %u\n", rq, bio, rq->errors);
+	/*
+	 * fill in all the output members
+	 */
+	hdr->device_status = status_byte(rq->errors);
+	hdr->transport_status = host_byte(rq->errors);
+	hdr->driver_status = driver_byte(rq->errors);
+	hdr->info = 0;
+	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
+		hdr->info |= SG_INFO_CHECK;
+	hdr->din_resid = rq->data_len;
+	hdr->response_len = 0;
+
+	if (rq->sense_len && hdr->response) {
+		int len = min((unsigned int) hdr->max_response_len,
+			      rq->sense_len);
+
+		ret = copy_to_user((void*)(unsigned long)hdr->response,
+				   rq->sense, len);
+		if (!ret)
+			hdr->response_len = len;
+		else
+			ret = -EFAULT;
+	}
+
+	blk_rq_unmap_user(bio);
+	blk_put_request(rq);
+
+	return ret;
+}
+
 static int bsg_complete_all_commands(struct bsg_device *bd)
 {
 	struct bsg_command *bc;
@@ -476,7 +522,7 @@ static int bsg_complete_all_commands(struct bsg_device *bd)
 			break;
 		}
 
-		tret = blk_complete_sghdr_rq(bc->rq, &bc->hdr, bc->bio);
+		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio);
 		if (!ret)
 			ret = tret;
 
@@ -495,11 +541,11 @@ __bsg_read(char __user *buf, size_t count, bsg_command_callback get_bc,
 	struct bsg_command *bc;
 	int nr_commands, ret;
 
-	if (count % sizeof(struct sg_io_hdr))
+	if (count % sizeof(struct sg_io_v4))
 		return -EINVAL;
 
 	ret = 0;
-	nr_commands = count / sizeof(struct sg_io_hdr);
+	nr_commands = count / sizeof(struct sg_io_v4);
 	while (nr_commands) {
 		bc = get_bc(bd, iov);
 		if (IS_ERR(bc)) {
@@ -512,7 +558,7 @@ __bsg_read(char __user *buf, size_t count, bsg_command_callback get_bc,
 		 * after completing the request. so do that here,
 		 * bsg_complete_work() cannot do that for us
 		 */
-		ret = blk_complete_sghdr_rq(bc->rq, &bc->hdr, bc->bio);
+		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio);
 
 		if (copy_to_user(buf, (char *) &bc->hdr, sizeof(bc->hdr)))
 			ret = -EFAULT;
@@ -522,8 +568,8 @@ __bsg_read(char __user *buf, size_t count, bsg_command_callback get_bc,
 		if (ret)
 			break;
 
-		buf += sizeof(struct sg_io_hdr);
-		*bytes_read += sizeof(struct sg_io_hdr);
+		buf += sizeof(struct sg_io_v4);
+		*bytes_read += sizeof(struct sg_io_v4);
 		nr_commands--;
 	}
 
@@ -582,16 +628,15 @@ static ssize_t __bsg_write(struct bsg_device *bd, const char __user *buf,
 	struct request *rq;
 	int ret, nr_commands;
 
-	if (count % sizeof(struct sg_io_hdr))
+	if (count % sizeof(struct sg_io_v4))
 		return -EINVAL;
 
-	nr_commands = count / sizeof(struct sg_io_hdr);
+	nr_commands = count / sizeof(struct sg_io_v4);
 	rq = NULL;
 	bc = NULL;
 	ret = 0;
 	while (nr_commands) {
 		request_queue_t *q = bd->queue;
-		int rw = READ;
 
 		bc = bsg_get_command(bd);
 		if (!bc)
@@ -602,7 +647,7 @@ static ssize_t __bsg_write(struct bsg_device *bd, const char __user *buf,
 			break;
 		}
 
-		bc->uhdr = (struct sg_io_hdr __user *) buf;
+		bc->uhdr = (struct sg_io_v4 __user *) buf;
 		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
 			ret = -EFAULT;
 			break;
@@ -611,7 +656,7 @@ static ssize_t __bsg_write(struct bsg_device *bd, const char __user *buf,
 		/*
 		 * get a request, fill in the blanks, and add to request queue
 		 */
-		rq = bsg_map_hdr(bd, rw, &bc->hdr);
+		rq = bsg_map_hdr(bd, &bc->hdr);
 		if (IS_ERR(rq)) {
 			ret = PTR_ERR(rq);
 			rq = NULL;
@@ -622,12 +667,10 @@ static ssize_t __bsg_write(struct bsg_device *bd, const char __user *buf,
 		bc = NULL;
 		rq = NULL;
 		nr_commands--;
-		buf += sizeof(struct sg_io_hdr);
-		*bytes_read += sizeof(struct sg_io_hdr);
+		buf += sizeof(struct sg_io_v4);
+		*bytes_read += sizeof(struct sg_io_v4);
 	}
 
-	if (rq)
-		blk_unmap_sghdr_rq(rq, &bc->hdr);
 	if (bc)
 		bsg_free_command(bc);
 
@@ -898,11 +941,12 @@ bsg_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
 	case SG_GET_RESERVED_SIZE:
 	case SG_SET_RESERVED_SIZE:
 	case SG_EMULATED_HOST:
-	case SG_IO:
 	case SCSI_IOCTL_SEND_COMMAND: {
 		void __user *uarg = (void __user *) arg;
 		return scsi_cmd_ioctl(file, bd->disk, cmd, uarg);
 	}
+	case SG_IO:
+		return -EINVAL;
 	/*
 	 * block device ioctls
 	 */
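
For quick reference, the v3 to v4 field correspondence as it appears in the hunks above (a summary sketch; v3 names are the sg_io_hdr fields from <scsi/sg.h>, not part of this patch):

	interface_id == 'S'                  ->  guard == 'Q'
	cmd_len / cmdp                       ->  request_len / request
	dxfer_direction + dxfer_len / dxferp ->  dout_xfer_len / din_xfer_len + dout_xferp / din_xferp
	mx_sb_len / sbp / sb_len_wr          ->  max_response_len / response / response_len
	status / host_status / driver_status ->  device_status / transport_status / driver_status
	resid                                ->  din_resid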