author      Christoph Hellwig <hch@lst.de>      2010-06-18 10:59:42 -0400
committer   Jens Axboe <jaxboe@fusionio.com>    2010-08-07 12:23:08 -0400
commit      66ac0280197981f88774e74b60c8e5f9f07c1dba (patch)
tree        d093ce493146779926df88b5831805c6f9ee14e1 /drivers
parent      082439004b31adc146e96e5f1c574dd2b57dcd93 (diff)
block: don't allocate a payload for discard request
Allocating a fixed payload for discard requests always was a horrible hack,
and it's now coming back to bite us when adding support for discard in
DM/MD. So change the code to leave the allocation of a payload to the
low-level driver. Unfortunately that means we'll need another hack, which
allows us to update the various block layer length fields indicating that
we have a payload. Instead of hiding this in sd.c, which we already
partially do for UNMAP support, add a documented helper in the core block
layer for it.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/scsi/sd.c   52
1 file changed, 34 insertions, 18 deletions
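
The "documented helper in the core block layer" that the message refers to,
and that the new scsi_setup_discard_cmnd() below calls, is
blk_add_request_payload(). Its body lands in block/blk-core.c and so falls
outside this drivers-only diffstat; as a reading aid, here is a sketch of
roughly what such a helper has to do (reconstructed for illustration, not
quoted from the patch):

/*
 * Sketch of the core-block-layer helper (the real one lives in
 * block/blk-core.c, outside this diffstat): attach a driver-allocated
 * page as the data payload of an already-submitted request and patch up
 * the bio and request length fields so the block layer transfers it.
 * The caller keeps ownership of the page and must free it on
 * completion, which is exactly what the sd_done() hunk below does.
 */
void blk_add_request_payload(struct request *rq, struct page *page,
		unsigned int len)
{
	struct bio *bio = rq->bio;

	/* point the bio's single segment at the caller's page */
	bio->bi_io_vec->bv_page = page;
	bio->bi_io_vec->bv_offset = 0;
	bio->bi_io_vec->bv_len = len;

	bio->bi_size = len;
	bio->bi_vcnt = 1;
	bio->bi_phys_segments = 1;

	/* mirror the payload size into the request itself */
	rq->__data_len = rq->resid_len = len;
	rq->nr_phys_segments = 1;
}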
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a3fdf4dc59da..86da819c70eb 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -411,22 +411,25 @@ static void sd_prot_op(struct scsi_cmnd *scmd, unsigned int dif)
 }
 
 /**
- * sd_prepare_discard - unmap blocks on thinly provisioned device
+ * scsi_setup_discard_cmnd - unmap blocks on thinly provisioned device
+ * @sdp: scsi device to operate one
  * @rq: Request to prepare
  *
  * Will issue either UNMAP or WRITE SAME(16) depending on preference
  * indicated by target device.
  **/
-static int sd_prepare_discard(struct request *rq)
+static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
 {
 	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
 	struct bio *bio = rq->bio;
 	sector_t sector = bio->bi_sector;
-	unsigned int num = bio_sectors(bio);
+	unsigned int nr_sectors = bio_sectors(bio);
+	unsigned int len;
+	struct page *page;
 
 	if (sdkp->device->sector_size == 4096) {
 		sector >>= 3;
-		num >>= 3;
+		nr_sectors >>= 3;
 	}
 
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
@@ -434,31 +437,35 @@ static int sd_prepare_discard(struct request *rq)
 
 	memset(rq->cmd, 0, rq->cmd_len);
 
+	page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
+	if (!page)
+		return BLKPREP_DEFER;
+
 	if (sdkp->unmap) {
-		char *buf = kmap_atomic(bio_page(bio), KM_USER0);
+		char *buf = page_address(page);
 
+		rq->cmd_len = 10;
 		rq->cmd[0] = UNMAP;
 		rq->cmd[8] = 24;
-		rq->cmd_len = 10;
-
-		/* Ensure that data length matches payload */
-		rq->__data_len = bio->bi_size = bio->bi_io_vec->bv_len = 24;
 
 		put_unaligned_be16(6 + 16, &buf[0]);
 		put_unaligned_be16(16, &buf[2]);
 		put_unaligned_be64(sector, &buf[8]);
-		put_unaligned_be32(num, &buf[16]);
+		put_unaligned_be32(nr_sectors, &buf[16]);
 
-		kunmap_atomic(buf, KM_USER0);
+		len = 24;
 	} else {
+		rq->cmd_len = 16;
 		rq->cmd[0] = WRITE_SAME_16;
 		rq->cmd[1] = 0x8; /* UNMAP */
 		put_unaligned_be64(sector, &rq->cmd[2]);
-		put_unaligned_be32(num, &rq->cmd[10]);
-		rq->cmd_len = 16;
+		put_unaligned_be32(nr_sectors, &rq->cmd[10]);
+
+		len = sdkp->device->sector_size;
 	}
 
-	return BLKPREP_OK;
+	blk_add_request_payload(rq, page, len);
+	return scsi_setup_blk_pc_cmnd(sdp, rq);
 }
 
 /**
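
A note on the sdkp->unmap branch above: the 24 bytes written into the
freshly allocated page form a SCSI (SBC-3) UNMAP parameter list holding a
single block descriptor. The standalone sketch below (the function name is
illustrative, not from the patch) spells out the byte offsets:

#include <linux/types.h>
#include <asm/unaligned.h>

/*
 * Illustrative: fill a zeroed 24-byte buffer with an UNMAP parameter
 * list containing one block descriptor, mirroring the stores above.
 */
static void sketch_fill_unmap_payload(u8 *buf, u64 lba, u32 nr_blocks)
{
	put_unaligned_be16(6 + 16, &buf[0]);	 /* UNMAP data length: 6 header bytes after this field + one 16-byte descriptor */
	put_unaligned_be16(16, &buf[2]);	 /* block descriptor data length */
						 /* bytes 4..7: reserved */
	put_unaligned_be64(lba, &buf[8]);	 /* first logical block to unmap */
	put_unaligned_be32(nr_blocks, &buf[16]); /* number of logical blocks */
						 /* bytes 20..23: reserved */
}

The WRITE SAME(16) branch needs no such parameter list: the LBA and block
count travel in the CDB itself, and the zeroed page merely supplies the one
logical block of data the command writes, which is why len is set to
sdkp->device->sector_size there.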
@@ -485,10 +492,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
 	 * Discard request come in as REQ_TYPE_FS but we turn them into
 	 * block PC requests to make life easier.
 	 */
-	if (rq->cmd_flags & REQ_DISCARD)
-		ret = sd_prepare_discard(rq);
-
-	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
+	if (rq->cmd_flags & REQ_DISCARD) {
+		ret = scsi_setup_discard_cmnd(sdp, rq);
+		goto out;
+	} else if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 		ret = scsi_setup_blk_pc_cmnd(sdp, rq);
 		goto out;
 	} else if (rq->cmd_type != REQ_TYPE_FS) {
@@ -1163,6 +1170,15 @@ static int sd_done(struct scsi_cmnd *SCpnt)
 	int sense_valid = 0;
 	int sense_deferred = 0;
 
+	/*
+	 * If this is a discard request that originated from the kernel
+	 * we need to free our payload here.  Note that we need to check
+	 * the request flag as the normal payload rules apply for
+	 * pass-through UNMAP / WRITE SAME requests.
+	 */
+	if (SCpnt->request->cmd_flags & REQ_DISCARD)
+		__free_page(bio_page(SCpnt->request->bio));
+
 	if (result) {
 		sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
 		if (sense_valid)