path: root/drivers/scsi/scsi_lib.c
Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r--  drivers/scsi/scsi_lib.c | 346
1 file changed, 250 insertions(+), 96 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 4afef5cdcb17..ba93d6e66d48 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -63,39 +63,6 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = {
 };
 #undef SP
 
-
-/*
- * Function:    scsi_insert_special_req()
- *
- * Purpose:     Insert pre-formed request into request queue.
- *
- * Arguments:   sreq    - request that is ready to be queued.
- *              at_head - boolean.  True if we should insert at head
- *                        of queue, false if we should insert at tail.
- *
- * Lock status: Assumed that lock is not held upon entry.
- *
- * Returns:     Nothing
- *
- * Notes:       This function is called from character device and from
- *              ioctl types of functions where the caller knows exactly
- *              what SCSI command needs to be issued.  The idea is that
- *              we merely inject the command into the queue (at the head
- *              for now), and then call the queue request function to actually
- *              process it.
- */
-int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
-{
-        /*
-         * Because users of this function are apt to reuse requests with no
-         * modification, we have to sanitise the request flags here
-         */
-        sreq->sr_request->flags &= ~REQ_DONTPREP;
-        blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
-                           at_head, sreq);
-        return 0;
-}
-
 static void scsi_run_queue(struct request_queue *q);
 
 /*
@@ -249,8 +216,13 @@ void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
 
         /*
          * head injection *required* here otherwise quiesce won't work
+         *
+         * Because users of this function are apt to reuse requests with no
+         * modification, we have to sanitise the request flags here
          */
-        scsi_insert_special_req(sreq, 1);
+        sreq->sr_request->flags &= ~REQ_DONTPREP;
+        blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
+                           1, sreq);
 }
 EXPORT_SYMBOL(scsi_do_req);
 
@@ -287,6 +259,7 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
         memcpy(req->cmd, cmd, req->cmd_len);
         req->sense = sense;
         req->sense_len = 0;
+        req->retries = retries;
         req->timeout = timeout;
         req->flags |= flags | REQ_BLOCK_PC | REQ_SPECIAL | REQ_QUIET;
 
@@ -327,6 +300,200 @@ int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
 }
 EXPORT_SYMBOL(scsi_execute_req);
 
+struct scsi_io_context {
+        void *data;
+        void (*done)(void *data, char *sense, int result, int resid);
+        char sense[SCSI_SENSE_BUFFERSIZE];
+};
+
+static kmem_cache_t *scsi_io_context_cache;
+
+static void scsi_end_async(struct request *req, int uptodate)
+{
+        struct scsi_io_context *sioc = req->end_io_data;
+
+        if (sioc->done)
+                sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);
+
+        kmem_cache_free(scsi_io_context_cache, sioc);
+        __blk_put_request(req->q, req);
+}
+
+static int scsi_merge_bio(struct request *rq, struct bio *bio)
+{
+        struct request_queue *q = rq->q;
+
+        bio->bi_flags &= ~(1 << BIO_SEG_VALID);
+        if (rq_data_dir(rq) == WRITE)
+                bio->bi_rw |= (1 << BIO_RW);
+        blk_queue_bounce(q, &bio);
+
+        if (!rq->bio)
+                blk_rq_bio_prep(q, rq, bio);
+        else if (!q->back_merge_fn(q, rq, bio))
+                return -EINVAL;
+        else {
+                rq->biotail->bi_next = bio;
+                rq->biotail = bio;
+                rq->hard_nr_sectors += bio_sectors(bio);
+                rq->nr_sectors = rq->hard_nr_sectors;
+        }
+
+        return 0;
+}
+
+static int scsi_bi_endio(struct bio *bio, unsigned int bytes_done, int error)
+{
+        if (bio->bi_size)
+                return 1;
+
+        bio_put(bio);
+        return 0;
+}
+
+/**
+ * scsi_req_map_sg - map a scatterlist into a request
+ * @rq:      request to fill
+ * @sgl:     scatterlist
+ * @nsegs:   number of elements
+ * @bufflen: len of buffer
+ * @gfp:     memory allocation flags
+ *
+ * scsi_req_map_sg maps a scatterlist into a request so that the
+ * request can be sent to the block layer. We do not trust the scatterlist
+ * sent to us, as some ULDs use that struct only to organize the pages.
+ */
+static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
+                           int nsegs, unsigned bufflen, gfp_t gfp)
+{
+        struct request_queue *q = rq->q;
+        int nr_pages = (bufflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+        unsigned int data_len = 0, len, bytes, off;
+        struct page *page;
+        struct bio *bio = NULL;
+        int i, err, nr_vecs = 0;
+
+        for (i = 0; i < nsegs; i++) {
+                page = sgl[i].page;
+                off = sgl[i].offset;
+                len = sgl[i].length;
+                data_len += len;
+
+                while (len > 0) {
+                        bytes = min_t(unsigned int, len, PAGE_SIZE - off);
+
+                        if (!bio) {
+                                nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
+                                nr_pages -= nr_vecs;
+
+                                bio = bio_alloc(gfp, nr_vecs);
+                                if (!bio) {
+                                        err = -ENOMEM;
+                                        goto free_bios;
+                                }
+                                bio->bi_end_io = scsi_bi_endio;
+                        }
+
+                        if (bio_add_pc_page(q, bio, page, bytes, off) !=
+                            bytes) {
+                                bio_put(bio);
+                                err = -EINVAL;
+                                goto free_bios;
+                        }
+
+                        if (bio->bi_vcnt >= nr_vecs) {
+                                err = scsi_merge_bio(rq, bio);
+                                if (err) {
+                                        bio_endio(bio, bio->bi_size, 0);
+                                        goto free_bios;
+                                }
+                                bio = NULL;
+                        }
+
+                        page++;
+                        len -= bytes;
+                        off = 0;
+                }
+        }
+
+        rq->buffer = rq->data = NULL;
+        rq->data_len = data_len;
+        return 0;
+
+free_bios:
+        while ((bio = rq->bio) != NULL) {
+                rq->bio = bio->bi_next;
+                /*
+                 * call endio instead of bio_put in case it was bounced
+                 */
+                bio_endio(bio, bio->bi_size, 0);
+        }
+
+        return err;
+}
+
+/**
+ * scsi_execute_async - insert request
+ * @sdev:    scsi device
+ * @cmd:     scsi command
+ * @data_direction: data direction
+ * @buffer:  data buffer (this can be a kernel buffer or scatterlist)
+ * @bufflen: len of buffer
+ * @use_sg:  if buffer is a scatterlist this is the number of elements
+ * @timeout: request timeout in seconds
+ * @retries: number of times to retry request
+ * @flags:   or into request flags
+ **/
+int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
+                       int data_direction, void *buffer, unsigned bufflen,
+                       int use_sg, int timeout, int retries, void *privdata,
+                       void (*done)(void *, char *, int, int), gfp_t gfp)
+{
+        struct request *req;
+        struct scsi_io_context *sioc;
+        int err = 0;
+        int write = (data_direction == DMA_TO_DEVICE);
+
+        sioc = kmem_cache_alloc(scsi_io_context_cache, gfp);
+        if (!sioc)
+                return DRIVER_ERROR << 24;
+        memset(sioc, 0, sizeof(*sioc));
+
+        req = blk_get_request(sdev->request_queue, write, gfp);
+        if (!req)
+                goto free_sense;
+        req->flags |= REQ_BLOCK_PC | REQ_QUIET;
+
+        if (use_sg)
+                err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
+        else if (bufflen)
+                err = blk_rq_map_kern(req->q, req, buffer, bufflen, gfp);
+
+        if (err)
+                goto free_req;
+
+        req->cmd_len = COMMAND_SIZE(cmd[0]);
+        memcpy(req->cmd, cmd, req->cmd_len);
+        req->sense = sioc->sense;
+        req->sense_len = 0;
+        req->timeout = timeout;
+        req->retries = retries;
+        req->end_io_data = sioc;
+
+        sioc->data = privdata;
+        sioc->done = done;
+
+        blk_execute_rq_nowait(req->q, NULL, req, 1, scsi_end_async);
+        return 0;
+
+free_req:
+        blk_put_request(req);
+free_sense:
+        kmem_cache_free(scsi_io_context_cache, sioc);
+        return DRIVER_ERROR << 24;
+}
+EXPORT_SYMBOL_GPL(scsi_execute_async);
+
 /*
  * Function: scsi_init_cmd_errh()
  *
@@ -542,10 +709,17 @@ static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
 
 void scsi_next_command(struct scsi_cmnd *cmd)
 {
-        struct request_queue *q = cmd->device->request_queue;
+        struct scsi_device *sdev = cmd->device;
+        struct request_queue *q = sdev->request_queue;
+
+        /* need to hold a reference on the device before we let go of the cmd */
+        get_device(&sdev->sdev_gendev);
 
         scsi_put_command(cmd);
         scsi_run_queue(q);
+
+        /* ok to remove device now */
+        put_device(&sdev->sdev_gendev);
 }
 
 void scsi_run_host_queues(struct Scsi_Host *shost)
@@ -617,7 +791,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
         spin_lock_irqsave(q->queue_lock, flags);
         if (blk_rq_tagged(req))
                 blk_queue_end_tag(q, req);
-        end_that_request_last(req);
+        end_that_request_last(req, uptodate);
         spin_unlock_irqrestore(q->queue_lock, flags);
 
         /*
@@ -758,9 +932,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
         int sense_valid = 0;
         int sense_deferred = 0;
 
-        if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
-                return;
-
         /*
          * Free up any indirection buffers we allocated for DMA purposes.
          * For the case of a READ, we need to copy the data out of the
@@ -877,7 +1048,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
                  * system where READ CAPACITY failed, we may have read
                  * past the end of the disk.
                  */
-                if (cmd->device->use_10_for_rw &&
+                if ((cmd->device->use_10_for_rw &&
+                    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
                     (cmd->cmnd[0] == READ_10 ||
                      cmd->cmnd[0] == WRITE_10)) {
                         cmd->device->use_10_for_rw = 0;
@@ -1024,38 +1196,6 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
                 return BLKPREP_KILL;
 }
 
-static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
-{
-        struct scsi_device *sdev = q->queuedata;
-        struct scsi_driver *drv;
-
-        if (sdev->sdev_state == SDEV_RUNNING) {
-                drv = *(struct scsi_driver **) rq->rq_disk->private_data;
-
-                if (drv->prepare_flush)
-                        return drv->prepare_flush(q, rq);
-        }
-
-        return 0;
-}
-
-static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
-{
-        struct scsi_device *sdev = q->queuedata;
-        struct request *flush_rq = rq->end_io_data;
-        struct scsi_driver *drv;
-
-        if (flush_rq->errors) {
-                printk("scsi: barrier error, disabling flush support\n");
-                blk_queue_ordered(q, QUEUE_ORDERED_NONE);
-        }
-
-        if (sdev->sdev_state == SDEV_RUNNING) {
-                drv = *(struct scsi_driver **) rq->rq_disk->private_data;
-                drv->end_flush(q, rq);
-        }
-}
-
 static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
                                sector_t *error_sector)
 {
@@ -1075,8 +1215,34 @@ static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
 static void scsi_generic_done(struct scsi_cmnd *cmd)
 {
         BUG_ON(!blk_pc_request(cmd->request));
-        scsi_io_completion(cmd, cmd->result == 0 ? cmd->bufflen : 0, 0);
+        /*
+         * This will complete the whole command with uptodate=1 so
+         * as far as the block layer is concerned the command completed
+         * successfully. Since this is a REQ_BLOCK_PC command the
+         * caller should check the request's errors value
+         */
+        scsi_io_completion(cmd, cmd->bufflen, 0);
+}
+
+void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
+{
+        struct request *req = cmd->request;
+
+        BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
+        memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
+        cmd->cmd_len = req->cmd_len;
+        if (!req->data_len)
+                cmd->sc_data_direction = DMA_NONE;
+        else if (rq_data_dir(req) == WRITE)
+                cmd->sc_data_direction = DMA_TO_DEVICE;
+        else
+                cmd->sc_data_direction = DMA_FROM_DEVICE;
+
+        cmd->transfersize = req->data_len;
+        cmd->allowed = req->retries;
+        cmd->timeout_per_command = req->timeout;
 }
+EXPORT_SYMBOL_GPL(scsi_setup_blk_pc_cmnd);
 
 static int scsi_prep_fn(struct request_queue *q, struct request *req)
 {
@@ -1213,18 +1379,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
                         goto kill;
                 }
         } else {
-                memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
-                cmd->cmd_len = req->cmd_len;
-                if (rq_data_dir(req) == WRITE)
-                        cmd->sc_data_direction = DMA_TO_DEVICE;
-                else if (req->data_len)
-                        cmd->sc_data_direction = DMA_FROM_DEVICE;
-                else
-                        cmd->sc_data_direction = DMA_NONE;
-
-                cmd->transfersize = req->data_len;
-                cmd->allowed = 3;
-                cmd->timeout_per_command = req->timeout;
+                scsi_setup_blk_pc_cmnd(cmd);
                 cmd->done = scsi_generic_done;
         }
 }
@@ -1513,17 +1668,6 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
         blk_queue_segment_boundary(q, shost->dma_boundary);
         blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
 
-        /*
-         * ordered tags are superior to flush ordering
-         */
-        if (shost->ordered_tag)
-                blk_queue_ordered(q, QUEUE_ORDERED_TAG);
-        else if (shost->ordered_flush) {
-                blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
-                q->prepare_flush_fn = scsi_prepare_flush_fn;
-                q->end_flush_fn = scsi_end_flush_fn;
-        }
-
         if (!shost->use_clustering)
                 clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
         return q;
@@ -1587,6 +1731,14 @@ int __init scsi_init_queue(void)
 {
         int i;
 
+        scsi_io_context_cache = kmem_cache_create("scsi_io_context",
+                                        sizeof(struct scsi_io_context),
+                                        0, 0, NULL, NULL);
+        if (!scsi_io_context_cache) {
+                printk(KERN_ERR "SCSI: can't init scsi io context cache\n");
+                return -ENOMEM;
+        }
+
         for (i = 0; i < SG_MEMPOOL_NR; i++) {
                 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
                 int size = sgp->size * sizeof(struct scatterlist);
@@ -1614,6 +1766,8 @@ void scsi_exit_queue(void)
 {
         int i;
 
+        kmem_cache_destroy(scsi_io_context_cache);
+
         for (i = 0; i < SG_MEMPOOL_NR; i++) {
                 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
                 mempool_destroy(sgp->pool);
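
Usage illustration (not part of this commit): a minimal sketch of how an upper-level driver might call the scsi_execute_async() interface added above, written against the 2.6.15-era APIs shown in this diff and assuming the matching declaration lands in scsi/scsi_device.h elsewhere in the patch series. The names struct my_io, my_io_done() and my_tur() are hypothetical, invented for illustration; the real in-tree callers were drivers such as sg and st. The done callback runs from block-layer completion context via scsi_end_async(), so it must not sleep; here it only records the result and signals a completion.

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

struct my_io {
        struct completion done;         /* signalled by the callback */
        int result;                     /* value of req->errors */
};

/* completion callback: runs in scsi_end_async() context, may not sleep */
static void my_io_done(void *data, char *sense, int result, int resid)
{
        struct my_io *io = data;

        /* 'sense' points into the scsi_io_context and is only valid here */
        io->result = result;
        complete(&io->done);
}

/* issue a TEST UNIT READY, which moves no data: DMA_NONE, no buffer, no sg */
static int my_tur(struct scsi_device *sdev)
{
        unsigned char cmd[] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
        struct my_io io;

        init_completion(&io.done);

        if (scsi_execute_async(sdev, cmd, DMA_NONE, NULL, 0, 0,
                               30 * HZ, 3, &io, my_io_done, GFP_KERNEL))
                return -EIO;    /* allocation/setup failed: DRIVER_ERROR << 24 */

        wait_for_completion(&io.done);
        return io.result ? -EIO : 0;
}

A caller with data to transfer would instead pass DMA_TO_DEVICE or DMA_FROM_DEVICE together with either a kernel buffer (use_sg == 0, mapped via blk_rq_map_kern) or a scatterlist and its element count (mapped via scsi_req_map_sg), mirroring the use_sg branch in scsi_execute_async() above.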