Diffstat (limited to 'drivers/scsi/scsi_lib.c')
 drivers/scsi/scsi_lib.c | 225 ++++++++++++++++++++----------------------------
 1 file changed, 90 insertions(+), 135 deletions(-)
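In brief, as reconstructed from the hunks below, this patch: removes the scsi_end_request() helper and folds its partial-completion handling directly into scsi_io_completion(); splits bidi buffer teardown out of scsi_release_buffers() into a new scsi_release_bidi_buffers(); moves the prep-state check and command allocation out of scsi_setup_fs_cmnd()/scsi_setup_blk_pc_cmnd() into a single static scsi_prep_fn() that dispatches REQ_TYPE_FS requests to the upper-level driver's init_command hook; and registers a matching scsi_unprep_fn() via blk_queue_unprep_rq().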
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9db097a28a74..9f841df6add8 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -512,68 +512,6 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
 		scsi_run_queue(sdev->request_queue);
 }
 
-static void __scsi_release_buffers(struct scsi_cmnd *, int);
-
-/*
- * Function:    scsi_end_request()
- *
- * Purpose:     Post-processing of completed commands (usually invoked at end
- *		of upper level post-processing and scsi_io_completion).
- *
- * Arguments:   cmd	 - command that is complete.
- *              error    - 0 if I/O indicates success, < 0 for I/O error.
- *              bytes    - number of bytes of completed I/O
- *		requeue  - indicates whether we should requeue leftovers.
- *
- * Lock status: Assumed that lock is not held upon entry.
- *
- * Returns:     cmd if requeue required, NULL otherwise.
- *
- * Notes:       This is called for block device requests in order to
- *              mark some number of sectors as complete.
- *
- *		We are guaranteeing that the request queue will be goosed
- *		at some point during this call.
- * Notes:	If cmd was requeued, upon return it will be a stale pointer.
- */
-static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
-					  int bytes, int requeue)
-{
-	struct request_queue *q = cmd->device->request_queue;
-	struct request *req = cmd->request;
-
-	/*
-	 * If there are blocks left over at the end, set up the command
-	 * to queue the remainder of them.
-	 */
-	if (blk_end_request(req, error, bytes)) {
-		/* kill remainder if no retrys */
-		if (error && scsi_noretry_cmd(cmd))
-			blk_end_request_all(req, error);
-		else {
-			if (requeue) {
-				/*
-				 * Bleah.  Leftovers again.  Stick the
-				 * leftovers in the front of the
-				 * queue, and goose the queue again.
-				 */
-				scsi_release_buffers(cmd);
-				scsi_requeue_command(q, cmd);
-				cmd = NULL;
-			}
-			return cmd;
-		}
-	}
-
-	/*
-	 * This will goose the queue request function at the end, so we don't
-	 * need to worry about launching another command.
-	 */
-	__scsi_release_buffers(cmd, 0);
-	scsi_next_command(cmd);
-	return NULL;
-}
-
 static inline unsigned int scsi_sgtable_index(unsigned short nents)
 {
 	unsigned int index;
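The deleted helper's three responsibilities (completing the good bytes, killing the remainder when retries are disallowed, and requeueing leftovers) do not go away: they reappear inline in scsi_io_completion(), and are consolidated in the sketch placed after the completion hunks below.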
@@ -625,30 +563,10 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
 	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
 }
 
-static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
-{
-
-	if (cmd->sdb.table.nents)
-		scsi_free_sgtable(&cmd->sdb);
-
-	memset(&cmd->sdb, 0, sizeof(cmd->sdb));
-
-	if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
-		struct scsi_data_buffer *bidi_sdb =
-			cmd->request->next_rq->special;
-		scsi_free_sgtable(bidi_sdb);
-		kmem_cache_free(scsi_sdb_cache, bidi_sdb);
-		cmd->request->next_rq->special = NULL;
-	}
-
-	if (scsi_prot_sg_count(cmd))
-		scsi_free_sgtable(cmd->prot_sdb);
-}
-
 /*
  * Function:    scsi_release_buffers()
  *
- * Purpose:     Completion processing for block device I/O requests.
+ * Purpose:     Free resources allocate for a scsi_command.
  *
  * Arguments:   cmd	- command that we are bailing.
  *
@@ -659,15 +577,29 @@ static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
  * Notes:       In the event that an upper level driver rejects a
  *		command, we must release resources allocated during
  *		the __init_io() function.  Primarily this would involve
- *		the scatter-gather table, and potentially any bounce
- *		buffers.
+ *		the scatter-gather table.
  */
 void scsi_release_buffers(struct scsi_cmnd *cmd)
 {
-	__scsi_release_buffers(cmd, 1);
+	if (cmd->sdb.table.nents)
+		scsi_free_sgtable(&cmd->sdb);
+
+	memset(&cmd->sdb, 0, sizeof(cmd->sdb));
+
+	if (scsi_prot_sg_count(cmd))
+		scsi_free_sgtable(cmd->prot_sdb);
 }
 EXPORT_SYMBOL(scsi_release_buffers);
 
+static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
+{
+	struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;
+
+	scsi_free_sgtable(bidi_sdb);
+	kmem_cache_free(scsi_sdb_cache, bidi_sdb);
+	cmd->request->next_rq->special = NULL;
+}
+
 /**
  * __scsi_error_from_host_byte - translate SCSI error code into errno
  * @cmd: SCSI command (unused)
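For context on the new scsi_release_bidi_buffers() helper: a bidirectional (bidi) command carries a second data buffer hanging off req->next_rq->special, allocated during command setup. A minimal sketch of the allocating side, paraphrased from scsi_init_io() of this era (simplified and abridged; not part of this patch):

	if (blk_bidi_rq(cmd->request)) {
		struct scsi_data_buffer *bidi_sdb =
			kmem_cache_zalloc(scsi_sdb_cache, GFP_ATOMIC);
		if (!bidi_sdb) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		cmd->request->next_rq->special = bidi_sdb;
		error = scsi_init_sgtable(cmd->request->next_rq, bidi_sdb,
					  GFP_ATOMIC);
		/* scsi_release_bidi_buffers() undoes exactly these steps */
	}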
@@ -725,16 +657,9 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
  *
  * Returns:     Nothing
  *
- * Notes:       This function is matched in terms of capabilities to
- *              the function that created the scatter-gather list.
- *              In other words, if there are no bounce buffers
- *              (the normal case for most drivers), we don't need
- *              the logic to deal with cleaning up afterwards.
- *
- *		We must call scsi_end_request().  This will finish off
- *		the specified number of sectors.  If we are done, the
- *		command block will be released and the queue function
- *		will be goosed.  If we are not done then we have to
+ * Notes:       We will finish off the specified number of sectors.  If we
+ *		are done, the command block will be released and the queue
+ *		function will be goosed.  If we are not done then we have to
  *		figure out what to do next:
  *
  *		a) We can call scsi_requeue_command().  The request
@@ -743,7 +668,7 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
  *		   be used if we made forward progress, or if we want
  *		   to switch from READ(10) to READ(6) for example.
  *
- *		b) We can call scsi_queue_insert().  The request will
+ *		b) We can call __scsi_queue_insert().  The request will
  *		   be put back on the queue and retried using the same
  *		   command as before, possibly after a delay.
  *
@@ -801,6 +726,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 			req->next_rq->resid_len = scsi_in(cmd)->resid;
 
 		scsi_release_buffers(cmd);
+		scsi_release_bidi_buffers(cmd);
+
 		blk_end_request_all(req, 0);
 
 		scsi_next_command(cmd);
@@ -840,12 +767,25 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 	}
 
 	/*
-	 * A number of bytes were successfully read.  If there
-	 * are leftovers and there is some kind of error
-	 * (result != 0), retry the rest.
+	 * If we finished all bytes in the request we are done now.
 	 */
-	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
-		return;
+	if (!blk_end_request(req, error, good_bytes))
+		goto next_command;
+
+	/*
+	 * Kill remainder if no retrys.
+	 */
+	if (error && scsi_noretry_cmd(cmd)) {
+		blk_end_request_all(req, error);
+		goto next_command;
+	}
+
+	/*
+	 * If there had been no error, but we have leftover bytes in the
+	 * requeues just queue the command up again.
+	 */
+	if (result == 0)
+		goto requeue;
 
 	error = __scsi_error_from_host_byte(cmd, result);
 
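Pulling this hunk together with the ACTION_FAIL and next_command hunks further down, the reworked tail of scsi_io_completion() follows a single goto-based pattern. A condensed sketch, assembled from the surrounding hunks with the sense-analysis detail elided:

	if (!blk_end_request(req, error, good_bytes))
		goto next_command;		/* request fully completed */

	if (error && scsi_noretry_cmd(cmd)) {
		blk_end_request_all(req, error);	/* kill the remainder */
		goto next_command;
	}

	if (result == 0)
		goto requeue;	/* leftovers, no error: re-prep and resubmit */

	/* ... sense analysis; ACTION_FAIL may fall through to requeue ... */

next_command:
	scsi_release_buffers(cmd);
	scsi_next_command(cmd);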
@@ -973,7 +913,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 	switch (action) {
 	case ACTION_FAIL:
 		/* Give up and fail the remainder of the request */
-		scsi_release_buffers(cmd);
 		if (!(req->cmd_flags & REQ_QUIET)) {
 			if (description)
 				scmd_printk(KERN_INFO, cmd, "%s\n",
@@ -983,12 +922,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 			scsi_print_sense("", cmd);
 			scsi_print_command(cmd);
 		}
-		if (blk_end_request_err(req, error))
-			scsi_requeue_command(q, cmd);
-		else
-			scsi_next_command(cmd);
-		break;
+		if (!blk_end_request_err(req, error))
+			goto next_command;
+		/*FALLTHRU*/
 	case ACTION_REPREP:
+	requeue:
 		/* Unprep the request and put it back at the head of the queue.
 		 * A new command will be prepared and issued.
 		 */
@@ -1004,6 +942,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
 		break;
 	}
+	return;
+
+next_command:
+	scsi_release_buffers(cmd);
+	scsi_next_command(cmd);
 }
 
 static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
@@ -1130,15 +1073,7 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
 
 int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
 {
-	struct scsi_cmnd *cmd;
-	int ret = scsi_prep_state_check(sdev, req);
-
-	if (ret != BLKPREP_OK)
-		return ret;
-
-	cmd = scsi_get_cmd_from_req(sdev, req);
-	if (unlikely(!cmd))
-		return BLKPREP_DEFER;
+	struct scsi_cmnd *cmd = req->special;
 
 	/*
 	 * BLOCK_PC requests may transfer data, in which case they must
@@ -1182,15 +1117,11 @@ EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
  */
 int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
 {
-	struct scsi_cmnd *cmd;
-	int ret = scsi_prep_state_check(sdev, req);
-
-	if (ret != BLKPREP_OK)
-		return ret;
+	struct scsi_cmnd *cmd = req->special;
 
 	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
 			 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
-		ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
+		int ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
 		if (ret != BLKPREP_OK)
 			return ret;
 	}
@@ -1200,16 +1131,13 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
 	 */
 	BUG_ON(!req->nr_phys_segments);
 
-	cmd = scsi_get_cmd_from_req(sdev, req);
-	if (unlikely(!cmd))
-		return BLKPREP_DEFER;
-
 	memset(cmd->cmnd, 0, BLK_MAX_CDB);
 	return scsi_init_io(cmd, GFP_ATOMIC);
 }
 EXPORT_SYMBOL(scsi_setup_fs_cmnd);
 
-int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
+static int
+scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
 {
 	int ret = BLKPREP_OK;
 
@@ -1261,9 +1189,9 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
 	}
 	return ret;
 }
-EXPORT_SYMBOL(scsi_prep_state_check);
 
-int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
+static int
+scsi_prep_return(struct request_queue *q, struct request *req, int ret)
 {
 	struct scsi_device *sdev = q->queuedata;
 
@@ -1294,18 +1222,44 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
 
 	return ret;
 }
-EXPORT_SYMBOL(scsi_prep_return);
 
-int scsi_prep_fn(struct request_queue *q, struct request *req)
+static int scsi_prep_fn(struct request_queue *q, struct request *req)
 {
 	struct scsi_device *sdev = q->queuedata;
-	int ret = BLKPREP_KILL;
+	struct scsi_cmnd *cmd;
+	int ret;
 
-	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
+	ret = scsi_prep_state_check(sdev, req);
+	if (ret != BLKPREP_OK)
+		goto out;
+
+	cmd = scsi_get_cmd_from_req(sdev, req);
+	if (unlikely(!cmd)) {
+		ret = BLKPREP_DEFER;
+		goto out;
+	}
+
+	if (req->cmd_type == REQ_TYPE_FS)
+		ret = scsi_cmd_to_driver(cmd)->init_command(cmd);
+	else if (req->cmd_type == REQ_TYPE_BLOCK_PC)
 		ret = scsi_setup_blk_pc_cmnd(sdev, req);
+	else
+		ret = BLKPREP_KILL;
+
+out:
 	return scsi_prep_return(q, req, ret);
 }
-EXPORT_SYMBOL(scsi_prep_fn);
+
+static void scsi_unprep_fn(struct request_queue *q, struct request *req)
+{
+	if (req->cmd_type == REQ_TYPE_FS) {
+		struct scsi_cmnd *cmd = req->special;
+		struct scsi_driver *drv = scsi_cmd_to_driver(cmd);
+
+		if (drv->uninit_command)
+			drv->uninit_command(cmd);
+	}
+}
 
 /*
  * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
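The REQ_TYPE_FS branch above hands command setup to the upper-level driver through struct scsi_driver. As an illustration of how a driver is expected to wire this up, a sketch of the sd template; the sd_init_command/sd_uninit_command names follow the companion sd patch in this series and are an assumption here, not part of this diff:

static struct scsi_driver sd_template = {
	.gendrv = {
		.name		= "sd",
		/* ... */
	},
	/* called from scsi_prep_fn() for REQ_TYPE_FS requests */
	.init_command		= sd_init_command,
	/* called from scsi_unprep_fn() when the request is unprepped */
	.uninit_command		= sd_uninit_command,
};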
@@ -1726,6 +1680,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
 		return NULL;
 
 	blk_queue_prep_rq(q, scsi_prep_fn);
+	blk_queue_unprep_rq(q, scsi_unprep_fn);
 	blk_queue_softirq_done(q, scsi_softirq_done);
 	blk_queue_rq_timed_out(q, scsi_times_out);
 	blk_queue_lld_busy(q, scsi_lld_busy);
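For reference, the unprep hook registered here is invoked by the block layer when a prepared request is unprepped, i.e. when REQ_DONTPREP is cleared. Roughly, from blk-core.c of this era (simplified, for orientation only):

static void blk_unprep_request(struct request *req)
{
	struct request_queue *q = req->q;

	req->cmd_flags &= ~REQ_DONTPREP;
	if (q->unprep_rq_fn)
		q->unprep_rq_fn(q, req);
}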