Diffstat (limited to 'drivers/scsi/scsi_lib.c')
 drivers/scsi/scsi_lib.c | 351 ++++++++++++++++++++++++++++----------------
 1 file changed, 193 insertions(+), 158 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d2c02df12fdc..1748e27501cd 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -36,7 +36,7 @@
 struct scsi_host_sg_pool {
 	size_t size;
 	char *name;
-	kmem_cache_t *slab;
+	struct kmem_cache *slab;
 	mempool_t *pool;
 };
 
@@ -241,7 +241,7 @@ struct scsi_io_context {
 	char sense[SCSI_SENSE_BUFFERSIZE];
 };
 
-static kmem_cache_t *scsi_io_context_cache;
+static struct kmem_cache *scsi_io_context_cache;
 
 static void scsi_end_async(struct request *req, int uptodate)
 {
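Both hunks above track the slab API change (around 2.6.19) that removed the kmem_cache_t typedef in favor of naming struct kmem_cache directly. As a minimal sketch of how such a cache is set up with the six-argument kmem_cache_create() of that era (the init function name is illustrative, not part of this patch):

	#include <linux/slab.h>

	static struct kmem_cache *scsi_io_context_cache;

	static int __init example_init_io_context(void)	/* hypothetical */
	{
		/* name, object size, alignment, flags, ctor, dtor */
		scsi_io_context_cache = kmem_cache_create("scsi_io_context",
				sizeof(struct scsi_io_context), 0, 0, NULL, NULL);
		if (!scsi_io_context_cache)
			return -ENOMEM;
		return 0;
	}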
@@ -410,6 +410,7 @@ int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
 		goto free_req;
 
 	req->cmd_len = cmd_len;
+	memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
 	memcpy(req->cmd, cmd, req->cmd_len);
 	req->sense = sioc->sense;
 	req->sense_len = 0;
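The added memset() matters because req->cmd is a fixed BLK_MAX_CDB (16-byte) array while cmd_len is usually shorter (6, 10 or 12 bytes); without it, the bytes past cmd_len keep whatever the previous request left behind, which ATAPI devices may parse as part of the packet. The pattern in isolation, as a self-contained sketch:

	#include <string.h>

	#define BLK_MAX_CDB 16	/* fixed CDB buffer size in struct request */

	/* Zero the whole buffer first so bytes past cmd_len are well defined. */
	static void fill_cdb(unsigned char dst[BLK_MAX_CDB],
			     const unsigned char *cdb, size_t cmd_len)
	{
		memset(dst, 0, BLK_MAX_CDB);
		memcpy(dst, cdb, cmd_len);	/* e.g. a 6-byte TEST UNIT READY */
	}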
@@ -703,7 +704,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
 	return NULL;
 }
 
-static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
 {
 	struct scsi_host_sg_pool *sgp;
 	struct scatterlist *sgl;
@@ -744,7 +745,9 @@ static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_m
 	return sgl;
 }
 
-static void scsi_free_sgtable(struct scatterlist *sgl, int index)
+EXPORT_SYMBOL(scsi_alloc_sgtable);
+
+void scsi_free_sgtable(struct scatterlist *sgl, int index)
 {
 	struct scsi_host_sg_pool *sgp;
 
@@ -754,6 +757,8 @@ static void scsi_free_sgtable(struct scatterlist *sgl, int index)
 	mempool_free(sgl, sgp->pool);
 }
 
+EXPORT_SYMBOL(scsi_free_sgtable);
+
 /*
  * Function: scsi_release_buffers()
  *
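scsi_alloc_sgtable() and scsi_free_sgtable() drop their static qualifier and are exported so SCSI code outside scsi_lib.c (the then-new target infrastructure being the obvious consumer) can reuse the mempool-backed scatterlist allocator. A hedged sketch of an external caller, assuming cmd->use_sg is set the way scsi_init_io() sets it (the function name here is hypothetical):

	#include <scsi/scsi_cmnd.h>

	static int example_borrow_sgtable(struct scsi_cmnd *cmd, int nr_segments)
	{
		struct scatterlist *sgl;

		/*
		 * scsi_alloc_sgtable() rounds cmd->use_sg up to one of the
		 * scsi_sg_pools sizes and records the pool index in
		 * cmd->sglist_len, which scsi_free_sgtable() takes as 'index'.
		 */
		cmd->use_sg = nr_segments;
		sgl = scsi_alloc_sgtable(cmd, GFP_KERNEL);
		if (!sgl)
			return -ENOMEM;

		/* ... fill sgl and issue the command ... */

		scsi_free_sgtable(sgl, cmd->sglist_len);
		return 0;
	}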
@@ -995,25 +1000,14 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
 	int		   count;
 
 	/*
-	 * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
-	 */
-	if (blk_pc_request(req) && !req->bio) {
-		cmd->request_bufflen = req->data_len;
-		cmd->request_buffer = req->data;
-		req->buffer = req->data;
-		cmd->use_sg = 0;
-		return 0;
-	}
-
-	/*
-	 * we used to not use scatter-gather for single segment request,
+	 * We used to not use scatter-gather for single segment request,
 	 * but now we do (it makes highmem I/O easier to support without
 	 * kmapping pages)
 	 */
 	cmd->use_sg = req->nr_phys_segments;
 
 	/*
-	 * if sg table allocation fails, requeue request later.
+	 * If sg table allocation fails, requeue request later.
 	 */
 	sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
 	if (unlikely(!sgpnt)) {
@@ -1021,24 +1015,21 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
 		return BLKPREP_DEFER;
 	}
 
+	req->buffer = NULL;
 	cmd->request_buffer = (char *) sgpnt;
-	cmd->request_bufflen = req->nr_sectors << 9;
 	if (blk_pc_request(req))
 		cmd->request_bufflen = req->data_len;
-	req->buffer = NULL;
+	else
+		cmd->request_bufflen = req->nr_sectors << 9;
 
 	/*
 	 * Next, walk the list, and fill in the addresses and sizes of
 	 * each segment.
 	 */
 	count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
-
-	/*
-	 * mapped well, send it off
-	 */
 	if (likely(count <= cmd->use_sg)) {
 		cmd->use_sg = count;
-		return 0;
+		return BLKPREP_OK;
 	}
 
 	printk(KERN_ERR "Incorrect number of segments after building list\n");
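scsi_init_io() now answers in the block layer's own vocabulary: BLKPREP_OK (numerically the old bare 0) on success, with the bio-less BLOCK_PC shortcut it used to contain moved into scsi_setup_blk_pc_cmnd() below. For reference, the prep_rq_fn contract these values come from, as defined in <linux/blkdev.h>:

	#define BLKPREP_OK	0	/* serve it: request fully prepared */
	#define BLKPREP_KILL	1	/* fatal error: fail the entire request */
	#define BLKPREP_DEFER	2	/* leave on queue: retry preparation later */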
@@ -1068,6 +1059,27 @@ static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
 	return -EOPNOTSUPP;
 }
 
+static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
+		struct request *req)
+{
+	struct scsi_cmnd *cmd;
+
+	if (!req->special) {
+		cmd = scsi_get_command(sdev, GFP_ATOMIC);
+		if (unlikely(!cmd))
+			return NULL;
+		req->special = cmd;
+	} else {
+		cmd = req->special;
+	}
+
+	/* pull a tag out of the request if we have one */
+	cmd->tag = req->tag;
+	cmd->request = req;
+
+	return cmd;
+}
+
 static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
 {
 	BUG_ON(!blk_pc_request(cmd->request));
@@ -1080,9 +1092,37 @@ static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
 	scsi_io_completion(cmd, cmd->request_bufflen);
 }
 
-static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
+static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
 {
-	struct request *req = cmd->request;
+	struct scsi_cmnd *cmd;
+
+	cmd = scsi_get_cmd_from_req(sdev, req);
+	if (unlikely(!cmd))
+		return BLKPREP_DEFER;
+
+	/*
+	 * BLOCK_PC requests may transfer data, in which case they must
+	 * have a bio attached to them.  Or they might contain a SCSI command
+	 * that does not transfer data, in which case they may optionally
+	 * submit a request without an attached bio.
+	 */
+	if (req->bio) {
+		int ret;
+
+		BUG_ON(!req->nr_phys_segments);
+
+		ret = scsi_init_io(cmd);
+		if (unlikely(ret))
+			return ret;
+	} else {
+		BUG_ON(req->data_len);
+		BUG_ON(req->data);
+
+		cmd->request_bufflen = 0;
+		cmd->request_buffer = NULL;
+		cmd->use_sg = 0;
+		req->buffer = NULL;
+	}
 
 	BUILD_BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
 	memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
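The new else branch covers BLOCK_PC requests that carry a SCSI command but move no data, and therefore legitimately arrive without a bio. A hedged sketch of how such a request might be built and issued through the 2.6.19-era block API (the function name is hypothetical, error handling trimmed):

	#include <linux/blkdev.h>
	#include <scsi/scsi.h>

	/* Issue a data-less TEST UNIT READY; no bio means no data phase. */
	static int example_test_unit_ready(struct request_queue *q)
	{
		struct request *rq;
		int err;

		rq = blk_get_request(q, READ, GFP_KERNEL);
		if (!rq)
			return -ENOMEM;

		rq->cmd_type = REQ_TYPE_BLOCK_PC;
		memset(rq->cmd, 0, BLK_MAX_CDB);	/* see the hunk above */
		rq->cmd[0] = TEST_UNIT_READY;		/* 6-byte CDB, zero tail */
		rq->cmd_len = 6;
		rq->timeout = 30 * HZ;

		err = blk_execute_rq(q, NULL, rq, 0);
		blk_put_request(rq);
		return err;
	}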
@@ -1098,154 +1138,138 @@ static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
 	cmd->allowed = req->retries;
 	cmd->timeout_per_command = req->timeout;
 	cmd->done = scsi_blk_pc_done;
+	return BLKPREP_OK;
 }
 
-static int scsi_prep_fn(struct request_queue *q, struct request *req)
+/*
+ * Setup a REQ_TYPE_FS command.  These are simple read/write requests
+ * from filesystems that still need to be translated to SCSI CDBs from
+ * the ULD.
+ */
+static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
 {
-	struct scsi_device *sdev = q->queuedata;
 	struct scsi_cmnd *cmd;
-	int specials_only = 0;
+	struct scsi_driver *drv;
+	int ret;
 
 	/*
-	 * Just check to see if the device is online.  If it isn't, we
-	 * refuse to process any commands.  The device must be brought
-	 * online before trying any recovery commands
+	 * Filesystem requests must transfer data.
 	 */
-	if (unlikely(!scsi_device_online(sdev))) {
-		sdev_printk(KERN_ERR, sdev,
-			    "rejecting I/O to offline device\n");
-		goto kill;
-	}
-	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
-		/* OK, we're not in a running state don't prep
-		 * user commands */
-		if (sdev->sdev_state == SDEV_DEL) {
-			/* Device is fully deleted, no commands
-			 * at all allowed down */
-			sdev_printk(KERN_ERR, sdev,
-				    "rejecting I/O to dead device\n");
-			goto kill;
-		}
-		/* OK, we only allow special commands (i.e. not
-		 * user initiated ones */
-		specials_only = sdev->sdev_state;
-	}
+	BUG_ON(!req->nr_phys_segments);
+
+	cmd = scsi_get_cmd_from_req(sdev, req);
+	if (unlikely(!cmd))
+		return BLKPREP_DEFER;
+
+	ret = scsi_init_io(cmd);
+	if (unlikely(ret))
+		return ret;
+
+	/*
+	 * Initialize the actual SCSI command for this request.
+	 */
+	drv = *(struct scsi_driver **)req->rq_disk->private_data;
+	if (unlikely(!drv->init_command(cmd))) {
+		scsi_release_buffers(cmd);
+		scsi_put_command(cmd);
+		return BLKPREP_KILL;
+	}
 
+	return BLKPREP_OK;
+}
+
+static int scsi_prep_fn(struct request_queue *q, struct request *req)
+{
+	struct scsi_device *sdev = q->queuedata;
+	int ret = BLKPREP_OK;
+
 	/*
-	 * Find the actual device driver associated with this command.
-	 * The SPECIAL requests are things like character device or
-	 * ioctls, which did not originate from ll_rw_blk.  Note that
-	 * the special field is also used to indicate the cmd for
-	 * the remainder of a partially fulfilled request that can
-	 * come up when there is a medium error.  We have to treat
-	 * these two cases differently.  We differentiate by looking
-	 * at request->cmd, as this tells us the real story.
+	 * If the device is not in running state we will reject some
+	 * or all commands.
 	 */
-	if (blk_special_request(req) && req->special)
-		cmd = req->special;
-	else if (blk_pc_request(req) || blk_fs_request(req)) {
-		if (unlikely(specials_only) && !(req->cmd_flags & REQ_PREEMPT)){
-			if (specials_only == SDEV_QUIESCE ||
-			    specials_only == SDEV_BLOCK)
-				goto defer;
-
-			sdev_printk(KERN_ERR, sdev,
-				    "rejecting I/O to device being removed\n");
-			goto kill;
+	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
+		switch (sdev->sdev_state) {
+		case SDEV_OFFLINE:
+			/*
+			 * If the device is offline we refuse to process any
+			 * commands.  The device must be brought online
+			 * before trying any recovery commands.
+			 */
+			sdev_printk(KERN_ERR, sdev,
+				    "rejecting I/O to offline device\n");
+			ret = BLKPREP_KILL;
+			break;
+		case SDEV_DEL:
+			/*
+			 * If the device is fully deleted, we refuse to
+			 * process any commands as well.
+			 */
+			sdev_printk(KERN_ERR, sdev,
+				    "rejecting I/O to dead device\n");
+			ret = BLKPREP_KILL;
+			break;
+		case SDEV_QUIESCE:
+		case SDEV_BLOCK:
+			/*
+			 * If the device is blocked we defer normal commands.
+			 */
+			if (!(req->cmd_flags & REQ_PREEMPT))
+				ret = BLKPREP_DEFER;
+			break;
+		default:
+			/*
+			 * For any other not fully online state we only allow
+			 * special commands.  In particular any user initiated
+			 * command is not allowed.
+			 */
+			if (!(req->cmd_flags & REQ_PREEMPT))
+				ret = BLKPREP_KILL;
+			break;
 		}
 
-		/*
-		 * Now try and find a command block that we can use.
-		 */
-		if (!req->special) {
-			cmd = scsi_get_command(sdev, GFP_ATOMIC);
-			if (unlikely(!cmd))
-				goto defer;
-		} else
-			cmd = req->special;
-
-		/* pull a tag out of the request if we have one */
-		cmd->tag = req->tag;
-	} else {
-		blk_dump_rq_flags(req, "SCSI bad req");
-		goto kill;
+		if (ret != BLKPREP_OK)
+			goto out;
 	}
-
-	/* note the overloading of req->special.  When the tag
-	 * is active it always means cmd.  If the tag goes
-	 * back for re-queueing, it may be reset */
-	req->special = cmd;
-	cmd->request = req;
-
-	/*
-	 * FIXME: drop the lock here because the functions below
-	 * expect to be called without the queue lock held.  Also,
-	 * previously, we dequeued the request before dropping the
-	 * lock.  We hope REQ_STARTED prevents anything untoward from
-	 * happening now.
-	 */
-	if (blk_fs_request(req) || blk_pc_request(req)) {
-		int ret;
 
+	switch (req->cmd_type) {
+	case REQ_TYPE_BLOCK_PC:
+		ret = scsi_setup_blk_pc_cmnd(sdev, req);
+		break;
+	case REQ_TYPE_FS:
+		ret = scsi_setup_fs_cmnd(sdev, req);
+		break;
+	default:
 		/*
-		 * This will do a couple of things:
-		 *  1) Fill in the actual SCSI command.
-		 *  2) Fill in any other upper-level specific fields
-		 * (timeout).
+		 * All other command types are not supported.
 		 *
-		 * If this returns 0, it means that the request failed
-		 * (reading past end of disk, reading offline device,
-		 * etc).   This won't actually talk to the device, but
-		 * some kinds of consistency checking may cause the
-		 * request to be rejected immediately.
+		 * Note that these days the SCSI subsystem does not use
+		 * REQ_TYPE_SPECIAL requests anymore.  These are only used
+		 * (directly or via blk_insert_request) by non-SCSI drivers.
 		 */
+		blk_dump_rq_flags(req, "SCSI bad req");
+		ret = BLKPREP_KILL;
+		break;
+	}
 
-	/*
-	 * This sets up the scatter-gather table (allocating if
-	 * required).
-	 */
-	ret = scsi_init_io(cmd);
-	switch(ret) {
-		/* For BLKPREP_KILL/DEFER the cmd was released */
-	case BLKPREP_KILL:
-		goto kill;
-	case BLKPREP_DEFER:
-		goto defer;
-	}
-
+ out:
+	switch (ret) {
+	case BLKPREP_KILL:
+		req->errors = DID_NO_CONNECT << 16;
+		break;
+	case BLKPREP_DEFER:
 		/*
-		 * Initialize the actual SCSI command for this request.
+		 * If we defer, the elv_next_request() returns NULL, but the
+		 * queue must be restarted, so we plug here if no returning
+		 * command will automatically do that.
 		 */
-		if (blk_pc_request(req)) {
-			scsi_setup_blk_pc_cmnd(cmd);
-		} else if (req->rq_disk) {
-			struct scsi_driver *drv;
-
-			drv = *(struct scsi_driver **)req->rq_disk->private_data;
-			if (unlikely(!drv->init_command(cmd))) {
-				scsi_release_buffers(cmd);
-				scsi_put_command(cmd);
-				goto kill;
-			}
-		}
-	}
-
-	/*
-	 * The request is now prepped, no need to come back here
-	 */
-	req->cmd_flags |= REQ_DONTPREP;
-	return BLKPREP_OK;
-
- defer:
-	/* If we defer, the elv_next_request() returns NULL, but the
-	 * queue must be restarted, so we plug here if no returning
-	 * command will automatically do that. */
-	if (sdev->device_busy == 0)
-		blk_plug_device(q);
-	return BLKPREP_DEFER;
- kill:
-	req->errors = DID_NO_CONNECT << 16;
-	return BLKPREP_KILL;
+		if (sdev->device_busy == 0)
+			blk_plug_device(q);
+		break;
+	default:
+		req->cmd_flags |= REQ_DONTPREP;
+	}
+
+	return ret;
 }
 
 /*
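The rewrite collapses the old goto kill/defer labels into one exit path: a switch over sdev->sdev_state decides admission, a switch over req->cmd_type dispatches to the two setup helpers, and the out: switch applies the result. The DID_NO_CONNECT << 16 in the KILL case follows the standard SCSI result-word packing; a small sketch for context (the wrapper name is hypothetical; DID_NO_CONNECT is 0x01 in <scsi/scsi.h>):

	#include <linux/blkdev.h>
	#include <scsi/scsi.h>

	/*
	 * A SCSI result packs four bytes:
	 *   (driver_byte << 24) | (host_byte << 16) | (msg_byte << 8) | status
	 * so shifting DID_NO_CONNECT left by 16 reports "could not connect"
	 * in the host-byte slot that the midlayer and SG_IO callers inspect.
	 */
	static void example_mark_unreachable(struct request *req)
	{
		req->errors = DID_NO_CONNECT << 16;
	}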
@@ -1547,29 +1571,40 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
 }
 EXPORT_SYMBOL(scsi_calculate_bounce_limit);
 
-struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
+struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
+					 request_fn_proc *request_fn)
 {
-	struct Scsi_Host *shost = sdev->host;
 	struct request_queue *q;
 
-	q = blk_init_queue(scsi_request_fn, NULL);
+	q = blk_init_queue(request_fn, NULL);
 	if (!q)
 		return NULL;
 
-	blk_queue_prep_rq(q, scsi_prep_fn);
-
 	blk_queue_max_hw_segments(q, shost->sg_tablesize);
 	blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
 	blk_queue_max_sectors(q, shost->max_sectors);
 	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
 	blk_queue_segment_boundary(q, shost->dma_boundary);
-	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
-	blk_queue_softirq_done(q, scsi_softirq_done);
 
 	if (!shost->use_clustering)
 		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 	return q;
 }
+EXPORT_SYMBOL(__scsi_alloc_queue);
+
+struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
+{
+	struct request_queue *q;
+
+	q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
+	if (!q)
+		return NULL;
+
+	blk_queue_prep_rq(q, scsi_prep_fn);
+	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
+	blk_queue_softirq_done(q, scsi_softirq_done);
+	return q;
+}
 
 void scsi_free_queue(struct request_queue *q)
 {
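Splitting the allocator lets a transport supply its own request_fn while reusing the host-limit plumbing; only the scsi_alloc_queue() wrapper installs the midlayer-specific prep, flush and softirq-done hooks. A hedged sketch of an alternate consumer of the new export (all my_* names hypothetical):

	static void my_request_fn(struct request_queue *q);	/* hypothetical */

	static struct request_queue *my_alloc_queue(struct Scsi_Host *shost)
	{
		/*
		 * __scsi_alloc_queue() already applies sg_tablesize,
		 * max_sectors, the bounce limit, dma_boundary and the
		 * clustering flag from the host; no scsi_prep_fn is
		 * attached, so this queue never sees midlayer prep.
		 */
		return __scsi_alloc_queue(shost, my_request_fn);
	}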