author    Jeff Garzik <jeff@garzik.org>  2006-12-07 04:57:19 -0500
committer Jeff Garzik <jeff@garzik.org>  2006-12-07 04:57:19 -0500
commit    8d1413b28033c49c7f1a4d320e815d7a5531acee (patch)
tree      b37281abef014cd60803b81c100388d7a475d49e /drivers/scsi/scsi_lib.c
parent    ed25ffa16434724f5ed825aa48734c7f3aefa203 (diff)
parent    620034c84d1d939717bdfbe02c51a3fee43541c3 (diff)
Merge branch 'master' into upstream
Conflicts:
	drivers/net/netxen/netxen_nic.h
	drivers/net/netxen/netxen_nic_main.c
Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r--  drivers/scsi/scsi_lib.c  346
1 file changed, 190 insertions(+), 156 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 3ac4890ce086..fb616c69151f 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -704,7 +704,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
 	return NULL;
 }
 
-static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
 {
 	struct scsi_host_sg_pool *sgp;
 	struct scatterlist *sgl;
@@ -745,7 +745,9 @@ static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_m
 	return sgl;
 }
 
-static void scsi_free_sgtable(struct scatterlist *sgl, int index)
+EXPORT_SYMBOL(scsi_alloc_sgtable);
+
+void scsi_free_sgtable(struct scatterlist *sgl, int index)
 {
 	struct scsi_host_sg_pool *sgp;
 
@@ -755,6 +757,8 @@ static void scsi_free_sgtable(struct scatterlist *sgl, int index)
 	mempool_free(sgl, sgp->pool);
 }
 
+EXPORT_SYMBOL(scsi_free_sgtable);
+
 /*
  * Function:    scsi_release_buffers()
  *
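Taken together, the first three hunks drop the static qualifier from scsi_alloc_sgtable()/scsi_free_sgtable() and export both, so the host-wide scatterlist mempools become usable outside scsi_lib.c. A minimal sketch of an external caller, assuming the 2.6.19-era scsi_cmnd layout (the function names here are illustrative, not part of this patch; note that scsi_alloc_sgtable() records the pool index in cmd->sglist_len, which must be handed back on free):

	/* hypothetical LLD-side user of the newly exported helpers */
	static int example_map_command(struct scsi_cmnd *cmd)
	{
		struct scatterlist *sgl;

		/* size the table from the request's physical segment count */
		cmd->use_sg = cmd->request->nr_phys_segments;

		sgl = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
		if (!sgl)
			return -ENOMEM;	/* caller requeues and retries later */

		cmd->request_buffer = (char *)sgl;
		return 0;
	}

	static void example_unmap_command(struct scsi_cmnd *cmd)
	{
		/* sglist_len holds the pool index recorded at allocation time */
		scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
	}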
@@ -996,25 +1000,14 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
 	int count;
 
 	/*
-	 * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
-	 */
-	if (blk_pc_request(req) && !req->bio) {
-		cmd->request_bufflen = req->data_len;
-		cmd->request_buffer = req->data;
-		req->buffer = req->data;
-		cmd->use_sg = 0;
-		return 0;
-	}
-
-	/*
-	 * we used to not use scatter-gather for single segment request,
+	 * We used to not use scatter-gather for single segment request,
 	 * but now we do (it makes highmem I/O easier to support without
 	 * kmapping pages)
 	 */
 	cmd->use_sg = req->nr_phys_segments;
 
 	/*
-	 * if sg table allocation fails, requeue request later.
+	 * If sg table allocation fails, requeue request later.
 	 */
 	sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
 	if (unlikely(!sgpnt)) {
@@ -1022,24 +1015,21 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
 		return BLKPREP_DEFER;
 	}
 
+	req->buffer = NULL;
 	cmd->request_buffer = (char *) sgpnt;
-	cmd->request_bufflen = req->nr_sectors << 9;
 	if (blk_pc_request(req))
 		cmd->request_bufflen = req->data_len;
-	req->buffer = NULL;
+	else
+		cmd->request_bufflen = req->nr_sectors << 9;
 
 	/*
 	 * Next, walk the list, and fill in the addresses and sizes of
 	 * each segment.
 	 */
 	count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
-
-	/*
-	 * mapped well, send it off
-	 */
 	if (likely(count <= cmd->use_sg)) {
 		cmd->use_sg = count;
-		return 0;
+		return BLKPREP_OK;
 	}
 
 	printk(KERN_ERR "Incorrect number of segments after building list\n");
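Two details of this hunk are easy to miss. First, the bio-less BLOCK_PC short-circuit is gone because that case no longer reaches scsi_init_io() at all; it is handled in scsi_setup_blk_pc_cmnd() below. Second, the success return changes from 0 to BLKPREP_OK, which is purely cosmetic: the prep-return values in <linux/blkdev.h> of this era are

	#define BLKPREP_OK	0	/* serve the request */
	#define BLKPREP_KILL	1	/* fatal error, kill the request */
	#define BLKPREP_DEFER	2	/* leave on queue, retry later */

so callers that test "if (unlikely(ret))" keep working unchanged.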
@@ -1069,6 +1059,27 @@ static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
 	return -EOPNOTSUPP;
 }
 
+static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
+		struct request *req)
+{
+	struct scsi_cmnd *cmd;
+
+	if (!req->special) {
+		cmd = scsi_get_command(sdev, GFP_ATOMIC);
+		if (unlikely(!cmd))
+			return NULL;
+		req->special = cmd;
+	} else {
+		cmd = req->special;
+	}
+
+	/* pull a tag out of the request if we have one */
+	cmd->tag = req->tag;
+	cmd->request = req;
+
+	return cmd;
+}
+
 static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
 {
 	BUG_ON(!blk_pc_request(cmd->request));
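scsi_get_cmd_from_req() factors out the long-standing overloading of req->special: on the first pass through prep a fresh command is allocated and cached in req->special, and if the request is later requeued and prepped again the cached command is reused rather than allocated twice. Both setup paths below call it with the same contract, roughly (a condensed sketch, not literal patch code):

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;	/* out of command blocks; retry later */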
@@ -1081,9 +1092,37 @@ static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
 	scsi_io_completion(cmd, cmd->request_bufflen);
 }
 
-static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
+static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
 {
-	struct request *req = cmd->request;
+	struct scsi_cmnd *cmd;
+
+	cmd = scsi_get_cmd_from_req(sdev, req);
+	if (unlikely(!cmd))
+		return BLKPREP_DEFER;
+
+	/*
+	 * BLOCK_PC requests may transfer data, in which case they must
+	 * have a bio attached to them.  Or they might contain a SCSI command
+	 * that does not transfer data, in which case they may optionally
+	 * submit a request without an attached bio.
+	 */
+	if (req->bio) {
+		int ret;
+
+		BUG_ON(!req->nr_phys_segments);
+
+		ret = scsi_init_io(cmd);
+		if (unlikely(ret))
+			return ret;
+	} else {
+		BUG_ON(req->data_len);
+		BUG_ON(req->data);
+
+		cmd->request_bufflen = 0;
+		cmd->request_buffer = NULL;
+		cmd->use_sg = 0;
+		req->buffer = NULL;
+	}
 
 	BUILD_BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
 	memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
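The new else-branch covers REQ_TYPE_BLOCK_PC requests that carry a CDB but no data. A sketch of how such a request might be produced with the block-layer API of this kernel generation (illustrative only; error handling omitted, and the helper name is mine, not from the patch):

	static void example_test_unit_ready(struct scsi_device *sdev)
	{
		struct request *rq;

		rq = blk_get_request(sdev->request_queue, READ, __GFP_WAIT);
		rq->cmd_type = REQ_TYPE_BLOCK_PC;
		rq->cmd[0] = TEST_UNIT_READY;
		rq->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
		rq->timeout = 10 * HZ;
		rq->retries = 3;

		/* no data transfer, hence no bio attached */
		blk_execute_rq(sdev->request_queue, NULL, rq, 1);
		blk_put_request(rq);
	}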
@@ -1099,154 +1138,138 @@ static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
 	cmd->allowed = req->retries;
 	cmd->timeout_per_command = req->timeout;
 	cmd->done = scsi_blk_pc_done;
+	return BLKPREP_OK;
 }
 
-static int scsi_prep_fn(struct request_queue *q, struct request *req)
+/*
+ * Setup a REQ_TYPE_FS command.  These are simple read/write requests
+ * from filesystems that still need to be translated to SCSI CDBs from
+ * the ULD.
+ */
+static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
 {
-	struct scsi_device *sdev = q->queuedata;
 	struct scsi_cmnd *cmd;
-	int specials_only = 0;
+	struct scsi_driver *drv;
+	int ret;
 
 	/*
-	 * Just check to see if the device is online.  If it isn't, we
-	 * refuse to process any commands.  The device must be brought
-	 * online before trying any recovery commands
+	 * Filesystem requests must transfer data.
 	 */
-	if (unlikely(!scsi_device_online(sdev))) {
-		sdev_printk(KERN_ERR, sdev,
-			    "rejecting I/O to offline device\n");
-		goto kill;
-	}
-	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
-		/* OK, we're not in a running state don't prep
-		 * user commands */
-		if (sdev->sdev_state == SDEV_DEL) {
-			/* Device is fully deleted, no commands
-			 * at all allowed down */
-			sdev_printk(KERN_ERR, sdev,
-				    "rejecting I/O to dead device\n");
-			goto kill;
-		}
-		/* OK, we only allow special commands (i.e. not
-		 * user initiated ones */
-		specials_only = sdev->sdev_state;
+	BUG_ON(!req->nr_phys_segments);
+
+	cmd = scsi_get_cmd_from_req(sdev, req);
+	if (unlikely(!cmd))
+		return BLKPREP_DEFER;
+
+	ret = scsi_init_io(cmd);
+	if (unlikely(ret))
+		return ret;
+
+	/*
+	 * Initialize the actual SCSI command for this request.
+	 */
+	drv = *(struct scsi_driver **)req->rq_disk->private_data;
+	if (unlikely(!drv->init_command(cmd))) {
+		scsi_release_buffers(cmd);
+		scsi_put_command(cmd);
+		return BLKPREP_KILL;
 	}
 
+	return BLKPREP_OK;
+}
+
+static int scsi_prep_fn(struct request_queue *q, struct request *req)
+{
+	struct scsi_device *sdev = q->queuedata;
+	int ret = BLKPREP_OK;
+
 	/*
-	 * Find the actual device driver associated with this command.
-	 * The SPECIAL requests are things like character device or
-	 * ioctls, which did not originate from ll_rw_blk.  Note that
-	 * the special field is also used to indicate the cmd for
-	 * the remainder of a partially fulfilled request that can
-	 * come up when there is a medium error.  We have to treat
-	 * these two cases differently.  We differentiate by looking
-	 * at request->cmd, as this tells us the real story.
+	 * If the device is not in running state we will reject some
+	 * or all commands.
 	 */
-	if (blk_special_request(req) && req->special)
-		cmd = req->special;
-	else if (blk_pc_request(req) || blk_fs_request(req)) {
-		if (unlikely(specials_only) && !(req->cmd_flags & REQ_PREEMPT)){
-			if (specials_only == SDEV_QUIESCE ||
-			    specials_only == SDEV_BLOCK)
-				goto defer;
-
-			sdev_printk(KERN_ERR, sdev,
-				    "rejecting I/O to device being removed\n");
-			goto kill;
+	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
+		switch (sdev->sdev_state) {
+		case SDEV_OFFLINE:
+			/*
+			 * If the device is offline we refuse to process any
+			 * commands.  The device must be brought online
+			 * before trying any recovery commands.
+			 */
+			sdev_printk(KERN_ERR, sdev,
+				    "rejecting I/O to offline device\n");
+			ret = BLKPREP_KILL;
+			break;
+		case SDEV_DEL:
+			/*
+			 * If the device is fully deleted, we refuse to
+			 * process any commands as well.
+			 */
+			sdev_printk(KERN_ERR, sdev,
+				    "rejecting I/O to dead device\n");
+			ret = BLKPREP_KILL;
+			break;
+		case SDEV_QUIESCE:
+		case SDEV_BLOCK:
+			/*
+			 * If the device is blocked we defer normal commands.
+			 */
+			if (!(req->cmd_flags & REQ_PREEMPT))
+				ret = BLKPREP_DEFER;
+			break;
+		default:
+			/*
+			 * For any other not fully online state we only allow
+			 * special commands.  In particular any user initiated
+			 * command is not allowed.
+			 */
+			if (!(req->cmd_flags & REQ_PREEMPT))
+				ret = BLKPREP_KILL;
+			break;
 		}
 
-		/*
-		 * Now try and find a command block that we can use.
-		 */
-		if (!req->special) {
-			cmd = scsi_get_command(sdev, GFP_ATOMIC);
-			if (unlikely(!cmd))
-				goto defer;
-		} else
-			cmd = req->special;
-
-		/* pull a tag out of the request if we have one */
-		cmd->tag = req->tag;
-	} else {
-		blk_dump_rq_flags(req, "SCSI bad req");
-		goto kill;
+		if (ret != BLKPREP_OK)
+			goto out;
 	}
-
-	/* note the overloading of req->special.  When the tag
-	 * is active it always means cmd.  If the tag goes
-	 * back for re-queueing, it may be reset */
-	req->special = cmd;
-	cmd->request = req;
-
-	/*
-	 * FIXME: drop the lock here because the functions below
-	 * expect to be called without the queue lock held.  Also,
-	 * previously, we dequeued the request before dropping the
-	 * lock.  We hope REQ_STARTED prevents anything untoward from
-	 * happening now.
-	 */
-	if (blk_fs_request(req) || blk_pc_request(req)) {
-		int ret;
 
+	switch (req->cmd_type) {
+	case REQ_TYPE_BLOCK_PC:
+		ret = scsi_setup_blk_pc_cmnd(sdev, req);
+		break;
+	case REQ_TYPE_FS:
+		ret = scsi_setup_fs_cmnd(sdev, req);
+		break;
+	default:
 		/*
-		 * This will do a couple of things:
-		 *  1) Fill in the actual SCSI command.
-		 *  2) Fill in any other upper-level specific fields
-		 * (timeout).
+		 * All other command types are not supported.
 		 *
-		 * If this returns 0, it means that the request failed
-		 * (reading past end of disk, reading offline device,
-		 * etc).   This won't actually talk to the device, but
-		 * some kinds of consistency checking may cause the
-		 * request to be rejected immediately.
+		 * Note that these days the SCSI subsystem does not use
+		 * REQ_TYPE_SPECIAL requests anymore.  These are only used
+		 * (directly or via blk_insert_request) by non-SCSI drivers.
 		 */
+		blk_dump_rq_flags(req, "SCSI bad req");
+		ret = BLKPREP_KILL;
+		break;
+	}
 
-		/*
-		 * This sets up the scatter-gather table (allocating if
-		 * required).
-		 */
-		ret = scsi_init_io(cmd);
-		switch(ret) {
-			/* For BLKPREP_KILL/DEFER the cmd was released */
-		case BLKPREP_KILL:
-			goto kill;
-		case BLKPREP_DEFER:
-			goto defer;
-		}
-
+ out:
+	switch (ret) {
+	case BLKPREP_KILL:
+		req->errors = DID_NO_CONNECT << 16;
+		break;
+	case BLKPREP_DEFER:
 		/*
-		 * Initialize the actual SCSI command for this request.
+		 * If we defer, the elv_next_request() returns NULL, but the
+		 * queue must be restarted, so we plug here if no returning
+		 * command will automatically do that.
 		 */
-		if (blk_pc_request(req)) {
-			scsi_setup_blk_pc_cmnd(cmd);
-		} else if (req->rq_disk) {
-			struct scsi_driver *drv;
-
-			drv = *(struct scsi_driver **)req->rq_disk->private_data;
-			if (unlikely(!drv->init_command(cmd))) {
-				scsi_release_buffers(cmd);
-				scsi_put_command(cmd);
-				goto kill;
-			}
-		}
+		if (sdev->device_busy == 0)
+			blk_plug_device(q);
+		break;
+	default:
+		req->cmd_flags |= REQ_DONTPREP;
 	}
 
-	/*
-	 * The request is now prepped, no need to come back here
-	 */
-	req->cmd_flags |= REQ_DONTPREP;
-	return BLKPREP_OK;
-
- defer:
-	/* If we defer, the elv_next_request() returns NULL, but the
-	 * queue must be restarted, so we plug here if no returning
-	 * command will automatically do that. */
-	if (sdev->device_busy == 0)
-		blk_plug_device(q);
-	return BLKPREP_DEFER;
- kill:
-	req->errors = DID_NO_CONNECT << 16;
-	return BLKPREP_KILL;
+	return ret;
 }
 
 /*
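The rewritten scsi_prep_fn() replaces the goto-based flow with a single ret variable and one exit label. Its device-state policy, tabulated (my summary of the switch above, not text from the patch):

	/*
	 *  sdev_state              REQ_PREEMPT set?   result
	 *  ----------------------  -----------------  -------------
	 *  SDEV_RUNNING            -                  prep proceeds
	 *  SDEV_OFFLINE            either             BLKPREP_KILL
	 *  SDEV_DEL                either             BLKPREP_KILL
	 *  SDEV_QUIESCE/BLOCK      no                 BLKPREP_DEFER
	 *  SDEV_QUIESCE/BLOCK      yes                prep proceeds
	 *  other (CREATED/CANCEL)  no                 BLKPREP_KILL
	 *  other (CREATED/CANCEL)  yes                prep proceeds
	 */

The out: block then maps the result onto the block layer exactly as the old defer:/kill: labels did: BLKPREP_KILL sets req->errors = DID_NO_CONNECT << 16, BLKPREP_DEFER plugs the queue when no command is in flight so it gets restarted, and BLKPREP_OK marks the request REQ_DONTPREP so prep is not re-run.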
@@ -1548,29 +1571,40 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
 }
 EXPORT_SYMBOL(scsi_calculate_bounce_limit);
 
-struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
+struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
+					 request_fn_proc *request_fn)
 {
-	struct Scsi_Host *shost = sdev->host;
 	struct request_queue *q;
 
-	q = blk_init_queue(scsi_request_fn, NULL);
+	q = blk_init_queue(request_fn, NULL);
 	if (!q)
 		return NULL;
 
-	blk_queue_prep_rq(q, scsi_prep_fn);
-
 	blk_queue_max_hw_segments(q, shost->sg_tablesize);
 	blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
 	blk_queue_max_sectors(q, shost->max_sectors);
 	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
 	blk_queue_segment_boundary(q, shost->dma_boundary);
-	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
-	blk_queue_softirq_done(q, scsi_softirq_done);
 
 	if (!shost->use_clustering)
 		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 	return q;
 }
+EXPORT_SYMBOL(__scsi_alloc_queue);
+
+struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
+{
+	struct request_queue *q;
+
+	q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
+	if (!q)
+		return NULL;
+
+	blk_queue_prep_rq(q, scsi_prep_fn);
+	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
+	blk_queue_softirq_done(q, scsi_softirq_done);
+	return q;
+}
 
 void scsi_free_queue(struct request_queue *q)
 {
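Splitting __scsi_alloc_queue() out of scsi_alloc_queue() separates the host-limit plumbing (segment counts, bounce limit, DMA boundary, clustering) from the SCSI-command-specific hooks (prep_rq, issue_flush, softirq completion), which stay with scsi_request_fn(). The point, as far as I can tell, is to let other users build a queue that honors the host's DMA limits but runs their own request handler; a sketch under that assumption (names are illustrative, not from this patch):

	static void my_smp_request_fn(struct request_queue *q)
	{
		/* drain and service transport-private requests here */
	}

	static struct request_queue *my_alloc_smp_queue(struct Scsi_Host *shost)
	{
		struct request_queue *q;

		q = __scsi_alloc_queue(shost, my_smp_request_fn);
		if (!q)
			return NULL;

		/* deliberately no blk_queue_prep_rq() etc.: those hooks are
		 * SCSI-command specific and remain in scsi_alloc_queue() */
		return q;
	}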