author     Christoph Hellwig <hch@lst.de>                   2006-11-04 14:10:55 -0500
committer  James Bottomley <jejb@mulgrave.il.steeleye.com>  2006-11-15 15:14:20 -0500
commit     3b00315799d78f76531b71435fbc2643cd71ae4c
tree       82f4041632d24e90c18482ffffa6511207978636 /drivers
parent     2dc611de5a3fd955cd0298c50691d4c05046db97
[SCSI] untangle scsi_prep_fn
I wanted to add some BUG checks to scsi_prep_fn to make sure no one sends us a
non-sg command, but this function is a horrible mess.  So I decided to untangle
the function and document what the valid cases are.  While doing that I found
that REQ_TYPE_SPECIAL commands aren't used by the SCSI layer anymore and we can
get rid of the code handling them.

The new structure of scsi_prep_fn is:

 (1) check if we're allowed to send this command
 (2) big switch on cmd_type.  For the two valid types, call into a function to
     set the command up; otherwise error out
 (3) code to handle the error cases

Because FS and BLOCK_PC commands are handled entirely separately after the
patch, this introduces a tiny amount of code duplication.  It improves
readability, though, and will help to avoid the bidi command overhead for FS
commands, so it's a good thing.

I've tested this on both sata and mptsas.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
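Roughly, scsi_prep_fn ends up shaped like the condensed sketch below.  This is
only an illustration of the three steps above, not the literal patched code:
the real version in the diff that follows also honours REQ_PREEMPT and handles
the offline, deleted and blocked device states individually.

static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
        struct scsi_device *sdev = q->queuedata;
        int ret = BLKPREP_OK;

        /* (1) may we send this device a command right now?  (simplified:
         * the real code also looks at REQ_PREEMPT and the exact state) */
        if (unlikely(sdev->sdev_state != SDEV_RUNNING))
                ret = (sdev->sdev_state == SDEV_QUIESCE ||
                       sdev->sdev_state == SDEV_BLOCK) ?
                        BLKPREP_DEFER : BLKPREP_KILL;

        /* (2) dispatch on the request type; only two types are valid */
        if (ret == BLKPREP_OK) {
                switch (req->cmd_type) {
                case REQ_TYPE_BLOCK_PC:
                        ret = scsi_setup_blk_pc_cmnd(sdev, req);
                        break;
                case REQ_TYPE_FS:
                        ret = scsi_setup_fs_cmnd(sdev, req);
                        break;
                default:
                        ret = BLKPREP_KILL;
                        break;
                }
        }

        /* (3) error handling common to both paths */
        if (ret == BLKPREP_KILL)
                req->errors = DID_NO_CONNECT << 16;
        else if (ret == BLKPREP_DEFER && sdev->device_busy == 0)
                blk_plug_device(q);
        else if (ret == BLKPREP_OK)
                req->cmd_flags |= REQ_DONTPREP;

        return ret;
}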
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/scsi/scsi_lib.c  313
1 file changed, 166 insertions(+), 147 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 743f67ed7640..ee35a62bb7a2 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -995,25 +995,14 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
 	int count;
 
 	/*
-	 * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
-	 */
-	if (blk_pc_request(req) && !req->bio) {
-		cmd->request_bufflen = req->data_len;
-		cmd->request_buffer = req->data;
-		req->buffer = req->data;
-		cmd->use_sg = 0;
-		return 0;
-	}
-
-	/*
-	 * we used to not use scatter-gather for single segment request,
+	 * We used to not use scatter-gather for single segment request,
 	 * but now we do (it makes highmem I/O easier to support without
 	 * kmapping pages)
 	 */
 	cmd->use_sg = req->nr_phys_segments;
 
 	/*
-	 * if sg table allocation fails, requeue request later.
+	 * If sg table allocation fails, requeue request later.
 	 */
 	sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
 	if (unlikely(!sgpnt)) {
@@ -1021,24 +1010,21 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
 		return BLKPREP_DEFER;
 	}
 
+	req->buffer = NULL;
 	cmd->request_buffer = (char *) sgpnt;
-	cmd->request_bufflen = req->nr_sectors << 9;
 	if (blk_pc_request(req))
 		cmd->request_bufflen = req->data_len;
-	req->buffer = NULL;
+	else
+		cmd->request_bufflen = req->nr_sectors << 9;
 
 	/*
 	 * Next, walk the list, and fill in the addresses and sizes of
 	 * each segment.
 	 */
 	count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
-
-	/*
-	 * mapped well, send it off
-	 */
 	if (likely(count <= cmd->use_sg)) {
 		cmd->use_sg = count;
-		return 0;
+		return BLKPREP_OK;
 	}
 
 	printk(KERN_ERR "Incorrect number of segments after building list\n");
@@ -1068,6 +1054,27 @@ static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
 	return -EOPNOTSUPP;
 }
 
+static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
+		struct request *req)
+{
+	struct scsi_cmnd *cmd;
+
+	if (!req->special) {
+		cmd = scsi_get_command(sdev, GFP_ATOMIC);
+		if (unlikely(!cmd))
+			return NULL;
+		req->special = cmd;
+	} else {
+		cmd = req->special;
+	}
+
+	/* pull a tag out of the request if we have one */
+	cmd->tag = req->tag;
+	cmd->request = req;
+
+	return cmd;
+}
+
 static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
 {
 	BUG_ON(!blk_pc_request(cmd->request));
@@ -1080,9 +1087,37 @@ static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
 	scsi_io_completion(cmd, cmd->request_bufflen);
 }
 
-static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
+static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
 {
-	struct request *req = cmd->request;
+	struct scsi_cmnd *cmd;
+
+	cmd = scsi_get_cmd_from_req(sdev, req);
+	if (unlikely(!cmd))
+		return BLKPREP_DEFER;
+
+	/*
+	 * BLOCK_PC requests may transfer data, in which case they must have
+	 * a bio attached to them.  Or they might contain a SCSI command
+	 * that does not transfer data, in which case they may optionally
+	 * submit a request without an attached bio.
+	 */
+	if (req->bio) {
+		int ret;
+
+		BUG_ON(!req->nr_phys_segments);
+
+		ret = scsi_init_io(cmd);
+		if (unlikely(ret))
+			return ret;
+	} else {
+		BUG_ON(req->data_len);
+		BUG_ON(req->data);
+
+		cmd->request_bufflen = 0;
+		cmd->request_buffer = NULL;
+		cmd->use_sg = 0;
+		req->buffer = NULL;
+	}
 
 	BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
 	memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
@@ -1098,154 +1133,138 @@ static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
 	cmd->allowed = req->retries;
 	cmd->timeout_per_command = req->timeout;
 	cmd->done = scsi_blk_pc_done;
+	return BLKPREP_OK;
 }
 
-static int scsi_prep_fn(struct request_queue *q, struct request *req)
+/*
+ * Setup a REQ_TYPE_FS command.  These are simple read/write requests
+ * from filesystems that still need to be translated to SCSI CDBs from
+ * the ULD.
+ */
+static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
 {
-	struct scsi_device *sdev = q->queuedata;
 	struct scsi_cmnd *cmd;
-	int specials_only = 0;
+	struct scsi_driver *drv;
+	int ret;
 
 	/*
-	 * Just check to see if the device is online.  If it isn't, we
-	 * refuse to process any commands.  The device must be brought
-	 * online before trying any recovery commands
+	 * Filesystem requests must transfer data.
 	 */
-	if (unlikely(!scsi_device_online(sdev))) {
-		sdev_printk(KERN_ERR, sdev,
-			    "rejecting I/O to offline device\n");
-		goto kill;
-	}
-	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
-		/* OK, we're not in a running state don't prep
-		 * user commands */
-		if (sdev->sdev_state == SDEV_DEL) {
-			/* Device is fully deleted, no commands
-			 * at all allowed down */
-			sdev_printk(KERN_ERR, sdev,
-				    "rejecting I/O to dead device\n");
-			goto kill;
-		}
-		/* OK, we only allow special commands (i.e. not
-		 * user initiated ones */
-		specials_only = sdev->sdev_state;
-	}
+	BUG_ON(!req->nr_phys_segments);
+
+	cmd = scsi_get_cmd_from_req(sdev, req);
+	if (unlikely(!cmd))
+		return BLKPREP_DEFER;
+
+	ret = scsi_init_io(cmd);
+	if (unlikely(ret))
+		return ret;
+
+	/*
+	 * Initialize the actual SCSI command for this request.
+	 */
+	drv = *(struct scsi_driver **)req->rq_disk->private_data;
+	if (unlikely(!drv->init_command(cmd))) {
+		scsi_release_buffers(cmd);
+		scsi_put_command(cmd);
+		return BLKPREP_KILL;
+	}
 
+	return BLKPREP_OK;
+}
+
+static int scsi_prep_fn(struct request_queue *q, struct request *req)
+{
+	struct scsi_device *sdev = q->queuedata;
+	int ret = BLKPREP_OK;
+
 	/*
-	 * Find the actual device driver associated with this command.
-	 * The SPECIAL requests are things like character device or
-	 * ioctls, which did not originate from ll_rw_blk.  Note that
-	 * the special field is also used to indicate the cmd for
-	 * the remainder of a partially fulfilled request that can
-	 * come up when there is a medium error.  We have to treat
-	 * these two cases differently.  We differentiate by looking
-	 * at request->cmd, as this tells us the real story.
+	 * If the device is not in running state we will reject some
+	 * or all commands.
 	 */
-	if (blk_special_request(req) && req->special)
-		cmd = req->special;
-	else if (blk_pc_request(req) || blk_fs_request(req)) {
-		if (unlikely(specials_only) && !(req->cmd_flags & REQ_PREEMPT)){
-			if (specials_only == SDEV_QUIESCE ||
-			    specials_only == SDEV_BLOCK)
-				goto defer;
-
-			sdev_printk(KERN_ERR, sdev,
-				    "rejecting I/O to device being removed\n");
-			goto kill;
-		}
+	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
+		switch (sdev->sdev_state) {
+		case SDEV_OFFLINE:
+			/*
+			 * If the device is offline we refuse to process any
+			 * commands.  The device must be brought online
+			 * before trying any recovery commands.
+			 */
+			sdev_printk(KERN_ERR, sdev,
+				    "rejecting I/O to offline device\n");
+			ret = BLKPREP_KILL;
+			break;
+		case SDEV_DEL:
+			/*
+			 * If the device is fully deleted, we refuse to
+			 * process any commands as well.
+			 */
+			sdev_printk(KERN_ERR, sdev,
+				    "rejecting I/O to dead device\n");
+			ret = BLKPREP_KILL;
+			break;
+		case SDEV_QUIESCE:
+		case SDEV_BLOCK:
+			/*
+			 * If the device is blocked we defer normal commands.
+			 */
+			if (!(req->cmd_flags & REQ_PREEMPT))
+				ret = BLKPREP_DEFER;
+			break;
+		default:
+			/*
+			 * For any other not fully online state we only allow
+			 * special commands.  In particular any user initiated
+			 * command is not allowed.
+			 */
+			if (!(req->cmd_flags & REQ_PREEMPT))
+				ret = BLKPREP_KILL;
+			break;
+		}
 
-		/*
-		 * Now try and find a command block that we can use.
-		 */
-		if (!req->special) {
-			cmd = scsi_get_command(sdev, GFP_ATOMIC);
-			if (unlikely(!cmd))
-				goto defer;
-		} else
-			cmd = req->special;
-
-		/* pull a tag out of the request if we have one */
-		cmd->tag = req->tag;
-	} else {
-		blk_dump_rq_flags(req, "SCSI bad req");
-		goto kill;
+		if (ret != BLKPREP_OK)
+			goto out;
 	}
-
-	/* note the overloading of req->special.  When the tag
-	 * is active it always means cmd.  If the tag goes
-	 * back for re-queueing, it may be reset */
-	req->special = cmd;
-	cmd->request = req;
-
-	/*
-	 * FIXME: drop the lock here because the functions below
-	 * expect to be called without the queue lock held.  Also,
-	 * previously, we dequeued the request before dropping the
-	 * lock.  We hope REQ_STARTED prevents anything untoward from
-	 * happening now.
-	 */
-	if (blk_fs_request(req) || blk_pc_request(req)) {
-		int ret;
 
+	switch (req->cmd_type) {
+	case REQ_TYPE_BLOCK_PC:
+		ret = scsi_setup_blk_pc_cmnd(sdev, req);
+		break;
+	case REQ_TYPE_FS:
+		ret = scsi_setup_fs_cmnd(sdev, req);
+		break;
+	default:
 		/*
-		 * This will do a couple of things:
-		 *  1) Fill in the actual SCSI command.
-		 *  2) Fill in any other upper-level specific fields
-		 * (timeout).
+		 * All other command types are not supported.
 		 *
-		 * If this returns 0, it means that the request failed
-		 * (reading past end of disk, reading offline device,
-		 * etc).   This won't actually talk to the device, but
-		 * some kinds of consistency checking may cause the
-		 * request to be rejected immediately.
+		 * Note that these days the SCSI subsystem does not use
+		 * REQ_TYPE_SPECIAL requests anymore.  These are only used
+		 * (directly or via blk_insert_request) by non-SCSI drivers.
 		 */
+		blk_dump_rq_flags(req, "SCSI bad req");
+		ret = BLKPREP_KILL;
+		break;
+	}
 
-		/*
-		 * This sets up the scatter-gather table (allocating if
-		 * required).
-		 */
-		ret = scsi_init_io(cmd);
-		switch(ret) {
-			/* For BLKPREP_KILL/DEFER the cmd was released */
-		case BLKPREP_KILL:
-			goto kill;
-		case BLKPREP_DEFER:
-			goto defer;
-		}
-
+ out:
+	switch (ret) {
+	case BLKPREP_KILL:
+		req->errors = DID_NO_CONNECT << 16;
+		break;
+	case BLKPREP_DEFER:
 		/*
-		 * Initialize the actual SCSI command for this request.
+		 * If we defer, the elv_next_request() returns NULL, but the
+		 * queue must be restarted, so we plug here if no returning
+		 * command will automatically do that.
 		 */
-		if (blk_pc_request(req)) {
-			scsi_setup_blk_pc_cmnd(cmd);
-		} else if (req->rq_disk) {
-			struct scsi_driver *drv;
-
-			drv = *(struct scsi_driver **)req->rq_disk->private_data;
-			if (unlikely(!drv->init_command(cmd))) {
-				scsi_release_buffers(cmd);
-				scsi_put_command(cmd);
-				goto kill;
-			}
-		}
-	}
-
-	/*
-	 * The request is now prepped, no need to come back here
-	 */
-	req->cmd_flags |= REQ_DONTPREP;
-	return BLKPREP_OK;
-
- defer:
-	/* If we defer, the elv_next_request() returns NULL, but the
-	 * queue must be restarted, so we plug here if no returning
-	 * command will automatically do that. */
-	if (sdev->device_busy == 0)
-		blk_plug_device(q);
-	return BLKPREP_DEFER;
- kill:
-	req->errors = DID_NO_CONNECT << 16;
-	return BLKPREP_KILL;
+		if (sdev->device_busy == 0)
+			blk_plug_device(q);
+		break;
+	default:
+		req->cmd_flags |= REQ_DONTPREP;
+	}
+
+	return ret;
 }
 
 /*