about summary refs log tree commit diff stats
path: root/drivers/scsi/scsi_lib.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r--	drivers/scsi/scsi_lib.c	313
1 file changed, 166 insertions(+), 147 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 3ac4890ce08..2f12f9f12fc 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -996,25 +996,14 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
 	int count;
 
 	/*
-	 * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
-	 */
-	if (blk_pc_request(req) && !req->bio) {
-		cmd->request_bufflen = req->data_len;
-		cmd->request_buffer = req->data;
-		req->buffer = req->data;
-		cmd->use_sg = 0;
-		return 0;
-	}
-
-	/*
-	 * we used to not use scatter-gather for single segment request,
+	 * We used to not use scatter-gather for single segment request,
 	 * but now we do (it makes highmem I/O easier to support without
 	 * kmapping pages)
 	 */
 	cmd->use_sg = req->nr_phys_segments;
 
 	/*
-	 * if sg table allocation fails, requeue request later.
+	 * If sg table allocation fails, requeue request later.
 	 */
 	sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
 	if (unlikely(!sgpnt)) {
@@ -1022,24 +1011,21 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
 		return BLKPREP_DEFER;
 	}
 
+	req->buffer = NULL;
 	cmd->request_buffer = (char *) sgpnt;
-	cmd->request_bufflen = req->nr_sectors << 9;
 	if (blk_pc_request(req))
 		cmd->request_bufflen = req->data_len;
-	req->buffer = NULL;
+	else
+		cmd->request_bufflen = req->nr_sectors << 9;
 
 	/*
 	 * Next, walk the list, and fill in the addresses and sizes of
 	 * each segment.
 	 */
 	count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
-
-	/*
-	 * mapped well, send it off
-	 */
 	if (likely(count <= cmd->use_sg)) {
 		cmd->use_sg = count;
-		return 0;
+		return BLKPREP_OK;
 	}
 
 	printk(KERN_ERR "Incorrect number of segments after building list\n");
@@ -1069,6 +1055,27 @@ static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
 	return -EOPNOTSUPP;
 }
 
+static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
+		struct request *req)
+{
+	struct scsi_cmnd *cmd;
+
+	if (!req->special) {
+		cmd = scsi_get_command(sdev, GFP_ATOMIC);
+		if (unlikely(!cmd))
+			return NULL;
+		req->special = cmd;
+	} else {
+		cmd = req->special;
+	}
+
+	/* pull a tag out of the request if we have one */
+	cmd->tag = req->tag;
+	cmd->request = req;
+
+	return cmd;
+}
+
 static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
 {
 	BUG_ON(!blk_pc_request(cmd->request));
@@ -1081,9 +1088,37 @@ static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
 	scsi_io_completion(cmd, cmd->request_bufflen);
 }
 
-static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
+static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
 {
-	struct request *req = cmd->request;
+	struct scsi_cmnd *cmd;
+
+	cmd = scsi_get_cmd_from_req(sdev, req);
+	if (unlikely(!cmd))
+		return BLKPREP_DEFER;
+
+	/*
+	 * BLOCK_PC requests may transfer data, in which case they must
+	 * a bio attached to them.  Or they might contain a SCSI command
+	 * that does not transfer data, in which case they may optionally
+	 * submit a request without an attached bio.
+	 */
+	if (req->bio) {
+		int ret;
+
+		BUG_ON(!req->nr_phys_segments);
+
+		ret = scsi_init_io(cmd);
+		if (unlikely(ret))
+			return ret;
+	} else {
+		BUG_ON(req->data_len);
+		BUG_ON(req->data);
+
+		cmd->request_bufflen = 0;
+		cmd->request_buffer = NULL;
+		cmd->use_sg = 0;
+		req->buffer = NULL;
+	}
 
 	BUILD_BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
 	memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
@@ -1099,154 +1134,138 @@ static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
 	cmd->allowed = req->retries;
 	cmd->timeout_per_command = req->timeout;
 	cmd->done = scsi_blk_pc_done;
+	return BLKPREP_OK;
 }
 
-static int scsi_prep_fn(struct request_queue *q, struct request *req)
+/*
+ * Setup a REQ_TYPE_FS command.  These are simple read/write request
+ * from filesystems that still need to be translated to SCSI CDBs from
+ * the ULD.
+ */
+static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
 {
-	struct scsi_device *sdev = q->queuedata;
 	struct scsi_cmnd *cmd;
-	int specials_only = 0;
+	struct scsi_driver *drv;
+	int ret;
 
 	/*
-	 * Just check to see if the device is online.  If it isn't, we
-	 * refuse to process any commands.  The device must be brought
-	 * online before trying any recovery commands
+	 * Filesystem requests must transfer data.
 	 */
-	if (unlikely(!scsi_device_online(sdev))) {
-		sdev_printk(KERN_ERR, sdev,
-			    "rejecting I/O to offline device\n");
-		goto kill;
-	}
-	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
-		/* OK, we're not in a running state don't prep
-		 * user commands */
-		if (sdev->sdev_state == SDEV_DEL) {
-			/* Device is fully deleted, no commands
-			 * at all allowed down */
-			sdev_printk(KERN_ERR, sdev,
-				    "rejecting I/O to dead device\n");
-			goto kill;
-		}
-		/* OK, we only allow special commands (i.e. not
-		 * user initiated ones */
-		specials_only = sdev->sdev_state;
-	}
+	BUG_ON(!req->nr_phys_segments);
+
+	cmd = scsi_get_cmd_from_req(sdev, req);
+	if (unlikely(!cmd))
+		return BLKPREP_DEFER;
+
+	ret = scsi_init_io(cmd);
+	if (unlikely(ret))
+		return ret;
+
+	/*
+	 * Initialize the actual SCSI command for this request.
+	 */
+	drv = *(struct scsi_driver **)req->rq_disk->private_data;
+	if (unlikely(!drv->init_command(cmd))) {
+		scsi_release_buffers(cmd);
+		scsi_put_command(cmd);
+		return BLKPREP_KILL;
+	}
 
+	return BLKPREP_OK;
+}
+
+static int scsi_prep_fn(struct request_queue *q, struct request *req)
+{
+	struct scsi_device *sdev = q->queuedata;
+	int ret = BLKPREP_OK;
+
 	/*
-	 * Find the actual device driver associated with this command.
-	 * The SPECIAL requests are things like character device or
-	 * ioctls, which did not originate from ll_rw_blk.  Note that
-	 * the special field is also used to indicate the cmd for
-	 * the remainder of a partially fulfilled request that can
-	 * come up when there is a medium error.  We have to treat
-	 * these two cases differently.  We differentiate by looking
-	 * at request->cmd, as this tells us the real story.
+	 * If the device is not in running state we will reject some
+	 * or all commands.
 	 */
-	if (blk_special_request(req) && req->special)
-		cmd = req->special;
-	else if (blk_pc_request(req) || blk_fs_request(req)) {
-		if (unlikely(specials_only) && !(req->cmd_flags & REQ_PREEMPT)){
-			if (specials_only == SDEV_QUIESCE ||
-			    specials_only == SDEV_BLOCK)
-				goto defer;
-
+	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
+		switch (sdev->sdev_state) {
+		case SDEV_OFFLINE:
+			/*
+			 * If the device is offline we refuse to process any
+			 * commands.  The device must be brought online
+			 * before trying any recovery commands.
+			 */
 			sdev_printk(KERN_ERR, sdev,
-				    "rejecting I/O to device being removed\n");
-			goto kill;
+				    "rejecting I/O to offline device\n");
+			ret = BLKPREP_KILL;
+			break;
+		case SDEV_DEL:
+			/*
+			 * If the device is fully deleted, we refuse to
+			 * process any commands as well.
+			 */
+			sdev_printk(KERN_ERR, sdev,
+				    "rejecting I/O to dead device\n");
+			ret = BLKPREP_KILL;
+			break;
+		case SDEV_QUIESCE:
+		case SDEV_BLOCK:
+			/*
+			 * If the devices is blocked we defer normal commands.
+			 */
+			if (!(req->cmd_flags & REQ_PREEMPT))
+				ret = BLKPREP_DEFER;
+			break;
+		default:
+			/*
+			 * For any other not fully online state we only allow
+			 * special commands.  In particular any user initiated
+			 * command is not allowed.
+			 */
+			if (!(req->cmd_flags & REQ_PREEMPT))
+				ret = BLKPREP_KILL;
+			break;
 		}
 
-		/*
-		 * Now try and find a command block that we can use.
-		 */
-		if (!req->special) {
-			cmd = scsi_get_command(sdev, GFP_ATOMIC);
-			if (unlikely(!cmd))
-				goto defer;
-		} else
-			cmd = req->special;
-
-		/* pull a tag out of the request if we have one */
-		cmd->tag = req->tag;
-	} else {
-		blk_dump_rq_flags(req, "SCSI bad req");
-		goto kill;
+		if (ret != BLKPREP_OK)
+			goto out;
 	}
-
-	/* note the overloading of req->special.  When the tag
-	 * is active it always means cmd.  If the tag goes
-	 * back for re-queueing, it may be reset */
-	req->special = cmd;
-	cmd->request = req;
-
-	/*
-	 * FIXME: drop the lock here because the functions below
-	 * expect to be called without the queue lock held.  Also,
-	 * previously, we dequeued the request before dropping the
-	 * lock.  We hope REQ_STARTED prevents anything untoward from
-	 * happening now.
-	 */
-	if (blk_fs_request(req) || blk_pc_request(req)) {
-		int ret;
 
+	switch (req->cmd_type) {
+	case REQ_TYPE_BLOCK_PC:
+		ret = scsi_setup_blk_pc_cmnd(sdev, req);
+		break;
+	case REQ_TYPE_FS:
+		ret = scsi_setup_fs_cmnd(sdev, req);
+		break;
+	default:
 		/*
-		 * This will do a couple of things:
-		 *	1) Fill in the actual SCSI command.
-		 *	2) Fill in any other upper-level specific fields
-		 *	   (timeout).
+		 * All other command types are not supported.
 		 *
-		 * If this returns 0, it means that the request failed
-		 * (reading past end of disk, reading offline device,
-		 * etc).   This won't actually talk to the device, but
-		 * some kinds of consistency checking may cause the
-		 * request to be rejected immediately.
+		 * Note that these days the SCSI subsystem does not use
+		 * REQ_TYPE_SPECIAL requests anymore.  These are only used
+		 * (directly or via blk_insert_request) by non-SCSI drivers.
 		 */
+		blk_dump_rq_flags(req, "SCSI bad req");
+		ret = BLKPREP_KILL;
+		break;
+	}
 
-		/*
-		 * This sets up the scatter-gather table (allocating if
-		 * required).
-		 */
-		ret = scsi_init_io(cmd);
-		switch(ret) {
-		/* For BLKPREP_KILL/DEFER the cmd was released */
-		case BLKPREP_KILL:
-			goto kill;
-		case BLKPREP_DEFER:
-			goto defer;
-		}
-
+ out:
+	switch (ret) {
+	case BLKPREP_KILL:
+		req->errors = DID_NO_CONNECT << 16;
+		break;
+	case BLKPREP_DEFER:
 		/*
-		 * Initialize the actual SCSI command for this request.
+		 * If we defer, the elv_next_request() returns NULL, but the
+		 * queue must be restarted, so we plug here if no returning
+		 * command will automatically do that.
 		 */
-		if (blk_pc_request(req)) {
-			scsi_setup_blk_pc_cmnd(cmd);
-		} else if (req->rq_disk) {
-			struct scsi_driver *drv;
-
-			drv = *(struct scsi_driver **)req->rq_disk->private_data;
-			if (unlikely(!drv->init_command(cmd))) {
-				scsi_release_buffers(cmd);
-				scsi_put_command(cmd);
-				goto kill;
-			}
-		}
-	}
-
-	/*
-	 * The request is now prepped, no need to come back here
-	 */
-	req->cmd_flags |= REQ_DONTPREP;
-	return BLKPREP_OK;
-
- defer:
-	/* If we defer, the elv_next_request() returns NULL, but the
-	 * queue must be restarted, so we plug here if no returning
-	 * command will automatically do that. */
-	if (sdev->device_busy == 0)
-		blk_plug_device(q);
-	return BLKPREP_DEFER;
- kill:
-	req->errors = DID_NO_CONNECT << 16;
-	return BLKPREP_KILL;
+		if (sdev->device_busy == 0)
+			blk_plug_device(q);
+		break;
+	default:
+		req->cmd_flags |= REQ_DONTPREP;
+	}
+
+	return ret;
 }
 
 /*