author    | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-15 11:19:33 -0400
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-15 11:19:33 -0400
commit    | df3d80f5a5c74168be42788364d13cf6c83c7b9c (patch)
tree      | 892a964c2fd28d028f2fb7471e8543d3f4006a58 /drivers/scsi/scsi_lib.c
parent    | 3d06f7a5f74a813cee817c4b30b5e6f0398da0be (diff)
parent    | c8e91b0a8fc8493e3bf3efcb3c8f866e9453cf1c (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (207 commits)
[SCSI] gdth: fix CONFIG_ISA build failure
[SCSI] esp_scsi: remove __dev{init,exit}
[SCSI] gdth: !use_sg cleanup and use of scsi accessors
[SCSI] gdth: Move members from SCp to gdth_cmndinfo, stage 2
[SCSI] gdth: Setup proper per-command private data
[SCSI] gdth: Remove gdth_ctr_tab[]
[SCSI] gdth: switch to modern scsi host registration
[SCSI] gdth: gdth_interrupt() gdth_get_status() & gdth_wait() fixes
[SCSI] gdth: clean up host private data
[SCSI] gdth: Remove virt hosts
[SCSI] gdth: Reorder scsi_host_template intitializers
[SCSI] gdth: kill gdth_{read,write}[bwl] wrappers
[SCSI] gdth: Remove 2.4.x support, in-kernel changelog
[SCSI] gdth: split out pci probing
[SCSI] gdth: split out eisa probing
[SCSI] gdth: split out isa probing
gdth: Make one abuse of scsi_cmnd less obvious
[SCSI] NCR5380: Use scsi_eh API for REQUEST_SENSE invocation
[SCSI] usb storage: use scsi_eh API in REQUEST_SENSE execution
[SCSI] scsi_error: Refactoring scsi_error to facilitate in synchronous REQUEST_SENSE
...
Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r-- | drivers/scsi/scsi_lib.c | 123
1 file changed, 53 insertions, 70 deletions
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 604f4d717933..207f1aa08869 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -288,7 +288,7 @@ static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
 {
 	struct request_queue *q = rq->q;
 	int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	unsigned int data_len = 0, len, bytes, off;
+	unsigned int data_len = bufflen, len, bytes, off;
 	struct page *page;
 	struct bio *bio = NULL;
 	int i, err, nr_vecs = 0;
@@ -297,10 +297,15 @@ static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
 		page = sgl[i].page;
 		off = sgl[i].offset;
 		len = sgl[i].length;
-		data_len += len;
 
-		while (len > 0) {
+		while (len > 0 && data_len > 0) {
+			/*
+			 * sg sends a scatterlist that is larger than
+			 * the data_len it wants transferred for certain
+			 * IO sizes
+			 */
 			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
+			bytes = min(bytes, data_len);
 
 			if (!bio) {
 				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
@@ -332,12 +337,13 @@ static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
 
 			page++;
 			len -= bytes;
+			data_len -= bytes;
 			off = 0;
 		}
 	}
 
 	rq->buffer = rq->data = NULL;
-	rq->data_len = data_len;
+	rq->data_len = bufflen;
 	return 0;
 
 free_bios:
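
The scsi_req_map_sg() hunks above change how the request length is derived: instead of summing the scatterlist entries, the function now starts from the caller-supplied bufflen and clamps every chunk it maps, since callers such as the sg driver can hand in a scatterlist that covers more space than they actually want transferred. Below is a minimal user-space sketch of that clamping loop, with simplified names (sg_entry and map_chunks are illustrative stand-ins, not kernel code):

```c
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Hypothetical, simplified stand-in for one scatterlist entry. */
struct sg_entry {
	unsigned int offset;	/* offset into the first page */
	unsigned int length;	/* bytes covered by this entry */
};

/*
 * Walk the entries in page-sized chunks, but never account for more
 * than 'bufflen' bytes in total -- the entries may describe more
 * space than the caller wants transferred.
 */
static unsigned int map_chunks(const struct sg_entry *sgl, int nents,
			       unsigned int bufflen)
{
	unsigned int data_len = bufflen;	/* bytes still to map */
	unsigned int mapped = 0;
	int i;

	for (i = 0; i < nents; i++) {
		unsigned int off = sgl[i].offset;
		unsigned int len = sgl[i].length;

		while (len > 0 && data_len > 0) {
			unsigned int bytes = PAGE_SIZE - off;

			if (bytes > len)
				bytes = len;
			if (bytes > data_len)
				bytes = data_len;	/* the new clamp */

			mapped += bytes;	/* stands in for adding the chunk to the bio */
			len -= bytes;
			data_len -= bytes;
			off = 0;
		}
	}
	return mapped;
}

int main(void)
{
	/* Scatterlist covers three full pages, caller only wants 5000 bytes. */
	struct sg_entry sgl[] = { { 0, 4096 }, { 0, 4096 }, { 0, 4096 } };

	printf("mapped %u bytes\n", map_chunks(sgl, 3, 5000));
	return 0;
}
```

This prints "mapped 5000 bytes": the second page is only partially consumed and the third is skipped, which is the behaviour the data_len clamp buys the kernel function.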
@@ -430,6 +436,7 @@ EXPORT_SYMBOL_GPL(scsi_execute_async);
 static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
 {
 	cmd->serial_number = 0;
+	cmd->resid = 0;
 	memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);
 	if (cmd->cmd_len == 0)
 		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
@@ -924,11 +931,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 				break;
 			}
 		}
-		if (!(req->cmd_flags & REQ_QUIET)) {
-			scmd_printk(KERN_INFO, cmd,
-				    "Device not ready: ");
-			scsi_print_sense_hdr("", &sshdr);
-		}
+		if (!(req->cmd_flags & REQ_QUIET))
+			scsi_cmd_print_sense_hdr(cmd,
+						 "Device not ready",
+						 &sshdr);
+
 		scsi_end_request(cmd, 0, this_count, 1);
 		return;
 	case VOLUME_OVERFLOW:
@@ -962,7 +969,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 	}
 	scsi_end_request(cmd, 0, this_count, !result);
 }
-EXPORT_SYMBOL(scsi_io_completion);
 
 /*
  * Function:    scsi_init_io()
@@ -1019,9 +1025,6 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
 	printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
 			req->current_nr_sectors);
 
-	/* release the command and kill it */
-	scsi_release_buffers(cmd);
-	scsi_put_command(cmd);
 	return BLKPREP_KILL;
 }
 
@@ -1046,21 +1049,13 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
 	return cmd;
 }
 
-static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
-{
-	BUG_ON(!blk_pc_request(cmd->request));
-	/*
-	 * This will complete the whole command with uptodate=1 so
-	 * as far as the block layer is concerned the command completed
-	 * successfully. Since this is a REQ_BLOCK_PC command the
-	 * caller should check the request's errors value
-	 */
-	scsi_io_completion(cmd, cmd->request_bufflen);
-}
-
-static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
+int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
 {
 	struct scsi_cmnd *cmd;
+	int ret = scsi_prep_state_check(sdev, req);
+
+	if (ret != BLKPREP_OK)
+		return ret;
 
 	cmd = scsi_get_cmd_from_req(sdev, req);
 	if (unlikely(!cmd))
@@ -1103,21 +1098,22 @@ static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
 	cmd->transfersize = req->data_len;
 	cmd->allowed = req->retries;
 	cmd->timeout_per_command = req->timeout;
-	cmd->done = scsi_blk_pc_done;
 	return BLKPREP_OK;
 }
+EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
 
 /*
  * Setup a REQ_TYPE_FS command.  These are simple read/write request
  * from filesystems that still need to be translated to SCSI CDBs from
  * the ULD.
  */
-static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
+int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
 {
 	struct scsi_cmnd *cmd;
-	struct scsi_driver *drv;
-	int ret;
+	int ret = scsi_prep_state_check(sdev, req);
 
+	if (ret != BLKPREP_OK)
+		return ret;
 	/*
 	 * Filesystem requests must transfer data.
 	 */
@@ -1127,26 +1123,12 @@ static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
 	if (unlikely(!cmd))
 		return BLKPREP_DEFER;
 
-	ret = scsi_init_io(cmd);
-	if (unlikely(ret))
-		return ret;
-
-	/*
-	 * Initialize the actual SCSI command for this request.
-	 */
-	drv = *(struct scsi_driver **)req->rq_disk->private_data;
-	if (unlikely(!drv->init_command(cmd))) {
-		scsi_release_buffers(cmd);
-		scsi_put_command(cmd);
-		return BLKPREP_KILL;
-	}
-
-	return BLKPREP_OK;
+	return scsi_init_io(cmd);
 }
+EXPORT_SYMBOL(scsi_setup_fs_cmnd);
 
-static int scsi_prep_fn(struct request_queue *q, struct request *req)
+int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
 {
-	struct scsi_device *sdev = q->queuedata;
 	int ret = BLKPREP_OK;
 
 	/*
@@ -1192,35 +1174,25 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
 			ret = BLKPREP_KILL;
 			break;
 		}
-
-		if (ret != BLKPREP_OK)
-			goto out;
 	}
+	return ret;
+}
+EXPORT_SYMBOL(scsi_prep_state_check);
 
-	switch (req->cmd_type) {
-	case REQ_TYPE_BLOCK_PC:
-		ret = scsi_setup_blk_pc_cmnd(sdev, req);
-		break;
-	case REQ_TYPE_FS:
-		ret = scsi_setup_fs_cmnd(sdev, req);
-		break;
-	default:
-		/*
-		 * All other command types are not supported.
-		 *
-		 * Note that these days the SCSI subsystem does not use
-		 * REQ_TYPE_SPECIAL requests anymore.  These are only used
-		 * (directly or via blk_insert_request) by non-SCSI drivers.
-		 */
-		blk_dump_rq_flags(req, "SCSI bad req");
-		ret = BLKPREP_KILL;
-		break;
-	}
+int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
+{
+	struct scsi_device *sdev = q->queuedata;
 
-out:
 	switch (ret) {
 	case BLKPREP_KILL:
 		req->errors = DID_NO_CONNECT << 16;
+		/* release the command and kill it */
+		if (req->special) {
+			struct scsi_cmnd *cmd = req->special;
+			scsi_release_buffers(cmd);
+			scsi_put_command(cmd);
+			req->special = NULL;
+		}
 		break;
 	case BLKPREP_DEFER:
 		/*
@@ -1237,6 +1209,17 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
 
 	return ret;
 }
+EXPORT_SYMBOL(scsi_prep_return);
+
+static int scsi_prep_fn(struct request_queue *q, struct request *req)
+{
+	struct scsi_device *sdev = q->queuedata;
+	int ret = BLKPREP_KILL;
+
+	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
+		ret = scsi_setup_blk_pc_cmnd(sdev, req);
+	return scsi_prep_return(q, req, ret);
+}
 
 /*
  * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
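
Taken together, the prep-path hunks split the old monolithic scsi_prep_fn() into exported building blocks: scsi_prep_state_check() validates the device state, scsi_setup_blk_pc_cmnd() and scsi_setup_fs_cmnd() build the command, and scsi_prep_return() centralises the BLKPREP_* handling, including releasing the command on BLKPREP_KILL. An upper-level driver can therefore register its own prep_fn on the request queue and reuse these helpers. The sketch below shows how such a prep_fn might be wired up against this API; it is illustrative only (init_my_cdb() is a hypothetical driver-specific step, and the header declaring the exported helpers is assumed to be <scsi/scsi_driver.h>):

```c
#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>	/* assumed to declare the exported prep helpers */

/*
 * Hypothetical per-driver step that fills in the CDB once the generic
 * setup has succeeded; returns nonzero on success.  It stands in for
 * what a disk or cdrom driver would do in its own prep_fn.
 */
static int init_my_cdb(struct scsi_cmnd *cmd)
{
	/* build cmd->cmnd[] here in a real driver */
	return 1;
}

static int my_prep_fn(struct request_queue *q, struct request *rq)
{
	struct scsi_device *sdev = q->queuedata;
	int ret;

	/* Generic command setup, now reachable from outside scsi_lib.c. */
	if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
		ret = scsi_setup_blk_pc_cmnd(sdev, rq);
	else if (rq->cmd_type == REQ_TYPE_FS)
		ret = scsi_setup_fs_cmnd(sdev, rq);
	else
		ret = BLKPREP_KILL;

	/* Driver-specific CDB construction only after the generic setup
	 * succeeded; rq->special holds the scsi_cmnd at this point. */
	if (ret == BLKPREP_OK && !init_my_cdb(rq->special))
		ret = BLKPREP_KILL;

	/* scsi_prep_return() owns the BLKPREP_* bookkeeping, including
	 * freeing the command when the request is killed. */
	return scsi_prep_return(q, rq, ret);
}
```

Note how the error path that used to be duplicated in scsi_init_io() and scsi_setup_fs_cmnd() (scsi_release_buffers() plus scsi_put_command()) now lives once in scsi_prep_return(), so a driver-supplied prep_fn gets the cleanup for free.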