Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r--   drivers/scsi/scsi_lib.c | 117
1 file changed, 72 insertions(+), 45 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index a9ac5b1b1667..4cf902efbdbf 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -175,7 +175,7 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
  *
  * returns the req->errors value which is the scsi_cmnd result
  * field.
- **/
+ */
 int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
                 int data_direction, void *buffer, unsigned bufflen,
                 unsigned char *sense, int timeout, int retries, int flags)
@@ -274,7 +274,7 @@ static void scsi_bi_endio(struct bio *bio, int error)
 /**
  * scsi_req_map_sg - map a scatterlist into a request
  * @rq:      request to fill
- * @sg:      scatterlist
+ * @sgl:     scatterlist
  * @nsegs:   number of elements
  * @bufflen: len of buffer
  * @gfp:     memory allocation flags
@@ -365,14 +365,16 @@ free_bios:
  * @sdev:     scsi device
  * @cmd:      scsi command
  * @cmd_len:  length of scsi cdb
- * @data_direction: data direction
+ * @data_direction: DMA_TO_DEVICE, DMA_FROM_DEVICE, or DMA_NONE
  * @buffer:   data buffer (this can be a kernel buffer or scatterlist)
  * @bufflen:  len of buffer
  * @use_sg:   if buffer is a scatterlist this is the number of elements
  * @timeout:  request timeout in seconds
  * @retries:  number of times to retry request
- * @flags:    or into request flags
- **/
+ * @privdata: data passed to done()
+ * @done:     callback function when done
+ * @gfp:      memory allocation flags
+ */
 int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
                        int cmd_len, int data_direction, void *buffer, unsigned bufflen,
                        int use_sg, int timeout, int retries, void *privdata,
@@ -439,7 +441,7 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
 {
        cmd->serial_number = 0;
        cmd->resid = 0;
-       memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);
+       memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
        if (cmd->cmd_len == 0)
                cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
 }
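A note on the SCSI_SENSE_BUFFERSIZE change above: using the named constant instead of sizeof keeps the memset correct even if sense_buffer later stops being an embedded array; that motivation is an inference, not stated in the patch. A minimal stand-alone illustration with hypothetical struct names:

#include <string.h>

#define SCSI_SENSE_BUFFERSIZE 96        /* size used by the SCSI midlayer */

struct cmd_with_array {
        unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE]; /* sizeof == 96 */
};

struct cmd_with_pointer {
        unsigned char *sense_buffer;    /* sizeof == 4 or 8, not 96 */
};

static void clear_sense(struct cmd_with_pointer *cmd)
{
        /* 'sizeof cmd->sense_buffer' would clear only a pointer's worth of
         * bytes here; the named constant always clears the full buffer. */
        memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
}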
@@ -524,7 +526,7 @@ static void scsi_run_queue(struct request_queue *q)
        struct Scsi_Host *shost = sdev->host;
        unsigned long flags;

-       if (sdev->single_lun)
+       if (scsi_target(sdev)->single_lun)
                scsi_single_lun_run(sdev);

        spin_lock_irqsave(shost->host_lock, flags);
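This hunk (and a matching one in scsi_request_fn further down) reads single_lun from the scsi_target rather than from each scsi_device. Code that previously set sdev->single_lun would set the flag on the target instead; a hypothetical sketch, with a placeholder predicate and hook name:

static int example_slave_configure(struct scsi_device *sdev)
{
        /* hypothetical: mark the whole target as one-LUN-at-a-time */
        if (device_wants_single_lun(sdev))      /* placeholder predicate */
                scsi_target(sdev)->single_lun = 1;
        return 0;
}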
@@ -1102,7 +1104,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
  *
  * Returns:     0 on success
  *              BLKPREP_DEFER if the failure is retryable
- *              BLKPREP_KILL if the failure is fatal
  */
 static int scsi_init_io(struct scsi_cmnd *cmd)
 {
@@ -1136,17 +1137,9 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
         * each segment.
         */
        count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
-       if (likely(count <= cmd->use_sg)) {
-               cmd->use_sg = count;
-               return BLKPREP_OK;
-       }
-
-       printk(KERN_ERR "Incorrect number of segments after building list\n");
-       printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
-       printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
-                       req->current_nr_sectors);
-
-       return BLKPREP_KILL;
+       BUG_ON(count > cmd->use_sg);
+       cmd->use_sg = count;
+       return BLKPREP_OK;
 }

 static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
@@ -1557,7 +1550,7 @@ static void scsi_request_fn(struct request_queue *q)

                if (!scsi_host_queue_ready(q, shost, sdev))
                        goto not_ready;
-               if (sdev->single_lun) {
+               if (scsi_target(sdev)->single_lun) {
                        if (scsi_target(sdev)->starget_sdev_user &&
                            scsi_target(sdev)->starget_sdev_user != sdev)
                                goto not_ready;
@@ -1675,6 +1668,14 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,

        if (!shost->use_clustering)
                clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+
+       /*
+        * set a reasonable default alignment on word boundaries: the
+        * host and device may alter it using
+        * blk_queue_update_dma_alignment() later.
+        */
+       blk_queue_dma_alignment(q, 0x03);
+
        return q;
 }
 EXPORT_SYMBOL(__scsi_alloc_queue);
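The 0x03 mask above only guarantees word alignment; as the added comment notes, blk_queue_update_dma_alignment() lets a host or device raise the requirement later, and it only ever tightens the mask, so the default survives for less demanding hardware. A hypothetical LLD hook that needs 512-byte alignment might look like:

static int example_configure_dma(struct scsi_device *sdev)
{
        /* 511 == 512-byte alignment mask; overrides the 0x03 default only
         * because it is stricter than the existing setting. */
        blk_queue_update_dma_alignment(sdev->request_queue, 511);
        return 0;
}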
@@ -1804,7 +1805,7 @@ void scsi_exit_queue(void)
  * @timeout: command timeout
  * @retries: number of retries before failing
  * @data: returns a structure abstracting the mode header data
- * @sense: place to put sense data (or NULL if no sense to be collected).
+ * @sshdr: place to put sense data (or NULL if no sense to be collected).
  *      must be SCSI_SENSE_BUFFERSIZE big.
  *
  * Returns zero if successful; negative error number or scsi
@@ -1871,8 +1872,7 @@ scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
 EXPORT_SYMBOL_GPL(scsi_mode_select);

 /**
- *     scsi_mode_sense - issue a mode sense, falling back from 10 to
- *             six bytes if necessary.
+ *     scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary.
  *     @sdev:  SCSI device to be queried
  *     @dbd:   set if mode sense will allow block descriptors to be returned
  *     @modepage: mode page being requested
@@ -1881,13 +1881,13 @@ EXPORT_SYMBOL_GPL(scsi_mode_select);
  *     @timeout: command timeout
  *     @retries: number of retries before failing
  *     @data: returns a structure abstracting the mode header data
- *     @sense: place to put sense data (or NULL if no sense to be collected).
+ *     @sshdr: place to put sense data (or NULL if no sense to be collected).
  *             must be SCSI_SENSE_BUFFERSIZE big.
  *
  *     Returns zero if unsuccessful, or the header offset (either 4
  *     or 8 depending on whether a six or ten byte command was
  *     issued) if successful.
- **/
+ */
 int
 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
                unsigned char *buffer, int len, int timeout, int retries,
@@ -1981,40 +1981,69 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
 }
 EXPORT_SYMBOL(scsi_mode_sense);

+/**
+ *     scsi_test_unit_ready - test if unit is ready
+ *     @sdev:  scsi device to change the state of.
+ *     @timeout: command timeout
+ *     @retries: number of retries before failing
+ *     @sshdr_external: Optional pointer to struct scsi_sense_hdr for
+ *             returning sense. Make sure that this is cleared before passing
+ *             in.
+ *
+ *     Returns zero if unsuccessful or an error if TUR failed.  For
+ *     removable media, a return of NOT_READY or UNIT_ATTENTION is
+ *     translated to success, with the ->changed flag updated.
+ **/
 int
-scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
+scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
+                    struct scsi_sense_hdr *sshdr_external)
 {
        char cmd[] = {
                TEST_UNIT_READY, 0, 0, 0, 0, 0,
        };
-       struct scsi_sense_hdr sshdr;
+       struct scsi_sense_hdr *sshdr;
        int result;

-       result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
-                                 timeout, retries);
+       if (!sshdr_external)
+               sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
+       else
+               sshdr = sshdr_external;
+
+       /* try to eat the UNIT_ATTENTION if there are enough retries */
+       do {
+               result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
+                                         timeout, retries);
+       } while ((driver_byte(result) & DRIVER_SENSE) &&
+                sshdr && sshdr->sense_key == UNIT_ATTENTION &&
+                --retries);
+
+       if (!sshdr)
+               /* could not allocate sense buffer, so can't process it */
+               return result;

        if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {

-               if ((scsi_sense_valid(&sshdr)) &&
-                   ((sshdr.sense_key == UNIT_ATTENTION) ||
-                    (sshdr.sense_key == NOT_READY))) {
+               if ((scsi_sense_valid(sshdr)) &&
+                   ((sshdr->sense_key == UNIT_ATTENTION) ||
+                    (sshdr->sense_key == NOT_READY))) {
                        sdev->changed = 1;
                        result = 0;
                }
        }
+       if (!sshdr_external)
+               kfree(sshdr);
        return result;
 }
 EXPORT_SYMBOL(scsi_test_unit_ready);

 /**
- *     scsi_device_set_state - Take the given device through the device
- *             state model.
+ *     scsi_device_set_state - Take the given device through the device state model.
  *     @sdev:  scsi device to change the state of.
  *     @state: state to change to.
  *
  *     Returns zero if unsuccessful or an error if the requested
  *     transition is illegal.
- **/
+ */
 int
 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
 {
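With the new scsi_test_unit_ready() signature, callers pass either NULL (the function then allocates and frees a scsi_sense_hdr internally, as shown in the hunk) or a cleared, caller-owned header when they want the sense data back. A hypothetical caller sketch, not part of this patch:

static int example_medium_present(struct scsi_device *sdev)
{
        struct scsi_sense_hdr sshdr;
        int ret;

        /* the header must be cleared before being passed in */
        memset(&sshdr, 0, sizeof(sshdr));

        ret = scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr);
        if (!ret)
                return 1;       /* unit ready, medium present */

        if (scsi_sense_valid(&sshdr) && sshdr.sense_key == NOT_READY)
                return 0;       /* no medium, or still becoming ready */

        return 0;               /* treat other failures as not present */
}

/* A caller that does not care about the sense data simply passes NULL:
 *      scsi_test_unit_ready(sdev, 30 * HZ, 3, NULL);
 */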
@@ -2264,7 +2293,7 @@ EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
  *     Must be called with user context, may sleep.
  *
  *     Returns zero if unsuccessful or an error if not.
- **/
+ */
 int
 scsi_device_quiesce(struct scsi_device *sdev)
 {
@@ -2289,7 +2318,7 @@ EXPORT_SYMBOL(scsi_device_quiesce);
  *     queues.
  *
  *     Must be called with user context, may sleep.
- **/
+ */
 void
 scsi_device_resume(struct scsi_device *sdev)
 {
@@ -2326,8 +2355,7 @@ scsi_target_resume(struct scsi_target *starget)
 EXPORT_SYMBOL(scsi_target_resume);

 /**
- * scsi_internal_device_block - internal function to put a device
- *                              temporarily into the SDEV_BLOCK state
+ * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
  * @sdev:      device to block
  *
  * Block request made by scsi lld's to temporarily stop all
@@ -2342,7 +2370,7 @@ EXPORT_SYMBOL(scsi_target_resume);
  * state, all commands are deferred until the scsi lld reenables
  * the device with scsi_device_unblock or device_block_tmo fires.
  * This routine assumes the host_lock is held on entry.
- **/
+ */
 int
 scsi_internal_device_block(struct scsi_device *sdev)
 {
@@ -2382,7 +2410,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block);
  * (which must be a legal transition) allowing the midlayer to
  * goose the queue for this device.  This routine assumes the
  * host_lock is held upon entry.
- **/
+ */
 int
 scsi_internal_device_unblock(struct scsi_device *sdev)
 {
@@ -2460,7 +2488,7 @@ EXPORT_SYMBOL_GPL(scsi_target_unblock);

 /**
  * scsi_kmap_atomic_sg - find and atomically map an sg-elemnt
- * @sg:        scatter-gather list
+ * @sgl:       scatter-gather list
  * @sg_count:  number of segments in sg
  * @offset:    offset in bytes into sg, on return offset into the mapped area
  * @len:       bytes to map, on return number of bytes mapped
@@ -2509,8 +2537,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
 EXPORT_SYMBOL(scsi_kmap_atomic_sg);

 /**
- * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously
- *                         mapped with scsi_kmap_atomic_sg
+ * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
  * @virt:      virtual address to be unmapped
  */
 void scsi_kunmap_atomic_sg(void *virt)