author     James Bottomley <jejb@mulgrave.(none)>    2005-09-09 11:44:16 -0400
committer  James Bottomley <jejb@mulgrave.(none)>    2005-09-09 11:44:16 -0400
commit     e91442b635be776ea205fba233bdd5bc74b62bc3 (patch)
tree       c04066f8d17be121244d870ab9347ca6e8c7cda3
parent     286f3e13a1dc7f32407629fbd7aabc8ea78c62b5 (diff)
[SCSI] SCSI core: fix leakage of scsi_cmnd's
From: Alan Stern <stern@rowland.harvard.edu>
This patch (as559b) adds a new routine, scsi_unprep_request, which
gets called every place a request is requeued. (That includes
scsi_queue_insert as well as scsi_requeue_command.) It also changes
scsi_kill_requests to make it call __scsi_done with result equal to
DID_NO_CONNECT << 16. (I'm not sure if it's necessary to call
scsi_init_cmd_errh here; maybe you can check on that.) Finally, the
patch changes the return value from scsi_end_request, to avoid
returning a stale pointer in the case where the request was requeued.
Fortunately the return value is used in only one place, and the change
actually simplified it.
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Rejections fixed up and
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
-rw-r--r--  drivers/scsi/scsi_lib.c | 111
1 file changed, 71 insertions(+), 40 deletions(-)
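
For orientation before the diff itself: the core of the fix is that every
requeue path now funnels through scsi_unprep_request(), which releases the
scsi_cmnd that prep allocated instead of leaking it. A condensed sketch of
that pattern, lifted from the patch below (locking kept, surrounding code
omitted; this is an illustration, not a drop-in source file):

/* Condensed from the patch below for illustration only. */
static void scsi_unprep_request(struct request *req)
{
        struct scsi_cmnd *cmd = req->special;

        req->flags &= ~REQ_DONTPREP;    /* force a fresh prep next time */
        req->special = (req->flags & REQ_SPECIAL) ? cmd->sc_request : NULL;

        scsi_release_buffers(cmd);      /* free the scatter-gather table */
        scsi_put_command(cmd);          /* return cmd -- this plugs the leak */
}

static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
        struct request *req = cmd->request;
        unsigned long flags;

        scsi_unprep_request(req);       /* cmd is a stale pointer from here on */
        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, req);
        spin_unlock_irqrestore(q->queue_lock, flags);

        scsi_run_queue(q);
}

The same unprep-before-requeue sequence appears in scsi_queue_insert() and at
the requeue tail of scsi_request_fn().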
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 2ad60f1dbc63..003f8cf47cf1 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -97,6 +97,30 @@ int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
 }
 
 static void scsi_run_queue(struct request_queue *q);
+static void scsi_release_buffers(struct scsi_cmnd *cmd);
+
+/*
+ * Function:    scsi_unprep_request()
+ *
+ * Purpose:     Remove all preparation done for a request, including its
+ *              associated scsi_cmnd, so that it can be requeued.
+ *
+ * Arguments:   req     - request to unprepare
+ *
+ * Lock status: Assumed that no locks are held upon entry.
+ *
+ * Returns:     Nothing.
+ */
+static void scsi_unprep_request(struct request *req)
+{
+        struct scsi_cmnd *cmd = req->special;
+
+        req->flags &= ~REQ_DONTPREP;
+        req->special = (req->flags & REQ_SPECIAL) ? cmd->sc_request : NULL;
+
+        scsi_release_buffers(cmd);
+        scsi_put_command(cmd);
+}
 
 /*
  * Function:    scsi_queue_insert()
@@ -116,12 +140,14 @@ static void scsi_run_queue(struct request_queue *q);
  *              commands.
  * Notes:       This could be called either from an interrupt context or a
  *              normal process context.
+ * Notes:       Upon return, cmd is a stale pointer.
  */
 int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
 {
         struct Scsi_Host *host = cmd->device->host;
         struct scsi_device *device = cmd->device;
         struct request_queue *q = device->request_queue;
+        struct request *req = cmd->request;
         unsigned long flags;
 
         SCSI_LOG_MLQUEUE(1,
@@ -162,8 +188,9 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
          * function.  The SCSI request function detects the blocked condition
          * and plugs the queue appropriately.
          */
+        scsi_unprep_request(req);
         spin_lock_irqsave(q->queue_lock, flags);
-        blk_requeue_request(q, cmd->request);
+        blk_requeue_request(q, req);
         spin_unlock_irqrestore(q->queue_lock, flags);
 
         scsi_run_queue(q);
@@ -552,15 +579,16 @@ static void scsi_run_queue(struct request_queue *q)
  *              I/O errors in the middle of the request, in which case
  *              we need to request the blocks that come after the bad
  *              sector.
+ * Notes:       Upon return, cmd is a stale pointer.
  */
 static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
 {
+        struct request *req = cmd->request;
         unsigned long flags;
 
-        cmd->request->flags &= ~REQ_DONTPREP;
-
+        scsi_unprep_request(req);
         spin_lock_irqsave(q->queue_lock, flags);
-        blk_requeue_request(q, cmd->request);
+        blk_requeue_request(q, req);
         spin_unlock_irqrestore(q->queue_lock, flags);
 
         scsi_run_queue(q);
@@ -595,13 +623,14 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
  *
  * Lock status: Assumed that lock is not held upon entry.
  *
- * Returns:     cmd if requeue done or required, NULL otherwise
+ * Returns:     cmd if requeue required, NULL otherwise.
  *
  * Notes:       This is called for block device requests in order to
  *              mark some number of sectors as complete.
  *
  *              We are guaranteeing that the request queue will be goosed
  *              at some point during this call.
+ * Notes:       If cmd was requeued, upon return it will be a stale pointer.
  */
 static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
                                           int bytes, int requeue)
@@ -624,14 +653,15 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
                 if (!uptodate && blk_noretry_request(req))
                         end_that_request_chunk(req, 0, leftover);
                 else {
-                        if (requeue)
+                        if (requeue) {
                                 /*
                                  * Bleah.  Leftovers again.  Stick the
                                  * leftovers in the front of the
                                  * queue, and goose the queue again.
                                  */
                                 scsi_requeue_command(q, cmd);
-
+                                cmd = NULL;
+                        }
                         return cmd;
                 }
         }
@@ -857,15 +887,13 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
                  * requeueing right here - we will requeue down below
                  * when we handle the bad sectors.
                  */
-                cmd = scsi_end_request(cmd, 1, good_bytes, result == 0);
 
                 /*
-                 * If the command completed without error, then either finish off the
-                 * rest of the command, or start a new one.
+                 * If the command completed without error, then either
+                 * finish off the rest of the command, or start a new one.
                  */
-                if (result == 0 || cmd == NULL ) {
+                if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL)
                         return;
-                }
         }
         /*
          * Now, if we were good little boys and girls, Santa left us a request
@@ -880,7 +908,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
                          * and quietly refuse further access.
                          */
                         cmd->device->changed = 1;
-                        cmd = scsi_end_request(cmd, 0,
+                        scsi_end_request(cmd, 0,
                                         this_count, 1);
                         return;
                 } else {
@@ -914,7 +942,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
                                 scsi_requeue_command(q, cmd);
                                 result = 0;
                         } else {
-                                cmd = scsi_end_request(cmd, 0, this_count, 1);
+                                scsi_end_request(cmd, 0, this_count, 1);
                                 return;
                         }
                         break;
@@ -931,7 +959,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
                                 dev_printk(KERN_INFO,
                                            &cmd->device->sdev_gendev,
                                            "Device not ready.\n");
-                        cmd = scsi_end_request(cmd, 0, this_count, 1);
+                        scsi_end_request(cmd, 0, this_count, 1);
                         return;
                 case VOLUME_OVERFLOW:
                         if (!(req->flags & REQ_QUIET)) {
@@ -941,7 +969,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
                                 __scsi_print_command(cmd->data_cmnd);
                                 scsi_print_sense("", cmd);
                         }
-                        cmd = scsi_end_request(cmd, 0, block_bytes, 1);
+                        scsi_end_request(cmd, 0, block_bytes, 1);
                         return;
                 default:
                         break;
@@ -972,7 +1000,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
                 block_bytes = req->hard_cur_sectors << 9;
                 if (!block_bytes)
                         block_bytes = req->data_len;
-                cmd = scsi_end_request(cmd, 0, block_bytes, 1);
+                scsi_end_request(cmd, 0, block_bytes, 1);
         }
 }
 EXPORT_SYMBOL(scsi_io_completion);
@@ -1336,19 +1364,24 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
 }
 
 /*
- * Kill requests for a dead device
+ * Kill a request for a dead device
  */
-static void scsi_kill_requests(request_queue_t *q)
+static void scsi_kill_request(struct request *req, request_queue_t *q)
 {
-        struct request *req;
+        struct scsi_cmnd *cmd = req->special;
 
-        while ((req = elv_next_request(q)) != NULL) {
-                blkdev_dequeue_request(req);
-                req->flags |= REQ_QUIET;
-                while (end_that_request_first(req, 0, req->nr_sectors))
-                        ;
-                end_that_request_last(req);
+        spin_unlock(q->queue_lock);
+        if (unlikely(cmd == NULL)) {
+                printk(KERN_CRIT "impossible request in %s.\n",
+                       __FUNCTION__);
+                BUG();
         }
+
+        scsi_init_cmd_errh(cmd);
+        cmd->result = DID_NO_CONNECT << 16;
+        atomic_inc(&cmd->device->iorequest_cnt);
+        __scsi_done(cmd);
+        spin_lock(q->queue_lock);
 }
 
 /*
@@ -1371,7 +1404,8 @@ static void scsi_request_fn(struct request_queue *q)
 
         if (!sdev) {
                 printk("scsi: killing requests for dead queue\n");
-                scsi_kill_requests(q);
+                while ((req = elv_next_request(q)) != NULL)
+                        scsi_kill_request(req, q);
                 return;
         }
 
@@ -1399,10 +1433,7 @@ static void scsi_request_fn(struct request_queue *q)
                         printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
                                sdev->host->host_no, sdev->id, sdev->lun);
                         blkdev_dequeue_request(req);
-                        req->flags |= REQ_QUIET;
-                        while (end_that_request_first(req, 0, req->nr_sectors))
-                                ;
-                        end_that_request_last(req);
+                        scsi_kill_request(req, q);
                         continue;
                 }
 
@@ -1415,6 +1446,14 @@ static void scsi_request_fn(struct request_queue *q)
                 sdev->device_busy++;
 
                 spin_unlock(q->queue_lock);
+                cmd = req->special;
+                if (unlikely(cmd == NULL)) {
+                        printk(KERN_CRIT "impossible request in %s.\n"
+                                         "please mail a stack trace to "
+                                         "linux-scsi@vger.kernel.org",
+                                         __FUNCTION__);
+                        BUG();
+                }
                 spin_lock(shost->host_lock);
 
                 if (!scsi_host_queue_ready(q, shost, sdev))
@@ -1433,15 +1472,6 @@ static void scsi_request_fn(struct request_queue *q)
                  */
                 spin_unlock_irq(shost->host_lock);
 
-                cmd = req->special;
-                if (unlikely(cmd == NULL)) {
-                        printk(KERN_CRIT "impossible request in %s.\n"
-                                         "please mail a stack trace to "
-                                         "linux-scsi@vger.kernel.org",
-                                         __FUNCTION__);
-                        BUG();
-                }
-
                 /*
                  * Finally, initialize any error handling parameters, and set up
                  * the timers for timeouts.
@@ -1477,6 +1507,7 @@ static void scsi_request_fn(struct request_queue *q)
                  * cases (host limits or settings) should run the queue at some
                  * later time.
                  */
+                scsi_unprep_request(req);
                 spin_lock_irq(q->queue_lock);
                 blk_requeue_request(q, req);
                 sdev->device_busy--;
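
Two notes on reading the change. First, scsi_end_request() now returns NULL
whenever it has disposed of the command (fully completed or requeued), which
is why the callers above stop assigning its return value: cmd must not be
touched after a NULL return. Second, the DID_NO_CONNECT << 16 stored by
scsi_kill_request() makes sense once you recall how the midlayer packs
cmd->result. A minimal standalone sketch of that packing (the make_result()
helper is illustrative, not kernel code; DID_NO_CONNECT is 0x01 as in the
2005 tree):

#include <stdio.h>

#define DID_NO_CONNECT 0x01     /* host byte: target not reachable */

/* Illustrative helper, not part of the patch: cmd->result packs four
 * status bytes as driver<<24 | host<<16 | msg<<8 | status. */
static unsigned int make_result(unsigned int driver, unsigned int host,
                                unsigned int msg, unsigned int status)
{
        return (driver << 24) | (host << 16) | (msg << 8) | status;
}

int main(void)
{
        /* What scsi_kill_request() reports for a dead device: */
        unsigned int result = make_result(0, DID_NO_CONNECT, 0, 0);
        printf("cmd->result = 0x%08x\n", result);       /* 0x00010000 */
        return 0;
}

Setting only the host byte tells __scsi_done()'s consumers that the failure
came from the transport/host level, not from the target's SCSI status.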
