author		Michael Holzheu <holzheu@de.ibm.com>	2008-05-30 04:03:25 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2008-05-30 04:03:33 -0400
commit		f71ad62a264a89cb1952df0c92b167005de8d1b0 (patch)
tree		de173ab5f138204d5ba5458de249c39e5faecdbd
parent		97195d6b411fec8e33aa55b6a7c3dde7984d65ca (diff)
[S390] tape: Fix race condition in tape block device driver
Due to an incorrect function call sequence it can happen that a tape
block request is finished before the request is taken from the block
request queue. The following sequence leads to that condition:

 * tapeblock_start_request() -> start CCW program
 * Request finishes -> IO interrupt
 * tapeblock_end_request()
 * end_that_request_last()

If blkdev_dequeue_request() has not been called before
end_that_request_last(), a kernel bug is triggered in
end_that_request_last() because the request is still queued. To solve
that problem, blkdev_dequeue_request() has to be called before starting
the CCW program.

Signed-off-by: Michael Holzheu <holzheu@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
-rw-r--r--	drivers/s390/char/tape_block.c	4
1 file changed, 2 insertions, 2 deletions
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index ddc4a114e7f4..95da72bc17e8 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -179,11 +179,11 @@ tapeblock_requeue(struct work_struct *work) {
 			tapeblock_end_request(req, -EIO);
 			continue;
 		}
+		blkdev_dequeue_request(req);
+		nr_queued++;
 		spin_unlock_irq(&device->blk_data.request_queue_lock);
 		rc = tapeblock_start_request(device, req);
 		spin_lock_irq(&device->blk_data.request_queue_lock);
-		blkdev_dequeue_request(req);
-		nr_queued++;
 	}
 	spin_unlock_irq(&device->blk_data.request_queue_lock);
 	atomic_set(&device->blk_data.requeue_scheduled, 0);
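
For context, below is a minimal sketch of the request-dispatch loop inside tapeblock_requeue() with the fix applied. Only the statement ordering is taken from the hunk above; the loop condition, the WRITE-rejection check, and the use of elv_next_request()/rq_data_dir() are assumptions based on the 2.6.26-era block layer API, and the local variables (device, queue, req, nr_queued, rc) follow the naming visible in the hunk rather than being copied from the driver source.

	/*
	 * Sketch of the fixed dispatch loop (not the exact driver source).
	 * The key point: blkdev_dequeue_request() now runs before
	 * tapeblock_start_request() starts the CCW program, so the request
	 * is already off the block queue when the I/O interrupt fires and
	 * end_that_request_last() is eventually called.
	 */
	spin_lock_irq(&device->blk_data.request_queue_lock);
	while ((req = elv_next_request(queue)) != NULL) {   /* assumed loop condition */
		if (rq_data_dir(req) == WRITE) {            /* assumed: writes rejected */
			blkdev_dequeue_request(req);
			tapeblock_end_request(req, -EIO);
			continue;
		}
		/* Dequeue first: after this point the request may complete at any time. */
		blkdev_dequeue_request(req);
		nr_queued++;
		spin_unlock_irq(&device->blk_data.request_queue_lock);
		rc = tapeblock_start_request(device, req);  /* starts the CCW program */
		spin_lock_irq(&device->blk_data.request_queue_lock);
	}
	spin_unlock_irq(&device->blk_data.request_queue_lock);
	atomic_set(&device->blk_data.requeue_scheduled, 0);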