author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>          2009-02-03 21:36:27 -0500
committer  James Bottomley <James.Bottomley@HansenPartnership.com>  2009-03-12 13:58:12 -0400
commit     c96952ed7031e7c576ecf90cf95b8ec099d5295a (patch)
tree       4217498cf82131f6c870b0f92fea7039596fa0e7 /drivers/scsi
parent     a3b7aeaba29e3dd995ece05ba50db9e0650c16b6 (diff)
[SCSI] sg: avoid blk_put_request/blk_rq_unmap_user in interrupt
This fixes the following oops:

http://marc.info/?l=linux-kernel&m=123316111415677&w=2

You can reproduce this bug by interrupting a program before an sg response completes. This leads to the special sg state (the orphan state), and then sg calls blk_put_request in interrupt context (rq->end_io).

The above bug report shows the recursive lock problem that results from sg calling blk_put_request in interrupt. We could call __blk_put_request here instead; however, we also need to handle blk_rq_unmap_user here, which can't be called in interrupt context either. In the orphan state we don't need to care about the data transfer (the program has revoked the command), so adding a 'just free the resources' mode to blk_rq_unmap_user would be a possible option, but I prefer to avoid complicating the blk mapping API when possible.

Instead, I change the orphan state to call sg_finish_rem_req via execute_in_process_context. We hold sg_fd->kref, so sg_fd doesn't go away until keventd_wq finishes our work. copy_from_user/copy_to_user fail (keventd has no user address space), so blk_rq_unmap_user just frees the resources without performing the data transfer.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Douglas Gilbert <dgilbert@interlog.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
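For background, execute_in_process_context() (declared in include/linux/workqueue.h) runs its callback immediately when the caller is already in process context, and otherwise queues it to the shared kernel workqueue. A minimal sketch of that contract, assuming the 2.6-era workqueue API — defer_to_process_context is an illustrative name, not the kernel's implementation:

#include <linux/hardirq.h>
#include <linux/workqueue.h>

/* Sketch: run fn right away if sleeping is allowed, otherwise hand it
 * to keventd.  Returns 0 when executed synchronously and 1 when
 * deferred, mirroring execute_in_process_context()'s documented
 * behaviour. */
static int defer_to_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {		/* process context: call directly */
		fn(&ew->work);
		return 0;
	}
	INIT_WORK(&ew->work, fn);	/* interrupt context: defer */
	schedule_work(&ew->work);
	return 1;
}

This is exactly why the caller must keep its object alive until the work runs: the deferred path completes asynchronously on keventd, hence the kref held on sg_fd below.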
Diffstat (limited to 'drivers/scsi')
 drivers/scsi/sg.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 18d079e3990e..cdd83cf97100 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -138,6 +138,7 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
 	volatile char done;	/* 0->before bh, 1->before read, 2->read */
 	struct request *rq;
 	struct bio *bio;
+	struct execute_work ew;
 } Sg_request;
 
 typedef struct sg_fd {	/* holds the state of a file descriptor */
@@ -1234,6 +1235,15 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
 	return 0;
 }
 
+static void sg_rq_end_io_usercontext(struct work_struct *work)
+{
+	struct sg_request *srp = container_of(work, struct sg_request, ew.work);
+	struct sg_fd *sfp = srp->parentfp;
+
+	sg_finish_rem_req(srp);
+	kref_put(&sfp->f_ref, sg_remove_sfp);
+}
+
 /*
  * This function is a "bottom half" handler that is called by the mid
  * level when a command is completed (or has failed).
@@ -1312,10 +1322,9 @@ static void sg_rq_end_io(struct request *rq, int uptodate)
 		 */
 		wake_up_interruptible(&sfp->read_wait);
 		kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
+		kref_put(&sfp->f_ref, sg_remove_sfp);
 	} else
-		sg_finish_rem_req(srp); /* call with srp->done == 0 */
-
-	kref_put(&sfp->f_ref, sg_remove_sfp);
+		execute_in_process_context(sg_rq_end_io_usercontext, &srp->ew);
 }
 
 static struct file_operations sg_fops = {
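The lifetime rule the hunk above depends on: the interrupt-side completion never frees anything itself; the reference held on sg_fd is dropped either directly (when a reader will still consume the response) or from the deferred work after sg_finish_rem_req() runs, so the structure stays alive until keventd is done. A hedged sketch of the same hand-off with a hypothetical object — demo_obj, demo_release, demo_work, and demo_end_io are illustrative names, not from sg.c:

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_obj {
	struct kref ref;	/* pins the object across the deferral */
	struct execute_work ew;
};

static void demo_release(struct kref *ref)
{
	kfree(container_of(ref, struct demo_obj, ref));
}

/* Process-context side: sleepable teardown, then drop the pinned ref. */
static void demo_work(struct work_struct *work)
{
	struct demo_obj *obj = container_of(work, struct demo_obj, ew.work);

	/* ... blk_rq_unmap_user()/blk_put_request()-style cleanup ... */
	kref_put(&obj->ref, demo_release);	/* last put may free here */
}

/* Interrupt-context side (e.g. rq->end_io): defer, never free. */
static void demo_end_io(struct demo_obj *obj)
{
	execute_in_process_context(demo_work, &obj->ew);
}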