aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/cdrom/viocd.c
diff options
context:
space:
mode:
authorTony Breeds <tony@bakeyournoodle.com>2007-03-05 03:30:14 -0500
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-03-05 10:57:51 -0500
commit1ad7c311079412541bebaf1c8436d405cc8c6b2c (patch)
tree579185b75cf968de132d6a2b0cd6d4fd21ff9670 /drivers/cdrom/viocd.c
parent5fdc2abe39b76822e34e7029ca5f69fe4bea58d4 (diff)
[PATCH] Fix soft lockup with iSeries viocd driver
Fix soft lockup with iSeries viocd driver, caused by eventually calling end_that_request_first() with nr_bytes 0. Some versions of hald do an SG_IO ioctl on the viocd device which becomes a request with hard_nr_sectors and hard_cur_sectors set to zero. Passing zero as the number of sectors to end_request() (which calls end_that_request_first()) causes an infinite loop when the bio is being freed. This patch makes sure that the zero is never passed. It only requires some number larger than the request size to terminate the loop. The lockup is triggered by hald, interrogating the device. Signed-off-by: Tony Breeds <tony@bakeyournoodle.com> Signed-off-by: Jens Axboe <jens.axboe@oracle.com> Cc: Stephen Rothwell <sfr@canb.auug.org.au> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/cdrom/viocd.c')
-rw-r--r--drivers/cdrom/viocd.c27
1 files changed, 23 insertions, 4 deletions
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index dc13ebacedfb..44cd7b2ddf09 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -376,6 +376,25 @@ static int send_request(struct request *req)
376 return 0; 376 return 0;
377} 377}
378 378
379static void viocd_end_request(struct request *req, int uptodate)
380{
381 int nsectors = req->hard_nr_sectors;
382
383 /*
384 * Make sure it's fully ended, and ensure that we process
385 * at least one sector.
386 */
387 if (blk_pc_request(req))
388 nsectors = (req->data_len + 511) >> 9;
389 if (!nsectors)
390 nsectors = 1;
391
392 if (end_that_request_first(req, uptodate, nsectors))
393 BUG();
394 add_disk_randomness(req->rq_disk);
395 blkdev_dequeue_request(req);
396 end_that_request_last(req, uptodate);
397}
379 398
380static int rwreq; 399static int rwreq;
381 400
@@ -385,11 +404,11 @@ static void do_viocd_request(request_queue_t *q)
385 404
386 while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) { 405 while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) {
387 if (!blk_fs_request(req)) 406 if (!blk_fs_request(req))
388 end_request(req, 0); 407 viocd_end_request(req, 0);
389 else if (send_request(req) < 0) { 408 else if (send_request(req) < 0) {
390 printk(VIOCD_KERN_WARNING 409 printk(VIOCD_KERN_WARNING
391 "unable to send message to OS/400!"); 410 "unable to send message to OS/400!");
392 end_request(req, 0); 411 viocd_end_request(req, 0);
393 } else 412 } else
394 rwreq++; 413 rwreq++;
395 } 414 }
@@ -601,9 +620,9 @@ return_complete:
601 "with rc %d:0x%04X: %s\n", 620 "with rc %d:0x%04X: %s\n",
602 req, event->xRc, 621 req, event->xRc,
603 bevent->sub_result, err->msg); 622 bevent->sub_result, err->msg);
604 end_request(req, 0); 623 viocd_end_request(req, 0);
605 } else 624 } else
606 end_request(req, 1); 625 viocd_end_request(req, 1);
607 626
608 /* restart handling of incoming requests */ 627 /* restart handling of incoming requests */
609 spin_unlock_irqrestore(&viocd_reqlock, flags); 628 spin_unlock_irqrestore(&viocd_reqlock, flags);