aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPete Zaitcev <zaitcev@redhat.com>2005-12-28 17:22:17 -0500
committerGreg Kroah-Hartman <gregkh@suse.de>2006-01-31 20:23:36 -0500
commit65b4fe553bf43018c06740f3d1f6caf42cf95924 (patch)
tree54a95a1a762501307f0b764612eb6b68894c9eb6
parentb6daf7f50836c8ed12d8b0ec0113e415f04e8530 (diff)
[PATCH] USB: ub 03 Oops with CFQ
The blk_cleanup_queue does not necessarily destroy the queue. When we destroy the corresponding ub_dev, it may leave the queue spinlock pointer dangling. This patch moves spinlocks from ub_dev to static memory. The locking scheme is not changed. These spinlocks are still separate from the ub_lock. Signed-off-by: Pete Zaitcev <zaitcev@redhat.com> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
-rw-r--r--drivers/block/ub.c68
1 file changed, 45 insertions, 23 deletions
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index a05fe5843e6c..db4943c53d36 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -355,7 +355,7 @@ struct ub_lun {
355 * The USB device instance. 355 * The USB device instance.
356 */ 356 */
357struct ub_dev { 357struct ub_dev {
358 spinlock_t lock; 358 spinlock_t *lock;
359 atomic_t poison; /* The USB device is disconnected */ 359 atomic_t poison; /* The USB device is disconnected */
360 int openc; /* protected by ub_lock! */ 360 int openc; /* protected by ub_lock! */
361 /* kref is too implicit for our taste */ 361 /* kref is too implicit for our taste */
@@ -452,6 +452,10 @@ MODULE_DEVICE_TABLE(usb, ub_usb_ids);
452#define UB_MAX_HOSTS 26 452#define UB_MAX_HOSTS 26
453static char ub_hostv[UB_MAX_HOSTS]; 453static char ub_hostv[UB_MAX_HOSTS];
454 454
455#define UB_QLOCK_NUM 5
456static spinlock_t ub_qlockv[UB_QLOCK_NUM];
457static int ub_qlock_next = 0;
458
455static DEFINE_SPINLOCK(ub_lock); /* Locks globals and ->openc */ 459static DEFINE_SPINLOCK(ub_lock); /* Locks globals and ->openc */
456 460
457/* 461/*
@@ -531,7 +535,7 @@ static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr,
531 return 0; 535 return 0;
532 536
533 cnt = 0; 537 cnt = 0;
534 spin_lock_irqsave(&sc->lock, flags); 538 spin_lock_irqsave(sc->lock, flags);
535 539
536 cnt += sprintf(page + cnt, 540 cnt += sprintf(page + cnt,
537 "poison %d reset %d\n", 541 "poison %d reset %d\n",
@@ -579,7 +583,7 @@ static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr,
579 if (++nc == SCMD_TRACE_SZ) nc = 0; 583 if (++nc == SCMD_TRACE_SZ) nc = 0;
580 } 584 }
581 585
582 spin_unlock_irqrestore(&sc->lock, flags); 586 spin_unlock_irqrestore(sc->lock, flags);
583 return cnt; 587 return cnt;
584} 588}
585 589
@@ -627,6 +631,24 @@ static void ub_id_put(int id)
627} 631}
628 632
629/* 633/*
634 * This is necessitated by the fact that blk_cleanup_queue does not
635 * necessarily destroy the queue. Instead, it may merely decrease q->refcnt.
636 * Since our blk_init_queue() passes a spinlock common with ub_dev,
637 * we have life time issues when ub_cleanup frees ub_dev.
638 */
639static spinlock_t *ub_next_lock(void)
640{
641 unsigned long flags;
642 spinlock_t *ret;
643
644 spin_lock_irqsave(&ub_lock, flags);
645 ret = &ub_qlockv[ub_qlock_next];
646 ub_qlock_next = (ub_qlock_next + 1) % UB_QLOCK_NUM;
647 spin_unlock_irqrestore(&ub_lock, flags);
648 return ret;
649}
650
651/*
630 * Downcount for deallocation. This rides on two assumptions: 652 * Downcount for deallocation. This rides on two assumptions:
631 * - once something is poisoned, its refcount cannot grow 653 * - once something is poisoned, its refcount cannot grow
632 * - opens cannot happen at this time (del_gendisk was done) 654 * - opens cannot happen at this time (del_gendisk was done)
@@ -1083,9 +1105,9 @@ static void ub_urb_timeout(unsigned long arg)
1083 struct ub_dev *sc = (struct ub_dev *) arg; 1105 struct ub_dev *sc = (struct ub_dev *) arg;
1084 unsigned long flags; 1106 unsigned long flags;
1085 1107
1086 spin_lock_irqsave(&sc->lock, flags); 1108 spin_lock_irqsave(sc->lock, flags);
1087 usb_unlink_urb(&sc->work_urb); 1109 usb_unlink_urb(&sc->work_urb);
1088 spin_unlock_irqrestore(&sc->lock, flags); 1110 spin_unlock_irqrestore(sc->lock, flags);
1089} 1111}
1090 1112
1091/* 1113/*
@@ -1108,10 +1130,10 @@ static void ub_scsi_action(unsigned long _dev)
1108 struct ub_dev *sc = (struct ub_dev *) _dev; 1130 struct ub_dev *sc = (struct ub_dev *) _dev;
1109 unsigned long flags; 1131 unsigned long flags;
1110 1132
1111 spin_lock_irqsave(&sc->lock, flags); 1133 spin_lock_irqsave(sc->lock, flags);
1112 del_timer(&sc->work_timer); 1134 del_timer(&sc->work_timer);
1113 ub_scsi_dispatch(sc); 1135 ub_scsi_dispatch(sc);
1114 spin_unlock_irqrestore(&sc->lock, flags); 1136 spin_unlock_irqrestore(sc->lock, flags);
1115} 1137}
1116 1138
1117static void ub_scsi_dispatch(struct ub_dev *sc) 1139static void ub_scsi_dispatch(struct ub_dev *sc)
@@ -1754,7 +1776,7 @@ static void ub_reset_task(void *arg)
1754 * queues of resets or anything. We do need a spinlock though, 1776 * queues of resets or anything. We do need a spinlock though,
1755 * to interact with block layer. 1777 * to interact with block layer.
1756 */ 1778 */
1757 spin_lock_irqsave(&sc->lock, flags); 1779 spin_lock_irqsave(sc->lock, flags);
1758 sc->reset = 0; 1780 sc->reset = 0;
1759 tasklet_schedule(&sc->tasklet); 1781 tasklet_schedule(&sc->tasklet);
1760 list_for_each(p, &sc->luns) { 1782 list_for_each(p, &sc->luns) {
@@ -1762,7 +1784,7 @@ static void ub_reset_task(void *arg)
1762 blk_start_queue(lun->disk->queue); 1784 blk_start_queue(lun->disk->queue);
1763 } 1785 }
1764 wake_up(&sc->reset_wait); 1786 wake_up(&sc->reset_wait);
1765 spin_unlock_irqrestore(&sc->lock, flags); 1787 spin_unlock_irqrestore(sc->lock, flags);
1766} 1788}
1767 1789
1768/* 1790/*
@@ -1990,11 +2012,11 @@ static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun)
1990 cmd->done = ub_probe_done; 2012 cmd->done = ub_probe_done;
1991 cmd->back = &compl; 2013 cmd->back = &compl;
1992 2014
1993 spin_lock_irqsave(&sc->lock, flags); 2015 spin_lock_irqsave(sc->lock, flags);
1994 cmd->tag = sc->tagcnt++; 2016 cmd->tag = sc->tagcnt++;
1995 2017
1996 rc = ub_submit_scsi(sc, cmd); 2018 rc = ub_submit_scsi(sc, cmd);
1997 spin_unlock_irqrestore(&sc->lock, flags); 2019 spin_unlock_irqrestore(sc->lock, flags);
1998 2020
1999 if (rc != 0) { 2021 if (rc != 0) {
2000 printk("ub: testing ready: submit error (%d)\n", rc); /* P3 */ 2022 printk("ub: testing ready: submit error (%d)\n", rc); /* P3 */
@@ -2052,11 +2074,11 @@ static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
2052 cmd->done = ub_probe_done; 2074 cmd->done = ub_probe_done;
2053 cmd->back = &compl; 2075 cmd->back = &compl;
2054 2076
2055 spin_lock_irqsave(&sc->lock, flags); 2077 spin_lock_irqsave(sc->lock, flags);
2056 cmd->tag = sc->tagcnt++; 2078 cmd->tag = sc->tagcnt++;
2057 2079
2058 rc = ub_submit_scsi(sc, cmd); 2080 rc = ub_submit_scsi(sc, cmd);
2059 spin_unlock_irqrestore(&sc->lock, flags); 2081 spin_unlock_irqrestore(sc->lock, flags);
2060 2082
2061 if (rc != 0) { 2083 if (rc != 0) {
2062 printk("ub: reading capacity: submit error (%d)\n", rc); /* P3 */ 2084 printk("ub: reading capacity: submit error (%d)\n", rc); /* P3 */
@@ -2333,7 +2355,7 @@ static int ub_probe(struct usb_interface *intf,
2333 if ((sc = kmalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL) 2355 if ((sc = kmalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
2334 goto err_core; 2356 goto err_core;
2335 memset(sc, 0, sizeof(struct ub_dev)); 2357 memset(sc, 0, sizeof(struct ub_dev));
2336 spin_lock_init(&sc->lock); 2358 sc->lock = ub_next_lock();
2337 INIT_LIST_HEAD(&sc->luns); 2359 INIT_LIST_HEAD(&sc->luns);
2338 usb_init_urb(&sc->work_urb); 2360 usb_init_urb(&sc->work_urb);
2339 tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc); 2361 tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
@@ -2483,7 +2505,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
2483 disk->driverfs_dev = &sc->intf->dev; 2505 disk->driverfs_dev = &sc->intf->dev;
2484 2506
2485 rc = -ENOMEM; 2507 rc = -ENOMEM;
2486 if ((q = blk_init_queue(ub_request_fn, &sc->lock)) == NULL) 2508 if ((q = blk_init_queue(ub_request_fn, sc->lock)) == NULL)
2487 goto err_blkqinit; 2509 goto err_blkqinit;
2488 2510
2489 disk->queue = q; 2511 disk->queue = q;
@@ -2554,7 +2576,7 @@ static void ub_disconnect(struct usb_interface *intf)
2554 * and the whole queue drains. So, we just use this code to 2576 * and the whole queue drains. So, we just use this code to
2555 * print warnings. 2577 * print warnings.
2556 */ 2578 */
2557 spin_lock_irqsave(&sc->lock, flags); 2579 spin_lock_irqsave(sc->lock, flags);
2558 { 2580 {
2559 struct ub_scsi_cmd *cmd; 2581 struct ub_scsi_cmd *cmd;
2560 int cnt = 0; 2582 int cnt = 0;
@@ -2571,7 +2593,7 @@ static void ub_disconnect(struct usb_interface *intf)
2571 "%d was queued after shutdown\n", sc->name, cnt); 2593 "%d was queued after shutdown\n", sc->name, cnt);
2572 } 2594 }
2573 } 2595 }
2574 spin_unlock_irqrestore(&sc->lock, flags); 2596 spin_unlock_irqrestore(sc->lock, flags);
2575 2597
2576 /* 2598 /*
2577 * Unregister the upper layer. 2599 * Unregister the upper layer.
@@ -2590,19 +2612,15 @@ static void ub_disconnect(struct usb_interface *intf)
2590 } 2612 }
2591 2613
2592 /* 2614 /*
2593 * Taking a lock on a structure which is about to be freed
2594 * is very nonsensual. Here it is largely a way to do a debug freeze,
2595 * and a bracket which shows where the nonsensual code segment ends.
2596 *
2597 * Testing for -EINPROGRESS is always a bug, so we are bending 2615 * Testing for -EINPROGRESS is always a bug, so we are bending
2598 * the rules a little. 2616 * the rules a little.
2599 */ 2617 */
2600 spin_lock_irqsave(&sc->lock, flags); 2618 spin_lock_irqsave(sc->lock, flags);
2601 if (sc->work_urb.status == -EINPROGRESS) { /* janitors: ignore */ 2619 if (sc->work_urb.status == -EINPROGRESS) { /* janitors: ignore */
2602 printk(KERN_WARNING "%s: " 2620 printk(KERN_WARNING "%s: "
2603 "URB is active after disconnect\n", sc->name); 2621 "URB is active after disconnect\n", sc->name);
2604 } 2622 }
2605 spin_unlock_irqrestore(&sc->lock, flags); 2623 spin_unlock_irqrestore(sc->lock, flags);
2606 2624
2607 /* 2625 /*
2608 * There is virtually no chance that other CPU runs times so long 2626 * There is virtually no chance that other CPU runs times so long
@@ -2636,6 +2654,10 @@ static struct usb_driver ub_driver = {
2636static int __init ub_init(void) 2654static int __init ub_init(void)
2637{ 2655{
2638 int rc; 2656 int rc;
2657 int i;
2658
2659 for (i = 0; i < UB_QLOCK_NUM; i++)
2660 spin_lock_init(&ub_qlockv[i]);
2639 2661
2640 if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0) 2662 if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
2641 goto err_regblkdev; 2663 goto err_regblkdev;