Diffstat (limited to 'drivers/block/ub.c')
-rw-r--r--	drivers/block/ub.c	139
 1 file changed, 107 insertions(+), 32 deletions(-)
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index a05fe5843e6..f04d864770a 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -14,7 +14,6 @@
  * -- special case some senses, e.g. 3a/0 -> no media present, reduce retries
  * -- verify the 13 conditions and do bulk resets
  * -- kill last_pipe and simply do two-state clearing on both pipes
- * -- verify protocol (bulk) from USB descriptors (maybe...)
  * -- highmem
  * -- move top_sense and work_bcs into separate allocations (if they survive)
  *    for cache purists and esoteric architectures.
@@ -355,7 +354,7 @@ struct ub_lun {
  * The USB device instance.
  */
 struct ub_dev {
-	spinlock_t lock;
+	spinlock_t *lock;
 	atomic_t poison;		/* The USB device is disconnected */
 	int openc;			/* protected by ub_lock! */
 					/* kref is too implicit for our taste */
@@ -420,11 +419,13 @@ static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
 static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
     int stalled_pipe);
 static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
-static void ub_reset_enter(struct ub_dev *sc);
+static void ub_reset_enter(struct ub_dev *sc, int try);
 static void ub_reset_task(void *arg);
 static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
 static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
     struct ub_capacity *ret);
+static int ub_sync_reset(struct ub_dev *sc);
+static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe);
 static int ub_probe_lun(struct ub_dev *sc, int lnum);
 
 /*
@@ -452,6 +453,10 @@ MODULE_DEVICE_TABLE(usb, ub_usb_ids);
 #define UB_MAX_HOSTS  26
 static char ub_hostv[UB_MAX_HOSTS];
 
+#define UB_QLOCK_NUM 5
+static spinlock_t ub_qlockv[UB_QLOCK_NUM];
+static int ub_qlock_next = 0;
+
 static DEFINE_SPINLOCK(ub_lock);	/* Locks globals and ->openc */
 
 /*
@@ -531,7 +536,7 @@ static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr,
 		return 0;
 
 	cnt = 0;
-	spin_lock_irqsave(&sc->lock, flags);
+	spin_lock_irqsave(sc->lock, flags);
 
 	cnt += sprintf(page + cnt,
 	    "poison %d reset %d\n",
@@ -579,7 +584,7 @@ static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr,
 		if (++nc == SCMD_TRACE_SZ) nc = 0;
 	}
 
-	spin_unlock_irqrestore(&sc->lock, flags);
+	spin_unlock_irqrestore(sc->lock, flags);
 	return cnt;
 }
 
@@ -627,6 +632,24 @@ static void ub_id_put(int id)
 }
 
 /*
+ * This is necessitated by the fact that blk_cleanup_queue does not
+ * necessarily destroy the queue. Instead, it may merely decrease q->refcnt.
+ * Since our blk_init_queue() passes a spinlock common with ub_dev,
+ * we have lifetime issues when ub_cleanup frees ub_dev.
+ */
+static spinlock_t *ub_next_lock(void)
+{
+	unsigned long flags;
+	spinlock_t *ret;
+
+	spin_lock_irqsave(&ub_lock, flags);
+	ret = &ub_qlockv[ub_qlock_next];
+	ub_qlock_next = (ub_qlock_next + 1) % UB_QLOCK_NUM;
+	spin_unlock_irqrestore(&ub_lock, flags);
+	return ret;
+}
+
+/*
  * Downcount for deallocation. This rides on two assumptions:
  * - once something is poisoned, its refcount cannot grow
  * - opens cannot happen at this time (del_gendisk was done)
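A minimal userspace analogue of the round-robin lock pool introduced above, with pthread mutexes standing in for spinlocks; every name below is illustrative and not taken from the driver. Sharing a pooled lock between unrelated devices costs a little contention, but it sidesteps the lifetime problem described in the comment: the lock no longer dies with the structure that borrowed it.

    /*
     * Minimal analogue of ub_next_lock(): a small static pool of locks is
     * handed out round-robin, so a lock can outlive any one "device" that
     * borrows it.  Illustrative names only.
     */
    #include <pthread.h>
    #include <stdio.h>

    #define POOL_NUM 5

    static pthread_mutex_t pool_guard = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t poolv[POOL_NUM] = {
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
            PTHREAD_MUTEX_INITIALIZER
    };
    static int pool_next;

    static pthread_mutex_t *pool_next_lock(void)
    {
            pthread_mutex_t *ret;

            pthread_mutex_lock(&pool_guard);
            ret = &poolv[pool_next];
            pool_next = (pool_next + 1) % POOL_NUM;
            pthread_mutex_unlock(&pool_guard);
            return ret;
    }

    int main(void)
    {
            int i;

            /* once the pool wraps, two "devices" may well share a lock */
            for (i = 0; i < 7; i++)
                    printf("device %d borrows lock %d\n", i,
                           (int)(pool_next_lock() - poolv));
            return 0;
    }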
@@ -961,7 +984,7 @@ static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
 	if (atomic_read(&sc->poison))
 		return -ENXIO;
 
-	ub_reset_enter(sc);
+	ub_reset_enter(sc, urq->current_try);
 
 	if (urq->current_try >= 3)
 		return -EIO;
@@ -997,8 +1020,6 @@ static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
  * No exceptions.
  *
  * Host is assumed locked.
- *
- * XXX We only support Bulk for the moment.
  */
 static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 {
@@ -1083,9 +1104,10 @@ static void ub_urb_timeout(unsigned long arg)
 	struct ub_dev *sc = (struct ub_dev *) arg;
 	unsigned long flags;
 
-	spin_lock_irqsave(&sc->lock, flags);
-	usb_unlink_urb(&sc->work_urb);
-	spin_unlock_irqrestore(&sc->lock, flags);
+	spin_lock_irqsave(sc->lock, flags);
+	if (!ub_is_completed(&sc->work_done))
+		usb_unlink_urb(&sc->work_urb);
+	spin_unlock_irqrestore(sc->lock, flags);
 }
 
 /*
@@ -1108,10 +1130,9 @@ static void ub_scsi_action(unsigned long _dev)
 	struct ub_dev *sc = (struct ub_dev *) _dev;
 	unsigned long flags;
 
-	spin_lock_irqsave(&sc->lock, flags);
-	del_timer(&sc->work_timer);
+	spin_lock_irqsave(sc->lock, flags);
 	ub_scsi_dispatch(sc);
-	spin_unlock_irqrestore(&sc->lock, flags);
+	spin_unlock_irqrestore(sc->lock, flags);
 }
 
 static void ub_scsi_dispatch(struct ub_dev *sc)
@@ -1133,6 +1154,7 @@ static void ub_scsi_dispatch(struct ub_dev *sc)
 		} else {
 			if (!ub_is_completed(&sc->work_done))
 				break;
+			del_timer(&sc->work_timer);
 			ub_scsi_urb_compl(sc, cmd);
 		}
 	}
@@ -1680,16 +1702,18 @@ static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
 
 /*
  * Reset management
+ * XXX Move usb_reset_device to khubd. Hogging kevent is not a good thing.
+ * XXX Make usb_sync_reset asynchronous.
  */
 
-static void ub_reset_enter(struct ub_dev *sc)
+static void ub_reset_enter(struct ub_dev *sc, int try)
 {
 
 	if (sc->reset) {
 		/* This happens often on multi-LUN devices. */
 		return;
 	}
-	sc->reset = 1;
+	sc->reset = try + 1;
 
 #if 0 /* Not needed because the disconnect waits for us. */
 	unsigned long flags;
@@ -1727,6 +1751,11 @@ static void ub_reset_task(void *arg)
 	if (atomic_read(&sc->poison)) {
 		printk(KERN_NOTICE "%s: Not resetting disconnected device\n",
 		    sc->name); /* P3 This floods. Remove soon. XXX */
+	} else if ((sc->reset & 1) == 0) {
+		ub_sync_reset(sc);
+		msleep(700);	/* usb-storage sleeps 6s (!) */
+		ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
+		ub_probe_clear_stall(sc, sc->send_bulk_pipe);
 	} else if (sc->dev->actconfig->desc.bNumInterfaces != 1) {
 		printk(KERN_NOTICE "%s: Not resetting multi-interface device\n",
 		    sc->name); /* P3 This floods. Remove soon. XXX */
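The new branch keys off the parity of sc->reset, which ub_reset_enter() now records as try + 1: when that value is even the driver attempts the lightweight Bulk reset plus clear-stalls shown above, otherwise it falls through toward the heavier port-reset path, subject to the remaining guards. A standalone toy model of that selection, with illustrative names only:

    #include <stdio.h>

    /*
     * Toy model of the retry -> reset-type selection: ub_reset_enter()
     * records try + 1 in sc->reset, and ub_reset_task() takes the soft
     * Bulk-reset path when that value is even (subject to the guards
     * shown in the hunk above).  Illustrative only.
     */
    static const char *reset_kind(int try)
    {
            int reset = try + 1;    /* what ub_reset_enter() records */

            return (reset & 1) == 0 ? "Bulk reset + clear stalls"
                                    : "port reset path";
    }

    int main(void)
    {
            int try;

            for (try = 0; try < 3; try++)
                    printf("try %d -> %s\n", try, reset_kind(try));
            return 0;
    }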
@@ -1754,7 +1783,7 @@ static void ub_reset_task(void *arg)
 	 * queues of resets or anything. We do need a spinlock though,
 	 * to interact with block layer.
 	 */
-	spin_lock_irqsave(&sc->lock, flags);
+	spin_lock_irqsave(sc->lock, flags);
 	sc->reset = 0;
 	tasklet_schedule(&sc->tasklet);
 	list_for_each(p, &sc->luns) {
@@ -1762,7 +1791,7 @@ static void ub_reset_task(void *arg)
 		blk_start_queue(lun->disk->queue);
 	}
 	wake_up(&sc->reset_wait);
-	spin_unlock_irqrestore(&sc->lock, flags);
+	spin_unlock_irqrestore(sc->lock, flags);
 }
 
 /*
@@ -1990,11 +2019,11 @@ static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun)
 	cmd->done = ub_probe_done;
 	cmd->back = &compl;
 
-	spin_lock_irqsave(&sc->lock, flags);
+	spin_lock_irqsave(sc->lock, flags);
 	cmd->tag = sc->tagcnt++;
 
 	rc = ub_submit_scsi(sc, cmd);
-	spin_unlock_irqrestore(&sc->lock, flags);
+	spin_unlock_irqrestore(sc->lock, flags);
 
 	if (rc != 0) {
 		printk("ub: testing ready: submit error (%d)\n", rc); /* P3 */
@@ -2052,11 +2081,11 @@ static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
 	cmd->done = ub_probe_done;
 	cmd->back = &compl;
 
-	spin_lock_irqsave(&sc->lock, flags);
+	spin_lock_irqsave(sc->lock, flags);
 	cmd->tag = sc->tagcnt++;
 
 	rc = ub_submit_scsi(sc, cmd);
-	spin_unlock_irqrestore(&sc->lock, flags);
+	spin_unlock_irqrestore(sc->lock, flags);
 
 	if (rc != 0) {
 		printk("ub: reading capacity: submit error (%d)\n", rc); /* P3 */
@@ -2118,6 +2147,52 @@ static void ub_probe_timeout(unsigned long arg)
 }
 
 /*
+ * Reset with a Bulk reset.
+ */
+static int ub_sync_reset(struct ub_dev *sc)
+{
+	int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
+	struct usb_ctrlrequest *cr;
+	struct completion compl;
+	struct timer_list timer;
+	int rc;
+
+	init_completion(&compl);
+
+	cr = &sc->work_cr;
+	cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+	cr->bRequest = US_BULK_RESET_REQUEST;
+	cr->wValue = cpu_to_le16(0);
+	cr->wIndex = cpu_to_le16(ifnum);
+	cr->wLength = cpu_to_le16(0);
+
+	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
+	    (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
+	sc->work_urb.actual_length = 0;
+	sc->work_urb.error_count = 0;
+	sc->work_urb.status = 0;
+
+	if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
+		printk(KERN_WARNING
+		    "%s: Unable to submit a bulk reset (%d)\n", sc->name, rc);
+		return rc;
+	}
+
+	init_timer(&timer);
+	timer.function = ub_probe_timeout;
+	timer.data = (unsigned long) &compl;
+	timer.expires = jiffies + UB_CTRL_TIMEOUT;
+	add_timer(&timer);
+
+	wait_for_completion(&compl);
+
+	del_timer_sync(&timer);
+	usb_kill_urb(&sc->work_urb);
+
+	return sc->work_urb.status;
+}
+
+/*
  * Get number of LUNs by the way of Bulk GetMaxLUN command.
  */
 static int ub_sync_getmaxlun(struct ub_dev *sc)
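For comparison, the same Bulk-Only Mass Storage Reset class request could be issued with the blocking usb_control_msg() helper; ub_sync_reset() above builds its own control URB so it can reuse the preallocated work_urb and bound the wait with its own timer. A hedged sketch of the blocking variant, with a hypothetical helper name and an assumed millisecond timeout for this kernel era, not part of the patch:

    /*
     * Hypothetical blocking counterpart to ub_sync_reset(), illustration
     * only: the same class request (US_BULK_RESET_REQUEST, Bulk-Only Mass
     * Storage Reset) on the default control pipe, waited on synchronously.
     */
    static int ub_sync_reset_blocking(struct ub_dev *sc)
    {
            int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;

            /* returns 0 (bytes transferred) on success, negative errno on error */
            return usb_control_msg(sc->dev, sc->send_ctrl_pipe,
                US_BULK_RESET_REQUEST,                  /* bRequest */
                USB_TYPE_CLASS | USB_RECIP_INTERFACE,   /* bRequestType */
                0, ifnum, NULL, 0,                      /* wValue, wIndex, no data */
                5000);                                  /* timeout, ms (assumed) */
    }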
@@ -2333,7 +2408,7 @@ static int ub_probe(struct usb_interface *intf,
 	if ((sc = kmalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
 		goto err_core;
 	memset(sc, 0, sizeof(struct ub_dev));
-	spin_lock_init(&sc->lock);
+	sc->lock = ub_next_lock();
 	INIT_LIST_HEAD(&sc->luns);
 	usb_init_urb(&sc->work_urb);
 	tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
@@ -2483,7 +2558,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
 	disk->driverfs_dev = &sc->intf->dev;
 
 	rc = -ENOMEM;
-	if ((q = blk_init_queue(ub_request_fn, &sc->lock)) == NULL)
+	if ((q = blk_init_queue(ub_request_fn, sc->lock)) == NULL)
 		goto err_blkqinit;
 
 	disk->queue = q;
@@ -2554,7 +2629,7 @@ static void ub_disconnect(struct usb_interface *intf)
 	 * and the whole queue drains. So, we just use this code to
 	 * print warnings.
 	 */
-	spin_lock_irqsave(&sc->lock, flags);
+	spin_lock_irqsave(sc->lock, flags);
 	{
 		struct ub_scsi_cmd *cmd;
 		int cnt = 0;
@@ -2571,7 +2646,7 @@ static void ub_disconnect(struct usb_interface *intf)
2571 "%d was queued after shutdown\n", sc->name, cnt); 2646 "%d was queued after shutdown\n", sc->name, cnt);
2572 } 2647 }
2573 } 2648 }
2574 spin_unlock_irqrestore(&sc->lock, flags); 2649 spin_unlock_irqrestore(sc->lock, flags);
2575 2650
2576 /* 2651 /*
2577 * Unregister the upper layer. 2652 * Unregister the upper layer.
@@ -2590,19 +2665,15 @@ static void ub_disconnect(struct usb_interface *intf)
 	}
 
 	/*
-	 * Taking a lock on a structure which is about to be freed
-	 * is very nonsensual. Here it is largely a way to do a debug freeze,
-	 * and a bracket which shows where the nonsensual code segment ends.
-	 *
 	 * Testing for -EINPROGRESS is always a bug, so we are bending
 	 * the rules a little.
 	 */
-	spin_lock_irqsave(&sc->lock, flags);
+	spin_lock_irqsave(sc->lock, flags);
 	if (sc->work_urb.status == -EINPROGRESS) {	/* janitors: ignore */
 		printk(KERN_WARNING "%s: "
 		    "URB is active after disconnect\n", sc->name);
 	}
-	spin_unlock_irqrestore(&sc->lock, flags);
+	spin_unlock_irqrestore(sc->lock, flags);
 
 	/*
 	 * There is virtually no chance that other CPU runs times so long
@@ -2636,6 +2707,10 @@ static struct usb_driver ub_driver = {
 static int __init ub_init(void)
 {
 	int rc;
+	int i;
+
+	for (i = 0; i < UB_QLOCK_NUM; i++)
+		spin_lock_init(&ub_qlockv[i]);
 
 	if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
 		goto err_regblkdev;