diff options
Diffstat (limited to 'drivers/usb/musb')
| -rw-r--r-- | drivers/usb/musb/Kconfig | 6 | ||||
| -rw-r--r-- | drivers/usb/musb/davinci.c | 63 | ||||
| -rw-r--r-- | drivers/usb/musb/davinci.h | 23 | ||||
| -rw-r--r-- | drivers/usb/musb/musb_core.c | 2 | ||||
| -rw-r--r-- | drivers/usb/musb/musb_core.h | 5 | ||||
| -rw-r--r-- | drivers/usb/musb/musb_host.c | 140 | ||||
| -rw-r--r-- | drivers/usb/musb/musb_virthub.c | 2 |
7 files changed, 163 insertions, 78 deletions
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig index 9985db08e7db..b66e8544d8b9 100644 --- a/drivers/usb/musb/Kconfig +++ b/drivers/usb/musb/Kconfig | |||
| @@ -20,8 +20,8 @@ config USB_MUSB_HDRC | |||
| 20 | it's being used with, including the USB peripheral role, | 20 | it's being used with, including the USB peripheral role, |
| 21 | or the USB host role, or both. | 21 | or the USB host role, or both. |
| 22 | 22 | ||
| 23 | Texas Instruments parts using this IP include DaVinci 644x, | 23 | Texas Instruments families using this IP include DaVinci |
| 24 | OMAP 243x, OMAP 343x, and TUSB 6010. | 24 | (35x, 644x ...), OMAP 243x, OMAP 3, and TUSB 6010. |
| 25 | 25 | ||
| 26 | Analog Devices parts using this IP include Blackfin BF54x, | 26 | Analog Devices parts using this IP include Blackfin BF54x, |
| 27 | BF525 and BF527. | 27 | BF525 and BF527. |
| @@ -40,7 +40,7 @@ config USB_MUSB_SOC | |||
| 40 | default y if (BF54x && !BF544) | 40 | default y if (BF54x && !BF544) |
| 41 | default y if (BF52x && !BF522 && !BF523) | 41 | default y if (BF52x && !BF522 && !BF523) |
| 42 | 42 | ||
| 43 | comment "DaVinci 644x USB support" | 43 | comment "DaVinci 35x and 644x USB support" |
| 44 | depends on USB_MUSB_HDRC && ARCH_DAVINCI | 44 | depends on USB_MUSB_HDRC && ARCH_DAVINCI |
| 45 | 45 | ||
| 46 | comment "OMAP 243x high speed USB support" | 46 | comment "OMAP 243x high speed USB support" |
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c index 2dc7606f319c..10d11ab113ab 100644 --- a/drivers/usb/musb/davinci.c +++ b/drivers/usb/musb/davinci.c | |||
| @@ -48,6 +48,9 @@ | |||
| 48 | #include "cppi_dma.h" | 48 | #include "cppi_dma.h" |
| 49 | 49 | ||
| 50 | 50 | ||
| 51 | #define USB_PHY_CTRL IO_ADDRESS(USBPHY_CTL_PADDR) | ||
| 52 | #define DM355_DEEPSLEEP IO_ADDRESS(DM355_DEEPSLEEP_PADDR) | ||
| 53 | |||
| 51 | /* REVISIT (PM) we should be able to keep the PHY in low power mode most | 54 | /* REVISIT (PM) we should be able to keep the PHY in low power mode most |
| 52 | * of the time (24 MHZ oscillator and PLL off, etc) by setting POWER.D0 | 55 | * of the time (24 MHZ oscillator and PLL off, etc) by setting POWER.D0 |
| 53 | * and, when in host mode, autosuspending idle root ports... PHYPLLON | 56 | * and, when in host mode, autosuspending idle root ports... PHYPLLON |
| @@ -56,20 +59,26 @@ | |||
| 56 | 59 | ||
| 57 | static inline void phy_on(void) | 60 | static inline void phy_on(void) |
| 58 | { | 61 | { |
| 59 | /* start the on-chip PHY and its PLL */ | 62 | u32 phy_ctrl = __raw_readl(USB_PHY_CTRL); |
| 60 | __raw_writel(USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON, | 63 | |
| 61 | (void __force __iomem *) IO_ADDRESS(USBPHY_CTL_PADDR)); | 64 | /* power everything up; start the on-chip PHY and its PLL */ |
| 62 | while ((__raw_readl((void __force __iomem *) | 65 | phy_ctrl &= ~(USBPHY_OSCPDWN | USBPHY_OTGPDWN | USBPHY_PHYPDWN); |
| 63 | IO_ADDRESS(USBPHY_CTL_PADDR)) | 66 | phy_ctrl |= USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON; |
| 64 | & USBPHY_PHYCLKGD) == 0) | 67 | __raw_writel(phy_ctrl, USB_PHY_CTRL); |
| 68 | |||
| 69 | /* wait for PLL to lock before proceeding */ | ||
| 70 | while ((__raw_readl(USB_PHY_CTRL) & USBPHY_PHYCLKGD) == 0) | ||
| 65 | cpu_relax(); | 71 | cpu_relax(); |
| 66 | } | 72 | } |
| 67 | 73 | ||
| 68 | static inline void phy_off(void) | 74 | static inline void phy_off(void) |
| 69 | { | 75 | { |
| 70 | /* powerdown the on-chip PHY and its oscillator */ | 76 | u32 phy_ctrl = __raw_readl(USB_PHY_CTRL); |
| 71 | __raw_writel(USBPHY_OSCPDWN | USBPHY_PHYPDWN, (void __force __iomem *) | 77 | |
| 72 | IO_ADDRESS(USBPHY_CTL_PADDR)); | 78 | /* powerdown the on-chip PHY, its PLL, and the OTG block */ |
| 79 | phy_ctrl &= ~(USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON); | ||
| 80 | phy_ctrl |= USBPHY_OSCPDWN | USBPHY_OTGPDWN | USBPHY_PHYPDWN; | ||
| 81 | __raw_writel(phy_ctrl, USB_PHY_CTRL); | ||
| 73 | } | 82 | } |
| 74 | 83 | ||
| 75 | static int dma_off = 1; | 84 | static int dma_off = 1; |
| @@ -126,10 +135,6 @@ void musb_platform_disable(struct musb *musb) | |||
| 126 | } | 135 | } |
| 127 | 136 | ||
| 128 | 137 | ||
| 129 | /* REVISIT it's not clear whether DaVinci can support full OTG. */ | ||
| 130 | |||
| 131 | static int vbus_state = -1; | ||
| 132 | |||
| 133 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | 138 | #ifdef CONFIG_USB_MUSB_HDRC_HCD |
| 134 | #define portstate(stmt) stmt | 139 | #define portstate(stmt) stmt |
| 135 | #else | 140 | #else |
| @@ -137,10 +142,19 @@ static int vbus_state = -1; | |||
| 137 | #endif | 142 | #endif |
| 138 | 143 | ||
| 139 | 144 | ||
| 140 | /* VBUS SWITCHING IS BOARD-SPECIFIC */ | 145 | /* |
| 146 | * VBUS SWITCHING IS BOARD-SPECIFIC ... at least for the DM6446 EVM, | ||
| 147 | * which doesn't wire DRVVBUS to the FET that switches it. Unclear | ||
| 148 | * if that's a problem with the DM6446 chip or just with that board. | ||
| 149 | * | ||
| 150 | * In either case, the DM355 EVM automates DRVVBUS the normal way, | ||
| 151 | * when J10 is out, and TI documents it as handling OTG. | ||
| 152 | */ | ||
| 141 | 153 | ||
| 142 | #ifdef CONFIG_MACH_DAVINCI_EVM | 154 | #ifdef CONFIG_MACH_DAVINCI_EVM |
| 143 | 155 | ||
| 156 | static int vbus_state = -1; | ||
| 157 | |||
| 144 | /* I2C operations are always synchronous, and require a task context. | 158 | /* I2C operations are always synchronous, and require a task context. |
| 145 | * With unloaded systems, using the shared workqueue seems to suffice | 159 | * With unloaded systems, using the shared workqueue seems to suffice |
| 146 | * to satisfy the 100msec A_WAIT_VRISE timeout... | 160 | * to satisfy the 100msec A_WAIT_VRISE timeout... |
| @@ -150,12 +164,12 @@ static void evm_deferred_drvvbus(struct work_struct *ignored) | |||
| 150 | gpio_set_value_cansleep(GPIO_nVBUS_DRV, vbus_state); | 164 | gpio_set_value_cansleep(GPIO_nVBUS_DRV, vbus_state); |
| 151 | vbus_state = !vbus_state; | 165 | vbus_state = !vbus_state; |
| 152 | } | 166 | } |
| 153 | static DECLARE_WORK(evm_vbus_work, evm_deferred_drvvbus); | ||
| 154 | 167 | ||
| 155 | #endif /* EVM */ | 168 | #endif /* EVM */ |
| 156 | 169 | ||
| 157 | static void davinci_source_power(struct musb *musb, int is_on, int immediate) | 170 | static void davinci_source_power(struct musb *musb, int is_on, int immediate) |
| 158 | { | 171 | { |
| 172 | #ifdef CONFIG_MACH_DAVINCI_EVM | ||
| 159 | if (is_on) | 173 | if (is_on) |
| 160 | is_on = 1; | 174 | is_on = 1; |
| 161 | 175 | ||
| @@ -163,16 +177,17 @@ static void davinci_source_power(struct musb *musb, int is_on, int immediate) | |||
| 163 | return; | 177 | return; |
| 164 | vbus_state = !is_on; /* 0/1 vs "-1 == unknown/init" */ | 178 | vbus_state = !is_on; /* 0/1 vs "-1 == unknown/init" */ |
| 165 | 179 | ||
| 166 | #ifdef CONFIG_MACH_DAVINCI_EVM | ||
| 167 | if (machine_is_davinci_evm()) { | 180 | if (machine_is_davinci_evm()) { |
| 181 | static DECLARE_WORK(evm_vbus_work, evm_deferred_drvvbus); | ||
| 182 | |||
| 168 | if (immediate) | 183 | if (immediate) |
| 169 | gpio_set_value_cansleep(GPIO_nVBUS_DRV, vbus_state); | 184 | gpio_set_value_cansleep(GPIO_nVBUS_DRV, vbus_state); |
| 170 | else | 185 | else |
| 171 | schedule_work(&evm_vbus_work); | 186 | schedule_work(&evm_vbus_work); |
| 172 | } | 187 | } |
| 173 | #endif | ||
| 174 | if (immediate) | 188 | if (immediate) |
| 175 | vbus_state = is_on; | 189 | vbus_state = is_on; |
| 190 | #endif | ||
| 176 | } | 191 | } |
| 177 | 192 | ||
| 178 | static void davinci_set_vbus(struct musb *musb, int is_on) | 193 | static void davinci_set_vbus(struct musb *musb, int is_on) |
| @@ -391,6 +406,17 @@ int __init musb_platform_init(struct musb *musb) | |||
| 391 | musb->board_set_vbus = davinci_set_vbus; | 406 | musb->board_set_vbus = davinci_set_vbus; |
| 392 | davinci_source_power(musb, 0, 1); | 407 | davinci_source_power(musb, 0, 1); |
| 393 | 408 | ||
| 409 | /* dm355 EVM swaps D+/D- for signal integrity, and | ||
| 410 | * is clocked from the main 24 MHz crystal. | ||
| 411 | */ | ||
| 412 | if (machine_is_davinci_dm355_evm()) { | ||
| 413 | u32 phy_ctrl = __raw_readl(USB_PHY_CTRL); | ||
| 414 | |||
| 415 | phy_ctrl &= ~(3 << 9); | ||
| 416 | phy_ctrl |= USBPHY_DATAPOL; | ||
| 417 | __raw_writel(phy_ctrl, USB_PHY_CTRL); | ||
| 418 | } | ||
| 419 | |||
| 394 | /* reset the controller */ | 420 | /* reset the controller */ |
| 395 | musb_writel(tibase, DAVINCI_USB_CTRL_REG, 0x1); | 421 | musb_writel(tibase, DAVINCI_USB_CTRL_REG, 0x1); |
| 396 | 422 | ||
| @@ -401,8 +427,7 @@ int __init musb_platform_init(struct musb *musb) | |||
| 401 | 427 | ||
| 402 | /* NOTE: irqs are in mixed mode, not bypass to pure-musb */ | 428 | /* NOTE: irqs are in mixed mode, not bypass to pure-musb */ |
| 403 | pr_debug("DaVinci OTG revision %08x phy %03x control %02x\n", | 429 | pr_debug("DaVinci OTG revision %08x phy %03x control %02x\n", |
| 404 | revision, __raw_readl((void __force __iomem *) | 430 | revision, __raw_readl(USB_PHY_CTRL), |
| 405 | IO_ADDRESS(USBPHY_CTL_PADDR)), | ||
| 406 | musb_readb(tibase, DAVINCI_USB_CTRL_REG)); | 431 | musb_readb(tibase, DAVINCI_USB_CTRL_REG)); |
| 407 | 432 | ||
| 408 | musb->isr = davinci_interrupt; | 433 | musb->isr = davinci_interrupt; |
diff --git a/drivers/usb/musb/davinci.h b/drivers/usb/musb/davinci.h index 7fb6238e270f..046c84433cad 100644 --- a/drivers/usb/musb/davinci.h +++ b/drivers/usb/musb/davinci.h | |||
| @@ -15,14 +15,21 @@ | |||
| 15 | */ | 15 | */ |
| 16 | 16 | ||
| 17 | /* Integrated highspeed/otg PHY */ | 17 | /* Integrated highspeed/otg PHY */ |
| 18 | #define USBPHY_CTL_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x34) | 18 | #define USBPHY_CTL_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x34) |
| 19 | #define USBPHY_PHYCLKGD (1 << 8) | 19 | #define USBPHY_DATAPOL BIT(11) /* (dm355) switch D+/D- */ |
| 20 | #define USBPHY_SESNDEN (1 << 7) /* v(sess_end) comparator */ | 20 | #define USBPHY_PHYCLKGD BIT(8) |
| 21 | #define USBPHY_VBDTCTEN (1 << 6) /* v(bus) comparator */ | 21 | #define USBPHY_SESNDEN BIT(7) /* v(sess_end) comparator */ |
| 22 | #define USBPHY_PHYPLLON (1 << 4) /* override pll suspend */ | 22 | #define USBPHY_VBDTCTEN BIT(6) /* v(bus) comparator */ |
| 23 | #define USBPHY_CLKO1SEL (1 << 3) | 23 | #define USBPHY_VBUSSENS BIT(5) /* (dm355,ro) is vbus > 0.5V */ |
| 24 | #define USBPHY_OSCPDWN (1 << 2) | 24 | #define USBPHY_PHYPLLON BIT(4) /* override pll suspend */ |
| 25 | #define USBPHY_PHYPDWN (1 << 0) | 25 | #define USBPHY_CLKO1SEL BIT(3) |
| 26 | #define USBPHY_OSCPDWN BIT(2) | ||
| 27 | #define USBPHY_OTGPDWN BIT(1) | ||
| 28 | #define USBPHY_PHYPDWN BIT(0) | ||
| 29 | |||
| 30 | #define DM355_DEEPSLEEP_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x48) | ||
| 31 | #define DRVVBUS_FORCE BIT(2) | ||
| 32 | #define DRVVBUS_OVERRIDE BIT(1) | ||
| 26 | 33 | ||
| 27 | /* For now include usb OTG module registers here */ | 34 | /* For now include usb OTG module registers here */ |
| 28 | #define DAVINCI_USB_VERSION_REG 0x00 | 35 | #define DAVINCI_USB_VERSION_REG 0x00 |
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index af77e4659006..338cd1611ab3 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
| @@ -769,7 +769,7 @@ static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb, | |||
| 769 | case OTG_STATE_A_SUSPEND: | 769 | case OTG_STATE_A_SUSPEND: |
| 770 | usb_hcd_resume_root_hub(musb_to_hcd(musb)); | 770 | usb_hcd_resume_root_hub(musb_to_hcd(musb)); |
| 771 | musb_root_disconnect(musb); | 771 | musb_root_disconnect(musb); |
| 772 | if (musb->a_wait_bcon != 0) | 772 | if (musb->a_wait_bcon != 0 && is_otg_enabled(musb)) |
| 773 | musb_platform_try_idle(musb, jiffies | 773 | musb_platform_try_idle(musb, jiffies |
| 774 | + msecs_to_jiffies(musb->a_wait_bcon)); | 774 | + msecs_to_jiffies(musb->a_wait_bcon)); |
| 775 | break; | 775 | break; |
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index 630946a2d9fc..efb39b5e55b5 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h | |||
| @@ -331,7 +331,6 @@ struct musb { | |||
| 331 | struct list_head control; /* of musb_qh */ | 331 | struct list_head control; /* of musb_qh */ |
| 332 | struct list_head in_bulk; /* of musb_qh */ | 332 | struct list_head in_bulk; /* of musb_qh */ |
| 333 | struct list_head out_bulk; /* of musb_qh */ | 333 | struct list_head out_bulk; /* of musb_qh */ |
| 334 | struct musb_qh *periodic[32]; /* tree of interrupt+iso */ | ||
| 335 | #endif | 334 | #endif |
| 336 | 335 | ||
| 337 | /* called with IRQs blocked; ON/nonzero implies starting a session, | 336 | /* called with IRQs blocked; ON/nonzero implies starting a session, |
| @@ -479,10 +478,11 @@ static inline void musb_configure_ep0(struct musb *musb) | |||
| 479 | static inline int musb_read_fifosize(struct musb *musb, | 478 | static inline int musb_read_fifosize(struct musb *musb, |
| 480 | struct musb_hw_ep *hw_ep, u8 epnum) | 479 | struct musb_hw_ep *hw_ep, u8 epnum) |
| 481 | { | 480 | { |
| 481 | void *mbase = musb->mregs; | ||
| 482 | u8 reg = 0; | 482 | u8 reg = 0; |
| 483 | 483 | ||
| 484 | /* read from core using indexed model */ | 484 | /* read from core using indexed model */ |
| 485 | reg = musb_readb(hw_ep->regs, 0x10 + MUSB_FIFOSIZE); | 485 | reg = musb_readb(mbase, MUSB_EP_OFFSET(epnum, MUSB_FIFOSIZE)); |
| 486 | /* 0's returned when no more endpoints */ | 486 | /* 0's returned when no more endpoints */ |
| 487 | if (!reg) | 487 | if (!reg) |
| 488 | return -ENODEV; | 488 | return -ENODEV; |
| @@ -509,6 +509,7 @@ static inline void musb_configure_ep0(struct musb *musb) | |||
| 509 | { | 509 | { |
| 510 | musb->endpoints[0].max_packet_sz_tx = MUSB_EP0_FIFOSIZE; | 510 | musb->endpoints[0].max_packet_sz_tx = MUSB_EP0_FIFOSIZE; |
| 511 | musb->endpoints[0].max_packet_sz_rx = MUSB_EP0_FIFOSIZE; | 511 | musb->endpoints[0].max_packet_sz_rx = MUSB_EP0_FIFOSIZE; |
| 512 | musb->endpoints[0].is_shared_fifo = true; | ||
| 512 | } | 513 | } |
| 513 | #endif /* CONFIG_BLACKFIN */ | 514 | #endif /* CONFIG_BLACKFIN */ |
| 514 | 515 | ||
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 6dbbd0786a6a..499c431a6d62 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c | |||
| @@ -64,11 +64,8 @@ | |||
| 64 | * | 64 | * |
| 65 | * - DMA (Mentor/OMAP) ...has at least toggle update problems | 65 | * - DMA (Mentor/OMAP) ...has at least toggle update problems |
| 66 | * | 66 | * |
| 67 | * - Still no traffic scheduling code to make NAKing for bulk or control | 67 | * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet |
| 68 | * transfers unable to starve other requests; or to make efficient use | 68 | * starvation ... nothing yet for TX, interrupt, or bulk. |
| 69 | * of hardware with periodic transfers. (Note that network drivers | ||
| 70 | * commonly post bulk reads that stay pending for a long time; these | ||
| 71 | * would make very visible trouble.) | ||
| 72 | * | 69 | * |
| 73 | * - Not tested with HNP, but some SRP paths seem to behave. | 70 | * - Not tested with HNP, but some SRP paths seem to behave. |
| 74 | * | 71 | * |
| @@ -88,11 +85,8 @@ | |||
| 88 | * | 85 | * |
| 89 | * CONTROL transfers all go through ep0. BULK ones go through dedicated IN | 86 | * CONTROL transfers all go through ep0. BULK ones go through dedicated IN |
| 90 | * and OUT endpoints ... hardware is dedicated for those "async" queue(s). | 87 | * and OUT endpoints ... hardware is dedicated for those "async" queue(s). |
| 91 | * | ||
| 92 | * (Yes, bulk _could_ use more of the endpoints than that, and would even | 88 | * (Yes, bulk _could_ use more of the endpoints than that, and would even |
| 93 | * benefit from it ... one remote device may easily be NAKing while others | 89 | * benefit from it.) |
| 94 | * need to perform transfers in that same direction. The same thing could | ||
| 95 | * be done in software though, assuming dma cooperates.) | ||
| 96 | * | 90 | * |
| 97 | * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints. | 91 | * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints. |
| 98 | * So far that scheduling is both dumb and optimistic: the endpoint will be | 92 | * So far that scheduling is both dumb and optimistic: the endpoint will be |
| @@ -201,8 +195,9 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh) | |||
| 201 | len = urb->iso_frame_desc[0].length; | 195 | len = urb->iso_frame_desc[0].length; |
| 202 | break; | 196 | break; |
| 203 | default: /* bulk, interrupt */ | 197 | default: /* bulk, interrupt */ |
| 204 | buf = urb->transfer_buffer; | 198 | /* actual_length may be nonzero on retry paths */ |
| 205 | len = urb->transfer_buffer_length; | 199 | buf = urb->transfer_buffer + urb->actual_length; |
| 200 | len = urb->transfer_buffer_length - urb->actual_length; | ||
| 206 | } | 201 | } |
| 207 | 202 | ||
| 208 | DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n", | 203 | DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n", |
| @@ -395,7 +390,6 @@ musb_giveback(struct musb_qh *qh, struct urb *urb, int status) | |||
| 395 | * de-allocated if it's tracked and allocated; | 390 | * de-allocated if it's tracked and allocated; |
| 396 | * and where we'd update the schedule tree... | 391 | * and where we'd update the schedule tree... |
| 397 | */ | 392 | */ |
| 398 | musb->periodic[ep->epnum] = NULL; | ||
| 399 | kfree(qh); | 393 | kfree(qh); |
| 400 | qh = NULL; | 394 | qh = NULL; |
| 401 | break; | 395 | break; |
| @@ -1045,7 +1039,8 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb) | |||
| 1045 | 1039 | ||
| 1046 | /* NOTE: this code path would be a good place to PAUSE a | 1040 | /* NOTE: this code path would be a good place to PAUSE a |
| 1047 | * control transfer, if another one is queued, so that | 1041 | * control transfer, if another one is queued, so that |
| 1048 | * ep0 is more likely to stay busy. | 1042 | * ep0 is more likely to stay busy. That's already done |
| 1043 | * for bulk RX transfers. | ||
| 1049 | * | 1044 | * |
| 1050 | * if (qh->ring.next != &musb->control), then | 1045 | * if (qh->ring.next != &musb->control), then |
| 1051 | * we have a candidate... NAKing is *NOT* an error | 1046 | * we have a candidate... NAKing is *NOT* an error |
| @@ -1197,6 +1192,7 @@ void musb_host_tx(struct musb *musb, u8 epnum) | |||
| 1197 | /* NOTE: this code path would be a good place to PAUSE a | 1192 | /* NOTE: this code path would be a good place to PAUSE a |
| 1198 | * transfer, if there's some other (nonperiodic) tx urb | 1193 | * transfer, if there's some other (nonperiodic) tx urb |
| 1199 | * that could use this fifo. (dma complicates it...) | 1194 | * that could use this fifo. (dma complicates it...) |
| 1195 | * That's already done for bulk RX transfers. | ||
| 1200 | * | 1196 | * |
| 1201 | * if (bulk && qh->ring.next != &musb->out_bulk), then | 1197 | * if (bulk && qh->ring.next != &musb->out_bulk), then |
| 1202 | * we have a candidate... NAKing is *NOT* an error | 1198 | * we have a candidate... NAKing is *NOT* an error |
| @@ -1358,6 +1354,50 @@ finish: | |||
| 1358 | 1354 | ||
| 1359 | #endif | 1355 | #endif |
| 1360 | 1356 | ||
| 1357 | /* Schedule next QH from musb->in_bulk and move the current qh to | ||
| 1358 | * the end; avoids starvation for other endpoints. | ||
| 1359 | */ | ||
| 1360 | static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep) | ||
| 1361 | { | ||
| 1362 | struct dma_channel *dma; | ||
| 1363 | struct urb *urb; | ||
| 1364 | void __iomem *mbase = musb->mregs; | ||
| 1365 | void __iomem *epio = ep->regs; | ||
| 1366 | struct musb_qh *cur_qh, *next_qh; | ||
| 1367 | u16 rx_csr; | ||
| 1368 | |||
| 1369 | musb_ep_select(mbase, ep->epnum); | ||
| 1370 | dma = is_dma_capable() ? ep->rx_channel : NULL; | ||
| 1371 | |||
| 1372 | /* clear nak timeout bit */ | ||
| 1373 | rx_csr = musb_readw(epio, MUSB_RXCSR); | ||
| 1374 | rx_csr |= MUSB_RXCSR_H_WZC_BITS; | ||
| 1375 | rx_csr &= ~MUSB_RXCSR_DATAERROR; | ||
| 1376 | musb_writew(epio, MUSB_RXCSR, rx_csr); | ||
| 1377 | |||
| 1378 | cur_qh = first_qh(&musb->in_bulk); | ||
| 1379 | if (cur_qh) { | ||
| 1380 | urb = next_urb(cur_qh); | ||
| 1381 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | ||
| 1382 | dma->status = MUSB_DMA_STATUS_CORE_ABORT; | ||
| 1383 | musb->dma_controller->channel_abort(dma); | ||
| 1384 | urb->actual_length += dma->actual_len; | ||
| 1385 | dma->actual_len = 0L; | ||
| 1386 | } | ||
| 1387 | musb_save_toggle(ep, 1, urb); | ||
| 1388 | |||
| 1389 | /* move cur_qh to end of queue */ | ||
| 1390 | list_move_tail(&cur_qh->ring, &musb->in_bulk); | ||
| 1391 | |||
| 1392 | /* get the next qh from musb->in_bulk */ | ||
| 1393 | next_qh = first_qh(&musb->in_bulk); | ||
| 1394 | |||
| 1395 | /* set rx_reinit and schedule the next qh */ | ||
| 1396 | ep->rx_reinit = 1; | ||
| 1397 | musb_start_urb(musb, 1, next_qh); | ||
| 1398 | } | ||
| 1399 | } | ||
| 1400 | |||
| 1361 | /* | 1401 | /* |
| 1362 | * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso, | 1402 | * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso, |
| 1363 | * and high-bandwidth IN transfer cases. | 1403 | * and high-bandwidth IN transfer cases. |
| @@ -1421,18 +1461,26 @@ void musb_host_rx(struct musb *musb, u8 epnum) | |||
| 1421 | } else if (rx_csr & MUSB_RXCSR_DATAERROR) { | 1461 | } else if (rx_csr & MUSB_RXCSR_DATAERROR) { |
| 1422 | 1462 | ||
| 1423 | if (USB_ENDPOINT_XFER_ISOC != qh->type) { | 1463 | if (USB_ENDPOINT_XFER_ISOC != qh->type) { |
| 1424 | /* NOTE this code path would be a good place to PAUSE a | 1464 | DBG(6, "RX end %d NAK timeout\n", epnum); |
| 1425 | * transfer, if there's some other (nonperiodic) rx urb | 1465 | |
| 1426 | * that could use this fifo. (dma complicates it...) | 1466 | /* NOTE: NAKing is *NOT* an error, so we want to |
| 1467 | * continue. Except ... if there's a request for | ||
| 1468 | * another QH, use that instead of starving it. | ||
| 1427 | * | 1469 | * |
| 1428 | * if (bulk && qh->ring.next != &musb->in_bulk), then | 1470 | * Devices like Ethernet and serial adapters keep |
| 1429 | * we have a candidate... NAKing is *NOT* an error | 1471 | * reads posted at all times, which will starve |
| 1472 | * other devices without this logic. | ||
| 1430 | */ | 1473 | */ |
| 1431 | DBG(6, "RX end %d NAK timeout\n", epnum); | 1474 | if (usb_pipebulk(urb->pipe) |
| 1475 | && qh->mux == 1 | ||
| 1476 | && !list_is_singular(&musb->in_bulk)) { | ||
| 1477 | musb_bulk_rx_nak_timeout(musb, hw_ep); | ||
| 1478 | return; | ||
| 1479 | } | ||
| 1432 | musb_ep_select(mbase, epnum); | 1480 | musb_ep_select(mbase, epnum); |
| 1433 | musb_writew(epio, MUSB_RXCSR, | 1481 | rx_csr |= MUSB_RXCSR_H_WZC_BITS; |
| 1434 | MUSB_RXCSR_H_WZC_BITS | 1482 | rx_csr &= ~MUSB_RXCSR_DATAERROR; |
| 1435 | | MUSB_RXCSR_H_REQPKT); | 1483 | musb_writew(epio, MUSB_RXCSR, rx_csr); |
| 1436 | 1484 | ||
| 1437 | goto finish; | 1485 | goto finish; |
| 1438 | } else { | 1486 | } else { |
| @@ -1711,31 +1759,27 @@ static int musb_schedule( | |||
| 1711 | 1759 | ||
| 1712 | /* else, periodic transfers get muxed to other endpoints */ | 1760 | /* else, periodic transfers get muxed to other endpoints */ |
| 1713 | 1761 | ||
| 1714 | /* FIXME this doesn't consider direction, so it can only | 1762 | /* |
| 1715 | * work for one half of the endpoint hardware, and assumes | 1763 | * We know this qh hasn't been scheduled, so all we need to do |
| 1716 | * the previous cases handled all non-shared endpoints... | ||
| 1717 | */ | ||
| 1718 | |||
| 1719 | /* we know this qh hasn't been scheduled, so all we need to do | ||
| 1720 | * is choose which hardware endpoint to put it on ... | 1764 | * is choose which hardware endpoint to put it on ... |
| 1721 | * | 1765 | * |
| 1722 | * REVISIT what we really want here is a regular schedule tree | 1766 | * REVISIT what we really want here is a regular schedule tree |
| 1723 | * like e.g. OHCI uses, but for now musb->periodic is just an | 1767 | * like e.g. OHCI uses. |
| 1724 | * array of the _single_ logical endpoint associated with a | ||
| 1725 | * given physical one (identity mapping logical->physical). | ||
| 1726 | * | ||
| 1727 | * that simplistic approach makes TT scheduling a lot simpler; | ||
| 1728 | * there is none, and thus none of its complexity... | ||
| 1729 | */ | 1768 | */ |
| 1730 | best_diff = 4096; | 1769 | best_diff = 4096; |
| 1731 | best_end = -1; | 1770 | best_end = -1; |
| 1732 | 1771 | ||
| 1733 | for (epnum = 1; epnum < musb->nr_endpoints; epnum++) { | 1772 | for (epnum = 1, hw_ep = musb->endpoints + 1; |
| 1773 | epnum < musb->nr_endpoints; | ||
| 1774 | epnum++, hw_ep++) { | ||
| 1734 | int diff; | 1775 | int diff; |
| 1735 | 1776 | ||
| 1736 | if (musb->periodic[epnum]) | 1777 | if (is_in || hw_ep->is_shared_fifo) { |
| 1778 | if (hw_ep->in_qh != NULL) | ||
| 1779 | continue; | ||
| 1780 | } else if (hw_ep->out_qh != NULL) | ||
| 1737 | continue; | 1781 | continue; |
| 1738 | hw_ep = &musb->endpoints[epnum]; | 1782 | |
| 1739 | if (hw_ep == musb->bulk_ep) | 1783 | if (hw_ep == musb->bulk_ep) |
| 1740 | continue; | 1784 | continue; |
| 1741 | 1785 | ||
| @@ -1756,6 +1800,17 @@ static int musb_schedule( | |||
| 1756 | head = &musb->in_bulk; | 1800 | head = &musb->in_bulk; |
| 1757 | else | 1801 | else |
| 1758 | head = &musb->out_bulk; | 1802 | head = &musb->out_bulk; |
| 1803 | |||
| 1804 | /* Enable bulk RX NAK timeout scheme when bulk requests are | ||
| 1805 | * multiplexed. This scheme doesn't work in high speed to full | ||
| 1806 | * speed scenario as NAK interrupts are not coming from a | ||
| 1807 | * full speed device connected to a high speed device. | ||
| 1808 | * NAK timeout interval is 8 (128 uframe or 16ms) for HS and | ||
| 1809 | * 4 (8 frame or 8ms) for FS device. | ||
| 1810 | */ | ||
| 1811 | if (is_in && qh->dev) | ||
| 1812 | qh->intv_reg = | ||
| 1813 | (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4; | ||
| 1759 | goto success; | 1814 | goto success; |
| 1760 | } else if (best_end < 0) { | 1815 | } else if (best_end < 0) { |
| 1761 | return -ENOSPC; | 1816 | return -ENOSPC; |
| @@ -1764,7 +1819,6 @@ static int musb_schedule( | |||
| 1764 | idle = 1; | 1819 | idle = 1; |
| 1765 | qh->mux = 0; | 1820 | qh->mux = 0; |
| 1766 | hw_ep = musb->endpoints + best_end; | 1821 | hw_ep = musb->endpoints + best_end; |
| 1767 | musb->periodic[best_end] = qh; | ||
| 1768 | DBG(4, "qh %p periodic slot %d\n", qh, best_end); | 1822 | DBG(4, "qh %p periodic slot %d\n", qh, best_end); |
| 1769 | success: | 1823 | success: |
| 1770 | if (head) { | 1824 | if (head) { |
| @@ -1888,13 +1942,11 @@ static int musb_urb_enqueue( | |||
| 1888 | * | 1942 | * |
| 1889 | * The downside of disabling this is that transfer scheduling | 1943 | * The downside of disabling this is that transfer scheduling |
| 1890 | * gets VERY unfair for nonperiodic transfers; a misbehaving | 1944 | * gets VERY unfair for nonperiodic transfers; a misbehaving |
| 1891 | * peripheral could make that hurt. Or for reads, one that's | 1945 | * peripheral could make that hurt. That's perfectly normal |
| 1892 | * perfectly normal: network and other drivers keep reads | 1946 | * for reads from network or serial adapters ... so we have |
| 1893 | * posted at all times, having one pending for a week should | 1947 | * partial NAKlimit support for bulk RX. |
| 1894 | * be perfectly safe. | ||
| 1895 | * | 1948 | * |
| 1896 | * The upside of disabling it is avoidng transfer scheduling | 1949 | * The upside of disabling it is simpler transfer scheduling. |
| 1897 | * code to put this aside for while. | ||
| 1898 | */ | 1950 | */ |
| 1899 | interval = 0; | 1951 | interval = 0; |
| 1900 | } | 1952 | } |
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c index e0e9ce584175..bf677acc83db 100644 --- a/drivers/usb/musb/musb_virthub.c +++ b/drivers/usb/musb/musb_virthub.c | |||
| @@ -285,7 +285,7 @@ int musb_hub_control( | |||
| 285 | desc->bDescLength = 9; | 285 | desc->bDescLength = 9; |
| 286 | desc->bDescriptorType = 0x29; | 286 | desc->bDescriptorType = 0x29; |
| 287 | desc->bNbrPorts = 1; | 287 | desc->bNbrPorts = 1; |
| 288 | desc->wHubCharacteristics = __constant_cpu_to_le16( | 288 | desc->wHubCharacteristics = cpu_to_le16( |
| 289 | 0x0001 /* per-port power switching */ | 289 | 0x0001 /* per-port power switching */ |
| 290 | | 0x0010 /* no overcurrent reporting */ | 290 | | 0x0010 /* no overcurrent reporting */ |
| 291 | ); | 291 | ); |
