author      Jiri Kosina <jkosina@suse.cz>   2010-12-22 12:57:02 -0500
committer   Jiri Kosina <jkosina@suse.cz>   2010-12-22 12:57:02 -0500
commit      4b7bd364700d9ac8372eff48832062b936d0793b (patch)
tree        0dbf78c95456a0b02d07fcd473281f04a87e266d /drivers/usb/musb
parent      c0d8768af260e2cbb4bf659ae6094a262c86b085 (diff)
parent      90a8a73c06cc32b609a880d48449d7083327e11a (diff)
Merge branch 'master' into for-next
Conflicts:
MAINTAINERS
arch/arm/mach-omap2/pm24xx.c
drivers/scsi/bfa/bfa_fcpim.c
Needed to update to apply fixes for which the old branch was too
outdated.
Diffstat (limited to 'drivers/usb/musb')
-rw-r--r--   drivers/usb/musb/blackfin.c      80
-rw-r--r--   drivers/usb/musb/musb_core.c     44
-rw-r--r--   drivers/usb/musb/musb_core.h      2
-rw-r--r--   drivers/usb/musb/musb_gadget.c  165
-rw-r--r--   drivers/usb/musb/musb_regs.h      3
-rw-r--r--   drivers/usb/musb/musbhsdma.c     14
6 files changed, 201 insertions(+), 107 deletions(-)
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index 611a9d274363..fcb5206a65bd 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -171,8 +171,9 @@ static irqreturn_t blackfin_interrupt(int irq, void *__hci)
 	}
 
 	/* Start sampling ID pin, when plug is removed from MUSB */
-	if (is_otg_enabled(musb) && (musb->xceiv->state == OTG_STATE_B_IDLE
-		|| musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) {
+	if ((is_otg_enabled(musb) && (musb->xceiv->state == OTG_STATE_B_IDLE
+		|| musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) ||
+		(musb->int_usb & MUSB_INTR_DISCONNECT && is_host_active(musb))) {
 		mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
 		musb->a_wait_bcon = TIMER_DELAY;
 	}
@@ -323,30 +324,8 @@ int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
 	return -EIO;
 }
 
-int __init musb_platform_init(struct musb *musb, void *board_data)
+static void musb_platform_reg_init(struct musb *musb)
 {
-
-	/*
-	 * Rev 1.0 BF549 EZ-KITs require PE7 to be high for both DEVICE
-	 * and OTG HOST modes, while rev 1.1 and greater require PE7 to
-	 * be low for DEVICE mode and high for HOST mode. We set it high
-	 * here because we are in host mode
-	 */
-
-	if (gpio_request(musb->config->gpio_vrsel, "USB_VRSEL")) {
-		printk(KERN_ERR "Failed ro request USB_VRSEL GPIO_%d \n",
-			musb->config->gpio_vrsel);
-		return -ENODEV;
-	}
-	gpio_direction_output(musb->config->gpio_vrsel, 0);
-
-	usb_nop_xceiv_register();
-	musb->xceiv = otg_get_transceiver();
-	if (!musb->xceiv) {
-		gpio_free(musb->config->gpio_vrsel);
-		return -ENODEV;
-	}
-
 	if (ANOMALY_05000346) {
 		bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value);
 		SSYNC();
@@ -358,7 +337,8 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
 	}
 
 	/* Configure PLL oscillator register */
-	bfin_write_USB_PLLOSC_CTRL(0x30a8);
+	bfin_write_USB_PLLOSC_CTRL(0x3080 |
+		((480/musb->config->clkin) << 1));
 	SSYNC();
 
 	bfin_write_USB_SRP_CLKDIV((get_sclk()/1000) / 32 - 1);
@@ -380,6 +360,33 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
 		EP2_RX_ENA | EP3_RX_ENA | EP4_RX_ENA |
 		EP5_RX_ENA | EP6_RX_ENA | EP7_RX_ENA);
 	SSYNC();
+}
+
+int __init musb_platform_init(struct musb *musb, void *board_data)
+{
+
+	/*
+	 * Rev 1.0 BF549 EZ-KITs require PE7 to be high for both DEVICE
+	 * and OTG HOST modes, while rev 1.1 and greater require PE7 to
+	 * be low for DEVICE mode and high for HOST mode. We set it high
+	 * here because we are in host mode
+	 */
+
+	if (gpio_request(musb->config->gpio_vrsel, "USB_VRSEL")) {
+		printk(KERN_ERR "Failed ro request USB_VRSEL GPIO_%d\n",
+			musb->config->gpio_vrsel);
+		return -ENODEV;
+	}
+	gpio_direction_output(musb->config->gpio_vrsel, 0);
+
+	usb_nop_xceiv_register();
+	musb->xceiv = otg_get_transceiver();
+	if (!musb->xceiv) {
+		gpio_free(musb->config->gpio_vrsel);
+		return -ENODEV;
+	}
+
+	musb_platform_reg_init(musb);
 
 	if (is_host_enabled(musb)) {
 		musb->board_set_vbus = bfin_set_vbus;
@@ -394,6 +401,27 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
 	return 0;
 }
 
+#ifdef CONFIG_PM
+void musb_platform_save_context(struct musb *musb,
+			struct musb_context_registers *musb_context)
+{
+	if (is_host_active(musb))
+		/*
+		 * During hibernate gpio_vrsel will change from high to low
+		 * low which will generate wakeup event resume the system
+		 * immediately. Set it to 0 before hibernate to avoid this
+		 * wakeup event.
+		 */
+		gpio_set_value(musb->config->gpio_vrsel, 0);
+}
+
+void musb_platform_restore_context(struct musb *musb,
+			struct musb_context_registers *musb_context)
+{
+	musb_platform_reg_init(musb);
+}
+#endif
+
 int musb_platform_exit(struct musb *musb)
 {
 	gpio_free(musb->config->gpio_vrsel);
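A note on the PLLOSC change above: the old code wrote the hard-coded constant 0x30a8, which corresponds to a 24 MHz input clock under the new formula; the new code derives the register value from the board-specific musb->config->clkin instead. A standalone sketch of the arithmetic (not part of the patch; the 24 MHz clkin below is an assumed example value):

	#include <stdio.h>

	int main(void)
	{
		unsigned int clkin = 24;	/* assumed 24 MHz USB clock input */
		unsigned int val = 0x3080 | ((480 / clkin) << 1);

		/* 480 / 24 = 20, 20 << 1 = 0x28, 0x3080 | 0x28 = 0x30a8 */
		printf("USB_PLLOSC_CTRL = 0x%04x\n", val);
		return 0;
	}

For clkin = 24 this reproduces the old constant, so boards with a 24 MHz clock see no behavioural change; other clock rates now get a correctly scaled divider.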
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index c9f9024c5515..99beebce8550 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -552,7 +552,8 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
 	if (int_usb & MUSB_INTR_SESSREQ) {
 		void __iomem *mbase = musb->mregs;
 
-		if (devctl & MUSB_DEVCTL_BDEVICE) {
+		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS
+				&& (devctl & MUSB_DEVCTL_BDEVICE)) {
 			DBG(3, "SessReq while on B state\n");
 			return IRQ_HANDLED;
 		}
@@ -1052,6 +1053,11 @@ static void musb_shutdown(struct platform_device *pdev)
 	clk_put(musb->clock);
 	spin_unlock_irqrestore(&musb->lock, flags);
 
+	if (!is_otg_enabled(musb) && is_host_enabled(musb))
+		usb_remove_hcd(musb_to_hcd(musb));
+	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+	musb_platform_exit(musb);
+
 	/* FIXME power down */
 }
 
@@ -2110,12 +2116,15 @@ bad_config:
 	 * Otherwise, wait till the gadget driver hooks up.
 	 */
 	if (!is_otg_enabled(musb) && is_host_enabled(musb)) {
+		struct usb_hcd *hcd = musb_to_hcd(musb);
+
 		MUSB_HST_MODE(musb);
 		musb->xceiv->default_a = 1;
 		musb->xceiv->state = OTG_STATE_A_IDLE;
 
 		status = usb_add_hcd(musb_to_hcd(musb), -1, 0);
 
+		hcd->self.uses_pio_for_control = 1;
 		DBG(1, "%s mode, status %d, devctl %02x %c\n",
 			"HOST", status,
 			musb_readb(musb->mregs, MUSB_DEVCTL),
@@ -2244,13 +2253,6 @@ static int __exit musb_remove(struct platform_device *pdev)
 	 */
 	musb_exit_debugfs(musb);
 	musb_shutdown(pdev);
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
-	if (musb->board_mode == MUSB_HOST)
-		usb_remove_hcd(musb_to_hcd(musb));
-#endif
-	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
-	musb_platform_exit(musb);
-	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
 
 	musb_free(musb);
 	iounmap(ctrl_base);
@@ -2411,9 +2413,6 @@ static int musb_suspend(struct device *dev)
 	unsigned long flags;
 	struct musb *musb = dev_to_musb(&pdev->dev);
 
-	if (!musb->clock)
-		return 0;
-
 	spin_lock_irqsave(&musb->lock, flags);
 
 	if (is_peripheral_active(musb)) {
@@ -2428,10 +2427,12 @@ static int musb_suspend(struct device *dev)
 
 	musb_save_context(musb);
 
-	if (musb->set_clock)
-		musb->set_clock(musb->clock, 0);
-	else
-		clk_disable(musb->clock);
+	if (musb->clock) {
+		if (musb->set_clock)
+			musb->set_clock(musb->clock, 0);
+		else
+			clk_disable(musb->clock);
+	}
 	spin_unlock_irqrestore(&musb->lock, flags);
 	return 0;
 }
@@ -2441,13 +2442,12 @@ static int musb_resume_noirq(struct device *dev)
 	struct platform_device *pdev = to_platform_device(dev);
 	struct musb *musb = dev_to_musb(&pdev->dev);
 
-	if (!musb->clock)
-		return 0;
-
-	if (musb->set_clock)
-		musb->set_clock(musb->clock, 1);
-	else
-		clk_enable(musb->clock);
+	if (musb->clock) {
+		if (musb->set_clock)
+			musb->set_clock(musb->clock, 1);
+		else
+			clk_enable(musb->clock);
+	}
 
 	musb_restore_context(musb);
 
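One behavioural detail in the musb_stage0_irq() hunk above: the SessReq path now checks (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS rather than treating VBUS as a single flag. Assuming the usual MUSB register layout, MUSB_DEVCTL_VBUS is a two-bit level field (mask 0x18), so the mask-and-compare form is true only when both bits are set, i.e. the highest of the four encoded VBUS levels, whereas a plain bitwise test would fire for any non-zero level. A small standalone sketch of the difference (VBUS_MASK is a local stand-in, not the driver's macro):

	#include <stdio.h>

	#define VBUS_MASK 0x18	/* assumed two-bit VBUS field, bits 4:3 */

	int main(void)
	{
		unsigned char devctl = 0x08;	/* example: lowest non-zero VBUS level */

		printf("any level set:  %d\n", !!(devctl & VBUS_MASK));		/* prints 1 */
		printf("level fully up: %d\n",
		       (devctl & VBUS_MASK) == VBUS_MASK);			/* prints 0 */
		return 0;
	}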
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 69797e5b46a7..febaabcc2b35 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -487,7 +487,7 @@ struct musb_context_registers {
 };
 
 #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \
-	defined(CONFIG_ARCH_OMAP4)
+	defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_BLACKFIN)
 extern void musb_platform_save_context(struct musb *musb,
 		struct musb_context_registers *musb_context);
 extern void musb_platform_restore_context(struct musb *musb,
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 1cbeaa17ffff..d5b9cfcc5977 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -92,6 +92,59 @@
 
 /* ----------------------------------------------------------------------- */
 
+/* Maps the buffer to dma */
+
+static inline void map_dma_buffer(struct musb_request *request,
+				struct musb *musb)
+{
+	if (request->request.dma == DMA_ADDR_INVALID) {
+		request->request.dma = dma_map_single(
+				musb->controller,
+				request->request.buf,
+				request->request.length,
+				request->tx
+					? DMA_TO_DEVICE
+					: DMA_FROM_DEVICE);
+		request->mapped = 1;
+	} else {
+		dma_sync_single_for_device(musb->controller,
+			request->request.dma,
+			request->request.length,
+			request->tx
+				? DMA_TO_DEVICE
+				: DMA_FROM_DEVICE);
+		request->mapped = 0;
+	}
+}
+
+/* Unmap the buffer from dma and maps it back to cpu */
+static inline void unmap_dma_buffer(struct musb_request *request,
+				struct musb *musb)
+{
+	if (request->request.dma == DMA_ADDR_INVALID) {
+		DBG(20, "not unmapping a never mapped buffer\n");
+		return;
+	}
+	if (request->mapped) {
+		dma_unmap_single(musb->controller,
+			request->request.dma,
+			request->request.length,
+			request->tx
+				? DMA_TO_DEVICE
+				: DMA_FROM_DEVICE);
+		request->request.dma = DMA_ADDR_INVALID;
+		request->mapped = 0;
+	} else {
+		dma_sync_single_for_cpu(musb->controller,
+			request->request.dma,
+			request->request.length,
+			request->tx
+				? DMA_TO_DEVICE
+				: DMA_FROM_DEVICE);
+
+	}
+}
+
 /*
  * Immediately complete a request.
  *
@@ -119,24 +172,8 @@ __acquires(ep->musb->lock)
 
 	ep->busy = 1;
 	spin_unlock(&musb->lock);
-	if (is_dma_capable()) {
-		if (req->mapped) {
-			dma_unmap_single(musb->controller,
-				req->request.dma,
-				req->request.length,
-				req->tx
-					? DMA_TO_DEVICE
-					: DMA_FROM_DEVICE);
-			req->request.dma = DMA_ADDR_INVALID;
-			req->mapped = 0;
-		} else if (req->request.dma != DMA_ADDR_INVALID)
-			dma_sync_single_for_cpu(musb->controller,
-				req->request.dma,
-				req->request.length,
-				req->tx
-					? DMA_TO_DEVICE
-					: DMA_FROM_DEVICE);
-	}
+	if (is_dma_capable() && ep->dma)
+		unmap_dma_buffer(req, musb);
 	if (request->status == 0)
 		DBG(5, "%s done request %p, %d/%d\n",
 			ep->end_point.name, request,
@@ -395,6 +432,13 @@ static void txstate(struct musb *musb, struct musb_request *req)
 #endif
 
 	if (!use_dma) {
+		/*
+		 * Unmap the dma buffer back to cpu if dma channel
+		 * programming fails
+		 */
+		if (is_dma_capable() && musb_ep->dma)
+			unmap_dma_buffer(req, musb);
+
 		musb_write_fifo(musb_ep->hw_ep, fifo_count,
 				(u8 *) (request->buf + request->actual));
 		request->actual += fifo_count;
@@ -644,10 +688,8 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 	 */
 
 				csr |= MUSB_RXCSR_DMAENAB;
-				if (!musb_ep->hb_mult &&
-					musb_ep->hw_ep->rx_double_buffered)
-					csr |= MUSB_RXCSR_AUTOCLEAR;
 #ifdef USE_MODE1
+				csr |= MUSB_RXCSR_AUTOCLEAR;
 				/* csr |= MUSB_RXCSR_DMAMODE; */
 
 				/* this special sequence (enabling and then
@@ -656,6 +698,10 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 				 */
 				musb_writew(epio, MUSB_RXCSR,
 					csr | MUSB_RXCSR_DMAMODE);
+#else
+				if (!musb_ep->hb_mult &&
+					musb_ep->hw_ep->rx_double_buffered)
+					csr |= MUSB_RXCSR_AUTOCLEAR;
 #endif
 				musb_writew(epio, MUSB_RXCSR, csr);
 
@@ -711,6 +757,21 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 				return;
 			}
 #endif
+			/*
+			 * Unmap the dma buffer back to cpu if dma channel
+			 * programming fails. This buffer is mapped if the
+			 * channel allocation is successful
+			 */
+			if (is_dma_capable() && musb_ep->dma) {
+				unmap_dma_buffer(req, musb);
+
+				/*
+				 * Clear DMAENAB and AUTOCLEAR for the
+				 * PIO mode transfer
+				 */
+				csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
+				musb_writew(epio, MUSB_RXCSR, csr);
+			}
 
 			musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
 					(request->buf + request->actual));
@@ -807,7 +868,7 @@ void musb_g_rx(struct musb *musb, u8 epnum)
 
 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
 		/* Autoclear doesn't clear RxPktRdy for short packets */
-		if ((dma->desired_mode == 0)
+		if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
 				|| (dma->actual_len
 					& (musb_ep->packet_sz - 1))) {
 			/* ack the read! */
@@ -818,8 +879,16 @@ void musb_g_rx(struct musb *musb, u8 epnum)
 		/* incomplete, and not short? wait for next IN packet */
 		if ((request->actual < request->length)
 				&& (musb_ep->dma->actual_len
-					== musb_ep->packet_sz))
+					== musb_ep->packet_sz)) {
+			/* In double buffer case, continue to unload fifo if
+			 * there is Rx packet in FIFO.
+			 **/
+			csr = musb_readw(epio, MUSB_RXCSR);
+			if ((csr & MUSB_RXCSR_RXPKTRDY) &&
+				hw_ep->rx_double_buffered)
+				goto exit;
 			return;
+		}
 #endif
 		musb_g_giveback(musb_ep, request, 0);
 
@@ -827,7 +896,9 @@ void musb_g_rx(struct musb *musb, u8 epnum)
 		if (!request)
 			return;
 	}
-
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
+exit:
+#endif
 	/* Analyze request */
 	rxstate(musb, to_musb_request(request));
 }
@@ -916,13 +987,9 @@ static int musb_gadget_enable(struct usb_ep *ep,
 		 * likewise high bandwidth periodic tx
 		 */
 		/* Set TXMAXP with the FIFO size of the endpoint
-		 * to disable double buffering mode. Currently, It seems that double
-		 * buffering has problem if musb RTL revision number < 2.0.
+		 * to disable double buffering mode.
 		 */
-		if (musb->hwvers < MUSB_HWVERS_2000)
-			musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
-		else
-			musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
+		musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
 
 		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
 		if (musb_readw(regs, MUSB_TXCSR)
@@ -958,10 +1025,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
 		/* Set RXMAXP with the FIFO size of the endpoint
 		 * to disable double buffering mode.
 		 */
-		if (musb->hwvers < MUSB_HWVERS_2000)
-			musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_rx);
-		else
-			musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
+		musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
 
 		/* force shared fifo to OUT-only mode */
 		if (hw_ep->is_shared_fifo) {
@@ -1147,28 +1211,9 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
 	request->epnum = musb_ep->current_epnum;
 	request->tx = musb_ep->is_in;
 
-	if (is_dma_capable() && musb_ep->dma) {
-		if (request->request.dma == DMA_ADDR_INVALID) {
-			request->request.dma = dma_map_single(
-					musb->controller,
-					request->request.buf,
-					request->request.length,
-					request->tx
-						? DMA_TO_DEVICE
-						: DMA_FROM_DEVICE);
-			request->mapped = 1;
-		} else {
-			dma_sync_single_for_device(musb->controller,
-					request->request.dma,
-					request->request.length,
-					request->tx
-						? DMA_TO_DEVICE
-						: DMA_FROM_DEVICE);
-			request->mapped = 0;
-		}
-	} else if (!req->buf) {
-		return -ENODATA;
-	} else
+	if (is_dma_capable() && musb_ep->dma)
+		map_dma_buffer(request, musb);
+	else
 		request->mapped = 0;
 
 	spin_lock_irqsave(&musb->lock, lockflags);
@@ -1695,8 +1740,10 @@ int __init musb_gadget_setup(struct musb *musb)
 	musb_platform_try_idle(musb, 0);
 
 	status = device_register(&musb->g.dev);
-	if (status != 0)
+	if (status != 0) {
+		put_device(&musb->g.dev);
 		the_gadget = NULL;
+	}
 	return status;
 }
 
@@ -1786,6 +1833,8 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
 	spin_unlock_irqrestore(&musb->lock, flags);
 
 	if (is_otg_enabled(musb)) {
+		struct usb_hcd *hcd = musb_to_hcd(musb);
+
 		DBG(3, "OTG startup...\n");
 
 		/* REVISIT: funcall to other code, which also
@@ -1800,6 +1849,8 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
 			musb->gadget_driver = NULL;
 			musb->g.dev.driver = NULL;
 			spin_unlock_irqrestore(&musb->lock, flags);
+		} else {
+			hcd->self.uses_pio_for_control = 1;
 		}
 	}
 }
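The map_dma_buffer()/unmap_dma_buffer() helpers added at the top of musb_gadget.c, and used from musb_gadget_queue(), the request-giveback path and the PIO fallbacks in txstate()/rxstate(), centralize one pattern: if the gadget driver supplied no DMA address (request.dma == DMA_ADDR_INVALID) the controller driver maps the buffer itself and remembers that it owns the mapping; otherwise it only passes ownership back and forth with dma_sync_single_for_{device,cpu}(). A generic kernel-style sketch of that ownership pattern (hypothetical my_xfer/my_map_for_device names, DMA_ADDR_INVALID defined locally to mirror the driver's convention; not MUSB code):

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	#define DMA_ADDR_INVALID	(~(dma_addr_t)0)	/* same convention as the driver */

	/* Hypothetical transfer descriptor, not a MUSB structure. */
	struct my_xfer {
		void		*buf;
		size_t		len;
		dma_addr_t	dma;		/* DMA_ADDR_INVALID if the caller did not map it */
		bool		we_mapped_it;
	};

	static int my_map_for_device(struct device *dev, struct my_xfer *x, bool to_device)
	{
		enum dma_data_direction dir = to_device ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

		if (x->dma == DMA_ADDR_INVALID) {
			/* No caller-provided mapping: create one and own it. */
			x->dma = dma_map_single(dev, x->buf, x->len, dir);
			if (dma_mapping_error(dev, x->dma))
				return -ENOMEM;
			x->we_mapped_it = true;
		} else {
			/* Caller already mapped it: just hand ownership to the device. */
			dma_sync_single_for_device(dev, x->dma, x->len, dir);
			x->we_mapped_it = false;
		}
		return 0;
	}

	static void my_unmap_for_cpu(struct device *dev, struct my_xfer *x, bool to_device)
	{
		enum dma_data_direction dir = to_device ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

		if (x->we_mapped_it) {
			/* We created the mapping, so tear it down completely. */
			dma_unmap_single(dev, x->dma, x->len, dir);
			x->dma = DMA_ADDR_INVALID;
			x->we_mapped_it = false;
		} else {
			/* Caller owns the mapping; only give the CPU a coherent view. */
			dma_sync_single_for_cpu(dev, x->dma, x->len, dir);
		}
	}

With the ownership flag recorded at map time, each completion or fallback site needs only one call (unmap_dma_buffer() in the driver, my_unmap_for_cpu() here) instead of open-coding the unmap-versus-sync decision everywhere.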
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index 244267527a60..5a727c5b8676 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -633,8 +633,9 @@ static inline u8 musb_read_txhubaddr(void __iomem *mbase, u8 epnum)
 	return 0;
 }
 
-static inline void musb_read_txhubport(void __iomem *mbase, u8 epnum)
+static inline u8 musb_read_txhubport(void __iomem *mbase, u8 epnum)
 {
+	return 0;
 }
 
 #endif /* CONFIG_BLACKFIN */
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 6f771af5cbdb..563114d613d6 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -158,6 +158,8 @@ static int dma_channel_program(struct dma_channel *channel,
 				dma_addr_t dma_addr, u32 len)
 {
 	struct musb_dma_channel *musb_channel = channel->private_data;
+	struct musb_dma_controller *controller = musb_channel->controller;
+	struct musb *musb = controller->private_data;
 
 	DBG(2, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n",
 		musb_channel->epnum,
@@ -167,6 +169,18 @@ static int dma_channel_program(struct dma_channel *channel,
 	BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
 		channel->status == MUSB_DMA_STATUS_BUSY);
 
+	/*
+	 * The DMA engine in RTL1.8 and above cannot handle
+	 * DMA addresses that are not aligned to a 4 byte boundary.
+	 * It ends up masking the last two bits of the address
+	 * programmed in DMA_ADDR.
+	 *
+	 * Fail such DMA transfers, so that the backup PIO mode
+	 * can carry out the transfer
+	 */
+	if ((musb->hwvers >= MUSB_HWVERS_1800) && (dma_addr % 4))
+		return false;
+
 	channel->actual_len = 0;
 	musb_channel->start_addr = dma_addr;
 	musb_channel->len = len;
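On the 4-byte alignment check added to dma_channel_program(): an engine that masks the low two bits of DMA_ADDR silently rounds the address down to the previous word boundary, so the transfer would start from the wrong place; rejecting the request lets the existing PIO path handle it instead. A standalone sketch of the masking effect and of the equivalent test (plain C, example address only):

	#include <stdio.h>

	int main(void)
	{
		unsigned long dma_addr = 0x1002;	/* example: 2 bytes past a word boundary */

		/* What an engine that drops the low two bits would actually use: */
		printf("programmed 0x%lx, engine would use 0x%lx\n",
		       dma_addr, dma_addr & ~3UL);

		/* The patch refuses such addresses so the caller falls back to PIO: */
		if (dma_addr % 4)	/* same condition as (dma_addr & 3) != 0 */
			printf("unaligned: refuse DMA, use PIO\n");
		return 0;
	}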