```
author     Linus Torvalds <torvalds@linux-foundation.org>  2009-04-17 16:53:00 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-04-17 16:53:00 -0400
commit     dd26bf6d95f050c42cc8f15e750b09851e1fd30b (patch)
tree       22d0a6f223297754bd1ca9f4de4e6daf1f0f6430 /drivers/usb/musb
parent     7217fa9851c99ffe43cee9e3ba4b81a34ce7bac4 (diff)
parent     fca10c81d99ff9956179058460dfddc0418f3902 (diff)
```
Merge git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6: (22 commits)
WUSB: correct format of wusb_chid sysfs file
WUSB: fix oops when completing URBs for disconnected devices
WUSB: disconnect all devices when stopping a WUSB HCD
USB: whci-hcd: check return value of usb_hcd_link_urb_to_ep()
USB: whci-hcd: provide a endpoint_reset method
USB: add reset endpoint operations
USB device codes for Motorola phone.
usb-storage: fix mistake in Makefile
USB: usb-serial ch341: support for DTR/RTS/CTS
Revert USB: usb-serial ch341: support for DTR/RTS/CTS
USB: musb: fix possible panic while resuming
USB: musb: fix isochronous TXDMA (take 2)
USB: musb: sanitize clearing TXCSR DMA bits (take 2)
USB: musb: bugfixes for multi-packet TXDMA support
USB: musb_host, fix ep0 fifo flushing
USB: usb-storage: augment unusual_devs entry for Simple Tech/Datafab
USB: musb_host, minor enqueue locking fix (v2)
USB: fix oops in cdc-wdm in case of malformed descriptors
USB: qcserial: Add extra device IDs
USB: option: Add ids for D-Link DWM-652 3.5G modem
...
Diffstat (limited to 'drivers/usb/musb')
```
-rw-r--r--   drivers/usb/musb/cppi_dma.c    |  23
-rw-r--r--   drivers/usb/musb/musb_core.c   |  12
-rw-r--r--   drivers/usb/musb/musb_gadget.c |  33
-rw-r--r--   drivers/usb/musb/musb_host.c   | 406
-rw-r--r--   drivers/usb/musb/musbhsdma.c   |  66

5 files changed, 291 insertions(+), 249 deletions(-)
```
```diff
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index 569ef0fed0f6..1976e9b41800 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -579,6 +579,7 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
 	 * trigger the "send a ZLP?" confusion.
 	 */
 	rndis = (maxpacket & 0x3f) == 0
+		&& length > maxpacket
 		&& length < 0xffff
 		&& (length % maxpacket) != 0;
 
@@ -1228,27 +1229,7 @@ void cppi_completion(struct musb *musb, u32 rx, u32 tx)
 
 			hw_ep = tx_ch->hw_ep;
 
-			/* Peripheral role never repurposes the
-			 * endpoint, so immediate completion is
-			 * safe.  Host role waits for the fifo
-			 * to empty (TXPKTRDY irq) before going
-			 * to the next queued bulk transfer.
-			 */
-			if (is_host_active(cppi->musb)) {
-#if 0
-				/* WORKAROUND because we may
-				 * not always get TXKPTRDY ...
-				 */
-				int	csr;
-
-				csr = musb_readw(hw_ep->regs,
-						MUSB_TXCSR);
-				if (csr & MUSB_TXCSR_TXPKTRDY)
-#endif
-					completed = false;
-			}
-			if (completed)
-				musb_dma_completion(musb, index + 1, 1);
+			musb_dma_completion(musb, index + 1, 1);
 
 		} else {
 			/* Bigger transfer than we could fit in
```
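The first cppi_dma.c hunk only tightens the test for picking CPPI's RNDIS TX mode. As a reading aid (not part of the patch; the free-standing helper and its name are made up here), the updated condition can be restated as:

```c
#include <stdbool.h>
#include <stdint.h>

/* Illustrative restatement of the fixed check in cppi_next_tx_segment(). */
static bool cppi_tx_wants_rndis_mode(uint16_t maxpacket, uint32_t length)
{
	return (maxpacket & 0x3f) == 0		/* maxpacket is a multiple of 64 */
		&& length > maxpacket		/* newly added: more than one packet */
		&& length < 0xffff		/* fits the 16-bit RNDIS transfer size */
		&& (length % maxpacket) != 0;	/* transfer ends in a short packet */
}
```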
```diff
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 338cd1611ab3..0112353ec97d 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2170,26 +2170,22 @@ static int musb_suspend(struct platform_device *pdev, pm_message_t message)
 	return 0;
 }
 
-static int musb_resume(struct platform_device *pdev)
+static int musb_resume_early(struct platform_device *pdev)
 {
-	unsigned long	flags;
 	struct musb	*musb = dev_to_musb(&pdev->dev);
 
 	if (!musb->clock)
 		return 0;
 
-	spin_lock_irqsave(&musb->lock, flags);
-
 	if (musb->set_clock)
 		musb->set_clock(musb->clock, 1);
 	else
 		clk_enable(musb->clock);
 
 	/* for static cmos like DaVinci, register values were preserved
-	 * unless for some reason the whole soc powered down and we're
-	 * not treating that as a whole-system restart (e.g. swsusp)
+	 * unless for some reason the whole soc powered down or the USB
+	 * module got reset through the PSC (vs just being disabled).
 	 */
-	spin_unlock_irqrestore(&musb->lock, flags);
 	return 0;
 }
 
@@ -2207,7 +2203,7 @@ static struct platform_driver musb_driver = {
 	.remove		= __devexit_p(musb_remove),
 	.shutdown	= musb_shutdown,
 	.suspend	= musb_suspend,
-	.resume		= musb_resume,
+	.resume_early	= musb_resume_early,
 };
 
 /*-------------------------------------------------------------------------*/
```
```diff
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index c7ebd0867fcc..f79440cdfe7e 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -165,9 +165,15 @@ static void nuke(struct musb_ep *ep, const int status)
 	if (is_dma_capable() && ep->dma) {
 		struct dma_controller	*c = ep->musb->dma_controller;
 		int value;
+
 		if (ep->is_in) {
+			/*
+			 * The programming guide says that we must not clear
+			 * the DMAMODE bit before DMAENAB, so we only
+			 * clear it in the second write...
+			 */
 			musb_writew(epio, MUSB_TXCSR,
-				0 | MUSB_TXCSR_FLUSHFIFO);
+			    MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
 			musb_writew(epio, MUSB_TXCSR,
 				0 | MUSB_TXCSR_FLUSHFIFO);
 		} else {
@@ -230,7 +236,7 @@ static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
 	| IN token(s) are recd from Host.
 	| 	-> DMA interrupt on completion
 	| 	   calls TxAvail.
-	| 	      -> stop DMA, ~DmaEenab,
+	| 	      -> stop DMA, ~DMAENAB,
 	| 	      -> set TxPktRdy for last short pkt or zlp
 	| 	      -> Complete Request
 	| 	      -> Continue next request (call txstate)
@@ -315,9 +321,17 @@ static void txstate(struct musb *musb, struct musb_request *req)
 					request->dma, request_size);
 			if (use_dma) {
 				if (musb_ep->dma->desired_mode == 0) {
-					/* ASSERT: DMAENAB is clear */
-					csr &= ~(MUSB_TXCSR_AUTOSET |
-							MUSB_TXCSR_DMAMODE);
+					/*
+					 * We must not clear the DMAMODE bit
+					 * before the DMAENAB bit -- and the
+					 * latter doesn't always get cleared
+					 * before we get here...
+					 */
+					csr &= ~(MUSB_TXCSR_AUTOSET
+						| MUSB_TXCSR_DMAENAB);
+					musb_writew(epio, MUSB_TXCSR, csr
+						| MUSB_TXCSR_P_WZC_BITS);
+					csr &= ~MUSB_TXCSR_DMAMODE;
 					csr |= (MUSB_TXCSR_DMAENAB |
 							MUSB_TXCSR_MODE);
 					/* against programming guide */
@@ -334,10 +348,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
 
 #elif defined(CONFIG_USB_TI_CPPI_DMA)
 		/* program endpoint CSR first, then setup DMA */
-		csr &= ~(MUSB_TXCSR_AUTOSET
-				| MUSB_TXCSR_DMAMODE
-				| MUSB_TXCSR_P_UNDERRUN
-				| MUSB_TXCSR_TXPKTRDY);
+		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
 		csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_DMAENAB;
 		musb_writew(epio, MUSB_TXCSR,
 			(MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
@@ -364,8 +375,8 @@ static void txstate(struct musb *musb, struct musb_request *req)
 		if (!use_dma) {
 			c->channel_release(musb_ep->dma);
 			musb_ep->dma = NULL;
-			/* ASSERT: DMAENAB clear */
-			csr &= ~(MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
+			csr &= ~MUSB_TXCSR_DMAENAB;
+			musb_writew(epio, MUSB_TXCSR, csr);
 			/* invariant: prequest->buf is non-null */
 		}
 #elif defined(CONFIG_USB_TUSB_OMAP_DMA)
```
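The common thread in these musb_gadget.c hunks (and in the host and DMA changes below) is the TXCSR write ordering from the MUSB programming guide: DMAENAB must be dropped first, and DMAMODE only in a later write, never in the same cycle. A minimal sketch of that ordering, assuming the driver's usual musb_readw()/musb_writew() accessors; the helper itself is hypothetical and not part of the patch:

```c
/* Hypothetical helper -- sketches the two-step clearing order only. */
static void musb_txcsr_clear_dma_bits(void __iomem *epio)
{
	u16 csr = musb_readw(epio, MUSB_TXCSR);

	/* First write: clear DMAENAB (and AUTOSET) while DMAMODE is still set. */
	csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
	musb_writew(epio, MUSB_TXCSR, csr);

	/* Second write: only now is it safe to clear DMAMODE. */
	csr &= ~MUSB_TXCSR_DMAMODE;
	musb_writew(epio, MUSB_TXCSR, csr);
}
```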
```diff
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 499c431a6d62..db1b57415ec7 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -4,6 +4,7 @@
  * Copyright 2005 Mentor Graphics Corporation
  * Copyright (C) 2005-2006 by Texas Instruments
  * Copyright (C) 2006-2007 Nokia Corporation
+ * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -96,8 +97,8 @@
 
 
 static void musb_ep_program(struct musb *musb, u8 epnum,
-			struct urb *urb, unsigned int nOut,
-			u8 *buf, u32 len);
+			struct urb *urb, int is_out,
+			u8 *buf, u32 offset, u32 len);
 
 /*
  * Clear TX fifo. Needed to avoid BABBLE errors.
@@ -125,6 +126,29 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
 	}
 }
 
+static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
+{
+	void __iomem	*epio = ep->regs;
+	u16		csr;
+	int		retries = 5;
+
+	/* scrub any data left in the fifo */
+	do {
+		csr = musb_readw(epio, MUSB_TXCSR);
+		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
+			break;
+		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
+		csr = musb_readw(epio, MUSB_TXCSR);
+		udelay(10);
+	} while (--retries);
+
+	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
+			ep->epnum, csr);
+
+	/* and reset for the next transfer */
+	musb_writew(epio, MUSB_TXCSR, 0);
+}
+
 /*
  * Start transmit. Caller is responsible for locking shared resources.
  * musb must be locked.
@@ -145,13 +169,15 @@ static inline void musb_h_tx_start(struct musb_hw_ep *ep)
 
 }
 
-static inline void cppi_host_txdma_start(struct musb_hw_ep *ep)
+static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
 {
 	u16	txcsr;
 
 	/* NOTE: no locks here; caller should lock and select EP */
 	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
 	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
+	if (is_cppi_enabled())
+		txcsr |= MUSB_TXCSR_DMAMODE;
 	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
 }
 
@@ -166,9 +192,10 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
 {
 	u16			frame;
 	u32			len;
-	void			*buf;
 	void __iomem		*mbase = musb->mregs;
 	struct urb		*urb = next_urb(qh);
+	void			*buf = urb->transfer_buffer;
+	u32			offset = 0;
 	struct musb_hw_ep	*hw_ep = qh->hw_ep;
 	unsigned		pipe = urb->pipe;
 	u8			address = usb_pipedevice(pipe);
@@ -191,7 +218,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
 	case USB_ENDPOINT_XFER_ISOC:
 		qh->iso_idx = 0;
 		qh->frame = 0;
-		buf = urb->transfer_buffer + urb->iso_frame_desc[0].offset;
+		offset = urb->iso_frame_desc[0].offset;
 		len = urb->iso_frame_desc[0].length;
 		break;
 	default:		/* bulk, interrupt */
@@ -209,14 +236,14 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
 			case USB_ENDPOINT_XFER_ISOC:	s = "-iso"; break;
 			default:			s = "-intr"; break;
 			}; s; }),
-			epnum, buf, len);
+			epnum, buf + offset, len);
 
 	/* Configure endpoint */
 	if (is_in || hw_ep->is_shared_fifo)
 		hw_ep->in_qh = qh;
 	else
 		hw_ep->out_qh = qh;
-	musb_ep_program(musb, epnum, urb, !is_in, buf, len);
+	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);
 
 	/* transmit may have more work: start it when it is time */
 	if (is_in)
@@ -227,7 +254,6 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
 	case USB_ENDPOINT_XFER_ISOC:
 	case USB_ENDPOINT_XFER_INT:
 		DBG(3, "check whether there's still time for periodic Tx\n");
-		qh->iso_idx = 0;
 		frame = musb_readw(mbase, MUSB_FRAME);
 		/* FIXME this doesn't implement that scheduling policy ...
 		 * or handle framecounter wrapping
@@ -256,7 +282,7 @@ start:
 		if (!hw_ep->tx_channel)
 			musb_h_tx_start(hw_ep);
 		else if (is_cppi_enabled() || tusb_dma_omap())
-			cppi_host_txdma_start(hw_ep);
+			musb_h_tx_dma_start(hw_ep);
 	}
 }
 
@@ -567,10 +593,17 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
 		csr = musb_readw(ep->regs, MUSB_TXCSR);
 		if (csr & MUSB_TXCSR_MODE) {
 			musb_h_tx_flush_fifo(ep);
+			csr = musb_readw(ep->regs, MUSB_TXCSR);
 			musb_writew(ep->regs, MUSB_TXCSR,
-					MUSB_TXCSR_FRCDATATOG);
+				    csr | MUSB_TXCSR_FRCDATATOG);
 		}
-		/* clear mode (and everything else) to enable Rx */
+
+		/*
+		 * Clear the MODE bit (and everything else) to enable Rx.
+		 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
+		 */
+		if (csr & MUSB_TXCSR_DMAMODE)
+			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
 		musb_writew(ep->regs, MUSB_TXCSR, 0);
 
 	/* scrub all previous state, clearing toggle */
@@ -601,14 +634,68 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
 	ep->rx_reinit = 0;
 }
 
+static bool musb_tx_dma_program(struct dma_controller *dma,
+		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
+		struct urb *urb, u32 offset, u32 length)
+{
+	struct dma_channel	*channel = hw_ep->tx_channel;
+	void __iomem		*epio = hw_ep->regs;
+	u16			pkt_size = qh->maxpacket;
+	u16			csr;
+	u8			mode;
+
+#ifdef	CONFIG_USB_INVENTRA_DMA
+	if (length > channel->max_len)
+		length = channel->max_len;
+
+	csr = musb_readw(epio, MUSB_TXCSR);
+	if (length > pkt_size) {
+		mode = 1;
+		csr |= MUSB_TXCSR_AUTOSET
+			| MUSB_TXCSR_DMAMODE
+			| MUSB_TXCSR_DMAENAB;
+	} else {
+		mode = 0;
+		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
+		csr |= MUSB_TXCSR_DMAENAB;	/* against programmer's guide */
+	}
+	channel->desired_mode = mode;
+	musb_writew(epio, MUSB_TXCSR, csr);
+#else
+	if (!is_cppi_enabled() && !tusb_dma_omap())
+		return false;
+
+	channel->actual_len = 0;
+
+	/*
+	 * TX uses "RNDIS" mode automatically but needs help
+	 * to identify the zero-length-final-packet case.
+	 */
+	mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
+#endif
+
+	qh->segsize = length;
+
+	if (!dma->channel_program(channel, pkt_size, mode,
+			urb->transfer_dma + offset, length)) {
+		dma->channel_release(channel);
+		hw_ep->tx_channel = NULL;
+
+		csr = musb_readw(epio, MUSB_TXCSR);
+		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
+		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
+		return false;
+	}
+	return true;
+}
 
 /*
  * Program an HDRC endpoint as per the given URB
  * Context: irqs blocked, controller lock held
  */
 static void musb_ep_program(struct musb *musb, u8 epnum,
-			struct urb *urb, unsigned int is_out,
-			u8 *buf, u32 len)
+			struct urb *urb, int is_out,
+			u8 *buf, u32 offset, u32 len)
 {
 	struct dma_controller	*dma_controller;
 	struct dma_channel	*dma_channel;
@@ -667,12 +754,17 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
 
 	/* general endpoint setup */
 	if (epnum) {
-		/* ASSERT:  TXCSR_DMAENAB was already cleared */
-
 		/* flush all old state, set default */
 		musb_h_tx_flush_fifo(hw_ep);
+
+		/*
+		 * We must not clear the DMAMODE bit before or in
+		 * the same cycle with the DMAENAB bit, so we clear
+		 * the latter first...
+		 */
 		csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
-				| MUSB_TXCSR_DMAMODE
+				| MUSB_TXCSR_AUTOSET
+				| MUSB_TXCSR_DMAENAB
 				| MUSB_TXCSR_FRCDATATOG
 				| MUSB_TXCSR_H_RXSTALL
 				| MUSB_TXCSR_H_ERROR
@@ -680,24 +772,20 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
 			);
 		csr |= MUSB_TXCSR_MODE;
 
-		if (usb_gettoggle(urb->dev,
-			qh->epnum, 1))
+		if (usb_gettoggle(urb->dev, qh->epnum, 1))
 			csr |= MUSB_TXCSR_H_WR_DATATOGGLE
 				| MUSB_TXCSR_H_DATATOGGLE;
 		else
 			csr |= MUSB_TXCSR_CLRDATATOG;
 
-		/* twice in case of double packet buffering */
 		musb_writew(epio, MUSB_TXCSR, csr);
 		/* REVISIT may need to clear FLUSHFIFO ... */
+		csr &= ~MUSB_TXCSR_DMAMODE;
 		musb_writew(epio, MUSB_TXCSR, csr);
 		csr = musb_readw(epio, MUSB_TXCSR);
 	} else {
 		/* endpoint 0: just flush */
-		musb_writew(epio, MUSB_CSR0,
-			csr | MUSB_CSR0_FLUSHFIFO);
-		musb_writew(epio, MUSB_CSR0,
-			csr | MUSB_CSR0_FLUSHFIFO);
+		musb_h_ep0_flush_fifo(hw_ep);
 	}
 
 	/* target addr and (for multipoint) hub addr/port */
@@ -734,113 +822,14 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
 		else
 			load_count = min((u32) packet_sz, len);
 
-#ifdef CONFIG_USB_INVENTRA_DMA
-		if (dma_channel) {
-
-			/* clear previous state */
-			csr = musb_readw(epio, MUSB_TXCSR);
-			csr &= ~(MUSB_TXCSR_AUTOSET
-				| MUSB_TXCSR_DMAMODE
-				| MUSB_TXCSR_DMAENAB);
-			csr |= MUSB_TXCSR_MODE;
-			musb_writew(epio, MUSB_TXCSR,
-				csr | MUSB_TXCSR_MODE);
-
-			qh->segsize = min(len, dma_channel->max_len);
-
-			if (qh->segsize <= packet_sz)
-				dma_channel->desired_mode = 0;
-			else
-				dma_channel->desired_mode = 1;
-
-
-			if (dma_channel->desired_mode == 0) {
-				csr &= ~(MUSB_TXCSR_AUTOSET
-					| MUSB_TXCSR_DMAMODE);
-				csr |= (MUSB_TXCSR_DMAENAB);
-					/* against programming guide */
-			} else
-				csr |= (MUSB_TXCSR_AUTOSET
-					| MUSB_TXCSR_DMAENAB
-					| MUSB_TXCSR_DMAMODE);
-
-			musb_writew(epio, MUSB_TXCSR, csr);
-
-			dma_ok = dma_controller->channel_program(
-					dma_channel, packet_sz,
-					dma_channel->desired_mode,
-					urb->transfer_dma,
-					qh->segsize);
-			if (dma_ok) {
-				load_count = 0;
-			} else {
-				dma_controller->channel_release(dma_channel);
-				if (is_out)
-					hw_ep->tx_channel = NULL;
-				else
-					hw_ep->rx_channel = NULL;
-				dma_channel = NULL;
-			}
-		}
-#endif
-
-		/* candidate for DMA */
-		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
-
-			/* program endpoint CSRs first, then setup DMA.
-			 * assume CPPI setup succeeds.
-			 * defer enabling dma.
-			 */
-			csr = musb_readw(epio, MUSB_TXCSR);
-			csr &= ~(MUSB_TXCSR_AUTOSET
-					| MUSB_TXCSR_DMAMODE
-					| MUSB_TXCSR_DMAENAB);
-			csr |= MUSB_TXCSR_MODE;
-			musb_writew(epio, MUSB_TXCSR,
-				csr | MUSB_TXCSR_MODE);
-
-			dma_channel->actual_len = 0L;
-			qh->segsize = len;
-
-			/* TX uses "rndis" mode automatically, but needs help
-			 * to identify the zero-length-final-packet case.
-			 */
-			dma_ok = dma_controller->channel_program(
-					dma_channel, packet_sz,
-					(urb->transfer_flags
-							& URB_ZERO_PACKET)
-						== URB_ZERO_PACKET,
-					urb->transfer_dma,
-					qh->segsize);
-			if (dma_ok) {
-				load_count = 0;
-			} else {
-				dma_controller->channel_release(dma_channel);
-				hw_ep->tx_channel = NULL;
-				dma_channel = NULL;
-
-				/* REVISIT there's an error path here that
-				 * needs handling:  can't do dma, but
-				 * there's no pio buffer address...
-				 */
-			}
-		}
+		if (dma_channel && musb_tx_dma_program(dma_controller,
+					hw_ep, qh, urb, offset, len))
+			load_count = 0;
 
 		if (load_count) {
-			/* ASSERT:  TXCSR_DMAENAB was already cleared */
-
 			/* PIO to load FIFO */
 			qh->segsize = load_count;
 			musb_write_fifo(hw_ep, load_count, buf);
-			csr = musb_readw(epio, MUSB_TXCSR);
-			csr &= ~(MUSB_TXCSR_DMAENAB
-				| MUSB_TXCSR_DMAMODE
-				| MUSB_TXCSR_AUTOSET);
-			/* write CSR */
-			csr |= MUSB_TXCSR_MODE;
-
-			if (epnum)
-				musb_writew(epio, MUSB_TXCSR, csr);
 		}
 
 		/* re-enable interrupt */
@@ -895,7 +884,7 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
 					dma_channel, packet_sz,
 					!(urb->transfer_flags
 						& URB_SHORT_NOT_OK),
-					urb->transfer_dma,
+					urb->transfer_dma + offset,
 					qh->segsize);
 			if (!dma_ok) {
 				dma_controller->channel_release(
@@ -1063,11 +1052,7 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
 			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
 			musb_writew(epio, MUSB_CSR0, csr);
 		} else {
-			csr |= MUSB_CSR0_FLUSHFIFO;
-			musb_writew(epio, MUSB_CSR0, csr);
-			musb_writew(epio, MUSB_CSR0, csr);
-			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
-			musb_writew(epio, MUSB_CSR0, csr);
+			musb_h_ep0_flush_fifo(hw_ep);
 		}
 
 		musb_writeb(epio, MUSB_NAKLIMIT0, 0);
@@ -1081,10 +1066,7 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
 		 * SHOULD NEVER HAPPEN! */
 		ERR("no URB for end 0\n");
 
-		musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
-		musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
-		musb_writew(epio, MUSB_CSR0, 0);
-
+		musb_h_ep0_flush_fifo(hw_ep);
 		goto done;
 	}
 
@@ -1145,8 +1127,8 @@ void musb_host_tx(struct musb *musb, u8 epnum)
 	int			pipe;
 	bool			done = false;
 	u16			tx_csr;
-	size_t			wLength = 0;
-	u8			*buf = NULL;
+	size_t			length = 0;
+	size_t			offset = 0;
 	struct urb		*urb;
 	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
 	void __iomem		*epio = hw_ep->regs;
@@ -1164,7 +1146,7 @@ void musb_host_tx(struct musb *musb, u8 epnum)
 	/* with CPPI, DMA sometimes triggers "extra" irqs */
 	if (!urb) {
 		DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
-		goto finish;
+		return;
 	}
 
 	pipe = urb->pipe;
@@ -1201,7 +1183,7 @@ void musb_host_tx(struct musb *musb, u8 epnum)
 		musb_writew(epio, MUSB_TXCSR,
 				MUSB_TXCSR_H_WZC_BITS
 				| MUSB_TXCSR_TXPKTRDY);
-		goto finish;
+		return;
 	}
 
 	if (status) {
@@ -1233,29 +1215,89 @@ void musb_host_tx(struct musb *musb, u8 epnum)
 	/* second cppi case */
 	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
 		DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
-		goto finish;
+		return;
+	}
+
+	if (is_dma_capable() && dma && !status) {
+		/*
+		 * DMA has completed.  But if we're using DMA mode 1 (multi
+		 * packet DMA), we need a terminal TXPKTRDY interrupt before
+		 * we can consider this transfer completed, lest we trash
+		 * its last packet when writing the next URB's data.  So we
+		 * switch back to mode 0 to get that interrupt; we'll come
+		 * back here once it happens.
+		 */
+		if (tx_csr & MUSB_TXCSR_DMAMODE) {
+			/*
+			 * We shouldn't clear DMAMODE with DMAENAB set; so
+			 * clear them in a safe order.  That should be OK
+			 * once TXPKTRDY has been set (and I've never seen
+			 * it being 0 at this moment -- DMA interrupt latency
+			 * is significant) but if it hasn't been then we have
+			 * no choice but to stop being polite and ignore the
+			 * programmer's guide... :-)
+			 *
+			 * Note that we must write TXCSR with TXPKTRDY cleared
+			 * in order not to re-trigger the packet send (this bit
+			 * can't be cleared by CPU), and there's another caveat:
+			 * TXPKTRDY may be set shortly and then cleared in the
+			 * double-buffered FIFO mode, so we do an extra TXCSR
+			 * read for debouncing...
+			 */
+			tx_csr &= musb_readw(epio, MUSB_TXCSR);
+			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
+				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
+					    MUSB_TXCSR_TXPKTRDY);
+				musb_writew(epio, MUSB_TXCSR,
+					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
+			}
+			tx_csr &= ~(MUSB_TXCSR_DMAMODE |
+				    MUSB_TXCSR_TXPKTRDY);
+			musb_writew(epio, MUSB_TXCSR,
+				    tx_csr | MUSB_TXCSR_H_WZC_BITS);
+
+			/*
+			 * There is no guarantee that we'll get an interrupt
+			 * after clearing DMAMODE as we might have done this
+			 * too late (after TXPKTRDY was cleared by controller).
+			 * Re-read TXCSR as we have spoiled its previous value.
+			 */
+			tx_csr = musb_readw(epio, MUSB_TXCSR);
+		}
 
+		/*
+		 * We may get here from a DMA completion or TXPKTRDY interrupt.
+		 * In any case, we must check the FIFO status here and bail out
+		 * only if the FIFO still has data -- that should prevent the
+		 * "missed" TXPKTRDY interrupts and deal with double-buffered
+		 * FIFO mode too...
+		 */
+		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
+			DBG(2, "DMA complete but packet still in FIFO, "
+			       "CSR %04x\n", tx_csr);
+			return;
+		}
 	}
 
-	/* REVISIT this looks wrong... */
 	if (!status || dma || usb_pipeisoc(pipe)) {
 		if (dma)
-			wLength = dma->actual_len;
+			length = dma->actual_len;
 		else
-			wLength = qh->segsize;
-		qh->offset += wLength;
+			length = qh->segsize;
+		qh->offset += length;
 
 		if (usb_pipeisoc(pipe)) {
 			struct usb_iso_packet_descriptor	*d;
 
 			d = urb->iso_frame_desc + qh->iso_idx;
-			d->actual_length = qh->segsize;
+			d->actual_length = length;
+			d->status = status;
 			if (++qh->iso_idx >= urb->number_of_packets) {
 				done = true;
 			} else {
 				d++;
-				buf = urb->transfer_buffer + d->offset;
-				wLength = d->length;
+				offset = d->offset;
+				length = d->length;
 			}
 		} else if (dma) {
 			done = true;
@@ -1268,10 +1310,8 @@ void musb_host_tx(struct musb *musb, u8 epnum)
 					& URB_ZERO_PACKET))
 				done = true;
 			if (!done) {
-				buf = urb->transfer_buffer
-						+ qh->offset;
-				wLength = urb->transfer_buffer_length
-						- qh->offset;
+				offset = qh->offset;
+				length = urb->transfer_buffer_length - offset;
 			}
 		}
 	}
@@ -1290,28 +1330,31 @@ void musb_host_tx(struct musb *musb, u8 epnum)
 		urb->status = status;
 		urb->actual_length = qh->offset;
 		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
+		return;
+	} else if (usb_pipeisoc(pipe) && dma) {
+		if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
+					offset, length))
+			return;
+	} else if (tx_csr & MUSB_TXCSR_DMAENAB) {
+		DBG(1, "not complete, but DMA enabled?\n");
+		return;
+	}
 
-	} else if (!(tx_csr & MUSB_TXCSR_DMAENAB)) {
-		/* WARN_ON(!buf); */
-
-		/* REVISIT: some docs say that when hw_ep->tx_double_buffered,
-		 * (and presumably, fifo is not half-full) we should write TWO
-		 * packets before updating TXCSR ... other docs disagree ...
-		 */
-		/* PIO: start next packet in this URB */
-		if (wLength > qh->maxpacket)
-			wLength = qh->maxpacket;
-		musb_write_fifo(hw_ep, wLength, buf);
-		qh->segsize = wLength;
-
-		musb_ep_select(mbase, epnum);
-		musb_writew(epio, MUSB_TXCSR,
-				MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
-	} else
-		DBG(1, "not complete, but dma enabled?\n");
+	/*
+	 * PIO: start next packet in this URB.
+	 *
+	 * REVISIT: some docs say that when hw_ep->tx_double_buffered,
+	 * (and presumably, FIFO is not half-full) we should write *two*
+	 * packets before updating TXCSR; other docs disagree...
+	 */
+	if (length > qh->maxpacket)
+		length = qh->maxpacket;
+	musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
+	qh->segsize = length;
 
-finish:
-	return;
+	musb_ep_select(mbase, epnum);
+	musb_writew(epio, MUSB_TXCSR,
+			MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
 }
 
 
@@ -1841,7 +1884,7 @@ static int musb_urb_enqueue(
 	unsigned long			flags;
 	struct musb			*musb = hcd_to_musb(hcd);
 	struct usb_host_endpoint	*hep = urb->ep;
-	struct musb_qh			*qh = hep->hcpriv;
+	struct musb_qh			*qh;
 	struct usb_endpoint_descriptor	*epd = &hep->desc;
 	int				ret;
 	unsigned			type_reg;
@@ -1853,22 +1896,21 @@ static int musb_urb_enqueue(
 
 	spin_lock_irqsave(&musb->lock, flags);
 	ret = usb_hcd_link_urb_to_ep(hcd, urb);
+	qh = ret ? NULL : hep->hcpriv;
+	if (qh)
+		urb->hcpriv = qh;
 	spin_unlock_irqrestore(&musb->lock, flags);
-	if (ret)
-		return ret;
 
 	/* DMA mapping was already done, if needed, and this urb is on
-	 * hep->urb_list ... so there's little to do unless hep wasn't
-	 * yet scheduled onto a live qh.
+	 * hep->urb_list now ... so we're done, unless hep wasn't yet
+	 * scheduled onto a live qh.
 	 *
 	 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
 	 * disabled, testing for empty qh->ring and avoiding qh setup costs
 	 * except for the first urb queued after a config change.
 	 */
-	if (qh) {
-		urb->hcpriv = qh;
-		return 0;
-	}
+	if (qh || ret)
+		return ret;
 
 	/* Allocate and initialize qh, minimizing the work done each time
 	 * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
@@ -2044,7 +2086,7 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
 		 * endpoint's irq status here to avoid bogus irqs.
 		 * clearing that status is platform-specific...
 		 */
-	} else {
+	} else if (ep->epnum) {
 		musb_h_tx_flush_fifo(ep);
 		csr = musb_readw(epio, MUSB_TXCSR);
 		csr &= ~(MUSB_TXCSR_AUTOSET
@@ -2058,6 +2100,8 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
 		musb_writew(epio, MUSB_TXCSR, csr);
 		/* flush cpu writebuffer */
 		csr = musb_readw(epio, MUSB_TXCSR);
+	} else {
+		musb_h_ep0_flush_fifo(ep);
 	}
 	if (status == 0)
 		musb_advance_schedule(ep->musb, urb, ep, is_in);
```
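For host TX, the new musb_tx_dma_program() above folds the per-controller DMA setup into one place. Its Inventra-DMA mode choice can be paraphrased as below; this is illustrative only, the function is not part of the patch, and it ignores the CPPI/TUSB branch:

```c
/* Hypothetical paraphrase of the Inventra DMA mode selection for host TX. */
static u8 musb_host_tx_dma_mode(u32 length, u16 pkt_size, u16 *csr)
{
	if (length > pkt_size) {
		/* DMA mode 1: hardware splits the transfer into packets. */
		*csr |= MUSB_TXCSR_AUTOSET
			| MUSB_TXCSR_DMAMODE
			| MUSB_TXCSR_DMAENAB;
		return 1;
	}
	/* DMA mode 0: one packet per channel_program() call. */
	*csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
	*csr |= MUSB_TXCSR_DMAENAB;	/* against the programmer's guide */
	return 0;
}
```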
```diff
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 8662e9e159c3..5e83f96d6b77 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -195,30 +195,32 @@ static int dma_channel_abort(struct dma_channel *channel)
 	void __iomem *mbase = musb_channel->controller->base;
 
 	u8 bchannel = musb_channel->idx;
+	int offset;
 	u16 csr;
 
 	if (channel->status == MUSB_DMA_STATUS_BUSY) {
 		if (musb_channel->transmit) {
-
-			csr = musb_readw(mbase,
-				MUSB_EP_OFFSET(musb_channel->epnum,
-						MUSB_TXCSR));
-			csr &= ~(MUSB_TXCSR_AUTOSET |
-				 MUSB_TXCSR_DMAENAB |
-				 MUSB_TXCSR_DMAMODE);
-			musb_writew(mbase,
-				MUSB_EP_OFFSET(musb_channel->epnum, MUSB_TXCSR),
-				csr);
+			offset = MUSB_EP_OFFSET(musb_channel->epnum,
+						MUSB_TXCSR);
+
+			/*
+			 * The programming guide says that we must clear
+			 * the DMAENAB bit before the DMAMODE bit...
+			 */
+			csr = musb_readw(mbase, offset);
+			csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
+			musb_writew(mbase, offset, csr);
+			csr &= ~MUSB_TXCSR_DMAMODE;
+			musb_writew(mbase, offset, csr);
 		} else {
-			csr = musb_readw(mbase,
-				MUSB_EP_OFFSET(musb_channel->epnum,
-						MUSB_RXCSR));
+			offset = MUSB_EP_OFFSET(musb_channel->epnum,
+						MUSB_RXCSR);
+
+			csr = musb_readw(mbase, offset);
 			csr &= ~(MUSB_RXCSR_AUTOCLEAR |
 				 MUSB_RXCSR_DMAENAB |
 				 MUSB_RXCSR_DMAMODE);
-			musb_writew(mbase,
-				MUSB_EP_OFFSET(musb_channel->epnum, MUSB_RXCSR),
-				csr);
+			musb_writew(mbase, offset, csr);
 		}
 
 		musb_writew(mbase,
@@ -296,20 +298,28 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
 				    && ((channel->desired_mode == 0)
 					|| (channel->actual_len &
 					    (musb_channel->max_packet_sz - 1)))
 				    ) {
+					u8  epnum = musb_channel->epnum;
+					int offset = MUSB_EP_OFFSET(epnum,
+								    MUSB_TXCSR);
+					u16 txcsr;
+
+					/*
+					 * The programming guide says that we
+					 * must clear DMAENAB before DMAMODE.
+					 */
+					musb_ep_select(mbase, epnum);
+					txcsr = musb_readw(mbase, offset);
+					txcsr &= ~(MUSB_TXCSR_DMAENAB
+							| MUSB_TXCSR_AUTOSET);
+					musb_writew(mbase, offset, txcsr);
 					/* Send out the packet */
-					musb_ep_select(mbase,
-						musb_channel->epnum);
-					musb_writew(mbase, MUSB_EP_OFFSET(
-							musb_channel->epnum,
-							MUSB_TXCSR),
-						MUSB_TXCSR_TXPKTRDY);
-				} else {
-					musb_dma_completion(
-						musb,
-						musb_channel->epnum,
-						musb_channel->transmit);
+					txcsr &= ~MUSB_TXCSR_DMAMODE;
+					txcsr |= MUSB_TXCSR_TXPKTRDY;
+					musb_writew(mbase, offset, txcsr);
 				}
+				musb_dma_completion(musb, musb_channel->epnum,
+						    musb_channel->transmit);
 			}
 		}
 	}
```
