author    Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
committer Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
commit    c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree      ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/usb/host/imx21-hcd.c
parent    ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent    6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
	litmus/sched_cedf.c
Diffstat (limited to 'drivers/usb/host/imx21-hcd.c')
-rw-r--r--  drivers/usb/host/imx21-hcd.c | 297
1 file changed, 225 insertions(+), 72 deletions(-)
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index 3e5630369c31..af05718bdc73 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -27,8 +27,8 @@
  * * 32 transfer descriptors (called ETDs)
  * * 4Kb of Data memory
  *
- * The data memory is shared between the host and fuction controlers
- * (but this driver only supports the host controler)
+ * The data memory is shared between the host and function controllers
+ * (but this driver only supports the host controller)
  *
  * So setting up a transfer involves:
  * * Allocating a ETD
@@ -57,6 +57,7 @@
 #include <linux/slab.h>
 #include <linux/usb.h>
 #include <linux/usb/hcd.h>
+#include <linux/dma-mapping.h>
 
 #include "imx21-hcd.h"
 
@@ -136,9 +137,18 @@ static int imx21_hc_get_frame(struct usb_hcd *hcd)
 	return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
 }
 
+static inline bool unsuitable_for_dma(dma_addr_t addr)
+{
+	return (addr & 3) != 0;
+}
 
 #include "imx21-dbg.c"
 
+static void nonisoc_urb_completed_for_etd(
+	struct imx21 *imx21, struct etd_priv *etd, int status);
+static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
+static void free_dmem(struct imx21 *imx21, struct etd_priv *etd);
+
 /* =========================================== */
 /* ETD management */
 /* =========================================== */
@@ -185,7 +195,8 @@ static void reset_etd(struct imx21 *imx21, int num)
 		etd_writel(imx21, num, i, 0);
 	etd->urb = NULL;
 	etd->ep = NULL;
-	etd->td = NULL;;
+	etd->td = NULL;
+	etd->bounce_buffer = NULL;
 }
 
 static void free_etd(struct imx21 *imx21, int num)
@@ -221,26 +232,94 @@ static void setup_etd_dword0(struct imx21 *imx21,
 		((u32) maxpacket << DW0_MAXPKTSIZ));
 }
 
-static void activate_etd(struct imx21 *imx21,
-	int etd_num, dma_addr_t dma, u8 dir)
+/**
+ * Copy buffer to data controller data memory.
+ * We cannot use memcpy_toio() because the hardware requires 32bit writes
+ */
+static void copy_to_dmem(
+	struct imx21 *imx21, int dmem_offset, void *src, int count)
+{
+	void __iomem *dmem = imx21->regs + USBOTG_DMEM + dmem_offset;
+	u32 word = 0;
+	u8 *p = src;
+	int byte = 0;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		byte = i % 4;
+		word += (*p++ << (byte * 8));
+		if (byte == 3) {
+			writel(word, dmem);
+			dmem += 4;
+			word = 0;
+		}
+	}
+
+	if (count && byte != 3)
+		writel(word, dmem);
+}
+
+static void activate_etd(struct imx21 *imx21, int etd_num, u8 dir)
 {
 	u32 etd_mask = 1 << etd_num;
 	struct etd_priv *etd = &imx21->etd[etd_num];
 
+	if (etd->dma_handle && unsuitable_for_dma(etd->dma_handle)) {
+		/* For non aligned isoc the condition below is always true */
+		if (etd->len <= etd->dmem_size) {
+			/* Fits into data memory, use PIO */
+			if (dir != TD_DIR_IN) {
+				copy_to_dmem(imx21,
+						etd->dmem_offset,
+						etd->cpu_buffer, etd->len);
+			}
+			etd->dma_handle = 0;
+
+		} else {
+			/* Too big for data memory, use bounce buffer */
+			enum dma_data_direction dmadir;
+
+			if (dir == TD_DIR_IN) {
+				dmadir = DMA_FROM_DEVICE;
+				etd->bounce_buffer = kmalloc(etd->len,
+								GFP_ATOMIC);
+			} else {
+				dmadir = DMA_TO_DEVICE;
+				etd->bounce_buffer = kmemdup(etd->cpu_buffer,
+								etd->len,
+								GFP_ATOMIC);
+			}
+			if (!etd->bounce_buffer) {
+				dev_err(imx21->dev, "failed bounce alloc\n");
+				goto err_bounce_alloc;
+			}
+
+			etd->dma_handle =
+				dma_map_single(imx21->dev,
+						etd->bounce_buffer,
+						etd->len,
+						dmadir);
+			if (dma_mapping_error(imx21->dev, etd->dma_handle)) {
+				dev_err(imx21->dev, "failed bounce map\n");
+				goto err_bounce_map;
+			}
+		}
+	}
+
 	clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
 	set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
 	clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
 	clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
 
-	if (dma) {
+	if (etd->dma_handle) {
 		set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
 		clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
 		clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
-		writel(dma, imx21->regs + USB_ETDSMSA(etd_num));
+		writel(etd->dma_handle, imx21->regs + USB_ETDSMSA(etd_num));
 		set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
 	} else {
 		if (dir != TD_DIR_IN) {
-			/* need to set for ZLP */
+			/* need to set for ZLP and PIO */
 			set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
 			set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
 		}
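copy_to_dmem() exists because the dmem window only tolerates 32-bit stores, so memcpy_toio() (which may issue byte writes) cannot be used; bytes are packed little-endian into a word that is flushed every four bytes, with a trailing writel() for any partial final word. A standalone sketch of the same packing, runnable as ordinary C with the __iomem store replaced by a plain assignment (purely illustrative, not driver code):

	#include <stdint.h>

	/* Word-packing as in copy_to_dmem(): accumulate bytes little-endian
	 * into a 32-bit word, emit every 4 bytes, flush any partial tail. */
	static void pack_words(uint32_t *dst, const uint8_t *src, int count)
	{
		uint32_t word = 0;
		int byte = 0;
		int i;

		for (i = 0; i < count; i++) {
			byte = i % 4;
			word |= (uint32_t)src[i] << (byte * 8);
			if (byte == 3) {
				*dst++ = word;
				word = 0;
			}
		}
		if (count && byte != 3)
			*dst = word;	/* flush partial final word */
	}

(The driver uses += rather than |=; the two are equivalent here because each byte lane of the word is written exactly once.)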
@@ -263,6 +342,14 @@ static void activate_etd(struct imx21 *imx21,
 
 	etd->active_count = 1;
 	writel(etd_mask, imx21->regs + USBH_ETDENSET);
+	return;
+
+err_bounce_map:
+	kfree(etd->bounce_buffer);
+
+err_bounce_alloc:
+	free_dmem(imx21, etd);
+	nonisoc_urb_completed_for_etd(imx21, etd, -ENOMEM);
 }
 
 /* =========================================== */
@@ -323,16 +410,23 @@ static void activate_queued_etd(struct imx21 *imx21,
 	etd_writel(imx21, etd_num, 1,
 		((dmem_offset + maxpacket) << DW1_YBUFSRTAD) | dmem_offset);
 
+	etd->dmem_offset = dmem_offset;
 	urb_priv->active = 1;
-	activate_etd(imx21, etd_num, etd->dma_handle, dir);
+	activate_etd(imx21, etd_num, dir);
 }
 
-static void free_dmem(struct imx21 *imx21, int offset)
+static void free_dmem(struct imx21 *imx21, struct etd_priv *etd)
 {
 	struct imx21_dmem_area *area;
-	struct etd_priv *etd, *tmp;
+	struct etd_priv *tmp;
 	int found = 0;
+	int offset;
 
+	if (!etd->dmem_size)
+		return;
+	etd->dmem_size = 0;
+
+	offset = etd->dmem_offset;
 	list_for_each_entry(area, &imx21->dmem_list, list) {
 		if (area->offset == offset) {
 			debug_dmem_freed(imx21, area->size);
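free_dmem() now takes the ETD itself rather than a raw dmem offset and returns early when etd->dmem_size is zero, clearing the size before the list walk. That makes the free idempotent, which is why later hunks can call it unconditionally from ep_idle(), dequeue_isoc_urb(), the dequeue path and both completion handlers. The guard, repeated here only to highlight the pattern:

	if (!etd->dmem_size)	/* never allocated, or already freed */
		return;
	etd->dmem_size = 0;	/* mark freed before releasing the area */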
@@ -378,20 +472,23 @@ static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
 /* =========================================== */
 /* End handling  */
 /* =========================================== */
-static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
 
 /* Endpoint now idle - release it's ETD(s) or asssign to queued request */
 static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
 {
-	int etd_num;
 	int i;
 
 	for (i = 0; i < NUM_ISO_ETDS; i++) {
-		etd_num = ep_priv->etd[i];
+		int etd_num = ep_priv->etd[i];
+		struct etd_priv *etd;
 		if (etd_num < 0)
 			continue;
 
+		etd = &imx21->etd[etd_num];
 		ep_priv->etd[i] = -1;
+
+		free_dmem(imx21, etd); /* for isoc */
+
 		if (list_empty(&imx21->queue_for_etd)) {
 			free_etd(imx21, etd_num);
 			continue;
@@ -437,6 +534,24 @@ __acquires(imx21->lock)
 	ep_idle(imx21, ep_priv);
 }
 
+static void nonisoc_urb_completed_for_etd(
+	struct imx21 *imx21, struct etd_priv *etd, int status)
+{
+	struct usb_host_endpoint *ep = etd->ep;
+
+	urb_done(imx21->hcd, etd->urb, status);
+	etd->urb = NULL;
+
+	if (!list_empty(&ep->urb_list)) {
+		struct urb *urb = list_first_entry(
+					&ep->urb_list, struct urb, urb_list);
+
+		dev_vdbg(imx21->dev, "next URB %p\n", urb);
+		schedule_nonisoc_etd(imx21, urb);
+	}
+}
+
+
 /* =========================================== */
 /* ISOC Handling ... */
 /* =========================================== */
@@ -489,6 +604,8 @@ too_late:
 		etd->ep = td->ep;
 		etd->urb = td->urb;
 		etd->len = td->len;
+		etd->dma_handle = td->dma_handle;
+		etd->cpu_buffer = td->cpu_buffer;
 
 		debug_isoc_submitted(imx21, cur_frame, td);
 
@@ -502,16 +619,17 @@ too_late:
 			(TD_NOTACCESSED << DW3_COMPCODE0) |
 			(td->len << DW3_PKTLEN0));
 
-		activate_etd(imx21, etd_num, td->data, dir);
+		activate_etd(imx21, etd_num, dir);
 	}
 }
 
-static void isoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
+static void isoc_etd_done(struct usb_hcd *hcd, int etd_num)
 {
 	struct imx21 *imx21 = hcd_to_imx21(hcd);
 	int etd_mask = 1 << etd_num;
-	struct urb_priv *urb_priv = urb->hcpriv;
 	struct etd_priv *etd = imx21->etd + etd_num;
+	struct urb *urb = etd->urb;
+	struct urb_priv *urb_priv = urb->hcpriv;
 	struct td *td = etd->td;
 	struct usb_host_endpoint *ep = etd->ep;
 	int isoc_index = td->isoc_index;
@@ -545,8 +663,13 @@ static void isoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
 			bytes_xfrd, td->len, urb, etd_num, isoc_index);
 	}
 
-	if (dir_in)
+	if (dir_in) {
 		clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
+		if (!etd->dma_handle)
+			memcpy_fromio(etd->cpu_buffer,
+				imx21->regs + USBOTG_DMEM + etd->dmem_offset,
+				bytes_xfrd);
+	}
 
 	urb->actual_length += bytes_xfrd;
 	urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
@@ -569,30 +692,43 @@ static struct ep_priv *alloc_isoc_ep(
 	int i;
 
 	ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
-	if (ep_priv == NULL)
+	if (!ep_priv)
 		return NULL;
 
-	/* Allocate the ETDs */
-	for (i = 0; i < NUM_ISO_ETDS; i++) {
-		ep_priv->etd[i] = alloc_etd(imx21);
-		if (ep_priv->etd[i] < 0) {
-			int j;
-			dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
-			for (j = 0; j < i; j++)
-				free_etd(imx21, ep_priv->etd[j]);
-			goto alloc_etd_failed;
-		}
-		imx21->etd[ep_priv->etd[i]].ep = ep;
-	}
+	for (i = 0; i < NUM_ISO_ETDS; i++)
+		ep_priv->etd[i] = -1;
 
 	INIT_LIST_HEAD(&ep_priv->td_list);
 	ep_priv->ep = ep;
 	ep->hcpriv = ep_priv;
 	return ep_priv;
+}
+
+static int alloc_isoc_etds(struct imx21 *imx21, struct ep_priv *ep_priv)
+{
+	int i, j;
+	int etd_num;
+
+	/* Allocate the ETDs if required */
+	for (i = 0; i < NUM_ISO_ETDS; i++) {
+		if (ep_priv->etd[i] < 0) {
+			etd_num = alloc_etd(imx21);
+			if (etd_num < 0)
+				goto alloc_etd_failed;
+
+			ep_priv->etd[i] = etd_num;
+			imx21->etd[etd_num].ep = ep_priv->ep;
+		}
+	}
+	return 0;
 
 alloc_etd_failed:
-	kfree(ep_priv);
-	return NULL;
+	dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
+	for (j = 0; j < i; j++) {
+		free_etd(imx21, ep_priv->etd[j]);
+		ep_priv->etd[j] = -1;
+	}
+	return -ENOMEM;
 }
 
 static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
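ETD allocation for isochronous endpoints becomes lazy here: alloc_isoc_ep() now only marks every slot -1, while alloc_isoc_etds() (re)acquires ETDs on each URB submission, filling just the slots still unassigned and rolling back on failure. Since ep_idle() can now return an endpoint's ETDs to the shared pool between URBs, a temporary shortage fails one submission with -ENOMEM instead of the endpoint permanently. The enqueue-side usage added in the next hunk looks like this (condensed sketch; the error label is the one introduced by this patch):

	ret = alloc_isoc_etds(imx21, ep_priv);
	if (ret)
		goto alloc_etd_failed;	/* all ETDs currently in use */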
@@ -632,6 +768,10 @@ static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
 		ep_priv = ep->hcpriv;
 	}
 
+	ret = alloc_isoc_etds(imx21, ep_priv);
+	if (ret)
+		goto alloc_etd_failed;
+
 	ret = usb_hcd_link_urb_to_ep(hcd, urb);
 	if (ret)
 		goto link_failed;
@@ -688,12 +828,14 @@ static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
 	/* set up transfers */
 	td = urb_priv->isoc_td;
 	for (i = 0; i < urb->number_of_packets; i++, td++) {
+		unsigned int offset = urb->iso_frame_desc[i].offset;
 		td->ep = ep;
 		td->urb = urb;
 		td->len = urb->iso_frame_desc[i].length;
 		td->isoc_index = i;
 		td->frame = wrap_frame(urb->start_frame + urb->interval * i);
-		td->data = urb->transfer_dma + urb->iso_frame_desc[i].offset;
+		td->dma_handle = urb->transfer_dma + offset;
+		td->cpu_buffer = urb->transfer_buffer + offset;
 		list_add_tail(&td->list, &ep_priv->td_list);
 	}
 
@@ -711,6 +853,7 @@ alloc_dmem_failed:
 	usb_hcd_unlink_urb_from_ep(hcd, urb);
 
 link_failed:
+alloc_etd_failed:
 alloc_ep_failed:
 	spin_unlock_irqrestore(&imx21->lock, flags);
 	kfree(urb_priv->isoc_td);
@@ -734,9 +877,7 @@ static void dequeue_isoc_urb(struct imx21 *imx21,
 			struct etd_priv *etd = imx21->etd + etd_num;
 
 			reset_etd(imx21, etd_num);
-			if (etd->dmem_size)
-				free_dmem(imx21, etd->dmem_offset);
-			etd->dmem_size = 0;
+			free_dmem(imx21, etd);
 		}
 	}
 }
@@ -761,7 +902,6 @@ static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
 	int state = urb_priv->state;
 	int etd_num = ep_priv->etd[0];
 	struct etd_priv *etd;
-	int dmem_offset;
 	u32 count;
 	u16 etd_buf_size;
 	u16 maxpacket;
@@ -786,13 +926,16 @@ static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
 	if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) {
 		if (state == US_CTRL_SETUP) {
 			dir = TD_DIR_SETUP;
+			if (unsuitable_for_dma(urb->setup_dma))
+				usb_hcd_unmap_urb_setup_for_dma(imx21->hcd,
+					urb);
 			etd->dma_handle = urb->setup_dma;
+			etd->cpu_buffer = urb->setup_packet;
 			bufround = 0;
 			count = 8;
 			datatoggle = TD_TOGGLE_DATA0;
 		} else {	/* US_CTRL_ACK */
 			dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT;
-			etd->dma_handle = urb->transfer_dma;
 			bufround = 0;
 			count = 0;
 			datatoggle = TD_TOGGLE_DATA1;
@@ -800,7 +943,11 @@ static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
 	} else {
 		dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
 		bufround = (dir == TD_DIR_IN) ? 1 : 0;
+		if (unsuitable_for_dma(urb->transfer_dma))
+			usb_hcd_unmap_urb_for_dma(imx21->hcd, urb);
+
 		etd->dma_handle = urb->transfer_dma;
+		etd->cpu_buffer = urb->transfer_buffer;
 		if (usb_pipebulk(pipe) && (state == US_BULK0))
 			count = 0;
 		else
@@ -855,8 +1002,8 @@ static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
 
 	/* allocate x and y buffer space at once */
 	etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket;
-	dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep);
-	if (dmem_offset < 0) {
+	etd->dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep);
+	if (etd->dmem_offset < 0) {
 		/* Setup everything we can in HW and update when we get DMEM */
 		etd_writel(imx21, etd_num, 1, (u32)maxpacket << 16);
 
@@ -867,26 +1014,26 @@ static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
 	}
 
 	etd_writel(imx21, etd_num, 1,
-		(((u32) dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) |
-		(u32) dmem_offset);
+		(((u32) etd->dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) |
+		(u32) etd->dmem_offset);
 
 	urb_priv->active = 1;
 
 	/* enable the ETD to kick off transfer */
 	dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
 		etd_num, count, dir != TD_DIR_IN ? "out" : "in");
-	activate_etd(imx21, etd_num, etd->dma_handle, dir);
+	activate_etd(imx21, etd_num, dir);
 
 }
 
-static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
+static void nonisoc_etd_done(struct usb_hcd *hcd, int etd_num)
 {
 	struct imx21 *imx21 = hcd_to_imx21(hcd);
 	struct etd_priv *etd = &imx21->etd[etd_num];
+	struct urb *urb = etd->urb;
 	u32 etd_mask = 1 << etd_num;
 	struct urb_priv *urb_priv = urb->hcpriv;
 	int dir;
-	u16 xbufaddr;
 	int cc;
 	u32 bytes_xfrd;
 	int etd_done;
@@ -894,7 +1041,6 @@ static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
 	disactivate_etd(imx21, etd_num);
 
 	dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3;
-	xbufaddr = etd_readl(imx21, etd_num, 1) & 0xffff;
 	cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf;
 	bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff);
 
@@ -906,8 +1052,21 @@ static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
 	if (dir == TD_DIR_IN) {
 		clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
 		clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
+
+		if (etd->bounce_buffer) {
+			memcpy(etd->cpu_buffer, etd->bounce_buffer, bytes_xfrd);
+			dma_unmap_single(imx21->dev,
+				etd->dma_handle, etd->len, DMA_FROM_DEVICE);
+		} else if (!etd->dma_handle && bytes_xfrd) {/* PIO */
+			memcpy_fromio(etd->cpu_buffer,
+				imx21->regs + USBOTG_DMEM + etd->dmem_offset,
+				bytes_xfrd);
+		}
 	}
-	free_dmem(imx21, xbufaddr);
+
+	kfree(etd->bounce_buffer);
+	etd->bounce_buffer = NULL;
+	free_dmem(imx21, etd);
 
 	urb->error_count = 0;
 	if (!(urb->transfer_flags & URB_SHORT_NOT_OK)
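On completion of an IN transfer that used a bounce buffer, the driver copies bytes_xfrd back to the caller's buffer and unmaps the DMA mapping; in the PIO case the data is read straight out of dmem with memcpy_fromio(). The unconditional kfree() is safe because kfree(NULL) is a no-op. For reference, a self-contained sketch of the receive-side bounce-buffer round trip in its canonical form (generic names such as bounce_rx are mine, not from the driver):

	#include <linux/dma-mapping.h>
	#include <linux/slab.h>

	/* Generic RX bounce-buffer round trip (illustrative sketch only):
	 * allocate, map, let the device DMA into it, copy back, free. */
	static int bounce_rx(struct device *dev, void *dst, size_t len)
	{
		void *bounce = kmalloc(len, GFP_ATOMIC);
		dma_addr_t handle;

		if (!bounce)
			return -ENOMEM;
		handle = dma_map_single(dev, bounce, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, handle)) {
			kfree(bounce);
			return -EIO;
		}

		/* ... program the hardware with 'handle', wait for done ... */

		dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
		memcpy(dst, bounce, len);	/* copy back to caller */
		kfree(bounce);
		return 0;
	}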
@@ -964,24 +1123,15 @@ static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
 		break;
 	}
 
-	if (!etd_done) {
+	if (etd_done)
+		nonisoc_urb_completed_for_etd(imx21, etd, cc_to_error[cc]);
+	else {
 		dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
 		schedule_nonisoc_etd(imx21, urb);
-	} else {
-		struct usb_host_endpoint *ep = urb->ep;
-
-		urb_done(hcd, urb, cc_to_error[cc]);
-		etd->urb = NULL;
-
-		if (!list_empty(&ep->urb_list)) {
-			urb = list_first_entry(&ep->urb_list,
-					struct urb, urb_list);
-			dev_vdbg(imx21->dev, "next URB %p\n", urb);
-			schedule_nonisoc_etd(imx21, urb);
-		}
 	}
 }
 
+
 static struct ep_priv *alloc_ep(void)
 {
 	int i;
@@ -1007,7 +1157,6 @@ static int imx21_hc_urb_enqueue(struct usb_hcd *hcd,
 	struct etd_priv *etd;
 	int ret;
 	unsigned long flags;
-	int new_ep = 0;
 
 	dev_vdbg(imx21->dev,
 		"enqueue urb=%p ep=%p len=%d "
@@ -1035,7 +1184,6 @@ static int imx21_hc_urb_enqueue(struct usb_hcd *hcd,
 		}
 		ep->hcpriv = ep_priv;
 		ep_priv->ep = ep;
-		new_ep = 1;
 	}
 
 	ret = usb_hcd_link_urb_to_ep(hcd, urb);
@@ -1124,9 +1272,13 @@ static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
 	} else if (urb_priv->active) {
 		int etd_num = ep_priv->etd[0];
 		if (etd_num != -1) {
+			struct etd_priv *etd = &imx21->etd[etd_num];
+
 			disactivate_etd(imx21, etd_num);
-			free_dmem(imx21, etd_readl(imx21, etd_num, 1) & 0xffff);
-			imx21->etd[etd_num].urb = NULL;
+			free_dmem(imx21, etd);
+			etd->urb = NULL;
+			kfree(etd->bounce_buffer);
+			etd->bounce_buffer = NULL;
 		}
 	}
 
@@ -1171,7 +1323,7 @@ static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
  * (and hence no interrupt occurs).
  * This causes the transfer in question to hang.
  * The kludge below checks for this condition at each SOF and processes any
- * blocked ETDs (after an arbitary 10 frame wait)
+ * blocked ETDs (after an arbitrary 10 frame wait)
  *
  * With a single active transfer the usbtest test suite will run for days
  * without the kludge.
@@ -1226,9 +1378,9 @@ static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
 		}
 
 		if (usb_pipeisoc(etd->urb->pipe))
-			isoc_etd_done(hcd, etd->urb, etd_num);
+			isoc_etd_done(hcd, etd_num);
 		else
-			nonisoc_etd_done(hcd, etd->urb, etd_num);
+			nonisoc_etd_done(hcd, etd_num);
 	}
 
 	/* only enable SOF interrupt if it may be needed for the kludge */
@@ -1320,8 +1472,8 @@ static int get_hub_descriptor(struct usb_hcd *hcd,
 		0x0010 |	/* No over current protection */
 		0);
 
-	desc->bitmap[0] = 1 << 1;
-	desc->bitmap[1] = ~0;
+	desc->u.hs.DeviceRemovable[0] = 1 << 1;
+	desc->u.hs.DeviceRemovable[1] = ~0;
 	return 0;
 }
 
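This hunk tracks a mainline API change: struct usb_hub_descriptor lost its flat bitmap[] member when USB 3.0 hub support introduced per-speed layouts, so a USB 2.0 root hub's DeviceRemovable/PortPwrCtrlMask bytes are now reached through the u.hs union member. An abbreviated sketch of the upstream layout this relies on, as I recall it from include/linux/usb/ch11.h (verify against your kernel's headers):

	struct usb_hub_descriptor {
		__u8   bDescLength;
		__u8   bDescriptorType;
		__u8   bNbrPorts;
		__le16 wHubCharacteristics;
		__u8   bPwrOn2PwrGood;
		__u8   bHubContrCurrent;

		/* 2.0 and 3.0 hubs differ here */
		union {
			struct {
				__u8 DeviceRemovable[(USB_MAXCHILDREN + 1 + 7) / 8];
				__u8 PortPwrCtrlMask[(USB_MAXCHILDREN + 1 + 7) / 8];
			} __attribute__ ((packed)) hs;	/* USB 2.0 hubs */

			struct {
				__u8   bHubHdrDecLat;
				__le16 wHubDelay;
				__le16 DeviceRemovable;
			} __attribute__ ((packed)) ss;	/* USB 3.0 hubs */
		} u;
	} __attribute__ ((packed));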
@@ -1507,7 +1659,7 @@ static int imx21_hc_reset(struct usb_hcd *hcd)
 
 	spin_lock_irqsave(&imx21->lock, flags);
 
-	/* Reset the Host controler modules */
+	/* Reset the Host controller modules */
 	writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH |
 		USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC,
 		imx21->regs + USBOTG_RST_CTRL);
@@ -1696,6 +1848,7 @@ static int imx21_probe(struct platform_device *pdev)
 	}
 
 	imx21 = hcd_to_imx21(hcd);
+	imx21->hcd = hcd;
 	imx21->dev = &pdev->dev;
 	imx21->pdata = pdev->dev.platform_data;
 	if (!imx21->pdata)
@@ -1754,7 +1907,7 @@ failed_clock_set:
 failed_clock_get:
 	iounmap(imx21->regs);
 failed_ioremap:
-	release_mem_region(res->start, res->end - res->start);
+	release_mem_region(res->start, resource_size(res));
failed_request_mem:
 	remove_debug_files(imx21);
 	usb_put_hcd(hcd);
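The last hunk also fixes a latent off-by-one: a struct resource covers the inclusive range [start, end], so the region's length is end - start + 1. The old expression released one byte too few, while resource_size() from include/linux/ioport.h computes exactly the right value:

	static inline resource_size_t resource_size(const struct resource *res)
	{
		return res->end - res->start + 1;
	}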