author	Martin Fuzzey <mfuzzey@gmail.com>	2010-09-30 18:21:59 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2010-10-22 13:22:03 -0400
commit	d0cc3d4100e829d726d7c0fbf5b7b8d2146f60ba (patch)
tree	94e3c28cc1e11448c74ed0f7be0a19af761f3c6d /drivers/usb
parent	1dae423dd9b247b048eda00cb598c755e5933213 (diff)
USB: imx21-hcd accept arbitrary transfer buffer alignment.
The hardware can only do DMA to 4 byte aligned addresses. When this
requirement is not met, use PIO or a bounce buffer.

PIO is used when the buffer is small enough to fit directly into the
hardware data memory (2 * maxpacket). A bounce buffer is used for
larger transfers.

Signed-off-by: Martin Fuzzey <mfuzzey@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
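[Editor's note] The patch's fallback policy reduces to a three-way dispatch on alignment and length. The following standalone C sketch is illustrative only: the names xfer_strategy and pick_strategy are hypothetical and not from the driver, and dmem_size stands in for etd->dmem_size; in the driver itself the equivalent decision is made inside activate_etd().

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical names for illustration; not driver code. */
    enum xfer_strategy { USE_DMA, USE_PIO, USE_BOUNCE };

    /* The i.MX21 controller can only DMA to 4 byte aligned addresses;
     * this is the same test the patch adds as unsuitable_for_dma(). */
    static bool unsuitable_for_dma(uintptr_t dma_addr)
    {
            return (dma_addr & 3) != 0;
    }

    /* dmem_size models the ETD's slice of on-chip data memory, which
     * the commit message gives as 2 * maxpacket. */
    static enum xfer_strategy pick_strategy(uintptr_t dma_addr, size_t len,
                                            size_t dmem_size)
    {
            if (!unsuitable_for_dma(dma_addr))
                    return USE_DMA;    /* aligned: DMA directly */
            if (len <= dmem_size)
                    return USE_PIO;    /* small: copy through data memory */
            return USE_BOUNCE;         /* large: DMA via aligned bounce buffer */
    }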
Diffstat (limited to 'drivers/usb')
-rw-r--r--  drivers/usb/host/imx21-hcd.c  196
-rw-r--r--  drivers/usb/host/imx21-hcd.h    8
2 files changed, 169 insertions(+), 35 deletions(-)
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index 890a41ccc5cd..dd132eb7fee7 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -57,6 +57,7 @@
 #include <linux/slab.h>
 #include <linux/usb.h>
 #include <linux/usb/hcd.h>
+#include <linux/dma-mapping.h>
 
 #include "imx21-hcd.h"
 
@@ -136,9 +137,18 @@ static int imx21_hc_get_frame(struct usb_hcd *hcd)
 	return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
 }
 
+static inline bool unsuitable_for_dma(dma_addr_t addr)
+{
+	return (addr & 3) != 0;
+}
 
 #include "imx21-dbg.c"
 
+static void nonisoc_urb_completed_for_etd(
+	struct imx21 *imx21, struct etd_priv *etd, int status);
+static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
+static void free_dmem(struct imx21 *imx21, struct etd_priv *etd);
+
 /* =========================================== */
 /* ETD management */
 /* =========================================== */
@@ -185,7 +195,8 @@ static void reset_etd(struct imx21 *imx21, int num)
 		etd_writel(imx21, num, i, 0);
 	etd->urb = NULL;
 	etd->ep = NULL;
-	etd->td = NULL;;
+	etd->td = NULL;
+	etd->bounce_buffer = NULL;
 }
 
 static void free_etd(struct imx21 *imx21, int num)
@@ -221,26 +232,94 @@ static void setup_etd_dword0(struct imx21 *imx21,
 			((u32) maxpacket << DW0_MAXPKTSIZ));
 }
 
-static void activate_etd(struct imx21 *imx21,
-	int etd_num, dma_addr_t dma, u8 dir)
+/**
+ * Copy buffer to data controller data memory.
+ * We cannot use memcpy_toio() because the hardware requires 32bit writes
+ */
+static void copy_to_dmem(
+	struct imx21 *imx21, int dmem_offset, void *src, int count)
+{
+	void __iomem *dmem = imx21->regs + USBOTG_DMEM + dmem_offset;
+	u32 word = 0;
+	u8 *p = src;
+	int byte = 0;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		byte = i % 4;
+		word += (*p++ << (byte * 8));
+		if (byte == 3) {
+			writel(word, dmem);
+			dmem += 4;
+			word = 0;
+		}
+	}
+
+	if (count && byte != 3)
+		writel(word, dmem);
+}
+
+static void activate_etd(struct imx21 *imx21, int etd_num, u8 dir)
 {
 	u32 etd_mask = 1 << etd_num;
 	struct etd_priv *etd = &imx21->etd[etd_num];
 
+	if (etd->dma_handle && unsuitable_for_dma(etd->dma_handle)) {
+		/* For non aligned isoc the condition below is always true */
+		if (etd->len <= etd->dmem_size) {
+			/* Fits into data memory, use PIO */
+			if (dir != TD_DIR_IN) {
+				copy_to_dmem(imx21,
+						etd->dmem_offset,
+						etd->cpu_buffer, etd->len);
+			}
+			etd->dma_handle = 0;
+
+		} else {
+			/* Too big for data memory, use bounce buffer */
+			enum dma_data_direction dmadir;
+
+			if (dir == TD_DIR_IN) {
+				dmadir = DMA_FROM_DEVICE;
+				etd->bounce_buffer = kmalloc(etd->len,
+								GFP_ATOMIC);
+			} else {
+				dmadir = DMA_TO_DEVICE;
+				etd->bounce_buffer = kmemdup(etd->cpu_buffer,
+								etd->len,
+								GFP_ATOMIC);
+			}
+			if (!etd->bounce_buffer) {
+				dev_err(imx21->dev, "failed bounce alloc\n");
+				goto err_bounce_alloc;
+			}
+
+			etd->dma_handle =
+				dma_map_single(imx21->dev,
+						etd->bounce_buffer,
+						etd->len,
+						dmadir);
+			if (dma_mapping_error(imx21->dev, etd->dma_handle)) {
+				dev_err(imx21->dev, "failed bounce map\n");
+				goto err_bounce_map;
+			}
+		}
+	}
+
 	clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
 	set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
 	clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
 	clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
 
-	if (dma) {
+	if (etd->dma_handle) {
 		set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
 		clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
 		clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
-		writel(dma, imx21->regs + USB_ETDSMSA(etd_num));
+		writel(etd->dma_handle, imx21->regs + USB_ETDSMSA(etd_num));
 		set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
 	} else {
 		if (dir != TD_DIR_IN) {
-			/* need to set for ZLP */
+			/* need to set for ZLP and PIO */
 			set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
 			set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
 		}
@@ -263,6 +342,14 @@ static void activate_etd(struct imx21 *imx21,
 
 	etd->active_count = 1;
 	writel(etd_mask, imx21->regs + USBH_ETDENSET);
+	return;
+
+err_bounce_map:
+	kfree(etd->bounce_buffer);
+
+err_bounce_alloc:
+	free_dmem(imx21, etd);
+	nonisoc_urb_completed_for_etd(imx21, etd, -ENOMEM);
 }
 
 /* =========================================== */
@@ -325,7 +412,7 @@ static void activate_queued_etd(struct imx21 *imx21,
 
 	etd->dmem_offset = dmem_offset;
 	urb_priv->active = 1;
-	activate_etd(imx21, etd_num, etd->dma_handle, dir);
+	activate_etd(imx21, etd_num, dir);
 }
 
 static void free_dmem(struct imx21 *imx21, struct etd_priv *etd)
@@ -385,7 +472,6 @@ static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
 /* =========================================== */
 /* End handling */
 /* =========================================== */
-static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
 
 /* Endpoint now idle - release it's ETD(s) or asssign to queued request */
 static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
@@ -448,6 +534,24 @@ __acquires(imx21->lock)
 	ep_idle(imx21, ep_priv);
 }
 
+static void nonisoc_urb_completed_for_etd(
+	struct imx21 *imx21, struct etd_priv *etd, int status)
+{
+	struct usb_host_endpoint *ep = etd->ep;
+
+	urb_done(imx21->hcd, etd->urb, status);
+	etd->urb = NULL;
+
+	if (!list_empty(&ep->urb_list)) {
+		struct urb *urb = list_first_entry(
+					&ep->urb_list, struct urb, urb_list);
+
+		dev_vdbg(imx21->dev, "next URB %p\n", urb);
+		schedule_nonisoc_etd(imx21, urb);
+	}
+}
+
+
 /* =========================================== */
 /* ISOC Handling ... */
 /* =========================================== */
@@ -500,6 +604,8 @@ too_late:
 		etd->ep = td->ep;
 		etd->urb = td->urb;
 		etd->len = td->len;
+		etd->dma_handle = td->dma_handle;
+		etd->cpu_buffer = td->cpu_buffer;
 
 		debug_isoc_submitted(imx21, cur_frame, td);
 
@@ -513,16 +619,17 @@ too_late:
 			(TD_NOTACCESSED << DW3_COMPCODE0) |
 			(td->len << DW3_PKTLEN0));
 
-		activate_etd(imx21, etd_num, td->data, dir);
+		activate_etd(imx21, etd_num, dir);
 	}
 }
 
-static void isoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
+static void isoc_etd_done(struct usb_hcd *hcd, int etd_num)
 {
 	struct imx21 *imx21 = hcd_to_imx21(hcd);
 	int etd_mask = 1 << etd_num;
-	struct urb_priv *urb_priv = urb->hcpriv;
 	struct etd_priv *etd = imx21->etd + etd_num;
+	struct urb *urb = etd->urb;
+	struct urb_priv *urb_priv = urb->hcpriv;
 	struct td *td = etd->td;
 	struct usb_host_endpoint *ep = etd->ep;
 	int isoc_index = td->isoc_index;
@@ -556,8 +663,13 @@ static void isoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
 			bytes_xfrd, td->len, urb, etd_num, isoc_index);
 	}
 
-	if (dir_in)
+	if (dir_in) {
 		clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
+		if (!etd->dma_handle)
+			memcpy_fromio(etd->cpu_buffer,
+				imx21->regs + USBOTG_DMEM + etd->dmem_offset,
+				bytes_xfrd);
+	}
 
 	urb->actual_length += bytes_xfrd;
 	urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
@@ -716,12 +828,14 @@ static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
 	/* set up transfers */
 	td = urb_priv->isoc_td;
 	for (i = 0; i < urb->number_of_packets; i++, td++) {
+		unsigned int offset = urb->iso_frame_desc[i].offset;
 		td->ep = ep;
 		td->urb = urb;
 		td->len = urb->iso_frame_desc[i].length;
 		td->isoc_index = i;
 		td->frame = wrap_frame(urb->start_frame + urb->interval * i);
-		td->data = urb->transfer_dma + urb->iso_frame_desc[i].offset;
+		td->dma_handle = urb->transfer_dma + offset;
+		td->cpu_buffer = urb->transfer_buffer + offset;
 		list_add_tail(&td->list, &ep_priv->td_list);
 	}
 
@@ -812,13 +926,15 @@ static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
 	if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) {
 		if (state == US_CTRL_SETUP) {
 			dir = TD_DIR_SETUP;
+			if (unsuitable_for_dma(urb->setup_dma))
+				unmap_urb_setup_for_dma(imx21->hcd, urb);
 			etd->dma_handle = urb->setup_dma;
+			etd->cpu_buffer = urb->setup_packet;
 			bufround = 0;
 			count = 8;
 			datatoggle = TD_TOGGLE_DATA0;
 		} else {	/* US_CTRL_ACK */
 			dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT;
-			etd->dma_handle = urb->transfer_dma;
 			bufround = 0;
 			count = 0;
 			datatoggle = TD_TOGGLE_DATA1;
@@ -826,7 +942,11 @@ static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
 	} else {
 		dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
 		bufround = (dir == TD_DIR_IN) ? 1 : 0;
+		if (unsuitable_for_dma(urb->transfer_dma))
+			unmap_urb_for_dma(imx21->hcd, urb);
+
 		etd->dma_handle = urb->transfer_dma;
+		etd->cpu_buffer = urb->transfer_buffer;
 		if (usb_pipebulk(pipe) && (state == US_BULK0))
 			count = 0;
 		else
@@ -901,14 +1021,15 @@ static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
 	/* enable the ETD to kick off transfer */
 	dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
 		etd_num, count, dir != TD_DIR_IN ? "out" : "in");
-	activate_etd(imx21, etd_num, etd->dma_handle, dir);
+	activate_etd(imx21, etd_num, dir);
 
 }
 
-static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
+static void nonisoc_etd_done(struct usb_hcd *hcd, int etd_num)
 {
 	struct imx21 *imx21 = hcd_to_imx21(hcd);
 	struct etd_priv *etd = &imx21->etd[etd_num];
+	struct urb *urb = etd->urb;
 	u32 etd_mask = 1 << etd_num;
 	struct urb_priv *urb_priv = urb->hcpriv;
 	int dir;
@@ -930,7 +1051,20 @@ static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
 	if (dir == TD_DIR_IN) {
 		clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
 		clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
+
+		if (etd->bounce_buffer) {
+			memcpy(etd->cpu_buffer, etd->bounce_buffer, bytes_xfrd);
+			dma_unmap_single(imx21->dev,
+				etd->dma_handle, etd->len, DMA_FROM_DEVICE);
+		} else if (!etd->dma_handle && bytes_xfrd) {/* PIO */
+			memcpy_fromio(etd->cpu_buffer,
+				imx21->regs + USBOTG_DMEM + etd->dmem_offset,
+				bytes_xfrd);
+		}
 	}
+
+	kfree(etd->bounce_buffer);
+	etd->bounce_buffer = NULL;
 	free_dmem(imx21, etd);
 
 	urb->error_count = 0;
@@ -988,24 +1122,15 @@ static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
 		break;
 	}
 
-	if (!etd_done) {
+	if (etd_done)
+		nonisoc_urb_completed_for_etd(imx21, etd, cc_to_error[cc]);
+	else {
 		dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
 		schedule_nonisoc_etd(imx21, urb);
-	} else {
-		struct usb_host_endpoint *ep = urb->ep;
-
-		urb_done(hcd, urb, cc_to_error[cc]);
-		etd->urb = NULL;
-
-		if (!list_empty(&ep->urb_list)) {
-			urb = list_first_entry(&ep->urb_list,
-					struct urb, urb_list);
-			dev_vdbg(imx21->dev, "next URB %p\n", urb);
-			schedule_nonisoc_etd(imx21, urb);
-		}
 	}
 }
 
+
 static struct ep_priv *alloc_ep(void)
 {
 	int i;
@@ -1146,9 +1271,13 @@ static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
 	} else if (urb_priv->active) {
 		int etd_num = ep_priv->etd[0];
 		if (etd_num != -1) {
+			struct etd_priv *etd = &imx21->etd[etd_num];
+
 			disactivate_etd(imx21, etd_num);
-			free_dmem(imx21, &imx21->etd[etd_num]);
-			imx21->etd[etd_num].urb = NULL;
+			free_dmem(imx21, etd);
+			etd->urb = NULL;
+			kfree(etd->bounce_buffer);
+			etd->bounce_buffer = NULL;
 		}
 	}
 
@@ -1248,9 +1377,9 @@ static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
 		}
 
 		if (usb_pipeisoc(etd->urb->pipe))
-			isoc_etd_done(hcd, etd->urb, etd_num);
+			isoc_etd_done(hcd, etd_num);
 		else
-			nonisoc_etd_done(hcd, etd->urb, etd_num);
 +			nonisoc_etd_done(hcd, etd_num);
 	}
 
 	/* only enable SOF interrupt if it may be needed for the kludge */
@@ -1718,6 +1847,7 @@ static int imx21_probe(struct platform_device *pdev)
 	}
 
 	imx21 = hcd_to_imx21(hcd);
+	imx21->hcd = hcd;
 	imx21->dev = &pdev->dev;
 	imx21->pdata = pdev->dev.platform_data;
 	if (!imx21->pdata)
diff --git a/drivers/usb/host/imx21-hcd.h b/drivers/usb/host/imx21-hcd.h
index 1b0d913780a5..87b29fd971b4 100644
--- a/drivers/usb/host/imx21-hcd.h
+++ b/drivers/usb/host/imx21-hcd.h
@@ -250,6 +250,7 @@
 #define USBCTRL_USB_BYP		(1 << 2)
 #define USBCTRL_HOST1_TXEN_OE	(1 << 1)
 
+#define USBOTG_DMEM		0x1000
 
 /* Values in TD blocks */
 #define TD_DIR_SETUP		0
@@ -346,8 +347,8 @@ struct td {
 	struct list_head list;
 	struct urb *urb;
 	struct usb_host_endpoint *ep;
-	dma_addr_t data;
-	unsigned long buf_addr;
+	dma_addr_t dma_handle;
+	void *cpu_buffer;
 	int len;
 	int frame;
 	int isoc_index;
@@ -360,6 +361,8 @@ struct etd_priv {
 	struct td *td;
 	struct list_head queue;
 	dma_addr_t dma_handle;
+	void *cpu_buffer;
+	void *bounce_buffer;
 	int alloc;
 	int len;
 	int dmem_size;
@@ -412,6 +415,7 @@ struct debug_isoc_trace {
 struct imx21 {
 	spinlock_t lock;
 	struct device *dev;
+	struct usb_hcd *hcd;
 	struct mx21_usbh_platform_data *pdata;
 	struct list_head dmem_list;
 	struct list_head queue_for_etd;	/* eps queued due to etd shortage */