about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorSvetoslav Neykov <svetoslav@neykov.name>2013-03-30 06:54:03 -0400
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2013-03-30 11:20:47 -0400
commit938d323f14480ca8dcb9dbbe48add35a09246d09 (patch)
tree7587f55fc821abb8940590430270f64f2612e94d
parentefccca4ff59e672a6b50e99f0f4cb61b60d09ec8 (diff)
usb: chipidea: big-endian support
Convert between big-endian and little-endian format when accessing the usb controller structures which are little-endian by specification. Fix cases where the little-endian memory layout is taken for granted. The patch doesn't have any effect on the already supported little-endian architectures.

Signed-off-by: Svetoslav Neykov <svetoslav@neykov.name>
[Alex: minor cosmetic fixes]
Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--drivers/usb/chipidea/core.c2
-rw-r--r--drivers/usb/chipidea/udc.c59
2 files changed, 32 insertions, 29 deletions
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 114d4c43abc6..450107e5f657 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -178,7 +178,7 @@ static int hw_device_init(struct ci13xxx *ci, void __iomem *base)
178 178
179 ci->hw_bank.cap = ci->hw_bank.abs; 179 ci->hw_bank.cap = ci->hw_bank.abs;
180 ci->hw_bank.cap += ci->platdata->capoffset; 180 ci->hw_bank.cap += ci->platdata->capoffset;
181 ci->hw_bank.op = ci->hw_bank.cap + ioread8(ci->hw_bank.cap); 181 ci->hw_bank.op = ci->hw_bank.cap + (ioread32(ci->hw_bank.cap) & 0xff);
182 182
183 hw_alloc_regmap(ci, false); 183 hw_alloc_regmap(ci, false);
184 reg = hw_read(ci, CAP_HCCPARAMS, HCCPARAMS_LEN) >> 184 reg = hw_read(ci, CAP_HCCPARAMS, HCCPARAMS_LEN) >>
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 32e6c99b8b98..ff393e1ecf4a 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -404,10 +404,10 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
404 return -ENOMEM; 404 return -ENOMEM;
405 405
406 memset(mReq->zptr, 0, sizeof(*mReq->zptr)); 406 memset(mReq->zptr, 0, sizeof(*mReq->zptr));
407 mReq->zptr->next = TD_TERMINATE; 407 mReq->zptr->next = cpu_to_le32(TD_TERMINATE);
408 mReq->zptr->token = TD_STATUS_ACTIVE; 408 mReq->zptr->token = cpu_to_le32(TD_STATUS_ACTIVE);
409 if (!mReq->req.no_interrupt) 409 if (!mReq->req.no_interrupt)
410 mReq->zptr->token |= TD_IOC; 410 mReq->zptr->token |= cpu_to_le32(TD_IOC);
411 } 411 }
412 ret = usb_gadget_map_request(&ci->gadget, &mReq->req, mEp->dir); 412 ret = usb_gadget_map_request(&ci->gadget, &mReq->req, mEp->dir);
413 if (ret) 413 if (ret)
@@ -418,32 +418,35 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
418 * TODO - handle requests which spawns into several TDs 418 * TODO - handle requests which spawns into several TDs
419 */ 419 */
420 memset(mReq->ptr, 0, sizeof(*mReq->ptr)); 420 memset(mReq->ptr, 0, sizeof(*mReq->ptr));
421 mReq->ptr->token = length << __ffs(TD_TOTAL_BYTES); 421 mReq->ptr->token = cpu_to_le32(length << __ffs(TD_TOTAL_BYTES));
422 mReq->ptr->token &= TD_TOTAL_BYTES; 422 mReq->ptr->token &= cpu_to_le32(TD_TOTAL_BYTES);
423 mReq->ptr->token |= TD_STATUS_ACTIVE; 423 mReq->ptr->token |= cpu_to_le32(TD_STATUS_ACTIVE);
424 if (mReq->zptr) { 424 if (mReq->zptr) {
425 mReq->ptr->next = mReq->zdma; 425 mReq->ptr->next = cpu_to_le32(mReq->zdma);
426 } else { 426 } else {
427 mReq->ptr->next = TD_TERMINATE; 427 mReq->ptr->next = cpu_to_le32(TD_TERMINATE);
428 if (!mReq->req.no_interrupt) 428 if (!mReq->req.no_interrupt)
429 mReq->ptr->token |= TD_IOC; 429 mReq->ptr->token |= cpu_to_le32(TD_IOC);
430 }
431 mReq->ptr->page[0] = cpu_to_le32(mReq->req.dma);
432 for (i = 1; i < 5; i++) {
433 u32 page = mReq->req.dma + i * CI13XXX_PAGE_SIZE;
434 page &= ~TD_RESERVED_MASK;
435 mReq->ptr->page[i] = cpu_to_le32(page);
430 } 436 }
431 mReq->ptr->page[0] = mReq->req.dma;
432 for (i = 1; i < 5; i++)
433 mReq->ptr->page[i] =
434 (mReq->req.dma + i * CI13XXX_PAGE_SIZE) & ~TD_RESERVED_MASK;
435 437
436 if (!list_empty(&mEp->qh.queue)) { 438 if (!list_empty(&mEp->qh.queue)) {
437 struct ci13xxx_req *mReqPrev; 439 struct ci13xxx_req *mReqPrev;
438 int n = hw_ep_bit(mEp->num, mEp->dir); 440 int n = hw_ep_bit(mEp->num, mEp->dir);
439 int tmp_stat; 441 int tmp_stat;
442 u32 next = mReq->dma & TD_ADDR_MASK;
440 443
441 mReqPrev = list_entry(mEp->qh.queue.prev, 444 mReqPrev = list_entry(mEp->qh.queue.prev,
442 struct ci13xxx_req, queue); 445 struct ci13xxx_req, queue);
443 if (mReqPrev->zptr) 446 if (mReqPrev->zptr)
444 mReqPrev->zptr->next = mReq->dma & TD_ADDR_MASK; 447 mReqPrev->zptr->next = cpu_to_le32(next);
445 else 448 else
446 mReqPrev->ptr->next = mReq->dma & TD_ADDR_MASK; 449 mReqPrev->ptr->next = cpu_to_le32(next);
447 wmb(); 450 wmb();
448 if (hw_read(ci, OP_ENDPTPRIME, BIT(n))) 451 if (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
449 goto done; 452 goto done;
@@ -457,9 +460,9 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
457 } 460 }
458 461
459 /* QH configuration */ 462 /* QH configuration */
460 mEp->qh.ptr->td.next = mReq->dma; /* TERMINATE = 0 */ 463 mEp->qh.ptr->td.next = cpu_to_le32(mReq->dma); /* TERMINATE = 0 */
461 mEp->qh.ptr->td.token &= ~TD_STATUS; /* clear status */ 464 mEp->qh.ptr->td.token &= cpu_to_le32(~TD_STATUS); /* clear status */
462 mEp->qh.ptr->cap |= QH_ZLT; 465 mEp->qh.ptr->cap |= cpu_to_le32(QH_ZLT);
463 466
464 wmb(); /* synchronize before ep prime */ 467 wmb(); /* synchronize before ep prime */
465 468
@@ -481,11 +484,11 @@ static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
481 if (mReq->req.status != -EALREADY) 484 if (mReq->req.status != -EALREADY)
482 return -EINVAL; 485 return -EINVAL;
483 486
484 if ((TD_STATUS_ACTIVE & mReq->ptr->token) != 0) 487 if ((cpu_to_le32(TD_STATUS_ACTIVE) & mReq->ptr->token) != 0)
485 return -EBUSY; 488 return -EBUSY;
486 489
487 if (mReq->zptr) { 490 if (mReq->zptr) {
488 if ((TD_STATUS_ACTIVE & mReq->zptr->token) != 0) 491 if ((cpu_to_le32(TD_STATUS_ACTIVE) & mReq->zptr->token) != 0)
489 return -EBUSY; 492 return -EBUSY;
490 dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma); 493 dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma);
491 mReq->zptr = NULL; 494 mReq->zptr = NULL;
@@ -495,7 +498,7 @@ static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
495 498
496 usb_gadget_unmap_request(&mEp->ci->gadget, &mReq->req, mEp->dir); 499 usb_gadget_unmap_request(&mEp->ci->gadget, &mReq->req, mEp->dir);
497 500
498 mReq->req.status = mReq->ptr->token & TD_STATUS; 501 mReq->req.status = le32_to_cpu(mReq->ptr->token) & TD_STATUS;
499 if ((TD_STATUS_HALTED & mReq->req.status) != 0) 502 if ((TD_STATUS_HALTED & mReq->req.status) != 0)
500 mReq->req.status = -1; 503 mReq->req.status = -1;
501 else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0) 504 else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0)
@@ -503,7 +506,7 @@ static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
503 else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0) 506 else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0)
504 mReq->req.status = -1; 507 mReq->req.status = -1;
505 508
506 mReq->req.actual = mReq->ptr->token & TD_TOTAL_BYTES; 509 mReq->req.actual = le32_to_cpu(mReq->ptr->token) & TD_TOTAL_BYTES;
507 mReq->req.actual >>= __ffs(TD_TOTAL_BYTES); 510 mReq->req.actual >>= __ffs(TD_TOTAL_BYTES);
508 mReq->req.actual = mReq->req.length - mReq->req.actual; 511 mReq->req.actual = mReq->req.length - mReq->req.actual;
509 mReq->req.actual = mReq->req.status ? 0 : mReq->req.actual; 512 mReq->req.actual = mReq->req.status ? 0 : mReq->req.actual;
@@ -1004,15 +1007,15 @@ static int ep_enable(struct usb_ep *ep,
1004 mEp->qh.ptr->cap = 0; 1007 mEp->qh.ptr->cap = 0;
1005 1008
1006 if (mEp->type == USB_ENDPOINT_XFER_CONTROL) 1009 if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
1007 mEp->qh.ptr->cap |= QH_IOS; 1010 mEp->qh.ptr->cap |= cpu_to_le32(QH_IOS);
1008 else if (mEp->type == USB_ENDPOINT_XFER_ISOC) 1011 else if (mEp->type == USB_ENDPOINT_XFER_ISOC)
1009 mEp->qh.ptr->cap &= ~QH_MULT; 1012 mEp->qh.ptr->cap &= cpu_to_le32(~QH_MULT);
1010 else 1013 else
1011 mEp->qh.ptr->cap &= ~QH_ZLT; 1014 mEp->qh.ptr->cap &= cpu_to_le32(~QH_ZLT);
1012 1015
1013 mEp->qh.ptr->cap |= 1016 mEp->qh.ptr->cap |= cpu_to_le32((mEp->ep.maxpacket << __ffs(QH_MAX_PKT))
1014 (mEp->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT; 1017 & QH_MAX_PKT);
1015 mEp->qh.ptr->td.next |= TD_TERMINATE; /* needed? */ 1018 mEp->qh.ptr->td.next |= cpu_to_le32(TD_TERMINATE); /* needed? */
1016 1019
1017 /* 1020 /*
1018 * Enable endpoints in the HW other than ep0 as ep0 1021 * Enable endpoints in the HW other than ep0 as ep0