author		Pavankumar Kondeti <pkondeti@codeaurora.org>	2011-02-18 07:13:16 -0500
committer	Greg Kroah-Hartman <gregkh@suse.de>	2011-02-25 14:33:56 -0500
commit		0e6ca1998e4c803b0be98f97a1d1e1ea562b8964 (patch)
tree		bd54db0582948c78a8759f7713e6b7b97833e3a0 /drivers/usb/gadget
parent		91f58ae61913b40da35e119017e70b3420c6f3a0 (diff)
USB: gadget: Implement hardware queuing in ci13xxx_udc
The Chipidea USB controller provides a means (the Add dTD TripWire
semaphore) for safely adding a new dTD to an active endpoint's linked
list. Make use of this feature to improve performance. Dynamically
allocate and free a dTD to support zero-length packet termination, and
honor the no_interrupt flag set by gadget drivers.
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/usb/gadget')
-rw-r--r--	drivers/usb/gadget/ci13xxx_udc.c	176
-rw-r--r--	drivers/usb/gadget/ci13xxx_udc.h	4
2 files changed, 104 insertions, 76 deletions
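In outline, the Add dTD TripWire (ATDTW) sequence that this patch introduces in _hardware_enqueue() works as sketched below. This is only an illustration of the flow visible in the diff that follows, not code beyond it: hw_cread()/hw_cwrite(), hw_ep_bit(), hw_ep_prime() and the register and bit names are the driver's own helpers, while prev, n, tmp_stat and is_ctrl are placeholder names here, and locking, DMA mapping and error handling are omitted.

	/* Append a new dTD to the tail of an active endpoint's dTD list. */
	prev->ptr->next = mReq->dma & TD_ADDR_MASK;	/* link the new dTD after the current tail */
	wmb();						/* the link must be visible before any re-prime */

	if (hw_cread(CAP_ENDPTPRIME, BIT(n)))
		goto done;				/* a prime is already pending; hardware will pick up the new dTD */

	do {						/* set the tripwire, then sample endpoint status */
		hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
		tmp_stat = hw_cread(CAP_ENDPTSTAT, BIT(n));
	} while (!hw_cread(CAP_USBCMD, USBCMD_ATDTW));	/* retry if hardware cleared ATDTW meanwhile */
	hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, 0);

	if (!tmp_stat)					/* endpoint went idle: re-prime so the new dTD is executed */
		ret = hw_ep_prime(mEp->num, mEp->dir, is_ctrl);
done:
	return ret;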
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index a1c67ae1572a..da01f333a51c 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -435,20 +435,6 @@ static int hw_ep_get_halt(int num, int dir)
 }
 
 /**
- * hw_ep_is_primed: test if endpoint is primed (execute without interruption)
- * @num: endpoint number
- * @dir: endpoint direction
- *
- * This function returns true if endpoint primed
- */
-static int hw_ep_is_primed(int num, int dir)
-{
-	u32 reg = hw_cread(CAP_ENDPTPRIME, ~0) | hw_cread(CAP_ENDPTSTAT, ~0);
-
-	return test_bit(hw_ep_bit(num, dir), (void *)&reg);
-}
-
-/**
  * hw_test_and_clear_setup_status: test & clear setup status (execute without
  *                                 interruption)
  * @n: bit number (endpoint)
@@ -472,10 +458,6 @@ static int hw_ep_prime(int num, int dir, int is_ctrl)
 {
 	int n = hw_ep_bit(num, dir);
 
-	/* the caller should flush first */
-	if (hw_ep_is_primed(num, dir))
-		return -EBUSY;
-
 	if (is_ctrl && dir == RX && hw_cread(CAP_ENDPTSETUPSTAT, BIT(num)))
 		return -EAGAIN;
 
@@ -1434,6 +1416,8 @@ static inline u8 _usb_addr(struct ci13xxx_ep *ep)
 static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
 {
 	unsigned i;
+	int ret = 0;
+	unsigned length = mReq->req.length;
 
 	trace("%p, %p", mEp, mReq);
 
@@ -1441,53 +1425,91 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
 	if (mReq->req.status == -EALREADY)
 		return -EALREADY;
 
-	if (hw_ep_is_primed(mEp->num, mEp->dir))
-		return -EBUSY;
-
 	mReq->req.status = -EALREADY;
-
-	if (mReq->req.length && !mReq->req.dma) {
+	if (length && !mReq->req.dma) {
 		mReq->req.dma = \
 			dma_map_single(mEp->device, mReq->req.buf,
-				       mReq->req.length, mEp->dir ?
-				       DMA_TO_DEVICE : DMA_FROM_DEVICE);
+				       length, mEp->dir ? DMA_TO_DEVICE :
+				       DMA_FROM_DEVICE);
 		if (mReq->req.dma == 0)
 			return -ENOMEM;
 
 		mReq->map = 1;
 	}
 
+	if (mReq->req.zero && length && (length % mEp->ep.maxpacket == 0)) {
+		mReq->zptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC,
+					    &mReq->zdma);
+		if (mReq->zptr == NULL) {
+			if (mReq->map) {
+				dma_unmap_single(mEp->device, mReq->req.dma,
+					length, mEp->dir ? DMA_TO_DEVICE :
+					DMA_FROM_DEVICE);
+				mReq->req.dma = 0;
+				mReq->map = 0;
+			}
+			return -ENOMEM;
+		}
+		memset(mReq->zptr, 0, sizeof(*mReq->zptr));
+		mReq->zptr->next = TD_TERMINATE;
+		mReq->zptr->token = TD_STATUS_ACTIVE;
+		if (!mReq->req.no_interrupt)
+			mReq->zptr->token |= TD_IOC;
+	}
 	/*
 	 * TD configuration
 	 * TODO - handle requests which spawns into several TDs
 	 */
 	memset(mReq->ptr, 0, sizeof(*mReq->ptr));
-	mReq->ptr->next |= TD_TERMINATE;
-	mReq->ptr->token = mReq->req.length << ffs_nr(TD_TOTAL_BYTES);
+	mReq->ptr->token = length << ffs_nr(TD_TOTAL_BYTES);
 	mReq->ptr->token &= TD_TOTAL_BYTES;
-	mReq->ptr->token |= TD_IOC;
 	mReq->ptr->token |= TD_STATUS_ACTIVE;
+	if (mReq->zptr) {
+		mReq->ptr->next = mReq->zdma;
+	} else {
+		mReq->ptr->next = TD_TERMINATE;
+		if (!mReq->req.no_interrupt)
+			mReq->ptr->token |= TD_IOC;
+	}
 	mReq->ptr->page[0] = mReq->req.dma;
 	for (i = 1; i < 5; i++)
 		mReq->ptr->page[i] =
 			(mReq->req.dma + i * CI13XXX_PAGE_SIZE) & ~TD_RESERVED_MASK;
 
-	/*
-	 * QH configuration
-	 * At this point it's guaranteed exclusive access to qhead
-	 * (endpt is not primed) so it's no need to use tripwire
-	 */
+	if (!list_empty(&mEp->qh.queue)) {
+		struct ci13xxx_req *mReqPrev;
+		int n = hw_ep_bit(mEp->num, mEp->dir);
+		int tmp_stat;
+
+		mReqPrev = list_entry(mEp->qh.queue.prev,
+				struct ci13xxx_req, queue);
+		if (mReqPrev->zptr)
+			mReqPrev->zptr->next = mReq->dma & TD_ADDR_MASK;
+		else
+			mReqPrev->ptr->next = mReq->dma & TD_ADDR_MASK;
+		wmb();
+		if (hw_cread(CAP_ENDPTPRIME, BIT(n)))
+			goto done;
+		do {
+			hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
+			tmp_stat = hw_cread(CAP_ENDPTSTAT, BIT(n));
+		} while (!hw_cread(CAP_USBCMD, USBCMD_ATDTW));
+		hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, 0);
+		if (tmp_stat)
+			goto done;
+	}
+
+	/* QH configuration */
 	mEp->qh.ptr->td.next = mReq->dma;    /* TERMINATE = 0 */
 	mEp->qh.ptr->td.token &= ~TD_STATUS; /* clear status */
-	if (mReq->req.zero == 0)
-		mEp->qh.ptr->cap |= QH_ZLT;
-	else
-		mEp->qh.ptr->cap &= ~QH_ZLT;
+	mEp->qh.ptr->cap |= QH_ZLT;
 
 	wmb();   /* synchronize before ep prime */
 
-	return hw_ep_prime(mEp->num, mEp->dir,
+	ret = hw_ep_prime(mEp->num, mEp->dir,
 			   mEp->type == USB_ENDPOINT_XFER_CONTROL);
+done:
+	return ret;
 }
 
 /**
@@ -1504,8 +1526,15 @@ static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
 	if (mReq->req.status != -EALREADY)
 		return -EINVAL;
 
-	if (hw_ep_is_primed(mEp->num, mEp->dir))
-		hw_ep_flush(mEp->num, mEp->dir);
+	if ((TD_STATUS_ACTIVE & mReq->ptr->token) != 0)
+		return -EBUSY;
+
+	if (mReq->zptr) {
+		if ((TD_STATUS_ACTIVE & mReq->zptr->token) != 0)
+			return -EBUSY;
+		dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma);
+		mReq->zptr = NULL;
+	}
 
 	mReq->req.status = 0;
 
@@ -1517,9 +1546,7 @@ static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
 	}
 
 	mReq->req.status = mReq->ptr->token & TD_STATUS;
-	if ((TD_STATUS_ACTIVE & mReq->req.status) != 0)
-		mReq->req.status = -ECONNRESET;
-	else if ((TD_STATUS_HALTED & mReq->req.status) != 0)
+	if ((TD_STATUS_HALTED & mReq->req.status) != 0)
 		mReq->req.status = -1;
 	else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0)
 		mReq->req.status = -1;
@@ -1783,7 +1810,7 @@ static int isr_tr_complete_low(struct ci13xxx_ep *mEp)
 __releases(mEp->lock)
 __acquires(mEp->lock)
 {
-	struct ci13xxx_req *mReq;
+	struct ci13xxx_req *mReq, *mReqTemp;
 	int retval;
 
 	trace("%p", mEp);
@@ -1791,34 +1818,25 @@ __acquires(mEp->lock)
 	if (list_empty(&mEp->qh.queue))
 		return -EINVAL;
 
-	/* pop oldest request */
-	mReq = list_entry(mEp->qh.queue.next,
-			  struct ci13xxx_req, queue);
-	list_del_init(&mReq->queue);
-
-	retval = _hardware_dequeue(mEp, mReq);
-	if (retval < 0) {
-		dbg_event(_usb_addr(mEp), "DONE", retval);
-		goto done;
-	}
-
-	dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);
-
-	if (!list_empty(&mEp->qh.queue)) {
-		struct ci13xxx_req* mReqEnq;
-
-		mReqEnq = list_entry(mEp->qh.queue.next,
-				  struct ci13xxx_req, queue);
-		_hardware_enqueue(mEp, mReqEnq);
+	list_for_each_entry_safe(mReq, mReqTemp, &mEp->qh.queue,
+			queue) {
+		retval = _hardware_dequeue(mEp, mReq);
+		if (retval < 0)
+			break;
+		list_del_init(&mReq->queue);
+		dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);
+		if (mReq->req.complete != NULL) {
+			spin_unlock(mEp->lock);
+			mReq->req.complete(&mEp->ep, &mReq->req);
+			spin_lock(mEp->lock);
+		}
 	}
 
-	if (mReq->req.complete != NULL) {
-		spin_unlock(mEp->lock);
-		mReq->req.complete(&mEp->ep, &mReq->req);
-		spin_lock(mEp->lock);
-	}
+	if (retval == EBUSY)
+		retval = 0;
+	if (retval < 0)
+		dbg_event(_usb_addr(mEp), "DONE", retval);
 
-done:
 	return retval;
 }
 
@@ -2178,15 +2196,15 @@ static int ep_queue(struct usb_ep *ep, struct usb_request *req,
 	/* push request */
 	mReq->req.status = -EINPROGRESS;
 	mReq->req.actual = 0;
-	list_add_tail(&mReq->queue, &mEp->qh.queue);
 
-	if (list_is_singular(&mEp->qh.queue))
-		retval = _hardware_enqueue(mEp, mReq);
+	retval = _hardware_enqueue(mEp, mReq);
 
 	if (retval == -EALREADY) {
 		dbg_event(_usb_addr(mEp), "QUEUE", retval);
 		retval = 0;
 	}
+	if (!retval)
+		list_add_tail(&mReq->queue, &mEp->qh.queue);
 
 done:
 	spin_unlock_irqrestore(mEp->lock, flags);
@@ -2206,19 +2224,25 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
 
 	trace("%p, %p", ep, req);
 
-	if (ep == NULL || req == NULL || mEp->desc == NULL ||
-	    list_empty(&mReq->queue) || list_empty(&mEp->qh.queue))
+	if (ep == NULL || req == NULL || mReq->req.status != -EALREADY ||
+	    mEp->desc == NULL || list_empty(&mReq->queue) ||
+	    list_empty(&mEp->qh.queue))
 		return -EINVAL;
 
 	spin_lock_irqsave(mEp->lock, flags);
 
 	dbg_event(_usb_addr(mEp), "DEQUEUE", 0);
 
-	if (mReq->req.status == -EALREADY)
-		_hardware_dequeue(mEp, mReq);
+	hw_ep_flush(mEp->num, mEp->dir);
 
 	/* pop request */
 	list_del_init(&mReq->queue);
+	if (mReq->map) {
+		dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
+			mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		mReq->req.dma = 0;
+		mReq->map = 0;
+	}
 	req->status = -ECONNRESET;
 
 	if (mReq->req.complete != NULL) {
diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h
index a2492b65f98c..3fad3adeacc8 100644
--- a/drivers/usb/gadget/ci13xxx_udc.h
+++ b/drivers/usb/gadget/ci13xxx_udc.h
@@ -33,6 +33,7 @@ struct ci13xxx_td {
 	/* 0 */
 	u32 next;
 #define TD_TERMINATE          BIT(0)
+#define TD_ADDR_MASK          (0xFFFFFFEUL << 5)
 	/* 1 */
 	u32 token;
 #define TD_STATUS             (0x00FFUL << 0)
@@ -74,6 +75,8 @@ struct ci13xxx_req {
 	struct list_head queue;
 	struct ci13xxx_td *ptr;
 	dma_addr_t dma;
+	struct ci13xxx_td *zptr;
+	dma_addr_t zdma;
 };
 
 /* Extension of usb_ep */
@@ -152,6 +155,7 @@ struct ci13xxx {
 #define USBCMD_RS             BIT(0)
 #define USBCMD_RST            BIT(1)
 #define USBCMD_SUTW           BIT(13)
+#define USBCMD_ATDTW          BIT(14)
 
 /* USBSTS & USBINTR */
 #define USBi_UI               BIT(0)