author		Pavankumar Kondeti <pkondeti@codeaurora.org>	2011-01-10 22:49:22 -0500
committer	Greg Kroah-Hartman <gregkh@suse.de>		2011-01-22 21:49:24 -0500
commit		ca9cfea09fc5802074f79d086547c6363ddc894b
tree		2f7efabdb50682043d7b236d9a0740c982a47593
parent		49d3df53a80deed2251b91f50ae9e1c5caf7ded7
USB: gadget: Fix endpoint representation in ci13xxx_udc
Fix a bug where only half of the endpoints supported by the hardware are
exposed to the gadget. If the DEN field in the DCCPARAMS register reads 'N',
then 'N' IN endpoints and 'N' OUT endpoints can be supported, but only 'N'
bidirectional endpoints were added to the gadget ep_list. This patch also
ensures that the data and handshake transactions of a previous setup packet
are flushed upon a new setup packet arrival on ep0.

Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
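As a quick orientation before the diff: with this change the controller's DEN
field (number of hardware endpoint pairs) is doubled into hw_ep_max, and
ci13xxx_ep[] holds all OUT endpoints first and all IN endpoints second, so
endpoint i OUT sits at index i and endpoint i IN at index i + hw_ep_max/2
(ep0out at 0, ep0in at 16 for a 16-pair controller). The sketch below only
illustrates that arithmetic; ep_index() is a hypothetical helper, not part of
the driver.

/*
 * Illustrative only: the endpoint index layout introduced by this patch.
 * ep_index() is a made-up helper; the driver itself open-codes the math.
 */
#include <stdio.h>

#define RX 0			/* OUT direction, usable as an array index */
#define TX 1			/* IN direction, usable as an array index */

static unsigned hw_ep_max;	/* 2 * DEN, cached by hw_device_init() */

/* map (endpoint number, direction) to an index into ci13xxx_ep[] */
static unsigned ep_index(unsigned num, unsigned dir)
{
	return num + dir * (hw_ep_max / 2);
}

int main(void)
{
	unsigned den = 16;	/* DCCPARAMS.DEN as read from the controller */

	hw_ep_max = den * 2;

	printf("ep0out -> ci13xxx_ep[%u]\n", ep_index(0, RX));
	printf("ep0in  -> ci13xxx_ep[%u]\n", ep_index(0, TX));
	printf("ep5in  -> ci13xxx_ep[%u]\n", ep_index(5, TX));
	return 0;
}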
-rw-r--r--	drivers/usb/gadget/ci13xxx_udc.c	264
-rw-r--r--	drivers/usb/gadget/ci13xxx_udc.h	9
2 files changed, 142 insertions, 131 deletions
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index b0b90621dc1b..a1c67ae1572a 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -76,10 +76,21 @@ static DEFINE_SPINLOCK(udc_lock);
 
 /* control endpoint description */
 static const struct usb_endpoint_descriptor
-ctrl_endpt_desc = {
+ctrl_endpt_out_desc = {
 	.bLength = USB_DT_ENDPOINT_SIZE,
 	.bDescriptorType = USB_DT_ENDPOINT,
 
+	.bEndpointAddress = USB_DIR_OUT,
+	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+	.wMaxPacketSize = cpu_to_le16(CTRL_PAYLOAD_MAX),
+};
+
+static const struct usb_endpoint_descriptor
+ctrl_endpt_in_desc = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+
+	.bEndpointAddress = USB_DIR_IN,
 	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
 	.wMaxPacketSize = cpu_to_le16(CTRL_PAYLOAD_MAX),
 };
@@ -265,10 +276,10 @@ static int hw_device_init(void __iomem *base)
 	hw_bank.size /= sizeof(u32);
 
 	reg = hw_aread(ABS_DCCPARAMS, DCCPARAMS_DEN) >> ffs_nr(DCCPARAMS_DEN);
-	if (reg == 0 || reg > ENDPT_MAX)
-		return -ENODEV;
+	hw_ep_max = reg * 2; /* cache hw ENDPT_MAX */
 
-	hw_ep_max = reg; /* cache hw ENDPT_MAX */
+	if (hw_ep_max == 0 || hw_ep_max > ENDPT_MAX)
+		return -ENODEV;
 
 	/* setup lock mode ? */
 
@@ -1197,16 +1208,17 @@ static ssize_t show_qheads(struct device *dev, struct device_attribute *attr,
 	}
 
 	spin_lock_irqsave(udc->lock, flags);
-	for (i = 0; i < hw_ep_max; i++) {
-		struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
+	for (i = 0; i < hw_ep_max/2; i++) {
+		struct ci13xxx_ep *mEpRx = &udc->ci13xxx_ep[i];
+		struct ci13xxx_ep *mEpTx = &udc->ci13xxx_ep[i + hw_ep_max/2];
 		n += scnprintf(buf + n, PAGE_SIZE - n,
 			       "EP=%02i: RX=%08X TX=%08X\n",
-			       i, (u32)mEp->qh[RX].dma, (u32)mEp->qh[TX].dma);
+			       i, (u32)mEpRx->qh.dma, (u32)mEpTx->qh.dma);
 		for (j = 0; j < (sizeof(struct ci13xxx_qh)/sizeof(u32)); j++) {
 			n += scnprintf(buf + n, PAGE_SIZE - n,
 				       " %04X: %08X %08X\n", j,
-				       *((u32 *)mEp->qh[RX].ptr + j),
-				       *((u32 *)mEp->qh[TX].ptr + j));
+				       *((u32 *)mEpRx->qh.ptr + j),
+				       *((u32 *)mEpTx->qh.ptr + j));
 		}
 	}
 	spin_unlock_irqrestore(udc->lock, flags);
@@ -1293,7 +1305,7 @@ static ssize_t show_requests(struct device *dev, struct device_attribute *attr,
 	unsigned long flags;
 	struct list_head *ptr = NULL;
 	struct ci13xxx_req *req = NULL;
-	unsigned i, j, k, n = 0, qSize = sizeof(struct ci13xxx_td)/sizeof(u32);
+	unsigned i, j, n = 0, qSize = sizeof(struct ci13xxx_td)/sizeof(u32);
 
 	dbg_trace("[%s] %p\n", __func__, buf);
 	if (attr == NULL || buf == NULL) {
@@ -1303,22 +1315,20 @@ static ssize_t show_requests(struct device *dev, struct device_attribute *attr,
 
 	spin_lock_irqsave(udc->lock, flags);
 	for (i = 0; i < hw_ep_max; i++)
-		for (k = RX; k <= TX; k++)
-			list_for_each(ptr, &udc->ci13xxx_ep[i].qh[k].queue)
-			{
-				req = list_entry(ptr,
-						 struct ci13xxx_req, queue);
-
-				n += scnprintf(buf + n, PAGE_SIZE - n,
-					       "EP=%02i: TD=%08X %s\n",
-					       i, (u32)req->dma,
-					       ((k == RX) ? "RX" : "TX"));
-
-				for (j = 0; j < qSize; j++)
-					n += scnprintf(buf + n, PAGE_SIZE - n,
-						       " %04X: %08X\n", j,
-						       *((u32 *)req->ptr + j));
-			}
+		list_for_each(ptr, &udc->ci13xxx_ep[i].qh.queue)
+		{
+			req = list_entry(ptr, struct ci13xxx_req, queue);
+
+			n += scnprintf(buf + n, PAGE_SIZE - n,
+				       "EP=%02i: TD=%08X %s\n",
+				       i % hw_ep_max/2, (u32)req->dma,
+				       ((i < hw_ep_max/2) ? "RX" : "TX"));
+
+			for (j = 0; j < qSize; j++)
+				n += scnprintf(buf + n, PAGE_SIZE - n,
+					       " %04X: %08X\n", j,
+					       *((u32 *)req->ptr + j));
+		}
 	spin_unlock_irqrestore(udc->lock, flags);
 
 	return n;
@@ -1467,12 +1477,12 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
 	 * At this point it's guaranteed exclusive access to qhead
 	 * (endpt is not primed) so it's no need to use tripwire
 	 */
-	mEp->qh[mEp->dir].ptr->td.next = mReq->dma; /* TERMINATE = 0 */
-	mEp->qh[mEp->dir].ptr->td.token &= ~TD_STATUS; /* clear status */
+	mEp->qh.ptr->td.next = mReq->dma; /* TERMINATE = 0 */
+	mEp->qh.ptr->td.token &= ~TD_STATUS; /* clear status */
 	if (mReq->req.zero == 0)
-		mEp->qh[mEp->dir].ptr->cap |= QH_ZLT;
+		mEp->qh.ptr->cap |= QH_ZLT;
 	else
-		mEp->qh[mEp->dir].ptr->cap &= ~QH_ZLT;
+		mEp->qh.ptr->cap &= ~QH_ZLT;
 
 	wmb(); /* synchronize before ep prime */
 
@@ -1542,11 +1552,11 @@ __acquires(mEp->lock)
 
 	hw_ep_flush(mEp->num, mEp->dir);
 
-	while (!list_empty(&mEp->qh[mEp->dir].queue)) {
+	while (!list_empty(&mEp->qh.queue)) {
 
 		/* pop oldest request */
 		struct ci13xxx_req *mReq = \
-			list_entry(mEp->qh[mEp->dir].queue.next,
+			list_entry(mEp->qh.queue.next,
 				   struct ci13xxx_req, queue);
 		list_del_init(&mReq->queue);
 		mReq->req.status = -ESHUTDOWN;
@@ -1571,8 +1581,6 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
 {
 	struct usb_ep *ep;
 	struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget);
-	struct ci13xxx_ep *mEp = container_of(gadget->ep0,
-					      struct ci13xxx_ep, ep);
 
 	trace("%p", gadget);
 
@@ -1583,7 +1591,8 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
 	gadget_for_each_ep(ep, gadget) {
 		usb_ep_fifo_flush(ep);
 	}
-	usb_ep_fifo_flush(gadget->ep0);
+	usb_ep_fifo_flush(&udc->ep0out.ep);
+	usb_ep_fifo_flush(&udc->ep0in.ep);
 
 	udc->driver->disconnect(gadget);
 
@@ -1591,11 +1600,12 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
 	gadget_for_each_ep(ep, gadget) {
 		usb_ep_disable(ep);
 	}
-	usb_ep_disable(gadget->ep0);
+	usb_ep_disable(&udc->ep0out.ep);
+	usb_ep_disable(&udc->ep0in.ep);
 
-	if (mEp->status != NULL) {
-		usb_ep_free_request(gadget->ep0, mEp->status);
-		mEp->status = NULL;
+	if (udc->status != NULL) {
+		usb_ep_free_request(&udc->ep0in.ep, udc->status);
+		udc->status = NULL;
 	}
 
 	return 0;
@@ -1614,7 +1624,6 @@ static void isr_reset_handler(struct ci13xxx *udc)
 __releases(udc->lock)
 __acquires(udc->lock)
 {
-	struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[0];
 	int retval;
 
 	trace("%p", udc);
@@ -1635,11 +1644,15 @@ __acquires(udc->lock)
 	if (retval)
 		goto done;
 
-	retval = usb_ep_enable(&mEp->ep, &ctrl_endpt_desc);
+	retval = usb_ep_enable(&udc->ep0out.ep, &ctrl_endpt_out_desc);
+	if (retval)
+		goto done;
+
+	retval = usb_ep_enable(&udc->ep0in.ep, &ctrl_endpt_in_desc);
 	if (!retval) {
-		mEp->status = usb_ep_alloc_request(&mEp->ep, GFP_ATOMIC);
-		if (mEp->status == NULL) {
-			usb_ep_disable(&mEp->ep);
+		udc->status = usb_ep_alloc_request(&udc->ep0in.ep, GFP_ATOMIC);
+		if (udc->status == NULL) {
+			usb_ep_disable(&udc->ep0out.ep);
 			retval = -ENOMEM;
 		}
 	}
@@ -1672,16 +1685,17 @@ static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
 
 /**
  * isr_get_status_response: get_status request response
- * @ep: endpoint
+ * @udc: udc struct
  * @setup: setup request packet
  *
  * This function returns an error code
  */
-static int isr_get_status_response(struct ci13xxx_ep *mEp,
+static int isr_get_status_response(struct ci13xxx *udc,
 				struct usb_ctrlrequest *setup)
 __releases(mEp->lock)
 __acquires(mEp->lock)
 {
+	struct ci13xxx_ep *mEp = &udc->ep0in;
 	struct usb_request *req = NULL;
 	gfp_t gfp_flags = GFP_ATOMIC;
 	int dir, num, retval;
@@ -1736,27 +1750,23 @@ __acquires(mEp->lock)
 
 /**
  * isr_setup_status_phase: queues the status phase of a setup transation
- * @mEp: endpoint
+ * @udc: udc struct
  *
  * This function returns an error code
  */
-static int isr_setup_status_phase(struct ci13xxx_ep *mEp)
+static int isr_setup_status_phase(struct ci13xxx *udc)
 __releases(mEp->lock)
 __acquires(mEp->lock)
 {
 	int retval;
+	struct ci13xxx_ep *mEp;
 
-	trace("%p", mEp);
-
-	/* mEp is always valid & configured */
-
-	if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
-		mEp->dir = (mEp->dir == TX) ? RX : TX;
+	trace("%p", udc);
 
-	mEp->status->no_interrupt = 1;
+	mEp = (udc->ep0_dir == TX) ? &udc->ep0out : &udc->ep0in;
 
 	spin_unlock(mEp->lock);
-	retval = usb_ep_queue(&mEp->ep, mEp->status, GFP_ATOMIC);
+	retval = usb_ep_queue(&mEp->ep, udc->status, GFP_ATOMIC);
 	spin_lock(mEp->lock);
 
 	return retval;
@@ -1778,11 +1788,11 @@ __acquires(mEp->lock)
 
 	trace("%p", mEp);
 
-	if (list_empty(&mEp->qh[mEp->dir].queue))
+	if (list_empty(&mEp->qh.queue))
 		return -EINVAL;
 
 	/* pop oldest request */
-	mReq = list_entry(mEp->qh[mEp->dir].queue.next,
+	mReq = list_entry(mEp->qh.queue.next,
 			  struct ci13xxx_req, queue);
 	list_del_init(&mReq->queue);
 
@@ -1794,10 +1804,10 @@ __acquires(mEp->lock)
 
 	dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);
 
-	if (!list_empty(&mEp->qh[mEp->dir].queue)) {
+	if (!list_empty(&mEp->qh.queue)) {
 		struct ci13xxx_req* mReqEnq;
 
-		mReqEnq = list_entry(mEp->qh[mEp->dir].queue.next,
+		mReqEnq = list_entry(mEp->qh.queue.next,
 				     struct ci13xxx_req, queue);
 		_hardware_enqueue(mEp, mReqEnq);
 	}
@@ -1836,16 +1846,14 @@ __acquires(udc->lock)
 		int type, num, err = -EINVAL;
 		struct usb_ctrlrequest req;
 
-
 		if (mEp->desc == NULL)
 			continue; /* not configured */
 
-		if ((mEp->dir == RX && hw_test_and_clear_complete(i)) ||
-		    (mEp->dir == TX && hw_test_and_clear_complete(i + 16))) {
+		if (hw_test_and_clear_complete(i)) {
 			err = isr_tr_complete_low(mEp);
 			if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
 				if (err > 0) /* needs status phase */
-					err = isr_setup_status_phase(mEp);
+					err = isr_setup_status_phase(udc);
 				if (err < 0) {
 					dbg_event(_usb_addr(mEp),
 						  "ERROR", err);
@@ -1866,15 +1874,22 @@ __acquires(udc->lock)
 			continue;
 		}
 
+		/*
+		 * Flush data and handshake transactions of previous
+		 * setup packet.
+		 */
+		_ep_nuke(&udc->ep0out);
+		_ep_nuke(&udc->ep0in);
+
 		/* read_setup_packet */
 		do {
 			hw_test_and_set_setup_guard();
-			memcpy(&req, &mEp->qh[RX].ptr->setup, sizeof(req));
+			memcpy(&req, &mEp->qh.ptr->setup, sizeof(req));
 		} while (!hw_test_and_clear_setup_guard());
 
 		type = req.bRequestType;
 
-		mEp->dir = (type & USB_DIR_IN) ? TX : RX;
+		udc->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
 
 		dbg_setup(_usb_addr(mEp), &req);
 
@@ -1895,7 +1910,7 @@ __acquires(udc->lock)
 				if (err)
 					break;
 			}
-			err = isr_setup_status_phase(mEp);
+			err = isr_setup_status_phase(udc);
 			break;
 		case USB_REQ_GET_STATUS:
 			if (type != (USB_DIR_IN|USB_RECIP_DEVICE) &&
@@ -1905,7 +1920,7 @@ __acquires(udc->lock)
 			if (le16_to_cpu(req.wLength) != 2 ||
 			    le16_to_cpu(req.wValue) != 0)
 				break;
-			err = isr_get_status_response(mEp, &req);
+			err = isr_get_status_response(udc, &req);
 			break;
 		case USB_REQ_SET_ADDRESS:
 			if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
@@ -1916,7 +1931,7 @@ __acquires(udc->lock)
 			err = hw_usb_set_address((u8)le16_to_cpu(req.wValue));
 			if (err)
 				break;
-			err = isr_setup_status_phase(mEp);
+			err = isr_setup_status_phase(udc);
 			break;
 		case USB_REQ_SET_FEATURE:
 			if (type != (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
@@ -1932,12 +1947,12 @@ __acquires(udc->lock)
 			spin_lock(udc->lock);
 			if (err)
 				break;
-			err = isr_setup_status_phase(mEp);
+			err = isr_setup_status_phase(udc);
 			break;
 		default:
 delegate:
 			if (req.wLength == 0) /* no data phase */
-				mEp->dir = TX;
+				udc->ep0_dir = TX;
 
 			spin_unlock(udc->lock);
 			err = udc->driver->setup(&udc->gadget, &req);
@@ -1968,7 +1983,7 @@ static int ep_enable(struct usb_ep *ep,
 		     const struct usb_endpoint_descriptor *desc)
 {
 	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
-	int direction, retval = 0;
+	int retval = 0;
 	unsigned long flags;
 
 	trace("%p, %p", ep, desc);
@@ -1982,7 +1997,7 @@ static int ep_enable(struct usb_ep *ep,
 
 	mEp->desc = desc;
 
-	if (!list_empty(&mEp->qh[mEp->dir].queue))
+	if (!list_empty(&mEp->qh.queue))
 		warn("enabling a non-empty endpoint!");
 
 	mEp->dir = usb_endpoint_dir_in(desc) ? TX : RX;
@@ -1991,29 +2006,22 @@ static int ep_enable(struct usb_ep *ep,
 
 	mEp->ep.maxpacket = __constant_le16_to_cpu(desc->wMaxPacketSize);
 
-	direction = mEp->dir;
-	do {
-		dbg_event(_usb_addr(mEp), "ENABLE", 0);
-
-		mEp->qh[mEp->dir].ptr->cap = 0;
-
-		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
-			mEp->qh[mEp->dir].ptr->cap |= QH_IOS;
-		else if (mEp->type == USB_ENDPOINT_XFER_ISOC)
-			mEp->qh[mEp->dir].ptr->cap &= ~QH_MULT;
-		else
-			mEp->qh[mEp->dir].ptr->cap &= ~QH_ZLT;
+	dbg_event(_usb_addr(mEp), "ENABLE", 0);
 
-		mEp->qh[mEp->dir].ptr->cap |=
-			(mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
-		mEp->qh[mEp->dir].ptr->td.next |= TD_TERMINATE; /* needed? */
+	mEp->qh.ptr->cap = 0;
 
-		retval |= hw_ep_enable(mEp->num, mEp->dir, mEp->type);
+	if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+		mEp->qh.ptr->cap |= QH_IOS;
+	else if (mEp->type == USB_ENDPOINT_XFER_ISOC)
+		mEp->qh.ptr->cap &= ~QH_MULT;
+	else
+		mEp->qh.ptr->cap &= ~QH_ZLT;
 
-		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
-			mEp->dir = (mEp->dir == TX) ? RX : TX;
+	mEp->qh.ptr->cap |=
+		(mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
+	mEp->qh.ptr->td.next |= TD_TERMINATE; /* needed? */
 
-	} while (mEp->dir != direction);
+	retval |= hw_ep_enable(mEp->num, mEp->dir, mEp->type);
 
 	spin_unlock_irqrestore(mEp->lock, flags);
 	return retval;
@@ -2146,7 +2154,7 @@ static int ep_queue(struct usb_ep *ep, struct usb_request *req,
 	spin_lock_irqsave(mEp->lock, flags);
 
 	if (mEp->type == USB_ENDPOINT_XFER_CONTROL &&
-	    !list_empty(&mEp->qh[mEp->dir].queue)) {
+	    !list_empty(&mEp->qh.queue)) {
 		_ep_nuke(mEp);
 		retval = -EOVERFLOW;
 		warn("endpoint ctrl %X nuked", _usb_addr(mEp));
@@ -2170,9 +2178,9 @@ static int ep_queue(struct usb_ep *ep, struct usb_request *req,
 	/* push request */
 	mReq->req.status = -EINPROGRESS;
 	mReq->req.actual = 0;
-	list_add_tail(&mReq->queue, &mEp->qh[mEp->dir].queue);
+	list_add_tail(&mReq->queue, &mEp->qh.queue);
 
-	if (list_is_singular(&mEp->qh[mEp->dir].queue))
+	if (list_is_singular(&mEp->qh.queue))
 		retval = _hardware_enqueue(mEp, mReq);
 
 	if (retval == -EALREADY) {
@@ -2199,7 +2207,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
 	trace("%p, %p", ep, req);
 
 	if (ep == NULL || req == NULL || mEp->desc == NULL ||
-	    list_empty(&mReq->queue) || list_empty(&mEp->qh[mEp->dir].queue))
+	    list_empty(&mReq->queue) || list_empty(&mEp->qh.queue))
 		return -EINVAL;
 
 	spin_lock_irqsave(mEp->lock, flags);
@@ -2244,7 +2252,7 @@ static int ep_set_halt(struct usb_ep *ep, int value)
 #ifndef STALL_IN
 	/* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
 	if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX &&
-	    !list_empty(&mEp->qh[mEp->dir].queue)) {
+	    !list_empty(&mEp->qh.queue)) {
 		spin_unlock_irqrestore(mEp->lock, flags);
 		return -EAGAIN;
 	}
@@ -2355,7 +2363,7 @@ static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active)
 	if (is_active) {
 		pm_runtime_get_sync(&_gadget->dev);
 		hw_device_reset(udc);
-		hw_device_state(udc->ci13xxx_ep[0].qh[RX].dma);
+		hw_device_state(udc->ep0out.qh.dma);
 	} else {
 		hw_device_state(0);
 		if (udc->udc_driver->notify_event)
@@ -2390,7 +2398,8 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
 			    int (*bind)(struct usb_gadget *))
 {
 	struct ci13xxx *udc = _udc;
-	unsigned long i, k, flags;
+	unsigned long flags;
+	int i, j;
 	int retval = -ENOMEM;
 
 	trace("%p", driver);
@@ -2430,41 +2439,43 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
 	udc->gadget.dev.driver = NULL;
 
 	retval = 0;
-	for (i = 0; i < hw_ep_max; i++) {
-		struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
-
-		scnprintf(mEp->name, sizeof(mEp->name), "ep%i", (int)i);
-
-		mEp->lock = udc->lock;
-		mEp->device = &udc->gadget.dev;
-		mEp->td_pool = udc->td_pool;
-
-		mEp->ep.name = mEp->name;
-		mEp->ep.ops = &usb_ep_ops;
-		mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;
-
-		/* this allocation cannot be random */
-		for (k = RX; k <= TX; k++) {
-			INIT_LIST_HEAD(&mEp->qh[k].queue);
-			spin_unlock_irqrestore(udc->lock, flags);
-			mEp->qh[k].ptr = dma_pool_alloc(udc->qh_pool,
-							GFP_KERNEL,
-							&mEp->qh[k].dma);
-			spin_lock_irqsave(udc->lock, flags);
-			if (mEp->qh[k].ptr == NULL)
-				retval = -ENOMEM;
-			else
-				memset(mEp->qh[k].ptr, 0,
-				       sizeof(*mEp->qh[k].ptr));
-		}
-		if (i == 0)
-			udc->gadget.ep0 = &mEp->ep;
-		else
-			list_add_tail(&mEp->ep.ep_list, &udc->gadget.ep_list);
+	for (i = 0; i < hw_ep_max/2; i++) {
+		for (j = RX; j <= TX; j++) {
+			int k = i + j * hw_ep_max/2;
+			struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[k];
+
+			scnprintf(mEp->name, sizeof(mEp->name), "ep%i%s", i,
+					(j == TX) ? "in" : "out");
+
+			mEp->lock = udc->lock;
+			mEp->device = &udc->gadget.dev;
+			mEp->td_pool = udc->td_pool;
+
+			mEp->ep.name = mEp->name;
+			mEp->ep.ops = &usb_ep_ops;
+			mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;
+
+			INIT_LIST_HEAD(&mEp->qh.queue);
+			spin_unlock_irqrestore(udc->lock, flags);
+			mEp->qh.ptr = dma_pool_alloc(udc->qh_pool, GFP_KERNEL,
+						     &mEp->qh.dma);
+			spin_lock_irqsave(udc->lock, flags);
+			if (mEp->qh.ptr == NULL)
+				retval = -ENOMEM;
+			else
+				memset(mEp->qh.ptr, 0, sizeof(*mEp->qh.ptr));
+
+			/* skip ep0 out and in endpoints */
+			if (i == 0)
+				continue;
+
+			list_add_tail(&mEp->ep.ep_list, &udc->gadget.ep_list);
+		}
 	}
 	if (retval)
 		goto done;
 
+	udc->gadget.ep0 = &udc->ep0in.ep;
 	/* bind gadget */
 	driver->driver.bus = NULL;
 	udc->gadget.dev.driver = &driver->driver;
@@ -2490,7 +2501,7 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
 		}
 	}
 
-	retval = hw_device_state(udc->ci13xxx_ep[0].qh[RX].dma);
+	retval = hw_device_state(udc->ep0out.qh.dma);
 	if (retval)
 		pm_runtime_put_sync(&udc->gadget.dev);
 
@@ -2508,7 +2519,7 @@ EXPORT_SYMBOL(usb_gadget_probe_driver);
 int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
 {
 	struct ci13xxx *udc = _udc;
-	unsigned long i, k, flags;
+	unsigned long i, flags;
 
 	trace("%p", driver);
 
@@ -2544,17 +2555,14 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
 	for (i = 0; i < hw_ep_max; i++) {
 		struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
 
-		if (i == 0)
-			udc->gadget.ep0 = NULL;
-		else if (!list_empty(&mEp->ep.ep_list))
+		if (!list_empty(&mEp->ep.ep_list))
 			list_del_init(&mEp->ep.ep_list);
 
-		for (k = RX; k <= TX; k++)
-			if (mEp->qh[k].ptr != NULL)
-				dma_pool_free(udc->qh_pool,
-					      mEp->qh[k].ptr, mEp->qh[k].dma);
+		if (mEp->qh.ptr != NULL)
+			dma_pool_free(udc->qh_pool, mEp->qh.ptr, mEp->qh.dma);
 	}
 
+	udc->gadget.ep0 = NULL;
 	udc->driver = NULL;
 
 	spin_unlock_irqrestore(udc->lock, flags);
diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h
index f61fed07f76b..a2492b65f98c 100644
--- a/drivers/usb/gadget/ci13xxx_udc.h
+++ b/drivers/usb/gadget/ci13xxx_udc.h
@@ -20,7 +20,7 @@
  * DEFINE
  *****************************************************************************/
 #define CI13XXX_PAGE_SIZE 4096ul /* page size for TD's */
-#define ENDPT_MAX (16)
+#define ENDPT_MAX (32)
 #define CTRL_PAYLOAD_MAX (64)
 #define RX (0) /* similar to USB_DIR_OUT but can be used as an index */
 #define TX (1) /* similar to USB_DIR_IN but can be used as an index */
@@ -88,8 +88,7 @@ struct ci13xxx_ep {
 		struct list_head queue;
 		struct ci13xxx_qh *ptr;
 		dma_addr_t dma;
-	} qh[2];
-	struct usb_request *status;
+	} qh;
 	int wedge;
 
 	/* global resources */
@@ -119,9 +118,13 @@ struct ci13xxx {
 
 	struct dma_pool *qh_pool; /* DMA pool for queue heads */
 	struct dma_pool *td_pool; /* DMA pool for transfer descs */
+	struct usb_request *status; /* ep0 status request */
 
 	struct usb_gadget gadget; /* USB slave device */
 	struct ci13xxx_ep ci13xxx_ep[ENDPT_MAX]; /* extended endpts */
+	u32 ep0_dir; /* ep0 direction */
+#define ep0out ci13xxx_ep[0]
+#define ep0in ci13xxx_ep[16]
 
 	struct usb_gadget_driver *driver; /* 3rd party gadget driver */
 	struct ci13xxx_udc_driver *udc_driver; /* device controller driver */