author	Toshiharu Okada <toshiharu-linux@dsn.okisemi.com>	2011-02-07 03:01:26 -0500
committer	Greg Kroah-Hartman <gregkh@suse.de>	2011-02-17 13:47:54 -0500
commit	c17f459c6ea5b569825445279fa21adb3cf3067f (patch)
tree	70914063d236ad6db8a663bb09d0cd2a095e93c1 /drivers
parent	4f22ce7045c16e36d391fdfe331a397d55578493 (diff)
usb: pch_udc: Fix issue where the driver does not work with g_ether
The PCH_UDC driver does not work normally when the "Ethernet gadget" (g_ether) is used. This patch fixes that issue by modifying the following:

- the FIFO flush process
- the descriptor creation process
- the adjustment of DMA buffer alignment

With these changes the PCH_UDC driver works normally with the "Ethernet gadget", "Serial gadget", and "File-backed Storage gadget".

Signed-off-by: Toshiharu Okada <toshiharu-linux@dsn.okisemi.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
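The alignment part of the fix amounts to a bounce-buffer scheme in pch_udc_pcd_queue(): the UDC's DMA engine expects 4-byte-aligned buffers, so an unaligned gadget buffer is copied through an aligned shadow buffer before mapping. A minimal sketch of that pattern follows; the helper name map_request_for_dma() is hypothetical and chosen only for readability, since in the driver itself (see the diff below) this logic is open-coded in pch_udc_pcd_queue() and unwound in complete_req().

/*
 * Sketch only: map_request_for_dma() is a hypothetical helper; the
 * driver open-codes this in pch_udc_pcd_queue().  An unaligned gadget
 * buffer is shadowed by an aligned kzalloc() copy before DMA mapping.
 */
static int map_request_for_dma(struct device *d, struct pch_udc_request *req,
			       struct usb_request *usbreq, int in)
{
	enum dma_data_direction dir = in ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	if (!((unsigned long)usbreq->buf & 0x03)) {
		/* buffer already 4-byte aligned: map it directly */
		usbreq->dma = dma_map_single(d, usbreq->buf,
					     usbreq->length, dir);
	} else {
		/* unaligned: bounce through an aligned shadow buffer */
		req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
		if (!req->buf)
			return -ENOMEM;
		if (in)		/* IN: stage the payload to transmit */
			memcpy(req->buf, usbreq->buf, usbreq->length);
		req->dma = dma_map_single(d, req->buf, usbreq->length, dir);
		/* OUT: complete_req() copies back and kfree()s req->buf */
	}
	req->dma_mapped = 1;
	return 0;
}

Copying back only on OUT transfers mirrors the unmap paths this patch adds to complete_req(), where req->dma == DMA_ADDR_INVALID distinguishes the direct-mapped case from the bounce-buffer case.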
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/usb/gadget/pch_udc.c	178
1 file changed, 104 insertions(+), 74 deletions(-)
diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
index b120dbb64d0..3e4b35e50c2 100644
--- a/drivers/usb/gadget/pch_udc.c
+++ b/drivers/usb/gadget/pch_udc.c
@@ -367,7 +367,6 @@ struct pch_udc_dev {
 static const char	ep0_string[] = "ep0in";
 static DEFINE_SPINLOCK(udc_stall_spinlock);	/* stall spin lock */
 struct pch_udc_dev *pch_udc;	/* pointer to device object */
-
 static int speed_fs;
 module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
 MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
@@ -383,6 +382,8 @@ MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
  * @dma_mapped:	DMA memory mapped for request
  * @dma_done:		DMA completed for request
  * @chain_len:		chain length
+ * @buf:		Buffer memory for align adjustment
+ * @dma:		DMA memory for align adjustment
  */
 struct pch_udc_request {
 	struct usb_request		req;
@@ -394,6 +395,8 @@ struct pch_udc_request {
 					dma_mapped:1,
 					dma_done:1;
 	unsigned			chain_len;
+	void				*buf;
+	dma_addr_t			dma;
 };
 
 static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg)
@@ -615,7 +618,7 @@ static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep,
 /**
  * pch_udc_ep_set_bufsz() - Set the maximum packet size for the endpoint
  * @ep:		Reference to structure of type pch_udc_ep_regs
- * @buf_size:	The buffer size
+ * @buf_size:	The buffer word size
  */
 static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
 						 u32 buf_size, u32 ep_in)
@@ -635,7 +638,7 @@ static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
 /**
  * pch_udc_ep_set_maxpkt() - Set the Max packet size for the endpoint
  * @ep:		Reference to structure of type pch_udc_ep_regs
- * @pkt_size:	The packet size
+ * @pkt_size:	The packet byte size
  */
 static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
 {
@@ -920,25 +923,10 @@ static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep)
  */
 static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
 {
-	unsigned int loopcnt = 0;
-	struct pch_udc_dev *dev = ep->dev;
-
 	if (dir) {	/* IN ep */
 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
 		return;
 	}
-
-	if (pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP)
-		return;
-	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_MRXFLUSH);
-	/* Wait for RxFIFO Empty */
-	loopcnt = 10000;
-	while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
-		--loopcnt)
-		udelay(5);
-	if (!loopcnt)
-		dev_err(&dev->pdev->dev, "RxFIFO not Empty\n");
-	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_MRXFLUSH);
 }
 
 /**
@@ -1220,14 +1208,31 @@ static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
 
 	dev = ep->dev;
 	if (req->dma_mapped) {
-		if (ep->in)
-			dma_unmap_single(&dev->pdev->dev, req->req.dma,
-					 req->req.length, DMA_TO_DEVICE);
-		else
-			dma_unmap_single(&dev->pdev->dev, req->req.dma,
-					 req->req.length, DMA_FROM_DEVICE);
+		if (req->dma == DMA_ADDR_INVALID) {
+			if (ep->in)
+				dma_unmap_single(&dev->pdev->dev, req->req.dma,
+						 req->req.length,
+						 DMA_TO_DEVICE);
+			else
+				dma_unmap_single(&dev->pdev->dev, req->req.dma,
+						 req->req.length,
+						 DMA_FROM_DEVICE);
+			req->req.dma = DMA_ADDR_INVALID;
+		} else {
+			if (ep->in)
+				dma_unmap_single(&dev->pdev->dev, req->dma,
+						 req->req.length,
+						 DMA_TO_DEVICE);
+			else {
+				dma_unmap_single(&dev->pdev->dev, req->dma,
+						 req->req.length,
+						 DMA_FROM_DEVICE);
+				memcpy(req->req.buf, req->buf, req->req.length);
+			}
+			kfree(req->buf);
+			req->dma = DMA_ADDR_INVALID;
+		}
 		req->dma_mapped = 0;
-		req->req.dma = DMA_ADDR_INVALID;
 	}
 	ep->halted = 1;
 	spin_unlock(&dev->lock);
@@ -1268,12 +1273,18 @@ static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
 	struct pch_udc_data_dma_desc *td = req->td_data;
 	unsigned i = req->chain_len;
 
+	dma_addr_t addr2;
+	dma_addr_t addr = (dma_addr_t)td->next;
+	td->next = 0x00;
 	for (; i > 1; --i) {
-		dma_addr_t addr = (dma_addr_t)td->next;
 		/* do not free first desc., will be done by free for request */
 		td = phys_to_virt(addr);
+		addr2 = (dma_addr_t)td->next;
 		pci_pool_free(dev->data_requests, td, addr);
+		td->next = 0x00;
+		addr = addr2;
 	}
+	req->chain_len = 1;
 }
 
 /**
@@ -1301,23 +1312,23 @@ static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
 	if (req->chain_len > 1)
 		pch_udc_free_dma_chain(ep->dev, req);
 
-	for (; ; bytes -= buf_len, ++len) {
-		if (ep->in)
-			td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
-		else
-			td->status = PCH_UDC_BS_HST_BSY;
+	if (req->dma == DMA_ADDR_INVALID)
+		td->dataptr = req->req.dma;
+	else
+		td->dataptr = req->dma;
 
+	td->status = PCH_UDC_BS_HST_BSY;
+	for (; ; bytes -= buf_len, ++len) {
+		td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
 		if (bytes <= buf_len)
 			break;
-
 		last = td;
 		td = pci_pool_alloc(ep->dev->data_requests, gfp_flags,
 				    &dma_addr);
 		if (!td)
 			goto nomem;
-
 		i += buf_len;
-		td->dataptr = req->req.dma + i;
+		td->dataptr = req->td_data->dataptr + i;
 		last->next = dma_addr;
 	}
 
@@ -1352,28 +1363,15 @@ static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
 {
 	int retval;
 
-	req->td_data->dataptr = req->req.dma;
-	req->td_data->status |= PCH_UDC_DMA_LAST;
 	/* Allocate and create a DMA chain */
 	retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
 	if (retval) {
-		pr_err("%s: could not create DMA chain: %d\n",
-			__func__, retval);
+		pr_err("%s: could not create DMA chain:%d\n", __func__, retval);
 		return retval;
 	}
-	if (!ep->in)
-		return 0;
-	if (req->req.length <= ep->ep.maxpacket)
-		req->td_data->status = PCH_UDC_DMA_LAST | PCH_UDC_BS_HST_BSY |
-				       req->req.length;
-	/* if bytes < max packet then tx bytes must
-	 * be written in packet per buffer mode
-	 */
-	if ((req->req.length < ep->ep.maxpacket) || !ep->num)
+	if (ep->in)
 		req->td_data->status = (req->td_data->status &
-					~PCH_UDC_RXTX_BYTES) | req->req.length;
-	req->td_data->status = (req->td_data->status &
-				~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_BSY;
+					~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_RDY;
 	return 0;
 }
 
@@ -1529,6 +1527,7 @@ static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
 	if (!req)
 		return NULL;
 	req->req.dma = DMA_ADDR_INVALID;
+	req->dma = DMA_ADDR_INVALID;
 	INIT_LIST_HEAD(&req->queue);
 	if (!ep->dev->dma_addr)
 		return &req->req;
@@ -1613,16 +1612,33 @@ static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
 	/* map the buffer for dma */
 	if (usbreq->length &&
 	    ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
-		if (ep->in)
-			usbreq->dma = dma_map_single(&dev->pdev->dev,
-						     usbreq->buf,
-						     usbreq->length,
-						     DMA_TO_DEVICE);
-		else
-			usbreq->dma = dma_map_single(&dev->pdev->dev,
-						     usbreq->buf,
-						     usbreq->length,
-						     DMA_FROM_DEVICE);
+		if (!((unsigned long)(usbreq->buf) & 0x03)) {
+			if (ep->in)
+				usbreq->dma = dma_map_single(&dev->pdev->dev,
+							     usbreq->buf,
+							     usbreq->length,
+							     DMA_TO_DEVICE);
+			else
+				usbreq->dma = dma_map_single(&dev->pdev->dev,
+							     usbreq->buf,
+							     usbreq->length,
+							     DMA_FROM_DEVICE);
+		} else {
+			req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
+			if (!req->buf)
+				return -ENOMEM;
+			if (ep->in) {
+				memcpy(req->buf, usbreq->buf, usbreq->length);
+				req->dma = dma_map_single(&dev->pdev->dev,
+							  req->buf,
+							  usbreq->length,
+							  DMA_TO_DEVICE);
+			} else
+				req->dma = dma_map_single(&dev->pdev->dev,
+							  req->buf,
+							  usbreq->length,
+							  DMA_FROM_DEVICE);
+		}
 		req->dma_mapped = 1;
 	}
 	if (usbreq->length > 0) {
@@ -1920,32 +1936,46 @@ static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
 	struct pch_udc_request *req;
 	struct pch_udc_dev *dev = ep->dev;
 	unsigned int count;
+	struct pch_udc_data_dma_desc *td;
+	dma_addr_t addr;
 
 	if (list_empty(&ep->queue))
 		return;
-
 	/* next request */
 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
-	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
-	    PCH_UDC_BS_DMA_DONE)
-		return;
 	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
 	pch_udc_ep_set_ddptr(ep, 0);
-	if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
-	    PCH_UDC_RTS_SUCC) {
-		dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
-			"epstatus=0x%08x\n",
-			(req->td_data_last->status & PCH_UDC_RXTX_STS),
-			(int)(ep->epsts));
-		return;
-	}
-	count = req->td_data_last->status & PCH_UDC_RXTX_BYTES;
+	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) ==
+	    PCH_UDC_BS_DMA_DONE)
+		td = req->td_data_last;
+	else
+		td = req->td_data;
 
+	while (1) {
+		if ((td->status & PCH_UDC_RXTX_STS) != PCH_UDC_RTS_SUCC) {
+			dev_err(&dev->pdev->dev, "Invalid RXTX status=0x%08x "
+				"epstatus=0x%08x\n",
+				(req->td_data->status & PCH_UDC_RXTX_STS),
+				(int)(ep->epsts));
+			return;
+		}
+		if ((td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE)
+			if (td->status | PCH_UDC_DMA_LAST) {
+				count = td->status & PCH_UDC_RXTX_BYTES;
+				break;
+			}
+		if (td == req->td_data_last) {
+			dev_err(&dev->pdev->dev, "Not complete RX descriptor");
+			return;
+		}
+		addr = (dma_addr_t)td->next;
+		td = phys_to_virt(addr);
+	}
 	/* on 64k packets the RXBYTES field is zero */
 	if (!count && (req->req.length == UDC_DMA_MAXPACKET))
 		count = UDC_DMA_MAXPACKET;
 	req->td_data->status |= PCH_UDC_DMA_LAST;
-	req->td_data_last->status |= PCH_UDC_BS_HST_BSY;
+	td->status |= PCH_UDC_BS_HST_BSY;
 
 	req->dma_going = 0;
 	req->req.actual = count;