path: root/drivers/usb/musb
author		Ajay Kumar Gupta <ajay.gupta@ti.com>	2008-10-29 09:10:35 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2008-11-13 17:45:01 -0500
commit		23d15e070c2fe5d341ca04275f6ea1b5a5fcb26f (patch)
tree		9fa6808984c70ade8959f5ac766c1b32d9e629b1 /drivers/usb/musb
parent		b60c72abdbd44ed2a63fa80455d0b7f18ce76d2b (diff)
usb: musb: fix BULK request on different available endpoints
Fixes the co-working issue of a USB serial device with USB/net devices while other endpoints are free and can be used.

This patch implements the policy that, if endpoint resources are available, different BULK requests go to different endpoints; otherwise they are multiplexed onto one reserved endpoint, as is currently done. The switch statement cases in musb_giveback() are reordered to handle BULK requests both in the multiplexed scenario and otherwise. A NAK limit scheme still has to be added for the multiplexed BULK request scenario, to avoid endpoint starvation due to USB/net devices.

Signed-off-by: Ajay Kumar Gupta <ajay.gupta@ti.com>
Signed-off-by: Felipe Balbi <felipe.balbi@nokia.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
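As an illustration only (not part of the commit): below is a minimal, standalone C model of the bulk scheduling policy the message describes — prefer a free dedicated endpoint, and only when none is free fall back to the single reserved bulk endpoint, marking the qh as multiplexed. NUM_EPS, BULK_RESERVED_EP, struct model_qh, ep_busy[] and schedule_bulk() are hypothetical stand-ins, not musb code; only the mux flag mirrors the field this patch adds to struct musb_qh.

/* Illustrative sketch, not from the commit: simplified model of the
 * bulk endpoint scheduling policy.  All names here are hypothetical
 * stand-ins for the real musb structures; mux mirrors the new
 * struct musb_qh field.
 */
#include <stdio.h>

#define NUM_EPS			4	/* hypothetical pool of general-purpose endpoints */
#define BULK_RESERVED_EP	0	/* stands in for musb->bulk_ep */

struct model_qh {
	int hw_ep;	/* endpoint index assigned by the scheduler */
	int mux;	/* 1 = multiplexed onto the reserved bulk endpoint */
};

static int ep_busy[NUM_EPS + 1];	/* [0] reserved bulk ep, [1..NUM_EPS] general */

/* Prefer a free dedicated endpoint; otherwise fall back to the shared,
 * reserved bulk endpoint (the pre-patch behaviour for every bulk qh). */
static void schedule_bulk(struct model_qh *qh)
{
	int epnum;

	for (epnum = 1; epnum <= NUM_EPS; epnum++) {
		if (!ep_busy[epnum]) {
			ep_busy[epnum] = 1;
			qh->hw_ep = epnum;
			qh->mux = 0;		/* dedicated endpoint */
			return;
		}
	}
	qh->hw_ep = BULK_RESERVED_EP;		/* no endpoint free */
	qh->mux = 1;				/* multiplexed onto the reserved ep */
}

int main(void)
{
	struct model_qh qh[6] = { { 0, 0 } };
	int i;

	for (i = 0; i < 6; i++) {
		schedule_bulk(&qh[i]);
		printf("bulk qh %d -> ep %d (mux=%d)\n", i, qh[i].hw_ep, qh[i].mux);
	}
	return 0;
}

Running this sketch gives the first NUM_EPS requests their own endpoints and multiplexes the rest onto the reserved one, which is the behaviour the patch introduces in musb_schedule().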
Diffstat (limited to 'drivers/usb/musb')
-rw-r--r--	drivers/usb/musb/musb_host.c	82
-rw-r--r--	drivers/usb/musb/musb_host.h	1
2 files changed, 46 insertions(+), 37 deletions(-)
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 981d49738ec5..e45e70bcc5e2 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -378,6 +378,19 @@ musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
 
                 switch (qh->type) {
 
+                case USB_ENDPOINT_XFER_CONTROL:
+                case USB_ENDPOINT_XFER_BULK:
+                        /* fifo policy for these lists, except that NAKing
+                         * should rotate a qh to the end (for fairness).
+                         */
+                        if (qh->mux == 1) {
+                                head = qh->ring.prev;
+                                list_del(&qh->ring);
+                                kfree(qh);
+                                qh = first_qh(head);
+                                break;
+                        }
+
                 case USB_ENDPOINT_XFER_ISOC:
                 case USB_ENDPOINT_XFER_INT:
                         /* this is where periodic bandwidth should be
@@ -388,17 +401,6 @@ musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
                         kfree(qh);
                         qh = NULL;
                         break;
-
-                case USB_ENDPOINT_XFER_CONTROL:
-                case USB_ENDPOINT_XFER_BULK:
-                        /* fifo policy for these lists, except that NAKing
-                         * should rotate a qh to the end (for fairness).
-                         */
-                        head = qh->ring.prev;
-                        list_del(&qh->ring);
-                        kfree(qh);
-                        qh = first_qh(head);
-                        break;
                 }
         }
         return qh;
@@ -1708,22 +1710,9 @@ static int musb_schedule(
         struct list_head        *head = NULL;
 
         /* use fixed hardware for control and bulk */
-        switch (qh->type) {
-        case USB_ENDPOINT_XFER_CONTROL:
+        if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
                 head = &musb->control;
                 hw_ep = musb->control_ep;
-                break;
-        case USB_ENDPOINT_XFER_BULK:
-                hw_ep = musb->bulk_ep;
-                if (is_in)
-                        head = &musb->in_bulk;
-                else
-                        head = &musb->out_bulk;
-                break;
-        }
-        if (head) {
-                idle = list_empty(head);
-                list_add_tail(&qh->ring, head);
                 goto success;
         }
 
@@ -1762,19 +1751,34 @@ static int musb_schedule(
                 else
                         diff = hw_ep->max_packet_sz_tx - qh->maxpacket;
 
-                if (diff > 0 && best_diff > diff) {
+                if (diff >= 0 && best_diff > diff) {
                         best_diff = diff;
                         best_end = epnum;
                 }
         }
-        if (best_end < 0)
+        /* use bulk reserved ep1 if no other ep is free */
+        if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
+                hw_ep = musb->bulk_ep;
+                if (is_in)
+                        head = &musb->in_bulk;
+                else
+                        head = &musb->out_bulk;
+                goto success;
+        } else if (best_end < 0) {
                 return -ENOSPC;
+        }
 
         idle = 1;
+        qh->mux = 0;
         hw_ep = musb->endpoints + best_end;
         musb->periodic[best_end] = qh;
         DBG(4, "qh %p periodic slot %d\n", qh, best_end);
 success:
+        if (head) {
+                idle = list_empty(head);
+                list_add_tail(&qh->ring, head);
+                qh->mux = 1;
+        }
         qh->hw_ep = hw_ep;
         qh->hep->hcpriv = qh;
         if (idle)
@@ -2052,11 +2056,13 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
                         sched = &musb->control;
                         break;
                 case USB_ENDPOINT_XFER_BULK:
-                        if (usb_pipein(urb->pipe))
-                                sched = &musb->in_bulk;
-                        else
-                                sched = &musb->out_bulk;
-                        break;
+                        if (qh->mux == 1) {
+                                if (usb_pipein(urb->pipe))
+                                        sched = &musb->in_bulk;
+                                else
+                                        sched = &musb->out_bulk;
+                                break;
+                        }
                 default:
                         /* REVISIT when we get a schedule tree, periodic
                          * transfers won't always be at the head of a
@@ -2104,11 +2110,13 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
                 sched = &musb->control;
                 break;
         case USB_ENDPOINT_XFER_BULK:
-                if (is_in)
-                        sched = &musb->in_bulk;
-                else
-                        sched = &musb->out_bulk;
-                break;
+                if (qh->mux == 1) {
+                        if (is_in)
+                                sched = &musb->in_bulk;
+                        else
+                                sched = &musb->out_bulk;
+                        break;
+                }
         default:
                 /* REVISIT when we get a schedule tree, periodic transfers
                  * won't always be at the head of a singleton queue...
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
index 77bcdb9d5b32..0b7fbcd21963 100644
--- a/drivers/usb/musb/musb_host.h
+++ b/drivers/usb/musb/musb_host.h
@@ -53,6 +53,7 @@ struct musb_qh {
 
         struct list_head        ring;           /* of musb_qh */
         /* struct musb_qh       *next; */       /* for periodic tree */
+        u8                      mux;            /* qh multiplexed to hw_ep */
 
         unsigned                offset;         /* in urb->transfer_buffer */
         unsigned                segsize;        /* current xfer fragment */