author     Sergei Shtylyov <sshtylyov@ru.mvista.com>  2009-03-27 15:56:26 -0400
committer  Greg Kroah-Hartman <gregkh@suse.de>  2009-06-16 00:44:40 -0400
commit     22a0d6f1383c85a7a9759cb805fd06c848c9c4d3 (patch)
tree       afb1ea26aeb23ed0cc570325f4db2aaf2441c2bb /drivers/usb/musb/musb_host.c
parent     3e5c6dc71146c2c3f21d60d3b4b25dc7755d5339 (diff)
musb_host: simplify check for active URB
The existence of the scheduling list shouldn't matter in determining whether there's currently an URB executing on a hardware endpoint. What should actually matter is the 'in_qh' or 'out_qh' fields of 'struct musb_hw_ep' -- those are set in musb_start_urb() and cleared in musb_giveback() when the endpoint's URB list drains. Hence we should be able to replace the big *switch* statements in musb_urb_dequeue() and musb_h_disable() with mere musb_ep_get_qh() calls...

While at it, do some more changes:

- add an 'is_in' variable to musb_urb_dequeue();
- remove the unnecessary 'epnum' variable from musb_h_disable();
- fix the comment style in the vicinity.

This is a minor shrink of source and object code.

Signed-off-by: Sergei Shtylyov <sshtylyov@ru.mvista.com>
Signed-off-by: David Brownell <dbrownell@users.sourceforge.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
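For context, the new checks below lean on musb_ep_get_qh(). A minimal sketch of that helper, assuming only the 'in_qh'/'out_qh' fields of 'struct musb_hw_ep' described above (the authoritative definition lives in drivers/usb/musb/musb_host.c), looks like this:

	/*
	 * Sketch: return the qh currently programmed on a hardware endpoint
	 * for the given direction, or NULL if that side of the endpoint is
	 * idle.  Illustrative only; see musb_host.c for the real helper.
	 */
	static inline struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
	{
		return is_in ? ep->in_qh : ep->out_qh;
	}

With this helper, "is an URB executing on this hw_ep in this direction?" reduces to comparing musb_ep_get_qh(qh->hw_ep, is_in) against qh, which is exactly the test the rewritten musb_urb_dequeue() and musb_h_disable() perform.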
Diffstat (limited to 'drivers/usb/musb/musb_host.c')
-rw-r--r--  drivers/usb/musb/musb_host.c | 72
1 file changed, 14 insertions(+), 58 deletions(-)
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 17d14f26bd6c..e0dacbb336d0 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2089,14 +2089,14 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 {
 	struct musb		*musb = hcd_to_musb(hcd);
 	struct musb_qh		*qh;
-	struct list_head	*sched;
 	unsigned long		flags;
+	int			is_in  = usb_pipein(urb->pipe);
 	int			ret;
 
 	DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
 			usb_pipedevice(urb->pipe),
 			usb_pipeendpoint(urb->pipe),
-			usb_pipein(urb->pipe) ? "in" : "out");
+			is_in ? "in" : "out");
 
 	spin_lock_irqsave(&musb->lock, flags);
 	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
@@ -2107,45 +2107,23 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	if (!qh)
 		goto done;
 
-	/* Any URB not actively programmed into endpoint hardware can be
+	/*
+	 * Any URB not actively programmed into endpoint hardware can be
 	 * immediately given back; that's any URB not at the head of an
 	 * endpoint queue, unless someday we get real DMA queues.  And even
 	 * if it's at the head, it might not be known to the hardware...
 	 *
-	 * Otherwise abort current transfer, pending dma, etc.; urb->status
+	 * Otherwise abort current transfer, pending DMA, etc.; urb->status
 	 * has already been updated.  This is a synchronous abort; it'd be
 	 * OK to hold off until after some IRQ, though.
+	 *
+	 * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
 	 */
-	if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list)
-		ret = -EINPROGRESS;
-	else {
-		switch (qh->type) {
-		case USB_ENDPOINT_XFER_CONTROL:
-			sched = &musb->control;
-			break;
-		case USB_ENDPOINT_XFER_BULK:
-			if (qh->mux == 1) {
-				if (usb_pipein(urb->pipe))
-					sched = &musb->in_bulk;
-				else
-					sched = &musb->out_bulk;
-				break;
-			}
-		default:
-			/* REVISIT when we get a schedule tree, periodic
-			 * transfers won't always be at the head of a
-			 * singleton queue...
-			 */
-			sched = NULL;
-			break;
-		}
-	}
-
-	/* NOTE:  qh is invalid unless !list_empty(&hep->urb_list) */
-	if (ret < 0 || (sched && qh != first_qh(sched))) {
+	if (!qh->is_ready
+			|| urb->urb_list.prev != &qh->hep->urb_list
+			|| musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
 		int	ready = qh->is_ready;
 
-		ret = 0;
 		qh->is_ready = 0;
 		__musb_giveback(musb, urb, 0);
 		qh->is_ready = ready;
@@ -2169,13 +2147,11 @@ done:
 static void
 musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
 {
-	u8			epnum = hep->desc.bEndpointAddress;
+	u8			is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
 	unsigned long		flags;
 	struct musb		*musb = hcd_to_musb(hcd);
-	u8			is_in = epnum & USB_DIR_IN;
 	struct musb_qh		*qh;
 	struct urb		*urb;
-	struct list_head	*sched;
 
 	spin_lock_irqsave(&musb->lock, flags);
 
@@ -2183,31 +2159,11 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
 	if (qh == NULL)
 		goto exit;
 
-	switch (qh->type) {
-	case USB_ENDPOINT_XFER_CONTROL:
-		sched = &musb->control;
-		break;
-	case USB_ENDPOINT_XFER_BULK:
-		if (qh->mux == 1) {
-			if (is_in)
-				sched = &musb->in_bulk;
-			else
-				sched = &musb->out_bulk;
-			break;
-		}
-	default:
-		/* REVISIT when we get a schedule tree, periodic transfers
-		 * won't always be at the head of a singleton queue...
-		 */
-		sched = NULL;
-		break;
-	}
-
-	/* NOTE:  qh is invalid unless !list_empty(&hep->urb_list) */
+	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
 
-	/* kick first urb off the hardware, if needed */
+	/* Kick the first URB off the hardware, if needed */
 	qh->is_ready = 0;
-	if (!sched || qh == first_qh(sched)) {
+	if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
 		urb = next_urb(qh);
 
 		/* make software (then hardware) stop ASAP */