path: root/drivers/usb/host/xhci-ring.c
author		Andiry Xu <andiry.xu@amd.com>		2010-07-22 18:23:31 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2010-08-10 17:35:41 -0400
commit		8e51adccd4c4b9ffcd509d7f2afce0a906139f75 (patch)
tree		ddd035c4775db7f504878574d3925f5bf4c87ccd /drivers/usb/host/xhci-ring.c
parent		d18240db797ed749b511b8dc910c5dcf08be46d6 (diff)
USB: xHCI: Introduce urb_priv structure
Add the urb_priv data structure to the xHCI driver. This structure allows multiple xHCI TDs to be linked to one urb, which is essential for isochronous transfers. A non-isochronous urb needs only one TD; an isochronous urb needs as many TDs as urb->number_of_packets.

The length field of urb_priv indicates the number of TDs in the urb. The td_cnt field indicates the number of TDs already processed by the xHC. When td_cnt matches length, the urb can be given back to usbcore.

When an urb is dequeued or cancelled, add all its unprocessed TDs to the endpoint's cancelled_td_list. When processing a cancelled TD, increase the td_cnt field. When td_cnt equals urb_priv->length, give back the cancelled urb.

Signed-off-by: Andiry Xu <andiry.xu@amd.com>
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
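For orientation, a minimal sketch of the structure the message describes, with field names taken from the diff below (length, td_cnt, and the td[] array); this is not the authoritative definition, which is the one the full patch adds to drivers/usb/host/xhci.h and may differ in detail:

        /* Sketch only: per-URB private data, one xhci_td per TD. */
        struct urb_priv {
                int             length; /* number of TDs backing this urb */
                int             td_cnt; /* TDs already processed by the xHC */
                struct xhci_td  *td[0]; /* one entry per TD, allocated inline */
        };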
Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r--	drivers/usb/host/xhci-ring.c	91
1 file changed, 63 insertions(+), 28 deletions(-)
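The TD count per urb follows directly from the rules in the commit message; here is a hedged sketch (urb_td_count is a hypothetical helper name, not part of the patch, and the urb_priv allocation itself happens outside this file, in xhci_urb_enqueue(), so it does not appear in this diffstat):

        /* Sketch only: how many TDs a urb needs under this scheme. */
        static unsigned int urb_td_count(struct urb *urb)
        {
                /* Isochronous urbs get one TD per packet ... */
                if (usb_endpoint_xfer_isoc(&urb->ep->desc))
                        return urb->number_of_packets;
                /* ... everything else gets a single TD. */
                return 1;
        }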
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 4c3501003b8e..fa8c93559133 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -578,16 +578,24 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
                 struct xhci_td *cur_td, int status, char *adjective)
 {
         struct usb_hcd *hcd = xhci_to_hcd(xhci);
+        struct urb *urb;
+        struct urb_priv *urb_priv;
 
-        cur_td->urb->hcpriv = NULL;
-        usb_hcd_unlink_urb_from_ep(hcd, cur_td->urb);
-        xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, cur_td->urb);
+        urb = cur_td->urb;
+        urb_priv = urb->hcpriv;
+        urb_priv->td_cnt++;
 
-        spin_unlock(&xhci->lock);
-        usb_hcd_giveback_urb(hcd, cur_td->urb, status);
-        kfree(cur_td);
-        spin_lock(&xhci->lock);
-        xhci_dbg(xhci, "%s URB given back\n", adjective);
+        /* Only giveback urb when this is the last td in urb */
+        if (urb_priv->td_cnt == urb_priv->length) {
+                usb_hcd_unlink_urb_from_ep(hcd, urb);
+                xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb);
+
+                spin_unlock(&xhci->lock);
+                usb_hcd_giveback_urb(hcd, urb, status);
+                xhci_urb_free_priv(xhci, urb_priv);
+                spin_lock(&xhci->lock);
+                xhci_dbg(xhci, "%s URB given back\n", adjective);
+        }
 }
 
 /*
@@ -1272,6 +1280,7 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
         struct urb *urb = NULL;
         struct xhci_ep_ctx *ep_ctx;
         int ret = 0;
+        struct urb_priv *urb_priv;
         u32 trb_comp_code;
 
         slot_id = TRB_TO_SLOT_ID(event->flags);
@@ -1325,6 +1334,7 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
 td_cleanup:
         /* Clean up the endpoint's TD list */
         urb = td->urb;
+        urb_priv = urb->hcpriv;
 
         /* Do one last check of the actual transfer length.
          * If the host controller said we transferred more data than
@@ -1349,7 +1359,10 @@ td_cleanup:
                 if (!list_empty(&td->cancelled_td_list))
                         list_del(&td->cancelled_td_list);
 
-                ret = 1;
+                urb_priv->td_cnt++;
+                /* Giveback the urb when all the tds are completed */
+                if (urb_priv->td_cnt == urb_priv->length)
+                        ret = 1;
         }
 
         return ret;
@@ -1588,6 +1601,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
         union xhci_trb *event_trb;
         struct urb *urb = NULL;
         int status = -EINPROGRESS;
+        struct urb_priv *urb_priv;
         struct xhci_ep_ctx *ep_ctx;
         u32 trb_comp_code;
         int ret = 0;
@@ -1770,6 +1784,7 @@ cleanup:
 
         if (ret) {
                 urb = td->urb;
+                urb_priv = urb->hcpriv;
                 /* Leave the TD around for the reset endpoint function
                  * to use(but only if it's not a control endpoint,
                  * since we already queued the Set TR dequeue pointer
@@ -1778,7 +1793,7 @@ cleanup:
                 if (usb_endpoint_xfer_control(&urb->ep->desc) ||
                         (trb_comp_code != COMP_STALL &&
                         trb_comp_code != COMP_BABBLE))
-                        kfree(td);
+                        xhci_urb_free_priv(xhci, urb_priv);
 
                 usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
                 xhci_dbg(xhci, "Giveback URB %p, len = %d, "
@@ -1979,10 +1994,12 @@ static int prepare_transfer(struct xhci_hcd *xhci,
                 unsigned int stream_id,
                 unsigned int num_trbs,
                 struct urb *urb,
-                struct xhci_td **td,
+                unsigned int td_index,
                 gfp_t mem_flags)
 {
         int ret;
+        struct urb_priv *urb_priv;
+        struct xhci_td *td;
         struct xhci_ring *ep_ring;
         struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
 
@@ -1998,24 +2015,29 @@ static int prepare_transfer(struct xhci_hcd *xhci,
                         num_trbs, mem_flags);
         if (ret)
                 return ret;
-        *td = kzalloc(sizeof(struct xhci_td), mem_flags);
-        if (!*td)
-                return -ENOMEM;
-        INIT_LIST_HEAD(&(*td)->td_list);
-        INIT_LIST_HEAD(&(*td)->cancelled_td_list);
 
-        ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
-        if (unlikely(ret)) {
-                kfree(*td);
-                return ret;
+        urb_priv = urb->hcpriv;
+        td = urb_priv->td[td_index];
+
+        INIT_LIST_HEAD(&td->td_list);
+        INIT_LIST_HEAD(&td->cancelled_td_list);
+
+        if (td_index == 0) {
+                ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
+                if (unlikely(ret)) {
+                        xhci_urb_free_priv(xhci, urb_priv);
+                        urb->hcpriv = NULL;
+                        return ret;
+                }
         }
 
-        (*td)->urb = urb;
-        urb->hcpriv = (void *) (*td);
+        td->urb = urb;
         /* Add this TD to the tail of the endpoint ring's TD list */
-        list_add_tail(&(*td)->td_list, &ep_ring->td_list);
-        (*td)->start_seg = ep_ring->enq_seg;
-        (*td)->first_trb = ep_ring->enqueue;
+        list_add_tail(&td->td_list, &ep_ring->td_list);
+        td->start_seg = ep_ring->enq_seg;
+        td->first_trb = ep_ring->enqueue;
+
+        urb_priv->td[td_index] = td;
 
         return 0;
 }
@@ -2154,6 +2176,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 {
         struct xhci_ring *ep_ring;
         unsigned int num_trbs;
+        struct urb_priv *urb_priv;
         struct xhci_td *td;
         struct scatterlist *sg;
         int num_sgs;
@@ -2174,9 +2197,13 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
         trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
                         ep_index, urb->stream_id,
-                        num_trbs, urb, &td, mem_flags);
+                        num_trbs, urb, 0, mem_flags);
         if (trb_buff_len < 0)
                 return trb_buff_len;
+
+        urb_priv = urb->hcpriv;
+        td = urb_priv->td[0];
+
         /*
          * Don't give the first TRB to the hardware (by toggling the cycle bit)
          * until we've finished creating all the other TRBs.  The ring's cycle
@@ -2297,6 +2324,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                 struct urb *urb, int slot_id, unsigned int ep_index)
 {
         struct xhci_ring *ep_ring;
+        struct urb_priv *urb_priv;
         struct xhci_td *td;
         int num_trbs;
         struct xhci_generic_trb *start_trb;
@@ -2342,10 +2370,13 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
         ret = prepare_transfer(xhci, xhci->devs[slot_id],
                         ep_index, urb->stream_id,
-                        num_trbs, urb, &td, mem_flags);
+                        num_trbs, urb, 0, mem_flags);
         if (ret < 0)
                 return ret;
 
+        urb_priv = urb->hcpriv;
+        td = urb_priv->td[0];
+
         /*
          * Don't give the first TRB to the hardware (by toggling the cycle bit)
          * until we've finished creating all the other TRBs.  The ring's cycle
@@ -2431,6 +2462,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
         struct xhci_generic_trb *start_trb;
         int start_cycle;
         u32 field, length_field;
+        struct urb_priv *urb_priv;
         struct xhci_td *td;
 
         ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
@@ -2458,10 +2490,13 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                 num_trbs++;
         ret = prepare_transfer(xhci, xhci->devs[slot_id],
                         ep_index, urb->stream_id,
-                        num_trbs, urb, &td, mem_flags);
+                        num_trbs, urb, 0, mem_flags);
         if (ret < 0)
                 return ret;
 
+        urb_priv = urb->hcpriv;
+        td = urb_priv->td[0];
+
         /*
          * Don't give the first TRB to the hardware (by toggling the cycle bit)
          * until we've finished creating all the other TRBs.  The ring's cycle