author	Andiry Xu <andiry.xu@amd.com>	2010-07-22 18:23:31 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2010-08-10 17:35:41 -0400
commit	8e51adccd4c4b9ffcd509d7f2afce0a906139f75 (patch)
tree	ddd035c4775db7f504878574d3925f5bf4c87ccd
parent	d18240db797ed749b511b8dc910c5dcf08be46d6 (diff)
USB: xHCI: Introduce urb_priv structure
Add an urb_priv data structure to the xHCI driver. This structure allows
multiple xHCI TDs to be linked to one urb, which is essential for
isochronous transfers. A non-isochronous urb needs only one TD; an
isochronous urb needs one TD per packet, i.e. urb->number_of_packets TDs.

The length field of urb_priv holds the number of TDs in the urb. The
td_cnt field counts the TDs already processed by the xHC. When td_cnt
matches length, the urb can be given back to usbcore.

When an urb is dequeued or cancelled, add all of its unprocessed TDs to
the endpoint's cancelled_td_list. Each time a cancelled TD is processed,
increase td_cnt; when td_cnt reaches urb_priv->length, give back the
cancelled urb.
Signed-off-by: Andiry Xu <andiry.xu@amd.com>
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
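
For orientation, the sketch below is a compile-and-run userspace model of the bookkeeping this patch introduces; it is not driver code. The urb_priv layout and its length/td_cnt/td[] fields come from the diff, while the helper names (urb_priv_alloc, td_completed) and the use of calloc()/free() in place of kzalloc()/kfree() are illustrative assumptions.

/*
 * Userspace model of the urb_priv bookkeeping added by this patch.
 * struct xhci_td is a stand-in and calloc()/free() replace
 * kzalloc()/kfree(); only the sizing and give-back logic is modeled.
 */
#include <stdio.h>
#include <stdlib.h>

struct xhci_td { int dummy; };		/* placeholder for the real TD */

struct urb_priv {
	int length;			/* number of TDs backing this urb */
	int td_cnt;			/* TDs the xHC has completed so far */
	struct xhci_td *td[];		/* one slot per TD (driver uses td[0]) */
};

/* One TD per urb, except isochronous urbs get one TD per packet. */
static struct urb_priv *urb_priv_alloc(int is_isoc, int number_of_packets)
{
	int size = is_isoc ? number_of_packets : 1;
	struct urb_priv *priv;
	int i;

	priv = calloc(1, sizeof(*priv) + size * sizeof(struct xhci_td *));
	if (!priv)
		return NULL;
	for (i = 0; i < size; i++) {
		priv->td[i] = calloc(1, sizeof(struct xhci_td));
		if (!priv->td[i])
			goto fail;	/* free the i TDs allocated so far */
	}
	priv->length = size;
	return priv;
fail:
	while (--i >= 0)
		free(priv->td[i]);
	free(priv);
	return NULL;
}

/* Mirrors the td_cnt/length check: give back only on the last TD. */
static int td_completed(struct urb_priv *priv)
{
	priv->td_cnt++;
	return priv->td_cnt == priv->length;
}

int main(void)
{
	struct urb_priv *priv = urb_priv_alloc(1, 8);	/* isoc urb, 8 packets */
	int i;

	if (!priv)
		return 1;
	for (i = 0; i < priv->length; i++)
		printf("TD %d done, giveback urb: %d\n", i, td_completed(priv));
	for (i = 0; i < priv->length; i++)
		free(priv->td[i]);
	free(priv);
	return 0;
}

For an eight-packet isochronous urb this prints "giveback urb: 0" for the first seven TDs and "1" only on the last, the same condition xhci_giveback_urb_in_irq() and finish_td() test below before handing the urb back to usbcore.
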
-rw-r--r--	drivers/usb/host/xhci-mem.c	16
-rw-r--r--	drivers/usb/host/xhci-ring.c	91
-rw-r--r--	drivers/usb/host/xhci.c	45
-rw-r--r--	drivers/usb/host/xhci.h	7
4 files changed, 125 insertions, 34 deletions
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 64d036804715..44eeaa016f1b 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1390,6 +1390,22 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
 	return command;
 }
 
+void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv)
+{
+	int last;
+
+	if (!urb_priv)
+		return;
+
+	last = urb_priv->length - 1;
+	if (last >= 0) {
+		int i;
+		for (i = 0; i <= last; i++)
+			kfree(urb_priv->td[i]);
+	}
+	kfree(urb_priv);
+}
+
 void xhci_free_command(struct xhci_hcd *xhci,
 		struct xhci_command *command)
 {
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 4c3501003b8e..fa8c93559133 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -578,16 +578,24 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
 		struct xhci_td *cur_td, int status, char *adjective)
 {
 	struct usb_hcd *hcd = xhci_to_hcd(xhci);
+	struct urb *urb;
+	struct urb_priv *urb_priv;
 
-	cur_td->urb->hcpriv = NULL;
-	usb_hcd_unlink_urb_from_ep(hcd, cur_td->urb);
-	xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, cur_td->urb);
+	urb = cur_td->urb;
+	urb_priv = urb->hcpriv;
+	urb_priv->td_cnt++;
 
-	spin_unlock(&xhci->lock);
-	usb_hcd_giveback_urb(hcd, cur_td->urb, status);
-	kfree(cur_td);
-	spin_lock(&xhci->lock);
-	xhci_dbg(xhci, "%s URB given back\n", adjective);
+	/* Only giveback urb when this is the last td in urb */
+	if (urb_priv->td_cnt == urb_priv->length) {
+		usb_hcd_unlink_urb_from_ep(hcd, urb);
+		xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb);
+
+		spin_unlock(&xhci->lock);
+		usb_hcd_giveback_urb(hcd, urb, status);
+		xhci_urb_free_priv(xhci, urb_priv);
+		spin_lock(&xhci->lock);
+		xhci_dbg(xhci, "%s URB given back\n", adjective);
+	}
 }
 
 /*
@@ -1272,6 +1280,7 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	struct urb *urb = NULL;
 	struct xhci_ep_ctx *ep_ctx;
 	int ret = 0;
+	struct urb_priv *urb_priv;
 	u32 trb_comp_code;
 
 	slot_id = TRB_TO_SLOT_ID(event->flags);
@@ -1325,6 +1334,7 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
 td_cleanup:
 	/* Clean up the endpoint's TD list */
 	urb = td->urb;
+	urb_priv = urb->hcpriv;
 
 	/* Do one last check of the actual transfer length.
 	 * If the host controller said we transferred more data than
@@ -1349,7 +1359,10 @@ td_cleanup:
 		if (!list_empty(&td->cancelled_td_list))
 			list_del(&td->cancelled_td_list);
 
-		ret = 1;
+		urb_priv->td_cnt++;
+		/* Giveback the urb when all the tds are completed */
+		if (urb_priv->td_cnt == urb_priv->length)
+			ret = 1;
 	}
 
 	return ret;
@@ -1588,6 +1601,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	union xhci_trb *event_trb;
 	struct urb *urb = NULL;
 	int status = -EINPROGRESS;
+	struct urb_priv *urb_priv;
 	struct xhci_ep_ctx *ep_ctx;
 	u32 trb_comp_code;
 	int ret = 0;
@@ -1770,6 +1784,7 @@ cleanup:
 
 	if (ret) {
 		urb = td->urb;
+		urb_priv = urb->hcpriv;
 		/* Leave the TD around for the reset endpoint function
 		 * to use(but only if it's not a control endpoint,
 		 * since we already queued the Set TR dequeue pointer
@@ -1778,7 +1793,7 @@ cleanup:
 		if (usb_endpoint_xfer_control(&urb->ep->desc) ||
 			(trb_comp_code != COMP_STALL &&
 			trb_comp_code != COMP_BABBLE))
-			kfree(td);
+			xhci_urb_free_priv(xhci, urb_priv);
 
 		usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
 		xhci_dbg(xhci, "Giveback URB %p, len = %d, "
@@ -1979,10 +1994,12 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 		unsigned int stream_id,
 		unsigned int num_trbs,
 		struct urb *urb,
-		struct xhci_td **td,
+		unsigned int td_index,
 		gfp_t mem_flags)
 {
 	int ret;
+	struct urb_priv *urb_priv;
+	struct xhci_td *td;
 	struct xhci_ring *ep_ring;
 	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
 
@@ -1998,24 +2015,29 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 			num_trbs, mem_flags);
 	if (ret)
 		return ret;
-	*td = kzalloc(sizeof(struct xhci_td), mem_flags);
-	if (!*td)
-		return -ENOMEM;
-	INIT_LIST_HEAD(&(*td)->td_list);
-	INIT_LIST_HEAD(&(*td)->cancelled_td_list);
 
-	ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
-	if (unlikely(ret)) {
-		kfree(*td);
-		return ret;
+	urb_priv = urb->hcpriv;
+	td = urb_priv->td[td_index];
+
+	INIT_LIST_HEAD(&td->td_list);
+	INIT_LIST_HEAD(&td->cancelled_td_list);
+
+	if (td_index == 0) {
+		ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
+		if (unlikely(ret)) {
+			xhci_urb_free_priv(xhci, urb_priv);
+			urb->hcpriv = NULL;
+			return ret;
+		}
 	}
 
-	(*td)->urb = urb;
-	urb->hcpriv = (void *) (*td);
+	td->urb = urb;
 	/* Add this TD to the tail of the endpoint ring's TD list */
-	list_add_tail(&(*td)->td_list, &ep_ring->td_list);
-	(*td)->start_seg = ep_ring->enq_seg;
-	(*td)->first_trb = ep_ring->enqueue;
+	list_add_tail(&td->td_list, &ep_ring->td_list);
+	td->start_seg = ep_ring->enq_seg;
+	td->first_trb = ep_ring->enqueue;
+
+	urb_priv->td[td_index] = td;
 
 	return 0;
 }
@@ -2154,6 +2176,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 {
 	struct xhci_ring *ep_ring;
 	unsigned int num_trbs;
+	struct urb_priv *urb_priv;
 	struct xhci_td *td;
 	struct scatterlist *sg;
 	int num_sgs;
@@ -2174,9 +2197,13 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
-			num_trbs, urb, &td, mem_flags);
+			num_trbs, urb, 0, mem_flags);
 	if (trb_buff_len < 0)
 		return trb_buff_len;
+
+	urb_priv = urb->hcpriv;
+	td = urb_priv->td[0];
+
 	/*
 	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
 	 * until we've finished creating all the other TRBs.  The ring's cycle
@@ -2297,6 +2324,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		struct urb *urb, int slot_id, unsigned int ep_index)
 {
 	struct xhci_ring *ep_ring;
+	struct urb_priv *urb_priv;
 	struct xhci_td *td;
 	int num_trbs;
 	struct xhci_generic_trb *start_trb;
@@ -2342,10 +2370,13 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
-			num_trbs, urb, &td, mem_flags);
+			num_trbs, urb, 0, mem_flags);
 	if (ret < 0)
 		return ret;
 
+	urb_priv = urb->hcpriv;
+	td = urb_priv->td[0];
+
 	/*
 	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
 	 * until we've finished creating all the other TRBs.  The ring's cycle
@@ -2431,6 +2462,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	struct xhci_generic_trb *start_trb;
 	int start_cycle;
 	u32 field, length_field;
+	struct urb_priv *urb_priv;
 	struct xhci_td *td;
 
 	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
@@ -2458,10 +2490,13 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		num_trbs++;
 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
-			num_trbs, urb, &td, mem_flags);
+			num_trbs, urb, 0, mem_flags);
 	if (ret < 0)
 		return ret;
 
+	urb_priv = urb->hcpriv;
+	td = urb_priv->td[0];
+
 	/*
 	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
 	 * until we've finished creating all the other TRBs.  The ring's cycle
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 3106d22ae053..295a0a2063a6 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -804,7 +804,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 	unsigned long flags;
 	int ret = 0;
 	unsigned int slot_id, ep_index;
-
+	struct urb_priv *urb_priv;
+	int size, i;
 
 	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
 		return -EINVAL;
@@ -824,6 +825,30 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 		ret = -ESHUTDOWN;
 		goto exit;
 	}
+
+	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
+		size = urb->number_of_packets;
+	else
+		size = 1;
+
+	urb_priv = kzalloc(sizeof(struct urb_priv) +
+				  size * sizeof(struct xhci_td *), mem_flags);
+	if (!urb_priv)
+		return -ENOMEM;
+
+	for (i = 0; i < size; i++) {
+		urb_priv->td[i] = kzalloc(sizeof(struct xhci_td), mem_flags);
+		if (!urb_priv->td[i]) {
+			urb_priv->length = i;
+			xhci_urb_free_priv(xhci, urb_priv);
+			return -ENOMEM;
+		}
+	}
+
+	urb_priv->length = size;
+	urb_priv->td_cnt = 0;
+	urb->hcpriv = urb_priv;
+
 	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
 		/* Check to see if the max packet size for the default control
 		 * endpoint changed during FS device enumeration
@@ -877,6 +902,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 exit:
 	return ret;
 dying:
+	xhci_urb_free_priv(xhci, urb_priv);
+	urb->hcpriv = NULL;
 	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
 			"non-responsive xHCI host.\n",
 			urb->ep->desc.bEndpointAddress, urb);
@@ -918,9 +945,10 @@ dying:
 int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 {
 	unsigned long flags;
-	int ret;
+	int ret, i;
 	u32 temp;
 	struct xhci_hcd *xhci;
+	struct urb_priv *urb_priv;
 	struct xhci_td *td;
 	unsigned int ep_index;
 	struct xhci_ring *ep_ring;
@@ -935,12 +963,12 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	temp = xhci_readl(xhci, &xhci->op_regs->status);
 	if (temp == 0xffffffff) {
 		xhci_dbg(xhci, "HW died, freeing TD.\n");
-		td = (struct xhci_td *) urb->hcpriv;
+		urb_priv = urb->hcpriv;
 
 		usb_hcd_unlink_urb_from_ep(hcd, urb);
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, -ESHUTDOWN);
-		kfree(td);
+		xhci_urb_free_priv(xhci, urb_priv);
 		return ret;
 	}
 	if (xhci->xhc_state & XHCI_STATE_DYING) {
@@ -968,9 +996,14 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 
 	xhci_dbg(xhci, "Endpoint ring:\n");
 	xhci_debug_ring(xhci, ep_ring);
-	td = (struct xhci_td *) urb->hcpriv;
 
-	list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
+	urb_priv = urb->hcpriv;
+
+	for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
+		td = urb_priv->td[i];
+		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
+	}
+
 	/* Queue a stop endpoint command, but only if this is
 	 * the first cancellation to be handled.
 	 */
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index f4dfb26a65aa..ebf62082950b 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1090,6 +1090,12 @@ struct xhci_scratchpad {
 	dma_addr_t *sp_dma_buffers;
 };
 
+struct urb_priv {
+	int length;
+	int td_cnt;
+	struct xhci_td *td[0];
+};
+
 /*
  * Each segment table entry is 4*32bits long.  1K seems like an ok size:
  * (1K bytes * 8bytes/bit) / (4*32 bits) = 64 segment entries in the table,
@@ -1347,6 +1353,7 @@ struct xhci_ring *xhci_stream_id_to_ring(
 struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
 		bool allocate_in_ctx, bool allocate_completion,
 		gfp_t mem_flags);
+void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv);
 void xhci_free_command(struct xhci_hcd *xhci,
 		struct xhci_command *command);
 