author		Greg Kroah-Hartman <gregkh@suse.de>	2011-08-22 16:16:56 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2011-08-22 16:17:04 -0400
commit		ea8c7fd9b0b479ef8f831d19b66c5cb246aec496 (patch)
tree		656f13e55431b9d89ffced32099ac850f6d127ac
parent		93ee7a9340d64f20295aacc3fb6a22b759323280 (diff)
parent		48df4a6fd8c40c0bbcbca2044f5f2bc75dcf6db1 (diff)
Merge branch 'for-usb-linus' of git+ssh://master.kernel.org/pub/scm/linux/kernel/git/sarah/xhci into usb-linus
* 'for-usb-linus' of git+ssh://master.kernel.org/pub/scm/linux/kernel/git/sarah/xhci:
  xhci: Handle zero-length isochronous packets.
  USB: Avoid NULL pointer deref in usb_hcd_alloc_bandwidth.
  xhci: Remove TDs from TD lists when URBs are canceled.
  xhci: Fix failed enqueue in the middle of isoch TD.
  xhci: Fix memory leak during failed enqueue.
  xHCI: report USB2 port in resuming as suspend
  xHCI: fix port U3 status check condition
-rw-r--r--	drivers/usb/core/hcd.c		2
-rw-r--r--	drivers/usb/host/xhci-hub.c	17
-rw-r--r--	drivers/usb/host/xhci-ring.c	90
-rw-r--r--	drivers/usb/host/xhci.c		28
4 files changed, 102 insertions(+), 35 deletions(-)
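Note on the "xhci: Handle zero-length isochronous packets" change in the diff below: count_isoc_trbs_needed() is rewritten as a single DIV_ROUND_UP() over the TD length plus the buffer's offset into its TRB segment, and the result is then bumped to a minimum of one TRB so that a zero-length isoc packet still gets a TRB queued. The standalone sketch below is not part of the patch; the 64 KiB TRB segment size and the brute-force reference counter are assumptions made only to illustrate that the closed-form count behaves as intended, including for zero-length TDs.

/* Standalone sketch: compare the patch's closed-form TRB count against a
 * brute-force reference.  TRB_MAX_BUFF_SIZE (assumed 64 KiB here) and the
 * reference counter are assumptions for this harness, not driver code.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TRB_MAX_BUFF_SIZE	(1 << 16)	/* assumed 64 KiB TRB segment */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Closed-form count in the style of the patched count_isoc_trbs_needed(). */
static unsigned int count_trbs_formula(uint64_t addr, uint64_t td_len)
{
	unsigned int num_trbs;

	num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
			TRB_MAX_BUFF_SIZE);
	if (num_trbs == 0)	/* a zero-length TD still needs one TRB */
		num_trbs++;
	return num_trbs;
}

/* Reference: walk the buffer in 64 KiB-aligned chunks, minimum one TRB. */
static unsigned int count_trbs_reference(uint64_t addr, uint64_t td_len)
{
	uint64_t end = addr + td_len;
	unsigned int num_trbs = 0;

	if (td_len == 0)
		return 1;
	while (addr < end) {
		uint64_t seg_end = (addr | (TRB_MAX_BUFF_SIZE - 1)) + 1;

		addr = seg_end < end ? seg_end : end;
		num_trbs++;
	}
	return num_trbs;
}

int main(void)
{
	/* Hypothetical DMA addresses and lengths, including zero-length TDs. */
	const uint64_t addrs[] = { 0x0, 0x1000, 0xff00, 0x12345678 };
	const uint64_t lens[]  = { 0, 1, 0x100, 0xffff, 0x10000, 0x30001 };

	for (unsigned int i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++)
		for (unsigned int j = 0; j < sizeof(lens) / sizeof(lens[0]); j++)
			assert(count_trbs_formula(addrs[i], lens[j]) ==
			       count_trbs_reference(addrs[i], lens[j]));

	printf("TRB counts match, including zero-length TDs\n");
	return 0;
}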
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 8669ba3fe794..73cbbd85219f 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1775,6 +1775,8 @@ int usb_hcd_alloc_bandwidth(struct usb_device *udev,
 		struct usb_interface *iface = usb_ifnum_to_if(udev,
 				cur_alt->desc.bInterfaceNumber);
 
+		if (!iface)
+			return -EINVAL;
 		if (iface->resetting_device) {
 			/*
 			 * The USB core just reset the device, so the xHCI host
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 0be788cc2fdb..1e96d1f1fe6b 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -463,11 +463,12 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 					&& (temp & PORT_POWER))
 				status |= USB_PORT_STAT_SUSPEND;
 		}
-		if ((temp & PORT_PLS_MASK) == XDEV_RESUME) {
+		if ((temp & PORT_PLS_MASK) == XDEV_RESUME &&
+				!DEV_SUPERSPEED(temp)) {
 			if ((temp & PORT_RESET) || !(temp & PORT_PE))
 				goto error;
-			if (!DEV_SUPERSPEED(temp) && time_after_eq(jiffies,
+			if (time_after_eq(jiffies,
 					bus_state->resume_done[wIndex])) {
 				xhci_dbg(xhci, "Resume USB2 port %d\n",
 					wIndex + 1);
 				bus_state->resume_done[wIndex] = 0;
@@ -487,6 +488,14 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 				xhci_ring_device(xhci, slot_id);
 				bus_state->port_c_suspend |= 1 << wIndex;
 				bus_state->suspended_ports &= ~(1 << wIndex);
+			} else {
+				/*
+				 * The resume has been signaling for less than
+				 * 20ms. Report the port status as SUSPEND,
+				 * let the usbcore check port status again
+				 * and clear resume signaling later.
+				 */
+				status |= USB_PORT_STAT_SUSPEND;
 			}
 		}
 		if ((temp & PORT_PLS_MASK) == XDEV_U0
@@ -664,7 +673,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 		xhci_dbg(xhci, "PORTSC %04x\n", temp);
 		if (temp & PORT_RESET)
 			goto error;
-		if (temp & XDEV_U3) {
+		if ((temp & PORT_PLS_MASK) == XDEV_U3) {
 			if ((temp & PORT_PE) == 0)
 				goto error;
 
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 7113d16e2d3a..54139a2f06ce 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -514,8 +514,12 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 			(unsigned long long) addr);
 }
 
+/* flip_cycle means flip the cycle bit of all but the first and last TRB.
+ * (The last TRB actually points to the ring enqueue pointer, which is not part
+ * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
+ */
 static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
-		struct xhci_td *cur_td)
+		struct xhci_td *cur_td, bool flip_cycle)
 {
 	struct xhci_segment *cur_seg;
 	union xhci_trb *cur_trb;
@@ -528,6 +532,12 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 			 * leave the pointers intact.
 			 */
 			cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
+			/* Flip the cycle bit (link TRBs can't be the first
+			 * or last TRB).
+			 */
+			if (flip_cycle)
+				cur_trb->generic.field[3] ^=
+					cpu_to_le32(TRB_CYCLE);
 			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
 			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
 					"in seg %p (0x%llx dma)\n",
@@ -541,6 +551,11 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 			cur_trb->generic.field[2] = 0;
 			/* Preserve only the cycle bit of this TRB */
 			cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
+			/* Flip the cycle bit except on the first or last TRB */
+			if (flip_cycle && cur_trb != cur_td->first_trb &&
+					cur_trb != cur_td->last_trb)
+				cur_trb->generic.field[3] ^=
+					cpu_to_le32(TRB_CYCLE);
 			cur_trb->generic.field[3] |= cpu_to_le32(
 				TRB_TYPE(TRB_TR_NOOP));
 			xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
@@ -719,14 +734,14 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 					cur_td->urb->stream_id,
 					cur_td, &deq_state);
 		else
-			td_to_noop(xhci, ep_ring, cur_td);
+			td_to_noop(xhci, ep_ring, cur_td, false);
 remove_finished_td:
 		/*
 		 * The event handler won't see a completion for this TD anymore,
 		 * so remove it from the endpoint ring's TD list. Keep it in
 		 * the cancelled TD list for URB completion later.
 		 */
-		list_del(&cur_td->td_list);
+		list_del_init(&cur_td->td_list);
 	}
 	last_unlinked_td = cur_td;
 	xhci_stop_watchdog_timer_in_irq(xhci, ep);
@@ -754,7 +769,7 @@ remove_finished_td:
 	do {
 		cur_td = list_entry(ep->cancelled_td_list.next,
 				struct xhci_td, cancelled_td_list);
-		list_del(&cur_td->cancelled_td_list);
+		list_del_init(&cur_td->cancelled_td_list);
 
 		/* Clean up the cancelled URB */
 		/* Doesn't matter what we pass for status, since the core will
@@ -862,9 +877,9 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
 				cur_td = list_first_entry(&ring->td_list,
 						struct xhci_td,
 						td_list);
-				list_del(&cur_td->td_list);
+				list_del_init(&cur_td->td_list);
 				if (!list_empty(&cur_td->cancelled_td_list))
-					list_del(&cur_td->cancelled_td_list);
+					list_del_init(&cur_td->cancelled_td_list);
 				xhci_giveback_urb_in_irq(xhci, cur_td,
 						-ESHUTDOWN, "killed");
 			}
@@ -873,7 +888,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
 						&temp_ep->cancelled_td_list,
 						struct xhci_td,
 						cancelled_td_list);
-				list_del(&cur_td->cancelled_td_list);
+				list_del_init(&cur_td->cancelled_td_list);
 				xhci_giveback_urb_in_irq(xhci, cur_td,
 						-ESHUTDOWN, "killed");
 			}
@@ -1565,10 +1580,10 @@ td_cleanup:
 			else
 				*status = 0;
 		}
-		list_del(&td->td_list);
+		list_del_init(&td->td_list);
 		/* Was this TD slated to be cancelled but completed anyway? */
 		if (!list_empty(&td->cancelled_td_list))
-			list_del(&td->cancelled_td_list);
+			list_del_init(&td->cancelled_td_list);
 
 		urb_priv->td_cnt++;
 		/* Giveback the urb when all the tds are completed */
@@ -2500,11 +2515,8 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 
 	if (td_index == 0) {
 		ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
-		if (unlikely(ret)) {
-			xhci_urb_free_priv(xhci, urb_priv);
-			urb->hcpriv = NULL;
+		if (unlikely(ret))
 			return ret;
-		}
 	}
 
 	td->urb = urb;
@@ -2672,6 +2684,10 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
 {
 	int packets_transferred;
 
+	/* One TRB with a zero-length data packet. */
+	if (running_total == 0 && trb_buff_len == 0)
+		return 0;
+
 	/* All the TRB queueing functions don't count the current TRB in
 	 * running_total.
 	 */
@@ -3113,20 +3129,15 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
 		struct urb *urb, int i)
 {
 	int num_trbs = 0;
-	u64 addr, td_len, running_total;
+	u64 addr, td_len;
 
 	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
 	td_len = urb->iso_frame_desc[i].length;
 
-	running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
-	running_total &= TRB_MAX_BUFF_SIZE - 1;
-	if (running_total != 0)
-		num_trbs++;
-
-	while (running_total < td_len) {
+	num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
+			TRB_MAX_BUFF_SIZE);
+	if (num_trbs == 0)
 		num_trbs++;
-		running_total += TRB_MAX_BUFF_SIZE;
-	}
 
 	return num_trbs;
 }
@@ -3226,6 +3237,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	start_trb = &ep_ring->enqueue->generic;
 	start_cycle = ep_ring->cycle_state;
 
+	urb_priv = urb->hcpriv;
 	/* Queue the first TRB, even if it's zero-length */
 	for (i = 0; i < num_tds; i++) {
 		unsigned int total_packet_count;
@@ -3237,9 +3249,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		addr = start_addr + urb->iso_frame_desc[i].offset;
 		td_len = urb->iso_frame_desc[i].length;
 		td_remain_len = td_len;
-		/* FIXME: Ignoring zero-length packets, can those happen? */
 		total_packet_count = roundup(td_len,
 				le16_to_cpu(urb->ep->desc.wMaxPacketSize));
+		/* A zero-length transfer still involves at least one packet. */
+		if (total_packet_count == 0)
+			total_packet_count++;
 		burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
 				total_packet_count);
 		residue = xhci_get_last_burst_packet_count(xhci,
@@ -3249,12 +3263,13 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
 				urb->stream_id, trbs_per_td, urb, i, mem_flags);
-		if (ret < 0)
-			return ret;
+		if (ret < 0) {
+			if (i == 0)
+				return ret;
+			goto cleanup;
+		}
 
-		urb_priv = urb->hcpriv;
 		td = urb_priv->td[i];
-
 		for (j = 0; j < trbs_per_td; j++) {
 			u32 remainder = 0;
 			field = TRB_TBC(burst_count) | TRB_TLBPC(residue);
@@ -3344,6 +3359,27 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
 			start_cycle, start_trb);
 	return 0;
+cleanup:
+	/* Clean up a partially enqueued isoc transfer. */
+
+	for (i--; i >= 0; i--)
+		list_del_init(&urb_priv->td[i]->td_list);
+
+	/* Use the first TD as a temporary variable to turn the TDs we've queued
+	 * into No-ops with a software-owned cycle bit. That way the hardware
+	 * won't accidentally start executing bogus TDs when we partially
+	 * overwrite them. td->first_trb and td->start_seg are already set.
+	 */
+	urb_priv->td[0]->last_trb = ep_ring->enqueue;
+	/* Every TRB except the first & last will have its cycle bit flipped. */
+	td_to_noop(xhci, ep_ring, urb_priv->td[0], true);
+
+	/* Reset the ring enqueue back to the first TRB and its cycle bit. */
+	ep_ring->enqueue = urb_priv->td[0]->first_trb;
+	ep_ring->enq_seg = urb_priv->td[0]->start_seg;
+	ep_ring->cycle_state = start_cycle;
+	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
+	return ret;
 }
 
 /*
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1c4432d8fc10..3a0f695138f4 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1085,8 +1085,11 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 		if (urb->dev->speed == USB_SPEED_FULL) {
 			ret = xhci_check_maxpacket(xhci, slot_id,
 					ep_index, urb);
-			if (ret < 0)
+			if (ret < 0) {
+				xhci_urb_free_priv(xhci, urb_priv);
+				urb->hcpriv = NULL;
 				return ret;
+			}
 		}
 
 		/* We have a spinlock and interrupts disabled, so we must pass
@@ -1097,6 +1100,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 			goto dying;
 		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
 				slot_id, ep_index);
+		if (ret)
+			goto free_priv;
 		spin_unlock_irqrestore(&xhci->lock, flags);
 	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
 		spin_lock_irqsave(&xhci->lock, flags);
@@ -1117,6 +1122,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
 					slot_id, ep_index);
 		}
+		if (ret)
+			goto free_priv;
 		spin_unlock_irqrestore(&xhci->lock, flags);
 	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
 		spin_lock_irqsave(&xhci->lock, flags);
@@ -1124,6 +1131,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 			goto dying;
 		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
 				slot_id, ep_index);
+		if (ret)
+			goto free_priv;
 		spin_unlock_irqrestore(&xhci->lock, flags);
 	} else {
 		spin_lock_irqsave(&xhci->lock, flags);
@@ -1131,18 +1140,22 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 			goto dying;
 		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
 				slot_id, ep_index);
+		if (ret)
+			goto free_priv;
 		spin_unlock_irqrestore(&xhci->lock, flags);
 	}
 exit:
 	return ret;
 dying:
-	xhci_urb_free_priv(xhci, urb_priv);
-	urb->hcpriv = NULL;
 	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
 			"non-responsive xHCI host.\n",
 			urb->ep->desc.bEndpointAddress, urb);
+	ret = -ESHUTDOWN;
+free_priv:
+	xhci_urb_free_priv(xhci, urb_priv);
+	urb->hcpriv = NULL;
 	spin_unlock_irqrestore(&xhci->lock, flags);
-	return -ESHUTDOWN;
+	return ret;
 }
 
 /* Get the right ring for the given URB.
@@ -1239,6 +1252,13 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
 		xhci_dbg(xhci, "HW died, freeing TD.\n");
 		urb_priv = urb->hcpriv;
+		for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
+			td = urb_priv->td[i];
+			if (!list_empty(&td->td_list))
+				list_del_init(&td->td_list);
+			if (!list_empty(&td->cancelled_td_list))
+				list_del_init(&td->cancelled_td_list);
+		}
 
 		usb_hcd_unlink_urb_from_ep(hcd, urb);
 		spin_unlock_irqrestore(&xhci->lock, flags);