Diffstat (limited to 'drivers/usb/host/xhci.c')
-rw-r--r--	drivers/usb/host/xhci.c	21
1 file changed, 17 insertions(+), 4 deletions(-)
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1c4432d8fc10..8e84acff1134 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1085,8 +1085,11 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 		if (urb->dev->speed == USB_SPEED_FULL) {
 			ret = xhci_check_maxpacket(xhci, slot_id,
 					ep_index, urb);
-			if (ret < 0)
+			if (ret < 0) {
+				xhci_urb_free_priv(xhci, urb_priv);
+				urb->hcpriv = NULL;
 				return ret;
+			}
 		}
 
 		/* We have a spinlock and interrupts disabled, so we must pass
@@ -1097,6 +1100,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 			goto dying;
 		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
 				slot_id, ep_index);
+		if (ret)
+			goto free_priv;
 		spin_unlock_irqrestore(&xhci->lock, flags);
 	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
 		spin_lock_irqsave(&xhci->lock, flags);
@@ -1117,6 +1122,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
 					slot_id, ep_index);
 		}
+		if (ret)
+			goto free_priv;
 		spin_unlock_irqrestore(&xhci->lock, flags);
 	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
 		spin_lock_irqsave(&xhci->lock, flags);
@@ -1124,6 +1131,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 			goto dying;
 		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
 				slot_id, ep_index);
+		if (ret)
+			goto free_priv;
 		spin_unlock_irqrestore(&xhci->lock, flags);
 	} else {
 		spin_lock_irqsave(&xhci->lock, flags);
@@ -1131,18 +1140,22 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 			goto dying;
 		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
 				slot_id, ep_index);
+		if (ret)
+			goto free_priv;
 		spin_unlock_irqrestore(&xhci->lock, flags);
 	}
 exit:
 	return ret;
 dying:
-	xhci_urb_free_priv(xhci, urb_priv);
-	urb->hcpriv = NULL;
 	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
 			"non-responsive xHCI host.\n",
 			urb->ep->desc.bEndpointAddress, urb);
+	ret = -ESHUTDOWN;
+free_priv:
+	xhci_urb_free_priv(xhci, urb_priv);
+	urb->hcpriv = NULL;
 	spin_unlock_irqrestore(&xhci->lock, flags);
-	return -ESHUTDOWN;
+	return ret;
 }
 
 /* Get the right ring for the given URB.