author     Sarah Sharp <sarah.a.sharp@linux.intel.com>   2011-07-22 17:34:34 -0400
committer  Sarah Sharp <sarah.a.sharp@linux.intel.com>   2011-08-09 17:48:45 -0400
commit     d13565c12828ce0cd2a3862bf6260164a0653352 (patch)
tree       07d69f033253186d220fe0cb6da96dd20170d411
parent     8a8ff2f9399b23b968901f585ccb5a70a537c5ae (diff)
xhci: Fix memory leak during failed enqueue.
When the isochronous transfer support was introduced, and the xHCI driver switched to using urb->hcpriv to store an "urb_priv" pointer, a couple of memory leaks were introduced into the URB enqueue function in its error handling paths.

xhci_urb_enqueue allocates urb_priv, but it doesn't free it if changing the control endpoint's max packet size fails or the bulk endpoint is in the middle of allocating or deallocating streams.

xhci_urb_enqueue also doesn't free urb_priv if any of the four endpoint types' enqueue functions fail. Instead, it expects those functions to free urb_priv if an error occurs. However, the bulk, control, and interrupt enqueue functions do not free urb_priv if the endpoint ring is NULL. It will, however, get freed if prepare_transfer() fails in those enqueue functions.

Several of the error paths in the isochronous endpoint enqueue function also fail to free it. xhci_queue_isoc_tx_prepare() doesn't free urb_priv if prepare_ring() indicates there is not enough room for all the isochronous TDs in this URB. If individual isochronous TDs fail to be queued (perhaps due to an endpoint state change), urb_priv is also leaked.

This argues that the freeing of urb_priv should be done in the function that allocated it, xhci_urb_enqueue.

This patch looks rather ugly, but refactoring the code will have to wait because this patch needs to be backported to stable kernels. This patch should be backported to kernels as old as 2.6.36.

Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Cc: Andiry Xu <andiry.xu@amd.com>
Cc: stable@kernel.org
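To make the ownership rule concrete, here is a minimal, self-contained C sketch of the pattern this patch moves to: the function that allocates the per-URB private data also frees it on every error path, through a single cleanup label. The names below (struct ctx, queue_transfer, enqueue) are hypothetical stand-ins, not the xHCI driver's own symbols.

/* Minimal sketch (hypothetical names, not drivers/usb/host code):
 * the allocating function owns the cleanup of its private context.
 */
#include <errno.h>
#include <stdlib.h>

struct ctx {
        int dummy;                      /* stand-in for per-URB state */
};

/* Hypothetical lower-level queueing step: it may fail after the caller
 * has already allocated ctx, and it does not free ctx itself.
 */
static int queue_transfer(struct ctx *c)
{
        (void)c;
        return -ENOMEM;                 /* pretend the ring had no room */
}

/* The caller allocates ctx and is solely responsible for freeing it on
 * any failure, mirroring how xhci_urb_enqueue now frees urb_priv at the
 * free_priv label instead of trusting its callees to do it.
 */
static int enqueue(void)
{
        struct ctx *c = malloc(sizeof(*c));
        int ret;

        if (!c)
                return -ENOMEM;

        ret = queue_transfer(c);
        if (ret)
                goto free_priv;         /* single exit frees on any error */

        return 0;                       /* on success, ownership moves on */

free_priv:
        free(c);
        return ret;
}

int main(void)
{
        return enqueue() ? 1 : 0;
}

The real fix applies the same rule to xhci_urb_enqueue: every path that fails after urb_priv has been allocated now reaches the free_priv label, so urb->hcpriv is cleared and the allocation cannot leak.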
-rw-r--r--  drivers/usb/host/xhci-ring.c |  5
-rw-r--r--  drivers/usb/host/xhci.c      | 21
2 files changed, 18 insertions(+), 8 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 7113d16e2d3a..9d3f9dd1ad28 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2500,11 +2500,8 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 
         if (td_index == 0) {
                 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
-                if (unlikely(ret)) {
-                        xhci_urb_free_priv(xhci, urb_priv);
-                        urb->hcpriv = NULL;
+                if (unlikely(ret))
                         return ret;
-                }
         }
 
         td->urb = urb;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1c4432d8fc10..8e84acff1134 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1085,8 +1085,11 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
                 if (urb->dev->speed == USB_SPEED_FULL) {
                         ret = xhci_check_maxpacket(xhci, slot_id,
                                         ep_index, urb);
-                        if (ret < 0)
+                        if (ret < 0) {
+                                xhci_urb_free_priv(xhci, urb_priv);
+                                urb->hcpriv = NULL;
                                 return ret;
+                        }
                 }
 
                 /* We have a spinlock and interrupts disabled, so we must pass
@@ -1097,6 +1100,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
                         goto dying;
                 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
                                 slot_id, ep_index);
+                if (ret)
+                        goto free_priv;
                 spin_unlock_irqrestore(&xhci->lock, flags);
         } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
                 spin_lock_irqsave(&xhci->lock, flags);
@@ -1117,6 +1122,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
                         ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
                                         slot_id, ep_index);
                 }
+                if (ret)
+                        goto free_priv;
                 spin_unlock_irqrestore(&xhci->lock, flags);
         } else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
                 spin_lock_irqsave(&xhci->lock, flags);
@@ -1124,6 +1131,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
                         goto dying;
                 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
                                 slot_id, ep_index);
+                if (ret)
+                        goto free_priv;
                 spin_unlock_irqrestore(&xhci->lock, flags);
         } else {
                 spin_lock_irqsave(&xhci->lock, flags);
@@ -1131,18 +1140,22 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
                         goto dying;
                 ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
                                 slot_id, ep_index);
+                if (ret)
+                        goto free_priv;
                 spin_unlock_irqrestore(&xhci->lock, flags);
         }
 exit:
         return ret;
 dying:
-        xhci_urb_free_priv(xhci, urb_priv);
-        urb->hcpriv = NULL;
         xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
                         "non-responsive xHCI host.\n",
                         urb->ep->desc.bEndpointAddress, urb);
+        ret = -ESHUTDOWN;
+free_priv:
+        xhci_urb_free_priv(xhci, urb_priv);
+        urb->hcpriv = NULL;
         spin_unlock_irqrestore(&xhci->lock, flags);
-        return -ESHUTDOWN;
+        return ret;
 }
 
 /* Get the right ring for the given URB.