about summary refs log tree commit diff stats
path: root/drivers/usb/wusbcore
diff options
context:
space:
mode:
author: Thomas Pugliese <thomas.pugliese@gmail.com> 2013-10-01 11:14:56 -0400
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 2013-10-03 18:46:26 -0400
commit: 33186c441684de348636f94412d2fc256e641113 (patch)
tree: df8348447bd8abb82f60fc564ed78809d5f8fe10 /drivers/usb/wusbcore
parent: 02c123ee99c793f65af2dbda17d5fe87d448f808 (diff)
usb: wusbcore: avoid stack overflow in URB enqueue error path
This patch modifies wa_urb_enqueue to return an error and not call the urb completion routine if it failed to enqueue the urb because the HWA device is gone. This prevents a stack overflow due to infinite submit/complete recursion when unplugging the HWA while connected to a HID device.

Signed-off-by: Thomas Pugliese <thomas.pugliese@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/usb/wusbcore')
-rw-r--r--  drivers/usb/wusbcore/wa-xfer.c  51
1 file changed, 38 insertions(+), 13 deletions(-)
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index 9dabd8957d60..13faac0ea99f 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -1052,7 +1052,7 @@ error_seg_submit:
1052 * result never kicks in, the xfer will timeout from the USB code and 1052 * result never kicks in, the xfer will timeout from the USB code and
1053 * dequeue() will be called. 1053 * dequeue() will be called.
1054 */ 1054 */
1055static void wa_urb_enqueue_b(struct wa_xfer *xfer) 1055static int wa_urb_enqueue_b(struct wa_xfer *xfer)
1056{ 1056{
1057 int result; 1057 int result;
1058 unsigned long flags; 1058 unsigned long flags;
@@ -1063,18 +1063,22 @@ static void wa_urb_enqueue_b(struct wa_xfer *xfer)
1063 unsigned done; 1063 unsigned done;
1064 1064
1065 result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp); 1065 result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
1066 if (result < 0) 1066 if (result < 0) {
1067 pr_err("%s: error_rpipe_get\n", __func__);
1067 goto error_rpipe_get; 1068 goto error_rpipe_get;
1069 }
1068 result = -ENODEV; 1070 result = -ENODEV;
1069 /* FIXME: segmentation broken -- kills DWA */ 1071 /* FIXME: segmentation broken -- kills DWA */
1070 mutex_lock(&wusbhc->mutex); /* get a WUSB dev */ 1072 mutex_lock(&wusbhc->mutex); /* get a WUSB dev */
1071 if (urb->dev == NULL) { 1073 if (urb->dev == NULL) {
1072 mutex_unlock(&wusbhc->mutex); 1074 mutex_unlock(&wusbhc->mutex);
1075 pr_err("%s: error usb dev gone\n", __func__);
1073 goto error_dev_gone; 1076 goto error_dev_gone;
1074 } 1077 }
1075 wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev); 1078 wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
1076 if (wusb_dev == NULL) { 1079 if (wusb_dev == NULL) {
1077 mutex_unlock(&wusbhc->mutex); 1080 mutex_unlock(&wusbhc->mutex);
1081 pr_err("%s: error wusb dev gone\n", __func__);
1078 goto error_dev_gone; 1082 goto error_dev_gone;
1079 } 1083 }
1080 mutex_unlock(&wusbhc->mutex); 1084 mutex_unlock(&wusbhc->mutex);
@@ -1082,21 +1086,28 @@ static void wa_urb_enqueue_b(struct wa_xfer *xfer)
1082 spin_lock_irqsave(&xfer->lock, flags); 1086 spin_lock_irqsave(&xfer->lock, flags);
1083 xfer->wusb_dev = wusb_dev; 1087 xfer->wusb_dev = wusb_dev;
1084 result = urb->status; 1088 result = urb->status;
1085 if (urb->status != -EINPROGRESS) 1089 if (urb->status != -EINPROGRESS) {
1090 pr_err("%s: error_dequeued\n", __func__);
1086 goto error_dequeued; 1091 goto error_dequeued;
1092 }
1087 1093
1088 result = __wa_xfer_setup(xfer, urb); 1094 result = __wa_xfer_setup(xfer, urb);
1089 if (result < 0) 1095 if (result < 0) {
1096 pr_err("%s: error_xfer_setup\n", __func__);
1090 goto error_xfer_setup; 1097 goto error_xfer_setup;
1098 }
1091 result = __wa_xfer_submit(xfer); 1099 result = __wa_xfer_submit(xfer);
1092 if (result < 0) 1100 if (result < 0) {
1101 pr_err("%s: error_xfer_submit\n", __func__);
1093 goto error_xfer_submit; 1102 goto error_xfer_submit;
1103 }
1094 spin_unlock_irqrestore(&xfer->lock, flags); 1104 spin_unlock_irqrestore(&xfer->lock, flags);
1095 return; 1105 return 0;
1096 1106
1097 /* this is basically wa_xfer_completion() broken up wa_xfer_giveback() 1107 /*
1098 * does a wa_xfer_put() that will call wa_xfer_destroy() and clean 1108 * this is basically wa_xfer_completion() broken up wa_xfer_giveback()
1099 * upundo setup(). 1109 * does a wa_xfer_put() that will call wa_xfer_destroy() and undo
1110 * setup().
1100 */ 1111 */
1101error_xfer_setup: 1112error_xfer_setup:
1102error_dequeued: 1113error_dequeued:
@@ -1108,8 +1119,7 @@ error_dev_gone:
1108 rpipe_put(xfer->ep->hcpriv); 1119 rpipe_put(xfer->ep->hcpriv);
1109error_rpipe_get: 1120error_rpipe_get:
1110 xfer->result = result; 1121 xfer->result = result;
1111 wa_xfer_giveback(xfer); 1122 return result;
1112 return;
1113 1123
1114error_xfer_submit: 1124error_xfer_submit:
1115 done = __wa_xfer_is_done(xfer); 1125 done = __wa_xfer_is_done(xfer);
@@ -1117,6 +1127,8 @@ error_xfer_submit:
1117 spin_unlock_irqrestore(&xfer->lock, flags); 1127 spin_unlock_irqrestore(&xfer->lock, flags);
1118 if (done) 1128 if (done)
1119 wa_xfer_completion(xfer); 1129 wa_xfer_completion(xfer);
1130 /* return success since the completion routine will run. */
1131 return 0;
1120} 1132}
1121 1133
1122/* 1134/*
@@ -1150,7 +1162,8 @@ void wa_urb_enqueue_run(struct work_struct *ws)
1150 list_del_init(&xfer->list_node); 1162 list_del_init(&xfer->list_node);
1151 1163
1152 urb = xfer->urb; 1164 urb = xfer->urb;
1153 wa_urb_enqueue_b(xfer); 1165 if (wa_urb_enqueue_b(xfer) < 0)
1166 wa_xfer_giveback(xfer);
1154 usb_put_urb(urb); /* taken when queuing */ 1167 usb_put_urb(urb); /* taken when queuing */
1155 } 1168 }
1156} 1169}
@@ -1256,7 +1269,19 @@ int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
1256 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags); 1269 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1257 queue_work(wusbd, &wa->xfer_enqueue_work); 1270 queue_work(wusbd, &wa->xfer_enqueue_work);
1258 } else { 1271 } else {
1259 wa_urb_enqueue_b(xfer); 1272 result = wa_urb_enqueue_b(xfer);
1273 if (result < 0) {
1274 /*
1275 * URB submit/enqueue failed. Clean up, return an
1276 * error and do not run the callback. This avoids
1277 * an infinite submit/complete loop.
1278 */
1279 dev_err(dev, "%s: URB enqueue failed: %d\n",
1280 __func__, result);
1281 wa_put(xfer->wa);
1282 wa_xfer_put(xfer);
1283 return result;
1284 }
1260 } 1285 }
1261 return 0; 1286 return 0;
1262 1287