author	Thomas Pugliese <thomas.pugliese@gmail.com>	2013-08-15 13:21:30 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-08-15 20:35:31 -0400
commit	6d33f7bb8c3863e54f8bdede0a2bf97a3585ac20 (patch)
tree	b183a4ab10a38356822932cffced7d96e745ebff /drivers/usb/wusbcore
parent	224563b6ce034b82f8511969d9496113da34fb2c (diff)
USB: WUSBCORE: clear RPIPE stall for control endpoints
When the HWA encounters a STALL on a control endpoint, it should clear the
RPIPE_STALL feature on the RPIPE before processing the next transfer request.
Otherwise, all transfer requests on that endpoint after the first STALL will
fail because the RPIPE is still in the halted state.  This also removes the
unnecessary call to spin_lock_irqsave for a nested lock that was present in
the first patch.

Signed-off-by: Thomas Pugliese <thomas.pugliese@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
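For reference, the core of the fix is a class-specific Clear RPipe Feature
request that clears RPIPE_STALL on the halted RPIPE.  The minimal sketch below
mirrors the rpipe_clear_feature_stalled() helper added in wa-rpipe.c by this
patch; the wrapper name clear_rpipe_stall_example() is illustrative only, and
the return value of usb_control_msg() is left unchecked for brevity.

/*
 * Sketch (hypothetical helper name): issue CLEAR_FEATURE(RPIPE_STALL) to the
 * wire adapter for the RPIPE backing a halted endpoint.  Mirrors the
 * rpipe_clear_feature_stalled() helper added by this patch.
 */
static void clear_rpipe_stall_example(struct wahc *wa, struct wa_rpipe *rpipe)
{
	u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex);

	/* Class-specific CLEAR_FEATURE addressed to the RPIPE recipient. */
	usb_control_msg(wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
			USB_REQ_CLEAR_FEATURE,
			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
			RPIPE_STALL, index, NULL, 0, 1000 /* ms timeout */);
}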
Diffstat (limited to 'drivers/usb/wusbcore')
-rw-r--r--	drivers/usb/wusbcore/wa-hc.h	15
-rw-r--r--	drivers/usb/wusbcore/wa-rpipe.c	21
-rw-r--r--	drivers/usb/wusbcore/wa-xfer.c	84
3 files changed, 108 insertions(+), 12 deletions(-)
diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
index d6bea3e0b54a..cf250c21e946 100644
--- a/drivers/usb/wusbcore/wa-hc.h
+++ b/drivers/usb/wusbcore/wa-hc.h
@@ -91,6 +91,7 @@
 struct wusbhc;
 struct wahc;
 extern void wa_urb_enqueue_run(struct work_struct *ws);
+extern void wa_process_errored_transfers_run(struct work_struct *ws);
 
 /**
  * RPipe instance
@@ -190,8 +191,14 @@ struct wahc {
 
 	struct list_head xfer_list;
 	struct list_head xfer_delayed_list;
+	struct list_head xfer_errored_list;
+	/*
+	 * lock for the above xfer lists. Can be taken while a xfer->lock is
+	 * held but not in the reverse order.
+	 */
 	spinlock_t xfer_list_lock;
-	struct work_struct xfer_work;
+	struct work_struct xfer_enqueue_work;
+	struct work_struct xfer_error_work;
 	atomic_t xfer_id_count;
 };
 
@@ -244,8 +251,10 @@ static inline void wa_init(struct wahc *wa)
 	edc_init(&wa->dti_edc);
 	INIT_LIST_HEAD(&wa->xfer_list);
 	INIT_LIST_HEAD(&wa->xfer_delayed_list);
+	INIT_LIST_HEAD(&wa->xfer_errored_list);
 	spin_lock_init(&wa->xfer_list_lock);
-	INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
+	INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
+	INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
 	atomic_set(&wa->xfer_id_count, 1);
 }
 
@@ -269,6 +278,8 @@ static inline void rpipe_put(struct wa_rpipe *rpipe)
 
 }
 extern void rpipe_ep_disable(struct wahc *, struct usb_host_endpoint *);
+extern void rpipe_clear_feature_stalled(struct wahc *,
+			struct usb_host_endpoint *);
 extern int wa_rpipes_create(struct wahc *);
 extern void wa_rpipes_destroy(struct wahc *);
 static inline void rpipe_avail_dec(struct wa_rpipe *rpipe)
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
index 9a595c1ed867..fd4f1ce6256a 100644
--- a/drivers/usb/wusbcore/wa-rpipe.c
+++ b/drivers/usb/wusbcore/wa-rpipe.c
@@ -527,3 +527,24 @@ void rpipe_ep_disable(struct wahc *wa, struct usb_host_endpoint *ep)
 	mutex_unlock(&wa->rpipe_mutex);
 }
 EXPORT_SYMBOL_GPL(rpipe_ep_disable);
+
+/* Clear the stalled status of an RPIPE. */
+void rpipe_clear_feature_stalled(struct wahc *wa, struct usb_host_endpoint *ep)
+{
+	struct wa_rpipe *rpipe;
+
+	mutex_lock(&wa->rpipe_mutex);
+	rpipe = ep->hcpriv;
+	if (rpipe != NULL) {
+		u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex);
+
+		usb_control_msg(
+			wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
+			USB_REQ_CLEAR_FEATURE,
+			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
+			RPIPE_STALL, index, NULL, 0, 1000);
+	}
+	mutex_unlock(&wa->rpipe_mutex);
+}
+EXPORT_SYMBOL_GPL(rpipe_clear_feature_stalled);
+
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index f5c81afc6e96..d74fe1ae16ac 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -1100,7 +1100,7 @@ error_xfer_submit:
  */
 void wa_urb_enqueue_run(struct work_struct *ws)
 {
-	struct wahc *wa = container_of(ws, struct wahc, xfer_work);
+	struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
 	struct wa_xfer *xfer, *next;
 	struct urb *urb;
 	LIST_HEAD(tmp_list);
@@ -1126,6 +1126,49 @@ void wa_urb_enqueue_run(struct work_struct *ws)
 EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
 
 /*
+ * Process the errored transfers on the Wire Adapter outside of interrupt.
+ */
+void wa_process_errored_transfers_run(struct work_struct *ws)
+{
+	struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
+	struct wa_xfer *xfer, *next;
+	LIST_HEAD(tmp_list);
+
+	pr_info("%s: Run delayed STALL processing.\n", __func__);
+
+	/* Create a copy of the wa->xfer_errored_list while holding the lock */
+	spin_lock_irq(&wa->xfer_list_lock);
+	list_cut_position(&tmp_list, &wa->xfer_errored_list,
+			wa->xfer_errored_list.prev);
+	spin_unlock_irq(&wa->xfer_list_lock);
+
+	/*
+	 * run rpipe_clear_feature_stalled from temp list without list lock
+	 * held.
+	 */
+	list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
+		struct usb_host_endpoint *ep;
+		unsigned long flags;
+		struct wa_rpipe *rpipe;
+
+		spin_lock_irqsave(&xfer->lock, flags);
+		ep = xfer->ep;
+		rpipe = ep->hcpriv;
+		spin_unlock_irqrestore(&xfer->lock, flags);
+
+		/* clear RPIPE feature stalled without holding a lock. */
+		rpipe_clear_feature_stalled(wa, ep);
+
+		/* complete the xfer. This removes it from the tmp list. */
+		wa_xfer_completion(xfer);
+
+		/* check for work. */
+		wa_xfer_delayed_run(rpipe);
+	}
+}
+EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);
+
+/*
  * Submit a transfer to the Wire Adapter in a delayed way
  *
  * The process of enqueuing involves possible sleeps() [see
@@ -1180,7 +1223,7 @@ int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
 		spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
 		list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
 		spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
-		queue_work(wusbd, &wa->xfer_work);
+		queue_work(wusbd, &wa->xfer_enqueue_work);
 	} else {
 		wa_urb_enqueue_b(xfer);
 	}
@@ -1222,7 +1265,8 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
 
 	xfer = urb->hcpriv;
 	if (xfer == NULL) {
-		/* NOthing setup yet enqueue will see urb->status !=
+		/*
+		 * Nothing setup yet enqueue will see urb->status !=
 		 * -EINPROGRESS (by hcd layer) and bail out with
 		 * error, no need to do completion
 		 */
@@ -1360,7 +1404,7 @@ static int wa_xfer_status_to_errno(u8 status)
  *
  * inbound transfers: need to schedule a DTI read
  *
- * FIXME: this functio needs to be broken up in parts
+ * FIXME: this function needs to be broken up in parts
  */
 static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
 {
@@ -1482,17 +1526,37 @@ error_submit_buf_in:
 	seg->result = result;
 	kfree(wa->buf_in_urb->sg);
 error_sg_alloc:
+	__wa_xfer_abort(xfer);
 error_complete:
 	seg->status = WA_SEG_ERROR;
 	xfer->segs_done++;
 	rpipe_ready = rpipe_avail_inc(rpipe);
-	__wa_xfer_abort(xfer);
 	done = __wa_xfer_is_done(xfer);
-	spin_unlock_irqrestore(&xfer->lock, flags);
-	if (done)
-		wa_xfer_completion(xfer);
-	if (rpipe_ready)
-		wa_xfer_delayed_run(rpipe);
+	/*
+	 * queue work item to clear STALL for control endpoints.
+	 * Otherwise, let endpoint_reset take care of it.
+	 */
+	if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
+		usb_endpoint_xfer_control(&xfer->ep->desc) &&
+		done) {
+
+		dev_info(dev, "Control EP stall. Queue delayed work.\n");
+		spin_lock_irq(&wa->xfer_list_lock);
+		/* remove xfer from xfer_list. */
+		list_del(&xfer->list_node);
+		/* add xfer to xfer_errored_list. */
+		list_add_tail(&xfer->list_node, &wa->xfer_errored_list);
+		spin_unlock_irq(&wa->xfer_list_lock);
+		spin_unlock_irqrestore(&xfer->lock, flags);
+		queue_work(wusbd, &wa->xfer_error_work);
+	} else {
+		spin_unlock_irqrestore(&xfer->lock, flags);
+		if (done)
+			wa_xfer_completion(xfer);
+		if (rpipe_ready)
+			wa_xfer_delayed_run(rpipe);
+	}
+
 	return;
 
 error_bad_seg:
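A note on the locking pattern in wa_process_errored_transfers_run() above:
rpipe_clear_feature_stalled() takes rpipe_mutex and issues a synchronous
usb_control_msg(), so it may sleep and cannot be called with xfer_list_lock
held or from the transfer-result path; hence the work item.  The sketch below
shows the splice-then-process idiom the work handler uses, with the per-xfer
locking and completion steps elided; the function name drain_errored_list_example()
is illustrative only.

/*
 * Sketch: detach all entries from the shared list while holding the spinlock,
 * then walk the private copy with the lock released so sleeping calls are
 * safe.  Completion and rescheduling of the transfers are omitted here.
 */
static void drain_errored_list_example(struct wahc *wa)
{
	struct wa_xfer *xfer, *next;
	LIST_HEAD(tmp_list);

	spin_lock_irq(&wa->xfer_list_lock);
	list_cut_position(&tmp_list, &wa->xfer_errored_list,
			  wa->xfer_errored_list.prev);
	spin_unlock_irq(&wa->xfer_list_lock);

	list_for_each_entry_safe(xfer, next, &tmp_list, list_node)
		rpipe_clear_feature_stalled(wa, xfer->ep);	/* may sleep */
}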