Diffstat (limited to 'drivers')
-rw-r--r--      drivers/usb/wusbcore/wa-xfer.c    122
1 file changed, 87 insertions(+), 35 deletions(-)
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index 3860bdf3a6c6..d26083673652 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -367,15 +367,11 @@ static void __wa_xfer_abort_cb(struct urb *urb)
  *
  * The callback (see above) does nothing but freeing up the data by
  * putting the URB. Because the URB is allocated at the head of the
- * struct, the whole space we allocated is kfreed.
- *
- * We'll get an 'aborted transaction' xfer result on DTI, that'll
- * politely ignore because at this point the transaction has been
- * marked as aborted already.
+ * struct, the whole space we allocated is kfreed. *
  */
-static void __wa_xfer_abort(struct wa_xfer *xfer)
+static int __wa_xfer_abort(struct wa_xfer *xfer)
 {
-        int result;
+        int result = -ENOMEM;
         struct device *dev = &xfer->wa->usb_iface->dev;
         struct wa_xfer_abort_buffer *b;
         struct wa_rpipe *rpipe = xfer->ep->hcpriv;
@@ -396,7 +392,7 @@ static void __wa_xfer_abort(struct wa_xfer *xfer)
         result = usb_submit_urb(&b->urb, GFP_ATOMIC);
         if (result < 0)
                 goto error_submit;
-        return;                 /* callback frees! */
+        return result;          /* callback frees! */
 
 
 error_submit:
@@ -405,7 +401,7 @@ error_submit:
                 xfer, result);
         kfree(b);
 error_kmalloc:
-        return;
+        return result;
 
 }
 
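Note on the three hunks above: __wa_xfer_abort() now tells its caller whether the abort request actually reached the HWA. result is initialized to -ENOMEM so the error_kmalloc path returns a real error code, and callers treat any return >= 0 as "abort pending; the DTI will deliver the per-segment results". A minimal userspace sketch of that contract (the stub and names below are illustrative only, not driver code):

        #include <errno.h>
        #include <stdio.h>

        /* Stand-in for __wa_xfer_abort(): a negative return means the abort
         * request never reached the device (allocation/submit failure). */
        static int fake_xfer_abort(int fail)
        {
                return fail ? -ENOMEM : 0;
        }

        int main(void)
        {
                int xfer_abort_pending = fake_xfer_abort(0) >= 0;

                if (xfer_abort_pending)
                        puts("abort in flight: let the DTI report each segment");
                else
                        puts("abort never sent: complete the segments locally");
                return 0;
        }
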
@@ -1295,7 +1291,7 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
         struct wa_xfer *xfer;
         struct wa_seg *seg;
         struct wa_rpipe *rpipe;
-        unsigned cnt;
+        unsigned cnt, done = 0, xfer_abort_pending;
         unsigned rpipe_ready = 0;
 
         xfer = urb->hcpriv;
@@ -1309,6 +1305,7 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
                 goto out;
         }
         spin_lock_irqsave(&xfer->lock, flags);
+        pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer));
         rpipe = xfer->ep->hcpriv;
         if (rpipe == NULL) {
                 pr_debug("%s: xfer id 0x%08X has no RPIPE. %s",
@@ -1324,9 +1321,11 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
         if (xfer->seg == NULL)          /* still hasn't reached */
                 goto out_unlock;        /* setup(), enqueue_b() completes */
         /* Ok, the xfer is in flight already, it's been setup and submitted.*/
-        __wa_xfer_abort(xfer);
+        xfer_abort_pending = __wa_xfer_abort(xfer) >= 0;
         for (cnt = 0; cnt < xfer->segs; cnt++) {
                 seg = xfer->seg[cnt];
+                pr_debug("%s: xfer id 0x%08X#%d status = %d\n",
+                        __func__, wa_xfer_id(xfer), cnt, seg->status);
                 switch (seg->status) {
                 case WA_SEG_NOTREADY:
                 case WA_SEG_READY:
@@ -1335,42 +1334,50 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
                         WARN_ON(1);
                         break;
                 case WA_SEG_DELAYED:
+                        /*
+                         * delete from rpipe delayed list.  If no segments on
+                         * this xfer have been submitted, __wa_xfer_is_done will
+                         * trigger a giveback below.  Otherwise, the submitted
+                         * segments will be completed in the DTI interrupt.
+                         */
                         seg->status = WA_SEG_ABORTED;
                         spin_lock_irqsave(&rpipe->seg_lock, flags2);
                         list_del(&seg->list_node);
                         xfer->segs_done++;
-                        rpipe_ready = rpipe_avail_inc(rpipe);
                         spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
                         break;
-                case WA_SEG_SUBMITTED:
-                        seg->status = WA_SEG_ABORTED;
-                        usb_unlink_urb(&seg->tr_urb);
-                        if (xfer->is_inbound == 0)
-                                usb_unlink_urb(seg->dto_urb);
-                        xfer->segs_done++;
-                        rpipe_ready = rpipe_avail_inc(rpipe);
-                        break;
-                case WA_SEG_PENDING:
-                        seg->status = WA_SEG_ABORTED;
-                        xfer->segs_done++;
-                        rpipe_ready = rpipe_avail_inc(rpipe);
-                        break;
-                case WA_SEG_DTI_PENDING:
-                        usb_unlink_urb(wa->dti_urb);
-                        seg->status = WA_SEG_ABORTED;
-                        xfer->segs_done++;
-                        rpipe_ready = rpipe_avail_inc(rpipe);
-                        break;
                 case WA_SEG_DONE:
                 case WA_SEG_ERROR:
                 case WA_SEG_ABORTED:
                         break;
+                /*
+                 * In the states below, the HWA device already knows
+                 * about the transfer.  If an abort request was sent,
+                 * allow the HWA to process it and wait for the
+                 * results.  Otherwise, the DTI state and seg completed
+                 * counts can get out of sync.
+                 */
+                case WA_SEG_SUBMITTED:
+                case WA_SEG_PENDING:
+                case WA_SEG_DTI_PENDING:
+                        /*
+                         * Check if the abort was successfully sent.  This could
+                         * be false if the HWA has been removed but we haven't
+                         * gotten the disconnect notification yet.
+                         */
+                        if (!xfer_abort_pending) {
+                                seg->status = WA_SEG_ABORTED;
+                                rpipe_ready = rpipe_avail_inc(rpipe);
+                                xfer->segs_done++;
+                        }
+                        break;
                 }
         }
         xfer->result = urb->status;     /* -ENOENT or -ECONNRESET */
-        __wa_xfer_is_done(xfer);
+        done = __wa_xfer_is_done(xfer);
         spin_unlock_irqrestore(&xfer->lock, flags);
-        wa_xfer_completion(xfer);
+        if (done)
+                wa_xfer_completion(xfer);
         if (rpipe_ready)
                 wa_xfer_delayed_run(rpipe);
         return 0;
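Note on the dequeue hunk above: when the abort was sent, segments in the SUBMITTED/PENDING/DTI_PENDING states are deliberately left alone so that segs_done advances exactly once per segment, and the URB is only given back once __wa_xfer_is_done() agrees. A toy model of that accounting, assuming __wa_xfer_is_done() boils down to comparing completed vs. submitted segments (a simplification of the real check):

        #include <stdbool.h>
        #include <stdio.h>

        struct toy_xfer {
                unsigned segs_done;             /* segments fully accounted for */
                unsigned segs_submitted;
        };

        static bool toy_xfer_is_done(const struct toy_xfer *x)
        {
                return x->segs_done == x->segs_submitted;
        }

        int main(void)
        {
                struct toy_xfer x = { .segs_done = 1, .segs_submitted = 2 };

                /* One segment is still with the HWA: giveback must wait for
                 * the DTI completion path instead of happening here. */
                if (toy_xfer_is_done(&x))
                        puts("giveback now");
                else
                        puts("defer giveback to the DTI path");
                return 0;
        }
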
@@ -1441,9 +1448,51 @@ static int wa_xfer_status_to_errno(u8 status)
 }
 
 /*
+ * If a last segment flag and/or a transfer result error is encountered,
+ * no other segment transfer results will be returned from the device.
+ * Mark the remaining submitted or pending xfers as completed so that
+ * the xfer will complete cleanly.
+ */
+static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
+                struct wa_seg *incoming_seg)
+{
+        int index;
+        struct wa_rpipe *rpipe = xfer->ep->hcpriv;
+
+        for (index = incoming_seg->index + 1; index < xfer->segs_submitted;
+                index++) {
+                struct wa_seg *current_seg = xfer->seg[index];
+
+                BUG_ON(current_seg == NULL);
+
+                switch (current_seg->status) {
+                case WA_SEG_SUBMITTED:
+                case WA_SEG_PENDING:
+                case WA_SEG_DTI_PENDING:
+                        rpipe_avail_inc(rpipe);
+                /*
+                 * do not increment RPIPE avail for the WA_SEG_DELAYED case
+                 * since it has not been submitted to the RPIPE.
+                 */
+                case WA_SEG_DELAYED:
+                        xfer->segs_done++;
+                        current_seg->status = incoming_seg->status;
+                        break;
+                case WA_SEG_ABORTED:
+                        break;
+                default:
+                        WARN(1, "%s: xfer 0x%08X#%d. bad seg status = %d\n",
+                                __func__, wa_xfer_id(xfer), index,
+                                current_seg->status);
+                        break;
+                }
+        }
+}
+
+/*
  * Process a xfer result completion message
  *
- * inbound transfers: need to schedule a DTI read
+ * inbound transfers: need to schedule a buf_in_urb read
  *
  * FIXME: this function needs to be broken up in parts
  */
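Note on wa_complete_remaining_xfer_segs() above: the absence of a break after rpipe_avail_inc() is intentional. SUBMITTED/PENDING/DTI_PENDING segments release their RPIPE slot first and then fall through into WA_SEG_DELAYED, which never held a slot, so both paths share the segs_done/status bookkeeping. A compilable sketch of that fallthrough shape (the enum and counters are illustrative, not the driver's types):

        #include <stdio.h>

        enum toy_seg_status { TOY_DELAYED, TOY_SUBMITTED, TOY_PENDING, TOY_DTI_PENDING };

        int main(void)
        {
                enum toy_seg_status status = TOY_SUBMITTED;
                unsigned rpipe_avail = 0, segs_done = 0;

                switch (status) {
                case TOY_SUBMITTED:
                case TOY_PENDING:
                case TOY_DTI_PENDING:
                        rpipe_avail++;  /* this segment held an RPIPE slot */
                        /* fall through: shared completion bookkeeping */
                case TOY_DELAYED:
                        segs_done++;    /* DELAYED never held a slot */
                        break;
                }
                printf("avail=%u done=%u\n", rpipe_avail, segs_done);
                return 0;
        }
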
@@ -1484,6 +1533,8 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
                 seg->result = wa_xfer_status_to_errno(usb_status);
                 dev_err(dev, "DTI: xfer %p#:%08X:%u failed (0x%02x)\n",
                         xfer, xfer->id, seg->index, usb_status);
+                seg->status = ((usb_status & 0x7F) == WA_XFER_STATUS_ABORTED) ?
+                        WA_SEG_ABORTED : WA_SEG_ERROR;
                 goto error_complete;
         }
         /* FIXME: we ignore warnings, tally them for stats */
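Note on the hunk above: the status byte is masked with 0x7F before the comparison, so a status code carrying the high flag bit still classifies as an abort rather than a generic error. A sketch of that classification (the WA_XFER_STATUS_ABORTED value below is assumed for illustration; the real constant comes from the WUSB wire-adapter headers):

        #include <stdio.h>

        #define WA_XFER_STATUS_ABORTED  0x08    /* assumed value, illustration only */

        enum toy_seg_state { TOY_SEG_ABORTED, TOY_SEG_ERROR };

        static enum toy_seg_state classify(unsigned char usb_status)
        {
                /* mask off the high flag bit before matching the code */
                return ((usb_status & 0x7F) == WA_XFER_STATUS_ABORTED) ?
                        TOY_SEG_ABORTED : TOY_SEG_ERROR;
        }

        int main(void)
        {
                printf("%d %d\n", classify(0x08), classify(0x88));
                return 0;
        }
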
@@ -1569,10 +1620,11 @@ error_submit_buf_in:
         wa->buf_in_urb->sg = NULL;
 error_sg_alloc:
         __wa_xfer_abort(xfer);
-error_complete:
         seg->status = WA_SEG_ERROR;
+error_complete:
         xfer->segs_done++;
         rpipe_ready = rpipe_avail_inc(rpipe);
+        wa_complete_remaining_xfer_segs(xfer, seg);
         done = __wa_xfer_is_done(xfer);
         /*
          * queue work item to clear STALL for control endpoints.