Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
 drivers/usb/host/xhci-ring.c | 1332 +++++++++++++++++++++++++++++-----------
 1 file changed, 984 insertions(+), 348 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index bfc99a939455..bc3f4f427065 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -301,28 +301,6 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	return 1;
 }
 
-void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
-{
-	u64 temp;
-	dma_addr_t deq;
-
-	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
-			xhci->event_ring->dequeue);
-	if (deq == 0 && !in_interrupt())
-		xhci_warn(xhci, "WARN something wrong with SW event ring "
-				"dequeue ptr.\n");
-	/* Update HC event ring dequeue pointer */
-	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
-	temp &= ERST_PTR_MASK;
-	/* Don't clear the EHB bit (which is RW1C) because
-	 * there might be more events to service.
-	 */
-	temp &= ~ERST_EHB;
-	xhci_dbg(xhci, "// Write event ring dequeue pointer, preserving EHB bit\n");
-	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
-			&xhci->ir_set->erst_dequeue);
-}
-
 /* Ring the host controller doorbell after placing a command on the ring */
 void xhci_ring_cmd_db(struct xhci_hcd *xhci)
 {
@@ -359,11 +337,6 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
 		field = xhci_readl(xhci, db_addr) & DB_MASK;
 		field |= EPI_TO_DB(ep_index) | STREAM_ID_TO_DB(stream_id);
 		xhci_writel(xhci, field, db_addr);
-		/* Flush PCI posted writes - FIXME Matthew Wilcox says this
-		 * isn't time-critical and we shouldn't make the CPU wait for
-		 * the flush.
-		 */
-		xhci_readl(xhci, db_addr);
 	}
 }
 
@@ -419,6 +392,50 @@ static struct xhci_segment *find_trb_seg(
 	return cur_seg;
 }
 
+
+static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
+		unsigned int slot_id, unsigned int ep_index,
+		unsigned int stream_id)
+{
+	struct xhci_virt_ep *ep;
+
+	ep = &xhci->devs[slot_id]->eps[ep_index];
+	/* Common case: no streams */
+	if (!(ep->ep_state & EP_HAS_STREAMS))
+		return ep->ring;
+
+	if (stream_id == 0) {
+		xhci_warn(xhci,
+				"WARN: Slot ID %u, ep index %u has streams, "
+				"but URB has no stream ID.\n",
+				slot_id, ep_index);
+		return NULL;
+	}
+
+	if (stream_id < ep->stream_info->num_streams)
+		return ep->stream_info->stream_rings[stream_id];
+
+	xhci_warn(xhci,
+			"WARN: Slot ID %u, ep index %u has "
+			"stream IDs 1 to %u allocated, "
+			"but stream ID %u is requested.\n",
+			slot_id, ep_index,
+			ep->stream_info->num_streams - 1,
+			stream_id);
+	return NULL;
+}
+
+/* Get the right ring for the given URB.
+ * If the endpoint supports streams, boundary check the URB's stream ID.
+ * If the endpoint doesn't support streams, return the singular endpoint ring.
+ */
+static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
+		struct urb *urb)
+{
+	return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
+		xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
+}
+
 /*
  * Move the xHC's endpoint ring dequeue pointer past cur_td.
  * Record the new state of the xHC's endpoint ring dequeue segment,
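The helper added above encodes a simple precedence for picking a transfer ring: an endpoint without streams has exactly one ring, stream ID 0 is reserved, and any ID at or beyond num_streams is rejected. A minimal standalone sketch of the same boundary logic, using hypothetical toy types rather than the driver's structures:

#include <stdio.h>

/* Toy stand-ins for the driver's structures (hypothetical). */
struct ring { int id; };
struct endpoint {
	int has_streams;        /* mirrors the EP_HAS_STREAMS state bit */
	unsigned num_streams;   /* stream IDs 1..num_streams-1 are valid */
	struct ring *ring;      /* the singular ring, if no streams */
	struct ring **stream_rings;
};

static struct ring *pick_ring(struct endpoint *ep, unsigned stream_id)
{
	if (!ep->has_streams)
		return ep->ring;          /* common case */
	if (stream_id == 0)
		return NULL;              /* stream ID 0 is reserved */
	if (stream_id < ep->num_streams)
		return ep->stream_rings[stream_id];
	return NULL;                      /* out of bounds */
}

int main(void)
{
	struct ring r0 = {0}, r1 = {1};
	struct ring *rings[2] = { &r0, &r1 };
	struct endpoint ep = { 1, 2, NULL, rings };

	printf("stream 1 -> ring %d\n", pick_ring(&ep, 1)->id);
	printf("stream 2 -> %p (rejected)\n", (void *)pick_ring(&ep, 2));
	return 0;
}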
@@ -578,16 +595,24 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
 		struct xhci_td *cur_td, int status, char *adjective)
 {
 	struct usb_hcd *hcd = xhci_to_hcd(xhci);
+	struct urb *urb;
+	struct urb_priv *urb_priv;
 
-	cur_td->urb->hcpriv = NULL;
-	usb_hcd_unlink_urb_from_ep(hcd, cur_td->urb);
-	xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, cur_td->urb);
+	urb = cur_td->urb;
+	urb_priv = urb->hcpriv;
+	urb_priv->td_cnt++;
 
-	spin_unlock(&xhci->lock);
-	usb_hcd_giveback_urb(hcd, cur_td->urb, status);
-	kfree(cur_td);
-	spin_lock(&xhci->lock);
-	xhci_dbg(xhci, "%s URB given back\n", adjective);
+	/* Only giveback urb when this is the last td in urb */
+	if (urb_priv->td_cnt == urb_priv->length) {
+		usb_hcd_unlink_urb_from_ep(hcd, urb);
+		xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb);
+
+		spin_unlock(&xhci->lock);
+		usb_hcd_giveback_urb(hcd, urb, status);
+		xhci_urb_free_priv(xhci, urb_priv);
+		spin_lock(&xhci->lock);
+		xhci_dbg(xhci, "%s URB given back\n", adjective);
+	}
 }
 
 /*
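An isochronous URB is now split into one TD per packet, so the giveback has to wait for the last TD to complete. A toy model of that counting (hypothetical types; the real bookkeeping lives in the driver's urb_priv):

#include <stdbool.h>
#include <stdio.h>

/* Toy model (hypothetical) of the per-URB bookkeeping: a multi-TD URB
 * owns `length` TDs and is only handed back to the USB core once the
 * count of completed TDs catches up.
 */
struct toy_urb_priv {
	int length;  /* TDs allocated for this URB */
	int td_cnt;  /* TDs completed so far */
};

static bool td_done(struct toy_urb_priv *priv)
{
	priv->td_cnt++;
	return priv->td_cnt == priv->length;  /* give back on the last TD */
}

int main(void)
{
	struct toy_urb_priv priv = { 3, 0 };
	for (int i = 0; i < 3; i++)
		printf("td %d -> giveback? %s\n", i, td_done(&priv) ? "yes" : "no");
	return 0;
}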
@@ -1132,7 +1157,6 @@ static void handle_port_status(struct xhci_hcd *xhci,
 
 	/* Update event ring dequeue pointer before dropping the lock */
 	inc_deq(xhci, xhci->event_ring, true);
-	xhci_set_hc_event_deq(xhci);
 
 	spin_unlock(&xhci->lock);
 	/* Pass this up to the core */
@@ -1258,6 +1282,421 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
 }
 
 /*
+ * Finish the td processing, remove the td from td list;
+ * Return 1 if the urb can be given back.
+ */
+static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
+	union xhci_trb *event_trb, struct xhci_transfer_event *event,
+	struct xhci_virt_ep *ep, int *status, bool skip)
+{
+	struct xhci_virt_device *xdev;
+	struct xhci_ring *ep_ring;
+	unsigned int slot_id;
+	int ep_index;
+	struct urb *urb = NULL;
+	struct xhci_ep_ctx *ep_ctx;
+	int ret = 0;
+	struct urb_priv *urb_priv;
+	u32 trb_comp_code;
+
+	slot_id = TRB_TO_SLOT_ID(event->flags);
+	xdev = xhci->devs[slot_id];
+	ep_index = TRB_TO_EP_ID(event->flags) - 1;
+	ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
+	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+	trb_comp_code = GET_COMP_CODE(event->transfer_len);
+
+	if (skip)
+		goto td_cleanup;
+
+	if (trb_comp_code == COMP_STOP_INVAL ||
+			trb_comp_code == COMP_STOP) {
+		/* The Endpoint Stop Command completion will take care of any
+		 * stopped TDs.  A stopped TD may be restarted, so don't update
+		 * the ring dequeue pointer or take this TD off any lists yet.
+		 */
+		ep->stopped_td = td;
+		ep->stopped_trb = event_trb;
+		return 0;
+	} else {
+		if (trb_comp_code == COMP_STALL) {
+			/* The transfer is completed from the driver's
+			 * perspective, but we need to issue a set dequeue
+			 * command for this stalled endpoint to move the dequeue
+			 * pointer past the TD.  We can't do that here because
+			 * the halt condition must be cleared first.  Let the
+			 * USB class driver clear the stall later.
+			 */
+			ep->stopped_td = td;
+			ep->stopped_trb = event_trb;
+			ep->stopped_stream = ep_ring->stream_id;
+		} else if (xhci_requires_manual_halt_cleanup(xhci,
+					ep_ctx, trb_comp_code)) {
+			/* Other types of errors halt the endpoint, but the
+			 * class driver doesn't call usb_reset_endpoint() unless
+			 * the error is -EPIPE.  Clear the halted status in the
+			 * xHCI hardware manually.
+			 */
+			xhci_cleanup_halted_endpoint(xhci,
+					slot_id, ep_index, ep_ring->stream_id,
+					td, event_trb);
+		} else {
+			/* Update ring dequeue pointer */
+			while (ep_ring->dequeue != td->last_trb)
+				inc_deq(xhci, ep_ring, false);
+			inc_deq(xhci, ep_ring, false);
+		}
+
+td_cleanup:
+		/* Clean up the endpoint's TD list */
+		urb = td->urb;
+		urb_priv = urb->hcpriv;
+
+		/* Do one last check of the actual transfer length.
+		 * If the host controller said we transferred more data than
+		 * the buffer length, urb->actual_length will be a very big
+		 * number (since it's unsigned).  Play it safe and say we didn't
+		 * transfer anything.
+		 */
+		if (urb->actual_length > urb->transfer_buffer_length) {
+			xhci_warn(xhci, "URB transfer length is wrong, "
+					"xHC issue? req. len = %u, "
+					"act. len = %u\n",
+					urb->transfer_buffer_length,
+					urb->actual_length);
+			urb->actual_length = 0;
+			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+				*status = -EREMOTEIO;
+			else
+				*status = 0;
+		}
+		list_del(&td->td_list);
+		/* Was this TD slated to be cancelled but completed anyway? */
+		if (!list_empty(&td->cancelled_td_list))
+			list_del(&td->cancelled_td_list);
+
+		urb_priv->td_cnt++;
+		/* Giveback the urb when all the tds are completed */
+		if (urb_priv->td_cnt == urb_priv->length)
+			ret = 1;
+	}
+
+	return ret;
+}
+
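finish_td() defers the actual giveback to its caller via the return value; the td_cnt/length pair it increments lives in the per-URB private data that prepare_transfer() fills in. A plausible sketch of that layout (an assumption for illustration; the real struct urb_priv is defined in xhci.h and may differ in detail):

/* Sketch of the per-URB private data implied above (hypothetical
 * layout, not the driver's definition).
 */
struct xhci_td;

struct urb_priv_sketch {
	int length;            /* number of TDs this URB was split into */
	int td_cnt;            /* TDs the hardware has completed so far */
	struct xhci_td *td[];  /* one entry per TD, filled at enqueue time */
};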
+/*
+ * Process control tds, update urb status and actual_length.
+ */
+static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+	union xhci_trb *event_trb, struct xhci_transfer_event *event,
+	struct xhci_virt_ep *ep, int *status)
+{
+	struct xhci_virt_device *xdev;
+	struct xhci_ring *ep_ring;
+	unsigned int slot_id;
+	int ep_index;
+	struct xhci_ep_ctx *ep_ctx;
+	u32 trb_comp_code;
+
+	slot_id = TRB_TO_SLOT_ID(event->flags);
+	xdev = xhci->devs[slot_id];
+	ep_index = TRB_TO_EP_ID(event->flags) - 1;
+	ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
+	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+	trb_comp_code = GET_COMP_CODE(event->transfer_len);
+
+	xhci_debug_trb(xhci, xhci->event_ring->dequeue);
+	switch (trb_comp_code) {
+	case COMP_SUCCESS:
+		if (event_trb == ep_ring->dequeue) {
+			xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
+					"without IOC set??\n");
+			*status = -ESHUTDOWN;
+		} else if (event_trb != td->last_trb) {
+			xhci_warn(xhci, "WARN: Success on ctrl data TRB "
+					"without IOC set??\n");
+			*status = -ESHUTDOWN;
+		} else {
+			xhci_dbg(xhci, "Successful control transfer!\n");
+			*status = 0;
+		}
+		break;
+	case COMP_SHORT_TX:
+		xhci_warn(xhci, "WARN: short transfer on control ep\n");
+		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+			*status = -EREMOTEIO;
+		else
+			*status = 0;
+		break;
+	default:
+		if (!xhci_requires_manual_halt_cleanup(xhci,
+					ep_ctx, trb_comp_code))
+			break;
+		xhci_dbg(xhci, "TRB error code %u, "
+				"halted endpoint index = %u\n",
+				trb_comp_code, ep_index);
+		/* else fall through */
+	case COMP_STALL:
+		/* Did we transfer part of the data (middle) phase? */
+		if (event_trb != ep_ring->dequeue &&
+				event_trb != td->last_trb)
+			td->urb->actual_length =
+				td->urb->transfer_buffer_length
+				- TRB_LEN(event->transfer_len);
+		else
+			td->urb->actual_length = 0;
+
+		xhci_cleanup_halted_endpoint(xhci,
+			slot_id, ep_index, 0, td, event_trb);
+		return finish_td(xhci, td, event_trb, event, ep, status, true);
+	}
+	/*
+	 * Did we transfer any data, despite the errors that might have
+	 * happened?  I.e. did we get past the setup stage?
+	 */
+	if (event_trb != ep_ring->dequeue) {
+		/* The event was for the status stage */
+		if (event_trb == td->last_trb) {
+			if (td->urb->actual_length != 0) {
+				/* Don't overwrite a previously set error code
+				 */
+				if ((*status == -EINPROGRESS || *status == 0) &&
+						(td->urb->transfer_flags
+						 & URB_SHORT_NOT_OK))
+					/* Did we already see a short data
+					 * stage? */
+					*status = -EREMOTEIO;
+			} else {
+				td->urb->actual_length =
+					td->urb->transfer_buffer_length;
+			}
+		} else {
+		/* Maybe the event was for the data stage? */
+			if (trb_comp_code != COMP_STOP_INVAL) {
+				/* We didn't stop on a link TRB in the middle */
+				td->urb->actual_length =
+					td->urb->transfer_buffer_length -
+					TRB_LEN(event->transfer_len);
+				xhci_dbg(xhci, "Waiting for status "
+						"stage event\n");
+				return 0;
+			}
+		}
+	}
+
+	return finish_td(xhci, td, event_trb, event, ep, status, false);
+}
+
+/*
+ * Process isochronous tds, update urb packet status and actual_length.
+ */
+static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
+	union xhci_trb *event_trb, struct xhci_transfer_event *event,
+	struct xhci_virt_ep *ep, int *status)
+{
+	struct xhci_ring *ep_ring;
+	struct urb_priv *urb_priv;
+	int idx;
+	int len = 0;
+	int skip_td = 0;
+	union xhci_trb *cur_trb;
+	struct xhci_segment *cur_seg;
+	u32 trb_comp_code;
+
+	ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
+	trb_comp_code = GET_COMP_CODE(event->transfer_len);
+	urb_priv = td->urb->hcpriv;
+	idx = urb_priv->td_cnt;
+
+	if (ep->skip) {
+		/* The transfer is partly done */
+		*status = -EXDEV;
+		td->urb->iso_frame_desc[idx].status = -EXDEV;
+	} else {
+		/* handle completion code */
+		switch (trb_comp_code) {
+		case COMP_SUCCESS:
+			td->urb->iso_frame_desc[idx].status = 0;
+			xhci_dbg(xhci, "Successful isoc transfer!\n");
+			break;
+		case COMP_SHORT_TX:
+			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+				td->urb->iso_frame_desc[idx].status =
+					-EREMOTEIO;
+			else
+				td->urb->iso_frame_desc[idx].status = 0;
+			break;
+		case COMP_BW_OVER:
+			td->urb->iso_frame_desc[idx].status = -ECOMM;
+			skip_td = 1;
+			break;
+		case COMP_BUFF_OVER:
+		case COMP_BABBLE:
+			td->urb->iso_frame_desc[idx].status = -EOVERFLOW;
+			skip_td = 1;
+			break;
+		case COMP_STALL:
+			td->urb->iso_frame_desc[idx].status = -EPROTO;
+			skip_td = 1;
+			break;
+		case COMP_STOP:
+		case COMP_STOP_INVAL:
+			break;
+		default:
+			td->urb->iso_frame_desc[idx].status = -1;
+			break;
+		}
+	}
+
+	/* calc actual length */
+	if (ep->skip) {
+		td->urb->iso_frame_desc[idx].actual_length = 0;
+		return finish_td(xhci, td, event_trb, event, ep, status, true);
+	}
+
+	if (trb_comp_code == COMP_SUCCESS || skip_td == 1) {
+		td->urb->iso_frame_desc[idx].actual_length =
+			td->urb->iso_frame_desc[idx].length;
+		td->urb->actual_length +=
+			td->urb->iso_frame_desc[idx].length;
+	} else {
+		for (cur_trb = ep_ring->dequeue,
+				cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
+				next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
+			if ((cur_trb->generic.field[3] &
+			    TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
+			    (cur_trb->generic.field[3] &
+			    TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
+				len +=
+				    TRB_LEN(cur_trb->generic.field[2]);
+		}
+		len += TRB_LEN(cur_trb->generic.field[2]) -
+			TRB_LEN(event->transfer_len);
+
+		if (trb_comp_code != COMP_STOP_INVAL) {
+			td->urb->iso_frame_desc[idx].actual_length = len;
+			td->urb->actual_length += len;
+		}
+	}
+
+	if ((idx == urb_priv->length - 1) && *status == -EINPROGRESS)
+		*status = 0;
+
+	return finish_td(xhci, td, event_trb, event, ep, status, false);
+}
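For a partially completed isoc TD, the loop above accumulates the lengths of the TRBs that finished before the event TRB, then adds the transferred part of the event TRB itself (its length minus the residue the event reported). The same arithmetic in a standalone toy program (hypothetical numbers, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned trb_len[4] = { 65536, 65536, 65536, 1024 };  /* one TD */
	unsigned event_trb = 2;     /* event pointed at the third TRB */
	unsigned residue = 500;     /* untransferred bytes it reported */
	unsigned len = 0;

	for (unsigned i = 0; i < event_trb; i++)
		len += trb_len[i];               /* fully completed TRBs */
	len += trb_len[event_trb] - residue;     /* partial last TRB */

	printf("actual_length = %u\n", len);     /* 65536*2 + 65036 = 196108 */
	return 0;
}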
+
+/*
+ * Process bulk and interrupt tds, update urb status and actual_length.
+ */
+static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
+	union xhci_trb *event_trb, struct xhci_transfer_event *event,
+	struct xhci_virt_ep *ep, int *status)
+{
+	struct xhci_ring *ep_ring;
+	union xhci_trb *cur_trb;
+	struct xhci_segment *cur_seg;
+	u32 trb_comp_code;
+
+	ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
+	trb_comp_code = GET_COMP_CODE(event->transfer_len);
+
+	switch (trb_comp_code) {
+	case COMP_SUCCESS:
+		/* Double check that the HW transferred everything. */
+		if (event_trb != td->last_trb) {
+			xhci_warn(xhci, "WARN Successful completion "
+					"on short TX\n");
+			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+				*status = -EREMOTEIO;
+			else
+				*status = 0;
+		} else {
+			if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
+				xhci_dbg(xhci, "Successful bulk "
+						"transfer!\n");
+			else
+				xhci_dbg(xhci, "Successful interrupt "
+						"transfer!\n");
+			*status = 0;
+		}
+		break;
+	case COMP_SHORT_TX:
+		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+			*status = -EREMOTEIO;
+		else
+			*status = 0;
+		break;
+	default:
+		/* Others already handled above */
+		break;
+	}
+	dev_dbg(&td->urb->dev->dev,
+			"ep %#x - asked for %d bytes, "
+			"%d bytes untransferred\n",
+			td->urb->ep->desc.bEndpointAddress,
+			td->urb->transfer_buffer_length,
+			TRB_LEN(event->transfer_len));
+	/* Fast path - was this the last TRB in the TD for this URB? */
+	if (event_trb == td->last_trb) {
+		if (TRB_LEN(event->transfer_len) != 0) {
+			td->urb->actual_length =
+				td->urb->transfer_buffer_length -
+				TRB_LEN(event->transfer_len);
+			if (td->urb->transfer_buffer_length <
+					td->urb->actual_length) {
+				xhci_warn(xhci, "HC gave bad length "
+						"of %d bytes left\n",
+						TRB_LEN(event->transfer_len));
+				td->urb->actual_length = 0;
+				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+					*status = -EREMOTEIO;
+				else
+					*status = 0;
+			}
+			/* Don't overwrite a previously set error code */
+			if (*status == -EINPROGRESS) {
+				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+					*status = -EREMOTEIO;
+				else
+					*status = 0;
+			}
+		} else {
+			td->urb->actual_length =
+				td->urb->transfer_buffer_length;
+			/* Ignore a short packet completion if the
+			 * untransferred length was zero.
+			 */
+			if (*status == -EREMOTEIO)
+				*status = 0;
+		}
+	} else {
+		/* Slow path - walk the list, starting from the dequeue
+		 * pointer, to get the actual length transferred.
+		 */
+		td->urb->actual_length = 0;
+		for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
+				cur_trb != event_trb;
+				next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
+			if ((cur_trb->generic.field[3] &
+			 TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
+			    (cur_trb->generic.field[3] &
+			 TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
+				td->urb->actual_length +=
+					TRB_LEN(cur_trb->generic.field[2]);
+		}
+		/* If the ring didn't stop on a Link or No-op TRB, add
+		 * in the actual bytes transferred from the Normal TRB
+		 */
+		if (trb_comp_code != COMP_STOP_INVAL)
+			td->urb->actual_length +=
+				TRB_LEN(cur_trb->generic.field[2]) -
+				TRB_LEN(event->transfer_len);
+	}
+
+	return finish_td(xhci, td, event_trb, event, ep, status, false);
+}
+
+/*
  * If this function returns an error condition, it means it got a Transfer
  * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
  * At this point, the host controller is probably hosed and should be reset.
@@ -1276,10 +1715,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	union xhci_trb *event_trb;
 	struct urb *urb = NULL;
 	int status = -EINPROGRESS;
+	struct urb_priv *urb_priv;
 	struct xhci_ep_ctx *ep_ctx;
 	u32 trb_comp_code;
+	int ret = 0;
 
-	xhci_dbg(xhci, "In %s\n", __func__);
 	slot_id = TRB_TO_SLOT_ID(event->flags);
 	xdev = xhci->devs[slot_id];
 	if (!xdev) {
@@ -1293,51 +1733,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	ep = &xdev->eps[ep_index];
 	ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
-	if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
+	if (!ep_ring ||
+		(ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
 		xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
 				"or incorrect stream ring\n");
 		return -ENODEV;
 	}
 
 	event_dma = event->buffer;
-	/* This TRB should be in the TD at the head of this ring's TD list */
-	xhci_dbg(xhci, "%s - checking for list empty\n", __func__);
-	if (list_empty(&ep_ring->td_list)) {
-		xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
-				TRB_TO_SLOT_ID(event->flags), ep_index);
-		xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
-				(unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
-		xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
-		urb = NULL;
-		goto cleanup;
-	}
-	xhci_dbg(xhci, "%s - getting list entry\n", __func__);
-	td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
-
-	/* Is this a TRB in the currently executing TD? */
-	xhci_dbg(xhci, "%s - looking for TD\n", __func__);
-	event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
-			td->last_trb, event_dma);
-	xhci_dbg(xhci, "%s - found event_seg = %p\n", __func__, event_seg);
-	if (!event_seg) {
-		/* HC is busted, give up! */
-		xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n");
-		return -ESHUTDOWN;
-	}
-	event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
-	xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
-			(unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
-	xhci_dbg(xhci, "Offset 0x00 (buffer lo) = 0x%x\n",
-			lower_32_bits(event->buffer));
-	xhci_dbg(xhci, "Offset 0x04 (buffer hi) = 0x%x\n",
-			upper_32_bits(event->buffer));
-	xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
-			(unsigned int) event->transfer_len);
-	xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
-			(unsigned int) event->flags);
-
-	/* Look for common error cases */
 	trb_comp_code = GET_COMP_CODE(event->transfer_len);
+	/* Look for common error cases */
 	switch (trb_comp_code) {
 	/* Skip codes that require special handling depending on
 	 * transfer type
@@ -1373,278 +1778,156 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
 		status = -ENOSR;
 		break;
+	case COMP_BW_OVER:
+		xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
+		break;
+	case COMP_BUFF_OVER:
+		xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
+		break;
+	case COMP_UNDERRUN:
+		/*
+		 * When the Isoch ring is empty, the xHC will generate
+		 * a Ring Overrun Event for IN Isoch endpoint or Ring
+		 * Underrun Event for OUT Isoch endpoint.
+		 */
+		xhci_dbg(xhci, "underrun event on endpoint\n");
+		if (!list_empty(&ep_ring->td_list))
+			xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
+					"still with TDs queued?\n",
+				TRB_TO_SLOT_ID(event->flags), ep_index);
+		goto cleanup;
+	case COMP_OVERRUN:
+		xhci_dbg(xhci, "overrun event on endpoint\n");
+		if (!list_empty(&ep_ring->td_list))
+			xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
+					"still with TDs queued?\n",
+				TRB_TO_SLOT_ID(event->flags), ep_index);
+		goto cleanup;
+	case COMP_MISSED_INT:
+		/*
+		 * When encounter missed service error, one or more isoc tds
+		 * may be missed by xHC.
+		 * Set skip flag of the ep_ring; Complete the missed tds as
+		 * short transfer when process the ep_ring next time.
+		 */
+		ep->skip = true;
+		xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
+		goto cleanup;
 	default:
 		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
 			status = 0;
 			break;
 		}
-		xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n");
-		urb = NULL;
+		xhci_warn(xhci, "ERROR Unknown event condition, HC probably "
+				"busted\n");
 		goto cleanup;
 	}
-	/* Now update the urb's actual_length and give back to the core */
-	/* Was this a control transfer? */
-	if (usb_endpoint_xfer_control(&td->urb->ep->desc)) {
-		xhci_debug_trb(xhci, xhci->event_ring->dequeue);
-		switch (trb_comp_code) {
-		case COMP_SUCCESS:
-			if (event_trb == ep_ring->dequeue) {
-				xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n");
-				status = -ESHUTDOWN;
-			} else if (event_trb != td->last_trb) {
-				xhci_warn(xhci, "WARN: Success on ctrl data TRB without IOC set??\n");
-				status = -ESHUTDOWN;
-			} else {
-				xhci_dbg(xhci, "Successful control transfer!\n");
-				status = 0;
-			}
-			break;
-		case COMP_SHORT_TX:
-			xhci_warn(xhci, "WARN: short transfer on control ep\n");
-			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
-				status = -EREMOTEIO;
-			else
-				status = 0;
-			break;
-
-		default:
-			if (!xhci_requires_manual_halt_cleanup(xhci,
-						ep_ctx, trb_comp_code))
-				break;
-			xhci_dbg(xhci, "TRB error code %u, "
-					"halted endpoint index = %u\n",
-					trb_comp_code, ep_index);
-			/* else fall through */
-		case COMP_STALL:
-			/* Did we transfer part of the data (middle) phase? */
-			if (event_trb != ep_ring->dequeue &&
-					event_trb != td->last_trb)
-				td->urb->actual_length =
-					td->urb->transfer_buffer_length
-					- TRB_LEN(event->transfer_len);
-			else
-				td->urb->actual_length = 0;
-
-			xhci_cleanup_halted_endpoint(xhci,
-					slot_id, ep_index, 0, td, event_trb);
-			goto td_cleanup;
-		}
-		/*
-		 * Did we transfer any data, despite the errors that might have
-		 * happened?  I.e. did we get past the setup stage?
-		 */
-		if (event_trb != ep_ring->dequeue) {
-			/* The event was for the status stage */
-			if (event_trb == td->last_trb) {
-				if (td->urb->actual_length != 0) {
-					/* Don't overwrite a previously set error code */
-					if ((status == -EINPROGRESS ||
-								status == 0) &&
-							(td->urb->transfer_flags
-							 & URB_SHORT_NOT_OK))
-						/* Did we already see a short data stage? */
-						status = -EREMOTEIO;
-				} else {
-					td->urb->actual_length =
-						td->urb->transfer_buffer_length;
-				}
-			} else {
-			/* Maybe the event was for the data stage? */
-				if (trb_comp_code != COMP_STOP_INVAL) {
-					/* We didn't stop on a link TRB in the middle */
-					td->urb->actual_length =
-						td->urb->transfer_buffer_length -
-						TRB_LEN(event->transfer_len);
-					xhci_dbg(xhci, "Waiting for status stage event\n");
-					urb = NULL;
-					goto cleanup;
-				}
-			}
-		}
-	} else {
-		switch (trb_comp_code) {
-		case COMP_SUCCESS:
-			/* Double check that the HW transferred everything. */
-			if (event_trb != td->last_trb) {
-				xhci_warn(xhci, "WARN Successful completion "
-						"on short TX\n");
-				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
-					status = -EREMOTEIO;
-				else
-					status = 0;
-			} else {
-				if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
-					xhci_dbg(xhci, "Successful bulk "
-							"transfer!\n");
-				else
-					xhci_dbg(xhci, "Successful interrupt "
-							"transfer!\n");
-				status = 0;
-			}
-			break;
-		case COMP_SHORT_TX:
-			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
-				status = -EREMOTEIO;
-			else
-				status = 0;
-			break;
-		default:
-			/* Others already handled above */
-			break;
-		}
-		dev_dbg(&td->urb->dev->dev,
-				"ep %#x - asked for %d bytes, "
-				"%d bytes untransferred\n",
-				td->urb->ep->desc.bEndpointAddress,
-				td->urb->transfer_buffer_length,
-				TRB_LEN(event->transfer_len));
-		/* Fast path - was this the last TRB in the TD for this URB? */
-		if (event_trb == td->last_trb) {
-			if (TRB_LEN(event->transfer_len) != 0) {
-				td->urb->actual_length =
-					td->urb->transfer_buffer_length -
-					TRB_LEN(event->transfer_len);
-				if (td->urb->transfer_buffer_length <
-						td->urb->actual_length) {
-					xhci_warn(xhci, "HC gave bad length "
-							"of %d bytes left\n",
-							TRB_LEN(event->transfer_len));
-					td->urb->actual_length = 0;
-					if (td->urb->transfer_flags &
-							URB_SHORT_NOT_OK)
-						status = -EREMOTEIO;
-					else
-						status = 0;
-				}
-				/* Don't overwrite a previously set error code */
-				if (status == -EINPROGRESS) {
-					if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
-						status = -EREMOTEIO;
-					else
-						status = 0;
-				}
-			} else {
-				td->urb->actual_length = td->urb->transfer_buffer_length;
-				/* Ignore a short packet completion if the
-				 * untransferred length was zero.
-				 */
-				if (status == -EREMOTEIO)
-					status = 0;
-			}
-		} else {
-			/* Slow path - walk the list, starting from the dequeue
-			 * pointer, to get the actual length transferred.
-			 */
-			union xhci_trb *cur_trb;
-			struct xhci_segment *cur_seg;
-
-			td->urb->actual_length = 0;
-			for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
-					cur_trb != event_trb;
-					next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
-				if ((cur_trb->generic.field[3] &
-				TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
-				(cur_trb->generic.field[3] &
-				TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
-					td->urb->actual_length +=
-						TRB_LEN(cur_trb->generic.field[2]);
-			}
-			/* If the ring didn't stop on a Link or No-op TRB, add
-			 * in the actual bytes transferred from the Normal TRB
-			 */
-			if (trb_comp_code != COMP_STOP_INVAL)
-				td->urb->actual_length +=
-					TRB_LEN(cur_trb->generic.field[2]) -
-					TRB_LEN(event->transfer_len);
-		}
-	}
-	if (trb_comp_code == COMP_STOP_INVAL ||
-			trb_comp_code == COMP_STOP) {
-		/* The Endpoint Stop Command completion will take care of any
-		 * stopped TDs.  A stopped TD may be restarted, so don't update
-		 * the ring dequeue pointer or take this TD off any lists yet.
-		 */
-		ep->stopped_td = td;
-		ep->stopped_trb = event_trb;
-	} else {
-		if (trb_comp_code == COMP_STALL) {
-			/* The transfer is completed from the driver's
-			 * perspective, but we need to issue a set dequeue
-			 * command for this stalled endpoint to move the dequeue
-			 * pointer past the TD.  We can't do that here because
-			 * the halt condition must be cleared first.  Let the
-			 * USB class driver clear the stall later.
-			 */
-			ep->stopped_td = td;
-			ep->stopped_trb = event_trb;
-			ep->stopped_stream = ep_ring->stream_id;
-		} else if (xhci_requires_manual_halt_cleanup(xhci,
-					ep_ctx, trb_comp_code)) {
-			/* Other types of errors halt the endpoint, but the
-			 * class driver doesn't call usb_reset_endpoint() unless
-			 * the error is -EPIPE.  Clear the halted status in the
-			 * xHCI hardware manually.
-			 */
-			xhci_cleanup_halted_endpoint(xhci,
-					slot_id, ep_index, ep_ring->stream_id, td, event_trb);
-		} else {
-			/* Update ring dequeue pointer */
-			while (ep_ring->dequeue != td->last_trb)
-				inc_deq(xhci, ep_ring, false);
-			inc_deq(xhci, ep_ring, false);
-		}
-
-td_cleanup:
-		/* Clean up the endpoint's TD list */
-		urb = td->urb;
-		/* Do one last check of the actual transfer length.
-		 * If the host controller said we transferred more data than
-		 * the buffer length, urb->actual_length will be a very big
-		 * number (since it's unsigned).  Play it safe and say we didn't
-		 * transfer anything.
-		 */
-		if (urb->actual_length > urb->transfer_buffer_length) {
-			xhci_warn(xhci, "URB transfer length is wrong, "
-					"xHC issue? req. len = %u, "
-					"act. len = %u\n",
-					urb->transfer_buffer_length,
-					urb->actual_length);
-			urb->actual_length = 0;
-			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
-				status = -EREMOTEIO;
-			else
-				status = 0;
-		}
-		list_del(&td->td_list);
-		/* Was this TD slated to be cancelled but completed anyway? */
-		if (!list_empty(&td->cancelled_td_list))
-			list_del(&td->cancelled_td_list);
-
-		/* Leave the TD around for the reset endpoint function to use
-		 * (but only if it's not a control endpoint, since we already
-		 * queued the Set TR dequeue pointer command for stalled
-		 * control endpoints).
-		 */
-		if (usb_endpoint_xfer_control(&urb->ep->desc) ||
-			(trb_comp_code != COMP_STALL &&
-			trb_comp_code != COMP_BABBLE)) {
-			kfree(td);
-		}
-		urb->hcpriv = NULL;
-	}
-cleanup:
-	inc_deq(xhci, xhci->event_ring, true);
-	xhci_set_hc_event_deq(xhci);
-
-	/* FIXME for multi-TD URBs (who have buffers bigger than 64MB) */
-	if (urb) {
-		usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
-		xhci_dbg(xhci, "Giveback URB %p, len = %d, status = %d\n",
-				urb, urb->actual_length, status);
-		spin_unlock(&xhci->lock);
-		usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
-		spin_lock(&xhci->lock);
-	}
+
+	do {
+		/* This TRB should be in the TD at the head of this ring's
+		 * TD list.
+		 */
+		if (list_empty(&ep_ring->td_list)) {
+			xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
+					"with no TDs queued?\n",
+				  TRB_TO_SLOT_ID(event->flags), ep_index);
+			xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
+			  (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
+			xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
+			if (ep->skip) {
+				ep->skip = false;
+				xhci_dbg(xhci, "td_list is empty while skip "
+						"flag set. Clear skip flag.\n");
+			}
+			ret = 0;
+			goto cleanup;
+		}
+
+		td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
+		/* Is this a TRB in the currently executing TD? */
+		event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
+				td->last_trb, event_dma);
+		if (event_seg && ep->skip) {
+			xhci_dbg(xhci, "Found td. Clear skip flag.\n");
+			ep->skip = false;
+		}
+		if (!event_seg &&
+			(!ep->skip || !usb_endpoint_xfer_isoc(&td->urb->ep->desc))) {
+			/* HC is busted, give up! */
+			xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not "
+					"part of current TD\n");
+			return -ESHUTDOWN;
+		}
+
+		if (event_seg) {
+			event_trb = &event_seg->trbs[(event_dma -
+					event_seg->dma) / sizeof(*event_trb)];
+			/*
+			 * No-op TRB should not trigger interrupts.
+			 * If event_trb is a no-op TRB, it means the
+			 * corresponding TD has been cancelled. Just ignore
+			 * the TD.
+			 */
+			if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK)
+					== TRB_TYPE(TRB_TR_NOOP)) {
+				xhci_dbg(xhci, "event_trb is a no-op TRB. "
+						"Skip it\n");
+				goto cleanup;
+			}
+		}
+
+		/* Now update the urb's actual_length and give back to
+		 * the core
+		 */
+		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
+			ret = process_ctrl_td(xhci, td, event_trb, event, ep,
+						 &status);
+		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
+			ret = process_isoc_td(xhci, td, event_trb, event, ep,
+						 &status);
+		else
+			ret = process_bulk_intr_td(xhci, td, event_trb, event,
+						 ep, &status);
+
+cleanup:
+		/*
+		 * Do not update event ring dequeue pointer if ep->skip is set.
+		 * Will roll back to continue process missed tds.
+		 */
+		if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
+			inc_deq(xhci, xhci->event_ring, true);
+		}
+
+		if (ret) {
+			urb = td->urb;
+			urb_priv = urb->hcpriv;
+			/* Leave the TD around for the reset endpoint function
+			 * to use(but only if it's not a control endpoint,
+			 * since we already queued the Set TR dequeue pointer
+			 * command for stalled control endpoints).
+			 */
+			if (usb_endpoint_xfer_control(&urb->ep->desc) ||
+				(trb_comp_code != COMP_STALL &&
+					trb_comp_code != COMP_BABBLE))
+				xhci_urb_free_priv(xhci, urb_priv);
+
+			usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
+			xhci_dbg(xhci, "Giveback URB %p, len = %d, "
+					"status = %d\n",
+					urb, urb->actual_length, status);
+			spin_unlock(&xhci->lock);
+			usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
+			spin_lock(&xhci->lock);
+		}
+
+	/*
+	 * If ep->skip is set, it means there are missed tds on the
+	 * endpoint ring need to take care of.
+	 * Process them as short transfer until reach the td pointed by
+	 * the event.
+	 */
+	} while (ep->skip && trb_comp_code != COMP_MISSED_INT);
+
 	return 0;
 }
 
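The new do/while lets one transfer event retire several TDs: after a Missed Service Interval error sets ep->skip, each queued TD ahead of the event's TD is completed as a short transfer on a subsequent pass. A compact toy model of that control flow (hypothetical, not driver code):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	int queued_tds = 4;     /* TDs on the ring */
	int event_td = 2;       /* the TD the transfer event points at */
	bool skip = true;       /* set when COMP_MISSED_INT was seen */
	int head = 0;

	do {
		if (head == event_td)
			skip = false;   /* found the event's TD: clear skip */
		printf("td %d -> %s\n", head,
		       skip ? "completed as short transfer" : "processed");
		head++;
	} while (skip && head < queued_tds);
	return 0;
}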
@@ -1652,7 +1935,7 @@ cleanup:
  * This function handles all OS-owned events on the event ring.  It may drop
  * xhci->lock between event processing (e.g. to pass up port status changes).
  */
-void xhci_handle_event(struct xhci_hcd *xhci)
+static void xhci_handle_event(struct xhci_hcd *xhci)
 {
 	union xhci_trb *event;
 	int update_ptrs = 1;
@@ -1710,15 +1993,130 @@ static void xhci_handle_event(struct xhci_hcd *xhci)
 		return;
 	}
 
-	if (update_ptrs) {
-		/* Update SW and HC event ring dequeue pointer */
+	if (update_ptrs)
+		/* Update SW event ring dequeue pointer */
 		inc_deq(xhci, xhci->event_ring, true);
-		xhci_set_hc_event_deq(xhci);
-	}
+
 	/* Are there more items on the event ring? */
 	xhci_handle_event(xhci);
 }
 
+/*
+ * xHCI spec says we can get an interrupt, and if the HC has an error condition,
+ * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
+ * indicators of an event TRB error, but we check the status *first* to be safe.
+ */
+irqreturn_t xhci_irq(struct usb_hcd *hcd)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	u32 status;
+	union xhci_trb *trb;
+	u64 temp_64;
+	union xhci_trb *event_ring_deq;
+	dma_addr_t deq;
+
+	spin_lock(&xhci->lock);
+	trb = xhci->event_ring->dequeue;
+	/* Check if the xHC generated the interrupt, or the irq is shared */
+	status = xhci_readl(xhci, &xhci->op_regs->status);
+	if (status == 0xffffffff)
+		goto hw_died;
+
+	if (!(status & STS_EINT)) {
+		spin_unlock(&xhci->lock);
+		xhci_warn(xhci, "Spurious interrupt.\n");
+		return IRQ_NONE;
+	}
+	xhci_dbg(xhci, "op reg status = %08x\n", status);
+	xhci_dbg(xhci, "Event ring dequeue ptr:\n");
+	xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
+			(unsigned long long)
+			xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
+			lower_32_bits(trb->link.segment_ptr),
+			upper_32_bits(trb->link.segment_ptr),
+			(unsigned int) trb->link.intr_target,
+			(unsigned int) trb->link.control);
+
+	if (status & STS_FATAL) {
+		xhci_warn(xhci, "WARNING: Host System Error\n");
+		xhci_halt(xhci);
+hw_died:
+		xhci_to_hcd(xhci)->state = HC_STATE_HALT;
+		spin_unlock(&xhci->lock);
+		return -ESHUTDOWN;
+	}
+
+	/*
+	 * Clear the op reg interrupt status first,
+	 * so we can receive interrupts from other MSI-X interrupters.
+	 * Write 1 to clear the interrupt status.
+	 */
+	status |= STS_EINT;
+	xhci_writel(xhci, status, &xhci->op_regs->status);
+	/* FIXME when MSI-X is supported and there are multiple vectors */
+	/* Clear the MSI-X event interrupt status */
+
+	if (hcd->irq != -1) {
+		u32 irq_pending;
+		/* Acknowledge the PCI interrupt */
+		irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+		irq_pending |= 0x3;
+		xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
+	}
+
+	if (xhci->xhc_state & XHCI_STATE_DYING) {
+		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
+				"Shouldn't IRQs be disabled?\n");
+		/* Clear the event handler busy flag (RW1C);
+		 * the event ring should be empty.
+		 */
+		temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+		xhci_write_64(xhci, temp_64 | ERST_EHB,
+				&xhci->ir_set->erst_dequeue);
+		spin_unlock(&xhci->lock);
+
+		return IRQ_HANDLED;
+	}
+
+	event_ring_deq = xhci->event_ring->dequeue;
+	/* FIXME this should be a delayed service routine
+	 * that clears the EHB.
+	 */
+	xhci_handle_event(xhci);
+
+	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+	/* If necessary, update the HW's version of the event ring deq ptr. */
+	if (event_ring_deq != xhci->event_ring->dequeue) {
+		deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
+				xhci->event_ring->dequeue);
+		if (deq == 0)
+			xhci_warn(xhci, "WARN something wrong with SW event "
+					"ring dequeue ptr.\n");
+		/* Update HC event ring dequeue pointer */
+		temp_64 &= ERST_PTR_MASK;
+		temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
+	}
+
+	/* Clear the event handler busy flag (RW1C); event ring is empty. */
+	temp_64 |= ERST_EHB;
+	xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
+
+	spin_unlock(&xhci->lock);
+
+	return IRQ_HANDLED;
+}
+
+irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
+{
+	irqreturn_t ret;
+
+	set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
+
+	ret = xhci_irq(hcd);
+
+	return ret;
+}
+
 /**** Endpoint Ring Operations ****/
 
 /*
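xhci_irq() now performs the hardware dequeue update that the deleted xhci_set_hc_event_deq() used to do: preserve the register's low flag bits, splice in the new dequeue DMA address, and write EHB (write-1-to-clear) back. A sketch of that read-modify-write on a plain 64-bit value (the mask width is an assumption here; the real code goes through xhci_read_64/xhci_write_64 on a device register):

#include <stdint.h>
#include <stdio.h>

#define ERST_PTR_MASK  0xfULL       /* low-order flag bits (assumed width) */
#define ERST_EHB       (1ULL << 3)  /* Event Handler Busy, RW1C */

static uint64_t update_erst_deq(uint64_t reg, uint64_t new_deq_dma)
{
	uint64_t temp = reg & ERST_PTR_MASK;       /* preserve flag bits */
	temp |= new_deq_dma & ~ERST_PTR_MASK;      /* new dequeue pointer */
	temp |= ERST_EHB;                          /* clear busy flag (RW1C) */
	return temp;
}

int main(void)
{
	uint64_t reg = 0x1000 | 0x5;               /* old ptr plus stale flags */
	printf("new ERST_DEQ = %#llx\n",
	       (unsigned long long)update_erst_deq(reg, 0x2000));
	return 0;
}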
@@ -1827,10 +2225,12 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 		unsigned int stream_id,
 		unsigned int num_trbs,
 		struct urb *urb,
-		struct xhci_td **td,
+		unsigned int td_index,
 		gfp_t mem_flags)
 {
 	int ret;
+	struct urb_priv *urb_priv;
+	struct xhci_td *td;
 	struct xhci_ring *ep_ring;
 	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
 
@@ -1846,24 +2246,29 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 			num_trbs, mem_flags);
 	if (ret)
 		return ret;
-	*td = kzalloc(sizeof(struct xhci_td), mem_flags);
-	if (!*td)
-		return -ENOMEM;
-	INIT_LIST_HEAD(&(*td)->td_list);
-	INIT_LIST_HEAD(&(*td)->cancelled_td_list);
 
-	ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
-	if (unlikely(ret)) {
-		kfree(*td);
-		return ret;
+	urb_priv = urb->hcpriv;
+	td = urb_priv->td[td_index];
+
+	INIT_LIST_HEAD(&td->td_list);
+	INIT_LIST_HEAD(&td->cancelled_td_list);
+
+	if (td_index == 0) {
+		ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
+		if (unlikely(ret)) {
+			xhci_urb_free_priv(xhci, urb_priv);
+			urb->hcpriv = NULL;
+			return ret;
+		}
 	}
 
-	(*td)->urb = urb;
-	urb->hcpriv = (void *) (*td);
+	td->urb = urb;
 	/* Add this TD to the tail of the endpoint ring's TD list */
-	list_add_tail(&(*td)->td_list, &ep_ring->td_list);
-	(*td)->start_seg = ep_ring->enq_seg;
-	(*td)->first_trb = ep_ring->enqueue;
+	list_add_tail(&td->td_list, &ep_ring->td_list);
+	td->start_seg = ep_ring->enq_seg;
+	td->first_trb = ep_ring->enqueue;
+
+	urb_priv->td[td_index] = td;
 
 	return 0;
 }
@@ -2002,6 +2407,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 {
 	struct xhci_ring *ep_ring;
 	unsigned int num_trbs;
+	struct urb_priv *urb_priv;
 	struct xhci_td *td;
 	struct scatterlist *sg;
 	int num_sgs;
@@ -2022,9 +2428,13 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
-			num_trbs, urb, &td, mem_flags);
+			num_trbs, urb, 0, mem_flags);
 	if (trb_buff_len < 0)
 		return trb_buff_len;
+
+	urb_priv = urb->hcpriv;
+	td = urb_priv->td[0];
+
 	/*
 	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
 	 * until we've finished creating all the other TRBs.  The ring's cycle
@@ -2145,6 +2555,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		struct urb *urb, int slot_id, unsigned int ep_index)
 {
 	struct xhci_ring *ep_ring;
+	struct urb_priv *urb_priv;
 	struct xhci_td *td;
 	int num_trbs;
 	struct xhci_generic_trb *start_trb;
@@ -2190,10 +2601,13 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
-			num_trbs, urb, &td, mem_flags);
+			num_trbs, urb, 0, mem_flags);
 	if (ret < 0)
 		return ret;
 
+	urb_priv = urb->hcpriv;
+	td = urb_priv->td[0];
+
 	/*
 	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
 	 * until we've finished creating all the other TRBs.  The ring's cycle
@@ -2279,6 +2693,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	struct xhci_generic_trb *start_trb;
 	int start_cycle;
 	u32 field, length_field;
+	struct urb_priv *urb_priv;
 	struct xhci_td *td;
 
 	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
@@ -2306,10 +2721,13 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		num_trbs++;
 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
-			num_trbs, urb, &td, mem_flags);
+			num_trbs, urb, 0, mem_flags);
 	if (ret < 0)
 		return ret;
 
+	urb_priv = urb->hcpriv;
+	td = urb_priv->td[0];
+
 	/*
 	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
 	 * until we've finished creating all the other TRBs.  The ring's cycle
@@ -2366,6 +2784,224 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	return 0;
 }
 
+static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
+		struct urb *urb, int i)
+{
+	int num_trbs = 0;
+	u64 addr, td_len, running_total;
+
+	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
+	td_len = urb->iso_frame_desc[i].length;
+
+	running_total = TRB_MAX_BUFF_SIZE -
+			(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+	if (running_total != 0)
+		num_trbs++;
+
+	while (running_total < td_len) {
+		num_trbs++;
+		running_total += TRB_MAX_BUFF_SIZE;
+	}
+
+	return num_trbs;
+}
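count_isoc_trbs_needed() rounds an arbitrarily aligned packet up to TRB boundaries: the first TRB covers the bytes up to the next boundary, and the remainder is filled with full-size TRBs. A standalone worked example of the same arithmetic (assuming the usual 64 KB TRB buffer size; toy code, not driver code):

#include <stdio.h>

#define TRB_MAX_BUFF_SHIFT 16                      /* assumed: 64 KB per TRB */
#define TRB_MAX_BUFF_SIZE  (1 << TRB_MAX_BUFF_SHIFT)

/* Same arithmetic as count_isoc_trbs_needed(): the first TRB carries
 * the bytes up to the next 64 KB boundary, the rest are full TRBs.
 */
static int count_trbs(unsigned long long addr, unsigned long long td_len)
{
	int num_trbs = 0;
	unsigned long long running_total;

	running_total = TRB_MAX_BUFF_SIZE -
			(addr & (TRB_MAX_BUFF_SIZE - 1));
	if (running_total != 0)
		num_trbs++;
	while (running_total < td_len) {
		num_trbs++;
		running_total += TRB_MAX_BUFF_SIZE;
	}
	return num_trbs;
}

int main(void)
{
	/* 100 KB packet starting 1 KB past a boundary: the first TRB
	 * carries 63 KB, one more full TRB covers the rest -> 2 TRBs.
	 */
	printf("%d TRBs\n", count_trbs(0x400, 100 * 1024));
	return 0;
}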
+
+/* This is for isoc transfer */
+static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+		struct urb *urb, int slot_id, unsigned int ep_index)
+{
+	struct xhci_ring *ep_ring;
+	struct urb_priv *urb_priv;
+	struct xhci_td *td;
+	int num_tds, trbs_per_td;
+	struct xhci_generic_trb *start_trb;
+	bool first_trb;
+	int start_cycle;
+	u32 field, length_field;
+	int running_total, trb_buff_len, td_len, td_remain_len, ret;
+	u64 start_addr, addr;
+	int i, j;
+
+	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+
+	num_tds = urb->number_of_packets;
+	if (num_tds < 1) {
+		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
+		return -EINVAL;
+	}
+
+	if (!in_interrupt())
+		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d),"
+				" addr = %#llx, num_tds = %d\n",
+				urb->ep->desc.bEndpointAddress,
+				urb->transfer_buffer_length,
+				urb->transfer_buffer_length,
+				(unsigned long long)urb->transfer_dma,
+				num_tds);
+
+	start_addr = (u64) urb->transfer_dma;
+	start_trb = &ep_ring->enqueue->generic;
+	start_cycle = ep_ring->cycle_state;
+
+	/* Queue the first TRB, even if it's zero-length */
+	for (i = 0; i < num_tds; i++) {
+		first_trb = true;
+
+		running_total = 0;
+		addr = start_addr + urb->iso_frame_desc[i].offset;
+		td_len = urb->iso_frame_desc[i].length;
+		td_remain_len = td_len;
+
+		trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
+
+		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
+				urb->stream_id, trbs_per_td, urb, i, mem_flags);
+		if (ret < 0)
+			return ret;
+
+		urb_priv = urb->hcpriv;
+		td = urb_priv->td[i];
+
+		for (j = 0; j < trbs_per_td; j++) {
+			u32 remainder = 0;
+			field = 0;
+
+			if (first_trb) {
+				/* Queue the isoc TRB */
+				field |= TRB_TYPE(TRB_ISOC);
+				/* Assume URB_ISO_ASAP is set */
+				field |= TRB_SIA;
+				if (i > 0)
+					field |= ep_ring->cycle_state;
+				first_trb = false;
+			} else {
+				/* Queue other normal TRBs */
+				field |= TRB_TYPE(TRB_NORMAL);
+				field |= ep_ring->cycle_state;
+			}
+
+			/* Chain all the TRBs together; clear the chain bit in
+			 * the last TRB to indicate it's the last TRB in the
+			 * chain.
+			 */
+			if (j < trbs_per_td - 1) {
+				field |= TRB_CHAIN;
+			} else {
+				td->last_trb = ep_ring->enqueue;
+				field |= TRB_IOC;
+			}
+
+			/* Calculate TRB length */
+			trb_buff_len = TRB_MAX_BUFF_SIZE -
+				(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+			if (trb_buff_len > td_remain_len)
+				trb_buff_len = td_remain_len;
+
+			remainder = xhci_td_remainder(td_len - running_total);
+			length_field = TRB_LEN(trb_buff_len) |
+				remainder |
+				TRB_INTR_TARGET(0);
+			queue_trb(xhci, ep_ring, false, false,
+				lower_32_bits(addr),
+				upper_32_bits(addr),
+				length_field,
+				/* We always want to know if the TRB was short,
+				 * or we won't get an event when it completes.
+				 * (Unless we use event data TRBs, which are a
+				 * waste of space and HC resources.)
+				 */
+				field | TRB_ISP);
+			running_total += trb_buff_len;
+
+			addr += trb_buff_len;
+			td_remain_len -= trb_buff_len;
+		}
+
+		/* Check TD length */
+		if (running_total != td_len) {
+			xhci_err(xhci, "ISOC TD length unmatch\n");
+			return -EINVAL;
+		}
+	}
+
+	wmb();
+	start_trb->field[3] |= start_cycle;
+
+	ring_ep_doorbell(xhci, slot_id, ep_index, urb->stream_id);
+	return 0;
+}
+
+/*
+ * Check transfer ring to guarantee there is enough room for the urb.
+ * Update ISO URB start_frame and interval.
+ * Update interval as xhci_queue_intr_tx does. Just use xhci frame_index to
+ * update the urb->start_frame by now.
+ * Always assume URB_ISO_ASAP set, and NEVER use urb->start_frame as input.
+ */
+int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
+		struct urb *urb, int slot_id, unsigned int ep_index)
+{
+	struct xhci_virt_device *xdev;
+	struct xhci_ring *ep_ring;
+	struct xhci_ep_ctx *ep_ctx;
+	int start_frame;
+	int xhci_interval;
+	int ep_interval;
+	int num_tds, num_trbs, i;
+	int ret;
+
+	xdev = xhci->devs[slot_id];
+	ep_ring = xdev->eps[ep_index].ring;
+	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+
+	num_trbs = 0;
+	num_tds = urb->number_of_packets;
+	for (i = 0; i < num_tds; i++)
+		num_trbs += count_isoc_trbs_needed(xhci, urb, i);
+
+	/* Check the ring to guarantee there is enough room for the whole urb.
+	 * Do not insert any td of the urb to the ring if the check failed.
+	 */
+	ret = prepare_ring(xhci, ep_ring, ep_ctx->ep_info & EP_STATE_MASK,
+				num_trbs, mem_flags);
+	if (ret)
+		return ret;
+
+	start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
+	start_frame &= 0x3fff;
+
+	urb->start_frame = start_frame;
+	if (urb->dev->speed == USB_SPEED_LOW ||
+			urb->dev->speed == USB_SPEED_FULL)
+		urb->start_frame >>= 3;
+
+	xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info);
+	ep_interval = urb->interval;
+	/* Convert to microframes */
+	if (urb->dev->speed == USB_SPEED_LOW ||
+			urb->dev->speed == USB_SPEED_FULL)
+		ep_interval *= 8;
+	/* FIXME change this to a warning and a suggestion to use the new API
+	 * to set the polling interval (once the API is added).
+	 */
+	if (xhci_interval != ep_interval) {
+		if (!printk_ratelimit())
+			dev_dbg(&urb->dev->dev, "Driver uses different interval"
+					" (%d microframe%s) than xHCI "
+					"(%d microframe%s)\n",
+					ep_interval,
+					ep_interval == 1 ? "" : "s",
+					xhci_interval,
+					xhci_interval == 1 ? "" : "s");
+		urb->interval = xhci_interval;
+		/* Convert back to frames for LS/FS devices */
+		if (urb->dev->speed == USB_SPEED_LOW ||
+				urb->dev->speed == USB_SPEED_FULL)
+			urb->interval /= 8;
+	}
+	return xhci_queue_isoc_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
+}
+
 /**** Command Ring Operations ****/
 
 /* Generic function for queueing a command TRB on the command ring.
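One detail worth calling out from xhci_queue_isoc_tx_prepare() above: low- and full-speed drivers express urb->interval in 1 ms frames, while the endpoint context stores 125 us microframes, so the comparison happens in microframes and the result is converted back for the driver. A toy illustration with hypothetical values (not driver code):

#include <stdio.h>

int main(void)
{
	int urb_interval_frames = 4;       /* what the driver asked for */
	int ep_interval_uframes = urb_interval_frames * 8;
	int xhci_interval_uframes = 16;    /* what the endpoint context holds */

	if (xhci_interval_uframes != ep_interval_uframes) {
		/* adopt the xHC's idea of the interval, back in frames */
		urb_interval_frames = xhci_interval_uframes / 8;
		printf("interval adjusted to %d frames (%d microframes)\n",
		       urb_interval_frames, xhci_interval_uframes);
	}
	return 0;
}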