Diffstat (limited to 'drivers/usb/host/xhci.c')
-rw-r--r--	drivers/usb/host/xhci.c	416
1 file changed, 406 insertions(+), 10 deletions(-)
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 7e4277273908..40e0a0c221b8 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -21,6 +21,7 @@
  */
 
 #include <linux/irq.h>
+#include <linux/log2.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/slab.h>
@@ -352,11 +353,7 @@ void xhci_event_ring_work(unsigned long arg)
 		if (!xhci->devs[i])
 			continue;
 		for (j = 0; j < 31; ++j) {
-			struct xhci_ring *ring = xhci->devs[i]->eps[j].ring;
-			if (!ring)
-				continue;
-			xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
-			xhci_debug_segment(xhci, ring->deq_seg);
+			xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
 		}
 	}
 
@@ -726,8 +723,21 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 		spin_lock_irqsave(&xhci->lock, flags);
 		if (xhci->xhc_state & XHCI_STATE_DYING)
 			goto dying;
-		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
-				slot_id, ep_index);
+		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
+				EP_GETTING_STREAMS) {
+			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
+					"is transitioning to using streams.\n");
+			ret = -EINVAL;
+		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
+				EP_GETTING_NO_STREAMS) {
+			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
+					"is transitioning to "
+					"not having streams.\n");
+			ret = -EINVAL;
+		} else {
+			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
+					slot_id, ep_index);
+		}
 		spin_unlock_irqrestore(&xhci->lock, flags);
 	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
 		spin_lock_irqsave(&xhci->lock, flags);
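The two new ep_state checks close a race window: while a Configure Endpoint command is transitioning an endpoint into or out of stream mode, no bulk URB may be queued. A minimal sketch of the guard, assuming flag values along the lines of the definitions in xhci.h (the exact bit positions are not shown in this diff):

	/* Illustrative only -- the real flag definitions live in xhci.h. */
	#define EP_GETTING_STREAMS	(1 << 3)	/* streams being set up */
	#define EP_HAS_STREAMS		(1 << 4)	/* stream rings in use */
	#define EP_GETTING_NO_STREAMS	(1 << 5)	/* streams being torn down */

	/* Reject bulk URBs for the whole transition window, in either
	 * direction, so a half-built stream context array never sees a
	 * transfer.
	 */
	static int ep_transitioning(unsigned int ep_state)
	{
		return ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS);
	}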
@@ -825,7 +835,12 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	xhci_debug_ring(xhci, xhci->event_ring);
 	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
 	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
-	ep_ring = ep->ring;
+	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+	if (!ep_ring) {
+		ret = -EINVAL;
+		goto done;
+	}
+
 	xhci_dbg(xhci, "Endpoint ring:\n");
 	xhci_debug_ring(xhci, ep_ring);
 	td = (struct xhci_td *) urb->hcpriv;
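ep->ring is no longer the only transfer ring an URB can land on, so the dequeue path asks xhci_urb_to_transfer_ring() to resolve the (slot, endpoint, stream ID) triple. A sketch of that mapping, assuming the stream_info layout this series introduces (stream_rings[] indexed by stream ID, stream 0 reserved):

	/* Sketch, not the in-tree helper: resolve the ring an URB targets. */
	static struct xhci_ring *urb_to_ring_sketch(struct xhci_virt_ep *ep,
			struct urb *urb)
	{
		if (ep->ep_state & EP_HAS_STREAMS) {
			/* Stream 0 is reserved and out-of-range IDs are bugs;
			 * returning NULL makes the caller fail with -EINVAL.
			 */
			if (urb->stream_id == 0 ||
					urb->stream_id >= ep->stream_info->num_streams)
				return NULL;
			return ep->stream_info->stream_rings[urb->stream_id];
		}
		if (urb->stream_id != 0)
			return NULL;	/* stream ID on a non-stream endpoint */
		return ep->ring;
	}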
@@ -1369,7 +1384,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
 	 * or it will attempt to resend it on the next doorbell ring.
 	 */
 	xhci_find_new_dequeue_state(xhci, udev->slot_id,
-			ep_index, ep->stopped_td,
+			ep_index, ep->stopped_stream, ep->stopped_td,
 			&deq_state);
 
 	/* HW with the reset endpoint quirk will use the saved dequeue state to
@@ -1378,10 +1393,12 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
 	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
 		xhci_dbg(xhci, "Queueing new dequeue state\n");
 		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
-				ep_index, &deq_state);
+				ep_index, ep->stopped_stream, &deq_state);
 	} else {
 		/* Better hope no one uses the input context between now and the
 		 * reset endpoint completion!
+		 * XXX: No idea how this hardware will react when stream rings
+		 * are enabled.
 		 */
 		xhci_dbg(xhci, "Setting up input context for "
 				"configure endpoint command\n");
@@ -1438,12 +1455,391 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
 		kfree(virt_ep->stopped_td);
 		xhci_ring_cmd_db(xhci);
 	}
+	virt_ep->stopped_td = NULL;
+	virt_ep->stopped_trb = NULL;
+	virt_ep->stopped_stream = 0;
 	spin_unlock_irqrestore(&xhci->lock, flags);
 
 	if (ret)
 		xhci_warn(xhci, "FIXME allocate a new ring segment\n");
 }
 
+static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
+		struct usb_device *udev, struct usb_host_endpoint *ep,
+		unsigned int slot_id)
+{
+	int ret;
+	unsigned int ep_index;
+	unsigned int ep_state;
+
+	if (!ep)
+		return -EINVAL;
+	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, __func__);
+	if (ret <= 0)
+		return -EINVAL;
+	if (ep->ss_ep_comp.bmAttributes == 0) {
+		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
+				" descriptor for ep 0x%x does not support streams\n",
+				ep->desc.bEndpointAddress);
+		return -EINVAL;
+	}
+
+	ep_index = xhci_get_endpoint_index(&ep->desc);
+	ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
+	if (ep_state & EP_HAS_STREAMS ||
+			ep_state & EP_GETTING_STREAMS) {
+		xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
+				"already has streams set up.\n",
+				ep->desc.bEndpointAddress);
+		xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
+				"dynamic stream context array reallocation.\n");
+		return -EINVAL;
+	}
+	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
+		xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
+				"endpoint 0x%x; URBs are pending.\n",
+				ep->desc.bEndpointAddress);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
+		unsigned int *num_streams, unsigned int *num_stream_ctxs)
+{
+	unsigned int max_streams;
+
+	/* The stream context array size must be a power of two */
+	*num_stream_ctxs = roundup_pow_of_two(*num_streams);
+	/*
+	 * Find out how many primary stream array entries the host controller
+	 * supports.  Later we may use secondary stream arrays (similar to 2nd
+	 * level page entries), but that's an optional feature for xHCI host
+	 * controllers.  xHCs must support at least 4 stream IDs.
+	 */
+	max_streams = HCC_MAX_PSA(xhci->hcc_params);
+	if (*num_stream_ctxs > max_streams) {
+		xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
+				max_streams);
+		*num_stream_ctxs = max_streams;
+		*num_streams = max_streams;
+	}
+}
+
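A worked example of the sizing rule above, as a standalone sketch (the max_psa value is hypothetical; a real controller reports it through HCC_MAX_PSA()):

	#include <stdio.h>

	/* Stand-in for the kernel's roundup_pow_of_two() from <linux/log2.h>,
	 * which is why this patch adds that include.
	 */
	static unsigned int pow2_roundup(unsigned int n)
	{
		unsigned int p = 1;

		while (p < n)
			p <<= 1;
		return p;
	}

	int main(void)
	{
		unsigned int num_streams = 13 + 1;	/* driver asked for 13, plus stream 0 */
		unsigned int max_psa = 8;		/* hypothetical HCC_MAX_PSA() result */
		unsigned int num_stream_ctxs = pow2_roundup(num_streams);	/* 16 */

		if (num_stream_ctxs > max_psa) {	/* clamp to the xHC's limit */
			num_stream_ctxs = max_psa;
			num_streams = max_psa;
		}
		/* Prints: 8 ctx entries, 8 stream IDs (7 usable) */
		printf("%u ctx entries, %u stream IDs (%u usable)\n",
				num_stream_ctxs, num_streams, num_streams - 1);
		return 0;
	}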
+/* Returns an error code if one of the endpoints already has streams.
+ * This does not change any data structures, it only checks and gathers
+ * information.
+ */
+static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
+		struct usb_device *udev,
+		struct usb_host_endpoint **eps, unsigned int num_eps,
+		unsigned int *num_streams, u32 *changed_ep_bitmask)
+{
+	unsigned int max_streams;
+	unsigned int endpoint_flag;
+	int i;
+	int ret;
+
+	for (i = 0; i < num_eps; i++) {
+		ret = xhci_check_streams_endpoint(xhci, udev,
+				eps[i], udev->slot_id);
+		if (ret < 0)
+			return ret;
+
+		max_streams = USB_SS_MAX_STREAMS(
+				eps[i]->ss_ep_comp.bmAttributes);
+		if (max_streams < (*num_streams - 1)) {
+			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
+					eps[i]->desc.bEndpointAddress,
+					max_streams);
+			*num_streams = max_streams + 1;
+		}
+
+		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
+		if (*changed_ep_bitmask & endpoint_flag)
+			return -EINVAL;
+		*changed_ep_bitmask |= endpoint_flag;
+	}
+	return 0;
+}
+
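Two things happen per endpoint in the loop above: the request is negotiated down to what each SuperSpeed endpoint companion descriptor advertises, and a per-endpoint flag is OR'd into changed_ep_bitmask so a duplicate entry in eps[] trips the -EINVAL path. The negotiation arithmetic, worked through for one hypothetical endpoint:

	/* USB_SS_MAX_STREAMS() decodes the MaxStreams field of bmAttributes
	 * as a power of two: MaxStreams == 4 means 1 << 4 == 16 usable IDs.
	 *
	 * Suppose the driver asked for 31 streams, so *num_streams arrived
	 * here as 32 (stream 0 included).  Since 16 < (32 - 1), the request
	 * is trimmed to *num_streams = 16 + 1 = 17: sixteen usable stream
	 * IDs plus the reserved stream 0.
	 */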
+static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
+		struct usb_device *udev,
+		struct usb_host_endpoint **eps, unsigned int num_eps)
+{
+	u32 changed_ep_bitmask = 0;
+	unsigned int slot_id;
+	unsigned int ep_index;
+	unsigned int ep_state;
+	int i;
+
+	slot_id = udev->slot_id;
+	if (!xhci->devs[slot_id])
+		return 0;
+
+	for (i = 0; i < num_eps; i++) {
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
+		/* Are streams already being freed for the endpoint? */
+		if (ep_state & EP_GETTING_NO_STREAMS) {
+			xhci_warn(xhci, "WARN Can't disable streams for "
+					"endpoint 0x%x, "
+					"streams are being disabled already\n",
+					eps[i]->desc.bEndpointAddress);
+			return 0;
+		}
+		/* Are there actually any streams to free? */
+		if (!(ep_state & EP_HAS_STREAMS) &&
+				!(ep_state & EP_GETTING_STREAMS)) {
+			xhci_warn(xhci, "WARN Can't disable streams for "
+					"endpoint 0x%x, "
+					"streams are already disabled!\n",
+					eps[i]->desc.bEndpointAddress);
+			xhci_warn(xhci, "WARN xhci_free_streams() called "
+					"with non-streams endpoint\n");
+			return 0;
+		}
+		changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
+	}
+	return changed_ep_bitmask;
+}
+
+/*
+ * The USB device drivers use this function (through the HCD interface in USB
+ * core) to prepare a set of bulk endpoints to use streams.  Streams are used to
+ * coordinate mass storage command queueing across multiple endpoints (basically
+ * a stream ID == a task ID).
+ *
+ * Setting up streams involves allocating the same size stream context array
+ * for each endpoint and issuing a configure endpoint command for all endpoints.
+ *
+ * Don't allow the call to succeed if one endpoint only supports one stream
+ * (which means it doesn't support streams at all).
+ *
+ * Drivers may get fewer stream IDs than they asked for, if the host controller
+ * hardware or endpoints claim they can't support the number of requested
+ * stream IDs.
+ */
+int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
+		struct usb_host_endpoint **eps, unsigned int num_eps,
+		unsigned int num_streams, gfp_t mem_flags)
+{
+	int i, ret;
+	struct xhci_hcd *xhci;
+	struct xhci_virt_device *vdev;
+	struct xhci_command *config_cmd;
+	unsigned int ep_index;
+	unsigned int num_stream_ctxs;
+	unsigned long flags;
+	u32 changed_ep_bitmask = 0;
+
+	if (!eps)
+		return -EINVAL;
+
+	/* Add one to the number of streams requested to account for
+	 * stream 0 that is reserved for xHCI usage.
+	 */
+	num_streams += 1;
+	xhci = hcd_to_xhci(hcd);
+	xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
+			num_streams);
+
+	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
+	if (!config_cmd) {
+		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
+		return -ENOMEM;
+	}
+
+	/* Check to make sure all endpoints are not already configured for
+	 * streams.  While we're at it, find the maximum number of streams that
+	 * all the endpoints will support and check for duplicate endpoints.
+	 */
+	spin_lock_irqsave(&xhci->lock, flags);
+	ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
+			num_eps, &num_streams, &changed_ep_bitmask);
+	if (ret < 0) {
+		xhci_free_command(xhci, config_cmd);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return ret;
+	}
+	if (num_streams <= 1) {
+		xhci_warn(xhci, "WARN: endpoints can't handle "
+				"more than one stream.\n");
+		xhci_free_command(xhci, config_cmd);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return -EINVAL;
+	}
+	vdev = xhci->devs[udev->slot_id];
+	/* Mark each endpoint as being in transition, so
+	 * xhci_urb_enqueue() will reject all URBs.
+	 */
+	for (i = 0; i < num_eps; i++) {
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
+	}
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	/* Set up internal data structures and allocate HW data structures for
+	 * streams (but don't install the HW structures in the input context
+	 * until we're sure all memory allocation succeeded).
+	 */
+	xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
+	xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
+			num_stream_ctxs, num_streams);
+
+	for (i = 0; i < num_eps; i++) {
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
+				num_stream_ctxs,
+				num_streams, mem_flags);
+		if (!vdev->eps[ep_index].stream_info)
+			goto cleanup;
+		/* Set maxPstreams in endpoint context and update deq ptr to
+		 * point to stream context array.  FIXME
+		 */
+	}
+
+	/* Set up the input context for a configure endpoint command. */
+	for (i = 0; i < num_eps; i++) {
+		struct xhci_ep_ctx *ep_ctx;
+
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
+
+		xhci_endpoint_copy(xhci, config_cmd->in_ctx,
+				vdev->out_ctx, ep_index);
+		xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
+				vdev->eps[ep_index].stream_info);
+	}
+	/* Tell the HW to drop its old copy of the endpoint context info
+	 * and add the updated copy from the input context.
+	 */
+	xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
+			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
+
+	/* Issue and wait for the configure endpoint command */
+	ret = xhci_configure_endpoint(xhci, udev, config_cmd,
+			false, false);
+
+	/* xHC rejected the configure endpoint command for some reason, so we
+	 * leave the old ring intact and free our internal streams data
+	 * structure.
+	 */
+	if (ret < 0)
+		goto cleanup;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	for (i = 0; i < num_eps; i++) {
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
+		xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
+				udev->slot_id, ep_index);
+		vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
+	}
+	xhci_free_command(xhci, config_cmd);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	/* Subtract 1 for stream 0, which drivers can't use */
+	return num_streams - 1;
+
+cleanup:
+	/* If it didn't work, free the streams! */
+	for (i = 0; i < num_eps; i++) {
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
+		vdev->eps[ep_index].stream_info = NULL;
+		/* FIXME Unset maxPstreams in endpoint context and
+		 * update deq ptr to point to normal ring.
+		 */
+		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
+		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
+		xhci_endpoint_zero(xhci, vdev, eps[i]);
+	}
+	xhci_free_command(xhci, config_cmd);
+	return -ENOMEM;
+}
+
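From a class driver's point of view this entry point is reached through the USB core (hcd->driver->alloc_streams); a minimal, hypothetical calling sketch for a driver holding a bulk IN/OUT pair (intf, bulk_in_ep, and bulk_out_ep are assumed to exist in the driver):

	/* Sketch: request up to 16 streams on a bulk endpoint pair. */
	struct usb_host_endpoint *eps[2] = { bulk_in_ep, bulk_out_ep };
	int num_streams = usb_alloc_streams(intf, eps, 2, 16, GFP_KERNEL);

	if (num_streams < 0)
		return num_streams;	/* no streams; fall back to a single ring */
	/* num_streams may be less than 16.  Stream IDs 1..num_streams are now
	 * valid in urb->stream_id for both endpoints; ID 0 stays reserved.
	 */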
+/* Transition the endpoint from using streams to being a "normal" endpoint
+ * without streams.
+ *
+ * Modify the endpoint context state, submit a configure endpoint command,
+ * and free all endpoint rings for streams if that completes successfully.
+ */
+int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
+		struct usb_host_endpoint **eps, unsigned int num_eps,
+		gfp_t mem_flags)
+{
+	int i, ret;
+	struct xhci_hcd *xhci;
+	struct xhci_virt_device *vdev;
+	struct xhci_command *command;
+	unsigned int ep_index;
+	unsigned long flags;
+	u32 changed_ep_bitmask;
+
+	xhci = hcd_to_xhci(hcd);
+	vdev = xhci->devs[udev->slot_id];
+
+	/* Set up a configure endpoint command to remove the stream rings */
+	spin_lock_irqsave(&xhci->lock, flags);
+	changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
+			udev, eps, num_eps);
+	if (changed_ep_bitmask == 0) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return -EINVAL;
+	}
+
+	/* Use the xhci_command structure from the first endpoint.  We may have
+	 * allocated too many, but the driver may call xhci_free_streams() for
+	 * each endpoint it grouped into one call to xhci_alloc_streams().
+	 */
+	ep_index = xhci_get_endpoint_index(&eps[0]->desc);
+	command = vdev->eps[ep_index].stream_info->free_streams_command;
+	for (i = 0; i < num_eps; i++) {
+		struct xhci_ep_ctx *ep_ctx;
+
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
+		xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
+			EP_GETTING_NO_STREAMS;
+
+		xhci_endpoint_copy(xhci, command->in_ctx,
+				vdev->out_ctx, ep_index);
+		xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
+				&vdev->eps[ep_index]);
+	}
+	xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
+			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	/* Issue and wait for the configure endpoint command,
+	 * which must succeed.
+	 */
+	ret = xhci_configure_endpoint(xhci, udev, command,
+			false, true);
+
+	/* xHC rejected the configure endpoint command for some reason, so we
+	 * leave the stream rings intact.
+	 */
+	if (ret < 0)
+		return ret;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	for (i = 0; i < num_eps; i++) {
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
+		vdev->eps[ep_index].stream_info = NULL;
+		/* FIXME Unset maxPstreams in endpoint context and
+		 * update deq ptr to point to normal ring.
+		 */
+		vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
+		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
+	}
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	return 0;
+}
+
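Teardown mirrors allocation: the driver passes back the same endpoint group it handed to usb_alloc_streams(), after making sure no URBs are still in flight. Continuing the hypothetical sketch above:

	/* Sketch: give the endpoints back their ordinary (non-stream) rings. */
	usb_free_streams(intf, eps, 2, GFP_KERNEL);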
 /*
  * This submits a Reset Device Command, which will set the device state to 0,
  * set the device address to 0, and disable all the endpoints except the default