 drivers/usb/host/xhci-ring.c | 319 ++++++++++++++++++++++++++++++++++++++++++
 drivers/usb/host/xhci.h      |   5 +
 2 files changed, 324 insertions(+), 0 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index fa8c93559133..da3519e76e2b 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1472,6 +1472,104 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
 }
 
 /*
+ * Process isochronous tds, update urb packet status and actual_length.
+ */
+static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
+	union xhci_trb *event_trb, struct xhci_transfer_event *event,
+	struct xhci_virt_ep *ep, int *status)
+{
+	struct xhci_ring *ep_ring;
+	struct urb_priv *urb_priv;
+	int idx;
+	int len = 0;
+	int skip_td = 0;
+	union xhci_trb *cur_trb;
+	struct xhci_segment *cur_seg;
+	u32 trb_comp_code;
+
+	ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
+	trb_comp_code = GET_COMP_CODE(event->transfer_len);
+	urb_priv = td->urb->hcpriv;
+	idx = urb_priv->td_cnt;
+
+	if (ep->skip) {
+		/* The transfer is partly done */
+		*status = -EXDEV;
+		td->urb->iso_frame_desc[idx].status = -EXDEV;
+	} else {
+		/* handle completion code */
+		switch (trb_comp_code) {
+		case COMP_SUCCESS:
+			td->urb->iso_frame_desc[idx].status = 0;
+			xhci_dbg(xhci, "Successful isoc transfer!\n");
+			break;
+		case COMP_SHORT_TX:
+			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+				td->urb->iso_frame_desc[idx].status =
+					-EREMOTEIO;
+			else
+				td->urb->iso_frame_desc[idx].status = 0;
+			break;
+		case COMP_BW_OVER:
+			td->urb->iso_frame_desc[idx].status = -ECOMM;
+			skip_td = 1;
+			break;
+		case COMP_BUFF_OVER:
+		case COMP_BABBLE:
+			td->urb->iso_frame_desc[idx].status = -EOVERFLOW;
+			skip_td = 1;
+			break;
+		case COMP_STALL:
+			td->urb->iso_frame_desc[idx].status = -EPROTO;
+			skip_td = 1;
+			break;
+		case COMP_STOP:
+		case COMP_STOP_INVAL:
+			break;
+		default:
+			td->urb->iso_frame_desc[idx].status = -1;
+			break;
+		}
+	}
+
+	/* calc actual length */
+	if (ep->skip) {
+		td->urb->iso_frame_desc[idx].actual_length = 0;
+		return finish_td(xhci, td, event_trb, event, ep, status, true);
+	}
+
+	if (trb_comp_code == COMP_SUCCESS || skip_td == 1) {
+		td->urb->iso_frame_desc[idx].actual_length =
+			td->urb->iso_frame_desc[idx].length;
+		td->urb->actual_length +=
+			td->urb->iso_frame_desc[idx].length;
+	} else {
+		for (cur_trb = ep_ring->dequeue,
+		     cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
+		     next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
+			if ((cur_trb->generic.field[3] &
+			     TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
+			    (cur_trb->generic.field[3] &
+			     TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
+				len +=
+				    TRB_LEN(cur_trb->generic.field[2]);
+		}
+		len += TRB_LEN(cur_trb->generic.field[2]) -
+			TRB_LEN(event->transfer_len);
+
+		if (trb_comp_code != COMP_STOP_INVAL) {
+			td->urb->iso_frame_desc[idx].actual_length = len;
+			td->urb->actual_length += len;
+		}
+	}
+
+	if ((idx == urb_priv->length - 1) && *status == -EINPROGRESS)
+		*status = 0;
+
+	return finish_td(xhci, td, event_trb, event, ep, status, false);
+}
+
+/*
  * Process bulk and interrupt tds, update urb status and actual_length.
  */
 static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
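
For reference, the short-transfer branch of process_isoc_td() above recovers
the frame's byte count by walking the ring from the dequeue pointer up to the
TRB that raised the event, summing TRB_LEN of every TRB that is neither a
no-op nor a link, then adding the event TRB's own length minus the
untransferred residue reported in the event. A standalone sketch of that
arithmetic with invented values (the three-TRB layout, trb_len[] and residue
below are illustrative stand-ins, not driver structures):

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical TD: three chained TRBs of 1024 bytes each;
		 * the transfer stopped early in the third TRB, with 600
		 * bytes left untransferred per the event's residue field.
		 */
		unsigned int trb_len[] = { 1024, 1024, 1024 };
		unsigned int event_trb_idx = 2;
		unsigned int residue = 600;
		unsigned int len = 0;
		unsigned int i;

		/* TRBs fully consumed before the event TRB... */
		for (i = 0; i < event_trb_idx; i++)
			len += trb_len[i];
		/* ...plus the partial progress inside the event TRB. */
		len += trb_len[event_trb_idx] - residue;

		printf("actual_length = %u\n", len);	/* 2472 */
		return 0;
	}
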
@@ -1768,6 +1866,9 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
 			ret = process_ctrl_td(xhci, td, event_trb, event, ep,
 				&status);
+		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
+			ret = process_isoc_td(xhci, td, event_trb, event, ep,
+				&status);
 		else
 			ret = process_bulk_intr_td(xhci, td, event_trb, event,
 				ep, &status);
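
Both the completion path above and the queueing code below operate on the
URB's iso_frame_desc[] array, one descriptor per isochronous packet. A sketch
of how a class driver typically fills it before submission (the packet size
and count are invented; the struct mirrors usb_iso_packet_descriptor from
<linux/usb.h>):

	#include <stdio.h>

	/* Mirrors struct usb_iso_packet_descriptor. */
	struct iso_packet_desc {
		unsigned int offset;
		unsigned int length;
		unsigned int actual_length;
		int status;
	};

	int main(void)
	{
		/* Hypothetical isoc URB: 8 packets of 192 bytes, packed
		 * back to back in one DMA-mapped transfer buffer.
		 */
		struct iso_packet_desc desc[8];
		unsigned int i, packet_size = 192;

		for (i = 0; i < 8; i++) {
			desc[i].offset = i * packet_size;
			desc[i].length = packet_size;
			desc[i].actual_length = 0;
			desc[i].status = -1;	/* not yet completed */
		}
		printf("last packet at offset %u\n", desc[7].offset);
		return 0;
	}
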
@@ -2553,6 +2654,224 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	return 0;
 }
 
+static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
+		struct urb *urb, int i)
+{
+	int num_trbs = 0;
+	u64 addr, td_len, running_total;
+
+	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
+	td_len = urb->iso_frame_desc[i].length;
+
+	running_total = TRB_MAX_BUFF_SIZE -
+			(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+	if (running_total != 0)
+		num_trbs++;
+
+	while (running_total < td_len) {
+		num_trbs++;
+		running_total += TRB_MAX_BUFF_SIZE;
+	}
+
+	return num_trbs;
+}
+
+/* This is for isoc transfer */
+static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+		struct urb *urb, int slot_id, unsigned int ep_index)
+{
+	struct xhci_ring *ep_ring;
+	struct urb_priv *urb_priv;
+	struct xhci_td *td;
+	int num_tds, trbs_per_td;
+	struct xhci_generic_trb *start_trb;
+	bool first_trb;
+	int start_cycle;
+	u32 field, length_field;
+	int running_total, trb_buff_len, td_len, td_remain_len, ret;
+	u64 start_addr, addr;
+	int i, j;
+
+	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+
+	num_tds = urb->number_of_packets;
+	if (num_tds < 1) {
+		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
+		return -EINVAL;
+	}
+
+	if (!in_interrupt())
+		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d),"
+				" addr = %#llx, num_tds = %d\n",
+				urb->ep->desc.bEndpointAddress,
+				urb->transfer_buffer_length,
+				urb->transfer_buffer_length,
+				(unsigned long long)urb->transfer_dma,
+				num_tds);
+
+	start_addr = (u64) urb->transfer_dma;
+	start_trb = &ep_ring->enqueue->generic;
+	start_cycle = ep_ring->cycle_state;
+
+	/* Queue the first TRB, even if it's zero-length */
+	for (i = 0; i < num_tds; i++) {
+		first_trb = true;
+
+		running_total = 0;
+		addr = start_addr + urb->iso_frame_desc[i].offset;
+		td_len = urb->iso_frame_desc[i].length;
+		td_remain_len = td_len;
+
+		trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
+
+		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
+				urb->stream_id, trbs_per_td, urb, i, mem_flags);
+		if (ret < 0)
+			return ret;
+
+		urb_priv = urb->hcpriv;
+		td = urb_priv->td[i];
+
+		for (j = 0; j < trbs_per_td; j++) {
+			u32 remainder = 0;
+			field = 0;
+
+			if (first_trb) {
+				/* Queue the isoc TRB */
+				field |= TRB_TYPE(TRB_ISOC);
+				/* Assume URB_ISO_ASAP is set */
+				field |= TRB_SIA;
+				if (i > 0)
+					field |= ep_ring->cycle_state;
+				first_trb = false;
+			} else {
+				/* Queue other normal TRBs */
+				field |= TRB_TYPE(TRB_NORMAL);
+				field |= ep_ring->cycle_state;
+			}
+
+			/* Chain all the TRBs together; clear the chain bit in
+			 * the last TRB to indicate it's the last TRB in the
+			 * chain.
+			 */
+			if (j < trbs_per_td - 1) {
+				field |= TRB_CHAIN;
+			} else {
+				td->last_trb = ep_ring->enqueue;
+				field |= TRB_IOC;
+			}
+
+			/* Calculate TRB length */
+			trb_buff_len = TRB_MAX_BUFF_SIZE -
+				(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+			if (trb_buff_len > td_remain_len)
+				trb_buff_len = td_remain_len;
+
+			remainder = xhci_td_remainder(td_len - running_total);
+			length_field = TRB_LEN(trb_buff_len) |
+				remainder |
+				TRB_INTR_TARGET(0);
+			queue_trb(xhci, ep_ring, false, false,
+				lower_32_bits(addr),
+				upper_32_bits(addr),
+				length_field,
+				/* We always want to know if the TRB was short,
+				 * or we won't get an event when it completes.
+				 * (Unless we use event data TRBs, which are a
+				 * waste of space and HC resources.)
+				 */
+				field | TRB_ISP);
+			running_total += trb_buff_len;
+
+			addr += trb_buff_len;
+			td_remain_len -= trb_buff_len;
+		}
+
+		/* Check TD length */
+		if (running_total != td_len) {
+			xhci_err(xhci, "ISOC TD length mismatch\n");
+			return -EINVAL;
+		}
+	}
+
+	wmb();
+	start_trb->field[3] |= start_cycle;
+
+	ring_ep_doorbell(xhci, slot_id, ep_index, urb->stream_id);
+	return 0;
+}
+
+/*
+ * Check the transfer ring to guarantee there is enough room for the whole URB.
+ * Update the ISO URB's start_frame and interval.
+ * The interval is updated as xhci_queue_intr_tx does; for now, simply use the
+ * xHC's frame_index register to set urb->start_frame.
+ * URB_ISO_ASAP is always assumed; urb->start_frame is NEVER used as input.
+ */
+int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
+		struct urb *urb, int slot_id, unsigned int ep_index)
+{
+	struct xhci_virt_device *xdev;
+	struct xhci_ring *ep_ring;
+	struct xhci_ep_ctx *ep_ctx;
+	int start_frame;
+	int xhci_interval;
+	int ep_interval;
+	int num_tds, num_trbs, i;
+	int ret;
+
+	xdev = xhci->devs[slot_id];
+	ep_ring = xdev->eps[ep_index].ring;
+	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+
+	num_trbs = 0;
+	num_tds = urb->number_of_packets;
+	for (i = 0; i < num_tds; i++)
+		num_trbs += count_isoc_trbs_needed(xhci, urb, i);
+
+	/* Check the ring to guarantee there is enough room for the whole urb.
+	 * Do not insert any td of the urb to the ring if the check failed.
+	 */
+	ret = prepare_ring(xhci, ep_ring, ep_ctx->ep_info & EP_STATE_MASK,
+			num_trbs, mem_flags);
+	if (ret)
+		return ret;
+
+	start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
+	start_frame &= 0x3fff;
+
+	urb->start_frame = start_frame;
+	if (urb->dev->speed == USB_SPEED_LOW ||
+			urb->dev->speed == USB_SPEED_FULL)
+		urb->start_frame >>= 3;
+
+	xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info);
+	ep_interval = urb->interval;
+	/* Convert to microframes */
+	if (urb->dev->speed == USB_SPEED_LOW ||
+			urb->dev->speed == USB_SPEED_FULL)
+		ep_interval *= 8;
+	/* FIXME change this to a warning and a suggestion to use the new API
+	 * to set the polling interval (once the API is added).
+	 */
+	if (xhci_interval != ep_interval) {
+		if (printk_ratelimit())
+			dev_dbg(&urb->dev->dev, "Driver uses different interval"
+					" (%d microframe%s) than xHCI "
+					"(%d microframe%s)\n",
+					ep_interval,
+					ep_interval == 1 ? "" : "s",
+					xhci_interval,
+					xhci_interval == 1 ? "" : "s");
+		urb->interval = xhci_interval;
+		/* Convert back to frames for LS/FS devices */
+		if (urb->dev->speed == USB_SPEED_LOW ||
+				urb->dev->speed == USB_SPEED_FULL)
+			urb->interval /= 8;
+	}
+	return xhci_queue_isoc_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
+}
+
 /**** Command Ring Operations ****/
 
 /* Generic function for queueing a command TRB on the command ring.
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index ebf62082950b..e1383d91468b 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -919,6 +919,9 @@ struct xhci_event_cmd {
 /* Control transfer TRB specific fields */
 #define TRB_DIR_IN		(1<<16)
 
+/* Isochronous TRB specific fields */
+#define TRB_SIA			(1<<31)
+
 struct xhci_generic_trb {
 	u32	field[4];
 };
@@ -1416,6 +1419,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
 		int slot_id, unsigned int ep_index);
 int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
 		int slot_id, unsigned int ep_index);
+int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
+		struct urb *urb, int slot_id, unsigned int ep_index);
 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 		u32 slot_id, bool command_must_succeed);
 int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
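
On the interval fix-up in xhci_queue_isoc_tx_prepare(): the comparison is done
in 125us microframes. EP_INTERVAL_TO_UFRAMES() yields the endpoint context's
interval in microframes, while urb->interval for low- and full-speed devices
is expressed in 1ms frames; hence the *8 before comparing, the /8 when writing
the corrected value back, and the start_frame >>= 3 when reporting a frame
number to an LS/FS caller. A small sketch of the unit conversion under those
assumptions (the helper and sample numbers are illustrative, not kernel API):

	#include <stdbool.h>
	#include <stdio.h>

	/* LS/FS URBs count intervals in 1ms frames; xHCI counts in 125us
	 * microframes, so one frame equals eight microframes.
	 */
	static int urb_interval_to_uframes(int interval, bool ls_or_fs)
	{
		return ls_or_fs ? interval * 8 : interval;
	}

	int main(void)
	{
		/* Full-speed isoc endpoint polled every 4 frames. */
		int ep_interval = urb_interval_to_uframes(4, true);
		int xhci_interval = 16;	/* endpoint context: 16 uframes = 2ms */

		printf("driver asked for %d uframes, xHC uses %d\n",
		       ep_interval, xhci_interval);
		if (xhci_interval != ep_interval)
			/* Convert back to frames for the LS/FS caller. */
			printf("urb->interval corrected to %d frames\n",
			       xhci_interval / 8);
		return 0;
	}
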