Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r--  drivers/usb/host/xhci-ring.c  109
1 file changed, 82 insertions(+), 27 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 7113d16e2d3a..952e2ded61af 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -514,8 +514,12 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 			(unsigned long long) addr);
 }
 
+/* flip_cycle means flip the cycle bit of all but the first and last TRB.
+ * (The last TRB actually points to the ring enqueue pointer, which is not part
+ * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
+ */
 static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
-		struct xhci_td *cur_td)
+		struct xhci_td *cur_td, bool flip_cycle)
 {
 	struct xhci_segment *cur_seg;
 	union xhci_trb *cur_trb;
@@ -528,6 +532,12 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 			 * leave the pointers intact.
 			 */
 			cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
+			/* Flip the cycle bit (link TRBs can't be the first
+			 * or last TRB).
+			 */
+			if (flip_cycle)
+				cur_trb->generic.field[3] ^=
+					cpu_to_le32(TRB_CYCLE);
 			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
 			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
 					"in seg %p (0x%llx dma)\n",
@@ -541,6 +551,11 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 			cur_trb->generic.field[2] = 0;
 			/* Preserve only the cycle bit of this TRB */
 			cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
+			/* Flip the cycle bit except on the first or last TRB */
+			if (flip_cycle && cur_trb != cur_td->first_trb &&
+					cur_trb != cur_td->last_trb)
+				cur_trb->generic.field[3] ^=
+					cpu_to_le32(TRB_CYCLE);
 			cur_trb->generic.field[3] |= cpu_to_le32(
 				TRB_TYPE(TRB_TR_NOOP));
 			xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
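
The two hunks above lean on xHCI's cycle-bit ownership protocol: the controller only executes a TRB whose cycle bit matches the ring's current producer cycle state, so XOR-ing TRB_CYCLE hands a TRB back to software. A minimal userspace sketch of that invariant (the TRB layout and constant below are simplified assumptions, not the kernel's definitions):

	#include <stdint.h>
	#include <stdio.h>

	#define TRB_CYCLE 0x1u	/* bit 0 of field[3], as in the xHCI spec */

	struct fake_trb { uint32_t field[4]; };

	int main(void)
	{
		struct fake_trb trb = { .field = { 0, 0, 0, TRB_CYCLE } };
		uint32_t ring_cycle_state = 1;	/* ring's producer cycle bit */

		/* Hardware owns a TRB whose cycle bit matches the ring state. */
		printf("hw owns TRB: %d\n",
		       (trb.field[3] & TRB_CYCLE) == ring_cycle_state);	/* 1 */

		/* XOR TRB_CYCLE, as td_to_noop(..., true) does: ownership
		 * returns to software without touching any other field.
		 */
		trb.field[3] ^= TRB_CYCLE;
		printf("hw owns TRB: %d\n",
		       (trb.field[3] & TRB_CYCLE) == ring_cycle_state);	/* 0 */
		return 0;
	}
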
@@ -719,14 +734,14 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 					cur_td->urb->stream_id,
 					cur_td, &deq_state);
 		else
-			td_to_noop(xhci, ep_ring, cur_td);
+			td_to_noop(xhci, ep_ring, cur_td, false);
 remove_finished_td:
 		/*
 		 * The event handler won't see a completion for this TD anymore,
 		 * so remove it from the endpoint ring's TD list.  Keep it in
 		 * the cancelled TD list for URB completion later.
 		 */
-		list_del(&cur_td->td_list);
+		list_del_init(&cur_td->td_list);
 	}
 	last_unlinked_td = cur_td;
 	xhci_stop_watchdog_timer_in_irq(xhci, ep);
@@ -754,7 +769,7 @@ remove_finished_td:
 	do {
 		cur_td = list_entry(ep->cancelled_td_list.next,
 				struct xhci_td, cancelled_td_list);
-		list_del(&cur_td->cancelled_td_list);
+		list_del_init(&cur_td->cancelled_td_list);
 
 		/* Clean up the cancelled URB */
 		/* Doesn't matter what we pass for status, since the core will
@@ -862,9 +877,9 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
 				cur_td = list_first_entry(&ring->td_list,
 						struct xhci_td,
 						td_list);
-				list_del(&cur_td->td_list);
+				list_del_init(&cur_td->td_list);
 				if (!list_empty(&cur_td->cancelled_td_list))
-					list_del(&cur_td->cancelled_td_list);
+					list_del_init(&cur_td->cancelled_td_list);
 				xhci_giveback_urb_in_irq(xhci, cur_td,
 						-ESHUTDOWN, "killed");
 			}
@@ -873,7 +888,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
 						&temp_ep->cancelled_td_list,
 						struct xhci_td,
 						cancelled_td_list);
-				list_del(&cur_td->cancelled_td_list);
+				list_del_init(&cur_td->cancelled_td_list);
 				xhci_giveback_urb_in_irq(xhci, cur_td,
 						-ESHUTDOWN, "killed");
 			}
@@ -1565,10 +1580,10 @@ td_cleanup:
 			else
 				*status = 0;
 		}
-		list_del(&td->td_list);
+		list_del_init(&td->td_list);
 		/* Was this TD slated to be cancelled but completed anyway? */
 		if (!list_empty(&td->cancelled_td_list))
-			list_del(&td->cancelled_td_list);
+			list_del_init(&td->cancelled_td_list);
 
 		urb_priv->td_cnt++;
 		/* Giveback the urb when all the tds are completed */
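
The list_del() to list_del_init() conversions matter because this code later calls list_empty() on the deleted entry itself ("Was this TD slated to be cancelled but completed anyway?"); plain list_del() leaves the node's pointers poisoned, so that test is unreliable, while list_del_init() re-links the node to itself. A standalone re-implementation of the two primitives showing the difference (userspace stand-ins for the <linux/list.h> helpers):

	#include <stdbool.h>
	#include <stdio.h>

	struct list_head { struct list_head *next, *prev; };

	static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
	static bool list_empty(const struct list_head *h) { return h->next == h; }

	static void list_add(struct list_head *e, struct list_head *head)
	{
		e->next = head->next;
		e->prev = head;
		head->next->prev = e;
		head->next = e;
	}

	static void list_del_init(struct list_head *e)
	{
		e->prev->next = e->next;
		e->next->prev = e->prev;
		INIT_LIST_HEAD(e);	/* self-link: list_empty(e) is now true */
	}

	int main(void)
	{
		struct list_head cancelled, td;

		INIT_LIST_HEAD(&cancelled);
		list_add(&td, &cancelled);

		list_del_init(&td);
		/* Safe to ask "is this TD still queued for cancellation?" */
		printf("td on a list: %d\n", !list_empty(&td));	/* 0 */
		return 0;
	}
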
@@ -1919,8 +1934,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	int status = -EINPROGRESS;
 	struct urb_priv *urb_priv;
 	struct xhci_ep_ctx *ep_ctx;
+	struct list_head *tmp;
 	u32 trb_comp_code;
 	int ret = 0;
+	int td_num = 0;
 
 	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
 	xdev = xhci->devs[slot_id];
@@ -1942,6 +1959,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		return -ENODEV;
 	}
 
+	/* Count current td numbers if ep->skip is set */
+	if (ep->skip) {
+		list_for_each(tmp, &ep_ring->td_list)
+			td_num++;
+	}
+
 	event_dma = le64_to_cpu(event->buffer);
 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
 	/* Look for common error cases */
@@ -2053,7 +2076,18 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			goto cleanup;
 		}
 
+		/* We've skipped all the TDs on the ep ring when ep->skip set */
+		if (ep->skip && td_num == 0) {
+			ep->skip = false;
+			xhci_dbg(xhci, "All tds on the ep_ring skipped. "
+						"Clear skip flag.\n");
+			ret = 0;
+			goto cleanup;
+		}
+
 		td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
+		if (ep->skip)
+			td_num--;
 
 		/* Is this a TRB in the currently executing TD? */
 		event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
@@ -2500,11 +2534,8 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 
 	if (td_index == 0) {
 		ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
-		if (unlikely(ret)) {
-			xhci_urb_free_priv(xhci, urb_priv);
-			urb->hcpriv = NULL;
+		if (unlikely(ret))
 			return ret;
-		}
 	}
 
 	td->urb = urb;
@@ -2672,6 +2703,10 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
 {
 	int packets_transferred;
 
+	/* One TRB with a zero-length data packet. */
+	if (running_total == 0 && trb_buff_len == 0)
+		return 0;
+
 	/* All the TRB queueing functions don't count the current TRB in
 	 * running_total.
 	 */
@@ -3113,20 +3148,15 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
 		struct urb *urb, int i)
 {
 	int num_trbs = 0;
-	u64 addr, td_len, running_total;
+	u64 addr, td_len;
 
 	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
 	td_len = urb->iso_frame_desc[i].length;
 
-	running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
-	running_total &= TRB_MAX_BUFF_SIZE - 1;
-	if (running_total != 0)
-		num_trbs++;
-
-	while (running_total < td_len) {
-		num_trbs++;
-		running_total += TRB_MAX_BUFF_SIZE;
-	}
+	num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
+			TRB_MAX_BUFF_SIZE);
+	if (num_trbs == 0)
+		num_trbs++;
 
 	return num_trbs;
 }
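
The rewrite of count_isoc_trbs_needed() replaces the counting loop with a closed form: the TD occupies td_len bytes starting at some offset into a TRB-sized 64 KB region, so it spans ceil((offset + td_len) / 64K) TRBs, with the extra bump covering the zero-length case the old loop missed. A standalone cross-check of the two versions (assumes the 64 KB TRB_MAX_BUFF_SIZE of real hardware; DIV_ROUND_UP is a local macro):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define TRB_MAX_BUFF_SIZE (1u << 16)	/* 64 KB per TRB */
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	/* The loop removed by the patch. */
	static unsigned int count_old(uint64_t addr, uint64_t td_len)
	{
		unsigned int num_trbs = 0;
		uint64_t running_total;

		running_total = TRB_MAX_BUFF_SIZE -
				(addr & (TRB_MAX_BUFF_SIZE - 1));
		running_total &= TRB_MAX_BUFF_SIZE - 1;
		if (running_total != 0)
			num_trbs++;
		while (running_total < td_len) {
			num_trbs++;
			running_total += TRB_MAX_BUFF_SIZE;
		}
		return num_trbs;
	}

	/* The closed form added by the patch. */
	static unsigned int count_new(uint64_t addr, uint64_t td_len)
	{
		unsigned int num_trbs;

		num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
				TRB_MAX_BUFF_SIZE);
		if (num_trbs == 0)	/* zero-length TD still needs one TRB */
			num_trbs++;
		return num_trbs;
	}

	int main(void)
	{
		uint64_t addr, len;

		for (addr = 0; addr < 3ull * TRB_MAX_BUFF_SIZE; addr += 4093) {
			for (len = 1; len < 3ull * TRB_MAX_BUFF_SIZE; len += 4093)
				assert(count_old(addr, len) == count_new(addr, len));
			/* They deliberately differ for td_len == 0: */
			assert(count_new(addr, 0) == 1);
		}
		printf("counts agree for every tested offset and length\n");
		return 0;
	}
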
@@ -3226,6 +3256,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	start_trb = &ep_ring->enqueue->generic;
 	start_cycle = ep_ring->cycle_state;
 
+	urb_priv = urb->hcpriv;
 	/* Queue the first TRB, even if it's zero-length */
 	for (i = 0; i < num_tds; i++) {
 		unsigned int total_packet_count;
@@ -3237,9 +3268,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		addr = start_addr + urb->iso_frame_desc[i].offset;
 		td_len = urb->iso_frame_desc[i].length;
 		td_remain_len = td_len;
-		/* FIXME: Ignoring zero-length packets, can those happen? */
 		total_packet_count = roundup(td_len,
 				le16_to_cpu(urb->ep->desc.wMaxPacketSize));
+		/* A zero-length transfer still involves at least one packet. */
+		if (total_packet_count == 0)
+			total_packet_count++;
 		burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
 				total_packet_count);
 		residue = xhci_get_last_burst_packet_count(xhci,
@@ -3249,12 +3282,13 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
 				urb->stream_id, trbs_per_td, urb, i, mem_flags);
-		if (ret < 0)
-			return ret;
+		if (ret < 0) {
+			if (i == 0)
+				return ret;
+			goto cleanup;
+		}
 
-		urb_priv = urb->hcpriv;
 		td = urb_priv->td[i];
-
 		for (j = 0; j < trbs_per_td; j++) {
 			u32 remainder = 0;
 			field = TRB_TBC(burst_count) | TRB_TLBPC(residue);
@@ -3344,6 +3378,27 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
 			start_cycle, start_trb);
 	return 0;
+cleanup:
+	/* Clean up a partially enqueued isoc transfer. */
+
+	for (i--; i >= 0; i--)
+		list_del_init(&urb_priv->td[i]->td_list);
+
+	/* Use the first TD as a temporary variable to turn the TDs we've queued
+	 * into No-ops with a software-owned cycle bit.  That way the hardware
+	 * won't accidentally start executing bogus TDs when we partially
+	 * overwrite them.  td->first_trb and td->start_seg are already set.
+	 */
+	urb_priv->td[0]->last_trb = ep_ring->enqueue;
+	/* Every TRB except the first & last will have its cycle bit flipped. */
+	td_to_noop(xhci, ep_ring, urb_priv->td[0], true);
+
+	/* Reset the ring enqueue back to the first TRB and its cycle bit. */
+	ep_ring->enqueue = urb_priv->td[0]->first_trb;
+	ep_ring->enq_seg = urb_priv->td[0]->start_seg;
+	ep_ring->cycle_state = start_cycle;
+	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
+	return ret;
 }
 
 /*