Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r--	drivers/usb/host/xhci-ring.c	112
1 file changed, 69 insertions(+), 43 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 54139a2f06ce..9f1d4b15d818 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -185,7 +185,7 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
  * prepare_transfer()?
  */
 static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
-		bool consumer, bool more_trbs_coming)
+		bool consumer, bool more_trbs_coming, bool isoc)
 {
 	u32 chain;
 	union xhci_trb *next;
@@ -212,11 +212,13 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 		if (!chain && !more_trbs_coming)
 			break;
 
-		/* If we're not dealing with 0.95 hardware,
+		/* If we're not dealing with 0.95 hardware or
+		 * isoc rings on AMD 0.96 host,
 		 * carry over the chain bit of the previous TRB
 		 * (which may mean the chain bit is cleared).
 		 */
-		if (!xhci_link_trb_quirk(xhci)) {
+		if (!(isoc && (xhci->quirks & XHCI_AMD_0x96_HOST))
+				&& !xhci_link_trb_quirk(xhci)) {
 			next->link.control &=
 				cpu_to_le32(~TRB_CHAIN);
 			next->link.control |=
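
For 0.95 hosts the chain bit must stay set in link TRBs; this patch extends the same requirement to isoc rings on AMD 0.96 hosts. Reduced to a single predicate, the new condition in inc_enq() above and in prepare_ring() further down is the following (a sketch only; the helper name is illustrative, and XHCI_AMD_0x96_HOST is the quirk flag this series adds to xhci.h):

	/* Sketch: should a link TRB keep the chain bit set? */
	static inline bool keep_link_trb_chain_bit(struct xhci_hcd *xhci, bool isoc)
	{
		/* 0.95 hosts always chain through link TRBs; AMD 0.96 hosts
		 * need the same treatment, but only on isoc rings. */
		return xhci_link_trb_quirk(xhci) ||
		       (isoc && (xhci->quirks & XHCI_AMD_0x96_HOST));
	}
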
@@ -814,23 +816,24 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
 	struct xhci_ring *ring;
 	struct xhci_td *cur_td;
 	int ret, i, j;
+	unsigned long flags;
 
 	ep = (struct xhci_virt_ep *) arg;
 	xhci = ep->xhci;
 
-	spin_lock(&xhci->lock);
+	spin_lock_irqsave(&xhci->lock, flags);
 
 	ep->stop_cmds_pending--;
 	if (xhci->xhc_state & XHCI_STATE_DYING) {
 		xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
 				"xHCI as DYING, exiting.\n");
-		spin_unlock(&xhci->lock);
+		spin_unlock_irqrestore(&xhci->lock, flags);
 		return;
 	}
 	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
 		xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
 				"exiting.\n");
-		spin_unlock(&xhci->lock);
+		spin_unlock_irqrestore(&xhci->lock, flags);
 		return;
 	}
 
@@ -842,11 +845,11 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
 	xhci->xhc_state |= XHCI_STATE_DYING;
 	/* Disable interrupts from the host controller and start halting it */
 	xhci_quiesce(xhci);
-	spin_unlock(&xhci->lock);
+	spin_unlock_irqrestore(&xhci->lock, flags);
 
 	ret = xhci_halt(xhci);
 
-	spin_lock(&xhci->lock);
+	spin_lock_irqsave(&xhci->lock, flags);
 	if (ret < 0) {
 		/* This is bad; the host is not responding to commands and it's
 		 * not allowing itself to be halted. At least interrupts are
@@ -894,7 +897,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
 			}
 		}
 	}
-	spin_unlock(&xhci->lock);
+	spin_unlock_irqrestore(&xhci->lock, flags);
 	xhci_dbg(xhci, "Calling usb_hc_died()\n");
 	usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
 	xhci_dbg(xhci, "xHCI host controller is dead.\n");
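
The lock conversion through this watchdog follows the usual rule for code that cannot assume a fixed context: plain spin_lock() is only safe when the caller knows the interrupt state, whereas a timer callback must save and restore it itself. The pattern in isolation, for reference:

	unsigned long flags;

	spin_lock_irqsave(&xhci->lock, flags);		/* disables IRQs, remembers prior state */
	/* ... critical section touching xhci state ... */
	spin_unlock_irqrestore(&xhci->lock, flags);	/* restores the saved IRQ state */
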
@@ -1329,10 +1332,8 @@ static void handle_port_status(struct xhci_hcd *xhci,
 
 		if (DEV_SUPERSPEED(temp)) {
 			xhci_dbg(xhci, "resume SS port %d\n", port_id);
-			temp = xhci_port_state_to_neutral(temp);
-			temp &= ~PORT_PLS_MASK;
-			temp |= PORT_LINK_STROBE | XDEV_U0;
-			xhci_writel(xhci, temp, port_array[faked_port_index]);
+			xhci_set_link_state(xhci, port_array, faked_port_index,
+						XDEV_U0);
 			slot_id = xhci_find_slot_id_by_port(hcd, xhci,
 					faked_port_index);
 			if (!slot_id) {
@@ -1342,10 +1343,8 @@ static void handle_port_status(struct xhci_hcd *xhci,
 			xhci_ring_device(xhci, slot_id);
 			xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
 			/* Clear PORT_PLC */
-			temp = xhci_readl(xhci, port_array[faked_port_index]);
-			temp = xhci_port_state_to_neutral(temp);
-			temp |= PORT_PLC;
-			xhci_writel(xhci, temp, port_array[faked_port_index]);
+			xhci_test_and_clear_bit(xhci, port_array,
+					faked_port_index, PORT_PLC);
 		} else {
 			xhci_dbg(xhci, "resume HS port %d\n", port_id);
 			bus_state->resume_done[faked_port_index] = jiffies +
@@ -1356,6 +1355,10 @@ static void handle_port_status(struct xhci_hcd *xhci,
 		}
 	}
 
+	if (hcd->speed != HCD_USB3)
+		xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
+				PORT_PLC);
+
 cleanup:
 	/* Update event ring dequeue pointer before dropping the lock */
 	inc_deq(xhci, xhci->event_ring, true);
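
Both removed read-modify-write sequences are folded into helpers introduced alongside this patch in xhci-hub.c. xhci_test_and_clear_bit() is essentially the deleted code plus a check that the change bit is actually set before writing it back (port change bits are write-1-to-clear); roughly:

	void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
			int port_id, u32 port_bit)
	{
		u32 temp;

		temp = xhci_readl(xhci, port_array[port_id]);
		if (temp & port_bit) {
			temp = xhci_port_state_to_neutral(temp);
			temp |= port_bit;	/* RW1C: writing 1 clears the bit */
			xhci_writel(xhci, temp, port_array[port_id]);
		}
	}
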
@@ -1934,8 +1937,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	int status = -EINPROGRESS;
 	struct urb_priv *urb_priv;
 	struct xhci_ep_ctx *ep_ctx;
+	struct list_head *tmp;
 	u32 trb_comp_code;
 	int ret = 0;
+	int td_num = 0;
 
 	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
 	xdev = xhci->devs[slot_id];
@@ -1957,6 +1962,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		return -ENODEV;
 	}
 
+	/* Count current td numbers if ep->skip is set */
+	if (ep->skip) {
+		list_for_each(tmp, &ep_ring->td_list)
+			td_num++;
+	}
+
 	event_dma = le64_to_cpu(event->buffer);
 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
 	/* Look for common error cases */
@@ -2068,7 +2079,18 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		goto cleanup;
 	}
 
+	/* We've skipped all the TDs on the ep ring when ep->skip set */
+	if (ep->skip && td_num == 0) {
+		ep->skip = false;
+		xhci_dbg(xhci, "All tds on the ep_ring skipped. "
+				"Clear skip flag.\n");
+		ret = 0;
+		goto cleanup;
+	}
+
 	td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
+	if (ep->skip)
+		td_num--;
 
 	/* Is this a TRB in the currently executing TD? */
 	event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
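
The additions in this and the two preceding hunks work as a unit: td_num snapshots how many TDs were pending when a transfer event arrived with ep->skip set, is decremented once per TD examined, and reaching zero means every pending TD has been skipped, so the flag is cleared and the list_entry() above is never evaluated against an empty td_list. A hypothetical helper equivalent to the snapshot loop (illustrative, not part of the patch):

	/* Hypothetical helper: equivalent of the list_for_each() counting
	 * loop added to handle_tx_event() above. */
	static unsigned int count_pending_tds(struct xhci_ring *ep_ring)
	{
		struct list_head *tmp;
		unsigned int td_num = 0;

		list_for_each(tmp, &ep_ring->td_list)
			td_num++;
		return td_num;
	}
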
@@ -2173,7 +2195,8 @@ cleanup:
 		if ((urb->actual_length != urb->transfer_buffer_length &&
 				(urb->transfer_flags &
 					URB_SHORT_NOT_OK)) ||
-				status != 0)
+				(status != 0 &&
+				 !usb_endpoint_xfer_isoc(&urb->ep->desc)))
 			xhci_dbg(xhci, "Giveback URB %p, len = %d, "
 					"expected = %x, status = %d\n",
 					urb, urb->actual_length,
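
The added usb_endpoint_xfer_isoc() check keeps the giveback debug message from firing for isochronous URBs, where a nonzero overall status is not unusual and the per-frame results are reported through urb->iso_frame_desc[] instead.
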
@@ -2390,7 +2413,7 @@ irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
  * prepare_transfer()?
  */
 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
-		bool consumer, bool more_trbs_coming,
+		bool consumer, bool more_trbs_coming, bool isoc,
 		u32 field1, u32 field2, u32 field3, u32 field4)
 {
 	struct xhci_generic_trb *trb;
@@ -2400,7 +2423,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	trb->field[1] = cpu_to_le32(field2);
 	trb->field[2] = cpu_to_le32(field3);
 	trb->field[3] = cpu_to_le32(field4);
-	inc_enq(xhci, ring, consumer, more_trbs_coming);
+	inc_enq(xhci, ring, consumer, more_trbs_coming, isoc);
 }
 
 /*
@@ -2408,7 +2431,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
 * FIXME allocate segments if the ring is full.
 */
 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
-		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
+		u32 ep_state, unsigned int num_trbs, bool isoc, gfp_t mem_flags)
 {
 	/* Make sure the endpoint has been added to xHC schedule */
 	switch (ep_state) {
@@ -2450,10 +2473,11 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 		next = ring->enqueue;
 
 		while (last_trb(xhci, ring, ring->enq_seg, next)) {
-			/* If we're not dealing with 0.95 hardware,
-			 * clear the chain bit.
+			/* If we're not dealing with 0.95 hardware or isoc rings
+			 * on AMD 0.96 host, clear the chain bit.
 			 */
-			if (!xhci_link_trb_quirk(xhci))
+			if (!xhci_link_trb_quirk(xhci) && !(isoc &&
+					(xhci->quirks & XHCI_AMD_0x96_HOST)))
 				next->link.control &= cpu_to_le32(~TRB_CHAIN);
 			else
 				next->link.control |= cpu_to_le32(TRB_CHAIN);
@@ -2486,6 +2510,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 		unsigned int num_trbs,
 		struct urb *urb,
 		unsigned int td_index,
+		bool isoc,
 		gfp_t mem_flags)
 {
 	int ret;
@@ -2503,7 +2528,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 
 	ret = prepare_ring(xhci, ep_ring,
 			   le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
-			   num_trbs, mem_flags);
+			   num_trbs, isoc, mem_flags);
 	if (ret)
 		return ret;
 
@@ -2692,7 +2717,7 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
 	 * running_total.
 	 */
 	packets_transferred = (running_total + trb_buff_len) /
-		le16_to_cpu(urb->ep->desc.wMaxPacketSize);
+		usb_endpoint_maxp(&urb->ep->desc);
 
 	return xhci_td_remainder(total_packet_count - packets_transferred);
 }
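
usb_endpoint_maxp() is the ch9.h accessor that replaces the open-coded byte swap at every call site in this file. At the time of this change the helper is simply the following (quoted from include/linux/usb/ch9.h, modulo formatting):

	static inline int usb_endpoint_maxp(const struct usb_endpoint_descriptor *epd)
	{
		return __le16_to_cpu(epd->wMaxPacketSize);
	}
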
@@ -2722,11 +2747,11 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	num_trbs = count_sg_trbs_needed(xhci, urb);
 	num_sgs = urb->num_sgs;
 	total_packet_count = roundup(urb->transfer_buffer_length,
-			le16_to_cpu(urb->ep->desc.wMaxPacketSize));
+			usb_endpoint_maxp(&urb->ep->desc));
 
 	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
-			num_trbs, urb, 0, mem_flags);
+			num_trbs, urb, 0, false, mem_flags);
 	if (trb_buff_len < 0)
 		return trb_buff_len;
 
@@ -2821,7 +2846,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			more_trbs_coming = true;
 		else
 			more_trbs_coming = false;
-		queue_trb(xhci, ep_ring, false, more_trbs_coming,
+		queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
 				lower_32_bits(addr),
 				upper_32_bits(addr),
 				length_field,
@@ -2912,7 +2937,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
-			num_trbs, urb, 0, mem_flags);
+			num_trbs, urb, 0, false, mem_flags);
 	if (ret < 0)
 		return ret;
 
@@ -2929,7 +2954,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 	running_total = 0;
 	total_packet_count = roundup(urb->transfer_buffer_length,
-			le16_to_cpu(urb->ep->desc.wMaxPacketSize));
+			usb_endpoint_maxp(&urb->ep->desc));
 	/* How much data is in the first TRB? */
 	addr = (u64) urb->transfer_dma;
 	trb_buff_len = TRB_MAX_BUFF_SIZE -
@@ -2984,7 +3009,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			more_trbs_coming = true;
 		else
 			more_trbs_coming = false;
-		queue_trb(xhci, ep_ring, false, more_trbs_coming,
+		queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
 				lower_32_bits(addr),
 				upper_32_bits(addr),
 				length_field,
@@ -3044,7 +3069,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		num_trbs++;
 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
-			num_trbs, urb, 0, mem_flags);
+			num_trbs, urb, 0, false, mem_flags);
 	if (ret < 0)
 		return ret;
 
@@ -3077,7 +3102,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		}
 	}
 
-	queue_trb(xhci, ep_ring, false, true,
+	queue_trb(xhci, ep_ring, false, true, false,
 		  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
 		  le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
 		  TRB_LEN(8) | TRB_INTR_TARGET(0),
@@ -3097,7 +3122,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	if (urb->transfer_buffer_length > 0) {
 		if (setup->bRequestType & USB_DIR_IN)
 			field |= TRB_DIR_IN;
-		queue_trb(xhci, ep_ring, false, true,
+		queue_trb(xhci, ep_ring, false, true, false,
 				lower_32_bits(urb->transfer_dma),
 				upper_32_bits(urb->transfer_dma),
 				length_field,
@@ -3113,7 +3138,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		field = 0;
 	else
 		field = TRB_DIR_IN;
-	queue_trb(xhci, ep_ring, false, false,
+	queue_trb(xhci, ep_ring, false, false, false,
 			0,
 			0,
 			TRB_INTR_TARGET(0),
@@ -3250,7 +3275,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		td_len = urb->iso_frame_desc[i].length;
 		td_remain_len = td_len;
 		total_packet_count = roundup(td_len,
-				le16_to_cpu(urb->ep->desc.wMaxPacketSize));
+				usb_endpoint_maxp(&urb->ep->desc));
 		/* A zero-length transfer still involves at least one packet. */
 		if (total_packet_count == 0)
 			total_packet_count++;
@@ -3262,7 +3287,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
 
 		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
-				urb->stream_id, trbs_per_td, urb, i, mem_flags);
+				urb->stream_id, trbs_per_td, urb, i, true,
+				mem_flags);
 		if (ret < 0) {
 			if (i == 0)
 				return ret;
@@ -3332,7 +3358,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 				remainder |
 				TRB_INTR_TARGET(0);
 
-			queue_trb(xhci, ep_ring, false, more_trbs_coming,
+			queue_trb(xhci, ep_ring, false, more_trbs_coming, true,
 					lower_32_bits(addr),
 					upper_32_bits(addr),
 					length_field,
@@ -3414,7 +3440,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
 	 * Do not insert any td of the urb to the ring if the check failed.
 	 */
 	ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
-			   num_trbs, mem_flags);
+			   num_trbs, true, mem_flags);
 	if (ret)
 		return ret;
 
@@ -3473,7 +3499,7 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
 		reserved_trbs++;
 
 	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
-			reserved_trbs, GFP_ATOMIC);
+			reserved_trbs, false, GFP_ATOMIC);
 	if (ret < 0) {
 		xhci_err(xhci, "ERR: No room for command on command ring\n");
 		if (command_must_succeed)
@@ -3481,8 +3507,8 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
 				"unfailable commands failed.\n");
 		return ret;
 	}
-	queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3,
-			field4 | xhci->cmd_ring->cycle_state);
+	queue_trb(xhci, xhci->cmd_ring, false, false, false, field1, field2,
+			field3, field4 | xhci->cmd_ring->cycle_state);
 	return 0;
 }
 
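
Net effect of the signature churn: every queue_trb(), prepare_ring(), and prepare_transfer() caller now states explicitly whether it is queueing on an isoc ring. Only the isoc paths (xhci_queue_isoc_tx() and xhci_queue_isoc_tx_prepare()) pass true, which is what lets inc_enq() and prepare_ring() apply the AMD 0.96 chain-bit quirk to isoc rings alone while bulk, control, and command rings keep the existing behavior.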