Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r--  drivers/usb/host/xhci-ring.c  80
1 file changed, 43 insertions(+), 37 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 952e2ded61a..940321b3ec6 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -185,7 +185,7 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
  * prepare_transfer()?
  */
 static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
-		bool consumer, bool more_trbs_coming)
+		bool consumer, bool more_trbs_coming, bool isoc)
 {
 	u32 chain;
 	union xhci_trb *next;
@@ -212,11 +212,13 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 		if (!chain && !more_trbs_coming)
 			break;
 
-		/* If we're not dealing with 0.95 hardware,
+		/* If we're not dealing with 0.95 hardware or
+		 * isoc rings on AMD 0.96 host,
 		 * carry over the chain bit of the previous TRB
 		 * (which may mean the chain bit is cleared).
 		 */
-		if (!xhci_link_trb_quirk(xhci)) {
+		if (!(isoc && (xhci->quirks & XHCI_AMD_0x96_HOST))
+					&& !xhci_link_trb_quirk(xhci)) {
 			next->link.control &=
 				cpu_to_le32(~TRB_CHAIN);
 			next->link.control |=
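Note: the combined condition above reads more clearly outside diff
context. A minimal sketch of the resulting chain-bit policy for link
TRBs, assuming the XHCI_AMD_0x96_HOST quirk flag and the existing
xhci_link_trb_quirk() helper (the function below is illustrative only,
not part of the patch):

    /* Decide whether a link TRB must keep its chain bit set. */
    static bool link_trb_needs_chain(struct xhci_hcd *xhci, bool isoc)
    {
            /* 0.95 hardware always wants the chain bit on link TRBs. */
            if (xhci_link_trb_quirk(xhci))
                    return true;
            /* AMD 0.96 hosts need it on isoc rings so multi-TRB TDs
             * that span ring segments are traversed correctly.
             */
            if (isoc && (xhci->quirks & XHCI_AMD_0x96_HOST))
                    return true;
            /* Otherwise carry over (or clear) the bit as before. */
            return false;
    }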
@@ -1329,10 +1331,8 @@ static void handle_port_status(struct xhci_hcd *xhci,
 
 		if (DEV_SUPERSPEED(temp)) {
 			xhci_dbg(xhci, "resume SS port %d\n", port_id);
-			temp = xhci_port_state_to_neutral(temp);
-			temp &= ~PORT_PLS_MASK;
-			temp |= PORT_LINK_STROBE | XDEV_U0;
-			xhci_writel(xhci, temp, port_array[faked_port_index]);
+			xhci_set_link_state(xhci, port_array, faked_port_index,
+						XDEV_U0);
 			slot_id = xhci_find_slot_id_by_port(hcd, xhci,
 					faked_port_index);
 			if (!slot_id) {
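Note: xhci_set_link_state() is a helper introduced elsewhere in this
series (in xhci-hub.c) to replace the open-coded sequence removed
above. A sketch of the presumed body, mirroring those removed lines
with XDEV_U0 generalized to a link_state argument:

    u32 temp = xhci_readl(xhci, port_array[faked_port_index]);

    temp = xhci_port_state_to_neutral(temp);  /* keep RW1C bits inert */
    temp &= ~PORT_PLS_MASK;                   /* drop the old link state */
    temp |= PORT_LINK_STROBE | link_state;    /* LWS + new PLS value */
    xhci_writel(xhci, temp, port_array[faked_port_index]);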
@@ -1342,10 +1342,8 @@ static void handle_port_status(struct xhci_hcd *xhci,
 			xhci_ring_device(xhci, slot_id);
 			xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
 			/* Clear PORT_PLC */
-			temp = xhci_readl(xhci, port_array[faked_port_index]);
-			temp = xhci_port_state_to_neutral(temp);
-			temp |= PORT_PLC;
-			xhci_writel(xhci, temp, port_array[faked_port_index]);
+			xhci_test_and_clear_bit(xhci, port_array,
+					faked_port_index, PORT_PLC);
 		} else {
 			xhci_dbg(xhci, "resume HS port %d\n", port_id);
 			bus_state->resume_done[faked_port_index] = jiffies +
@@ -1356,6 +1354,10 @@ static void handle_port_status(struct xhci_hcd *xhci,
 		}
 	}
 
+	if (hcd->speed != HCD_USB3)
+		xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
+				PORT_PLC);
+
 cleanup:
 	/* Update event ring dequeue pointer before dropping the lock */
 	inc_deq(xhci, xhci->event_ring, true);
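Note: xhci_test_and_clear_bit(), used twice above, consolidates the
read/modify/write pattern deleted in the earlier hunk; the new call is
for clearing PORT_PLC on non-USB3 roothubs. Port-change bits are
write-1-to-clear, hence the neutralize-then-write order. A sketch of
the presumed helper:

    void xhci_test_and_clear_bit(struct xhci_hcd *xhci,
                    __le32 __iomem **port_array, int port_id, u32 port_bit)
    {
            u32 temp = xhci_readl(xhci, port_array[port_id]);

            if (temp & port_bit) {
                    /* Neutralize first so no other RW1C bit is cleared
                     * by accident, then write 1 to clear the target bit.
                     */
                    temp = xhci_port_state_to_neutral(temp);
                    xhci_writel(xhci, temp | port_bit, port_array[port_id]);
            }
    }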
@@ -2192,7 +2194,8 @@ cleanup:
 		if ((urb->actual_length != urb->transfer_buffer_length &&
 				(urb->transfer_flags &
 					URB_SHORT_NOT_OK)) ||
-				status != 0)
+				(status != 0 &&
+					!usb_endpoint_xfer_isoc(&urb->ep->desc)))
 			xhci_dbg(xhci, "Giveback URB %p, len = %d, "
 					"expected = %x, status = %d\n",
 					urb, urb->actual_length,
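Note: the added usb_endpoint_xfer_isoc() test suppresses the giveback
debug message for isochronous URBs, whose overall status is routinely
nonzero (per-frame results live in iso_frame_desc[].status), presumably
to keep streaming endpoints from flooding the log. The ch9.h helper,
paraphrased:

    static inline int usb_endpoint_xfer_isoc(
                    const struct usb_endpoint_descriptor *epd)
    {
            return (epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
                    USB_ENDPOINT_XFER_ISOC;
    }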
@@ -2409,7 +2412,7 @@ irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
  * prepare_transfer()?
  */
 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
-		bool consumer, bool more_trbs_coming,
+		bool consumer, bool more_trbs_coming, bool isoc,
 		u32 field1, u32 field2, u32 field3, u32 field4)
 {
 	struct xhci_generic_trb *trb;
@@ -2419,7 +2422,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	trb->field[1] = cpu_to_le32(field2);
 	trb->field[2] = cpu_to_le32(field3);
 	trb->field[3] = cpu_to_le32(field4);
-	inc_enq(xhci, ring, consumer, more_trbs_coming);
+	inc_enq(xhci, ring, consumer, more_trbs_coming, isoc);
 }
 
 /*
@@ -2427,7 +2430,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
  * FIXME allocate segments if the ring is full.
  */
 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
-		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
+		u32 ep_state, unsigned int num_trbs, bool isoc, gfp_t mem_flags)
 {
 	/* Make sure the endpoint has been added to xHC schedule */
 	switch (ep_state) {
@@ -2469,10 +2472,11 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 		next = ring->enqueue;
 
 		while (last_trb(xhci, ring, ring->enq_seg, next)) {
-			/* If we're not dealing with 0.95 hardware,
-			 * clear the chain bit.
+			/* If we're not dealing with 0.95 hardware or isoc rings
+			 * on AMD 0.96 host, clear the chain bit.
 			 */
-			if (!xhci_link_trb_quirk(xhci))
+			if (!xhci_link_trb_quirk(xhci) && !(isoc &&
+					(xhci->quirks & XHCI_AMD_0x96_HOST)))
 				next->link.control &= cpu_to_le32(~TRB_CHAIN);
 			else
 				next->link.control |= cpu_to_le32(TRB_CHAIN);
@@ -2505,6 +2509,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 		unsigned int num_trbs,
 		struct urb *urb,
 		unsigned int td_index,
+		bool isoc,
 		gfp_t mem_flags)
 {
 	int ret;
@@ -2522,7 +2527,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 
 	ret = prepare_ring(xhci, ep_ring,
 			   le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
-			   num_trbs, mem_flags);
+			   num_trbs, isoc, mem_flags);
 	if (ret)
 		return ret;
 
@@ -2711,7 +2716,7 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
 	 * running_total.
 	 */
 	packets_transferred = (running_total + trb_buff_len) /
-		le16_to_cpu(urb->ep->desc.wMaxPacketSize);
+		usb_endpoint_maxp(&urb->ep->desc);
 
 	return xhci_td_remainder(total_packet_count - packets_transferred);
 }
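Note: this and several later hunks make the same mechanical
substitution: usb_endpoint_maxp() replaces the open-coded
le16_to_cpu() read of wMaxPacketSize. The helper (from
include/linux/usb/ch9.h, paraphrased) is equivalent here:

    static inline int usb_endpoint_maxp(
                    const struct usb_endpoint_descriptor *epd)
    {
            return __le16_to_cpu(epd->wMaxPacketSize);
    }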
@@ -2741,11 +2746,11 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	num_trbs = count_sg_trbs_needed(xhci, urb);
 	num_sgs = urb->num_sgs;
 	total_packet_count = roundup(urb->transfer_buffer_length,
-			le16_to_cpu(urb->ep->desc.wMaxPacketSize));
+			usb_endpoint_maxp(&urb->ep->desc));
 
 	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
-			num_trbs, urb, 0, mem_flags);
+			num_trbs, urb, 0, false, mem_flags);
 	if (trb_buff_len < 0)
 		return trb_buff_len;
 
@@ -2840,7 +2845,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			more_trbs_coming = true;
 		else
 			more_trbs_coming = false;
-		queue_trb(xhci, ep_ring, false, more_trbs_coming,
+		queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
 				lower_32_bits(addr),
 				upper_32_bits(addr),
 				length_field,
@@ -2931,7 +2936,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
-			num_trbs, urb, 0, mem_flags);
+			num_trbs, urb, 0, false, mem_flags);
 	if (ret < 0)
 		return ret;
 
@@ -2948,7 +2953,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 	running_total = 0;
 	total_packet_count = roundup(urb->transfer_buffer_length,
-			le16_to_cpu(urb->ep->desc.wMaxPacketSize));
+			usb_endpoint_maxp(&urb->ep->desc));
 	/* How much data is in the first TRB? */
 	addr = (u64) urb->transfer_dma;
 	trb_buff_len = TRB_MAX_BUFF_SIZE -
@@ -3003,7 +3008,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			more_trbs_coming = true;
 		else
 			more_trbs_coming = false;
-		queue_trb(xhci, ep_ring, false, more_trbs_coming,
+		queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
 				lower_32_bits(addr),
 				upper_32_bits(addr),
 				length_field,
@@ -3063,7 +3068,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		num_trbs++;
 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
-			num_trbs, urb, 0, mem_flags);
+			num_trbs, urb, 0, false, mem_flags);
 	if (ret < 0)
 		return ret;
 
@@ -3096,7 +3101,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		}
 	}
 
-	queue_trb(xhci, ep_ring, false, true,
+	queue_trb(xhci, ep_ring, false, true, false,
 		  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
 		  le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
 		  TRB_LEN(8) | TRB_INTR_TARGET(0),
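Note: for reference, the two 32-bit fields above carry the standard
8-byte SETUP packet in wire order. The usb_ctrlrequest layout from
include/linux/usb/ch9.h:

    struct usb_ctrlrequest {
            __u8  bRequestType;     /* -> field1 bits  7:0  */
            __u8  bRequest;         /* -> field1 bits 15:8  */
            __le16 wValue;          /* -> field1 bits 31:16 */
            __le16 wIndex;          /* -> field2 bits 15:0  */
            __le16 wLength;         /* -> field2 bits 31:16 */
    } __attribute__ ((packed));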
@@ -3116,7 +3121,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	if (urb->transfer_buffer_length > 0) {
 		if (setup->bRequestType & USB_DIR_IN)
 			field |= TRB_DIR_IN;
-		queue_trb(xhci, ep_ring, false, true,
+		queue_trb(xhci, ep_ring, false, true, false,
 				lower_32_bits(urb->transfer_dma),
 				upper_32_bits(urb->transfer_dma),
 				length_field,
@@ -3132,7 +3137,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		field = 0;
 	else
 		field = TRB_DIR_IN;
-	queue_trb(xhci, ep_ring, false, false,
+	queue_trb(xhci, ep_ring, false, false, false,
 			0,
 			0,
 			TRB_INTR_TARGET(0),
@@ -3269,7 +3274,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		td_len = urb->iso_frame_desc[i].length;
 		td_remain_len = td_len;
 		total_packet_count = roundup(td_len,
-				le16_to_cpu(urb->ep->desc.wMaxPacketSize));
+				usb_endpoint_maxp(&urb->ep->desc));
 		/* A zero-length transfer still involves at least one packet. */
 		if (total_packet_count == 0)
 			total_packet_count++;
@@ -3281,7 +3286,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
 
 		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
-				urb->stream_id, trbs_per_td, urb, i, mem_flags);
+				urb->stream_id, trbs_per_td, urb, i, true,
+				mem_flags);
 		if (ret < 0) {
 			if (i == 0)
 				return ret;
@@ -3351,7 +3357,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 				remainder |
 				TRB_INTR_TARGET(0);
 
-			queue_trb(xhci, ep_ring, false, more_trbs_coming,
+			queue_trb(xhci, ep_ring, false, more_trbs_coming, true,
 				lower_32_bits(addr),
 				upper_32_bits(addr),
 				length_field,
@@ -3433,7 +3439,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
 	 * Do not insert any td of the urb to the ring if the check failed.
 	 */
 	ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
-			   num_trbs, mem_flags);
+			   num_trbs, true, mem_flags);
 	if (ret)
 		return ret;
 
@@ -3492,7 +3498,7 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
 		reserved_trbs++;
 
 	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
-			reserved_trbs, GFP_ATOMIC);
+			reserved_trbs, false, GFP_ATOMIC);
 	if (ret < 0) {
 		xhci_err(xhci, "ERR: No room for command on command ring\n");
 		if (command_must_succeed)
@@ -3500,8 +3506,8 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
3500 "unfailable commands failed.\n"); 3506 "unfailable commands failed.\n");
3501 return ret; 3507 return ret;
3502 } 3508 }
3503 queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3, 3509 queue_trb(xhci, xhci->cmd_ring, false, false, false, field1, field2,
3504 field4 | xhci->cmd_ring->cycle_state); 3510 field3, field4 | xhci->cmd_ring->cycle_state);
3505 return 0; 3511 return 0;
3506} 3512}
3507 3513