author     Sarah Sharp <sarah.a.sharp@linux.intel.com>   2009-07-27 15:05:21 -0400
committer  Greg Kroah-Hartman <gregkh@suse.de>           2009-07-28 17:31:13 -0400
commit     c92bcfa7b4038d8ffe1f02e21269f18eb0b64144 (patch)
tree       779257c92d050d3d19eb0351f73ee59bcc5fa84f /drivers/usb/host
parent     d115b04818e57bdbc7ccde4d0660b15e33013dc8 (diff)
USB: xhci: Stall handling bug fixes.
Correct the xHCI code to handle stalls on USB endpoints. We need to move
the endpoint ring's dequeue pointer past the stalled transfer, or the HW
will try to restart the transfer the next time the doorbell is rung.
Don't attempt to clear a halt on an endpoint if we haven't seen a stalled
transfer for it. The USB core will attempt to clear a halt on all
endpoints when it selects a new configuration.
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/usb/host')

-rw-r--r--  drivers/usb/host/xhci-hcd.c   |  24
-rw-r--r--  drivers/usb/host/xhci-ring.c  | 133
-rw-r--r--  drivers/usb/host/xhci.h       |  12

3 files changed, 120 insertions(+), 49 deletions(-)
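Reading guide: the patch makes two fixes. First, on a STALL completion code handle_tx_event() now parks the stalled TD as ep_ring->stopped_td instead of advancing the dequeue pointer, and xhci_endpoint_reset() later moves the hardware dequeue pointer past it. Second, xhci_endpoint_reset() bails out when no stalled transfer was recorded, because the USB core tries to clear halts on every endpoint when it selects a configuration. Below is a minimal sketch of the first flow; it is not code from the patch. The names follow the driver's functions added in this commit, but the function name itself is hypothetical, and locking, the completion handshake between the Reset Endpoint and Set TR Dequeue Pointer commands, and error handling are all elided.

/* Hedged sketch -- not from the patch.  Assumes the driver's internal
 * types (struct xhci_hcd, struct xhci_dequeue_state) and the helpers
 * this commit introduces; the function name is hypothetical.
 */
static void sketch_recover_stalled_endpoint(struct xhci_hcd *xhci,
                struct usb_device *udev, unsigned int ep_index)
{
        struct xhci_ring *ep_ring = xhci->devs[udev->slot_id]->ep_rings[ep_index];
        struct xhci_dequeue_state deq_state;

        /* Second fix: no stalled transfer recorded, nothing to reset. */
        if (!ep_ring->stopped_td)
                return;

        /* Ask the HC to clear the endpoint's halt condition. */
        if (xhci_queue_reset_ep(xhci, udev->slot_id, ep_index))
                return;

        /* Compute a dequeue pointer just past the stalled TD, then
         * queue a Set TR Dequeue Pointer command so the HC won't
         * re-run the stalled transfer on the next doorbell ring.
         */
        xhci_find_new_dequeue_state(xhci, udev->slot_id, ep_index,
                        ep_ring->stopped_td, &deq_state);
        xhci_queue_new_dequeue_state(xhci, ep_ring, udev->slot_id,
                        ep_index, &deq_state);

        kfree(ep_ring->stopped_td);
        xhci_ring_cmd_db(xhci);         /* kick the command ring */
}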
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 057a07e876be..816c39caca1c 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -1089,6 +1089,8 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
         unsigned int ep_index;
         unsigned long flags;
         int ret;
+        struct xhci_dequeue_state deq_state;
+        struct xhci_ring *ep_ring;
 
         xhci = hcd_to_xhci(hcd);
         udev = (struct usb_device *) ep->hcpriv;
@@ -1098,11 +1100,33 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
         if (!ep->hcpriv)
                 return;
         ep_index = xhci_get_endpoint_index(&ep->desc);
+        ep_ring = xhci->devs[udev->slot_id]->ep_rings[ep_index];
+        if (!ep_ring->stopped_td) {
+                xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
+                                ep->desc.bEndpointAddress);
+                return;
+        }
 
         xhci_dbg(xhci, "Queueing reset endpoint command\n");
         spin_lock_irqsave(&xhci->lock, flags);
         ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
+        /*
+         * Can't change the ring dequeue pointer until it's transitioned to the
+         * stopped state, which is only upon a successful reset endpoint
+         * command.  Better hope that last command worked!
+         */
         if (!ret) {
+                xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
+                /* We need to move the HW's dequeue pointer past this TD,
+                 * or it will attempt to resend it on the next doorbell ring.
+                 */
+                xhci_find_new_dequeue_state(xhci, udev->slot_id,
+                                ep_index, ep_ring->stopped_td, &deq_state);
+                xhci_dbg(xhci, "Queueing new dequeue state\n");
+                xhci_queue_new_dequeue_state(xhci, ep_ring,
+                                udev->slot_id,
+                                ep_index, &deq_state);
+                kfree(ep_ring->stopped_td);
                 xhci_ring_cmd_db(xhci);
         }
         spin_unlock_irqrestore(&xhci->lock, flags);
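For context, xhci_endpoint_reset() is invoked by the USB core's endpoint-reset machinery when a driver clears a halt. A hypothetical class-driver snippet, for illustration only (recover_from_stall() and the endpoint number are not from this patch): usb_clear_halt() sends CLEAR_FEATURE(ENDPOINT_HALT) to the device and then has the HCD repair its own endpoint state, which on xHCI lands in the function above.

#include <linux/usb.h>

/* Hypothetical driver-side trigger for the xHCI stall handling above. */
static int recover_from_stall(struct usb_device *udev, unsigned int epnum)
{
        int ret;

        /* Clears the device-side halt, then lets the HCD reset its own
         * endpoint state (xhci_endpoint_reset() for xHCI hosts).
         */
        ret = usb_clear_halt(udev, usb_rcvbulkpipe(udev, epnum));
        if (ret)
                dev_err(&udev->dev, "clear halt failed: %d\n", ret);
        return ret;
}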
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index ea31753c3137..aa88a067148b 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -335,12 +335,6 @@ static struct xhci_segment *find_trb_seg(
         return cur_seg;
 }
 
-struct dequeue_state {
-        struct xhci_segment     *new_deq_seg;
-        union xhci_trb          *new_deq_ptr;
-        int                     new_cycle_state;
-};
-
 /*
  * Move the xHC's endpoint ring dequeue pointer past cur_td.
  * Record the new state of the xHC's endpoint ring dequeue segment,
@@ -355,26 +349,30 @@ struct dequeue_state {
  * - Finally we move the dequeue state one TRB further, toggling the cycle bit
  *   if we've moved it past a link TRB with the toggle cycle bit set.
  */
-static void find_new_dequeue_state(struct xhci_hcd *xhci,
+void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
                 unsigned int slot_id, unsigned int ep_index,
-                struct xhci_td *cur_td, struct dequeue_state *state)
+                struct xhci_td *cur_td, struct xhci_dequeue_state *state)
 {
         struct xhci_virt_device *dev = xhci->devs[slot_id];
         struct xhci_ring *ep_ring = dev->ep_rings[ep_index];
         struct xhci_generic_trb *trb;
         struct xhci_ep_ctx *ep_ctx;
+        dma_addr_t addr;
 
         state->new_cycle_state = 0;
+        xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
         state->new_deq_seg = find_trb_seg(cur_td->start_seg,
                         ep_ring->stopped_trb,
                         &state->new_cycle_state);
         if (!state->new_deq_seg)
                 BUG();
         /* Dig out the cycle state saved by the xHC during the stop ep cmd */
+        xhci_dbg(xhci, "Finding endpoint context\n");
         ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
         state->new_cycle_state = 0x1 & ep_ctx->deq;
 
         state->new_deq_ptr = cur_td->last_trb;
+        xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
         state->new_deq_seg = find_trb_seg(state->new_deq_seg,
                         state->new_deq_ptr,
                         &state->new_cycle_state);
@@ -388,6 +386,12 @@ static void find_new_dequeue_state(struct xhci_hcd *xhci,
         next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
 
         /* Don't update the ring cycle state for the producer (us). */
+        xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
+                        state->new_deq_seg);
+        addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
+        xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
+                        (unsigned long long) addr);
+        xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n");
         ep_ring->dequeue = state->new_deq_ptr;
         ep_ring->deq_seg = state->new_deq_seg;
 }
@@ -437,6 +441,30 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
                 unsigned int ep_index, struct xhci_segment *deq_seg,
                 union xhci_trb *deq_ptr, u32 cycle_state);
 
+void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
+                struct xhci_ring *ep_ring, unsigned int slot_id,
+                unsigned int ep_index, struct xhci_dequeue_state *deq_state)
+{
+        xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
+                        "new deq ptr = %p (0x%llx dma), new cycle = %u\n",
+                        deq_state->new_deq_seg,
+                        (unsigned long long)deq_state->new_deq_seg->dma,
+                        deq_state->new_deq_ptr,
+                        (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
+                        deq_state->new_cycle_state);
+        queue_set_tr_deq(xhci, slot_id, ep_index,
+                        deq_state->new_deq_seg,
+                        deq_state->new_deq_ptr,
+                        (u32) deq_state->new_cycle_state);
+        /* Stop the TD queueing code from ringing the doorbell until
+         * this command completes.  The HC won't set the dequeue pointer
+         * if the ring is running, and ringing the doorbell starts the
+         * ring running.
+         */
+        ep_ring->state |= SET_DEQ_PENDING;
+        xhci_ring_cmd_db(xhci);
+}
+
 /*
  * When we get a command completion for a Stop Endpoint Command, we need to
  * unlink any cancelled TDs from the ring.  There are two ways to do that:
@@ -457,7 +485,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
         struct xhci_td *cur_td = 0;
         struct xhci_td *last_unlinked_td;
 
-        struct dequeue_state deq_state;
+        struct xhci_dequeue_state deq_state;
 #ifdef CONFIG_USB_HCD_STAT
         ktime_t stop_time = ktime_get();
 #endif
@@ -485,7 +513,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
                  * move the xHC endpoint ring dequeue pointer past this TD.
                  */
                 if (cur_td == ep_ring->stopped_td)
-                        find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
+                        xhci_find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
                                         &deq_state);
                 else
                         td_to_noop(xhci, ep_ring, cur_td);
@@ -501,24 +529,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 
         /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
         if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
-                xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
-                                "new deq ptr = %p (0x%llx dma), new cycle = %u\n",
-                                deq_state.new_deq_seg,
-                                (unsigned long long)deq_state.new_deq_seg->dma,
-                                deq_state.new_deq_ptr,
-                                (unsigned long long)xhci_trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr),
-                                deq_state.new_cycle_state);
-                queue_set_tr_deq(xhci, slot_id, ep_index,
-                                deq_state.new_deq_seg,
-                                deq_state.new_deq_ptr,
-                                (u32) deq_state.new_cycle_state);
-                /* Stop the TD queueing code from ringing the doorbell until
-                 * this command completes.  The HC won't set the dequeue pointer
-                 * if the ring is running, and ringing the doorbell starts the
-                 * ring running.
-                 */
-                ep_ring->state |= SET_DEQ_PENDING;
-                xhci_ring_cmd_db(xhci);
+                xhci_queue_new_dequeue_state(xhci, ep_ring,
+                                slot_id, ep_index, &deq_state);
         } else {
                 /* Otherwise just ring the doorbell to restart the ring */
                 ring_ep_doorbell(xhci, slot_id, ep_index);
@@ -929,12 +941,15 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                 if (event_trb != ep_ring->dequeue) {
                         /* The event was for the status stage */
                         if (event_trb == td->last_trb) {
-                                /* Did we already see a short data stage? */
-                                if (td->urb->actual_length != 0)
-                                        status = -EREMOTEIO;
-                                else
+                                if (td->urb->actual_length != 0) {
+                                        /* Don't overwrite a previously set error code */
+                                        if (status == -EINPROGRESS || status == 0)
+                                                /* Did we already see a short data stage? */
+                                                status = -EREMOTEIO;
+                                } else {
                                         td->urb->actual_length =
                                                 td->urb->transfer_buffer_length;
+                                }
                         } else {
                                 /* Maybe the event was for the data stage? */
                                 if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL) {
@@ -992,16 +1007,20 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                                                 TRB_LEN(event->transfer_len));
                                 td->urb->actual_length = 0;
                         }
-                        if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
-                                status = -EREMOTEIO;
-                        else
-                                status = 0;
+                        /* Don't overwrite a previously set error code */
+                        if (status == -EINPROGRESS) {
+                                if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+                                        status = -EREMOTEIO;
+                                else
+                                        status = 0;
+                        }
                 } else {
                         td->urb->actual_length = td->urb->transfer_buffer_length;
                         /* Ignore a short packet completion if the
                          * untransferred length was zero.
                          */
-                        status = 0;
+                        if (status == -EREMOTEIO)
+                                status = 0;
                 }
         } else {
                 /* Slow path - walk the list, starting from the dequeue
@@ -1028,19 +1047,30 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                                         TRB_LEN(event->transfer_len);
                         }
                 }
-        /* The Endpoint Stop Command completion will take care of
-         * any stopped TDs.  A stopped TD may be restarted, so don't update the
-         * ring dequeue pointer or take this TD off any lists yet.
-         */
         if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL ||
                         GET_COMP_CODE(event->transfer_len) == COMP_STOP) {
+                /* The Endpoint Stop Command completion will take care of any
+                 * stopped TDs.  A stopped TD may be restarted, so don't update
+                 * the ring dequeue pointer or take this TD off any lists yet.
+                 */
                 ep_ring->stopped_td = td;
                 ep_ring->stopped_trb = event_trb;
         } else {
-                /* Update ring dequeue pointer */
-                while (ep_ring->dequeue != td->last_trb)
+                if (GET_COMP_CODE(event->transfer_len) == COMP_STALL) {
+                        /* The transfer is completed from the driver's
+                         * perspective, but we need to issue a set dequeue
+                         * command for this stalled endpoint to move the dequeue
+                         * pointer past the TD.  We can't do that here because
+                         * the halt condition must be cleared first.
+                         */
+                        ep_ring->stopped_td = td;
+                        ep_ring->stopped_trb = event_trb;
+                } else {
+                        /* Update ring dequeue pointer */
+                        while (ep_ring->dequeue != td->last_trb)
+                                inc_deq(xhci, ep_ring, false);
                         inc_deq(xhci, ep_ring, false);
-                inc_deq(xhci, ep_ring, false);
+                }
 
                 /* Clean up the endpoint's TD list */
                 urb = td->urb;
@@ -1050,7 +1080,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                         list_del(&td->cancelled_td_list);
                         ep_ring->cancels_pending--;
                 }
-                kfree(td);
+                /* Leave the TD around for the reset endpoint function to use */
+                if (GET_COMP_CODE(event->transfer_len) != COMP_STALL) {
+                        kfree(td);
+                }
                 urb->hcpriv = NULL;
         }
 cleanup:
@@ -1166,13 +1199,13 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
                  */
                 xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
                 return -ENOENT;
-        case EP_STATE_HALTED:
         case EP_STATE_ERROR:
-                xhci_warn(xhci, "WARN waiting for halt or error on ep "
-                                "to be cleared\n");
+                xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
                 /* FIXME event handling code for error needs to clear it */
                 /* XXX not sure if this should be -ENOENT or not */
                 return -EINVAL;
+        case EP_STATE_HALTED:
+                xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
         case EP_STATE_STOPPED:
         case EP_STATE_RUNNING:
                 break;
@@ -1724,10 +1757,12 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
         u32 type = TRB_TYPE(TRB_SET_DEQ);
 
         addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
-        if (addr == 0)
+        if (addr == 0) {
                 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
                 xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
                                 deq_seg, deq_ptr);
+                return 0;
+        }
         return queue_command(xhci, lower_32_bits(addr) | cycle_state,
                         upper_32_bits(addr), 0,
                         trb_slot_id | trb_ep_index | type);
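One pattern runs through the handle_tx_event() hunks above: a status already recorded for the URB (a stall, or an earlier short data stage) must not be clobbered by the later status-stage or short-packet bookkeeping, where -EINPROGRESS means no verdict has been reached yet. A hypothetical helper distilling that rule, for illustration only; the driver open-codes these checks rather than using such a function.

/* Illustrative only -- this helper does not exist in the driver. */
static int sketch_update_status(int cur_status, int new_status)
{
        /* -EINPROGRESS means "no completion verdict yet"; only then
         * may the bookkeeping paths install a new status.
         */
        if (cur_status == -EINPROGRESS)
                return new_status;
        return cur_status;
}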
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 9c108c632704..d31d32206ba3 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -952,6 +952,12 @@ struct xhci_ring {
         u32                     cycle_state;
 };
 
+struct xhci_dequeue_state {
+        struct xhci_segment     *new_deq_seg;
+        union xhci_trb          *new_deq_ptr;
+        int                     new_cycle_state;
+};
+
 struct xhci_erst_entry {
         /* 64-bit event ring segment address */
         u64     seg_addr;
@@ -1203,6 +1209,12 @@ int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
                 u32 slot_id);
 int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
                 unsigned int ep_index);
+void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
+                unsigned int slot_id, unsigned int ep_index,
+                struct xhci_td *cur_td, struct xhci_dequeue_state *state);
+void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
+                struct xhci_ring *ep_ring, unsigned int slot_id,
+                unsigned int ep_index, struct xhci_dequeue_state *deq_state);
 
 /* xHCI roothub code */
 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,