Diffstat (limited to 'drivers/usb/host')

 -rw-r--r--  drivers/usb/host/xhci-hub.c  |  21
 -rw-r--r--  drivers/usb/host/xhci-ring.c | 107
 -rw-r--r--  drivers/usb/host/xhci.c      | 194
 -rw-r--r--  drivers/usb/host/xhci.h      |  31
 4 files changed, 216 insertions, 137 deletions
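
The thread running through all four files: every command-queuing helper now takes a struct xhci_command, so the completion, status, and command TRB pointer travel with the command itself instead of living in per-device fields or local variables. A minimal sketch of the synchronous calling convention, condensed from the xhci_check_maxpacket() and xhci_configure_endpoint() hunks in xhci.c below (not a verbatim excerpt; locking, command-list bookkeeping, and most error handling are trimmed):

	struct xhci_command *command;

	/* 2nd arg: allocate a private input context; 3rd: a completion */
	command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
	if (!command)
		return -ENOMEM;

	command->in_ctx = virt_dev->in_ctx;	/* borrow the device's input context */
	ret = xhci_queue_configure_endpoint(xhci, command, command->in_ctx->dma,
			udev->slot_id, false);
	xhci_ring_cmd_db(xhci);

	/* completion and status now live in the command itself */
	wait_for_completion_interruptible_timeout(command->completion,
			XHCI_CMD_DEFAULT_TIMEOUT);
	ret = xhci_configure_endpoint_result(xhci, udev, &command->status);

	kfree(command->completion);
	kfree(command);
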
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 1ad6bc1951c7..3ce9c0ac2614 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -20,7 +20,8 @@
  * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
-#include <linux/gfp.h>
+
+#include <linux/slab.h>
 #include <asm/unaligned.h>
 
 #include "xhci.h"
@@ -284,12 +285,22 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
 
 	spin_lock_irqsave(&xhci->lock, flags);
 	for (i = LAST_EP_INDEX; i > 0; i--) {
-		if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue)
-			xhci_queue_stop_endpoint(xhci, slot_id, i, suspend);
+		if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) {
+			struct xhci_command *command;
+			command = xhci_alloc_command(xhci, false, false,
+						     GFP_NOIO);
+			if (!command) {
+				spin_unlock_irqrestore(&xhci->lock, flags);
+				xhci_free_command(xhci, cmd);
+				return -ENOMEM;
+
+			}
+			xhci_queue_stop_endpoint(xhci, command, slot_id, i,
+						 suspend);
+		}
 	}
-	cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
 	list_add_tail(&cmd->cmd_list, &virt_dev->cmd_list);
-	xhci_queue_stop_endpoint(xhci, slot_id, 0, suspend);
+	xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
 	xhci_ring_cmd_db(xhci);
 	spin_unlock_irqrestore(&xhci->lock, flags);
 
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 7a0e3c720c00..b172a7dee6ac 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -123,16 +123,6 @@ static int enqueue_is_link_trb(struct xhci_ring *ring)
 	return TRB_TYPE_LINK_LE32(link->control);
 }
 
-union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring)
-{
-	/* Enqueue pointer can be left pointing to the link TRB,
-	 * we must handle that
-	 */
-	if (TRB_TYPE_LINK_LE32(ring->enqueue->link.control))
-		return ring->enq_seg->next->trbs;
-	return ring->enqueue;
-}
-
 /* Updates trb to point to the next TRB in the ring, and updates seg if the next
  * TRB is in a new segment. This does not skip over link TRBs, and it does not
  * effect the ring dequeue or enqueue pointers.
@@ -684,12 +674,14 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 	}
 }
 
-static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
+static int queue_set_tr_deq(struct xhci_hcd *xhci,
+		struct xhci_command *cmd, int slot_id,
 		unsigned int ep_index, unsigned int stream_id,
 		struct xhci_segment *deq_seg,
 		union xhci_trb *deq_ptr, u32 cycle_state);
 
 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
+		struct xhci_command *cmd,
 		unsigned int slot_id, unsigned int ep_index,
 		unsigned int stream_id,
 		struct xhci_dequeue_state *deq_state)
@@ -704,7 +696,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
 			deq_state->new_deq_ptr,
 			(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
 			deq_state->new_cycle_state);
-	queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
+	queue_set_tr_deq(xhci, cmd, slot_id, ep_index, stream_id,
 			deq_state->new_deq_seg,
 			deq_state->new_deq_ptr,
 			(u32) deq_state->new_cycle_state);
@@ -858,7 +850,9 @@ remove_finished_td:
 
 	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
 	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
-		xhci_queue_new_dequeue_state(xhci,
+		struct xhci_command *command;
+		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
+		xhci_queue_new_dequeue_state(xhci, command,
 				slot_id, ep_index,
 				ep->stopped_td->urb->stream_id,
 				&deq_state);
@@ -1206,9 +1200,11 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
 	 * because the HW can't handle two commands being queued in a row.
 	 */
 	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
+		struct xhci_command *command;
+		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
 		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
 				"Queueing configure endpoint command");
-		xhci_queue_configure_endpoint(xhci,
+		xhci_queue_configure_endpoint(xhci, command,
 				xhci->devs[slot_id]->in_ctx->dma, slot_id,
 				false);
 		xhci_ring_cmd_db(xhci);
@@ -1465,7 +1461,7 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
 			add_flags - SLOT_FLAG == drop_flags) {
 		ep_state = virt_dev->eps[ep_index].ep_state;
 		if (!(ep_state & EP_HALTED))
-			goto bandwidth_change;
+			return;
 		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
 				"Completed config ep cmd - "
 				"last ep index = %d, state = %d",
@@ -1475,11 +1471,6 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
 		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 		return;
 	}
-bandwidth_change:
-	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
-			"Completed config ep cmd");
-	virt_dev->cmd_status = cmd_comp_code;
-	complete(&virt_dev->cmd_completion);
 	return;
 }
 
@@ -1938,11 +1929,16 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
 		struct xhci_td *td, union xhci_trb *event_trb)
 {
 	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+	struct xhci_command *command;
+	command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
+	if (!command)
+		return;
+
 	ep->ep_state |= EP_HALTED;
 	ep->stopped_td = td;
 	ep->stopped_stream = stream_id;
 
-	xhci_queue_reset_ep(xhci, slot_id, ep_index);
+	xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
 	xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
 
 	ep->stopped_td = NULL;
@@ -2654,7 +2650,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		 * successful event after a short transfer.
 		 * Ignore it.
 		 */
-		if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && 
+		if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
 				ep_ring->last_td_was_short) {
 			ep_ring->last_td_was_short = false;
 			ret = 0;
@@ -3996,8 +3992,9 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
  * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
  * because the command event handler may want to resubmit a failed command.
  */
-static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
-		u32 field3, u32 field4, bool command_must_succeed)
+static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		u32 field1, u32 field2,
+		u32 field3, u32 field4, bool command_must_succeed)
 {
 	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
 	int ret;
@@ -4014,57 +4011,65 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
 				"unfailable commands failed.\n");
 		return ret;
 	}
+	if (cmd->completion)
+		cmd->command_trb = xhci->cmd_ring->enqueue;
+	else
+		kfree(cmd);
+
 	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
 			field4 | xhci->cmd_ring->cycle_state);
 	return 0;
 }
 
 /* Queue a slot enable or disable request on the command ring */
-int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
+int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		u32 trb_type, u32 slot_id)
 {
-	return queue_command(xhci, 0, 0, 0,
+	return queue_command(xhci, cmd, 0, 0, 0,
 			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
 }
 
 /* Queue an address device command TRB */
-int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
-		u32 slot_id, enum xhci_setup_dev setup)
+int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
 {
-	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
 			upper_32_bits(in_ctx_ptr), 0,
 			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
 			| (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
 }
 
-int xhci_queue_vendor_command(struct xhci_hcd *xhci,
+int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
 		u32 field1, u32 field2, u32 field3, u32 field4)
 {
-	return queue_command(xhci, field1, field2, field3, field4, false);
+	return queue_command(xhci, cmd, field1, field2, field3, field4, false);
 }
 
 /* Queue a reset device command TRB */
-int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
+int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		u32 slot_id)
 {
-	return queue_command(xhci, 0, 0, 0,
+	return queue_command(xhci, cmd, 0, 0, 0,
 			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
 			false);
 }
 
 /* Queue a configure endpoint command TRB */
-int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
+		struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
 		u32 slot_id, bool command_must_succeed)
 {
-	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
 			upper_32_bits(in_ctx_ptr), 0,
 			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
 			command_must_succeed);
 }
 
 /* Queue an evaluate context command TRB */
-int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
-		u32 slot_id, bool command_must_succeed)
+int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
 {
-	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
 			upper_32_bits(in_ctx_ptr), 0,
 			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
 			command_must_succeed);
@@ -4074,25 +4079,26 @@ int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
  * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
  * activity on an endpoint that is about to be suspended.
  */
-int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
-		unsigned int ep_index, int suspend)
+int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		int slot_id, unsigned int ep_index, int suspend)
 {
 	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
 	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
 	u32 type = TRB_TYPE(TRB_STOP_RING);
 	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
 
-	return queue_command(xhci, 0, 0, 0,
+	return queue_command(xhci, cmd, 0, 0, 0,
 			trb_slot_id | trb_ep_index | type | trb_suspend, false);
 }
 
 /* Set Transfer Ring Dequeue Pointer command.
  * This should not be used for endpoints that have streams enabled.
  */
-static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
-		unsigned int ep_index, unsigned int stream_id,
-		struct xhci_segment *deq_seg,
-		union xhci_trb *deq_ptr, u32 cycle_state)
+static int queue_set_tr_deq(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		int slot_id,
+		unsigned int ep_index, unsigned int stream_id,
+		struct xhci_segment *deq_seg,
+		union xhci_trb *deq_ptr, u32 cycle_state)
 {
 	dma_addr_t addr;
 	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
@@ -4119,18 +4125,19 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
 	ep->queued_deq_ptr = deq_ptr;
 	if (stream_id)
 		trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
-	return queue_command(xhci, lower_32_bits(addr) | trb_sct | cycle_state,
+	return queue_command(xhci, cmd,
+			lower_32_bits(addr) | trb_sct | cycle_state,
 			upper_32_bits(addr), trb_stream_id,
 			trb_slot_id | trb_ep_index | type, false);
 }
 
-int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
-		unsigned int ep_index)
+int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		int slot_id, unsigned int ep_index)
 {
 	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
 	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
 	u32 type = TRB_TYPE(TRB_RESET_EP);
 
-	return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
-			false);
+	return queue_command(xhci, cmd, 0, 0, 0,
+			trb_slot_id | trb_ep_index | type, false);
 }
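
Note the ownership rule queue_command() now encodes: a command carrying a completion has its enqueue TRB recorded in cmd->command_trb and is freed by whoever waits on it, while a command without one is kfree()d by queue_command() itself once the TRB fields have been consumed. A sketch of the resulting fire-and-forget usage, modeled on xhci_cleanup_halted_endpoint() above (slot_id and ep_index are assumed to come from the caller):

	struct xhci_command *command;

	/* no completion allocated, so queue_command() will free this */
	command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
	if (!command)
		return;
	xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
	/* do not touch command past this point: it may already be freed */
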
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 92e1dda7246b..9a4c6dfa26dc 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -641,10 +641,14 @@ int xhci_run(struct usb_hcd *hcd)
 	writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
 	xhci_print_ir_set(xhci, 0);
 
-	if (xhci->quirks & XHCI_NEC_HOST)
-		xhci_queue_vendor_command(xhci, 0, 0, 0,
+	if (xhci->quirks & XHCI_NEC_HOST) {
+		struct xhci_command *command;
+		command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
+		if (!command)
+			return -ENOMEM;
+		xhci_queue_vendor_command(xhci, command, 0, 0, 0,
 				TRB_TYPE(TRB_NEC_GET_FW));
-
+	}
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 			"Finished xhci_run for USB2 roothub");
 	return 0;
@@ -1187,10 +1191,10 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
 		unsigned int ep_index, struct urb *urb)
 {
-	struct xhci_container_ctx *in_ctx;
 	struct xhci_container_ctx *out_ctx;
 	struct xhci_input_control_ctx *ctrl_ctx;
 	struct xhci_ep_ctx *ep_ctx;
+	struct xhci_command *command;
 	int max_packet_size;
 	int hw_max_packet_size;
 	int ret = 0;
@@ -1215,18 +1219,24 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
 		/* FIXME: This won't work if a non-default control endpoint
 		 * changes max packet sizes.
 		 */
-		in_ctx = xhci->devs[slot_id]->in_ctx;
-		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+
+		command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
+		if (!command)
+			return -ENOMEM;
+
+		command->in_ctx = xhci->devs[slot_id]->in_ctx;
+		ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
 		if (!ctrl_ctx) {
 			xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
 					__func__);
-			return -ENOMEM;
+			ret = -ENOMEM;
+			goto command_cleanup;
 		}
 		/* Set up the modified control endpoint 0 */
 		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
 				xhci->devs[slot_id]->out_ctx, ep_index);
 
-		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
+		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
 		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
 		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
 
@@ -1234,17 +1244,20 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
 		ctrl_ctx->drop_flags = 0;
 
 		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
-		xhci_dbg_ctx(xhci, in_ctx, ep_index);
+		xhci_dbg_ctx(xhci, command->in_ctx, ep_index);
 		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
 		xhci_dbg_ctx(xhci, out_ctx, ep_index);
 
-		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
+		ret = xhci_configure_endpoint(xhci, urb->dev, command,
 				true, false);
 
 		/* Clean up the input context for later use by bandwidth
 		 * functions.
 		 */
 		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
+command_cleanup:
+		kfree(command->completion);
+		kfree(command);
 	}
 	return ret;
 }
@@ -1465,6 +1478,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	unsigned int ep_index;
 	struct xhci_ring *ep_ring;
 	struct xhci_virt_ep *ep;
+	struct xhci_command *command;
 
 	xhci = hcd_to_xhci(hcd);
 	spin_lock_irqsave(&xhci->lock, flags);
@@ -1534,12 +1548,14 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	 * the first cancellation to be handled.
 	 */
 	if (!(ep->ep_state & EP_HALT_PENDING)) {
+		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
 		ep->ep_state |= EP_HALT_PENDING;
 		ep->stop_cmds_pending++;
 		ep->stop_cmd_timer.expires = jiffies +
 			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
 		add_timer(&ep->stop_cmd_timer);
-		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
+		xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
+					 ep_index, 0);
 		xhci_ring_cmd_db(xhci);
 	}
 done:
@@ -2576,21 +2592,16 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 	int ret;
 	int timeleft;
 	unsigned long flags;
-	struct xhci_container_ctx *in_ctx;
 	struct xhci_input_control_ctx *ctrl_ctx;
-	struct completion *cmd_completion;
-	u32 *cmd_status;
 	struct xhci_virt_device *virt_dev;
-	union xhci_trb *cmd_trb;
+
+	if (!command)
+		return -EINVAL;
 
 	spin_lock_irqsave(&xhci->lock, flags);
 	virt_dev = xhci->devs[udev->slot_id];
 
-	if (command)
-		in_ctx = command->in_ctx;
-	else
-		in_ctx = virt_dev->in_ctx;
-	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
 	if (!ctrl_ctx) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
@@ -2607,7 +2618,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 		return -ENOMEM;
 	}
 	if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
-			xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) {
+			xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
 		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
 			xhci_free_host_resources(xhci, ctrl_ctx);
 		spin_unlock_irqrestore(&xhci->lock, flags);
@@ -2615,27 +2626,18 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 		return -ENOMEM;
 	}
 
-	if (command) {
-		cmd_completion = command->completion;
-		cmd_status = &command->status;
-		command->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
-		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
-	} else {
-		cmd_completion = &virt_dev->cmd_completion;
-		cmd_status = &virt_dev->cmd_status;
-	}
-	init_completion(cmd_completion);
+	list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
 
-	cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
 	if (!ctx_change)
-		ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
+		ret = xhci_queue_configure_endpoint(xhci, command,
+				command->in_ctx->dma,
 				udev->slot_id, must_succeed);
 	else
-		ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
+		ret = xhci_queue_evaluate_context(xhci, command,
+				command->in_ctx->dma,
 				udev->slot_id, must_succeed);
 	if (ret < 0) {
-		if (command)
-			list_del(&command->cmd_list);
+		list_del(&command->cmd_list);
 		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
 			xhci_free_host_resources(xhci, ctrl_ctx);
 		spin_unlock_irqrestore(&xhci->lock, flags);
@@ -2648,7 +2650,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 
 	/* Wait for the configure endpoint command to complete */
 	timeleft = wait_for_completion_interruptible_timeout(
-			cmd_completion,
+			command->completion,
 			XHCI_CMD_DEFAULT_TIMEOUT);
 	if (timeleft <= 0) {
 		xhci_warn(xhci, "%s while waiting for %s command\n",
@@ -2657,16 +2659,18 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 				"configure endpoint" :
 				"evaluate context");
 		/* cancel the configure endpoint command */
-		ret = xhci_cancel_cmd(xhci, command, cmd_trb);
+		ret = xhci_cancel_cmd(xhci, command, command->command_trb);
 		if (ret < 0)
 			return ret;
 		return -ETIME;
 	}
 
 	if (!ctx_change)
-		ret = xhci_configure_endpoint_result(xhci, udev, cmd_status);
+		ret = xhci_configure_endpoint_result(xhci, udev,
+						     &command->status);
 	else
-		ret = xhci_evaluate_context_result(xhci, udev, cmd_status);
+		ret = xhci_evaluate_context_result(xhci, udev,
+						   &command->status);
 
 	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
 		spin_lock_irqsave(&xhci->lock, flags);
@@ -2714,6 +2718,7 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 	struct xhci_virt_device *virt_dev;
 	struct xhci_input_control_ctx *ctrl_ctx;
 	struct xhci_slot_ctx *slot_ctx;
+	struct xhci_command *command;
 
 	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
 	if (ret <= 0)
@@ -2725,12 +2730,19 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
 	virt_dev = xhci->devs[udev->slot_id];
 
+	command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
+	if (!command)
+		return -ENOMEM;
+
+	command->in_ctx = virt_dev->in_ctx;
+
 	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
-	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
 	if (!ctrl_ctx) {
 		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
 				__func__);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto command_cleanup;
 	}
 	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
 	ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
@@ -2738,20 +2750,20 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 
 	/* Don't issue the command if there's no endpoints to update. */
 	if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
-			ctrl_ctx->drop_flags == 0)
-		return 0;
-
+			ctrl_ctx->drop_flags == 0) {
+		ret = 0;
+		goto command_cleanup;
+	}
 	xhci_dbg(xhci, "New Input Control Context:\n");
 	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
 	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
 			LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
 
-	ret = xhci_configure_endpoint(xhci, udev, NULL,
+	ret = xhci_configure_endpoint(xhci, udev, command,
 			false, false);
-	if (ret) {
+	if (ret)
 		/* Callee should call reset_bandwidth() */
-		return ret;
-	}
+		goto command_cleanup;
 
 	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
 	xhci_dbg_ctx(xhci, virt_dev->out_ctx,
@@ -2783,6 +2795,9 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
 		virt_dev->eps[i].new_ring = NULL;
 	}
+command_cleanup:
+	kfree(command->completion);
+	kfree(command);
 
 	return ret;
 }
@@ -2884,9 +2899,14 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
 	 * issue a configure endpoint command later.
 	 */
 	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
+		struct xhci_command *command;
+		/* Can't sleep if we're called from cleanup_halted_endpoint() */
+		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
+		if (!command)
+			return;
 		xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
 				"Queueing new dequeue state");
-		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
+		xhci_queue_new_dequeue_state(xhci, command, udev->slot_id,
 				ep_index, ep->stopped_stream, &deq_state);
 	} else {
 		/* Better hope no one uses the input context between now and the
@@ -2917,6 +2937,7 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
 	unsigned long flags;
 	int ret;
 	struct xhci_virt_ep *virt_ep;
+	struct xhci_command *command;
 
 	xhci = hcd_to_xhci(hcd);
 	udev = (struct usb_device *) ep->hcpriv;
@@ -2939,10 +2960,14 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
 		return;
 	}
 
+	command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
+	if (!command)
+		return;
+
 	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
 			"Queueing reset endpoint command");
 	spin_lock_irqsave(&xhci->lock, flags);
-	ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
+	ret = xhci_queue_reset_ep(xhci, command, udev->slot_id, ep_index);
 	/*
 	 * Can't change the ring dequeue pointer until it's transitioned to the
 	 * stopped state, which is only upon a successful reset endpoint
@@ -3473,10 +3498,9 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
 
 	/* Attempt to submit the Reset Device command to the command ring */
 	spin_lock_irqsave(&xhci->lock, flags);
-	reset_device_cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
 
 	list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
-	ret = xhci_queue_reset_device(xhci, slot_id);
+	ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
 	if (ret) {
 		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
 		list_del(&reset_device_cmd->cmd_list);
@@ -3589,6 +3613,11 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
 	unsigned long flags;
 	u32 state;
 	int i, ret;
+	struct xhci_command *command;
+
+	command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
+	if (!command)
+		return;
 
 #ifndef CONFIG_USB_DEFAULT_PERSIST
 	/*
@@ -3604,8 +3633,10 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
 	/* If the host is halted due to driver unload, we still need to free the
 	 * device.
 	 */
-	if (ret <= 0 && ret != -ENODEV)
+	if (ret <= 0 && ret != -ENODEV) {
+		kfree(command);
 		return;
+	}
 
 	virt_dev = xhci->devs[udev->slot_id];
 
@@ -3622,16 +3653,19 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
 			(xhci->xhc_state & XHCI_STATE_HALTED)) {
 		xhci_free_virt_device(xhci, udev->slot_id);
 		spin_unlock_irqrestore(&xhci->lock, flags);
+		kfree(command);
 		return;
 	}
 
-	if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
+	if (xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
+				    udev->slot_id)) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
 		return;
 	}
 	xhci_ring_cmd_db(xhci);
 	spin_unlock_irqrestore(&xhci->lock, flags);
+
 	/*
 	 * Event command completion handler will free any data structures
 	 * associated with the slot. XXX Can free sleep?
@@ -3671,27 +3705,35 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
 	unsigned long flags;
 	int timeleft;
 	int ret;
-	union xhci_trb *cmd_trb;
+	struct xhci_command *command;
+
+	command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
+	if (!command)
+		return 0;
 
 	spin_lock_irqsave(&xhci->lock, flags);
-	cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
-	ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
+	command->completion = &xhci->addr_dev;
+	ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
 	if (ret) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+		kfree(command);
 		return 0;
 	}
 	xhci_ring_cmd_db(xhci);
 	spin_unlock_irqrestore(&xhci->lock, flags);
 
 	/* XXX: how much time for xHC slot assignment? */
-	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
+	timeleft = wait_for_completion_interruptible_timeout(
+			command->completion,
 			XHCI_CMD_DEFAULT_TIMEOUT);
 	if (timeleft <= 0) {
 		xhci_warn(xhci, "%s while waiting for a slot\n",
 				timeleft == 0 ? "Timeout" : "Signal");
 		/* cancel the enable slot request */
-		return xhci_cancel_cmd(xhci, NULL, cmd_trb);
+		ret = xhci_cancel_cmd(xhci, NULL, command->command_trb);
+		kfree(command);
+		return ret;
 	}
 
 	if (!xhci->slot_id) {
@@ -3699,6 +3741,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
 		xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
 				HCS_MAX_SLOTS(
 					readl(&xhci->cap_regs->hcs_params1)));
+		kfree(command);
 		return 0;
 	}
 
3704 | 3747 | ||
@@ -3733,6 +3776,8 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) | |||
3733 | pm_runtime_get_noresume(hcd->self.controller); | 3776 | pm_runtime_get_noresume(hcd->self.controller); |
3734 | #endif | 3777 | #endif |
3735 | 3778 | ||
3779 | |||
3780 | kfree(command); | ||
3736 | /* Is this a LS or FS device under a HS hub? */ | 3781 | /* Is this a LS or FS device under a HS hub? */ |
3737 | /* Hub or peripherial? */ | 3782 | /* Hub or peripherial? */ |
3738 | return 1; | 3783 | return 1; |
@@ -3740,7 +3785,10 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
 disable_slot:
 	/* Disable slot, if we can do it without mem alloc */
 	spin_lock_irqsave(&xhci->lock, flags);
-	if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
+	command->completion = NULL;
+	command->status = 0;
+	if (!xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
+				     udev->slot_id))
 		xhci_ring_cmd_db(xhci);
 	spin_unlock_irqrestore(&xhci->lock, flags);
 	return 0;
@@ -3764,7 +3812,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
 	struct xhci_slot_ctx *slot_ctx;
 	struct xhci_input_control_ctx *ctrl_ctx;
 	u64 temp_64;
-	union xhci_trb *cmd_trb;
+	struct xhci_command *command;
 
 	if (!udev->slot_id) {
 		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
@@ -3785,11 +3833,19 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
 		return -EINVAL;
 	}
 
+	command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
+	if (!command)
+		return -ENOMEM;
+
+	command->in_ctx = virt_dev->in_ctx;
+	command->completion = &xhci->addr_dev;
+
 	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
 	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
 	if (!ctrl_ctx) {
 		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
 				__func__);
+		kfree(command);
 		return -EINVAL;
 	}
 	/*
@@ -3811,21 +3867,21 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
 			le32_to_cpu(slot_ctx->dev_info) >> 27);
 
 	spin_lock_irqsave(&xhci->lock, flags);
-	cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
-	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
+	ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
 			udev->slot_id, setup);
 	if (ret) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
 				"FIXME: allocate a command ring segment");
+		kfree(command);
 		return ret;
 	}
 	xhci_ring_cmd_db(xhci);
 	spin_unlock_irqrestore(&xhci->lock, flags);
 
 	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
-	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
-			XHCI_CMD_DEFAULT_TIMEOUT);
+	timeleft = wait_for_completion_interruptible_timeout(
+			command->completion, XHCI_CMD_DEFAULT_TIMEOUT);
 	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
 	 * the SetAddress() "recovery interval" required by USB and aborting the
 	 * command on a timeout.
@@ -3834,7 +3890,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
 		xhci_warn(xhci, "%s while waiting for setup %s command\n",
 				timeleft == 0 ? "Timeout" : "Signal", act);
 		/* cancel the address device command */
-		ret = xhci_cancel_cmd(xhci, NULL, cmd_trb);
+		ret = xhci_cancel_cmd(xhci, NULL, command->command_trb);
+		kfree(command);
 		if (ret < 0)
 			return ret;
 		return -ETIME;
@@ -3871,6 +3928,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
 		break;
 	}
 	if (ret) {
+		kfree(command);
 		return ret;
 	}
 	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
@@ -3905,7 +3963,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
 	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
 			"Internal device address = %d",
 			le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
-
+	kfree(command);
 	return 0;
 }
 
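
One wrinkle in the xhci.c hunks above: the enable-slot and address-device paths do not allocate a private completion; they point command->completion at the pre-existing xhci->addr_dev completion. Cleanup on those paths is therefore a bare kfree(command), never kfree(command->completion). A condensed sketch of xhci_alloc_dev() above (locking and error paths trimmed):

	struct xhci_command *command;

	command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
	if (!command)
		return 0;

	command->completion = &xhci->addr_dev;	/* borrowed, not owned */
	ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
	xhci_ring_cmd_db(xhci);

	wait_for_completion_interruptible_timeout(command->completion,
			XHCI_CMD_DEFAULT_TIMEOUT);

	kfree(command);		/* the completion is global state; leave it alone */
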
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index cc67c7686706..c0fdb4984b0d 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1807,13 +1807,14 @@ struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
 		dma_addr_t suspect_dma);
 int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code);
 void xhci_ring_cmd_db(struct xhci_hcd *xhci);
-int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id);
-int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
-		u32 slot_id, enum xhci_setup_dev);
-int xhci_queue_vendor_command(struct xhci_hcd *xhci,
+int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		u32 trb_type, u32 slot_id);
+int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev);
+int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
 		u32 field1, u32 field2, u32 field3, u32 field4);
-int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
-		unsigned int ep_index, int suspend);
+int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		int slot_id, unsigned int ep_index, int suspend);
 int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
 		int slot_id, unsigned int ep_index);
 int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
@@ -1822,18 +1823,21 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
 		int slot_id, unsigned int ep_index);
 int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
 		struct urb *urb, int slot_id, unsigned int ep_index);
-int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
-		u32 slot_id, bool command_must_succeed);
-int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
-		u32 slot_id, bool command_must_succeed);
-int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
-		unsigned int ep_index);
-int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id);
+int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
+		struct xhci_command *cmd, dma_addr_t in_ctx_ptr, u32 slot_id,
+		bool command_must_succeed);
+int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed);
+int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		int slot_id, unsigned int ep_index);
+int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		u32 slot_id);
 void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 		unsigned int slot_id, unsigned int ep_index,
 		unsigned int stream_id, struct xhci_td *cur_td,
 		struct xhci_dequeue_state *state);
 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
+		struct xhci_command *cmd,
 		unsigned int slot_id, unsigned int ep_index,
 		unsigned int stream_id,
 		struct xhci_dequeue_state *deq_state);
@@ -1847,7 +1851,6 @@ int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
 		union xhci_trb *cmd_trb);
 void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
 		unsigned int ep_index, unsigned int stream_id);
-union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring);
 
 /* xHCI roothub code */
 void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array,