author		Hans de Goede <hdegoede@redhat.com>	2014-08-20 09:41:52 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2014-09-24 00:46:10 -0400
commit		1e3452e3f08c5af7fb4b08551aaa96b6627c7416 (patch)
tree		2908d29ee0bae8e47385d5f86a12c42874826b4d /drivers/usb/host/xhci-ring.c
parent		fac1f48584c1b6c745412cf8c5dbdc1725aad8f2 (diff)
xhci: Move allocating of command for new_dequeue_state to queue_set_tr_deq()
There are multiple reasons for this:

1) This fixes a missing check for xhci_alloc_command failing in
   xhci_handle_cmd_stop_ep()
2) This adds a warning when we cannot set the new dequeue state because of
   xhci_alloc_command failing
3) It puts the allocation of the command after the sanity checks in
   queue_set_tr_deq(), avoiding leaking the command if those fail
4) Since queue_set_tr_deq now owns the command it can free it if
   queue_command fails
5) It reduces code duplication

Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r--	drivers/usb/host/xhci-ring.c	| 35 ++++++++++++++++++++++++-------------
1 file changed, 22 insertions(+), 13 deletions(-)
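In short, the patch below switches queue_set_tr_deq() to the common "callee allocates, callee cleans up on failure" idiom: the command is allocated only after the sanity checks pass, and it is freed again if queue_command() fails, so callers no longer have to allocate or check anything themselves. A minimal userspace sketch of that idiom follows; the struct and function names and the plain malloc()/free() calls are illustrative stand-ins, not the driver's API (the real code uses xhci_alloc_command()/xhci_free_command() with GFP_ATOMIC).

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct xhci_command; just enough to show ownership. */
struct command {
	int payload;
};

/*
 * Queue a command: the callee allocates after its sanity checks and frees
 * on queueing failure, so the caller never sees a half-constructed command
 * and never has to free one it did not allocate.
 */
static int queue_set_deq_sketch(int sanity_ok, int queue_ok)
{
	struct command *cmd;
	int ret;

	if (!sanity_ok)			/* checks run before allocation, nothing can leak */
		return 0;

	cmd = malloc(sizeof(*cmd));	/* stand-in for xhci_alloc_command(..., GFP_ATOMIC) */
	if (!cmd) {
		fprintf(stderr, "cannot submit command: out of memory\n");
		return 0;
	}

	ret = queue_ok ? 0 : -ENOMEM;	/* pretend to queue the command on the ring */
	if (ret < 0) {
		free(cmd);		/* callee owns cleanup on failure */
		return ret;
	}

	return 0;			/* on success the (imaginary) ring owns cmd */
}

int main(void)
{
	printf("%d\n", queue_set_deq_sketch(1, 1));	/* 0: queued */
	printf("%d\n", queue_set_deq_sketch(1, 0));	/* -ENOMEM: freed and reported */
	return 0;
}

On success the sketch deliberately does not free cmd: as in the driver, ownership of a successfully queued command passes to the command ring rather than staying with the caller.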
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index abed30b82905..8ec5463c9316 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -572,14 +572,12 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 	}
 }
 
-static int queue_set_tr_deq(struct xhci_hcd *xhci,
-		struct xhci_command *cmd, int slot_id,
+static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
 		unsigned int ep_index, unsigned int stream_id,
 		struct xhci_segment *deq_seg,
 		union xhci_trb *deq_ptr, u32 cycle_state);
 
 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
-		struct xhci_command *cmd,
 		unsigned int slot_id, unsigned int ep_index,
 		unsigned int stream_id,
 		struct xhci_dequeue_state *deq_state)
@@ -594,7 +592,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
 			deq_state->new_deq_ptr,
 			(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
 			deq_state->new_cycle_state);
-	queue_set_tr_deq(xhci, cmd, slot_id, ep_index, stream_id,
+	queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
 			deq_state->new_deq_seg,
 			deq_state->new_deq_ptr,
 			(u32) deq_state->new_cycle_state);
@@ -743,12 +741,8 @@ remove_finished_td:
 
 	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
 	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
-		struct xhci_command *command;
-		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
-		xhci_queue_new_dequeue_state(xhci, command,
-				slot_id, ep_index,
-				ep->stopped_td->urb->stream_id,
-				&deq_state);
+		xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
+				ep->stopped_td->urb->stream_id, &deq_state);
 		xhci_ring_cmd_db(xhci);
 	} else {
 		/* Otherwise ring the doorbell(s) to restart queued transfers */
@@ -3929,8 +3923,7 @@ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
 /* Set Transfer Ring Dequeue Pointer command.
  * This should not be used for endpoints that have streams enabled.
  */
-static int queue_set_tr_deq(struct xhci_hcd *xhci, struct xhci_command *cmd,
-		int slot_id,
+static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
 		unsigned int ep_index, unsigned int stream_id,
 		struct xhci_segment *deq_seg,
 		union xhci_trb *deq_ptr, u32 cycle_state)
@@ -3942,6 +3935,8 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, struct xhci_command *cmd,
 	u32 trb_sct = 0;
 	u32 type = TRB_TYPE(TRB_SET_DEQ);
 	struct xhci_virt_ep *ep;
+	struct xhci_command *cmd;
+	int ret;
 
 	addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
 	if (addr == 0) {
@@ -3956,14 +3951,28 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, struct xhci_command *cmd,
 		xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
 		return 0;
 	}
+
+	/* This function gets called from contexts where it cannot sleep */
+	cmd = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
+	if (!cmd) {
+		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr: ENOMEM\n");
+		return 0;
+	}
+
 	ep->queued_deq_seg = deq_seg;
 	ep->queued_deq_ptr = deq_ptr;
 	if (stream_id)
 		trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
-	return queue_command(xhci, cmd,
+	ret = queue_command(xhci, cmd,
 			lower_32_bits(addr) | trb_sct | cycle_state,
 			upper_32_bits(addr), trb_stream_id,
 			trb_slot_id | trb_ep_index | type, false);
+	if (ret < 0) {
+		xhci_free_command(xhci, cmd);
+		return ret;
+	}
+
+	return 0;
 }
 
 int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,