-rw-r--r--	drivers/usb/host/xhci-hcd.c	82
-rw-r--r--	drivers/usb/host/xhci-mem.c	55
-rw-r--r--	drivers/usb/host/xhci-ring.c	68
-rw-r--r--	drivers/usb/host/xhci.h	34
4 files changed, 186 insertions(+), 53 deletions(-)
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 9c985d1245f3..2fcc360f0648 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -612,8 +612,8 @@ int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
 }
 
 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
-		struct usb_device *udev, struct xhci_virt_device *virt_dev,
-		bool ctx_change);
+		struct usb_device *udev, struct xhci_command *command,
+		bool ctx_change, bool must_succeed);
 
 /*
  * Full speed devices may have a max packet size greater than 8 bytes, but the
@@ -645,7 +645,8 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
 	xhci_dbg(xhci, "Issuing evaluate context command.\n");
 
 	/* Set up the modified control endpoint 0 */
-	xhci_endpoint_copy(xhci, xhci->devs[slot_id], ep_index);
+	xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
+			xhci->devs[slot_id]->out_ctx, ep_index);
 	in_ctx = xhci->devs[slot_id]->in_ctx;
 	ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
 	ep_ctx->ep_info2 &= ~MAX_PACKET_MASK;
@@ -664,8 +665,8 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
 	xhci_dbg(xhci, "Slot %d output context\n", slot_id);
 	xhci_dbg_ctx(xhci, out_ctx, ep_index);
 
-	ret = xhci_configure_endpoint(xhci, urb->dev,
-			xhci->devs[slot_id], true);
+	ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
+			true, false);
 
 	/* Clean up the input context for later use by bandwidth
 	 * functions.
@@ -1038,11 +1039,11 @@ static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *vir
 }
 
 static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
-		struct usb_device *udev, struct xhci_virt_device *virt_dev)
+		struct usb_device *udev, int *cmd_status)
 {
 	int ret;
 
-	switch (virt_dev->cmd_status) {
+	switch (*cmd_status) {
 	case COMP_ENOMEM:
 		dev_warn(&udev->dev, "Not enough host controller resources "
 				"for new device state.\n");
@@ -1068,7 +1069,7 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
 		break;
 	default:
 		xhci_err(xhci, "ERROR: unexpected command completion "
-				"code 0x%x.\n", virt_dev->cmd_status);
+				"code 0x%x.\n", *cmd_status);
 		ret = -EINVAL;
 		break;
 	}
@@ -1076,11 +1077,12 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
 }
 
 static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
-		struct usb_device *udev, struct xhci_virt_device *virt_dev)
+		struct usb_device *udev, int *cmd_status)
 {
 	int ret;
+	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
 
-	switch (virt_dev->cmd_status) {
+	switch (*cmd_status) {
 	case COMP_EINVAL:
 		dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
 				"context command.\n");
@@ -1101,7 +1103,7 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
 		break;
 	default:
 		xhci_err(xhci, "ERROR: unexpected command completion "
-				"code 0x%x.\n", virt_dev->cmd_status);
+				"code 0x%x.\n", *cmd_status);
 		ret = -EINVAL;
 		break;
 	}
@@ -1112,19 +1114,37 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
  * and wait for it to finish.
  */
 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
-		struct usb_device *udev, struct xhci_virt_device *virt_dev,
-		bool ctx_change)
+		struct usb_device *udev,
+		struct xhci_command *command,
+		bool ctx_change, bool must_succeed)
 {
 	int ret;
 	int timeleft;
 	unsigned long flags;
+	struct xhci_container_ctx *in_ctx;
+	struct completion *cmd_completion;
+	int *cmd_status;
+	struct xhci_virt_device *virt_dev;
 
 	spin_lock_irqsave(&xhci->lock, flags);
+	virt_dev = xhci->devs[udev->slot_id];
+	if (command) {
+		in_ctx = command->in_ctx;
+		cmd_completion = command->completion;
+		cmd_status = &command->status;
+		command->command_trb = xhci->cmd_ring->enqueue;
+		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
+	} else {
+		in_ctx = virt_dev->in_ctx;
+		cmd_completion = &virt_dev->cmd_completion;
+		cmd_status = &virt_dev->cmd_status;
+	}
+
 	if (!ctx_change)
-		ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx->dma,
-				udev->slot_id);
+		ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
+				udev->slot_id, must_succeed);
 	else
-		ret = xhci_queue_evaluate_context(xhci, virt_dev->in_ctx->dma,
+		ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
 				udev->slot_id);
 	if (ret < 0) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1136,7 +1156,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 
 	/* Wait for the configure endpoint command to complete */
 	timeleft = wait_for_completion_interruptible_timeout(
-			&virt_dev->cmd_completion,
+			cmd_completion,
 			USB_CTRL_SET_TIMEOUT);
 	if (timeleft <= 0) {
 		xhci_warn(xhci, "%s while waiting for %s command\n",
@@ -1149,8 +1169,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 	}
 
 	if (!ctx_change)
-		return xhci_configure_endpoint_result(xhci, udev, virt_dev);
-	return xhci_evaluate_context_result(xhci, udev, virt_dev);
+		return xhci_configure_endpoint_result(xhci, udev, cmd_status);
+	return xhci_evaluate_context_result(xhci, udev, cmd_status);
 }
 
 /* Called after one or more calls to xhci_add_endpoint() or
@@ -1196,7 +1216,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
 			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
 
-	ret = xhci_configure_endpoint(xhci, udev, virt_dev, false);
+	ret = xhci_configure_endpoint(xhci, udev, NULL,
+			false, false);
 	if (ret) {
 		/* Callee should call reset_bandwidth() */
 		return ret;
@@ -1248,19 +1269,19 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 }
 
 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
-		unsigned int slot_id, u32 add_flags, u32 drop_flags)
+		struct xhci_container_ctx *in_ctx,
+		struct xhci_container_ctx *out_ctx,
+		u32 add_flags, u32 drop_flags)
 {
 	struct xhci_input_control_ctx *ctrl_ctx;
-	ctrl_ctx = xhci_get_input_control_ctx(xhci,
-			xhci->devs[slot_id]->in_ctx);
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
 	ctrl_ctx->add_flags = add_flags;
 	ctrl_ctx->drop_flags = drop_flags;
-	xhci_slot_copy(xhci, xhci->devs[slot_id]);
+	xhci_slot_copy(xhci, in_ctx, out_ctx);
 	ctrl_ctx->add_flags |= SLOT_FLAG;
 
-	xhci_dbg(xhci, "Slot ID %d Input Context:\n", slot_id);
-	xhci_dbg_ctx(xhci, xhci->devs[slot_id]->in_ctx,
-			xhci_last_valid_endpoint(add_flags));
+	xhci_dbg(xhci, "Input Context:\n");
+	xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
 }
 
 void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
@@ -1272,7 +1293,8 @@ void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
 	u32 added_ctxs;
 	dma_addr_t addr;
 
-	xhci_endpoint_copy(xhci, xhci->devs[slot_id], ep_index);
+	xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
+			xhci->devs[slot_id]->out_ctx, ep_index);
 	in_ctx = xhci->devs[slot_id]->in_ctx;
 	ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
 	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
@@ -1288,8 +1310,8 @@ void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
 	ep_ctx->deq = addr | deq_state->new_cycle_state;
 
 	added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
-	xhci_setup_input_ctx_for_config_ep(xhci, slot_id,
-			added_ctxs, added_ctxs);
+	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
+			xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
 }
 
 void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
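
The xhci-hcd.c hunks above make the command an explicit, optional parameter: when xhci_configure_endpoint() is given a struct xhci_command, it uses that command's input context, completion, and status word, and links it onto the device's cmd_list so the event handler can match it; with a NULL command it falls back to the per-device fields as before. A minimal caller sketch, hypothetical and not part of this patch, assuming it lives in xhci-hcd.c where the static xhci_configure_endpoint() is visible:

	/* Hypothetical sketch: issue a configure endpoint command through a
	 * pre-allocated struct xhci_command, so nothing can fail on an
	 * input-context allocation at submission time.
	 */
	static int example_configure_with_command(struct xhci_hcd *xhci,
			struct usb_device *udev)
	{
		struct xhci_command *command;
		int ret;

		/* true => allocate a completion so we can wait on the result */
		command = xhci_alloc_command(xhci, true, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		/* The caller fills command->in_ctx (add/drop flags, endpoint
		 * contexts) before submitting, e.g. via
		 * xhci_setup_input_ctx_for_config_ep().
		 */
		ret = xhci_configure_endpoint(xhci, udev, command,
				false /* ctx_change */, false /* must_succeed */);

		/* Since a completion was allocated, the event handler signals
		 * it rather than freeing, so freeing is the caller's job.
		 */
		xhci_free_command(xhci, command);
		return ret;
	}

Pre-allocating the command moves the only allocation that can fail to a point where the caller can still back out cleanly.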
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 75458ecc8eab..6e6797a38780 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -319,6 +319,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 		goto fail;
 
 	init_completion(&dev->cmd_completion);
+	INIT_LIST_HEAD(&dev->cmd_list);
 
 	/* Point to output device context in dcbaa. */
 	xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma;
@@ -624,13 +625,15 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci,
  * issue a configure endpoint command.
  */
 void xhci_endpoint_copy(struct xhci_hcd *xhci,
-		struct xhci_virt_device *vdev, unsigned int ep_index)
+		struct xhci_container_ctx *in_ctx,
+		struct xhci_container_ctx *out_ctx,
+		unsigned int ep_index)
 {
 	struct xhci_ep_ctx *out_ep_ctx;
 	struct xhci_ep_ctx *in_ep_ctx;
 
-	out_ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
-	in_ep_ctx = xhci_get_ep_ctx(xhci, vdev->in_ctx, ep_index);
+	out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
+	in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
 
 	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
 	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
@@ -643,13 +646,15 @@ void xhci_endpoint_copy(struct xhci_hcd *xhci,
  * issue a configure endpoint command. Only the context entries field matters,
  * but we'll copy the whole thing anyway.
  */
-void xhci_slot_copy(struct xhci_hcd *xhci, struct xhci_virt_device *vdev)
+void xhci_slot_copy(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx,
+		struct xhci_container_ctx *out_ctx)
 {
 	struct xhci_slot_ctx *in_slot_ctx;
 	struct xhci_slot_ctx *out_slot_ctx;
 
-	in_slot_ctx = xhci_get_slot_ctx(xhci, vdev->in_ctx);
-	out_slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
+	in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
+	out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);
 
 	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
 	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
@@ -754,6 +759,44 @@ static void scratchpad_free(struct xhci_hcd *xhci)
 	xhci->scratchpad = NULL;
 }
 
+struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
+		bool allocate_completion, gfp_t mem_flags)
+{
+	struct xhci_command *command;
+
+	command = kzalloc(sizeof(*command), mem_flags);
+	if (!command)
+		return NULL;
+
+	command->in_ctx =
+		xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, mem_flags);
+	if (!command->in_ctx)
+		return NULL;
+
+	if (allocate_completion) {
+		command->completion =
+			kzalloc(sizeof(struct completion), mem_flags);
+		if (!command->completion) {
+			xhci_free_container_ctx(xhci, command->in_ctx);
+			return NULL;
+		}
+		init_completion(command->completion);
+	}
+
+	command->status = 0;
+	INIT_LIST_HEAD(&command->cmd_list);
+	return command;
+}
+
+void xhci_free_command(struct xhci_hcd *xhci,
+		struct xhci_command *command)
+{
+	xhci_free_container_ctx(xhci,
+			command->in_ctx);
+	kfree(command->completion);
+	kfree(command);
+}
+
 void xhci_mem_cleanup(struct xhci_hcd *xhci)
 {
 	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
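
One wrinkle in xhci_alloc_command() as queued: if xhci_alloc_container_ctx() fails, the early return drops the freshly allocated command on the floor. A sketch of a leak-free error path for that branch, offered as a suggestion rather than as what the hunk above does:

	command = kzalloc(sizeof(*command), mem_flags);
	if (!command)
		return NULL;

	command->in_ctx =
		xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, mem_flags);
	if (!command->in_ctx) {
		/* Free the command itself too, or it leaks here. */
		kfree(command);
		return NULL;
	}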
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 6a72d2022b45..a9379b3bebaf 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -675,7 +675,8 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
 	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
 		xhci_dbg(xhci, "Queueing configure endpoint command\n");
 		xhci_queue_configure_endpoint(xhci,
-				xhci->devs[slot_id]->in_ctx->dma, slot_id);
+				xhci->devs[slot_id]->in_ctx->dma, slot_id,
+				false);
 		xhci_ring_cmd_db(xhci);
 	} else {
 		/* Clear our internal halted state and restart the ring */
@@ -691,6 +692,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 	u64 cmd_dma;
 	dma_addr_t cmd_dequeue_dma;
 	struct xhci_input_control_ctx *ctrl_ctx;
+	struct xhci_virt_device *virt_dev;
 	unsigned int ep_index;
 	struct xhci_ring *ep_ring;
 	unsigned int ep_state;
@@ -721,6 +723,25 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 		xhci_free_virt_device(xhci, slot_id);
 		break;
 	case TRB_TYPE(TRB_CONFIG_EP):
+		virt_dev = xhci->devs[slot_id];
+		/* Check to see if a command in the device's command queue
+		 * matches this one.  Signal the completion or free the command.
+		 */
+		if (!list_empty(&virt_dev->cmd_list)) {
+			struct xhci_command *command;
+			command = list_entry(virt_dev->cmd_list.next,
+					struct xhci_command, cmd_list);
+			if (xhci->cmd_ring->dequeue == command->command_trb) {
+				command->status =
+					GET_COMP_CODE(event->status);
+				list_del(&command->cmd_list);
+				if (command->completion)
+					complete(command->completion);
+				else
+					xhci_free_command(xhci, command);
+			}
+			break;
+		}
 		/*
 		 * Configure endpoint commands can come from the USB core
 		 * configuration or alt setting changes, or because the HW
@@ -729,7 +750,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 		 * not waiting on the configure endpoint command.
 		 */
 		ctrl_ctx = xhci_get_input_control_ctx(xhci,
-				xhci->devs[slot_id]->in_ctx);
+				virt_dev->in_ctx);
 		/* Input ctx add_flags are the endpoint index plus one */
 		ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
 		ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
@@ -1858,12 +1879,27 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 /**** Command Ring Operations ****/
 
-/* Generic function for queueing a command TRB on the command ring */
-static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, u32 field3, u32 field4)
+/* Generic function for queueing a command TRB on the command ring.
+ * Check to make sure there's room on the command ring for one command TRB.
+ * Also check that there's room reserved for commands that must not fail.
+ * If this is a command that must not fail, meaning command_must_succeed = TRUE,
+ * then only check for the number of reserved spots.
+ * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
+ * because the command event handler may want to resubmit a failed command.
+ */
+static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
+		u32 field3, u32 field4, bool command_must_succeed)
 {
-	if (!room_on_ring(xhci, xhci->cmd_ring, 1)) {
+	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
+	if (!command_must_succeed)
+		reserved_trbs++;
+
+	if (!room_on_ring(xhci, xhci->cmd_ring, reserved_trbs)) {
 		if (!in_interrupt())
 			xhci_err(xhci, "ERR: No room for command on command ring\n");
+		if (command_must_succeed)
+			xhci_err(xhci, "ERR: Reserved TRB counting for "
+					"unfailable commands failed.\n");
 		return -ENOMEM;
 	}
 	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
@@ -1874,7 +1910,7 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, u32 fiel
 /* Queue a no-op command on the command ring */
 static int queue_cmd_noop(struct xhci_hcd *xhci)
 {
-	return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP));
+	return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP), false);
 }
 
 /*
@@ -1893,7 +1929,7 @@ void *xhci_setup_one_noop(struct xhci_hcd *xhci)
 int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
 {
 	return queue_command(xhci, 0, 0, 0,
-			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id));
+			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
 }
 
 /* Queue an address device command TRB */
@@ -1902,16 +1938,18 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 {
 	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
 			upper_32_bits(in_ctx_ptr), 0,
-			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id));
+			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
+			false);
 }
 
 /* Queue a configure endpoint command TRB */
 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
-		u32 slot_id)
+		u32 slot_id, bool command_must_succeed)
 {
 	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
 			upper_32_bits(in_ctx_ptr), 0,
-			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id));
+			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
+			command_must_succeed);
 }
 
 /* Queue an evaluate context command TRB */
@@ -1920,7 +1958,8 @@ int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 {
 	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
 			upper_32_bits(in_ctx_ptr), 0,
-			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id));
+			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
+			false);
 }
 
 int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
@@ -1931,7 +1970,7 @@ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
 	u32 type = TRB_TYPE(TRB_STOP_RING);
 
 	return queue_command(xhci, 0, 0, 0,
-			trb_slot_id | trb_ep_index | type);
+			trb_slot_id | trb_ep_index | type, false);
 }
 
 /* Set Transfer Ring Dequeue Pointer command.
@@ -1955,7 +1994,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
 	}
 	return queue_command(xhci, lower_32_bits(addr) | cycle_state,
 			upper_32_bits(addr), 0,
-			trb_slot_id | trb_ep_index | type);
+			trb_slot_id | trb_ep_index | type, false);
 }
 
 int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
@@ -1965,5 +2004,6 @@ int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
 	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
 	u32 type = TRB_TYPE(TRB_RESET_EP);
 
-	return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type);
+	return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
+			false);
 }
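
The reserved-TRB accounting in queue_command() works out as follows: with cmd_ring_reserved_trbs = N, an ordinary command needs room for N + 1 TRBs (its own plus the untouched reserve), while a must-succeed command checks for only N, which lets it consume one of the reserved slots. The counter is deliberately not decremented at queue time so a failed command can be resubmitted. A hypothetical helper for growing the reserve, bounded by the new MAX_RSVD_CMD_TRBS; this is an illustration, not code from this patch:

	/* Hypothetical sketch: set aside one command-ring slot for a future
	 * command that must not fail.  A caller would hold xhci->lock so the
	 * counter and the ring are checked consistently.
	 */
	static int example_reserve_cmd_trb(struct xhci_hcd *xhci)
	{
		if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS)
			return -ENOMEM;
		xhci->cmd_ring_reserved_trbs++;
		return 0;
	}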
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 627092286d1b..36f7d4f91d9f 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -620,6 +620,22 @@ struct xhci_input_control_ctx {
 	u32	rsvd2[6];
 };
 
+/* Represents everything that is needed to issue a command on the command ring.
+ * It's useful to pre-allocate these for commands that cannot fail due to
+ * out-of-memory errors, like freeing streams.
+ */
+struct xhci_command {
+	/* Input context for changing device state */
+	struct xhci_container_ctx	*in_ctx;
+	u32				status;
+	/* If completion is null, no one is waiting on this command
+	 * and the structure can be freed after the command completes.
+	 */
+	struct completion		*completion;
+	union xhci_trb			*command_trb;
+	struct list_head		cmd_list;
+};
+
 /* drop context bitmasks */
 #define	DROP_EP(x)	(0x1 << x)
 /* add context bitmasks */
@@ -658,6 +674,7 @@ struct xhci_virt_device {
 	struct completion	cmd_completion;
 	/* Status of the last command issued for this device */
 	u32			cmd_status;
+	struct list_head	cmd_list;
 };
 
 
@@ -920,6 +937,8 @@ union xhci_trb {
  * It must also be greater than 16.
  */
 #define TRBS_PER_SEGMENT	64
+/* Allow two commands + a link TRB, along with any reserved command TRBs */
+#define MAX_RSVD_CMD_TRBS	(TRBS_PER_SEGMENT - 3)
 #define SEGMENT_SIZE		(TRBS_PER_SEGMENT*16)
 /* TRB buffer pointers can't cross 64KB boundaries */
 #define TRB_MAX_BUFF_SHIFT	16
@@ -1040,6 +1059,7 @@ struct xhci_hcd {
 	/* data structures */
 	struct xhci_device_context_array *dcbaa;
 	struct xhci_ring	*cmd_ring;
+	unsigned int		cmd_ring_reserved_trbs;
 	struct xhci_ring	*event_ring;
 	struct xhci_erst	erst;
 	/* Scratchpad */
@@ -1178,12 +1198,20 @@ unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index);
 unsigned int xhci_last_valid_endpoint(u32 added_ctxs);
 void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep);
 void xhci_endpoint_copy(struct xhci_hcd *xhci,
-		struct xhci_virt_device *vdev, unsigned int ep_index);
-void xhci_slot_copy(struct xhci_hcd *xhci, struct xhci_virt_device *vdev);
+		struct xhci_container_ctx *in_ctx,
+		struct xhci_container_ctx *out_ctx,
+		unsigned int ep_index);
+void xhci_slot_copy(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx,
+		struct xhci_container_ctx *out_ctx);
 int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
 		struct usb_device *udev, struct usb_host_endpoint *ep,
 		gfp_t mem_flags);
 void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
+struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
+		bool allocate_completion, gfp_t mem_flags);
+void xhci_free_command(struct xhci_hcd *xhci,
+		struct xhci_command *command);
 
 #ifdef CONFIG_PCI
 /* xHCI PCI glue */
@@ -1229,7 +1257,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
 int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
 		int slot_id, unsigned int ep_index);
 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
-		u32 slot_id);
+		u32 slot_id, bool command_must_succeed);
 int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 		u32 slot_id);
 int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
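
Taken together, struct xhci_command supports two lifetimes. With a completion allocated, xhci_configure_endpoint() sleeps on it and the submitter frees the command afterwards. With completion left NULL, handle_cmd_completion() frees the command itself once the TRB completes, so the submitter must link the command onto virt_dev->cmd_list (recording the enqueue pointer under xhci->lock, as xhci_configure_endpoint() does) and then never touch it again. A hypothetical fire-and-forget sketch along those lines, built only from calls this patch adds or already uses, not code the patch itself contains:

	/* Hypothetical sketch: submit a configure endpoint command without
	 * waiting.  With allocate_completion = false, command->completion
	 * stays NULL and the command event handler frees the command.
	 */
	static int example_submit_and_forget(struct xhci_hcd *xhci,
			struct usb_device *udev)
	{
		struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
		struct xhci_command *command;
		unsigned long flags;
		int ret;

		command = xhci_alloc_command(xhci, false, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		spin_lock_irqsave(&xhci->lock, flags);
		/* Remember which TRB is ours so the event handler can match
		 * it, and queue it on the device's command list, as
		 * xhci_configure_endpoint() does for the waited-on case.
		 */
		command->command_trb = xhci->cmd_ring->enqueue;
		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
		ret = xhci_queue_configure_endpoint(xhci, command->in_ctx->dma,
				udev->slot_id, false);
		if (ret) {
			list_del(&command->cmd_list);
			spin_unlock_irqrestore(&xhci->lock, flags);
			xhci_free_command(xhci, command);
			return ret;
		}
		xhci_ring_cmd_db(xhci);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return 0;
	}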