path: root/drivers/usb/host/xhci-hcd.c
Diffstat (limited to 'drivers/usb/host/xhci-hcd.c')
-rw-r--r--	drivers/usb/host/xhci-hcd.c	63
1 file changed, 31 insertions(+), 32 deletions(-)
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 489657c853e7..dba3e07ccd09 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -687,11 +687,14 @@ done:
  * different endpoint descriptor in usb_host_endpoint.
  * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
  * not allowed.
+ *
+ * The USB core will not allow URBs to be queued to an endpoint that is being
+ * disabled, so there's no need for mutual exclusion to protect
+ * the xhci->devs[slot_id] structure.
  */
 int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 		struct usb_host_endpoint *ep)
 {
-	unsigned long flags;
 	struct xhci_hcd *xhci;
 	struct xhci_device_control *in_ctx;
 	unsigned int last_ctx;
@@ -714,11 +717,9 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 		return 0;
 	}
 
-	spin_lock_irqsave(&xhci->lock, flags);
 	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
 		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
 				__func__);
-		spin_unlock_irqrestore(&xhci->lock, flags);
 		return -EINVAL;
 	}
 
@@ -732,7 +733,6 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	    in_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
 		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
 				__func__, ep);
-		spin_unlock_irqrestore(&xhci->lock, flags);
 		return 0;
 	}
 
@@ -752,8 +752,6 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 
 	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
 
-	spin_unlock_irqrestore(&xhci->lock, flags);
-
 	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
 			(unsigned int) ep->desc.bEndpointAddress,
 			udev->slot_id,
@@ -771,11 +769,14 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
  * different endpoint descriptor in usb_host_endpoint.
  * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
  * not allowed.
+ *
+ * The USB core will not allow URBs to be queued to an endpoint until the
+ * configuration or alt setting is installed in the device, so there's no need
+ * for mutual exclusion to protect the xhci->devs[slot_id] structure.
  */
 int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 		struct usb_host_endpoint *ep)
 {
-	unsigned long flags;
 	struct xhci_hcd *xhci;
 	struct xhci_device_control *in_ctx;
 	unsigned int ep_index;
@@ -802,11 +803,9 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 		return 0;
 	}
 
-	spin_lock_irqsave(&xhci->lock, flags);
 	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
 		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
 				__func__);
-		spin_unlock_irqrestore(&xhci->lock, flags);
 		return -EINVAL;
 	}
 
@@ -819,14 +818,18 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	if (in_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
 		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
 				__func__, ep);
-		spin_unlock_irqrestore(&xhci->lock, flags);
 		return 0;
 	}
 
-	if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id], udev, ep) < 0) {
+	/*
+	 * Configuration and alternate setting changes must be done in
+	 * process context, not interrupt context (or so documenation
+	 * for usb_set_interface() and usb_set_configuration() claim).
+	 */
+	if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
+			udev, ep, GFP_KERNEL) < 0) {
 		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
 				__func__, ep->desc.bEndpointAddress);
-		spin_unlock_irqrestore(&xhci->lock, flags);
 		return -ENOMEM;
 	}
 
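The hunk above drops xhci->lock around xhci_endpoint_init() and lets it allocate with GFP_KERNEL, which is only safe because the caller now runs in process context with no spinlock held. Below is a minimal userspace sketch (not xhci code) of the general rule being relied on, never call a blocking allocator while holding a lock: allocate first, then keep the critical section short. The names (ep_ctx, slots, table_lock, add_endpoint) are invented, and a pthread mutex stands in for the kernel spinlock.

/* Editorial sketch only; all names invented, a pthread mutex in place of a spinlock. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ep_ctx {
	int configured;
};

static struct ep_ctx *slots[256];              /* rough stand-in for xhci->devs[] */
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static int add_endpoint(int slot_id)
{
	/* The allocation may block, so do it with no lock held (cf. GFP_KERNEL). */
	struct ep_ctx *ctx = calloc(1, sizeof(*ctx));
	if (!ctx)
		return -1;
	ctx->configured = 1;

	/* Keep the critical section down to the shared-table update. */
	pthread_mutex_lock(&table_lock);
	slots[slot_id] = ctx;
	pthread_mutex_unlock(&table_lock);
	return 0;
}

int main(void)
{
	if (add_endpoint(3) == 0)
		printf("slot 3 configured: %d\n", slots[3]->configured);
	free(slots[3]);
	return 0;
}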
@@ -847,7 +850,6 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 		in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
 	}
 	new_slot_info = in_ctx->slot.dev_info;
-	spin_unlock_irqrestore(&xhci->lock, flags);
 
 	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
 			(unsigned int) ep->desc.bEndpointAddress,
@@ -883,6 +885,16 @@ static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev)
 	}
 }
 
+/* Called after one or more calls to xhci_add_endpoint() or
+ * xhci_drop_endpoint(). If this call fails, the USB core is expected
+ * to call xhci_reset_bandwidth().
+ *
+ * Since we are in the middle of changing either configuration or
+ * installing a new alt setting, the USB core won't allow URBs to be
+ * enqueued for any endpoint on the old config or interface. Nothing
+ * else should be touching the xhci->devs[slot_id] structure, so we
+ * don't need to take the xhci->lock for manipulating that.
+ */
 int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 {
 	int i;
@@ -897,11 +909,9 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 		return ret;
 	xhci = hcd_to_xhci(hcd);
 
-	spin_lock_irqsave(&xhci->lock, flags);
 	if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) {
 		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
 				__func__);
-		spin_unlock_irqrestore(&xhci->lock, flags);
 		return -EINVAL;
 	}
 	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
@@ -916,11 +926,12 @@
 	xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma,
 			LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
 
+	spin_lock_irqsave(&xhci->lock, flags);
 	ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx_dma,
 			udev->slot_id);
 	if (ret < 0) {
-		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
 		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
 		return -ENOMEM;
 	}
 	xhci_ring_cmd_db(xhci);
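In the hunk above, xhci->lock is now taken only just before the Configure Endpoint command is queued, and the error path unlocks before printing the debug message; the wait for command completion that follows (outside this hunk) runs with the lock released. Below is a small userspace sketch of that shape, lock only around queuing and the doorbell, then sleep for completion unlocked. The names (pending_cmd, queue_lock, cmd_done, submit_and_wait, worker) are invented; a pthread mutex and POSIX semaphores stand in for the spinlock and struct completion.

/* Editorial sketch only; all names invented, pthreads in place of kernel primitives. */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static int pending_cmd;                        /* one-slot stand-in for the command ring */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static sem_t cmd_queued;                       /* "doorbell" */
static sem_t cmd_done;                         /* plays the role of a struct completion */

static void *worker(void *arg)                 /* stand-in for the completion handler */
{
	(void)arg;
	sem_wait(&cmd_queued);                 /* "hardware" picks up the command */
	pthread_mutex_lock(&queue_lock);
	int op = pending_cmd;
	pthread_mutex_unlock(&queue_lock);
	printf("worker handled command %d\n", op);
	sem_post(&cmd_done);                   /* like the command-completion event */
	return NULL;
}

static void submit_and_wait(int opcode)
{
	pthread_mutex_lock(&queue_lock);       /* lock only around queuing + doorbell */
	pending_cmd = opcode;
	sem_post(&cmd_queued);                 /* "ring the doorbell" */
	pthread_mutex_unlock(&queue_lock);

	sem_wait(&cmd_done);                   /* sleep for completion, lock released */
}

int main(void)
{
	pthread_t tid;

	sem_init(&cmd_queued, 0, 0);
	sem_init(&cmd_done, 0, 0);
	pthread_create(&tid, NULL, worker, NULL);
	submit_and_wait(42);
	pthread_join(tid, NULL);
	return 0;
}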
@@ -937,7 +948,6 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 		return -ETIME;
 	}
 
-	spin_lock_irqsave(&xhci->lock, flags);
 	switch (virt_dev->cmd_status) {
 	case COMP_ENOMEM:
 		dev_warn(&udev->dev, "Not enough host controller resources "
@@ -968,7 +978,6 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 	}
 	if (ret) {
 		/* Callee should call reset_bandwidth() */
-		spin_unlock_irqrestore(&xhci->lock, flags);
 		return ret;
 	}
 
@@ -986,14 +995,11 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 		}
 	}
 
-	spin_unlock_irqrestore(&xhci->lock, flags);
-
 	return ret;
 }
 
 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 {
-	unsigned long flags;
 	struct xhci_hcd *xhci;
 	struct xhci_virt_device *virt_dev;
 	int i, ret;
@@ -1003,11 +1009,9 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 		return;
 	xhci = hcd_to_xhci(hcd);
 
-	spin_lock_irqsave(&xhci->lock, flags);
 	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
 		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
 				__func__);
-		spin_unlock_irqrestore(&xhci->lock, flags);
 		return;
 	}
 	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
@@ -1020,7 +1024,6 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 		}
 	}
 	xhci_zero_in_ctx(virt_dev);
-	spin_unlock_irqrestore(&xhci->lock, flags);
 }
 
 /*
@@ -1046,7 +1049,7 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
 	spin_unlock_irqrestore(&xhci->lock, flags);
 	/*
 	 * Event command completion handler will free any data structures
-	 * associated with the slot
+	 * associated with the slot. XXX Can free sleep?
 	 */
 }
 
@@ -1081,15 +1084,15 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
 		return 0;
 	}
 
-	spin_lock_irqsave(&xhci->lock, flags);
 	if (!xhci->slot_id) {
 		xhci_err(xhci, "Error while assigning device slot ID\n");
-		spin_unlock_irqrestore(&xhci->lock, flags);
 		return 0;
 	}
+	/* xhci_alloc_virt_device() does not touch rings; no need to lock */
 	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
 		/* Disable slot, if we can do it without mem alloc */
 		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
+		spin_lock_irqsave(&xhci->lock, flags);
 		if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
 			xhci_ring_cmd_db(xhci);
 		spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1098,7 +1101,6 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
 	udev->slot_id = xhci->slot_id;
 	/* Is this a LS or FS device under a HS hub? */
 	/* Hub or peripherial? */
-	spin_unlock_irqrestore(&xhci->lock, flags);
 	return 1;
 }
 
@@ -1125,7 +1127,6 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 		return -EINVAL;
 	}
 
-	spin_lock_irqsave(&xhci->lock, flags);
 	virt_dev = xhci->devs[udev->slot_id];
 
 	/* If this is a Set Address to an unconfigured device, setup ep 0 */
@@ -1133,6 +1134,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 		xhci_setup_addressable_virt_dev(xhci, udev);
 	/* Otherwise, assume the core has the device configured how it wants */
 
+	spin_lock_irqsave(&xhci->lock, flags);
 	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx_dma,
 			udev->slot_id);
 	if (ret) {
@@ -1157,7 +1159,6 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 		return -ETIME;
 	}
 
-	spin_lock_irqsave(&xhci->lock, flags);
 	switch (virt_dev->cmd_status) {
 	case COMP_CTX_STATE:
 	case COMP_EBADSLT:
@@ -1179,7 +1180,6 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 		break;
 	}
 	if (ret) {
-		spin_unlock_irqrestore(&xhci->lock, flags);
 		return ret;
 	}
 	temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[0]);
@@ -1211,7 +1211,6 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 	/* Mirror flags in the output context for future ep enable/disable */
 	virt_dev->out_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
 	virt_dev->out_ctx->drop_flags = 0;
-	spin_unlock_irqrestore(&xhci->lock, flags);
 
 	xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
 	/* XXX Meh, not sure if anyone else but choose_address uses this. */