author	Alan Stern <stern@rowland.harvard.edu>	2007-08-02 15:06:54 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2007-10-12 17:55:02 -0400
commit	9a9bf406df3ce238089d14f4cb41157ca56d8ad3 (patch)
tree	bc4aff46d139d6f0f8cc852744fe2fe991ea53c8 /drivers
parent	9439eb94b5c374d5b02699f8897fc43aa3603701 (diff)
USB: separate out endpoint queue management and DMA mapping routines
This patch (as953) separates out three key portions from usb_hcd_submit_urb(),
usb_hcd_unlink_urb(), and usb_hcd_giveback_urb() and puts them in separate
functions of their own.  In the next patch, these functions will be called
directly by host controller drivers while holding their private spinlocks,
which will remove the possibility of some unpleasant races.

The code responsible for mapping and unmapping DMA buffers is also placed
into a couple of separate subroutines, for the sake of cleanliness and
consistency.

Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/usb/core/hcd.c	316
1 file changed, 166 insertions(+), 150 deletions(-)
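The value of the split is that usb_hcd_link_urb_to_ep(), usb_hcd_check_unlink_urb(),
and usb_hcd_unlink_urb_from_ep() become self-contained bookkeeping steps that a host
controller driver can eventually call while holding its own spinlock.  A minimal
sketch of that intended calling pattern follows.  It is a sketch only: it assumes the
helpers are made callable by HCDs (promised here for the next patch, not done in this
one), and struct example_hcd, hcd_to_example(), and example_hw_queue() are
hypothetical stand-ins for a real driver's private state and hardware scheduling.
In this patch the helpers still take hcd_urb_list_lock internally, so here they
simply nest under the driver's lock.

	#include <linux/usb.h>
	#include "hcd.h"	/* the USB core's private HCD header at the time */

	/* Hypothetical driver-private state, reached through usb_hcd's
	 * zero-length hcd_priv[] tail (the usual ehci/ohci idiom).
	 */
	struct example_hcd {
		spinlock_t	lock;	/* the driver's private spinlock */
	};

	static inline struct example_hcd *hcd_to_example(struct usb_hcd *hcd)
	{
		return (struct example_hcd *) hcd->hcd_priv;
	}

	static int example_hw_queue(struct example_hcd *ex, struct urb *urb)
	{
		/* stand-in for programming the URB into the hardware schedule */
		return 0;
	}

	/* Sketch of a driver's urb_enqueue once the helpers are callable from
	 * HCDs: the URB is linked to the endpoint queue while the driver's own
	 * lock is held, closing the race window the commit message describes.
	 */
	static int example_urb_enqueue(struct usb_hcd *hcd,
			struct usb_host_endpoint *ep, struct urb *urb,
			gfp_t mem_flags)
	{
		struct example_hcd	*ex = hcd_to_example(hcd);
		unsigned long		flags;
		int			rc;

		spin_lock_irqsave(&ex->lock, flags);
		rc = usb_hcd_link_urb_to_ep(hcd, urb);	/* core bookkeeping */
		if (rc == 0) {
			rc = example_hw_queue(ex, urb);
			if (rc)
				usb_hcd_unlink_urb_from_ep(hcd, urb);
		}
		spin_unlock_irqrestore(&ex->lock, flags);
		return rc;
	}

The completion path inverts the same bookkeeping: usb_hcd_giveback_urb() now calls
usb_hcd_unlink_urb_from_ep() and then unmap_urb_for_dma(), so the endpoint queue is
updated before the DMA buffers are unmapped and the URB is returned.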
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index f8e7deb03ee9..eb2121788264 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -914,99 +914,117 @@ EXPORT_SYMBOL (usb_calc_bus_time);
 
 /*-------------------------------------------------------------------------*/
 
-static void urb_unlink(struct usb_hcd *hcd, struct urb *urb)
+static int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb)
 {
 	unsigned long flags;
+	int rc = 0;
 
-	/* clear all state linking urb to this dev (and hcd) */
 	spin_lock_irqsave(&hcd_urb_list_lock, flags);
-	list_del_init (&urb->urb_list);
-	spin_unlock_irqrestore(&hcd_urb_list_lock, flags);
 
-	if (hcd->self.uses_dma && !is_root_hub(urb->dev)) {
-		if (usb_endpoint_xfer_control(&urb->ep->desc)
-		    && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP))
-			dma_unmap_single (hcd->self.controller, urb->setup_dma,
-					sizeof (struct usb_ctrlrequest),
-					DMA_TO_DEVICE);
-		if (urb->transfer_buffer_length != 0
-		    && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP))
-			dma_unmap_single (hcd->self.controller,
-					urb->transfer_dma,
-					urb->transfer_buffer_length,
-					usb_urb_dir_in(urb)
-					? DMA_FROM_DEVICE
-					: DMA_TO_DEVICE);
+	/* Check that the URB isn't being killed */
+	if (unlikely(urb->reject)) {
+		rc = -EPERM;
+		goto done;
 	}
-}
-
-/* may be called in any context with a valid urb->dev usecount
- * caller surrenders "ownership" of urb
- * expects usb_submit_urb() to have sanity checked and conditioned all
- * inputs in the urb
- */
-int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
-{
-	int			status;
-	struct usb_hcd		*hcd = bus_to_hcd(urb->dev->bus);
-	unsigned long		flags;
-
-	if (!hcd)
-		return -ENODEV;
 
-	usbmon_urb_submit(&hcd->self, urb);
+	if (unlikely(!urb->ep->enabled)) {
+		rc = -ENOENT;
+		goto done;
+	}
 
 	/*
-	 * Atomically queue the urb,  first to our records, then to the HCD.
-	 * Access to urb->status is controlled by urb->lock ... changes on
-	 * i/o completion (normal or fault) or unlinking.
+	 * Check the host controller's state and add the URB to the
+	 * endpoint's queue.
 	 */
-
-	// FIXME:  verify that quiescing hc works right (RH cleans up)
-
-	spin_lock_irqsave(&hcd_urb_list_lock, flags);
-	if (unlikely(!urb->ep->enabled))
-		status = -ENOENT;
-	else if (unlikely (urb->reject))
-		status = -EPERM;
-	else switch (hcd->state) {
+	switch (hcd->state) {
 	case HC_STATE_RUNNING:
 	case HC_STATE_RESUMING:
-		list_add_tail (&urb->urb_list, &urb->ep->urb_list);
-		status = 0;
+		list_add_tail(&urb->urb_list, &urb->ep->urb_list);
 		break;
 	default:
-		status = -ESHUTDOWN;
-		break;
+		rc = -ESHUTDOWN;
+		goto done;
 	}
+ done:
 	spin_unlock_irqrestore(&hcd_urb_list_lock, flags);
-	if (status) {
-		INIT_LIST_HEAD (&urb->urb_list);
-		usbmon_urb_submit_error(&hcd->self, urb, status);
-		return status;
+	return rc;
+}
+
+static int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
+		int status)
+{
+	unsigned long flags;
+	struct list_head *tmp;
+	int rc = 0;
+
+	/*
+	 * we contend for urb->status with the hcd core,
+	 * which changes it while returning the urb.
+	 *
+	 * Caller guaranteed that the urb pointer hasn't been freed, and
+	 * that it was submitted.  But as a rule it can't know whether or
+	 * not it's already been unlinked ... so we respect the reversed
+	 * lock sequence needed for the usb_hcd_giveback_urb() code paths
+	 * (urb lock, then hcd_urb_list_lock) in case some other CPU is now
+	 * unlinking it.
+	 */
+	spin_lock_irqsave(&urb->lock, flags);
+	spin_lock(&hcd_urb_list_lock);
+
+	/* insist the urb is still queued */
+	list_for_each(tmp, &urb->ep->urb_list) {
+		if (tmp == &urb->urb_list)
+			break;
+	}
+	if (tmp != &urb->urb_list) {
+		rc = -EIDRM;
+		goto done;
 	}
 
-	/* increment urb's reference count as part of giving it to the HCD
-	 * (which now controls it).  HCD guarantees that it either returns
-	 * an error or calls giveback(), but not both.
+	/* Any status except -EINPROGRESS means something already started to
+	 * unlink this URB from the hardware.  So there's no more work to do.
 	 */
-	urb = usb_get_urb (urb);
-	atomic_inc (&urb->use_count);
-
-	if (is_root_hub(urb->dev)) {
-		/* NOTE:  requirement on hub callers (usbfs and the hub
-		 * driver, for now) that URBs' urb->transfer_buffer be
-		 * valid and usb_buffer_{sync,unmap}() not be needed, since
-		 * they could clobber root hub response data.
-		 */
-		status = rh_urb_enqueue (hcd, urb);
+	if (urb->status != -EINPROGRESS) {
+		rc = -EBUSY;
 		goto done;
 	}
+	urb->status = status;
+
+	/* IRQ setup can easily be broken so that USB controllers
+	 * never get completion IRQs ... maybe even the ones we need to
+	 * finish unlinking the initial failed usb_set_address()
+	 * or device descriptor fetch.
+	 */
+	if (!test_bit(HCD_FLAG_SAW_IRQ, &hcd->flags) &&
+			!is_root_hub(urb->dev)) {
+		dev_warn(hcd->self.controller, "Unlink after no-IRQ?  "
+			"Controller is probably using the wrong IRQ.\n");
+		set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
+	}
+
+ done:
+	spin_unlock(&hcd_urb_list_lock);
+	spin_unlock_irqrestore (&urb->lock, flags);
+	return rc;
+}
+
+static void usb_hcd_unlink_urb_from_ep(struct usb_hcd *hcd, struct urb *urb)
+{
+	unsigned long flags;
 
-	/* lower level hcd code should use *_dma exclusively,
+	/* clear all state linking urb to this dev (and hcd) */
+	spin_lock_irqsave(&hcd_urb_list_lock, flags);
+	list_del_init(&urb->urb_list);
+	spin_unlock_irqrestore(&hcd_urb_list_lock, flags);
+}
+
+static void map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+{
+	/* Map the URB's buffers for DMA access.
+	 * Lower level HCD code should use *_dma exclusively,
 	 * unless it uses pio or talks to another transport.
 	 */
-	if (hcd->self.uses_dma) {
+	if (hcd->self.uses_dma && !is_root_hub(urb->dev)) {
 		if (usb_endpoint_xfer_control(&urb->ep->desc)
 		    && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP))
 			urb->setup_dma = dma_map_single (
@@ -1024,16 +1042,73 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
 					? DMA_FROM_DEVICE
 					: DMA_TO_DEVICE);
 	}
+}
 
-	status = hcd->driver->urb_enqueue (hcd, urb->ep, urb, mem_flags);
-done:
-	if (unlikely (status)) {
-		urb_unlink(hcd, urb);
-		atomic_dec (&urb->use_count);
-		if (urb->reject)
-			wake_up (&usb_kill_urb_queue);
+static void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+{
+	if (hcd->self.uses_dma && !is_root_hub(urb->dev)) {
+		if (usb_endpoint_xfer_control(&urb->ep->desc)
+		    && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP))
+			dma_unmap_single(hcd->self.controller, urb->setup_dma,
+					sizeof(struct usb_ctrlrequest),
+					DMA_TO_DEVICE);
+		if (urb->transfer_buffer_length != 0
+		    && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP))
+			dma_unmap_single(hcd->self.controller,
+					urb->transfer_dma,
+					urb->transfer_buffer_length,
+					usb_urb_dir_in(urb)
+					? DMA_FROM_DEVICE
+					: DMA_TO_DEVICE);
+	}
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* may be called in any context with a valid urb->dev usecount
+ * caller surrenders "ownership" of urb
+ * expects usb_submit_urb() to have sanity checked and conditioned all
+ * inputs in the urb
+ */
+int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
+{
+	int status;
+	struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
+
+	/* increment urb's reference count as part of giving it to the HCD
+	 * (which will control it).  HCD guarantees that it either returns
+	 * an error or calls giveback(), but not both.
+	 */
+	usb_get_urb(urb);
+	atomic_inc(&urb->use_count);
+	usbmon_urb_submit(&hcd->self, urb);
+
+	/* NOTE requirements on root-hub callers (usbfs and the hub
+	 * driver, for now):  URBs' urb->transfer_buffer must be
+	 * valid and usb_buffer_{sync,unmap}() not be needed, since
+	 * they could clobber root hub response data.  Also, control
+	 * URBs must be submitted in process context with interrupts
+	 * enabled.
+	 */
+	status = usb_hcd_link_urb_to_ep(hcd, urb);
+	if (!status) {
+		map_urb_for_dma(hcd, urb);
+		if (is_root_hub(urb->dev))
+			status = rh_urb_enqueue(hcd, urb);
+		else
+			status = hcd->driver->urb_enqueue(hcd, urb->ep, urb,
+					mem_flags);
+	}
+
+	if (unlikely(status)) {
 		usbmon_urb_submit_error(&hcd->self, urb, status);
-		usb_put_urb (urb);
+		unmap_urb_for_dma(hcd, urb);
+		usb_hcd_unlink_urb_from_ep(hcd, urb);
+		INIT_LIST_HEAD(&urb->urb_list);
+		atomic_dec(&urb->use_count);
+		if (urb->reject)
+			wake_up(&usb_kill_urb_queue);
+		usb_put_urb(urb);
 	}
 	return status;
 }
@@ -1074,78 +1149,20 @@ unlink1 (struct usb_hcd *hcd, struct urb *urb)
  */
 int usb_hcd_unlink_urb (struct urb *urb, int status)
 {
-	struct usb_hcd		*hcd = NULL;
-	struct device		*sys = NULL;
-	unsigned long		flags;
-	struct list_head	*tmp;
-	int			retval;
-
-	/*
-	 * we contend for urb->status with the hcd core,
-	 * which changes it while returning the urb.
-	 *
-	 * Caller guaranteed that the urb pointer hasn't been freed, and
-	 * that it was submitted.  But as a rule it can't know whether or
-	 * not it's already been unlinked ... so we respect the reversed
-	 * lock sequence needed for the usb_hcd_giveback_urb() code paths
-	 * (urb lock, then hcd_urb_list_lock) in case some other CPU is now
-	 * unlinking it.
-	 */
-	spin_lock_irqsave (&urb->lock, flags);
-	spin_lock(&hcd_urb_list_lock);
+	struct usb_hcd		*hcd;
+	int			retval;
 
-	sys = &urb->dev->dev;
 	hcd = bus_to_hcd(urb->dev->bus);
-	if (hcd == NULL) {
-		retval = -ENODEV;
-		goto done;
-	}
-
-	/* insist the urb is still queued */
-	list_for_each(tmp, &urb->ep->urb_list) {
-		if (tmp == &urb->urb_list)
-			break;
-	}
-	if (tmp != &urb->urb_list) {
-		retval = -EIDRM;
-		goto done;
-	}
 
-	/* Any status except -EINPROGRESS means something already started to
-	 * unlink this URB from the hardware.  So there's no more work to do.
-	 */
-	if (urb->status != -EINPROGRESS) {
-		retval = -EBUSY;
-		goto done;
-	}
+	retval = usb_hcd_check_unlink_urb(hcd, urb, status);
+	if (!retval)
+		retval = unlink1(hcd, urb);
 
-	/* IRQ setup can easily be broken so that USB controllers
-	 * never get completion IRQs ... maybe even the ones we need to
-	 * finish unlinking the initial failed usb_set_address()
-	 * or device descriptor fetch.
-	 */
-	if (!test_bit(HCD_FLAG_SAW_IRQ, &hcd->flags) &&
-			!is_root_hub(urb->dev)) {
-		dev_warn (hcd->self.controller, "Unlink after no-IRQ?  "
-			"Controller is probably using the wrong IRQ.\n");
-		set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
-	}
-
-	urb->status = status;
-
-	spin_unlock(&hcd_urb_list_lock);
-	spin_unlock_irqrestore (&urb->lock, flags);
-
-	retval = unlink1 (hcd, urb);
 	if (retval == 0)
 		retval = -EINPROGRESS;
-	return retval;
-
-done:
-	spin_unlock(&hcd_urb_list_lock);
-	spin_unlock_irqrestore (&urb->lock, flags);
-	if (retval != -EIDRM && sys && sys->driver)
-		dev_dbg (sys, "hcd_unlink_urb %p fail %d\n", urb, retval);
+	else if (retval != -EIDRM)
+		dev_dbg(&urb->dev->dev, "hcd_unlink_urb %p fail %d\n",
+				urb, retval);
 	return retval;
 }
 
@@ -1165,7 +1182,8 @@ done:
  */
 void usb_hcd_giveback_urb (struct usb_hcd *hcd, struct urb *urb)
 {
-	urb_unlink(hcd, urb);
+	usb_hcd_unlink_urb_from_ep(hcd, urb);
+	unmap_urb_for_dma(hcd, urb);
 	usbmon_urb_complete (&hcd->self, urb);
 	usb_unanchor_urb(urb);
 
@@ -1194,12 +1212,12 @@ void usb_hcd_endpoint_disable (struct usb_device *udev,
 	struct usb_hcd		*hcd;
 	struct urb		*urb;
 
+	might_sleep();
 	hcd = bus_to_hcd(udev->bus);
-	local_irq_disable ();
 
 	/* ep is already gone from udev->ep_{in,out}[]; no more submits */
 rescan:
-	spin_lock(&hcd_urb_list_lock);
+	spin_lock_irq(&hcd_urb_list_lock);
 	list_for_each_entry (urb, &ep->urb_list, urb_list) {
 		int	tmp;
 		int	is_in;
@@ -1244,13 +1262,11 @@ rescan:
 		/* list contents may have changed */
 		goto rescan;
 	}
-	spin_unlock(&hcd_urb_list_lock);
-	local_irq_enable ();
+	spin_unlock_irq(&hcd_urb_list_lock);
 
 	/* synchronize with the hardware, so old configuration state
 	 * clears out immediately (and will be freed).
 	 */
-	might_sleep ();
 	if (hcd->driver->endpoint_disable)
 		hcd->driver->endpoint_disable (hcd, ep);
 
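The dequeue side would follow the same shape, shown in one more hypothetical
sketch reusing example_hcd from the earlier block: the driver validates the
unlink request with usb_hcd_check_unlink_urb() under its private lock before
touching the hardware schedule.  The three-argument urb_dequeue() here is an
assumption about the follow-up interface; as of this commit the hc_driver
method still takes only (hcd, urb).

	/* Hypothetical: stop the hardware from completing this URB; a real
	 * driver would finish the unlink asynchronously and then give back.
	 */
	static void example_hw_dequeue(struct example_hcd *ex, struct urb *urb)
	{
	}

	static int example_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
			int status)
	{
		struct example_hcd	*ex = hcd_to_example(hcd);
		unsigned long		flags;
		int			rc;

		spin_lock_irqsave(&ex->lock, flags);
		rc = usb_hcd_check_unlink_urb(hcd, urb, status);
		if (rc == 0)
			example_hw_dequeue(ex, urb);	/* start async unlink */
		spin_unlock_irqrestore(&ex->lock, flags);
		return rc;
	}

Because the check and the hardware dequeue happen under one lock, the URB
cannot be given back between them, which is exactly the race the commit
message says the follow-up patch removes.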