diff options
author | Alan Stern <stern@rowland.harvard.edu> | 2007-08-08 11:48:02 -0400 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@suse.de> | 2007-10-12 17:55:10 -0400 |
commit | e9df41c5c5899259541dc928872cad4d07b82076 (patch) | |
tree | 12bb0917eeecbe62b2b5d3dc576806c7f2728550 /drivers/usb/core/hcd.c | |
parent | b0e396e3097ce4914c643bc3f0c2fe0098f551eb (diff) |
USB: make HCDs responsible for managing endpoint queues
This patch (as954) implements a suggestion of David Brownell's. Now
the host controller drivers are responsible for linking and unlinking
URBs to/from their endpoint queues. This eliminates the possibility of
strange situations where usbcore thinks an URB is linked but the HCD
thinks it isn't. It also means HCDs no longer have to check for URBs
being dequeued before they were fully enqueued.
In addition to the core changes, this requires changing every host
controller driver and the root-hub URB handler. For the most part the
required changes are fairly small; drivers have to call
usb_hcd_link_urb_to_ep() in their urb_enqueue method,
usb_hcd_check_unlink_urb() in their urb_dequeue method, and
usb_hcd_unlink_urb_from_ep() before giving URBs back. A few HCDs make
matters more complicated by the way they split up the flow of control.
In addition some method interfaces get changed. The endpoint argument
for urb_enqueue is now redundant so it is removed. The unlink status
is required by usb_hcd_check_unlink_urb(), so it has been added to
urb_dequeue.
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
CC: David Brownell <david-b@pacbell.net>
CC: Olav Kongas <ok@artecdesign.ee>
CC: Tony Olech <tony.olech@elandigitalsystems.com>
CC: Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/usb/core/hcd.c')
-rw-r--r-- | drivers/usb/core/hcd.c | 255 |
1 files changed, 131 insertions, 124 deletions
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index a5a46a55376..a853f63b925 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c | |||
@@ -356,11 +356,17 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb) | |||
356 | const u8 *bufp = tbuf; | 356 | const u8 *bufp = tbuf; |
357 | int len = 0; | 357 | int len = 0; |
358 | int patch_wakeup = 0; | 358 | int patch_wakeup = 0; |
359 | int status = 0; | 359 | int status; |
360 | int n; | 360 | int n; |
361 | 361 | ||
362 | might_sleep(); | 362 | might_sleep(); |
363 | 363 | ||
364 | spin_lock_irq(&hcd_root_hub_lock); | ||
365 | status = usb_hcd_link_urb_to_ep(hcd, urb); | ||
366 | spin_unlock_irq(&hcd_root_hub_lock); | ||
367 | if (status) | ||
368 | return status; | ||
369 | |||
364 | cmd = (struct usb_ctrlrequest *) urb->setup_packet; | 370 | cmd = (struct usb_ctrlrequest *) urb->setup_packet; |
365 | typeReq = (cmd->bRequestType << 8) | cmd->bRequest; | 371 | typeReq = (cmd->bRequestType << 8) | cmd->bRequest; |
366 | wValue = le16_to_cpu (cmd->wValue); | 372 | wValue = le16_to_cpu (cmd->wValue); |
@@ -525,10 +531,9 @@ error: | |||
525 | 531 | ||
526 | /* any errors get returned through the urb completion */ | 532 | /* any errors get returned through the urb completion */ |
527 | spin_lock_irq(&hcd_root_hub_lock); | 533 | spin_lock_irq(&hcd_root_hub_lock); |
528 | spin_lock(&urb->lock); | ||
529 | if (urb->status == -EINPROGRESS) | 534 | if (urb->status == -EINPROGRESS) |
530 | urb->status = status; | 535 | urb->status = status; |
531 | spin_unlock(&urb->lock); | 536 | usb_hcd_unlink_urb_from_ep(hcd, urb); |
532 | 537 | ||
533 | /* This peculiar use of spinlocks echoes what real HC drivers do. | 538 | /* This peculiar use of spinlocks echoes what real HC drivers do. |
534 | * Avoiding calls to local_irq_disable/enable makes the code | 539 | * Avoiding calls to local_irq_disable/enable makes the code |
@@ -571,26 +576,21 @@ void usb_hcd_poll_rh_status(struct usb_hcd *hcd) | |||
571 | spin_lock_irqsave(&hcd_root_hub_lock, flags); | 576 | spin_lock_irqsave(&hcd_root_hub_lock, flags); |
572 | urb = hcd->status_urb; | 577 | urb = hcd->status_urb; |
573 | if (urb) { | 578 | if (urb) { |
574 | spin_lock(&urb->lock); | 579 | hcd->poll_pending = 0; |
575 | if (urb->status == -EINPROGRESS) { | 580 | hcd->status_urb = NULL; |
576 | hcd->poll_pending = 0; | 581 | urb->status = 0; |
577 | hcd->status_urb = NULL; | 582 | urb->hcpriv = NULL; |
578 | urb->status = 0; | 583 | urb->actual_length = length; |
579 | urb->hcpriv = NULL; | 584 | memcpy(urb->transfer_buffer, buffer, length); |
580 | urb->actual_length = length; | ||
581 | memcpy(urb->transfer_buffer, buffer, length); | ||
582 | } else /* urb has been unlinked */ | ||
583 | length = 0; | ||
584 | spin_unlock(&urb->lock); | ||
585 | 585 | ||
586 | usb_hcd_unlink_urb_from_ep(hcd, urb); | ||
586 | spin_unlock(&hcd_root_hub_lock); | 587 | spin_unlock(&hcd_root_hub_lock); |
587 | usb_hcd_giveback_urb(hcd, urb); | 588 | usb_hcd_giveback_urb(hcd, urb); |
588 | spin_lock(&hcd_root_hub_lock); | 589 | spin_lock(&hcd_root_hub_lock); |
589 | } else | 590 | } else { |
590 | length = 0; | 591 | length = 0; |
591 | |||
592 | if (length <= 0) | ||
593 | hcd->poll_pending = 1; | 592 | hcd->poll_pending = 1; |
593 | } | ||
594 | spin_unlock_irqrestore(&hcd_root_hub_lock, flags); | 594 | spin_unlock_irqrestore(&hcd_root_hub_lock, flags); |
595 | } | 595 | } |
596 | 596 | ||
@@ -619,24 +619,26 @@ static int rh_queue_status (struct usb_hcd *hcd, struct urb *urb) | |||
619 | int len = 1 + (urb->dev->maxchild / 8); | 619 | int len = 1 + (urb->dev->maxchild / 8); |
620 | 620 | ||
621 | spin_lock_irqsave (&hcd_root_hub_lock, flags); | 621 | spin_lock_irqsave (&hcd_root_hub_lock, flags); |
622 | if (urb->status != -EINPROGRESS) /* already unlinked */ | 622 | if (hcd->status_urb || urb->transfer_buffer_length < len) { |
623 | retval = urb->status; | ||
624 | else if (hcd->status_urb || urb->transfer_buffer_length < len) { | ||
625 | dev_dbg (hcd->self.controller, "not queuing rh status urb\n"); | 623 | dev_dbg (hcd->self.controller, "not queuing rh status urb\n"); |
626 | retval = -EINVAL; | 624 | retval = -EINVAL; |
627 | } else { | 625 | goto done; |
628 | hcd->status_urb = urb; | 626 | } |
629 | urb->hcpriv = hcd; /* indicate it's queued */ | ||
630 | 627 | ||
631 | if (!hcd->uses_new_polling) | 628 | retval = usb_hcd_link_urb_to_ep(hcd, urb); |
632 | mod_timer (&hcd->rh_timer, | 629 | if (retval) |
633 | (jiffies/(HZ/4) + 1) * (HZ/4)); | 630 | goto done; |
634 | 631 | ||
635 | /* If a status change has already occurred, report it ASAP */ | 632 | hcd->status_urb = urb; |
636 | else if (hcd->poll_pending) | 633 | urb->hcpriv = hcd; /* indicate it's queued */ |
637 | mod_timer (&hcd->rh_timer, jiffies); | 634 | if (!hcd->uses_new_polling) |
638 | retval = 0; | 635 | mod_timer(&hcd->rh_timer, (jiffies/(HZ/4) + 1) * (HZ/4)); |
639 | } | 636 | |
637 | /* If a status change has already occurred, report it ASAP */ | ||
638 | else if (hcd->poll_pending) | ||
639 | mod_timer(&hcd->rh_timer, jiffies); | ||
640 | retval = 0; | ||
641 | done: | ||
640 | spin_unlock_irqrestore (&hcd_root_hub_lock, flags); | 642 | spin_unlock_irqrestore (&hcd_root_hub_lock, flags); |
641 | return retval; | 643 | return retval; |
642 | } | 644 | } |
@@ -655,11 +657,16 @@ static int rh_urb_enqueue (struct usb_hcd *hcd, struct urb *urb) | |||
655 | /* Unlinks of root-hub control URBs are legal, but they don't do anything | 657 | /* Unlinks of root-hub control URBs are legal, but they don't do anything |
656 | * since these URBs always execute synchronously. | 658 | * since these URBs always execute synchronously. |
657 | */ | 659 | */ |
658 | static int usb_rh_urb_dequeue (struct usb_hcd *hcd, struct urb *urb) | 660 | static int usb_rh_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) |
659 | { | 661 | { |
660 | unsigned long flags; | 662 | unsigned long flags; |
663 | int rc; | ||
661 | 664 | ||
662 | spin_lock_irqsave(&hcd_root_hub_lock, flags); | 665 | spin_lock_irqsave(&hcd_root_hub_lock, flags); |
666 | rc = usb_hcd_check_unlink_urb(hcd, urb, status); | ||
667 | if (rc) | ||
668 | goto done; | ||
669 | |||
663 | if (usb_endpoint_num(&urb->ep->desc) == 0) { /* Control URB */ | 670 | if (usb_endpoint_num(&urb->ep->desc) == 0) { /* Control URB */ |
664 | ; /* Do nothing */ | 671 | ; /* Do nothing */ |
665 | 672 | ||
@@ -669,14 +676,16 @@ static int usb_rh_urb_dequeue (struct usb_hcd *hcd, struct urb *urb) | |||
669 | if (urb == hcd->status_urb) { | 676 | if (urb == hcd->status_urb) { |
670 | hcd->status_urb = NULL; | 677 | hcd->status_urb = NULL; |
671 | urb->hcpriv = NULL; | 678 | urb->hcpriv = NULL; |
679 | usb_hcd_unlink_urb_from_ep(hcd, urb); | ||
672 | 680 | ||
673 | spin_unlock(&hcd_root_hub_lock); | 681 | spin_unlock(&hcd_root_hub_lock); |
674 | usb_hcd_giveback_urb(hcd, urb); | 682 | usb_hcd_giveback_urb(hcd, urb); |
675 | spin_lock(&hcd_root_hub_lock); | 683 | spin_lock(&hcd_root_hub_lock); |
676 | } | 684 | } |
677 | } | 685 | } |
686 | done: | ||
678 | spin_unlock_irqrestore(&hcd_root_hub_lock, flags); | 687 | spin_unlock_irqrestore(&hcd_root_hub_lock, flags); |
679 | return 0; | 688 | return rc; |
680 | } | 689 | } |
681 | 690 | ||
682 | 691 | ||
@@ -977,12 +986,26 @@ EXPORT_SYMBOL (usb_calc_bus_time); | |||
977 | 986 | ||
978 | /*-------------------------------------------------------------------------*/ | 987 | /*-------------------------------------------------------------------------*/ |
979 | 988 | ||
980 | static int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb) | 989 | /** |
990 | * usb_hcd_link_urb_to_ep - add an URB to its endpoint queue | ||
991 | * @hcd: host controller to which @urb was submitted | ||
992 | * @urb: URB being submitted | ||
993 | * | ||
994 | * Host controller drivers should call this routine in their enqueue() | ||
995 | * method. The HCD's private spinlock must be held and interrupts must | ||
996 | * be disabled. The actions carried out here are required for URB | ||
997 | * submission, as well as for endpoint shutdown and for usb_kill_urb. | ||
998 | * | ||
999 | * Returns 0 for no error, otherwise a negative error code (in which case | ||
1000 | * the enqueue() method must fail). If no error occurs but enqueue() fails | ||
1001 | * anyway, it must call usb_hcd_unlink_urb_from_ep() before releasing | ||
1002 | * the private spinlock and returning. | ||
1003 | */ | ||
1004 | int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb) | ||
981 | { | 1005 | { |
982 | unsigned long flags; | ||
983 | int rc = 0; | 1006 | int rc = 0; |
984 | 1007 | ||
985 | spin_lock_irqsave(&hcd_urb_list_lock, flags); | 1008 | spin_lock(&hcd_urb_list_lock); |
986 | 1009 | ||
987 | /* Check that the URB isn't being killed */ | 1010 | /* Check that the URB isn't being killed */ |
988 | if (unlikely(urb->reject)) { | 1011 | if (unlikely(urb->reject)) { |
@@ -1009,48 +1032,48 @@ static int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb) | |||
1009 | goto done; | 1032 | goto done; |
1010 | } | 1033 | } |
1011 | done: | 1034 | done: |
1012 | spin_unlock_irqrestore(&hcd_urb_list_lock, flags); | 1035 | spin_unlock(&hcd_urb_list_lock); |
1013 | return rc; | 1036 | return rc; |
1014 | } | 1037 | } |
1038 | EXPORT_SYMBOL_GPL(usb_hcd_link_urb_to_ep); | ||
1015 | 1039 | ||
1016 | static int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb, | 1040 | /** |
1041 | * usb_hcd_check_unlink_urb - check whether an URB may be unlinked | ||
1042 | * @hcd: host controller to which @urb was submitted | ||
1043 | * @urb: URB being checked for unlinkability | ||
1044 | * @status: error code to store in @urb if the unlink succeeds | ||
1045 | * | ||
1046 | * Host controller drivers should call this routine in their dequeue() | ||
1047 | * method. The HCD's private spinlock must be held and interrupts must | ||
1048 | * be disabled. The actions carried out here are required for making | ||
1049 | * sure that an unlink is valid. | ||
1050 | * | ||
1051 | * Returns 0 for no error, otherwise a negative error code (in which case | ||
1052 | * the dequeue() method must fail). The possible error codes are: | ||
1053 | * | ||
1054 | * -EIDRM: @urb was not submitted or has already completed. | ||
1055 | * The completion function may not have been called yet. | ||
1056 | * | ||
1057 | * -EBUSY: @urb has already been unlinked. | ||
1058 | */ | ||
1059 | int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb, | ||
1017 | int status) | 1060 | int status) |
1018 | { | 1061 | { |
1019 | unsigned long flags; | ||
1020 | struct list_head *tmp; | 1062 | struct list_head *tmp; |
1021 | int rc = 0; | ||
1022 | |||
1023 | /* | ||
1024 | * we contend for urb->status with the hcd core, | ||
1025 | * which changes it while returning the urb. | ||
1026 | * | ||
1027 | * Caller guaranteed that the urb pointer hasn't been freed, and | ||
1028 | * that it was submitted. But as a rule it can't know whether or | ||
1029 | * not it's already been unlinked ... so we respect the reversed | ||
1030 | * lock sequence needed for the usb_hcd_giveback_urb() code paths | ||
1031 | * (urb lock, then hcd_urb_list_lock) in case some other CPU is now | ||
1032 | * unlinking it. | ||
1033 | */ | ||
1034 | spin_lock_irqsave(&urb->lock, flags); | ||
1035 | spin_lock(&hcd_urb_list_lock); | ||
1036 | 1063 | ||
1037 | /* insist the urb is still queued */ | 1064 | /* insist the urb is still queued */ |
1038 | list_for_each(tmp, &urb->ep->urb_list) { | 1065 | list_for_each(tmp, &urb->ep->urb_list) { |
1039 | if (tmp == &urb->urb_list) | 1066 | if (tmp == &urb->urb_list) |
1040 | break; | 1067 | break; |
1041 | } | 1068 | } |
1042 | if (tmp != &urb->urb_list) { | 1069 | if (tmp != &urb->urb_list) |
1043 | rc = -EIDRM; | 1070 | return -EIDRM; |
1044 | goto done; | ||
1045 | } | ||
1046 | 1071 | ||
1047 | /* Any status except -EINPROGRESS means something already started to | 1072 | /* Any status except -EINPROGRESS means something already started to |
1048 | * unlink this URB from the hardware. So there's no more work to do. | 1073 | * unlink this URB from the hardware. So there's no more work to do. |
1049 | */ | 1074 | */ |
1050 | if (urb->status != -EINPROGRESS) { | 1075 | if (urb->status != -EINPROGRESS) |
1051 | rc = -EBUSY; | 1076 | return -EBUSY; |
1052 | goto done; | ||
1053 | } | ||
1054 | urb->status = status; | 1077 | urb->status = status; |
1055 | 1078 | ||
1056 | /* IRQ setup can easily be broken so that USB controllers | 1079 | /* IRQ setup can easily be broken so that USB controllers |
@@ -1065,21 +1088,28 @@ static int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb, | |||
1065 | set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags); | 1088 | set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags); |
1066 | } | 1089 | } |
1067 | 1090 | ||
1068 | done: | 1091 | return 0; |
1069 | spin_unlock(&hcd_urb_list_lock); | ||
1070 | spin_unlock_irqrestore (&urb->lock, flags); | ||
1071 | return rc; | ||
1072 | } | 1092 | } |
1093 | EXPORT_SYMBOL_GPL(usb_hcd_check_unlink_urb); | ||
1073 | 1094 | ||
1074 | static void usb_hcd_unlink_urb_from_ep(struct usb_hcd *hcd, struct urb *urb) | 1095 | /** |
1096 | * usb_hcd_unlink_urb_from_ep - remove an URB from its endpoint queue | ||
1097 | * @hcd: host controller to which @urb was submitted | ||
1098 | * @urb: URB being unlinked | ||
1099 | * | ||
1100 | * Host controller drivers should call this routine before calling | ||
1101 | * usb_hcd_giveback_urb(). The HCD's private spinlock must be held and | ||
1102 | * interrupts must be disabled. The actions carried out here are required | ||
1103 | * for URB completion. | ||
1104 | */ | ||
1105 | void usb_hcd_unlink_urb_from_ep(struct usb_hcd *hcd, struct urb *urb) | ||
1075 | { | 1106 | { |
1076 | unsigned long flags; | ||
1077 | |||
1078 | /* clear all state linking urb to this dev (and hcd) */ | 1107 | /* clear all state linking urb to this dev (and hcd) */ |
1079 | spin_lock_irqsave(&hcd_urb_list_lock, flags); | 1108 | spin_lock(&hcd_urb_list_lock); |
1080 | list_del_init(&urb->urb_list); | 1109 | list_del_init(&urb->urb_list); |
1081 | spin_unlock_irqrestore(&hcd_urb_list_lock, flags); | 1110 | spin_unlock(&hcd_urb_list_lock); |
1082 | } | 1111 | } |
1112 | EXPORT_SYMBOL_GPL(usb_hcd_unlink_urb_from_ep); | ||
1083 | 1113 | ||
1084 | static void map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) | 1114 | static void map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) |
1085 | { | 1115 | { |
@@ -1153,20 +1183,15 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags) | |||
1153 | * URBs must be submitted in process context with interrupts | 1183 | * URBs must be submitted in process context with interrupts |
1154 | * enabled. | 1184 | * enabled. |
1155 | */ | 1185 | */ |
1156 | status = usb_hcd_link_urb_to_ep(hcd, urb); | 1186 | map_urb_for_dma(hcd, urb); |
1157 | if (!status) { | 1187 | if (is_root_hub(urb->dev)) |
1158 | map_urb_for_dma(hcd, urb); | 1188 | status = rh_urb_enqueue(hcd, urb); |
1159 | if (is_root_hub(urb->dev)) | 1189 | else |
1160 | status = rh_urb_enqueue(hcd, urb); | 1190 | status = hcd->driver->urb_enqueue(hcd, urb, mem_flags); |
1161 | else | ||
1162 | status = hcd->driver->urb_enqueue(hcd, urb->ep, urb, | ||
1163 | mem_flags); | ||
1164 | } | ||
1165 | 1191 | ||
1166 | if (unlikely(status)) { | 1192 | if (unlikely(status)) { |
1167 | usbmon_urb_submit_error(&hcd->self, urb, status); | 1193 | usbmon_urb_submit_error(&hcd->self, urb, status); |
1168 | unmap_urb_for_dma(hcd, urb); | 1194 | unmap_urb_for_dma(hcd, urb); |
1169 | usb_hcd_unlink_urb_from_ep(hcd, urb); | ||
1170 | INIT_LIST_HEAD(&urb->urb_list); | 1195 | INIT_LIST_HEAD(&urb->urb_list); |
1171 | atomic_dec(&urb->use_count); | 1196 | atomic_dec(&urb->use_count); |
1172 | if (urb->reject) | 1197 | if (urb->reject) |
@@ -1183,24 +1208,19 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags) | |||
1183 | * soon as practical. we've already set up the urb's return status, | 1208 | * soon as practical. we've already set up the urb's return status, |
1184 | * but we can't know if the callback completed already. | 1209 | * but we can't know if the callback completed already. |
1185 | */ | 1210 | */ |
1186 | static int | 1211 | static int unlink1(struct usb_hcd *hcd, struct urb *urb, int status) |
1187 | unlink1 (struct usb_hcd *hcd, struct urb *urb) | ||
1188 | { | 1212 | { |
1189 | int value; | 1213 | int value; |
1190 | 1214 | ||
1191 | if (is_root_hub(urb->dev)) | 1215 | if (is_root_hub(urb->dev)) |
1192 | value = usb_rh_urb_dequeue (hcd, urb); | 1216 | value = usb_rh_urb_dequeue(hcd, urb, status); |
1193 | else { | 1217 | else { |
1194 | 1218 | ||
1195 | /* The only reason an HCD might fail this call is if | 1219 | /* The only reason an HCD might fail this call is if |
1196 | * it has not yet fully queued the urb to begin with. | 1220 | * it has not yet fully queued the urb to begin with. |
1197 | * Such failures should be harmless. */ | 1221 | * Such failures should be harmless. */ |
1198 | value = hcd->driver->urb_dequeue (hcd, urb); | 1222 | value = hcd->driver->urb_dequeue(hcd, urb, status); |
1199 | } | 1223 | } |
1200 | |||
1201 | if (value != 0) | ||
1202 | dev_dbg (hcd->self.controller, "dequeue %p --> %d\n", | ||
1203 | urb, value); | ||
1204 | return value; | 1224 | return value; |
1205 | } | 1225 | } |
1206 | 1226 | ||
@@ -1216,14 +1236,11 @@ int usb_hcd_unlink_urb (struct urb *urb, int status) | |||
1216 | int retval; | 1236 | int retval; |
1217 | 1237 | ||
1218 | hcd = bus_to_hcd(urb->dev->bus); | 1238 | hcd = bus_to_hcd(urb->dev->bus); |
1219 | 1239 | retval = unlink1(hcd, urb, status); | |
1220 | retval = usb_hcd_check_unlink_urb(hcd, urb, status); | ||
1221 | if (!retval) | ||
1222 | retval = unlink1(hcd, urb); | ||
1223 | 1240 | ||
1224 | if (retval == 0) | 1241 | if (retval == 0) |
1225 | retval = -EINPROGRESS; | 1242 | retval = -EINPROGRESS; |
1226 | else if (retval != -EIDRM) | 1243 | else if (retval != -EIDRM && retval != -EBUSY) |
1227 | dev_dbg(&urb->dev->dev, "hcd_unlink_urb %p fail %d\n", | 1244 | dev_dbg(&urb->dev->dev, "hcd_unlink_urb %p fail %d\n", |
1228 | urb, retval); | 1245 | urb, retval); |
1229 | return retval; | 1246 | return retval; |
@@ -1245,7 +1262,6 @@ int usb_hcd_unlink_urb (struct urb *urb, int status) | |||
1245 | */ | 1262 | */ |
1246 | void usb_hcd_giveback_urb (struct usb_hcd *hcd, struct urb *urb) | 1263 | void usb_hcd_giveback_urb (struct usb_hcd *hcd, struct urb *urb) |
1247 | { | 1264 | { |
1248 | usb_hcd_unlink_urb_from_ep(hcd, urb); | ||
1249 | unmap_urb_for_dma(hcd, urb); | 1265 | unmap_urb_for_dma(hcd, urb); |
1250 | usbmon_urb_complete (&hcd->self, urb); | 1266 | usbmon_urb_complete (&hcd->self, urb); |
1251 | usb_unanchor_urb(urb); | 1267 | usb_unanchor_urb(urb); |
@@ -1282,7 +1298,6 @@ void usb_hcd_endpoint_disable (struct usb_device *udev, | |||
1282 | rescan: | 1298 | rescan: |
1283 | spin_lock_irq(&hcd_urb_list_lock); | 1299 | spin_lock_irq(&hcd_urb_list_lock); |
1284 | list_for_each_entry (urb, &ep->urb_list, urb_list) { | 1300 | list_for_each_entry (urb, &ep->urb_list, urb_list) { |
1285 | int tmp; | ||
1286 | int is_in; | 1301 | int is_in; |
1287 | 1302 | ||
1288 | /* the urb may already have been unlinked */ | 1303 | /* the urb may already have been unlinked */ |
@@ -1292,34 +1307,26 @@ rescan: | |||
1292 | is_in = usb_urb_dir_in(urb); | 1307 | is_in = usb_urb_dir_in(urb); |
1293 | spin_unlock(&hcd_urb_list_lock); | 1308 | spin_unlock(&hcd_urb_list_lock); |
1294 | 1309 | ||
1295 | spin_lock (&urb->lock); | 1310 | /* kick hcd */ |
1296 | tmp = urb->status; | 1311 | unlink1(hcd, urb, -ESHUTDOWN); |
1297 | if (tmp == -EINPROGRESS) | 1312 | dev_dbg (hcd->self.controller, |
1298 | urb->status = -ESHUTDOWN; | 1313 | "shutdown urb %p ep%d%s%s\n", |
1299 | spin_unlock (&urb->lock); | 1314 | urb, usb_endpoint_num(&ep->desc), |
1300 | 1315 | is_in ? "in" : "out", | |
1301 | /* kick hcd unless it's already returning this */ | 1316 | ({ char *s; |
1302 | if (tmp == -EINPROGRESS) { | 1317 | |
1303 | unlink1 (hcd, urb); | 1318 | switch (usb_endpoint_type(&ep->desc)) { |
1304 | dev_dbg (hcd->self.controller, | 1319 | case USB_ENDPOINT_XFER_CONTROL: |
1305 | "shutdown urb %p ep%d%s%s\n", | 1320 | s = ""; break; |
1306 | urb, usb_endpoint_num(&ep->desc), | 1321 | case USB_ENDPOINT_XFER_BULK: |
1307 | is_in ? "in" : "out", | 1322 | s = "-bulk"; break; |
1308 | ({ char *s; | 1323 | case USB_ENDPOINT_XFER_INT: |
1309 | 1324 | s = "-intr"; break; | |
1310 | switch (usb_endpoint_type(&ep->desc)) { | 1325 | default: |
1311 | case USB_ENDPOINT_XFER_CONTROL: | 1326 | s = "-iso"; break; |
1312 | s = ""; break; | 1327 | }; |
1313 | case USB_ENDPOINT_XFER_BULK: | 1328 | s; |
1314 | s = "-bulk"; break; | 1329 | })); |
1315 | case USB_ENDPOINT_XFER_INT: | ||
1316 | s = "-intr"; break; | ||
1317 | default: | ||
1318 | s = "-iso"; break; | ||
1319 | }; | ||
1320 | s; | ||
1321 | })); | ||
1322 | } | ||
1323 | usb_put_urb (urb); | 1330 | usb_put_urb (urb); |
1324 | 1331 | ||
1325 | /* list contents may have changed */ | 1332 | /* list contents may have changed */ |