author:    Alan Stern <stern@rowland.harvard.edu>    2006-08-11 16:01:45 -0400
committer: Greg Kroah-Hartman <gregkh@suse.de>       2006-09-27 14:58:54 -0400
commit:    455b25fb209c8241e2163b491228b28667d82c1c (patch)
tree:      c7155eb33fa8b2916d52472fb517165b3b32813c /drivers/usb/core
parent:    de06a3b842b31b31220637c869f112cfbc1a5ef6 (diff)
usbcore: make hcd_endpoint_disable wait for queue to drain
The inconsistent lock state problem in usbcore (the one that shows up
when an HCD is unloaded) comes down to two inter-related problems:

	usb_rh_urb_dequeue() isn't set up to be called with interrupts
	disabled.

	hcd_endpoint_disable() doesn't wait for all URBs on the
	endpoint's queue to complete.

The two problems are related because the one type of URB that isn't
likely to be complete when hcd_endpoint_disable() returns is a root-hub
URB.  Right now usb_rh_urb_dequeue() waits for them to complete, and it
assumes interrupts are enabled so it can wait.  But
hcd_endpoint_disable() calls it with interrupts disabled.

Now, it should be legal to unlink root-hub URBs with interrupts
disabled.  The solution is to move the waiting into
hcd_endpoint_disable(), where it belongs.  This patch (as754) does that.

It turns out to be completely safe to replace the del_timer_sync() with
a simple del_timer().  It doesn't matter if the timer routine is
running; hcd_root_hub_lock will synchronize the two threads and the
status URB will complete with an unlink error, as it should.

Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
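Editorial note: the unlink-and-wait sequence that this patch removes from
usb_rh_urb_dequeue() is the same one usb_kill_urb() performs, which is why
the wait can safely move into hcd_endpoint_disable(), where sleeping is
allowed.  The following is only a simplified sketch of that sequence,
mirroring the lines deleted in the diff below; it is not the actual
usb_kill_urb() source (which also unlinks the URB before waiting).

	/* Simplified sketch: mirrors the code removed from
	 * usb_rh_urb_dequeue() below, not the real usb_kill_urb().
	 */
	static void sketch_wait_for_urb(struct urb *urb)
	{
		spin_lock_irq(&urb->lock);
		++urb->reject;		/* keep the URB from being resubmitted */
		spin_unlock_irq(&urb->lock);

		/* sleep until the HCD gives the URB back (use_count drops
		 * to 0); this is why the old code needed process context
		 * with interrupts enabled */
		wait_event(usb_kill_urb_queue,
				atomic_read(&urb->use_count) == 0);

		spin_lock_irq(&urb->lock);
		--urb->reject;
		spin_unlock_irq(&urb->lock);
	}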
Diffstat (limited to 'drivers/usb/core')
-rw-r--r--    drivers/usb/core/hcd.c    65
 1 file changed, 35 insertions, 30 deletions
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index dc9628c5893..ea20a3a5a9b 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -633,31 +633,20 @@ static int rh_urb_enqueue (struct usb_hcd *hcd, struct urb *urb)
 
 /*-------------------------------------------------------------------------*/
 
-/* Asynchronous unlinks of root-hub control URBs are legal, but they
- * don't do anything.  Status URB unlinks must be made in process context
- * with interrupts enabled.
+/* Unlinks of root-hub control URBs are legal, but they don't do anything
+ * since these URBs always execute synchronously.
  */
 static int usb_rh_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
 {
-	if (usb_pipeendpoint(urb->pipe) == 0) {	/* Control URB */
-		if (in_interrupt())
-			return 0;		/* nothing to do */
-
-		spin_lock_irq(&urb->lock);	/* from usb_kill_urb */
-		++urb->reject;
-		spin_unlock_irq(&urb->lock);
-
-		wait_event(usb_kill_urb_queue,
-				atomic_read(&urb->use_count) == 0);
+	unsigned long	flags;
 
-		spin_lock_irq(&urb->lock);
-		--urb->reject;
-		spin_unlock_irq(&urb->lock);
+	if (usb_pipeendpoint(urb->pipe) == 0) {	/* Control URB */
+		;	/* Do nothing */
 
 	} else {				/* Status URB */
 		if (!hcd->uses_new_polling)
-			del_timer_sync (&hcd->rh_timer);
-		local_irq_disable ();
+			del_timer (&hcd->rh_timer);
+		local_irq_save (flags);
 		spin_lock (&hcd_root_hub_lock);
 		if (urb == hcd->status_urb) {
 			hcd->status_urb = NULL;
@@ -667,7 +656,7 @@ static int usb_rh_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
 		spin_unlock (&hcd_root_hub_lock);
 		if (urb)
 			usb_hcd_giveback_urb (hcd, urb, NULL);
-		local_irq_enable ();
+		local_irq_restore (flags);
 	}
 
 	return 0;
@@ -1355,7 +1344,8 @@ done:
 /*-------------------------------------------------------------------------*/
 
 /* disables the endpoint: cancels any pending urbs, then synchronizes with
- * the hcd to make sure all endpoint state is gone from hardware. use for
+ * the hcd to make sure all endpoint state is gone from hardware, and then
+ * waits until the endpoint's queue is completely drained. use for
  * set_configuration, set_interface, driver removal, physical disconnect.
  *
  * example:  a qh stored in ep->hcpriv, holding state related to endpoint
@@ -1374,22 +1364,13 @@ hcd_endpoint_disable (struct usb_device *udev, struct usb_host_endpoint *ep)
 
 	local_irq_disable ();
 
-	/* FIXME move most of this into message.c as part of its
-	 * endpoint disable logic
-	 */
-
 	/* ep is already gone from udev->ep_{in,out}[]; no more submits */
 rescan:
 	spin_lock (&hcd_data_lock);
 	list_for_each_entry (urb, &ep->urb_list, urb_list) {
 		int	tmp;
 
-		/* another cpu may be in hcd, spinning on hcd_data_lock
-		 * to giveback() this urb.  the races here should be
-		 * small, but a full fix needs a new "can't submit"
-		 * urb state.
-		 * FIXME urb->reject should allow that...
-		 */
+		/* the urb may already have been unlinked */
 		if (urb->status != -EINPROGRESS)
 			continue;
 		usb_get_urb (urb);
@@ -1431,6 +1412,30 @@ rescan:
 	might_sleep ();
 	if (hcd->driver->endpoint_disable)
 		hcd->driver->endpoint_disable (hcd, ep);
+
+	/* Wait until the endpoint queue is completely empty.  Most HCDs
+	 * will have done this already in their endpoint_disable method,
+	 * but some might not.  And there could be root-hub control URBs
+	 * still pending since they aren't affected by the HCDs'
+	 * endpoint_disable methods.
+	 */
+	while (!list_empty (&ep->urb_list)) {
+		spin_lock_irq (&hcd_data_lock);
+
+		/* The list may have changed while we acquired the spinlock */
+		urb = NULL;
+		if (!list_empty (&ep->urb_list)) {
+			urb = list_entry (ep->urb_list.prev, struct urb,
+					urb_list);
+			usb_get_urb (urb);
+		}
+		spin_unlock_irq (&hcd_data_lock);
+
+		if (urb) {
+			usb_kill_urb (urb);
+			usb_put_urb (urb);
+		}
+	}
 }
 
 /*-------------------------------------------------------------------------*/
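Editorial note on the del_timer_sync() -> del_timer() change: both the
dequeue path above and the root-hub polling timer claim the status URB
under hcd_root_hub_lock, so whichever side gets there first completes the
URB and the other finds hcd->status_urb already NULL.  The sketch below is
hypothetical (the real polling path is the hcd.c root-hub timer/status
code, whose details differ) and only illustrates that lock handshake.

	/* Hypothetical illustration of the timer side of the race; not the
	 * actual root-hub polling code.
	 */
	static void sketch_rh_timer_side(struct usb_hcd *hcd)
	{
		struct urb	*urb;
		unsigned long	flags;

		spin_lock_irqsave(&hcd_root_hub_lock, flags);
		urb = hcd->status_urb;	/* NULL if usb_rh_urb_dequeue() won */
		if (urb)
			hcd->status_urb = NULL;
		spin_unlock_irqrestore(&hcd_root_hub_lock, flags);

		if (urb)
			usb_hcd_giveback_urb(hcd, urb, NULL);
		/* If the dequeue ran first, it already gave the URB back
		 * with an unlink error and this routine does nothing, so a
		 * plain del_timer() is sufficient. */
	}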