author		Oliver Neukum <oliver@neukum.org>	2008-07-29 10:18:47 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2008-10-17 17:40:51 -0400
commit		6a2839bedc1502b3f0366cc3ad1099a1d92cf8fb (patch)
tree		3b8861acf1a573d97066157c1fab16e574d645b1 /drivers/usb/core
parent		55b447bf79ad25591437d24b78caa9d0ae4fec82 (diff)
USB: extend poisoning to anchors
This extends the poisoning concept to anchors. This way poisoning will work with fire-and-forget drivers.

Signed-off-by: Oliver Neukum <oneukum@suse.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
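For context, a minimal sketch of the fire-and-forget pattern this change targets. The driver, its field names (my_dev, submitted) and the endpoint number are invented here for illustration; they are not part of this commit. The driver anchors each URB at submission time and keeps no pointer to it; on disconnect, poisoning the anchor cancels everything outstanding and rejects anything anchored afterwards:

/*
 * Hypothetical fire-and-forget driver fragment; sketch only.
 * Assumes init_usb_anchor(&dev->submitted) was called in probe()
 * and that "buf" was kmalloc'd by the caller.
 */
#include <linux/usb.h>
#include <linux/slab.h>

struct my_dev {
	struct usb_device *udev;
	struct usb_anchor submitted;
};

static void my_write_callback(struct urb *urb)
{
	/* completion (or poisoning) has already unanchored the URB */
	kfree(urb->transfer_buffer);
}

/* submit and forget: no private list, no URB pointer kept around */
static int my_fire_and_forget(struct my_dev *dev, void *buf, int len)
{
	struct urb *urb;
	int rv;

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb)
		return -ENOMEM;
	usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 1),
			  buf, len, my_write_callback, dev);
	usb_anchor_urb(urb, &dev->submitted);
	rv = usb_submit_urb(urb, GFP_KERNEL);
	if (rv)
		usb_unanchor_urb(urb);
	usb_free_urb(urb);	/* the anchor holds its own reference */
	return rv;
}

static void my_disconnect(struct usb_interface *intf)
{
	struct my_dev *dev = usb_get_intfdata(intf);

	/*
	 * Kills every URB still on the anchor and marks the anchor
	 * poisoned, so URBs anchored from now on are rejected too.
	 */
	usb_poison_anchored_urbs(&dev->submitted);
}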
Diffstat (limited to 'drivers/usb/core')
-rw-r--r--	drivers/usb/core/urb.c	57
1 file changed, 48 insertions(+), 9 deletions(-)
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index a7945ab208c2..eebc070c3cc7 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -10,6 +10,8 @@
 
 #define to_urb(d) container_of(d, struct urb, kref)
 
+static DEFINE_SPINLOCK(usb_reject_lock);
+
 static void urb_destroy(struct kref *kref)
 {
 	struct urb *urb = to_urb(kref);
@@ -127,6 +129,13 @@ void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor)
 	usb_get_urb(urb);
 	list_add_tail(&urb->anchor_list, &anchor->urb_list);
 	urb->anchor = anchor;
+
+	if (unlikely(anchor->poisoned)) {
+		spin_lock(&usb_reject_lock);
+		urb->reject++;
+		spin_unlock(&usb_reject_lock);
+	}
+
 	spin_unlock_irqrestore(&anchor->lock, flags);
 }
 EXPORT_SYMBOL_GPL(usb_anchor_urb);
@@ -522,7 +531,6 @@ int usb_unlink_urb(struct urb *urb)
 }
 EXPORT_SYMBOL_GPL(usb_unlink_urb);
 
-static DEFINE_MUTEX(usb_reject_mutex);
 /**
  * usb_kill_urb - cancel a transfer request and wait for it to finish
  * @urb: pointer to URB describing a previously submitted request,
@@ -548,16 +556,16 @@ void usb_kill_urb(struct urb *urb)
 	might_sleep();
 	if (!(urb && urb->dev && urb->ep))
 		return;
-	mutex_lock(&usb_reject_mutex);
+	spin_lock_irq(&usb_reject_lock);
 	++urb->reject;
-	mutex_unlock(&usb_reject_mutex);
+	spin_unlock_irq(&usb_reject_lock);
 
 	usb_hcd_unlink_urb(urb, -ENOENT);
 	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
 
-	mutex_lock(&usb_reject_mutex);
+	spin_lock_irq(&usb_reject_lock);
 	--urb->reject;
-	mutex_unlock(&usb_reject_mutex);
+	spin_unlock_irq(&usb_reject_lock);
 }
 EXPORT_SYMBOL_GPL(usb_kill_urb);
 
@@ -586,9 +594,9 @@ void usb_poison_urb(struct urb *urb)
 	might_sleep();
 	if (!(urb && urb->dev && urb->ep))
 		return;
-	mutex_lock(&usb_reject_mutex);
+	spin_lock_irq(&usb_reject_lock);
 	++urb->reject;
-	mutex_unlock(&usb_reject_mutex);
+	spin_unlock_irq(&usb_reject_lock);
 
 	usb_hcd_unlink_urb(urb, -ENOENT);
 	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
@@ -597,12 +605,14 @@ EXPORT_SYMBOL_GPL(usb_poison_urb);
 
 void usb_unpoison_urb(struct urb *urb)
 {
+	unsigned long flags;
+
 	if (!urb)
 		return;
 
-	mutex_lock(&usb_reject_mutex);
+	spin_lock_irqsave(&usb_reject_lock, flags);
 	--urb->reject;
-	mutex_unlock(&usb_reject_mutex);
+	spin_unlock_irqrestore(&usb_reject_lock, flags);
 }
 EXPORT_SYMBOL_GPL(usb_unpoison_urb);
 
@@ -633,6 +643,35 @@ void usb_kill_anchored_urbs(struct usb_anchor *anchor)
 }
 EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
 
+
+/**
+ * usb_poison_anchored_urbs - cease all traffic from an anchor
+ * @anchor: anchor the requests are bound to
+ *
+ * this allows all outstanding URBs to be poisoned starting
+ * from the back of the queue. Newly added URBs will also be
+ * poisoned
+ */
+void usb_poison_anchored_urbs(struct usb_anchor *anchor)
+{
+	struct urb *victim;
+
+	spin_lock_irq(&anchor->lock);
+	anchor->poisoned = 1;
+	while (!list_empty(&anchor->urb_list)) {
+		victim = list_entry(anchor->urb_list.prev, struct urb,
+				    anchor_list);
+		/* we must make sure the URB isn't freed before we kill it*/
+		usb_get_urb(victim);
+		spin_unlock_irq(&anchor->lock);
+		/* this will unanchor the URB */
+		usb_poison_urb(victim);
+		usb_put_urb(victim);
+		spin_lock_irq(&anchor->lock);
+	}
+	spin_unlock_irq(&anchor->lock);
+}
+EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);
 /**
  * usb_unlink_anchored_urbs - asynchronously cancel transfer requests en masse
  * @anchor: anchor the requests are bound to
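A note on the locking change in the hunks above: usb_anchor_urb() now bumps urb->reject while holding anchor->lock with interrupts disabled, where the old usb_reject_mutex could not be taken; a spinlock can, which is presumably why the mutex was replaced. For a driver that keeps a single URB and resubmits it, usb_poison_urb()/usb_unpoison_urb() pair up across a quiescent period. A hedged sketch, continuing the hypothetical fragment above and assuming my_dev also holds a struct urb *int_urb:

/* Hypothetical suspend/resume pairing of poison/unpoison; sketch only. */
static int my_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct my_dev *dev = usb_get_intfdata(intf);

	/* cancels the URB; while poisoned, resubmission fails (-EPERM) */
	usb_poison_urb(dev->int_urb);
	return 0;
}

static int my_resume(struct usb_interface *intf)
{
	struct my_dev *dev = usb_get_intfdata(intf);

	/* drops urb->reject back to zero so submission works again */
	usb_unpoison_urb(dev->int_urb);
	return usb_submit_urb(dev->int_urb, GFP_NOIO);
}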