author     Ming Lei <tom.leiming@gmail.com>        2008-12-12 08:38:45 -0500
committer  Greg Kroah-Hartman <gregkh@suse.de>     2009-01-07 13:00:08 -0500
commit     49367d8f1d9f26482cf7089489e90f0afd0a942c (patch)
tree       e0b8beb733f49772f258f90ce7af856a10fbdaac /drivers/usb/core/urb.c
parent     3b23dd6f8a718e5339de4f7d86ce76a078b5f771 (diff)
USB: mark "reject" field of struct urb as atomic_t
Accesses to the urb's reject field can be protected simply by making it
an atomic_t. The field is also the only reason usb_reject_lock exists,
so remove the lock and make the code cleaner.
Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Acked-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
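For context, here is a minimal userspace sketch (C11 atomics and plain C, not kernel code; the names demo_lock and demo_reject are illustrative only) of the pattern the patch removes versus the one it introduces: a counter that is only incremented, decremented, and read does not need a dedicated lock once it is atomic.

/*
 * Illustrative userspace analogue of the conversion in this patch.
 * Build: cc -std=c11 demo.c -o demo
 */
#include <stdatomic.h>
#include <stdio.h>

/* Before: plain counter guarded by a lock (stand-in for usb_reject_lock).
 * Shown here with a trivial flag instead of a real spinlock. */
static int demo_lock_taken;
static int demo_reject_locked;

static void inc_locked(void)
{
        demo_lock_taken = 1;            /* "acquire" */
        ++demo_reject_locked;
        demo_lock_taken = 0;            /* "release" */
}

/* After: atomic counter, no lock needed for inc/dec/read. */
static atomic_int demo_reject;

static void inc_atomic(void)
{
        atomic_fetch_add(&demo_reject, 1);      /* like atomic_inc() */
}

int main(void)
{
        inc_locked();
        inc_atomic();
        printf("locked=%d atomic=%d\n",
               demo_reject_locked, atomic_load(&demo_reject));
        return 0;
}

The kernel equivalents used by the patch are atomic_inc(), atomic_dec(), and atomic_read() on an atomic_t field; the sketch above only mirrors the shape of the change, not the actual USB core code.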
Diffstat (limited to 'drivers/usb/core/urb.c')
-rw-r--r--   drivers/usb/core/urb.c   23
1 file changed, 5 insertions, 18 deletions
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 1f68af9db3f..b5e9948698b 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -10,7 +10,6 @@
 
 #define to_urb(d) container_of(d, struct urb, kref)
 
-static DEFINE_SPINLOCK(usb_reject_lock);
 
 static void urb_destroy(struct kref *kref)
 {
@@ -131,9 +130,7 @@ void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor)
 	urb->anchor = anchor;
 
 	if (unlikely(anchor->poisoned)) {
-		spin_lock(&usb_reject_lock);
-		urb->reject++;
-		spin_unlock(&usb_reject_lock);
+		atomic_inc(&urb->reject);
 	}
 
 	spin_unlock_irqrestore(&anchor->lock, flags);
@@ -565,16 +562,12 @@ void usb_kill_urb(struct urb *urb)
 	might_sleep();
 	if (!(urb && urb->dev && urb->ep))
 		return;
-	spin_lock_irq(&usb_reject_lock);
-	++urb->reject;
-	spin_unlock_irq(&usb_reject_lock);
+	atomic_inc(&urb->reject);
 
 	usb_hcd_unlink_urb(urb, -ENOENT);
 	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
 
-	spin_lock_irq(&usb_reject_lock);
-	--urb->reject;
-	spin_unlock_irq(&usb_reject_lock);
+	atomic_dec(&urb->reject);
 }
 EXPORT_SYMBOL_GPL(usb_kill_urb);
 
@@ -606,9 +599,7 @@ void usb_poison_urb(struct urb *urb)
 	might_sleep();
 	if (!(urb && urb->dev && urb->ep))
 		return;
-	spin_lock_irq(&usb_reject_lock);
-	++urb->reject;
-	spin_unlock_irq(&usb_reject_lock);
+	atomic_inc(&urb->reject);
 
 	usb_hcd_unlink_urb(urb, -ENOENT);
 	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
@@ -617,14 +608,10 @@ EXPORT_SYMBOL_GPL(usb_poison_urb);
 
 void usb_unpoison_urb(struct urb *urb)
 {
-	unsigned long flags;
-
 	if (!urb)
 		return;
 
-	spin_lock_irqsave(&usb_reject_lock, flags);
-	--urb->reject;
-	spin_unlock_irqrestore(&usb_reject_lock, flags);
+	atomic_dec(&urb->reject);
 }
 EXPORT_SYMBOL_GPL(usb_unpoison_urb);
 