author     Alan Stern <stern@rowland.harvard.edu>    2006-05-19 16:39:52 -0400
committer  Greg Kroah-Hartman <gregkh@suse.de>       2006-06-21 18:04:12 -0400
commit     10b8e47d6b32bfba22874354c62770cb4e42aa6c (patch)
tree       5477d441bd4e01c4e3a4a9c2ef7b4e677b49d157 /drivers/usb/host
parent     c433472658b4df11bd3590a59be79194a1ff43ae (diff)
[PATCH] UHCI: fix race in ISO dequeuing
This patch (as688) fixes a small race in uhci-hcd. Because ISO queues
aren't controlled by queue headers, they can't be unlinked. Only
individual URBs can. So whenever multiple ISO URBs are dequeued, it's
necessary to make sure the hardware is done with each one. We can't
assume that dequeuing the first URB will suffice to unlink the entire
queue.
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
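
[Editor's note] For readers who want the race spelled out: the sketch below is not part of the patch; the structures are simplified stand-ins for the driver's uhci_hcd and uhci_qh, with only the fields the check uses. It shows the frame-number test that uhci_cleanup_queue() now performs before an Isochronous URB is given back. The controller may still be fetching the URB's TDs during the frame in which its queue was nominally unlinked, so the URB is held back until the frame counter has moved on (or the controller is stopped).

/* Minimal standalone sketch of the check this patch adds; the struct
 * names and fields are simplified stand-ins, not the real
 * uhci_hcd/uhci_qh definitions. */
#include <stdbool.h>

struct sketch_hcd {
	unsigned int frame_number;	/* current frame counter */
	unsigned int is_stopped;	/* nonzero while the controller is stopped */
};

struct sketch_qh {
	unsigned int unlink_frame;	/* frame in which the queue was unlinked */
};

/* Mirrors the ISO branch of uhci_cleanup_queue(): a dequeued ISO URB may
 * be given back only if the hardware can no longer be using its TDs,
 * i.e. the controller is stopped or is no longer in the frame in which
 * the queue was unlinked. */
static bool iso_urb_safe_to_give_back(const struct sketch_hcd *uhci,
				      const struct sketch_qh *qh)
{
	return (uhci->frame_number + uhci->is_stopped) != qh->unlink_frame;
}

In the patch itself the same expression becomes the return value of uhci_cleanup_queue(); when it is zero, the scanning loop clears qh->is_stopped and returns, so the URB is retried on a later pass.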
Diffstat (limited to 'drivers/usb/host')
-rw-r--r--  drivers/usb/host/uhci-q.c | 48
1 file changed, 36 insertions(+), 12 deletions(-)
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 76b0a9e95a7a..96ce4c87c871 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -194,7 +194,6 @@ static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
 
 	list_for_each_entry(td, &urbp->td_list, list)
 		uhci_remove_td_from_frame_list(uhci, td);
-	wmb();
 }
 
 static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
@@ -253,17 +252,25 @@ static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
  * When a queue is stopped and a dequeued URB is given back, adjust
  * the previous TD link (if the URB isn't first on the queue) or
  * save its toggle value (if it is first and is currently executing).
+ *
+ * Returns 0 if the URB should not yet be given back, 1 otherwise.
  */
-static void uhci_cleanup_queue(struct uhci_qh *qh,
+static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
 		struct urb *urb)
 {
 	struct urb_priv *urbp = urb->hcpriv;
 	struct uhci_td *td;
+	int ret = 1;
 
 	/* Isochronous pipes don't use toggles and their TD link pointers
-	 * get adjusted during uhci_urb_dequeue(). */
-	if (qh->type == USB_ENDPOINT_XFER_ISOC)
-		return;
+	 * get adjusted during uhci_urb_dequeue(). But since their queues
+	 * cannot truly be stopped, we have to watch out for dequeues
+	 * occurring after the nominal unlink frame. */
+	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
+		ret = (uhci->frame_number + uhci->is_stopped !=
+				qh->unlink_frame);
+		return ret;
+	}
 
 	/* If the URB isn't first on its queue, adjust the link pointer
 	 * of the last TD in the previous URB. The toggle doesn't need
@@ -279,24 +286,25 @@ static void uhci_cleanup_queue(struct uhci_qh *qh,
 		td = list_entry(urbp->td_list.prev, struct uhci_td,
 				list);
 		ptd->link = td->link;
-		return;
+		return ret;
 	}
 
 	/* If the QH element pointer is UHCI_PTR_TERM then then currently
 	 * executing URB has already been unlinked, so this one isn't it. */
 	if (qh_element(qh) == UHCI_PTR_TERM)
-		return;
+		return ret;
 	qh->element = UHCI_PTR_TERM;
 
 	/* Control pipes have to worry about toggles */
 	if (qh->type == USB_ENDPOINT_XFER_CONTROL)
-		return;
+		return ret;
 
 	/* Save the next toggle value */
 	WARN_ON(list_empty(&urbp->td_list));
 	td = list_entry(urbp->td_list.next, struct uhci_td, list);
 	qh->needs_fixup = 1;
 	qh->initial_toggle = uhci_toggle(td_token(td));
+	return ret;
 }
 
 /*
@@ -953,7 +961,6 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
 	} else {
 		/* FIXME: Sanity check */
 	}
-	urb->start_frame &= (UHCI_NUMFRAMES - 1);
 
 	for (i = 0; i < urb->number_of_packets; i++) {
 		td = uhci_alloc_td(uhci);
@@ -1120,16 +1127,26 @@ static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
 	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
 	unsigned long flags;
 	struct urb_priv *urbp;
+	struct uhci_qh *qh;
 
 	spin_lock_irqsave(&uhci->lock, flags);
 	urbp = urb->hcpriv;
 	if (!urbp)			/* URB was never linked! */
 		goto done;
+	qh = urbp->qh;
 
 	/* Remove Isochronous TDs from the frame list ASAP */
-	if (urbp->qh->type == USB_ENDPOINT_XFER_ISOC)
+	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
 		uhci_unlink_isochronous_tds(uhci, urb);
-	uhci_unlink_qh(uhci, urbp->qh);
+		mb();
+
+		/* If the URB has already started, update the QH unlink time */
+		uhci_get_current_frame_number(uhci);
+		if (uhci_frame_before_eq(urb->start_frame, uhci->frame_number))
+			qh->unlink_frame = uhci->frame_number;
+	}
+
+	uhci_unlink_qh(uhci, qh);
 
 done:
 	spin_unlock_irqrestore(&uhci->lock, flags);
@@ -1250,7 +1267,14 @@ restart:
 	list_for_each_entry(urbp, &qh->queue, node) {
 		urb = urbp->urb;
 		if (urb->status != -EINPROGRESS) {
-			uhci_cleanup_queue(qh, urb);
+
+			/* Fix up the TD links and save the toggles for
+			 * non-Isochronous queues. For Isochronous queues,
+			 * test for too-recent dequeues. */
+			if (!uhci_cleanup_queue(uhci, qh, urb)) {
+				qh->is_stopped = 0;
+				return;
+			}
 			uhci_giveback_urb(uhci, qh, urb, regs);
 			goto restart;
 		}