Diffstat (limited to 'drivers/usb/host/uhci-q.c')
 drivers/usb/host/uhci-q.c | 631 ++++++++++++++++++-----------------------
 1 file changed, 274 insertions(+), 357 deletions(-)
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index c4194182dcc4..44bba9a6d196 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -151,53 +151,6 @@ static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
 	wmb();
 }
 
-/*
- * Remove an URB's TDs from the hardware schedule
- */
-static void uhci_remove_tds_from_schedule(struct uhci_hcd *uhci,
-		struct urb *urb, int status)
-{
-	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
-
-	/* Isochronous TDs get unlinked directly from the frame list */
-	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
-		uhci_unlink_isochronous_tds(uhci, urb);
-		return;
-	}
-
-	/* If the URB isn't first on its queue, adjust the link pointer
-	 * of the last TD in the previous URB. */
-	if (urbp->node.prev != &urbp->qh->queue) {
-		struct urb_priv *purbp;
-		struct uhci_td *ptd, *ltd;
-
-		if (status == -EINPROGRESS)
-			status = 0;
-		purbp = list_entry(urbp->node.prev, struct urb_priv, node);
-		ptd = list_entry(purbp->td_list.prev, struct uhci_td,
-				list);
-		ltd = list_entry(urbp->td_list.prev, struct uhci_td,
-				list);
-		ptd->link = ltd->link;
-	}
-
-	/* If the URB completed with an error, then the QH element certainly
-	 * points to one of the URB's TDs.  If it completed normally then
-	 * the QH element has certainly moved on to the next URB.  And if
-	 * the URB is still in progress then it must have been dequeued.
-	 * The QH element either hasn't reached it yet or is somewhere in
-	 * the middle.  If the URB wasn't first we can assume that it
-	 * hasn't started yet (see above): Otherwise all the preceding URBs
-	 * would have completed and been removed from the queue, so this one
-	 * _would_ be first.
-	 *
-	 * If the QH element is inside this URB, clear it.  It will be
-	 * set properly when the QH is activated.
-	 */
-	if (status < 0)
-		urbp->qh->element = UHCI_PTR_TERM;
-}
-
 static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
 		struct usb_device *udev, struct usb_host_endpoint *hep)
 {
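
The helper deleted above spliced an URB's TDs out of the hardware chain by copying the departing URB's last link pointer into the previous URB's last TD; the same splice reappears inside uhci_giveback_urb() later in this patch. A minimal userspace model of that pointer splice (toy struct and names, not the driver's):

	#include <assert.h>
	#include <stdint.h>

	/* Toy stand-in for a hardware TD: "link" names the next TD. */
	struct toy_td {
		uint32_t link;
	};

	/*
	 * Splice an URB's TDs out of the chain the controller walks: the
	 * previous URB's last TD takes over the departing URB's last link,
	 * so the hardware never sees the removed TDs.
	 */
	static void splice_out(struct toy_td *prev_last, struct toy_td *gone_last)
	{
		prev_last->link = gone_last->link;
	}

	int main(void)
	{
		/* Chain is a -> b -> c (links 1, 2, 3); remove b. */
		struct toy_td a = { .link = 1 }, b = { .link = 2 }, c = { .link = 3 };

		splice_out(&a, &b);
		assert(a.link == 2);	/* a now points where b used to point */
		(void)c;
		return 0;
	}
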
@@ -251,6 +204,90 @@ static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
 }
 
 /*
+ * When the currently executing URB is dequeued, save its current toggle value
+ */
+static void uhci_save_toggle(struct uhci_qh *qh, struct urb *urb)
+{
+	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
+	struct uhci_td *td;
+
+	/* If the QH element pointer is UHCI_PTR_TERM then the currently
+	 * executing URB has already been unlinked, so this one isn't it. */
+	if (qh_element(qh) == UHCI_PTR_TERM ||
+			qh->queue.next != &urbp->node)
+		return;
+	qh->element = UHCI_PTR_TERM;
+
+	/* Only bulk and interrupt pipes have to worry about toggles */
+	if (!(usb_pipetype(urb->pipe) == PIPE_BULK ||
+			usb_pipetype(urb->pipe) == PIPE_INTERRUPT))
+		return;
+
+	/* Find the first active TD; that's the device's toggle state */
+	list_for_each_entry(td, &urbp->td_list, list) {
+		if (td_status(td) & TD_CTRL_ACTIVE) {
+			qh->needs_fixup = 1;
+			qh->initial_toggle = uhci_toggle(td_token(td));
+			return;
+		}
+	}
+
+	WARN_ON(1);
+}
+
+/*
+ * Fix up the data toggles for URBs in a queue, when one of them
+ * terminates early (short transfer, error, or dequeued).
+ */
+static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
+{
+	struct urb_priv *urbp = NULL;
+	struct uhci_td *td;
+	unsigned int toggle = qh->initial_toggle;
+	unsigned int pipe;
+
+	/* Fixups for a short transfer start with the second URB in the
+	 * queue (the short URB is the first). */
+	if (skip_first)
+		urbp = list_entry(qh->queue.next, struct urb_priv, node);
+
+	/* When starting with the first URB, if the QH element pointer is
+	 * still valid then we know the URB's toggles are okay. */
+	else if (qh_element(qh) != UHCI_PTR_TERM)
+		toggle = 2;
+
+	/* Fix up the toggle for the URBs in the queue.  Normally this
+	 * loop won't run more than once: When an error or short transfer
+	 * occurs, the queue usually gets emptied. */
+	urbp = list_prepare_entry(urbp, &qh->queue, node);
+	list_for_each_entry_continue(urbp, &qh->queue, node) {
+
+		/* If the first TD has the right toggle value, we don't
+		 * need to change any toggles in this URB */
+		td = list_entry(urbp->td_list.next, struct uhci_td, list);
+		if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) {
+			td = list_entry(urbp->td_list.prev, struct uhci_td,
+					list);
+			toggle = uhci_toggle(td_token(td)) ^ 1;
+
+		/* Otherwise all the toggles in the URB have to be switched */
+		} else {
+			list_for_each_entry(td, &urbp->td_list, list) {
+				td->token ^= __constant_cpu_to_le32(
+							TD_TOKEN_TOGGLE);
+				toggle ^= 1;
+			}
+		}
+	}
+
+	wmb();
+	pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
+	usb_settoggle(qh->udev, usb_pipeendpoint(pipe),
+			usb_pipeout(pipe), toggle);
+	qh->needs_fixup = 0;
+}
+
+/*
  * Put a QH on the schedule in both hardware and software
  */
 static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
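
uhci_fixup_toggles() restores plain DATA0/DATA1 alternation across the whole queue: an URB whose first TD already carries the expected toggle is left alone, otherwise every TD in it is flipped with one XOR, and the toggle expected by the next URB is always the last TD's toggle inverted. A standalone sketch of the same walk over a toy queue (all names here are illustrative):

	#include <assert.h>
	#include <stddef.h>

	#define NPKTS 5

	/* Toy URB: one toggle bit per packet (0 = DATA0, 1 = DATA1). */
	struct toy_urb {
		int toggle[NPKTS];
	};

	/*
	 * Make each URB's packets alternate starting from "expected",
	 * flipping a whole URB only when its first packet disagrees --
	 * the same per-URB decision uhci_fixup_toggles() makes.
	 */
	static int fixup_toggles(struct toy_urb *q, size_t n, int expected)
	{
		for (size_t i = 0; i < n; i++) {
			if (q[i].toggle[0] != expected) {
				for (int p = 0; p < NPKTS; p++)
					q[i].toggle[p] ^= 1;	/* flip whole URB */
			}
			/* Next URB starts one past this URB's last packet. */
			expected = q[i].toggle[NPKTS - 1] ^ 1;
		}
		return expected;	/* value to store for the endpoint */
	}

	int main(void)
	{
		struct toy_urb q[2] = {
			{ .toggle = { 1, 0, 1, 0, 1 } },	/* wrong start */
			{ .toggle = { 0, 1, 0, 1, 0 } },
		};

		int final = fixup_toggles(q, 2, 0);
		assert(q[0].toggle[0] == 0 && q[1].toggle[0] == 1);
		assert(final == 0);
		return 0;
	}
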
@@ -276,6 +313,9 @@ static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
 
 	/* Move the QH from its old list to the end of the appropriate
 	 * skeleton's list */
+	if (qh == uhci->next_qh)
+		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
+				node);
 	list_move_tail(&qh->node, &qh->skel->node);
 
 	/* Link it into the schedule */
@@ -310,6 +350,9 @@ static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
 		uhci_set_next_interrupt(uhci);
 
 	/* Move the QH from its old list to the end of the unlinking list */
+	if (qh == uhci->next_qh)
+		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
+				node);
 	list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
 }
 
@@ -323,6 +366,9 @@ static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
 {
 	WARN_ON(qh->state == QH_STATE_ACTIVE);
 
+	if (qh == uhci->next_qh)
+		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
+				node);
 	list_move(&qh->node, &uhci->idle_qh_list);
 	qh->state = QH_STATE_IDLE;
 
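
The three hunks above all add the same guard: uhci->next_qh is the schedule scanner's cursor, kept one node ahead of the QH being processed, so any routine that moves a QH onto another list must first step the cursor past it or the scan would follow the moved node onto the wrong list. A self-contained model of that cursor discipline (toy list types, not the driver's):

	#include <assert.h>
	#include <stddef.h>

	struct node {
		struct node *prev, *next;
	};

	static struct node *cursor;	/* models uhci->next_qh */

	static void list_init(struct node *head)
	{
		head->prev = head->next = head;
	}

	static void list_del(struct node *n)
	{
		n->prev->next = n->next;
		n->next->prev = n->prev;
	}

	static void list_add_tail(struct node *n, struct node *head)
	{
		n->prev = head->prev;
		n->next = head;
		head->prev->next = n;
		head->prev = n;
	}

	/* Move a node to another list while keeping the scan cursor valid --
	 * the same check the hunks above add before list_move(). */
	static void move_node(struct node *n, struct node *dest)
	{
		if (n == cursor)
			cursor = n->next;	/* step the cursor past it first */
		list_del(n);
		list_add_tail(n, dest);
	}

	int main(void)
	{
		struct node head, other, a, b;

		list_init(&head);
		list_init(&other);
		list_add_tail(&a, &head);
		list_add_tail(&b, &head);

		/* Scan: cursor always points past the node being processed. */
		struct node *cur = head.next;	/* process a... */
		cursor = cur->next;		/* ...cursor now at b */

		move_node(&b, &other);		/* callee moves b away */
		assert(cursor == &head);	/* cursor stepped past b */
		return 0;
	}
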
@@ -344,11 +390,9 @@ static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
 
 	urbp->urb = urb;
 	urb->hcpriv = urbp;
-	urbp->fsbrtime = jiffies;
 
 	INIT_LIST_HEAD(&urbp->node);
 	INIT_LIST_HEAD(&urbp->td_list);
-	INIT_LIST_HEAD(&urbp->urb_list);
 
 	return urbp;
 }
@@ -373,9 +417,6 @@ static void uhci_free_urb_priv(struct uhci_hcd *uhci,
 {
 	struct uhci_td *td, *tmp;
 
-	if (!list_empty(&urbp->urb_list))
-		dev_warn(uhci_dev(uhci), "urb %p still on uhci->urb_list!\n",
-				urbp->urb);
 	if (!list_empty(&urbp->node))
 		dev_warn(uhci_dev(uhci), "urb %p still on QH's list!\n",
 				urbp->urb);
@@ -453,71 +494,6 @@ static int uhci_map_status(int status, int dir_out)
 }
 
 /*
- * Fix up the data toggles for URBs in a queue, when one of them
- * terminates early (short transfer, error, or dequeued).
- */
-static void uhci_fixup_toggles(struct urb *urb)
-{
-	struct list_head *head;
-	struct uhci_td *td;
-	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
-	int prevactive = 0;
-	unsigned int toggle = 0;
-	struct urb_priv *turbp, *list_end;
-
-	/*
-	 * We need to find out what the last successful toggle was so
-	 * we can update the data toggles for the following transfers.
-	 *
-	 * There are 2 ways the last successful completed TD is found:
-	 *
-	 * 1) The TD is NOT active and the actual length < expected length
-	 * 2) The TD is NOT active and it's the last TD in the chain
-	 *
-	 * and a third way the first uncompleted TD is found:
-	 *
-	 * 3) The TD is active and the previous TD is NOT active
-	 */
-	head = &urbp->td_list;
-	list_for_each_entry(td, head, list) {
-		unsigned int ctrlstat = td_status(td);
-
-		if (!(ctrlstat & TD_CTRL_ACTIVE) &&
-				(uhci_actual_length(ctrlstat) <
-				 uhci_expected_length(td_token(td)) ||
-				td->list.next == head))
-			toggle = uhci_toggle(td_token(td)) ^ 1;
-		else if ((ctrlstat & TD_CTRL_ACTIVE) && !prevactive)
-			toggle = uhci_toggle(td_token(td));
-
-		prevactive = ctrlstat & TD_CTRL_ACTIVE;
-	}
-
-	/*
-	 * Fix up the toggle for the following URBs in the queue.
-	 *
-	 * We can stop as soon as we find an URB with toggles set correctly,
-	 * because then all the following URBs will be correct also.
-	 */
-	list_end = list_entry(&urbp->qh->queue, struct urb_priv, node);
-	turbp = urbp;
-	while ((turbp = list_entry(turbp->node.next, struct urb_priv, node))
-			!= list_end) {
-		td = list_entry(turbp->td_list.next, struct uhci_td, list);
-		if (uhci_toggle(td_token(td)) == toggle)
-			return;
-
-		list_for_each_entry(td, &turbp->td_list, list) {
-			td->token ^= __constant_cpu_to_le32(TD_TOKEN_TOGGLE);
-			toggle ^= 1;
-		}
-	}
-
-	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
-			usb_pipeout(urb->pipe), toggle);
-}
-
-/*
  * Control transfers
  */
 static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
@@ -765,6 +741,9 @@ err:
 		}
 	}
 
+	/* Note that the queue has stopped */
+	urbp->qh->element = UHCI_PTR_TERM;
+	urbp->qh->is_stopped = 1;
 	return ret;
 }
 
@@ -927,7 +906,10 @@ static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
 			 */
 			if (!urbp->short_transfer) {
 				urbp->short_transfer = 1;
-				uhci_fixup_toggles(urb);
+				urbp->qh->initial_toggle =
+					uhci_toggle(td_token(td)) ^ 1;
+				uhci_fixup_toggles(urbp->qh, 1);
+
 				td = list_entry(urbp->td_list.prev,
 						struct uhci_td, list);
 				urbp->qh->element = td->link;
@@ -962,6 +944,13 @@ err:
 		}
 	}
 #endif
+
+	/* Note that the queue has stopped and save the next toggle value */
+	urbp->qh->element = UHCI_PTR_TERM;
+	urbp->qh->is_stopped = 1;
+	urbp->qh->needs_fixup = 1;
+	urbp->qh->initial_toggle = uhci_toggle(td_token(td)) ^
+			(ret == -EREMOTEIO);
 	return ret;
 }
 
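
The error path above records where the device's toggle sequence stands: uhci_toggle(td_token(td)) is the toggle of the TD under examination, and XOR-ing with (ret == -EREMOTEIO) covers the one case where that TD actually moved data (a short packet), so the device has already advanced past it. The same branch-free idiom in a standalone sketch (illustrative names):

	#include <assert.h>
	#include <stdbool.h>

	/*
	 * Next expected toggle after a transfer stops at a TD whose toggle
	 * is "td_toggle".  If the TD completed as a short packet, the device
	 * consumed it and the sequence advances; on a hard error the TD
	 * never took effect, so the sequence stays put.
	 */
	static int next_toggle(int td_toggle, bool td_completed_short)
	{
		return td_toggle ^ (int)td_completed_short;
	}

	int main(void)
	{
		assert(next_toggle(0, false) == 0);	/* hard error: unchanged */
		assert(next_toggle(0, true) == 1);	/* short packet: advanced */
		assert(next_toggle(1, true) == 0);
		return 0;
	}
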
@@ -995,76 +984,39 @@ static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
 /*
  * Isochronous transfers
  */
-static int isochronous_find_limits(struct uhci_hcd *uhci, struct urb *urb, unsigned int *start, unsigned int *end)
-{
-	struct urb *last_urb = NULL;
-	struct urb_priv *up;
-	int ret = 0;
-
-	list_for_each_entry(up, &uhci->urb_list, urb_list) {
-		struct urb *u = up->urb;
-
-		/* look for pending URBs with identical pipe handle */
-		if ((urb->pipe == u->pipe) && (urb->dev == u->dev) &&
-		    (u->status == -EINPROGRESS) && (u != urb)) {
-			if (!last_urb)
-				*start = u->start_frame;
-			last_urb = u;
-		}
-	}
-
-	if (last_urb) {
-		*end = (last_urb->start_frame + last_urb->number_of_packets *
-				last_urb->interval) & (UHCI_NUMFRAMES-1);
-		ret = 0;
-	} else
-		ret = -1;	/* no previous urb found */
-
-	return ret;
-}
-
-static int isochronous_find_start(struct uhci_hcd *uhci, struct urb *urb)
+static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
+		struct uhci_qh *qh)
 {
-	int limits;
-	unsigned int start = 0, end = 0;
+	struct uhci_td *td = NULL;	/* Since urb->number_of_packets > 0 */
+	int i, frame;
+	unsigned long destination, status;
+	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
 
 	if (urb->number_of_packets > 900)	/* 900? Why? */
 		return -EFBIG;
 
-	limits = isochronous_find_limits(uhci, urb, &start, &end);
+	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
+	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
 
+	/* Figure out the starting frame number */
 	if (urb->transfer_flags & URB_ISO_ASAP) {
-		if (limits) {
+		if (list_empty(&qh->queue)) {
 			uhci_get_current_frame_number(uhci);
-			urb->start_frame = (uhci->frame_number + 10)
-					& (UHCI_NUMFRAMES - 1);
-		} else
-			urb->start_frame = end;
+			urb->start_frame = (uhci->frame_number + 10);
+
+		} else {		/* Go right after the last one */
+			struct urb *last_urb;
+
+			last_urb = list_entry(qh->queue.prev,
+					struct urb_priv, node)->urb;
+			urb->start_frame = (last_urb->start_frame +
+					last_urb->number_of_packets *
+					last_urb->interval);
+		}
 	} else {
-		urb->start_frame &= (UHCI_NUMFRAMES - 1);
 		/* FIXME: Sanity check */
 	}
-
-	return 0;
-}
-
-/*
- * Isochronous transfers
- */
-static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
-		struct uhci_qh *qh)
-{
-	struct uhci_td *td = NULL;	/* Since urb->number_of_packets > 0 */
-	int i, ret, frame;
-	unsigned long destination, status;
-	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
-
-	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
-	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
-
-	ret = isochronous_find_start(uhci, urb);
-	if (ret)
-		return ret;
+	urb->start_frame &= (UHCI_NUMFRAMES - 1);
 
 	for (i = 0; i < urb->number_of_packets; i++) {
 		td = uhci_alloc_td(uhci);
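
With per-endpoint queues, an URB_ISO_ASAP submission no longer needs the global URB search that isochronous_find_limits() performed: an empty queue schedules ten frames ahead of the current frame, otherwise the new URB starts where the last queued one ends, and the result is wrapped into the 1024-entry frame list. The arithmetic, worked through in a standalone sketch (the constant matches UHCI's frame list length; the types are mine):

	#include <assert.h>
	#include <stddef.h>

	#define UHCI_NUMFRAMES 1024	/* UHCI frame list length */

	struct toy_iso_urb {
		unsigned start_frame;
		unsigned number_of_packets;
		unsigned interval;
	};

	/* Start frame for an ASAP submission, per the logic in the hunk above. */
	static unsigned iso_start_frame(unsigned current_frame,
					const struct toy_iso_urb *last /* or NULL */)
	{
		unsigned start;

		if (!last)		/* empty queue: 10 frames of slack */
			start = current_frame + 10;
		else			/* go right after the last URB */
			start = last->start_frame +
				last->number_of_packets * last->interval;
		return start & (UHCI_NUMFRAMES - 1);
	}

	int main(void)
	{
		struct toy_iso_urb last = {
			.start_frame = 1020, .number_of_packets = 8, .interval = 1,
		};

		assert(iso_start_frame(500, NULL) == 510);
		/* 1020 + 8 wraps around the 1024-frame list to frame 4 */
		assert(iso_start_frame(500, &last) == 4);
		return 0;
	}
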
@@ -1203,7 +1155,6 @@ static int uhci_urb_enqueue(struct usb_hcd *hcd,
 	/* Add this URB to the QH */
 	urbp->qh = qh;
 	list_add_tail(&urbp->node, &qh->queue);
-	list_add_tail(&urbp->urb_list, &uhci->urb_list);
 
 	/* If the new URB is the first and only one on this QH then either
 	 * the QH is new and idle or else it's unlinked and waiting to
@@ -1224,49 +1175,66 @@ done:
 	return ret;
 }
 
+static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
+{
+	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+	unsigned long flags;
+	struct urb_priv *urbp;
+
+	spin_lock_irqsave(&uhci->lock, flags);
+	urbp = urb->hcpriv;
+	if (!urbp)			/* URB was never linked! */
+		goto done;
+
+	/* Remove Isochronous TDs from the frame list ASAP */
+	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
+		uhci_unlink_isochronous_tds(uhci, urb);
+	uhci_unlink_qh(uhci, urbp->qh);
+
+done:
+	spin_unlock_irqrestore(&uhci->lock, flags);
+	return 0;
+}
+
 /*
- * Return the result of a transfer
+ * Finish unlinking an URB and give it back
  */
-static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb)
+static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
+		struct urb *urb, struct pt_regs *regs)
+__releases(uhci->lock)
+__acquires(uhci->lock)
 {
-	int status;
-	int okay_to_giveback = 0;
 	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
 
-	switch (usb_pipetype(urb->pipe)) {
-	case PIPE_CONTROL:
-		status = uhci_result_control(uhci, urb);
-		break;
-	case PIPE_ISOCHRONOUS:
-		status = uhci_result_isochronous(uhci, urb);
-		break;
-	default:	/* PIPE_BULK or PIPE_INTERRUPT */
-		status = uhci_result_common(uhci, urb);
-		break;
-	}
+	/* Isochronous TDs get unlinked directly from the frame list */
+	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
+		uhci_unlink_isochronous_tds(uhci, urb);
 
-	spin_lock(&urb->lock);
-	if (urb->status == -EINPROGRESS) {	/* Not yet dequeued */
-		if (status != -EINPROGRESS) {	/* URB has completed */
-			urb->status = status;
+	/* If the URB isn't first on its queue, adjust the link pointer
+	 * of the last TD in the previous URB. */
+	else if (qh->queue.next != &urbp->node) {
+		struct urb_priv *purbp;
+		struct uhci_td *ptd, *ltd;
 
-			/* If the URB got a real error (as opposed to
-			 * simply being dequeued), we don't have to
-			 * unlink the QH.  Fix this later... */
-			if (status < 0)
-				uhci_unlink_qh(uhci, urbp->qh);
-			else
-				okay_to_giveback = 1;
-		}
-	} else {		/* Already dequeued */
-		if (urbp->qh->state == QH_STATE_UNLINKING &&
-				uhci->frame_number + uhci->is_stopped !=
-				urbp->qh->unlink_frame)
-			okay_to_giveback = 1;
+		purbp = list_entry(urbp->node.prev, struct urb_priv, node);
+		ptd = list_entry(purbp->td_list.prev, struct uhci_td,
+				list);
+		ltd = list_entry(urbp->td_list.prev, struct uhci_td,
+				list);
+		ptd->link = ltd->link;
 	}
-	spin_unlock(&urb->lock);
-	if (!okay_to_giveback)
-		return;
+
+	/* Take the URB off the QH's queue.  If the queue is now empty,
+	 * this is a perfect time for a toggle fixup. */
+	list_del_init(&urbp->node);
+	if (list_empty(&qh->queue) && qh->needs_fixup) {
+		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
+				usb_pipeout(urb->pipe), qh->initial_toggle);
+		qh->needs_fixup = 0;
+	}
+
+	uhci_dec_fsbr(uhci, urb);	/* Safe since it checks */
+	uhci_free_urb_priv(uhci, urbp);
 
 	switch (usb_pipetype(urb->pipe)) {
 	case PIPE_ISOCHRONOUS:
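
uhci_giveback_urb() drops uhci->lock around usb_hcd_giveback_urb() because the URB's completion handler is allowed to re-enter the driver (for example to resubmit), and the __releases/__acquires annotations document that imbalance for sparse. A minimal model of the unlock-callback-relock shape using POSIX mutexes (illustrative only; the real lock is a spinlock held with interrupts disabled):

	#include <assert.h>
	#include <pthread.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static int completions;

	/* The "completion handler": runs without the driver lock held and is
	 * free to call back into lock-taking driver entry points. */
	static void complete_urb(void)
	{
		pthread_mutex_lock(&lock);	/* e.g. a resubmission path */
		completions++;
		pthread_mutex_unlock(&lock);
	}

	/* Caller holds "lock"; give the URB back without holding it, then
	 * retake it -- the same shape as uhci_giveback_urb(). */
	static void giveback(void)
	{
		pthread_mutex_unlock(&lock);
		complete_urb();
		pthread_mutex_lock(&lock);
	}

	int main(void)
	{
		pthread_mutex_lock(&lock);
		giveback();
		pthread_mutex_unlock(&lock);
		assert(completions == 1);
		return 0;
	}
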
@@ -1277,122 +1245,107 @@ static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb)
 	case PIPE_INTERRUPT:
 		/* Release bandwidth for Interrupt or Isoc. transfers */
 		/* Make sure we don't release if we have a queued URB */
-		if (list_empty(&urbp->qh->queue) && urb->bandwidth)
+		if (list_empty(&qh->queue) && urb->bandwidth)
 			usb_release_bandwidth(urb->dev, urb, 0);
 		else
 			/* bandwidth was passed on to queued URB, */
 			/* so don't let usb_unlink_urb() release it */
 			urb->bandwidth = 0;
-		/* Falls through */
-	case PIPE_BULK:
-		if (status < 0)
-			uhci_fixup_toggles(urb);
-		break;
-	default:	/* PIPE_CONTROL */
 		break;
 	}
 
-	/* Take the URB's TDs off the hardware schedule */
-	uhci_remove_tds_from_schedule(uhci, urb, status);
-
-	/* Take the URB off the QH's queue and see if the QH is now unused */
-	list_del_init(&urbp->node);
-	if (list_empty(&urbp->qh->queue))
-		uhci_unlink_qh(uhci, urbp->qh);
+	spin_unlock(&uhci->lock);
+	usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, regs);
+	spin_lock(&uhci->lock);
 
-	uhci_dec_fsbr(uhci, urb);	/* Safe since it checks */
+	/* If the queue is now empty, we can unlink the QH and give up its
+	 * reserved bandwidth. */
+	if (list_empty(&qh->queue)) {
+		uhci_unlink_qh(uhci, qh);
 
-	/* Queue it for giving back */
-	list_move_tail(&urbp->urb_list, &uhci->complete_list);
+		/* Bandwidth stuff not yet implemented */
+	}
 }
 
 /*
- * Check out the QHs waiting to be fully unlinked
+ * Scan the URBs in a QH's queue
  */
-static void uhci_scan_unlinking_qhs(struct uhci_hcd *uhci)
-{
-	struct uhci_qh *qh, *tmp;
+#define QH_FINISHED_UNLINKING(qh)			\
+		(qh->state == QH_STATE_UNLINKING &&	\
+		 uhci->frame_number + uhci->is_stopped != qh->unlink_frame)
 
-	list_for_each_entry_safe(qh, tmp, &uhci->skel_unlink_qh->node, node) {
-
-		/* If the queue is empty and the QH is fully unlinked then
-		 * it can become IDLE. */
-		if (list_empty(&qh->queue)) {
-			if (uhci->frame_number + uhci->is_stopped !=
-					qh->unlink_frame)
-				uhci_make_qh_idle(uhci, qh);
-
-		/* If none of the QH's URBs have been dequeued then the QH
-		 * should be re-activated. */
-		} else {
-			struct urb_priv *urbp;
-			int any_dequeued = 0;
-
-			list_for_each_entry(urbp, &qh->queue, node) {
-				if (urbp->urb->status != -EINPROGRESS) {
-					any_dequeued = 1;
-					break;
-				}
-			}
-			if (!any_dequeued)
-				uhci_activate_qh(uhci, qh);
-		}
-	}
-}
-
-static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
+static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh,
+		struct pt_regs *regs)
 {
-	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
-	unsigned long flags;
 	struct urb_priv *urbp;
+	struct urb *urb;
+	int status;
 
-	spin_lock_irqsave(&uhci->lock, flags);
-	urbp = urb->hcpriv;
-	if (!urbp)			/* URB was never linked! */
-		goto done;
+	while (!list_empty(&qh->queue)) {
+		urbp = list_entry(qh->queue.next, struct urb_priv, node);
+		urb = urbp->urb;
 
-	/* Remove Isochronous TDs from the frame list ASAP */
-	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
-		uhci_unlink_isochronous_tds(uhci, urb);
-	uhci_unlink_qh(uhci, urbp->qh);
+		switch (usb_pipetype(urb->pipe)) {
+		case PIPE_CONTROL:
+			status = uhci_result_control(uhci, urb);
+			break;
+		case PIPE_ISOCHRONOUS:
+			status = uhci_result_isochronous(uhci, urb);
+			break;
+		default:	/* PIPE_BULK or PIPE_INTERRUPT */
+			status = uhci_result_common(uhci, urb);
+			break;
+		}
+		if (status == -EINPROGRESS)
+			break;
 
-done:
-	spin_unlock_irqrestore(&uhci->lock, flags);
-	return 0;
-}
+		spin_lock(&urb->lock);
+		if (urb->status == -EINPROGRESS)	/* Not dequeued */
+			urb->status = status;
+		else
+			status = -ECONNRESET;
+		spin_unlock(&urb->lock);
 
-static int uhci_fsbr_timeout(struct uhci_hcd *uhci, struct urb *urb)
-{
-	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
-	struct list_head *head;
-	struct uhci_td *td;
-	int count = 0;
+		/* Dequeued but completed URBs can't be given back unless
+		 * the QH is stopped or has finished unlinking. */
+		if (status == -ECONNRESET &&
+				!(qh->is_stopped || QH_FINISHED_UNLINKING(qh)))
+			return;
 
-	uhci_dec_fsbr(uhci, urb);
+		uhci_giveback_urb(uhci, qh, urb, regs);
+		if (qh->is_stopped)
+			break;
+	}
 
-	urbp->fsbr_timeout = 1;
+	/* If the QH is neither stopped nor finished unlinking (normal case),
+	 * our work here is done. */
+ restart:
+	if (!(qh->is_stopped || QH_FINISHED_UNLINKING(qh)))
+		return;
 
-	/*
-	 * Ideally we would want to fix qh->element as well, but it's
-	 * read/write by the HC, so that can introduce a race. It's not
-	 * really worth the hassle
-	 */
+	/* Otherwise give back each of the dequeued URBs */
+	list_for_each_entry(urbp, &qh->queue, node) {
+		urb = urbp->urb;
+		if (urb->status != -EINPROGRESS) {
+			uhci_save_toggle(qh, urb);
+			uhci_giveback_urb(uhci, qh, urb, regs);
+			goto restart;
+		}
+	}
+	qh->is_stopped = 0;
 
-	head = &urbp->td_list;
-	list_for_each_entry(td, head, list) {
-		/*
-		 * Make sure we don't do the last one (since it'll have the
-		 * TERM bit set) as well as we skip every so many TDs to
-		 * make sure it doesn't hog the bandwidth
-		 */
-		if (td->list.next != head && (count % DEPTH_INTERVAL) ==
-				(DEPTH_INTERVAL - 1))
-			td->link |= UHCI_PTR_DEPTH;
-
-		count++;
+	/* There are no more dequeued URBs.  If there are still URBs on the
+	 * queue, the QH can now be re-activated. */
+	if (!list_empty(&qh->queue)) {
+		if (qh->needs_fixup)
+			uhci_fixup_toggles(qh, 0);
+		uhci_activate_qh(uhci, qh);
 	}
 
-	return 0;
+	/* The queue is empty.  The QH can become idle if it is fully
+	 * unlinked. */
+	else if (QH_FINISHED_UNLINKING(qh))
+		uhci_make_qh_idle(uhci, qh);
 }
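
The goto restart in uhci_scan_qh() exists because uhci_giveback_urb() deletes entries from the very queue being walked and drops the lock while doing so, which invalidates any iterator, even the _safe variant; restarting from the head after each giveback sidesteps that. The restart-on-mutation pattern in a compact standalone form (toy queue, not the driver's types):

	#include <assert.h>

	#define NURBS 4

	/* Toy queue: status[i] != 0 marks a dequeued URB to give back. */
	static int status[NURBS] = { 0, -1, 0, -1 };
	static int present[NURBS] = { 1, 1, 1, 1 };
	static int givebacks;

	static void giveback(int i)
	{
		present[i] = 0;		/* removal invalidates any iterator */
		givebacks++;
	}

	int main(void)
	{
	restart:
		for (int i = 0; i < NURBS; i++) {
			if (present[i] && status[i] != 0) {
				giveback(i);
				goto restart;	/* re-walk from the head */
			}
		}
		assert(givebacks == 2);
		return 0;
	}
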
@@ -1406,36 +1359,13 @@ static void uhci_free_pending_tds(struct uhci_hcd *uhci)
 	}
 }
 
-static void
-uhci_finish_urb(struct usb_hcd *hcd, struct urb *urb, struct pt_regs *regs)
-__releases(uhci->lock)
-__acquires(uhci->lock)
-{
-	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
-
-	uhci_free_urb_priv(uhci, (struct urb_priv *) (urb->hcpriv));
-
-	spin_unlock(&uhci->lock);
-	usb_hcd_giveback_urb(hcd, urb, regs);
-	spin_lock(&uhci->lock);
-}
-
-static void uhci_finish_completion(struct uhci_hcd *uhci, struct pt_regs *regs)
-{
-	struct urb_priv *urbp, *tmp;
-
-	list_for_each_entry_safe(urbp, tmp, &uhci->complete_list, urb_list) {
-		struct urb *urb = urbp->urb;
-
-		list_del_init(&urbp->urb_list);
-		uhci_finish_urb(uhci_to_hcd(uhci), urb, regs);
-	}
-}
-
-/* Process events in the schedule, but only in one thread at a time */
+/*
+ * Process events in the schedule, but only in one thread at a time
+ */
 static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
 {
-	struct urb_priv *urbp, *tmp;
+	int i;
+	struct uhci_qh *qh;
 
 	/* Don't allow re-entrant calls */
 	if (uhci->scan_in_progress) {
@@ -1452,26 +1382,24 @@ static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
 	if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age)
 		uhci_free_pending_tds(uhci);
 
-	/* Walk the list of pending URBs to see which ones completed
-	 * (must be _safe because uhci_transfer_result() dequeues URBs) */
-	list_for_each_entry_safe(urbp, tmp, &uhci->urb_list, urb_list) {
-		struct urb *urb = urbp->urb;
-
-		/* Checks the status and does all of the magic necessary */
-		uhci_transfer_result(uhci, urb);
+	/* Go through all the QH queues and process the URBs in each one */
+	for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) {
+		uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
+				struct uhci_qh, node);
+		while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
+			uhci->next_qh = list_entry(qh->node.next,
+					struct uhci_qh, node);
+			uhci_scan_qh(uhci, qh, regs);
+		}
 	}
-	uhci_finish_completion(uhci, regs);
-
-	/* If the controller is stopped, we can finish these off right now */
-	if (uhci->is_stopped)
-		uhci_free_pending_tds(uhci);
 
 	if (uhci->need_rescan)
 		goto rescan;
 	uhci->scan_in_progress = 0;
 
-	/* Check out the QHs waiting for unlinking */
-	uhci_scan_unlinking_qhs(uhci);
+	/* If the controller is stopped, we can finish these off right now */
+	if (uhci->is_stopped)
+		uhci_free_pending_tds(uhci);
 
 	if (list_empty(&uhci->td_remove_list) &&
 			list_empty(&uhci->skel_unlink_qh->node))
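
uhci_scan_schedule() serializes itself: a nested call (for example from interrupt context while a scan already runs) only sets need_rescan and returns, and the outer invocation loops via goto rescan until no further pass is requested. A standalone sketch of that guard (the flag names mirror the driver's; the rest is illustrative):

	#include <assert.h>
	#include <stdbool.h>

	static bool scan_in_progress, need_rescan;
	static int passes;

	static void scan_schedule(void)
	{
		/* A nested call just asks the running scan to loop once more. */
		if (scan_in_progress) {
			need_rescan = true;
			return;
		}
		scan_in_progress = true;
	rescan:
		need_rescan = false;

		passes++;
		if (passes == 1)
			scan_schedule();	/* simulate re-entry mid-scan */

		if (need_rescan)
			goto rescan;
		scan_in_progress = false;
	}

	int main(void)
	{
		scan_schedule();
		assert(passes == 2);	/* the nested request forced a second pass */
		return 0;
	}
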
@@ -1482,19 +1410,8 @@ static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
 
 static void check_fsbr(struct uhci_hcd *uhci)
 {
-	struct urb_priv *up;
-
-	list_for_each_entry(up, &uhci->urb_list, urb_list) {
-		struct urb *u = up->urb;
-
-		spin_lock(&u->lock);
-
-		/* Check if the FSBR timed out */
-		if (up->fsbr && !up->fsbr_timeout && time_after_eq(jiffies, up->fsbrtime + IDLE_TIMEOUT))
-			uhci_fsbr_timeout(uhci, u);
-
-		spin_unlock(&u->lock);
-	}
+	/* For now, don't scan URBs for FSBR timeouts.
+	 * Add it back in later... */
 
 	/* Really disable FSBR */
 	if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies, uhci->fsbrtimeout)) {