Diffstat (limited to 'drivers/usb/host/uhci-q.c')
-rw-r--r--   drivers/usb/host/uhci-q.c | 219
1 file changed, 183 insertions, 36 deletions
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 68e66b33e726..f4ebdb3e488f 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -13,7 +13,7 @@
  * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
  * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
  * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
- * (C) Copyright 2004-2006 Alan Stern, stern@rowland.harvard.edu
+ * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
  */
 
 
@@ -45,15 +45,43 @@ static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
  */
 static void uhci_fsbr_on(struct uhci_hcd *uhci)
 {
+	struct uhci_qh *fsbr_qh, *lqh, *tqh;
+
 	uhci->fsbr_is_on = 1;
-	uhci->skel_term_qh->link = cpu_to_le32(
-			uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
+	lqh = list_entry(uhci->skel_async_qh->node.prev,
+			struct uhci_qh, node);
+
+	/* Find the first FSBR QH. Linear search through the list is
+	 * acceptable because normally FSBR gets turned on as soon as
+	 * one QH needs it. */
+	fsbr_qh = NULL;
+	list_for_each_entry_reverse(tqh, &uhci->skel_async_qh->node, node) {
+		if (tqh->skel < SKEL_FSBR)
+			break;
+		fsbr_qh = tqh;
+	}
+
+	/* No FSBR QH means we must insert the terminating skeleton QH */
+	if (!fsbr_qh) {
+		uhci->skel_term_qh->link = LINK_TO_QH(uhci->skel_term_qh);
+		wmb();
+		lqh->link = uhci->skel_term_qh->link;
+
+	/* Otherwise loop the last QH to the first FSBR QH */
+	} else
+		lqh->link = LINK_TO_QH(fsbr_qh);
 }
 
 static void uhci_fsbr_off(struct uhci_hcd *uhci)
 {
+	struct uhci_qh *lqh;
+
 	uhci->fsbr_is_on = 0;
-	uhci->skel_term_qh->link = UHCI_PTR_TERM;
+	lqh = list_entry(uhci->skel_async_qh->node.prev,
+			struct uhci_qh, node);
+
+	/* End the async list normally and unlink the terminating QH */
+	lqh->link = uhci->skel_term_qh->link = UHCI_PTR_TERM;
 }
 
 static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
@@ -158,11 +186,11 @@ static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
 
 		td->link = ltd->link;
 		wmb();
-		ltd->link = cpu_to_le32(td->dma_handle);
+		ltd->link = LINK_TO_TD(td);
 	} else {
 		td->link = uhci->frame[framenum];
 		wmb();
-		uhci->frame[framenum] = cpu_to_le32(td->dma_handle);
+		uhci->frame[framenum] = LINK_TO_TD(td);
 		uhci->frame_cpu[framenum] = td;
 	}
 }
@@ -184,7 +212,7 @@ static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
 			struct uhci_td *ntd;
 
 			ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
-			uhci->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
+			uhci->frame[td->frame] = LINK_TO_TD(ntd);
 			uhci->frame_cpu[td->frame] = ntd;
 		}
 	} else {
@@ -405,12 +433,81 @@ static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
 }
 
 /*
- * Put a QH on the schedule in both hardware and software
+ * Link an Isochronous QH into its skeleton's list
  */
-static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
+static inline void link_iso(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	list_add_tail(&qh->node, &uhci->skel_iso_qh->node);
+
+	/* Isochronous QHs aren't linked by the hardware */
+}
+
+/*
+ * Link a high-period interrupt QH into the schedule at the end of its
+ * skeleton's list
+ */
+static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
 {
 	struct uhci_qh *pqh;
 
+	list_add_tail(&qh->node, &uhci->skelqh[qh->skel]->node);
+
+	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
+	qh->link = pqh->link;
+	wmb();
+	pqh->link = LINK_TO_QH(qh);
+}
+
+/*
+ * Link a period-1 interrupt or async QH into the schedule at the
+ * correct spot in the async skeleton's list, and update the FSBR link
+ */
+static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	struct uhci_qh *pqh, *lqh;
+	__le32 link_to_new_qh;
+	__le32 *extra_link = &link_to_new_qh;
+
+	/* Find the predecessor QH for our new one and insert it in the list.
+	 * The list of QHs is expected to be short, so linear search won't
+	 * take too long. */
+	list_for_each_entry_reverse(pqh, &uhci->skel_async_qh->node, node) {
+		if (pqh->skel <= qh->skel)
+			break;
+	}
+	list_add(&qh->node, &pqh->node);
+	qh->link = pqh->link;
+
+	link_to_new_qh = LINK_TO_QH(qh);
+
+	/* If this is now the first FSBR QH, take special action */
+	if (uhci->fsbr_is_on && pqh->skel < SKEL_FSBR &&
+			qh->skel >= SKEL_FSBR) {
+		lqh = list_entry(uhci->skel_async_qh->node.prev,
+				struct uhci_qh, node);
+
+		/* If the new QH is also the last one, we must unlink
+		 * the terminating skeleton QH and make the new QH point
+		 * back to itself. */
+		if (qh == lqh) {
+			qh->link = link_to_new_qh;
+			extra_link = &uhci->skel_term_qh->link;
+
+		/* Otherwise the last QH must point to the new QH */
+		} else
+			extra_link = &lqh->link;
+	}
+
+	/* Link it into the schedule */
+	wmb();
+	*extra_link = pqh->link = link_to_new_qh;
+}
+
+/*
+ * Put a QH on the schedule in both hardware and software
+ */
+static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
 	WARN_ON(list_empty(&qh->queue));
 
 	/* Set the element pointer if it isn't set already.
@@ -421,7 +518,7 @@ static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
 		struct uhci_td *td = list_entry(urbp->td_list.next,
 				struct uhci_td, list);
 
-		qh->element = cpu_to_le32(td->dma_handle);
+		qh->element = LINK_TO_TD(td);
 	}
 
 	/* Treat the queue as if it has just advanced */
@@ -432,18 +529,64 @@ static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
 		return;
 	qh->state = QH_STATE_ACTIVE;
 
-	/* Move the QH from its old list to the end of the appropriate
+	/* Move the QH from its old list to the correct spot in the appropriate
 	 * skeleton's list */
 	if (qh == uhci->next_qh)
 		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
 				node);
-	list_move_tail(&qh->node, &qh->skel->node);
+	list_del(&qh->node);
+
+	if (qh->skel == SKEL_ISO)
+		link_iso(uhci, qh);
+	else if (qh->skel < SKEL_ASYNC)
+		link_interrupt(uhci, qh);
+	else
+		link_async(uhci, qh);
+}
+
+/*
+ * Unlink a high-period interrupt QH from the schedule
+ */
+static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	struct uhci_qh *pqh;
 
-	/* Link it into the schedule */
 	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
-	qh->link = pqh->link;
-	wmb();
-	pqh->link = UHCI_PTR_QH | cpu_to_le32(qh->dma_handle);
+	pqh->link = qh->link;
+	mb();
+}
+
+/*
+ * Unlink a period-1 interrupt or async QH from the schedule
+ */
+static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	struct uhci_qh *pqh, *lqh;
+	__le32 link_to_next_qh = qh->link;
+
+	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
+
+	/* If this is the first FSBR QH, take special action */
+	if (uhci->fsbr_is_on && pqh->skel < SKEL_FSBR &&
+			qh->skel >= SKEL_FSBR) {
+		lqh = list_entry(uhci->skel_async_qh->node.prev,
+				struct uhci_qh, node);
+
+		/* If this QH is also the last one, we must link in
+		 * the terminating skeleton QH. */
+		if (qh == lqh) {
+			link_to_next_qh = LINK_TO_QH(uhci->skel_term_qh);
+			uhci->skel_term_qh->link = link_to_next_qh;
+			wmb();
+			qh->link = link_to_next_qh;
+
+		/* Otherwise the last QH must point to the new first FSBR QH */
+		} else
+			lqh->link = link_to_next_qh;
+	}
+
+	pqh->link = link_to_next_qh;
+	mb();
 }
 
 /*
@@ -451,17 +594,18 @@ static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
  */
 static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
 {
-	struct uhci_qh *pqh;
-
 	if (qh->state == QH_STATE_UNLINKING)
 		return;
 	WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
 	qh->state = QH_STATE_UNLINKING;
 
 	/* Unlink the QH from the schedule and record when we did it */
-	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
-	pqh->link = qh->link;
-	mb();
+	if (qh->skel == SKEL_ISO)
+		;
+	else if (qh->skel < SKEL_ASYNC)
+		unlink_interrupt(uhci, qh);
+	else
+		unlink_async(uhci, qh);
 
 	uhci_get_current_frame_number(uhci);
 	qh->unlink_frame = uhci->frame_number;
@@ -697,6 +841,7 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
 	dma_addr_t data = urb->transfer_dma;
 	__le32 *plink;
 	struct urb_priv *urbp = urb->hcpriv;
+	int skel;
 
 	/* The "pipe" thing contains the destination in bits 8--18 */
 	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;
@@ -737,7 +882,7 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
 		td = uhci_alloc_td(uhci);
 		if (!td)
 			goto nomem;
-		*plink = cpu_to_le32(td->dma_handle);
+		*plink = LINK_TO_TD(td);
 
 		/* Alternate Data0/1 (start with Data1) */
 		destination ^= TD_TOKEN_TOGGLE;
@@ -757,7 +902,7 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
 	td = uhci_alloc_td(uhci);
 	if (!td)
 		goto nomem;
-	*plink = cpu_to_le32(td->dma_handle);
+	*plink = LINK_TO_TD(td);
 
 	/*
 	 * It's IN if the pipe is an output pipe or we're not expecting
@@ -784,7 +929,7 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
 	td = uhci_alloc_td(uhci);
 	if (!td)
 		goto nomem;
-	*plink = cpu_to_le32(td->dma_handle);
+	*plink = LINK_TO_TD(td);
 
 	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
 	wmb();
@@ -797,11 +942,13 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
 	 * isn't in the CONFIGURED state. */
 	if (urb->dev->speed == USB_SPEED_LOW ||
 			urb->dev->state != USB_STATE_CONFIGURED)
-		qh->skel = uhci->skel_ls_control_qh;
+		skel = SKEL_LS_CONTROL;
 	else {
-		qh->skel = uhci->skel_fs_control_qh;
+		skel = SKEL_FS_CONTROL;
 		uhci_add_fsbr(uhci, urb);
 	}
+	if (qh->state != QH_STATE_ACTIVE)
+		qh->skel = skel;
 
 	urb->actual_length = -8;	/* Account for the SETUP packet */
 	return 0;
@@ -860,7 +1007,7 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
 			td = uhci_alloc_td(uhci);
 			if (!td)
 				goto nomem;
-			*plink = cpu_to_le32(td->dma_handle);
+			*plink = LINK_TO_TD(td);
 		}
 		uhci_add_td_to_urbp(td, urbp);
 		uhci_fill_td(td, status,
@@ -888,7 +1035,7 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
 		td = uhci_alloc_td(uhci);
 		if (!td)
 			goto nomem;
-		*plink = cpu_to_le32(td->dma_handle);
+		*plink = LINK_TO_TD(td);
 
 		uhci_add_td_to_urbp(td, urbp);
 		uhci_fill_td(td, status,
@@ -914,7 +1061,7 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
 	td = uhci_alloc_td(uhci);
 	if (!td)
 		goto nomem;
-	*plink = cpu_to_le32(td->dma_handle);
+	*plink = LINK_TO_TD(td);
 
 	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
 	wmb();
@@ -931,7 +1078,7 @@ nomem:
 	return -ENOMEM;
 }
 
-static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
+static int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
 		struct uhci_qh *qh)
 {
 	int ret;
@@ -940,7 +1087,8 @@ static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
 	if (urb->dev->speed == USB_SPEED_LOW)
 		return -EINVAL;
 
-	qh->skel = uhci->skel_bulk_qh;
+	if (qh->state != QH_STATE_ACTIVE)
+		qh->skel = SKEL_BULK;
 	ret = uhci_submit_common(uhci, urb, qh);
 	if (ret == 0)
 		uhci_add_fsbr(uhci, urb);
@@ -968,7 +1116,7 @@ static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
 		if (exponent < 0)
 			return -EINVAL;
 		qh->period = 1 << exponent;
-		qh->skel = uhci->skelqh[UHCI_SKEL_INDEX(exponent)];
+		qh->skel = SKEL_INDEX(exponent);
 
 		/* For now, interrupt phase is fixed by the layout
 		 * of the QH lists. */
@@ -1005,7 +1153,7 @@ static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
 		 * the queue at the status stage transaction, which is
 		 * the last TD. */
 		WARN_ON(list_empty(&urbp->td_list));
-		qh->element = cpu_to_le32(td->dma_handle);
+		qh->element = LINK_TO_TD(td);
 		tmp = td->list.prev;
 		ret = -EINPROGRESS;
 
@@ -1216,7 +1364,7 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
 		qh->iso_status = 0;
 	}
 
-	qh->skel = uhci->skel_iso_qh;
+	qh->skel = SKEL_ISO;
 	if (!qh->bandwidth_reserved)
 		uhci_reserve_bandwidth(uhci, qh);
 	return 0;
@@ -1566,8 +1714,7 @@ static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
 	if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) {
 
 		/* Detect the Intel bug and work around it */
-		if (qh->post_td && qh_element(qh) ==
-				cpu_to_le32(qh->post_td->dma_handle)) {
+		if (qh->post_td && qh_element(qh) == LINK_TO_TD(qh->post_td)) {
 			qh->element = qh->post_td->link;
 			qh->advance_jiffies = jiffies;
 			ret = 1;
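
Note: the LINK_TO_TD() and LINK_TO_QH() helpers used throughout this diff are not defined in uhci-q.c; they presumably live in drivers/usb/host/uhci-hcd.h alongside UHCI_PTR_TERM and UHCI_PTR_QH. As a rough, standalone sketch (simplified stand-in types, not the driver's actual definitions), they encode a TD's or QH's DMA address into a UHCI link pointer, where bit 0 is the terminate flag and bit 1 selects QH versus TD:

/* Illustrative sketch only: demo_td/demo_qh are made-up stand-ins.
 * The real driver works with __le32 values and dma_addr_t handles. */
#include <stdint.h>
#include <stdio.h>

#define UHCI_PTR_TERM	0x0001u		/* bit 0: terminate (end of list) */
#define UHCI_PTR_QH	0x0002u		/* bit 1: set = next entry is a QH, clear = a TD */

struct demo_td { uint32_t dma_handle; };
struct demo_qh { uint32_t dma_handle; };

/* Stand-ins for LINK_TO_TD()/LINK_TO_QH(): a TD link is just the DMA
 * address; a QH link additionally sets the QH-select bit. */
#define LINK_TO_TD(td)	((td)->dma_handle)
#define LINK_TO_QH(qh)	((qh)->dma_handle | UHCI_PTR_QH)

int main(void)
{
	struct demo_td td = { .dma_handle = 0x12340000u };
	struct demo_qh qh = { .dma_handle = 0x56780000u };

	printf("TD link pointer: %#010x\n", LINK_TO_TD(&td));	/* plain DMA address */
	printf("QH link pointer: %#010x\n", LINK_TO_QH(&qh));	/* DMA address | QH bit */
	printf("list terminator: %#010x\n", UHCI_PTR_TERM);
	return 0;
}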