Diffstat (limited to 'drivers/usb/host/uhci-q.c')
-rw-r--r-- | drivers/usb/host/uhci-q.c | 191
1 files changed, 150 insertions, 41 deletions
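
Note on helpers used below: the new code replaces open-coded link-pointer expressions with LINK_TO_TD()/LINK_TO_QH(), and turns qh->skel from a pointer to a skeleton QH into a small integer index (SKEL_ISO, SKEL_LS_CONTROL, SKEL_FS_CONTROL, SKEL_BULK, SKEL_ASYNC, SKEL_FSBR, SKEL_INDEX(exponent)) used to index uhci->skelqh[] and to compare priorities. Those definitions live in uhci-hcd.h and are not part of this diff; judging only from the one-for-one substitutions visible below, the link macros would expand roughly as follows (a sketch, not the authoritative definitions):

	/* Sketch inferred from the substitutions in this diff; the real
	 * definitions are in drivers/usb/host/uhci-hcd.h. */
	#define LINK_TO_TD(td)		(cpu_to_le32((td)->dma_handle))
	#define LINK_TO_QH(qh)		(UHCI_PTR_QH | cpu_to_le32((qh)->dma_handle))
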
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 68e66b33e726..4aed305982ec 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -13,7 +13,7 @@
  * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
  * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
  * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
- * (C) Copyright 2004-2006 Alan Stern, stern@rowland.harvard.edu
+ * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
  */
 
 
@@ -45,15 +45,27 @@ static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
  */
 static void uhci_fsbr_on(struct uhci_hcd *uhci)
 {
+	struct uhci_qh *lqh;
+
+	/* The terminating skeleton QH always points back to the first
+	 * FSBR QH. Make the last async QH point to the terminating
+	 * skeleton QH. */
 	uhci->fsbr_is_on = 1;
-	uhci->skel_term_qh->link = cpu_to_le32(
-			uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
+	lqh = list_entry(uhci->skel_async_qh->node.prev,
+			struct uhci_qh, node);
+	lqh->link = LINK_TO_QH(uhci->skel_term_qh);
 }
 
 static void uhci_fsbr_off(struct uhci_hcd *uhci)
 {
+	struct uhci_qh *lqh;
+
+	/* Remove the link from the last async QH to the terminating
+	 * skeleton QH. */
 	uhci->fsbr_is_on = 0;
-	uhci->skel_term_qh->link = UHCI_PTR_TERM;
+	lqh = list_entry(uhci->skel_async_qh->node.prev,
+			struct uhci_qh, node);
+	lqh->link = UHCI_PTR_TERM;
 }
 
 static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
@@ -111,10 +123,14 @@ static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
 
 static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
 {
-	if (!list_empty(&td->list))
+	if (!list_empty(&td->list)) {
 		dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
-	if (!list_empty(&td->fl_list))
+		WARN_ON(1);
+	}
+	if (!list_empty(&td->fl_list)) {
 		dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);
+		WARN_ON(1);
+	}
 
 	dma_pool_free(uhci->td_pool, td, td->dma_handle);
 }
@@ -158,11 +174,11 @@ static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
 
 		td->link = ltd->link;
 		wmb();
-		ltd->link = cpu_to_le32(td->dma_handle);
+		ltd->link = LINK_TO_TD(td);
 	} else {
 		td->link = uhci->frame[framenum];
 		wmb();
-		uhci->frame[framenum] = cpu_to_le32(td->dma_handle);
+		uhci->frame[framenum] = LINK_TO_TD(td);
 		uhci->frame_cpu[framenum] = td;
 	}
 }
@@ -184,7 +200,7 @@ static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
 			struct uhci_td *ntd;
 
 			ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
-			uhci->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
+			uhci->frame[td->frame] = LINK_TO_TD(ntd);
 			uhci->frame_cpu[td->frame] = ntd;
 		}
 	} else {
@@ -279,8 +295,10 @@ static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
 static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
 {
 	WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
-	if (!list_empty(&qh->queue))
+	if (!list_empty(&qh->queue)) {
 		dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);
+		WARN_ON(1);
+	}
 
 	list_del(&qh->node);
 	if (qh->udev) {
@@ -405,12 +423,66 @@ static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
 }
 
 /*
- * Put a QH on the schedule in both hardware and software
+ * Link an Isochronous QH into its skeleton's list
  */
-static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
+static inline void link_iso(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	list_add_tail(&qh->node, &uhci->skel_iso_qh->node);
+
+	/* Isochronous QHs aren't linked by the hardware */
+}
+
+/*
+ * Link a high-period interrupt QH into the schedule at the end of its
+ * skeleton's list
+ */
+static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
 {
 	struct uhci_qh *pqh;
 
+	list_add_tail(&qh->node, &uhci->skelqh[qh->skel]->node);
+
+	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
+	qh->link = pqh->link;
+	wmb();
+	pqh->link = LINK_TO_QH(qh);
+}
+
+/*
+ * Link a period-1 interrupt or async QH into the schedule at the
+ * correct spot in the async skeleton's list, and update the FSBR link
+ */
+static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	struct uhci_qh *pqh;
+	__le32 link_to_new_qh;
+
+	/* Find the predecessor QH for our new one and insert it in the list.
+	 * The list of QHs is expected to be short, so linear search won't
+	 * take too long. */
+	list_for_each_entry_reverse(pqh, &uhci->skel_async_qh->node, node) {
+		if (pqh->skel <= qh->skel)
+			break;
+	}
+	list_add(&qh->node, &pqh->node);
+
+	/* Link it into the schedule */
+	qh->link = pqh->link;
+	wmb();
+	link_to_new_qh = LINK_TO_QH(qh);
+	pqh->link = link_to_new_qh;
+
+	/* If this is now the first FSBR QH, link the terminating skeleton
+	 * QH to it. */
+	if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
+		uhci->skel_term_qh->link = link_to_new_qh;
+}
+
+/*
+ * Put a QH on the schedule in both hardware and software
+ */
+static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
 	WARN_ON(list_empty(&qh->queue));
 
 	/* Set the element pointer if it isn't set already.
@@ -421,7 +493,7 @@ static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
 		struct uhci_td *td = list_entry(urbp->td_list.next,
 				struct uhci_td, list);
 
-		qh->element = cpu_to_le32(td->dma_handle);
+		qh->element = LINK_TO_TD(td);
 	}
 
 	/* Treat the queue as if it has just advanced */
@@ -432,36 +504,68 @@ static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
 		return;
 	qh->state = QH_STATE_ACTIVE;
 
-	/* Move the QH from its old list to the end of the appropriate
+	/* Move the QH from its old list to the correct spot in the appropriate
 	 * skeleton's list */
 	if (qh == uhci->next_qh)
 		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
 				node);
-	list_move_tail(&qh->node, &qh->skel->node);
+	list_del(&qh->node);
+
+	if (qh->skel == SKEL_ISO)
+		link_iso(uhci, qh);
+	else if (qh->skel < SKEL_ASYNC)
+		link_interrupt(uhci, qh);
+	else
+		link_async(uhci, qh);
+}
+
+/*
+ * Unlink a high-period interrupt QH from the schedule
+ */
+static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	struct uhci_qh *pqh;
 
-	/* Link it into the schedule */
 	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
-	qh->link = pqh->link;
-	wmb();
-	pqh->link = UHCI_PTR_QH | cpu_to_le32(qh->dma_handle);
+	pqh->link = qh->link;
+	mb();
 }
 
 /*
- * Take a QH off the hardware schedule
+ * Unlink a period-1 interrupt or async QH from the schedule
  */
-static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
+static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
 {
 	struct uhci_qh *pqh;
+	__le32 link_to_next_qh = qh->link;
 
+	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
+	pqh->link = link_to_next_qh;
+
+	/* If this was the old first FSBR QH, link the terminating skeleton
+	 * QH to the next (new first FSBR) QH. */
+	if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
+		uhci->skel_term_qh->link = link_to_next_qh;
+	mb();
+}
+
+/*
+ * Take a QH off the hardware schedule
+ */
+static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
 	if (qh->state == QH_STATE_UNLINKING)
 		return;
 	WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
 	qh->state = QH_STATE_UNLINKING;
 
 	/* Unlink the QH from the schedule and record when we did it */
-	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
-	pqh->link = qh->link;
-	mb();
+	if (qh->skel == SKEL_ISO)
+		;
+	else if (qh->skel < SKEL_ASYNC)
+		unlink_interrupt(uhci, qh);
+	else
+		unlink_async(uhci, qh);
 
 	uhci_get_current_frame_number(uhci);
 	qh->unlink_frame = uhci->frame_number;
@@ -642,9 +746,11 @@ static void uhci_free_urb_priv(struct uhci_hcd *uhci,
 {
 	struct uhci_td *td, *tmp;
 
-	if (!list_empty(&urbp->node))
+	if (!list_empty(&urbp->node)) {
 		dev_warn(uhci_dev(uhci), "urb %p still on QH's list!\n",
 				urbp->urb);
+		WARN_ON(1);
+	}
 
 	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
 		uhci_remove_td_from_urbp(td);
@@ -697,6 +803,7 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
 	dma_addr_t data = urb->transfer_dma;
 	__le32 *plink;
 	struct urb_priv *urbp = urb->hcpriv;
+	int skel;
 
 	/* The "pipe" thing contains the destination in bits 8--18 */
 	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;
@@ -737,7 +844,7 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
 		td = uhci_alloc_td(uhci);
 		if (!td)
 			goto nomem;
-		*plink = cpu_to_le32(td->dma_handle);
+		*plink = LINK_TO_TD(td);
 
 		/* Alternate Data0/1 (start with Data1) */
 		destination ^= TD_TOKEN_TOGGLE;
@@ -757,7 +864,7 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
 	td = uhci_alloc_td(uhci);
 	if (!td)
 		goto nomem;
-	*plink = cpu_to_le32(td->dma_handle);
+	*plink = LINK_TO_TD(td);
 
 	/*
 	 * It's IN if the pipe is an output pipe or we're not expecting
@@ -784,7 +891,7 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
 	td = uhci_alloc_td(uhci);
 	if (!td)
 		goto nomem;
-	*plink = cpu_to_le32(td->dma_handle);
+	*plink = LINK_TO_TD(td);
 
 	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
 	wmb();
@@ -797,11 +904,13 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
 	 * isn't in the CONFIGURED state. */
 	if (urb->dev->speed == USB_SPEED_LOW ||
 			urb->dev->state != USB_STATE_CONFIGURED)
-		qh->skel = uhci->skel_ls_control_qh;
+		skel = SKEL_LS_CONTROL;
 	else {
-		qh->skel = uhci->skel_fs_control_qh;
+		skel = SKEL_FS_CONTROL;
 		uhci_add_fsbr(uhci, urb);
 	}
+	if (qh->state != QH_STATE_ACTIVE)
+		qh->skel = skel;
 
 	urb->actual_length = -8;	/* Account for the SETUP packet */
 	return 0;
@@ -860,7 +969,7 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
 			td = uhci_alloc_td(uhci);
 			if (!td)
 				goto nomem;
-			*plink = cpu_to_le32(td->dma_handle);
+			*plink = LINK_TO_TD(td);
 		}
 		uhci_add_td_to_urbp(td, urbp);
 		uhci_fill_td(td, status,
@@ -888,7 +997,7 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
 		td = uhci_alloc_td(uhci);
 		if (!td)
 			goto nomem;
-		*plink = cpu_to_le32(td->dma_handle);
+		*plink = LINK_TO_TD(td);
 
 		uhci_add_td_to_urbp(td, urbp);
 		uhci_fill_td(td, status,
@@ -914,7 +1023,7 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
 	td = uhci_alloc_td(uhci);
 	if (!td)
 		goto nomem;
-	*plink = cpu_to_le32(td->dma_handle);
+	*plink = LINK_TO_TD(td);
 
 	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
 	wmb();
@@ -931,7 +1040,7 @@ nomem:
 	return -ENOMEM;
 }
 
-static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
+static int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
 		struct uhci_qh *qh)
 {
 	int ret;
@@ -940,7 +1049,8 @@ static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
 	if (urb->dev->speed == USB_SPEED_LOW)
 		return -EINVAL;
 
-	qh->skel = uhci->skel_bulk_qh;
+	if (qh->state != QH_STATE_ACTIVE)
+		qh->skel = SKEL_BULK;
 	ret = uhci_submit_common(uhci, urb, qh);
 	if (ret == 0)
 		uhci_add_fsbr(uhci, urb);
@@ -968,7 +1078,7 @@ static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
 	if (exponent < 0)
 		return -EINVAL;
 	qh->period = 1 << exponent;
-	qh->skel = uhci->skelqh[UHCI_SKEL_INDEX(exponent)];
+	qh->skel = SKEL_INDEX(exponent);
 
 	/* For now, interrupt phase is fixed by the layout
 	 * of the QH lists. */
@@ -1005,7 +1115,7 @@ static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
 		 * the queue at the status stage transaction, which is
 		 * the last TD. */
 		WARN_ON(list_empty(&urbp->td_list));
-		qh->element = cpu_to_le32(td->dma_handle);
+		qh->element = LINK_TO_TD(td);
 		tmp = td->list.prev;
 		ret = -EINPROGRESS;
 
@@ -1069,7 +1179,7 @@ static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
 
 		if (debug > 1 && errbuf) {
 			/* Print the chain for debugging */
-			uhci_show_qh(urbp->qh, errbuf,
+			uhci_show_qh(uhci, urbp->qh, errbuf,
 					ERRBUF_LEN, 0);
 			lprintk(errbuf);
 		}
@@ -1216,7 +1326,7 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
 		qh->iso_status = 0;
 	}
 
-	qh->skel = uhci->skel_iso_qh;
+	qh->skel = SKEL_ISO;
 	if (!qh->bandwidth_reserved)
 		uhci_reserve_bandwidth(uhci, qh);
 	return 0;
@@ -1566,8 +1676,7 @@ static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
 	if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) {
 
 		/* Detect the Intel bug and work around it */
-		if (qh->post_td && qh_element(qh) ==
-				cpu_to_le32(qh->post_td->dma_handle)) {
+		if (qh->post_td && qh_element(qh) == LINK_TO_TD(qh->post_td)) {
 			qh->element = qh->post_td->link;
 			qh->advance_jiffies = jiffies;
 			ret = 1;