Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
 -rw-r--r--  drivers/usb/host/xhci-ring.c | 42 +++++++++++++++++-------------------------
 1 file changed, 17 insertions(+), 25 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 70cacbbe7fb9..7113d16e2d3a 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -113,15 +113,13 @@ static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	if (ring == xhci->event_ring)
 		return trb == &seg->trbs[TRBS_PER_SEGMENT];
 	else
-		return (le32_to_cpu(trb->link.control) & TRB_TYPE_BITMASK)
-			== TRB_TYPE(TRB_LINK);
+		return TRB_TYPE_LINK_LE32(trb->link.control);
 }
 
 static int enqueue_is_link_trb(struct xhci_ring *ring)
 {
 	struct xhci_link_trb *link = &ring->enqueue->link;
-	return ((le32_to_cpu(link->control) & TRB_TYPE_BITMASK) ==
-		TRB_TYPE(TRB_LINK));
+	return TRB_TYPE_LINK_LE32(link->control);
 }
 
 /* Updates trb to point to the next TRB in the ring, and updates seg if the next
@@ -372,7 +370,7 @@ static struct xhci_segment *find_trb_seg(
 	while (cur_seg->trbs > trb ||
 			&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
 		generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
-		if (le32_to_cpu(generic_trb->field[3]) & LINK_TOGGLE)
+		if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE))
 			*cycle_state ^= 0x1;
 		cur_seg = cur_seg->next;
 		if (cur_seg == start_seg)
@@ -489,8 +487,8 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 	}
 
 	trb = &state->new_deq_ptr->generic;
-	if ((le32_to_cpu(trb->field[3]) & TRB_TYPE_BITMASK) ==
-	    TRB_TYPE(TRB_LINK) && (le32_to_cpu(trb->field[3]) & LINK_TOGGLE))
+	if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
+	    (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
 		state->new_cycle_state ^= 0x1;
 	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
 
@@ -525,8 +523,7 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
 			true;
 			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
-		if ((le32_to_cpu(cur_trb->generic.field[3]) & TRB_TYPE_BITMASK)
-				== TRB_TYPE(TRB_LINK)) {
+		if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
 			/* Unchain any chained Link TRBs, but
 			 * leave the pointers intact.
 			 */
@@ -1000,7 +997,7 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
 	 * but we don't care.
 	 */
 	xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
-		 (unsigned int) GET_COMP_CODE(le32_to_cpu(event->status)));
+		 GET_COMP_CODE(le32_to_cpu(event->status)));
 
 	/* HW with the reset endpoint quirk needs to have a configure endpoint
 	 * command complete before the endpoint can be used. Queue that here
@@ -1458,7 +1455,8 @@ static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
 	 * endpoint anyway. Check if a babble halted the
 	 * endpoint.
 	 */
-	if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == EP_STATE_HALTED)
+	if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
+	    cpu_to_le32(EP_STATE_HALTED))
 		return 1;
 
 	return 0;
@@ -1753,10 +1751,8 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		for (cur_trb = ep_ring->dequeue,
 		     cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
 		     next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
-			if ((le32_to_cpu(cur_trb->generic.field[3]) &
-			     TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
-			    (le32_to_cpu(cur_trb->generic.field[3]) &
-			     TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
+			if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
+			    !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
 				len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
 		}
 		len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
@@ -1885,10 +1881,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
 				cur_trb != event_trb;
 				next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
-			if ((le32_to_cpu(cur_trb->generic.field[3]) &
-			     TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
-			    (le32_to_cpu(cur_trb->generic.field[3]) &
-			     TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
+			if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
+			    !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
 				td->urb->actual_length +=
 					TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
 		}
@@ -2047,8 +2041,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 					 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
 					 ep_index);
 			xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
-				 (unsigned int) (le32_to_cpu(event->flags)
-						 & TRB_TYPE_BITMASK)>>10);
+				 (le32_to_cpu(event->flags) &
+				  TRB_TYPE_BITMASK)>>10);
 			xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
 			if (ep->skip) {
 				ep->skip = false;
@@ -2119,9 +2113,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			 * corresponding TD has been cancelled. Just ignore
 			 * the TD.
 			 */
-			if ((le32_to_cpu(event_trb->generic.field[3])
-				 & TRB_TYPE_BITMASK)
-				 == TRB_TYPE(TRB_TR_NOOP)) {
+			if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
 				xhci_dbg(xhci,
 					"event_trb is a no-op TRB. Skip it\n");
 				goto cleanup;
@@ -2452,7 +2444,7 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 			next->link.control |= cpu_to_le32(TRB_CHAIN);
 
 			wmb();
-			next->link.control ^= cpu_to_le32((u32) TRB_CYCLE);
+			next->link.control ^= cpu_to_le32(TRB_CYCLE);
 
 			/* Toggle the cycle bit after the last ring segment. */
 			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
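For reference, the TRB_TYPE_LINK_LE32() and TRB_TYPE_NOOP_LE32() helpers this patch switches to test the TRB type field directly on the little-endian control word rather than byte-swapping it first. A minimal sketch of how such helpers can be written, assuming the TRB_TYPE_BITMASK, TRB_TYPE(), TRB_LINK and TRB_TR_NOOP definitions already in drivers/usb/host/xhci.h (the exact definitions in that header may differ):

/* Sketch: compare the TRB type on a __le32 value without converting it to
 * CPU order; the mask and the expected type are byte-swapped once at
 * compile time by cpu_to_le32() instead of swapping the live ring data.
 */
#define TRB_TYPE_LINK_LE32(x)	(((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
				 cpu_to_le32(TRB_TYPE(TRB_LINK)))
#define TRB_TYPE_NOOP_LE32(x)	(((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
				 cpu_to_le32(TRB_TYPE(TRB_TR_NOOP)))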