path: root/drivers/usb
author     Matt Evans <matt@ozlabs.org>  2011-05-31 20:22:55 -0400
committer  Sarah Sharp <sarah.a.sharp@linux.intel.com>  2011-06-02 19:37:47 -0400
commit     f5960b698eb50a39fce1a066dc19a6a5a1148e16 (patch)
tree       d1f08656be5ffd40f4268d874801f91016508e3e /drivers/usb
parent     55922c9d1b84b89cb946c777fddccb3247e7df2c (diff)
xhci: Remove some unnecessary casts and tidy some endian swap code
Some of the recently-added cpu_to_leXX and leXX_to_cpu made things somewhat
messy; this patch neatens some of these areas, removing unnecessary casts in
those parts also.

In some places (where Y & Z are constants) a comparison of

	(leXX_to_cpu(X) & Y) == Z

has been replaced with

	(X & cpu_to_leXX(Y)) == cpu_to_leXX(Z)

The endian reversal of the constants should wash out at compile time.

Signed-off-by: Matt Evans <matt@ozlabs.org>
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
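As an aside (not part of the commit), a minimal sketch of the idea behind the
new TRB_TYPE_LINK_LE32() macro added in xhci.h below. The helper name
trb_is_link_sketch is made up purely for illustration; it assumes kernel
context where cpu_to_le32()/le32_to_cpu() and the TRB_TYPE* definitions are
available.

	/* Hypothetical helper, mirroring the new TRB_TYPE_LINK_LE32() macro. */
	static inline int trb_is_link_sketch(__le32 control)
	{
		/* Old form: byte-swap the runtime field, compare host-order constants. */
		/* return (le32_to_cpu(control) & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK); */

		/* New form: byte-swap the constants instead. cpu_to_le32() of a
		 * constant folds at compile time, so big-endian builds avoid a
		 * runtime swab while little-endian builds are unchanged.
		 */
		return (control & cpu_to_le32(TRB_TYPE_BITMASK)) ==
		       cpu_to_le32(TRB_TYPE(TRB_LINK));
	}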
Diffstat (limited to 'drivers/usb')
-rw-r--r--  drivers/usb/host/xhci-dbg.c   | 22
-rw-r--r--  drivers/usb/host/xhci-mem.c   | 26
-rw-r--r--  drivers/usb/host/xhci-ring.c  | 42
-rw-r--r--  drivers/usb/host/xhci.c       | 10
-rw-r--r--  drivers/usb/host/xhci.h       |  7
5 files changed, 52 insertions(+), 55 deletions(-)
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 2e0486178dbe..17d3e359ca62 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -266,11 +266,11 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
 		xhci_dbg(xhci, "Interrupter target = 0x%x\n",
 			 GET_INTR_TARGET(le32_to_cpu(trb->link.intr_target)));
 		xhci_dbg(xhci, "Cycle bit = %u\n",
-			 (unsigned int) (le32_to_cpu(trb->link.control) & TRB_CYCLE));
+			 le32_to_cpu(trb->link.control) & TRB_CYCLE);
 		xhci_dbg(xhci, "Toggle cycle bit = %u\n",
-			 (unsigned int) (le32_to_cpu(trb->link.control) & LINK_TOGGLE));
+			 le32_to_cpu(trb->link.control) & LINK_TOGGLE);
 		xhci_dbg(xhci, "No Snoop bit = %u\n",
-			 (unsigned int) (le32_to_cpu(trb->link.control) & TRB_NO_SNOOP));
+			 le32_to_cpu(trb->link.control) & TRB_NO_SNOOP);
 		break;
 	case TRB_TYPE(TRB_TRANSFER):
 		address = le64_to_cpu(trb->trans_event.buffer);
@@ -284,9 +284,9 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
 		address = le64_to_cpu(trb->event_cmd.cmd_trb);
 		xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
 		xhci_dbg(xhci, "Completion status = %u\n",
-			 (unsigned int) GET_COMP_CODE(le32_to_cpu(trb->event_cmd.status)));
+			 GET_COMP_CODE(le32_to_cpu(trb->event_cmd.status)));
 		xhci_dbg(xhci, "Flags = 0x%x\n",
-			 (unsigned int) le32_to_cpu(trb->event_cmd.flags));
+			 le32_to_cpu(trb->event_cmd.flags));
 		break;
 	default:
 		xhci_dbg(xhci, "Unknown TRB with TRB type ID %u\n",
@@ -318,10 +318,10 @@ void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
 	for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
 		trb = &seg->trbs[i];
 		xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n", addr,
-			 (u32)lower_32_bits(le64_to_cpu(trb->link.segment_ptr)),
-			 (u32)upper_32_bits(le64_to_cpu(trb->link.segment_ptr)),
-			 (unsigned int) le32_to_cpu(trb->link.intr_target),
-			 (unsigned int) le32_to_cpu(trb->link.control));
+			 lower_32_bits(le64_to_cpu(trb->link.segment_ptr)),
+			 upper_32_bits(le64_to_cpu(trb->link.segment_ptr)),
+			 le32_to_cpu(trb->link.intr_target),
+			 le32_to_cpu(trb->link.control));
 		addr += sizeof(*trb);
 	}
 }
@@ -402,8 +402,8 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
 			 addr,
 			 lower_32_bits(le64_to_cpu(entry->seg_addr)),
 			 upper_32_bits(le64_to_cpu(entry->seg_addr)),
-			 (unsigned int) le32_to_cpu(entry->seg_size),
-			 (unsigned int) le32_to_cpu(entry->rsvd));
+			 le32_to_cpu(entry->seg_size),
+			 le32_to_cpu(entry->rsvd));
 		addr += sizeof(*entry);
 	}
 }
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 26caba4c1950..596d8fbb9e18 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -89,8 +89,8 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
 		return;
 	prev->next = next;
 	if (link_trbs) {
-		prev->trbs[TRBS_PER_SEGMENT-1].link.
-			segment_ptr = cpu_to_le64(next->dma);
+		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
+			cpu_to_le64(next->dma);
 
 		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
 		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
@@ -187,8 +187,8 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 
 	if (link_trbs) {
 		/* See section 4.9.2.1 and 6.4.4.1 */
-		prev->trbs[TRBS_PER_SEGMENT-1].link.
-			control |= cpu_to_le32(LINK_TOGGLE);
+		prev->trbs[TRBS_PER_SEGMENT-1].link.control |=
+			cpu_to_le32(LINK_TOGGLE);
 		xhci_dbg(xhci, "Wrote link toggle flag to"
 				" segment %p (virtual), 0x%llx (DMA)\n",
 				prev, (unsigned long long)prev->dma);
@@ -549,8 +549,8 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
 		addr = cur_ring->first_seg->dma |
 			SCT_FOR_CTX(SCT_PRI_TR) |
 			cur_ring->cycle_state;
-		stream_info->stream_ctx_array[cur_stream].
-			stream_ring = cpu_to_le64(addr);
+		stream_info->stream_ctx_array[cur_stream].stream_ring =
+			cpu_to_le64(addr);
 		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
 				cur_stream, (unsigned long long) addr);
 
@@ -786,7 +786,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
 		 slot_id,
 		 &xhci->dcbaa->dev_context_ptrs[slot_id],
-		 (unsigned long long) le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
+		 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
 
 	return 1;
 fail:
@@ -890,19 +890,19 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
 
 	/* 3) Only the control endpoint is valid - one endpoint context */
-	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | (u32) udev->route);
+	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
 	switch (udev->speed) {
 	case USB_SPEED_SUPER:
-		slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_SS);
+		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
 		break;
 	case USB_SPEED_HIGH:
-		slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_HS);
+		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
 		break;
 	case USB_SPEED_FULL:
-		slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_FS);
+		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
 		break;
 	case USB_SPEED_LOW:
-		slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_LS);
+		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
 		break;
 	case USB_SPEED_WIRELESS:
 		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
@@ -916,7 +916,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	port_num = xhci_find_real_port_number(xhci, udev);
 	if (!port_num)
 		return -EINVAL;
-	slot_ctx->dev_info2 |= cpu_to_le32((u32) ROOT_HUB_PORT(port_num));
+	slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
 	/* Set the port number in the virtual_device to the faked port number */
 	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
 			top_dev = top_dev->parent)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index cc1485bfed38..4b40e4c95f94 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -113,15 +113,13 @@ static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	if (ring == xhci->event_ring)
 		return trb == &seg->trbs[TRBS_PER_SEGMENT];
 	else
-		return (le32_to_cpu(trb->link.control) & TRB_TYPE_BITMASK)
-			== TRB_TYPE(TRB_LINK);
+		return TRB_TYPE_LINK_LE32(trb->link.control);
 }
 
 static int enqueue_is_link_trb(struct xhci_ring *ring)
 {
 	struct xhci_link_trb *link = &ring->enqueue->link;
-	return ((le32_to_cpu(link->control) & TRB_TYPE_BITMASK) ==
-		TRB_TYPE(TRB_LINK));
+	return TRB_TYPE_LINK_LE32(link->control);
 }
 
 /* Updates trb to point to the next TRB in the ring, and updates seg if the next
@@ -372,7 +370,7 @@ static struct xhci_segment *find_trb_seg(
 	while (cur_seg->trbs > trb ||
 			&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
 		generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
-		if (le32_to_cpu(generic_trb->field[3]) & LINK_TOGGLE)
+		if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE))
 			*cycle_state ^= 0x1;
 		cur_seg = cur_seg->next;
 		if (cur_seg == start_seg)
@@ -489,8 +487,8 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 	}
 
 	trb = &state->new_deq_ptr->generic;
-	if ((le32_to_cpu(trb->field[3]) & TRB_TYPE_BITMASK) ==
-	    TRB_TYPE(TRB_LINK) && (le32_to_cpu(trb->field[3]) & LINK_TOGGLE))
+	if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
+	    (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
 		state->new_cycle_state ^= 0x1;
 	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
 
@@ -525,8 +523,7 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
 			true;
 			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
-		if ((le32_to_cpu(cur_trb->generic.field[3]) & TRB_TYPE_BITMASK)
-				== TRB_TYPE(TRB_LINK)) {
+		if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
 			/* Unchain any chained Link TRBs, but
 			 * leave the pointers intact.
 			 */
@@ -1000,7 +997,7 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
 	 * but we don't care.
 	 */
 	xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
-		 (unsigned int) GET_COMP_CODE(le32_to_cpu(event->status)));
+		 GET_COMP_CODE(le32_to_cpu(event->status)));
 
 	/* HW with the reset endpoint quirk needs to have a configure endpoint
 	 * command complete before the endpoint can be used.  Queue that here
@@ -1458,7 +1455,8 @@ static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
 	 * endpoint anyway.  Check if a babble halted the
 	 * endpoint.
 	 */
-	if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == EP_STATE_HALTED)
+	if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
+	    cpu_to_le32(EP_STATE_HALTED))
 		return 1;
 
 	return 0;
@@ -1752,10 +1750,8 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	for (cur_trb = ep_ring->dequeue,
 	     cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
 	     next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
-		if ((le32_to_cpu(cur_trb->generic.field[3]) &
-		     TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
-		    (le32_to_cpu(cur_trb->generic.field[3]) &
-		     TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
+		if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
+		    !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
 			len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
 	}
 	len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
@@ -1888,10 +1884,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
 			cur_trb != event_trb;
 			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
-		if ((le32_to_cpu(cur_trb->generic.field[3]) &
-		     TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
-		    (le32_to_cpu(cur_trb->generic.field[3]) &
-		     TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
+		if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
+		    !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
 			td->urb->actual_length +=
 				TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
 	}
@@ -2046,8 +2040,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 					TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
 					ep_index);
 			xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
-				 (unsigned int) (le32_to_cpu(event->flags)
-				 & TRB_TYPE_BITMASK)>>10);
+				 (le32_to_cpu(event->flags) &
+				  TRB_TYPE_BITMASK)>>10);
 			xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
 			if (ep->skip) {
 				ep->skip = false;
@@ -2104,9 +2098,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			 * corresponding TD has been cancelled. Just ignore
 			 * the TD.
 			 */
-			if ((le32_to_cpu(event_trb->generic.field[3])
-			     & TRB_TYPE_BITMASK)
-				 == TRB_TYPE(TRB_TR_NOOP)) {
+			if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
 				xhci_dbg(xhci,
 					 "event_trb is a no-op TRB. Skip it\n");
 				goto cleanup;
@@ -2432,7 +2424,7 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 			next->link.control |= cpu_to_le32(TRB_CHAIN);
 
 			wmb();
-			next->link.control ^= cpu_to_le32((u32) TRB_CYCLE);
+			next->link.control ^= cpu_to_le32(TRB_CYCLE);
 
 			/* Toggle the cycle bit after the last ring segment. */
 			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index d9660eb97eb9..743cf80debb1 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1333,8 +1333,8 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	/* If the HC already knows the endpoint is disabled,
 	 * or the HCD has noted it is disabled, ignore this request
 	 */
-	if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
-	    EP_STATE_DISABLED ||
+	if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
+	     cpu_to_le32(EP_STATE_DISABLED)) ||
 	    le32_to_cpu(ctrl_ctx->drop_flags) &
 	    xhci_get_endpoint_flag(&ep->desc)) {
 		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
@@ -1725,8 +1725,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 	/* Enqueue pointer can be left pointing to the link TRB,
 	 * we must handle that
 	 */
-	if ((le32_to_cpu(command->command_trb->link.control)
-	     & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
+	if (TRB_TYPE_LINK_LE32(command->command_trb->link.control))
 		command->command_trb =
 			xhci->cmd_ring->enq_seg->next->trbs;
 
@@ -2519,8 +2518,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
 	/* Enqueue pointer can be left pointing to the link TRB,
 	 * we must handle that
 	 */
-	if ((le32_to_cpu(reset_device_cmd->command_trb->link.control)
-	     & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
+	if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control))
 		reset_device_cmd->command_trb =
 			xhci->cmd_ring->enq_seg->next->trbs;
 
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index ac0196e7fcf1..f9098a24d38b 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1065,6 +1065,13 @@ union xhci_trb {
 /* Get NEC firmware revision. */
 #define TRB_NEC_GET_FW		49
 
+#define TRB_TYPE_LINK(x)	(((x) & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
+/* Above, but for __le32 types -- can avoid work by swapping constants: */
+#define TRB_TYPE_LINK_LE32(x)	(((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
+				 cpu_to_le32(TRB_TYPE(TRB_LINK)))
+#define TRB_TYPE_NOOP_LE32(x)	(((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
+				 cpu_to_le32(TRB_TYPE(TRB_TR_NOOP)))
+
 #define NEC_FW_MINOR(p)		(((p) >> 0) & 0xff)
 #define NEC_FW_MAJOR(p)		(((p) >> 8) & 0xff)
 