path: root/drivers/usb/host/xhci-ring.c
author     Matt Evans <matt@ozlabs.org>                    2011-03-28 22:40:46 -0400
committer  Sarah Sharp <sarah.a.sharp@linux.intel.com>     2011-05-02 19:42:49 -0400
commit     28ccd2962c66556d7037b2d9f1c11cdcd3b805d5 (patch)
tree       02bf9319e60c43c655a97aedeb76ec5171459508 /drivers/usb/host/xhci-ring.c
parent     7fc2a61638ef78cdf8d65d5934782963a6e0fc66 (diff)
xhci: Make xHCI driver endian-safe
This patch changes the struct members defining access to xHCI device-visible
memory to use __le32/__le64 where appropriate, and then adds swaps where
required. Checked with sparse that all accesses are correct.

MMIO accesses use readl/writel so already are performed LE, but prototypes
now reflect this with __le*.

There were a couple of (debug) instances of DMA pointers being truncated to
32bits which have been fixed too.

Signed-off-by: Matt Evans <matt@ozlabs.org>
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
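As context for readers of the diff below, here is a minimal sketch of the accessor pattern the message describes; the struct and helper names are invented for illustration and are not taken from xhci.h. Device-visible fields are declared __le32/__le64 so sparse can flag unswapped accesses, values are converted with le32_to_cpu() when read and with cpu_to_le32() when written, and the bytes stored in memory stay little-endian on every host:

    /* Illustrative sketch only: "example_trb" and its helpers are made-up names. */
    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct example_trb {
            __le32 control;         /* device-visible, always little-endian in memory */
    };

    static inline bool example_cycle_bit_set(const struct example_trb *trb)
    {
            /* Read path: swap LE -> CPU order before testing bits */
            return le32_to_cpu(trb->control) & 0x1;
    }

    static inline void example_set_cycle_bit(struct example_trb *trb)
    {
            /* Write path: swap CPU -> LE order before storing */
            trb->control |= cpu_to_le32(0x1);
    }

On little-endian hosts both conversions compile away to nothing; on big-endian hosts they become byte swaps, which is what makes the accesses endian-safe.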
Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r--  drivers/usb/host/xhci-ring.c  267
1 files changed, 140 insertions, 127 deletions
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 7437386a9a50..9b1eeb04ce69 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -100,7 +100,7 @@ static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
 		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
 			(seg->next == xhci->event_ring->first_seg);
 	else
-		return trb->link.control & LINK_TOGGLE;
+		return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
 }
 
 /* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
@@ -113,13 +113,15 @@ static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	if (ring == xhci->event_ring)
 		return trb == &seg->trbs[TRBS_PER_SEGMENT];
 	else
-		return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
+		return (le32_to_cpu(trb->link.control) & TRB_TYPE_BITMASK)
+			== TRB_TYPE(TRB_LINK);
 }
 
 static int enqueue_is_link_trb(struct xhci_ring *ring)
 {
 	struct xhci_link_trb *link = &ring->enqueue->link;
-	return ((link->control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK));
+	return ((le32_to_cpu(link->control) & TRB_TYPE_BITMASK) ==
+		TRB_TYPE(TRB_LINK));
 }
 
 /* Updates trb to point to the next TRB in the ring, and updates seg if the next
@@ -197,7 +199,7 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	union xhci_trb *next;
 	unsigned long long addr;
 
-	chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
+	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
 	next = ++(ring->enqueue);
 
 	ring->enq_updates++;
@@ -223,12 +225,14 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 				 * (which may mean the chain bit is cleared).
 				 */
 				if (!xhci_link_trb_quirk(xhci)) {
-					next->link.control &= ~TRB_CHAIN;
-					next->link.control |= chain;
+					next->link.control &=
+						cpu_to_le32(~TRB_CHAIN);
+					next->link.control |=
+						cpu_to_le32(chain);
 				}
 				/* Give this link TRB to the hardware */
 				wmb();
-				next->link.control ^= TRB_CYCLE;
+				next->link.control ^= cpu_to_le32(TRB_CYCLE);
 			}
 			/* Toggle the cycle bit after the last ring segment. */
 			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
@@ -319,7 +323,7 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
 		unsigned int ep_index,
 		unsigned int stream_id)
 {
-	__u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
+	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
 	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
 	unsigned int ep_state = ep->ep_state;
 
@@ -380,7 +384,7 @@ static struct xhci_segment *find_trb_seg(
 	while (cur_seg->trbs > trb ||
 			&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
 		generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
-		if (generic_trb->field[3] & LINK_TOGGLE)
+		if (le32_to_cpu(generic_trb->field[3]) & LINK_TOGGLE)
 			*cycle_state ^= 0x1;
 		cur_seg = cur_seg->next;
 		if (cur_seg == start_seg)
@@ -447,6 +451,10 @@ static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
  * any link TRBs with the toggle cycle bit set.
  * - Finally we move the dequeue state one TRB further, toggling the cycle bit
  * if we've moved it past a link TRB with the toggle cycle bit set.
+ *
+ * Some of the uses of xhci_generic_trb are grotty, but if they're done
+ * with correct __le32 accesses they should work fine. Only users of this are
+ * in here.
  */
 void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 		unsigned int slot_id, unsigned int ep_index,
@@ -480,7 +488,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
 	xhci_dbg(xhci, "Finding endpoint context\n");
 	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
-	state->new_cycle_state = 0x1 & ep_ctx->deq;
+	state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
 
 	state->new_deq_ptr = cur_td->last_trb;
 	xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
@@ -493,8 +501,8 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 	}
 
 	trb = &state->new_deq_ptr->generic;
-	if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
-			(trb->field[3] & LINK_TOGGLE))
+	if ((le32_to_cpu(trb->field[3]) & TRB_TYPE_BITMASK) ==
+	    TRB_TYPE(TRB_LINK) && (le32_to_cpu(trb->field[3]) & LINK_TOGGLE))
 		state->new_cycle_state ^= 0x1;
 	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
 
@@ -529,12 +537,12 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
 			true;
 			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
-		if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK) ==
-				TRB_TYPE(TRB_LINK)) {
+		if ((le32_to_cpu(cur_trb->generic.field[3]) & TRB_TYPE_BITMASK)
+		    == TRB_TYPE(TRB_LINK)) {
 			/* Unchain any chained Link TRBs, but
 			 * leave the pointers intact.
 			 */
-			cur_trb->generic.field[3] &= ~TRB_CHAIN;
+			cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
 			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
 			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
 					"in seg %p (0x%llx dma)\n",
@@ -547,8 +555,9 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 			cur_trb->generic.field[1] = 0;
 			cur_trb->generic.field[2] = 0;
 			/* Preserve only the cycle bit of this TRB */
-			cur_trb->generic.field[3] &= TRB_CYCLE;
-			cur_trb->generic.field[3] |= TRB_TYPE(TRB_TR_NOOP);
+			cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
+			cur_trb->generic.field[3] |= cpu_to_le32(
+				TRB_TYPE(TRB_TR_NOOP));
 			xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
 					"in seg %p (0x%llx dma)\n",
 					cur_trb,
@@ -662,9 +671,9 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 	struct xhci_dequeue_state deq_state;
 
 	if (unlikely(TRB_TO_SUSPEND_PORT(
-			xhci->cmd_ring->dequeue->generic.field[3]))) {
+			le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])))) {
 		slot_id = TRB_TO_SLOT_ID(
-			xhci->cmd_ring->dequeue->generic.field[3]);
+			le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
 		virt_dev = xhci->devs[slot_id];
 		if (virt_dev)
 			handle_cmd_in_cmd_wait_list(xhci, virt_dev,
@@ -677,8 +686,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 	}
 
 	memset(&deq_state, 0, sizeof(deq_state));
-	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
-	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
+	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
+	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
 	ep = &xhci->devs[slot_id]->eps[ep_index];
 
 	if (list_empty(&ep->cancelled_td_list)) {
@@ -910,9 +919,9 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 	struct xhci_ep_ctx *ep_ctx;
 	struct xhci_slot_ctx *slot_ctx;
 
-	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
-	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
-	stream_id = TRB_TO_STREAM_ID(trb->generic.field[2]);
+	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
+	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
+	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
 	dev = xhci->devs[slot_id];
 
 	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
@@ -928,11 +937,11 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
 	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
 
-	if (GET_COMP_CODE(event->status) != COMP_SUCCESS) {
+	if (GET_COMP_CODE(le32_to_cpu(event->status)) != COMP_SUCCESS) {
 		unsigned int ep_state;
 		unsigned int slot_state;
 
-		switch (GET_COMP_CODE(event->status)) {
+		switch (GET_COMP_CODE(le32_to_cpu(event->status))) {
 		case COMP_TRB_ERR:
 			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
 					"of stream ID configuration\n");
@@ -940,9 +949,9 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 		case COMP_CTX_STATE:
 			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
 					"to incorrect slot or ep state.\n");
-			ep_state = ep_ctx->ep_info;
+			ep_state = le32_to_cpu(ep_ctx->ep_info);
 			ep_state &= EP_STATE_MASK;
-			slot_state = slot_ctx->dev_state;
+			slot_state = le32_to_cpu(slot_ctx->dev_state);
 			slot_state = GET_SLOT_STATE(slot_state);
 			xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
 					slot_state, ep_state);
@@ -954,7 +963,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 		default:
 			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
 					"completion code of %u.\n",
-					GET_COMP_CODE(event->status));
+					GET_COMP_CODE(le32_to_cpu(event->status)));
 			break;
 		}
 		/* OK what do we do now? The endpoint state is hosed, and we
@@ -965,10 +974,10 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 		 */
 	} else {
 		xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
-				ep_ctx->deq);
+				le64_to_cpu(ep_ctx->deq));
 		if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
 					dev->eps[ep_index].queued_deq_ptr) ==
-				(ep_ctx->deq & ~(EP_CTX_CYCLE_MASK))) {
+				(le64_to_cpu(ep_ctx->deq) & ~(EP_CTX_CYCLE_MASK))) {
 			/* Update the ring's dequeue segment and dequeue pointer
 			 * to reflect the new position.
 			 */
@@ -997,13 +1006,13 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
 	int slot_id;
 	unsigned int ep_index;
 
-	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
-	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
+	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
+	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
 	/* This command will only fail if the endpoint wasn't halted,
 	 * but we don't care.
 	 */
 	xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
-			(unsigned int) GET_COMP_CODE(event->status));
+			(unsigned int) GET_COMP_CODE(le32_to_cpu(event->status)));
 
 	/* HW with the reset endpoint quirk needs to have a configure endpoint
 	 * command complete before the endpoint can be used. Queue that here
@@ -1040,8 +1049,7 @@ static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
 	if (xhci->cmd_ring->dequeue != command->command_trb)
 		return 0;
 
-	command->status =
-		GET_COMP_CODE(event->status);
+	command->status = GET_COMP_CODE(le32_to_cpu(event->status));
 	list_del(&command->cmd_list);
 	if (command->completion)
 		complete(command->completion);
@@ -1053,7 +1061,7 @@ static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
 static void handle_cmd_completion(struct xhci_hcd *xhci,
 		struct xhci_event_cmd *event)
 {
-	int slot_id = TRB_TO_SLOT_ID(event->flags);
+	int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
 	u64 cmd_dma;
 	dma_addr_t cmd_dequeue_dma;
 	struct xhci_input_control_ctx *ctrl_ctx;
@@ -1062,7 +1070,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 	struct xhci_ring *ep_ring;
 	unsigned int ep_state;
 
-	cmd_dma = event->cmd_trb;
+	cmd_dma = le64_to_cpu(event->cmd_trb);
 	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
 			xhci->cmd_ring->dequeue);
 	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
@@ -1075,9 +1083,10 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 		xhci->error_bitmask |= 1 << 5;
 		return;
 	}
-	switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) {
+	switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
+		& TRB_TYPE_BITMASK) {
 	case TRB_TYPE(TRB_ENABLE_SLOT):
-		if (GET_COMP_CODE(event->status) == COMP_SUCCESS)
+		if (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS)
 			xhci->slot_id = slot_id;
 		else
 			xhci->slot_id = 0;
@@ -1102,7 +1111,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 		ctrl_ctx = xhci_get_input_control_ctx(xhci,
 				virt_dev->in_ctx);
 		/* Input ctx add_flags are the endpoint index plus one */
-		ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
+		ep_index = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags)) - 1;
 		/* A usb_set_interface() call directly after clearing a halted
 		 * condition may race on this quirky hardware. Not worth
 		 * worrying about, since this is prototype hardware. Not sure
@@ -1111,8 +1120,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 		 */
 		if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
 				ep_index != (unsigned int) -1 &&
-				ctrl_ctx->add_flags - SLOT_FLAG ==
-				ctrl_ctx->drop_flags) {
+				le32_to_cpu(ctrl_ctx->add_flags) - SLOT_FLAG ==
+				le32_to_cpu(ctrl_ctx->drop_flags)) {
 			ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
 			ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
 			if (!(ep_state & EP_HALTED))
@@ -1129,18 +1138,18 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 bandwidth_change:
 		xhci_dbg(xhci, "Completed config ep cmd\n");
 		xhci->devs[slot_id]->cmd_status =
-			GET_COMP_CODE(event->status);
+			GET_COMP_CODE(le32_to_cpu(event->status));
 		complete(&xhci->devs[slot_id]->cmd_completion);
 		break;
 	case TRB_TYPE(TRB_EVAL_CONTEXT):
 		virt_dev = xhci->devs[slot_id];
 		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
 			break;
-		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
+		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
 		complete(&xhci->devs[slot_id]->cmd_completion);
 		break;
 	case TRB_TYPE(TRB_ADDR_DEV):
-		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
+		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
 		complete(&xhci->addr_dev);
 		break;
 	case TRB_TYPE(TRB_STOP_RING):
@@ -1157,7 +1166,7 @@ bandwidth_change:
 	case TRB_TYPE(TRB_RESET_DEV):
 		xhci_dbg(xhci, "Completed reset device command.\n");
 		slot_id = TRB_TO_SLOT_ID(
-				xhci->cmd_ring->dequeue->generic.field[3]);
+				le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
 		virt_dev = xhci->devs[slot_id];
 		if (virt_dev)
 			handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
@@ -1171,8 +1180,8 @@ bandwidth_change:
 			break;
 		}
 		xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",
-				NEC_FW_MAJOR(event->status),
-				NEC_FW_MINOR(event->status));
+				NEC_FW_MAJOR(le32_to_cpu(event->status)),
+				NEC_FW_MINOR(le32_to_cpu(event->status)));
 		break;
 	default:
 		/* Skip over unknown commands on the event ring */
@@ -1187,7 +1196,7 @@ static void handle_vendor_event(struct xhci_hcd *xhci,
 {
 	u32 trb_type;
 
-	trb_type = TRB_FIELD_TO_TYPE(event->generic.field[3]);
+	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
 	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
 	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
 		handle_cmd_completion(xhci, &event->event_cmd);
@@ -1241,15 +1250,15 @@ static void handle_port_status(struct xhci_hcd *xhci,
 	unsigned int faked_port_index;
 	u8 major_revision;
 	struct xhci_bus_state *bus_state;
-	u32 __iomem **port_array;
+	__le32 __iomem **port_array;
 	bool bogus_port_status = false;
 
 	/* Port status change events always have a successful completion code */
-	if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
+	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
 		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
 		xhci->error_bitmask |= 1 << 8;
 	}
-	port_id = GET_PORT_ID(event->generic.field[0]);
+	port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
 	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
 
 	max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
@@ -1456,7 +1465,7 @@ static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
 		 * endpoint anyway. Check if a babble halted the
 		 * endpoint.
 		 */
-		if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_HALTED)
+		if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == EP_STATE_HALTED)
 			return 1;
 
 	return 0;
@@ -1494,12 +1503,12 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	struct urb_priv *urb_priv;
 	u32 trb_comp_code;
 
-	slot_id = TRB_TO_SLOT_ID(event->flags);
+	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
 	xdev = xhci->devs[slot_id];
-	ep_index = TRB_TO_EP_ID(event->flags) - 1;
-	ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
+	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
+	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
-	trb_comp_code = GET_COMP_CODE(event->transfer_len);
+	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
 
 	if (skip)
 		goto td_cleanup;
@@ -1602,12 +1611,12 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	struct xhci_ep_ctx *ep_ctx;
 	u32 trb_comp_code;
 
-	slot_id = TRB_TO_SLOT_ID(event->flags);
+	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
 	xdev = xhci->devs[slot_id];
-	ep_index = TRB_TO_EP_ID(event->flags) - 1;
-	ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
+	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
+	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
-	trb_comp_code = GET_COMP_CODE(event->transfer_len);
+	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
 
 	xhci_debug_trb(xhci, xhci->event_ring->dequeue);
 	switch (trb_comp_code) {
@@ -1646,7 +1655,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
 				event_trb != td->last_trb)
 			td->urb->actual_length =
 				td->urb->transfer_buffer_length
-				- TRB_LEN(event->transfer_len);
+				- TRB_LEN(le32_to_cpu(event->transfer_len));
 		else
 			td->urb->actual_length = 0;
 
@@ -1680,7 +1689,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
 			/* We didn't stop on a link TRB in the middle */
 			td->urb->actual_length =
 				td->urb->transfer_buffer_length -
-				TRB_LEN(event->transfer_len);
+				TRB_LEN(le32_to_cpu(event->transfer_len));
 			xhci_dbg(xhci, "Waiting for status "
 					"stage event\n");
 			return 0;
@@ -1708,8 +1717,8 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	u32 trb_comp_code;
 	bool skip_td = false;
 
-	ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
-	trb_comp_code = GET_COMP_CODE(event->transfer_len);
+	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
+	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
 	urb_priv = td->urb->hcpriv;
 	idx = urb_priv->td_cnt;
 	frame = &td->urb->iso_frame_desc[idx];
@@ -1752,15 +1761,14 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		for (cur_trb = ep_ring->dequeue,
 		     cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
 		     next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
-			if ((cur_trb->generic.field[3] &
+			if ((le32_to_cpu(cur_trb->generic.field[3]) &
 			    TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
-			    (cur_trb->generic.field[3] &
+			    (le32_to_cpu(cur_trb->generic.field[3]) &
 			    TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
-				len +=
-				    TRB_LEN(cur_trb->generic.field[2]);
+				len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
 		}
-		len += TRB_LEN(cur_trb->generic.field[2]) -
-			TRB_LEN(event->transfer_len);
+		len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
+			TRB_LEN(le32_to_cpu(event->transfer_len));
 
 		if (trb_comp_code != COMP_STOP_INVAL) {
 			frame->actual_length = len;
@@ -1815,8 +1823,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	struct xhci_segment *cur_seg;
 	u32 trb_comp_code;
 
-	ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
-	trb_comp_code = GET_COMP_CODE(event->transfer_len);
+	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
+	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
 
 	switch (trb_comp_code) {
 	case COMP_SUCCESS:
@@ -1852,18 +1860,18 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
1852 "%d bytes untransferred\n", 1860 "%d bytes untransferred\n",
1853 td->urb->ep->desc.bEndpointAddress, 1861 td->urb->ep->desc.bEndpointAddress,
1854 td->urb->transfer_buffer_length, 1862 td->urb->transfer_buffer_length,
1855 TRB_LEN(event->transfer_len)); 1863 TRB_LEN(le32_to_cpu(event->transfer_len)));
1856 /* Fast path - was this the last TRB in the TD for this URB? */ 1864 /* Fast path - was this the last TRB in the TD for this URB? */
1857 if (event_trb == td->last_trb) { 1865 if (event_trb == td->last_trb) {
1858 if (TRB_LEN(event->transfer_len) != 0) { 1866 if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
1859 td->urb->actual_length = 1867 td->urb->actual_length =
1860 td->urb->transfer_buffer_length - 1868 td->urb->transfer_buffer_length -
1861 TRB_LEN(event->transfer_len); 1869 TRB_LEN(le32_to_cpu(event->transfer_len));
1862 if (td->urb->transfer_buffer_length < 1870 if (td->urb->transfer_buffer_length <
1863 td->urb->actual_length) { 1871 td->urb->actual_length) {
1864 xhci_warn(xhci, "HC gave bad length " 1872 xhci_warn(xhci, "HC gave bad length "
1865 "of %d bytes left\n", 1873 "of %d bytes left\n",
1866 TRB_LEN(event->transfer_len)); 1874 TRB_LEN(le32_to_cpu(event->transfer_len)));
1867 td->urb->actual_length = 0; 1875 td->urb->actual_length = 0;
1868 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1876 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1869 *status = -EREMOTEIO; 1877 *status = -EREMOTEIO;
@@ -1894,20 +1902,20 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
 				cur_trb != event_trb;
 				next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
-			if ((cur_trb->generic.field[3] &
+			if ((le32_to_cpu(cur_trb->generic.field[3]) &
 			    TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
-			    (cur_trb->generic.field[3] &
+			    (le32_to_cpu(cur_trb->generic.field[3]) &
 			    TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
 				td->urb->actual_length +=
-					TRB_LEN(cur_trb->generic.field[2]);
+					TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
 		}
 		/* If the ring didn't stop on a Link or No-op TRB, add
 		 * in the actual bytes transferred from the Normal TRB
 		 */
 		if (trb_comp_code != COMP_STOP_INVAL)
 			td->urb->actual_length +=
-				TRB_LEN(cur_trb->generic.field[2]) -
-				TRB_LEN(event->transfer_len);
+				TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
+				TRB_LEN(le32_to_cpu(event->transfer_len));
 	}
 
 	return finish_td(xhci, td, event_trb, event, ep, status, false);
@@ -1937,7 +1945,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	u32 trb_comp_code;
 	int ret = 0;
 
-	slot_id = TRB_TO_SLOT_ID(event->flags);
+	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
 	xdev = xhci->devs[slot_id];
 	if (!xdev) {
 		xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
@@ -1945,20 +1953,21 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	}
 
 	/* Endpoint ID is 1 based, our index is zero based */
-	ep_index = TRB_TO_EP_ID(event->flags) - 1;
+	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
 	xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
 	ep = &xdev->eps[ep_index];
-	ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
+	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
 	if (!ep_ring ||
-		(ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
+	    (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
+	    EP_STATE_DISABLED) {
 		xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
 				"or incorrect stream ring\n");
 		return -ENODEV;
 	}
 
-	event_dma = event->buffer;
-	trb_comp_code = GET_COMP_CODE(event->transfer_len);
+	event_dma = le64_to_cpu(event->buffer);
+	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
 	/* Look for common error cases */
 	switch (trb_comp_code) {
 	/* Skip codes that require special handling depending on
@@ -2011,14 +2020,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		if (!list_empty(&ep_ring->td_list))
 			xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
 					"still with TDs queued?\n",
-				TRB_TO_SLOT_ID(event->flags), ep_index);
+				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
+				 ep_index);
 		goto cleanup;
 	case COMP_OVERRUN:
 		xhci_dbg(xhci, "overrun event on endpoint\n");
 		if (!list_empty(&ep_ring->td_list))
 			xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
 					"still with TDs queued?\n",
-				TRB_TO_SLOT_ID(event->flags), ep_index);
+				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
+				 ep_index);
 		goto cleanup;
 	case COMP_MISSED_INT:
 		/*
@@ -2047,9 +2058,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		if (list_empty(&ep_ring->td_list)) {
 			xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
 					"with no TDs queued?\n",
-					TRB_TO_SLOT_ID(event->flags), ep_index);
+					TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
+					ep_index);
 			xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
-					(unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
+					(unsigned int) (le32_to_cpu(event->flags)
+							& TRB_TYPE_BITMASK)>>10);
 			xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
 			if (ep->skip) {
 				ep->skip = false;
@@ -2092,7 +2105,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			 * corresponding TD has been cancelled. Just ignore
 			 * the TD.
 			 */
-			if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK)
+			if ((le32_to_cpu(event_trb->generic.field[3])
+			     & TRB_TYPE_BITMASK)
 					== TRB_TYPE(TRB_TR_NOOP)) {
 				xhci_dbg(xhci,
 					"event_trb is a no-op TRB. Skip it\n");
@@ -2172,15 +2186,15 @@ static void xhci_handle_event(struct xhci_hcd *xhci)
 
 	event = xhci->event_ring->dequeue;
 	/* Does the HC or OS own the TRB? */
-	if ((event->event_cmd.flags & TRB_CYCLE) !=
+	if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
 			xhci->event_ring->cycle_state) {
 		xhci->error_bitmask |= 1 << 2;
 		return;
 	}
 	xhci_dbg(xhci, "%s - OS owns TRB\n", __func__);
 
 	/* FIXME: Handle more event types. */
-	switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
+	switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
 	case TRB_TYPE(TRB_COMPLETION):
 		xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__);
 		handle_cmd_completion(xhci, &event->event_cmd);
@@ -2202,7 +2216,8 @@ static void xhci_handle_event(struct xhci_hcd *xhci)
 		update_ptrs = 0;
 		break;
 	default:
-		if ((event->event_cmd.flags & TRB_TYPE_BITMASK) >= TRB_TYPE(48))
+		if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
+		    TRB_TYPE(48))
 			handle_vendor_event(xhci, event);
 		else
 			xhci->error_bitmask |= 1 << 3;
@@ -2252,12 +2267,12 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
2252 xhci_dbg(xhci, "op reg status = %08x\n", status); 2267 xhci_dbg(xhci, "op reg status = %08x\n", status);
2253 xhci_dbg(xhci, "Event ring dequeue ptr:\n"); 2268 xhci_dbg(xhci, "Event ring dequeue ptr:\n");
2254 xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n", 2269 xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
2255 (unsigned long long) 2270 (unsigned long long)
2256 xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb), 2271 xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
2257 lower_32_bits(trb->link.segment_ptr), 2272 lower_32_bits(le64_to_cpu(trb->link.segment_ptr)),
2258 upper_32_bits(trb->link.segment_ptr), 2273 upper_32_bits(le64_to_cpu(trb->link.segment_ptr)),
2259 (unsigned int) trb->link.intr_target, 2274 (unsigned int) le32_to_cpu(trb->link.intr_target),
2260 (unsigned int) trb->link.control); 2275 (unsigned int) le32_to_cpu(trb->link.control));
2261 2276
2262 if (status & STS_FATAL) { 2277 if (status & STS_FATAL) {
2263 xhci_warn(xhci, "WARNING: Host System Error\n"); 2278 xhci_warn(xhci, "WARNING: Host System Error\n");
@@ -2358,10 +2373,10 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	struct xhci_generic_trb *trb;
 
 	trb = &ring->enqueue->generic;
-	trb->field[0] = field1;
-	trb->field[1] = field2;
-	trb->field[2] = field3;
-	trb->field[3] = field4;
+	trb->field[0] = cpu_to_le32(field1);
+	trb->field[1] = cpu_to_le32(field2);
+	trb->field[2] = cpu_to_le32(field3);
+	trb->field[3] = cpu_to_le32(field4);
 	inc_enq(xhci, ring, consumer, more_trbs_coming);
 }
 
@@ -2414,17 +2429,16 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 	next = ring->enqueue;
 
 	while (last_trb(xhci, ring, ring->enq_seg, next)) {
-
 		/* If we're not dealing with 0.95 hardware,
 		 * clear the chain bit.
 		 */
 		if (!xhci_link_trb_quirk(xhci))
-			next->link.control &= ~TRB_CHAIN;
+			next->link.control &= cpu_to_le32(~TRB_CHAIN);
 		else
-			next->link.control |= TRB_CHAIN;
+			next->link.control |= cpu_to_le32(TRB_CHAIN);
 
 		wmb();
-		next->link.control ^= (u32) TRB_CYCLE;
+		next->link.control ^= cpu_to_le32((u32) TRB_CYCLE);
 
 		/* Toggle the cycle bit after the last ring segment. */
 		if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
@@ -2467,8 +2481,8 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 	}
 
 	ret = prepare_ring(xhci, ep_ring,
-			ep_ctx->ep_info & EP_STATE_MASK,
+			le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
 			num_trbs, mem_flags);
 	if (ret)
 		return ret;
 
@@ -2570,9 +2584,9 @@ static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
 	 */
 	wmb();
 	if (start_cycle)
-		start_trb->field[3] |= start_cycle;
+		start_trb->field[3] |= cpu_to_le32(start_cycle);
 	else
-		start_trb->field[3] &= ~0x1;
+		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
 	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
 }
 
@@ -2590,7 +2604,7 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	int xhci_interval;
 	int ep_interval;
 
-	xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info);
+	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
 	ep_interval = urb->interval;
 	/* Convert to microframes */
 	if (urb->dev->speed == USB_SPEED_LOW ||
@@ -2979,12 +2993,11 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	if (start_cycle == 0)
 		field |= 0x1;
 	queue_trb(xhci, ep_ring, false, true,
-		  /* FIXME endianness is probably going to bite my ass here. */
-		  setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
-		  setup->wIndex | setup->wLength << 16,
-		  TRB_LEN(8) | TRB_INTR_TARGET(0),
-		  /* Immediate data in pointer */
-		  field);
+		  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
+		  le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
+		  TRB_LEN(8) | TRB_INTR_TARGET(0),
+		  /* Immediate data in pointer */
+		  field);
 
 	/* If there's data, queue data TRBs */
 	field = 0;
@@ -3211,8 +3224,8 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
 	/* Check the ring to guarantee there is enough room for the whole urb.
 	 * Do not insert any td of the urb to the ring if the check failed.
 	 */
-	ret = prepare_ring(xhci, ep_ring, ep_ctx->ep_info & EP_STATE_MASK,
+	ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
 			   num_trbs, mem_flags);
 	if (ret)
 		return ret;
 
@@ -3224,7 +3237,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
 			urb->dev->speed == USB_SPEED_FULL)
 		urb->start_frame >>= 3;
 
-	xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info);
+	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
 	ep_interval = urb->interval;
 	/* Convert to microframes */
 	if (urb->dev->speed == USB_SPEED_LOW ||