author		Matt Evans <matt@ozlabs.org>	2011-03-28 22:40:46 -0400
committer	Sarah Sharp <sarah.a.sharp@linux.intel.com>	2011-05-02 19:42:49 -0400
commit		28ccd2962c66556d7037b2d9f1c11cdcd3b805d5 (patch)
tree		02bf9319e60c43c655a97aedeb76ec5171459508 /drivers/usb/host/xhci-mem.c
parent		7fc2a61638ef78cdf8d65d5934782963a6e0fc66 (diff)
xhci: Make xHCI driver endian-safe
This patch changes the struct members defining access to xHCI device-visible
memory to use __le32/__le64 where appropriate, and then adds swaps where
required. Checked with sparse that all accesses are correct.

MMIO accesses use readl/writel, so they are already performed LE, but the
prototypes now reflect this with __le*.

There were a couple of (debug) instances of DMA pointers being truncated to
32 bits; these have been fixed too.
Signed-off-by: Matt Evans <matt@ozlabs.org>
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
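
The pattern applied throughout is, in outline, the following. This is a minimal
sketch for illustration, not code from the patch; the struct and field names are
hypothetical. Declaring device-visible fields as __le32/__le64 lets sparse
(whose __bitwise annotations treat __le32 and u32 as distinct types) flag any
access that forgets a swap; on little-endian hosts the conversions compile away.

#include <linux/types.h>	/* __le32, __le64, u32, u64 */
#include <asm/byteorder.h>	/* cpu_to_le32(), le32_to_cpu(), cpu_to_le64() */

/* Hypothetical device-visible structure, fields kept in xHC (LE) byte order. */
struct example_ctx {
	__le32	info;
	__le64	deq;
};

static void example_update(struct example_ctx *ctx, u32 flags, u64 dma)
{
	/* Read-modify-write: swap to CPU order, modify, swap back. */
	u32 val = le32_to_cpu(ctx->info);

	val |= flags;
	ctx->info = cpu_to_le32(val);

	/* 64-bit DMA pointers take cpu_to_le64(). */
	ctx->deq = cpu_to_le64(dma);
}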
Diffstat (limited to 'drivers/usb/host/xhci-mem.c')
-rw-r--r--	drivers/usb/host/xhci-mem.c	122
1 files changed, 61 insertions, 61 deletions
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 627f3438028c..500ec7a9eb8a 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -89,16 +89,17 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
 		return;
 	prev->next = next;
 	if (link_trbs) {
-		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma;
+		prev->trbs[TRBS_PER_SEGMENT-1].link.
+			segment_ptr = cpu_to_le64(next->dma);
 
 		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
-		val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
+		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
 		val &= ~TRB_TYPE_BITMASK;
 		val |= TRB_TYPE(TRB_LINK);
 		/* Always set the chain bit with 0.95 hardware */
 		if (xhci_link_trb_quirk(xhci))
 			val |= TRB_CHAIN;
-		prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
+		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
 	}
 	xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
 			(unsigned long long)prev->dma,
@@ -186,7 +187,8 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 
 	if (link_trbs) {
 		/* See section 4.9.2.1 and 6.4.4.1 */
-		prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
+		prev->trbs[TRBS_PER_SEGMENT-1].link.
+			control |= cpu_to_le32(LINK_TOGGLE);
 		xhci_dbg(xhci, "Wrote link toggle flag to"
 				" segment %p (virtual), 0x%llx (DMA)\n",
 				prev, (unsigned long long)prev->dma);
@@ -548,7 +550,8 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
 		addr = cur_ring->first_seg->dma |
 			SCT_FOR_CTX(SCT_PRI_TR) |
 			cur_ring->cycle_state;
-		stream_info->stream_ctx_array[cur_stream].stream_ring = addr;
+		stream_info->stream_ctx_array[cur_stream].
+			stream_ring = cpu_to_le64(addr);
 		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
 				cur_stream, (unsigned long long) addr);
 
@@ -614,10 +617,10 @@ void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
 	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
 	xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n",
 			1 << (max_primary_streams + 1));
-	ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK;
-	ep_ctx->ep_info |= EP_MAXPSTREAMS(max_primary_streams);
-	ep_ctx->ep_info |= EP_HAS_LSA;
-	ep_ctx->deq = stream_info->ctx_array_dma;
+	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
+	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
+				       | EP_HAS_LSA);
+	ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
 }
 
 /*
@@ -630,10 +633,9 @@ void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
 		struct xhci_virt_ep *ep)
 {
 	dma_addr_t addr;
-	ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK;
-	ep_ctx->ep_info &= ~EP_HAS_LSA;
+	ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
 	addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
-	ep_ctx->deq = addr | ep->ring->cycle_state;
+	ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state);
 }
 
 /* Frees all stream contexts associated with the endpoint,
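
A note on the two hunks above: they AND a __le32 field directly against a
converted constant instead of swapping the field, as in
ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK). This is equivalent
because byte swapping commutes with bitwise AND/OR, and cpu_to_le32() of a
compile-time constant folds to a constant, so the swap costs nothing at
runtime. A hypothetical helper to illustrate (not part of the patch):

/* Clear bits in a little-endian field without converting the field:
 * swapping the mask is equivalent to swapping the data, and the swap
 * happens at compile time when the mask is constant. */
static inline void clear_bits_le32(__le32 *field, u32 mask)
{
	*field &= cpu_to_le32(~mask);
}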
@@ -781,11 +783,11 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	dev->udev = udev;
 
 	/* Point to output device context in dcbaa. */
-	xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma;
+	xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
 	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
 			slot_id,
 			&xhci->dcbaa->dev_context_ptrs[slot_id],
-			(unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]);
+			(unsigned long long) le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
 
 	return 1;
 fail:
@@ -810,8 +812,9 @@ void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
 	 * configured device has reset, so all control transfers should have
 	 * been completed or cancelled before the reset.
 	 */
-	ep0_ctx->deq = xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue);
-	ep0_ctx->deq |= ep_ring->cycle_state;
+	ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
+							ep_ring->enqueue)
+				   | ep_ring->cycle_state);
 }
 
 /*
@@ -885,24 +888,22 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
 	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
 
 	/* 2) New slot context and endpoint 0 context are valid*/
-	ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
+	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
 
 	/* 3) Only the control endpoint is valid - one endpoint context */
-	slot_ctx->dev_info |= LAST_CTX(1);
-
-	slot_ctx->dev_info |= (u32) udev->route;
+	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | (u32) udev->route);
 	switch (udev->speed) {
 	case USB_SPEED_SUPER:
-		slot_ctx->dev_info |= (u32) SLOT_SPEED_SS;
+		slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_SS);
 		break;
 	case USB_SPEED_HIGH:
-		slot_ctx->dev_info |= (u32) SLOT_SPEED_HS;
+		slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_HS);
 		break;
 	case USB_SPEED_FULL:
-		slot_ctx->dev_info |= (u32) SLOT_SPEED_FS;
+		slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_FS);
 		break;
 	case USB_SPEED_LOW:
-		slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
+		slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_LS);
 		break;
 	case USB_SPEED_WIRELESS:
 		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
@@ -916,7 +917,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
 	port_num = xhci_find_real_port_number(xhci, udev);
 	if (!port_num)
 		return -EINVAL;
-	slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(port_num);
+	slot_ctx->dev_info2 |= cpu_to_le32((u32) ROOT_HUB_PORT(port_num));
 	/* Set the port number in the virtual_device to the faked port number */
 	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
 			top_dev = top_dev->parent)
@@ -927,31 +928,31 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
 
 	/* Is this a LS/FS device under an external HS hub? */
 	if (udev->tt && udev->tt->hub->parent) {
-		slot_ctx->tt_info = udev->tt->hub->slot_id;
-		slot_ctx->tt_info |= udev->ttport << 8;
+		slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
+						(udev->ttport << 8));
 		if (udev->tt->multi)
-			slot_ctx->dev_info |= DEV_MTT;
+			slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
 	}
 	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
 	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
 
 	/* Step 4 - ring already allocated */
 	/* Step 5 */
-	ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP);
+	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
 	/*
 	 * XXX: Not sure about wireless USB devices.
 	 */
 	switch (udev->speed) {
 	case USB_SPEED_SUPER:
-		ep0_ctx->ep_info2 |= MAX_PACKET(512);
+		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(512));
 		break;
 	case USB_SPEED_HIGH:
 	/* USB core guesses at a 64-byte max packet first for FS devices */
 	case USB_SPEED_FULL:
-		ep0_ctx->ep_info2 |= MAX_PACKET(64);
+		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(64));
 		break;
 	case USB_SPEED_LOW:
-		ep0_ctx->ep_info2 |= MAX_PACKET(8);
+		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(8));
 		break;
 	case USB_SPEED_WIRELESS:
 		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
@@ -962,12 +963,10 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
 		BUG();
 	}
 	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
-	ep0_ctx->ep_info2 |= MAX_BURST(0);
-	ep0_ctx->ep_info2 |= ERROR_COUNT(3);
+	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3));
 
-	ep0_ctx->deq =
-		dev->eps[0].ring->first_seg->dma;
-	ep0_ctx->deq |= dev->eps[0].ring->cycle_state;
+	ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
+				   dev->eps[0].ring->cycle_state);
 
 	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */
 
@@ -1133,8 +1132,8 @@ static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
 	if (udev->speed == USB_SPEED_SUPER)
 		return ep->ss_ep_comp.wBytesPerInterval;
 
-	max_packet = GET_MAX_PACKET(ep->desc.wMaxPacketSize);
-	max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
+	max_packet = GET_MAX_PACKET(le16_to_cpu(ep->desc.wMaxPacketSize));
+	max_burst = (le16_to_cpu(ep->desc.wMaxPacketSize) & 0x1800) >> 11;
 	/* A 0 in max burst means 1 transfer per ESIT */
 	return max_packet * (max_burst + 1);
 }
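
The hunk above touches wMaxPacketSize, which comes straight from the USB
endpoint descriptor and is little-endian on the wire (declared __le16 in
struct usb_endpoint_descriptor), hence the le16_to_cpu(). A sketch of the
decoding being done (bit layout per the USB 2.0 spec; the helper name is
hypothetical):

/* wMaxPacketSize: bits 10:0 = max packet size, bits 12:11 = number of
 * additional transactions per microframe for HS periodic endpoints. */
static void decode_wmaxpacket(__le16 wMaxPacketSize, u32 *max_packet, u32 *max_burst)
{
	u16 raw = le16_to_cpu(wMaxPacketSize);

	*max_packet = raw & 0x7ff;		/* what GET_MAX_PACKET() extracts */
	*max_burst = (raw & 0x1800) >> 11;	/* 0-2 extra transactions */
}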
@@ -1183,10 +1182,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	}
 	virt_dev->eps[ep_index].skip = false;
 	ep_ring = virt_dev->eps[ep_index].new_ring;
-	ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
+	ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state);
 
-	ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
-	ep_ctx->ep_info |= EP_MULT(xhci_get_endpoint_mult(udev, ep));
+	ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep)
+				      | EP_MULT(xhci_get_endpoint_mult(udev, ep)));
 
 	/* FIXME dig Mult and streams info out of ep companion desc */
 
@@ -1194,22 +1193,22 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	 * error count = 0 means infinite retries.
 	 */
 	if (!usb_endpoint_xfer_isoc(&ep->desc))
-		ep_ctx->ep_info2 = ERROR_COUNT(3);
+		ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(3));
 	else
-		ep_ctx->ep_info2 = ERROR_COUNT(1);
+		ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(1));
 
-	ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);
+	ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep));
 
 	/* Set the max packet size and max burst */
 	switch (udev->speed) {
 	case USB_SPEED_SUPER:
-		max_packet = ep->desc.wMaxPacketSize;
-		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
+		max_packet = le16_to_cpu(ep->desc.wMaxPacketSize);
+		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
 		/* dig out max burst from ep companion desc */
 		max_packet = ep->ss_ep_comp.bMaxBurst;
 		if (!max_packet)
 			xhci_warn(xhci, "WARN no SS endpoint bMaxBurst\n");
-		ep_ctx->ep_info2 |= MAX_BURST(max_packet);
+		ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet));
 		break;
 	case USB_SPEED_HIGH:
 		/* bits 11:12 specify the number of additional transaction
@@ -1217,20 +1216,21 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 		 */
 		if (usb_endpoint_xfer_isoc(&ep->desc) ||
 				usb_endpoint_xfer_int(&ep->desc)) {
-			max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
-			ep_ctx->ep_info2 |= MAX_BURST(max_burst);
+			max_burst = (le16_to_cpu(ep->desc.wMaxPacketSize)
+				     & 0x1800) >> 11;
+			ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst));
 		}
 		/* Fall through */
 	case USB_SPEED_FULL:
 	case USB_SPEED_LOW:
-		max_packet = GET_MAX_PACKET(ep->desc.wMaxPacketSize);
-		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
+		max_packet = GET_MAX_PACKET(le16_to_cpu(ep->desc.wMaxPacketSize));
+		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
 		break;
 	default:
 		BUG();
 	}
 	max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
-	ep_ctx->tx_info = MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload);
+	ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));
 
 	/*
 	 * XXX no idea how to calculate the average TRB buffer length for bulk
@@ -1247,7 +1247,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	 * use Event Data TRBs, and we don't chain in a link TRB on short
 	 * transfers, we're basically dividing by 1.
 	 */
-	ep_ctx->tx_info |= AVG_TRB_LENGTH_FOR_EP(max_esit_payload);
+	ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload));
 
 	/* FIXME Debug endpoint context */
 	return 0;
@@ -1347,7 +1347,7 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
 	if (!xhci->scratchpad->sp_dma_buffers)
 		goto fail_sp4;
 
-	xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma;
+	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
 	for (i = 0; i < num_sp; i++) {
 		dma_addr_t dma;
 		void *buf = pci_alloc_consistent(to_pci_dev(dev),
@@ -1724,7 +1724,7 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
 }
 
 static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
-		u32 __iomem *addr, u8 major_revision)
+		__le32 __iomem *addr, u8 major_revision)
 {
 	u32 temp, port_offset, port_count;
 	int i;
@@ -1789,7 +1789,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
  */
 static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
 {
-	u32 __iomem *addr;
+	__le32 __iomem *addr;
 	u32 offset;
 	unsigned int num_ports;
 	int i, port_index;
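
The two prototype changes above are annotation-only: readl() and writel()
already perform little-endian accesses on every architecture (byte-swapping
on big-endian hosts) and return data in CPU order, so retyping the register
pointers as __le32 __iomem changes no generated code. It only lets sparse
check that MMIO pointers aren't mixed with CPU-endian pointers. A sketch
(function name hypothetical):

/* No explicit swap needed for MMIO: readl() already returns the
 * little-endian register contents in CPU byte order. */
static u32 example_read_reg(__le32 __iomem *reg)
{
	return readl(reg);
}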
@@ -2042,8 +2042,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	/* set ring base address and size for each segment table entry */
 	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
 		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
-		entry->seg_addr = seg->dma;
-		entry->seg_size = TRBS_PER_SEGMENT;
+		entry->seg_addr = cpu_to_le64(seg->dma);
+		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
 		entry->rsvd = 0;
 		seg = seg->next;
 	}
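
As the commit message notes, the conversions were checked with sparse.
Kernels of this era exposed that check through sparse's endian mode, e.g.
make C=2 CF=-D__CHECK_ENDIAN__ drivers/usb/host/ (the build target here is
illustrative), which reports every place a __le32/__le64 value is used as a
plain integer without a conversion.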