Diffstat (limited to 'drivers/usb/host/xhci-mem.c')
-rw-r--r--  drivers/usb/host/xhci-mem.c | 85
1 file changed, 43 insertions(+), 42 deletions(-)
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index fcb7f7efc86..d718033dc53 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -81,7 +81,7 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
  * related flags, such as End TRB, Toggle Cycle, and no snoop.
  */
 static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
-		struct xhci_segment *next, bool link_trbs)
+		struct xhci_segment *next, bool link_trbs, bool isoc)
 {
 	u32 val;
 
@@ -89,15 +89,17 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
 		return;
 	prev->next = next;
 	if (link_trbs) {
-		prev->trbs[TRBS_PER_SEGMENT-1].link.
-			segment_ptr = cpu_to_le64(next->dma);
+		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
+			cpu_to_le64(next->dma);
 
 		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
 		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
 		val &= ~TRB_TYPE_BITMASK;
 		val |= TRB_TYPE(TRB_LINK);
 		/* Always set the chain bit with 0.95 hardware */
-		if (xhci_link_trb_quirk(xhci))
+		/* Set chain bit for isoc rings on AMD 0.96 host */
+		if (xhci_link_trb_quirk(xhci) ||
+				(isoc && (xhci->quirks & XHCI_AMD_0x96_HOST)))
 			val |= TRB_CHAIN;
 		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
 	}
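This hunk is the core of the patch: AMD 0.96 hosts need the chain bit left set in the link TRB of isochronous rings, just as 0.95 hosts need it on every ring (the existing xhci_link_trb_quirk() case). A minimal sketch of the combined decision, pulled into a helper for readability; chain_links() is a hypothetical name, not part of the patch, while xhci_link_trb_quirk() and XHCI_AMD_0x96_HOST are the identifiers the patch actually uses:

    /* Sketch only: the patch open-codes this test inside
     * xhci_link_segments(); chain_links() is a hypothetical helper. */
    static bool chain_links(struct xhci_hcd *xhci, bool isoc)
    {
            /* 0.95 hardware always chains through link TRBs ... */
            if (xhci_link_trb_quirk(xhci))
                    return true;
            /* ... and AMD 0.96 hosts need the same on isoc rings. */
            return isoc && (xhci->quirks & XHCI_AMD_0x96_HOST);
    }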
@@ -112,18 +114,20 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
 	struct xhci_segment *seg;
 	struct xhci_segment *first_seg;
 
-	if (!ring || !ring->first_seg)
+	if (!ring)
 		return;
-	first_seg = ring->first_seg;
-	seg = first_seg->next;
-	xhci_dbg(xhci, "Freeing ring at %p\n", ring);
-	while (seg != first_seg) {
-		struct xhci_segment *next = seg->next;
-		xhci_segment_free(xhci, seg);
-		seg = next;
+	if (ring->first_seg) {
+		first_seg = ring->first_seg;
+		seg = first_seg->next;
+		xhci_dbg(xhci, "Freeing ring at %p\n", ring);
+		while (seg != first_seg) {
+			struct xhci_segment *next = seg->next;
+			xhci_segment_free(xhci, seg);
+			seg = next;
+		}
+		xhci_segment_free(xhci, first_seg);
+		ring->first_seg = NULL;
 	}
-	xhci_segment_free(xhci, first_seg);
-	ring->first_seg = NULL;
 	kfree(ring);
 }
 
@@ -152,7 +156,7 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring)
  * See section 4.9.1 and figures 15 and 16.
  */
 static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
-		unsigned int num_segs, bool link_trbs, gfp_t flags)
+		unsigned int num_segs, bool link_trbs, bool isoc, gfp_t flags)
 {
 	struct xhci_ring *ring;
 	struct xhci_segment *prev;
@@ -178,17 +182,17 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 		next = xhci_segment_alloc(xhci, flags);
 		if (!next)
 			goto fail;
-		xhci_link_segments(xhci, prev, next, link_trbs);
+		xhci_link_segments(xhci, prev, next, link_trbs, isoc);
 
 		prev = next;
 		num_segs--;
 	}
-	xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);
+	xhci_link_segments(xhci, prev, ring->first_seg, link_trbs, isoc);
 
 	if (link_trbs) {
 		/* See section 4.9.2.1 and 6.4.4.1 */
-		prev->trbs[TRBS_PER_SEGMENT-1].link.
-			control |= cpu_to_le32(LINK_TOGGLE);
+		prev->trbs[TRBS_PER_SEGMENT-1].link.control |=
+			cpu_to_le32(LINK_TOGGLE);
 		xhci_dbg(xhci, "Wrote link toggle flag to"
 				" segment %p (virtual), 0x%llx (DMA)\n",
 				prev, (unsigned long long)prev->dma);
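Only the link TRB that points back at the first segment carries LINK_TOGGLE, so producer and consumer flip their cycle state exactly once per lap around the ring. A sketch of the consumer-side rule this flag drives (the real traversal lives in xhci-ring.c; this fragment is illustrative):

    /* Illustrative: when ring traversal crosses a link TRB whose
     * LINK_TOGGLE bit is set, the cycle state must be flipped. */
    if (le32_to_cpu(link_trb->link.control) & LINK_TOGGLE)
            ring->cycle_state ^= 1;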
@@ -229,14 +233,14 @@ void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
  * pointers to the beginning of the ring.
  */
 static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
-		struct xhci_ring *ring)
+		struct xhci_ring *ring, bool isoc)
 {
 	struct xhci_segment *seg = ring->first_seg;
 	do {
 		memset(seg->trbs, 0,
 				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
 		/* All endpoint rings have link TRBs */
-		xhci_link_segments(xhci, seg, seg->next, 1);
+		xhci_link_segments(xhci, seg, seg->next, 1, isoc);
 		seg = seg->next;
 	} while (seg != ring->first_seg);
 	xhci_initialize_ring_info(ring);
@@ -540,7 +544,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
 	 */
 	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
 		stream_info->stream_rings[cur_stream] =
-			xhci_ring_alloc(xhci, 1, true, mem_flags);
+			xhci_ring_alloc(xhci, 1, true, false, mem_flags);
 		cur_ring = stream_info->stream_rings[cur_stream];
 		if (!cur_ring)
 			goto cleanup_rings;
@@ -549,8 +553,8 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
 		addr = cur_ring->first_seg->dma |
 			SCT_FOR_CTX(SCT_PRI_TR) |
 			cur_ring->cycle_state;
-		stream_info->stream_ctx_array[cur_stream].
-			stream_ring = cpu_to_le64(addr);
+		stream_info->stream_ctx_array[cur_stream].stream_ring =
+			cpu_to_le64(addr);
 		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
 				cur_stream, (unsigned long long) addr);
 
@@ -765,7 +769,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	}
 
 	/* Allocate endpoint 0 ring */
-	dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
+	dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, false, flags);
 	if (!dev->eps[0].ring)
 		goto fail;
 
@@ -786,7 +790,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
 		 slot_id,
 		 &xhci->dcbaa->dev_context_ptrs[slot_id],
-		 (unsigned long long) le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
+		 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
 
 	return 1;
 fail:
@@ -871,7 +875,6 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
 	struct xhci_virt_device *dev;
 	struct xhci_ep_ctx	*ep0_ctx;
 	struct xhci_slot_ctx	*slot_ctx;
-	struct xhci_input_control_ctx *ctrl_ctx;
 	u32			port_num;
 	struct usb_device *top_dev;
 
@@ -883,26 +886,22 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
 		return -EINVAL;
 	}
 	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
-	ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
 	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
 
-	/* 2) New slot context and endpoint 0 context are valid*/
-	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
-
 	/* 3) Only the control endpoint is valid - one endpoint context */
-	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | (u32) udev->route);
+	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
 	switch (udev->speed) {
 	case USB_SPEED_SUPER:
-		slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_SS);
+		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
 		break;
 	case USB_SPEED_HIGH:
-		slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_HS);
+		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
 		break;
 	case USB_SPEED_FULL:
-		slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_FS);
+		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
 		break;
 	case USB_SPEED_LOW:
-		slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_LS);
+		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
 		break;
 	case USB_SPEED_WIRELESS:
 		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
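The dropped (u32) casts were redundant; the macros already yield 32-bit values positioned in their slot-context fields. With ctrl_ctx gone, this function also no longer touches the input control context's add_flags. Per the xHCI slot-context layout, dev_info (dword 0) ends up carrying the route string, device speed, and number of valid contexts, roughly as sketched below; the bit positions come from the spec, not from this patch:

    /* Illustrative decode of slot_ctx->dev_info (slot context dword 0):
     *   bits 19:0   route string         (udev->route)
     *   bits 23:20  device speed         (SLOT_SPEED_*)
     *   bits 31:27  last valid context   (LAST_CTX(n), 1 here: ep0 only)
     */
    u32 dev_info = le32_to_cpu(slot_ctx->dev_info);
    u32 last_ctx = dev_info >> 27;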
@@ -916,7 +915,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
 	port_num = xhci_find_real_port_number(xhci, udev);
 	if (!port_num)
 		return -EINVAL;
-	slot_ctx->dev_info2 |= cpu_to_le32((u32) ROOT_HUB_PORT(port_num));
+	slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
 	/* Set the port number in the virtual_device to the faked port number */
 	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
 			top_dev = top_dev->parent)
@@ -1175,10 +1174,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	 */
 	if (usb_endpoint_xfer_isoc(&ep->desc))
 		virt_dev->eps[ep_index].new_ring =
-			xhci_ring_alloc(xhci, 8, true, mem_flags);
+			xhci_ring_alloc(xhci, 8, true, true, mem_flags);
 	else
 		virt_dev->eps[ep_index].new_ring =
-			xhci_ring_alloc(xhci, 1, true, mem_flags);
+			xhci_ring_alloc(xhci, 1, true, false, mem_flags);
 	if (!virt_dev->eps[ep_index].new_ring) {
 		/* Attempt to use the ring cache */
 		if (virt_dev->num_rings_cached == 0)
@@ -1187,7 +1186,8 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 			virt_dev->ring_cache[virt_dev->num_rings_cached];
 		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
 		virt_dev->num_rings_cached--;
-		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
+		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
+			usb_endpoint_xfer_isoc(&ep->desc) ? true : false);
 	}
 	virt_dev->eps[ep_index].skip = false;
 	ep_ring = virt_dev->eps[ep_index].new_ring;
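Threading the isoc flag through the cache-reuse path matters because a recycled ring is zeroed and re-linked from scratch; without it, a cached ring placed on an isochronous endpoint of an AMD 0.96 host would lose the chain bits in its link TRBs. usb_endpoint_xfer_isoc() returns an int, hence the explicit `? true : false` normalization to the bool parameter; the helper is essentially a transfer-type test on the endpoint descriptor, along these lines (sketch of the ch9.h helper's behavior, not copied from it):

    /* Sketch of what usb_endpoint_xfer_isoc() checks; see
     * include/linux/usb/ch9.h for the real helper. */
    static inline int usb_endpoint_xfer_isoc(
                    const struct usb_endpoint_descriptor *epd)
    {
            return (epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
                    USB_ENDPOINT_XFER_ISOC;
    }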
@@ -2001,7 +2001,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 		goto fail;
 
 	/* Set up the command ring to have one segments for now. */
-	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
+	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, false, flags);
 	if (!xhci->cmd_ring)
 		goto fail;
 	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
@@ -2032,7 +2032,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	 * the event ring segment table (ERST). Section 4.9.3.
 	 */
 	xhci_dbg(xhci, "// Allocating event ring\n");
-	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
+	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, false,
+						flags);
 	if (!xhci->event_ring)
 		goto fail;
 	if (xhci_check_trb_in_td_math(xhci, flags) < 0)
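Taken together, every caller of the allocator now states whether the ring backs an isochronous endpoint; only the isoc endpoint path passes true, and the event ring, which hardware consumes through the ERST rather than through link TRBs, passes false for both flags:

    /* New allocator signature and the values this patch passes:
     *
     *   xhci_ring_alloc(xhci, num_segs, link_trbs, isoc, flags);
     *
     * stream rings, ep0 ring, cmd ring:  link_trbs = true,  isoc = false
     * isochronous endpoint rings:        link_trbs = true,  isoc = true
     * event ring (enumerated in ERST):   link_trbs = false, isoc = false
     */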