author     Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
commit     ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree       644b88f8a71896307d71438e9b3af49126ffb22b /drivers/usb/host/xhci-mem.c
parent     43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent     3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)

Merge branch 'wip-2.6.34' into old-private-master

Diffstat (limited to 'drivers/usb/host/xhci-mem.c')

-rw-r--r--  drivers/usb/host/xhci-mem.c | 373
1 file changed, 349 insertions(+), 24 deletions(-)
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index b8fd270a8b0d..d64f5724bfc4 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -22,6 +22,7 @@
 
 #include <linux/usb.h>
 #include <linux/pci.h>
+#include <linux/slab.h>
 #include <linux/dmapool.h>
 
 #include "xhci.h"
@@ -125,6 +126,23 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
 	kfree(ring);
 }
 
+static void xhci_initialize_ring_info(struct xhci_ring *ring)
+{
+	/* The ring is empty, so the enqueue pointer == dequeue pointer */
+	ring->enqueue = ring->first_seg->trbs;
+	ring->enq_seg = ring->first_seg;
+	ring->dequeue = ring->enqueue;
+	ring->deq_seg = ring->first_seg;
+	/* The ring is initialized to 0. The producer must write 1 to the cycle
+	 * bit to handover ownership of the TRB, so PCS = 1. The consumer must
+	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
+	 */
+	ring->cycle_state = 1;
+	/* Not necessary for new rings, but needed for re-initialized rings */
+	ring->enq_updates = 0;
+	ring->deq_updates = 0;
+}
+
 /**
  * Create a new ring with zero or more segments.
  *
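The PCS/CCS comment above is the core of xHCI ring ownership and is worth unpacking. Below is a minimal, hypothetical userspace sketch of the handshake it describes; the struct and helper names are illustrative, not from the driver.

```c
#include <stdbool.h>
#include <stdint.h>

#define TRB_CYCLE 0x1u                 /* cycle bit in the TRB control word */

struct trb { uint32_t field[3]; uint32_t control; };

/* Producer: fill in the TRB first, then set the cycle bit to the current
 * Producer Cycle State (PCS) as the very last store, so the consumer can
 * never observe a half-written TRB as owned. */
static void hand_over_trb(struct trb *trb, unsigned int pcs)
{
	trb->control = (trb->control & ~TRB_CYCLE) | (pcs & TRB_CYCLE);
}

/* Consumer: a TRB is ready only while its cycle bit matches the Consumer
 * Cycle State (CCS). On a freshly zeroed ring every cycle bit is 0, which
 * is why both PCS and CCS start at 1 above. */
static bool trb_is_ready(const struct trb *trb, unsigned int ccs)
{
	return (trb->control & TRB_CYCLE) == (ccs & TRB_CYCLE);
}
```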
@@ -173,17 +191,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 				" segment %p (virtual), 0x%llx (DMA)\n",
 				prev, (unsigned long long)prev->dma);
 	}
-	/* The ring is empty, so the enqueue pointer == dequeue pointer */
-	ring->enqueue = ring->first_seg->trbs;
-	ring->enq_seg = ring->first_seg;
-	ring->dequeue = ring->enqueue;
-	ring->deq_seg = ring->first_seg;
-	/* The ring is initialized to 0. The producer must write 1 to the cycle
-	 * bit to handover ownership of the TRB, so PCS = 1. The consumer must
-	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
-	 */
-	ring->cycle_state = 1;
-
+	xhci_initialize_ring_info(ring);
 	return ring;
 
 fail:
@@ -191,6 +199,52 @@ fail:
 	return 0;
 }
 
+void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		unsigned int ep_index)
+{
+	int rings_cached;
+
+	rings_cached = virt_dev->num_rings_cached;
+	if (rings_cached < XHCI_MAX_RINGS_CACHED) {
+		virt_dev->num_rings_cached++;
+		rings_cached = virt_dev->num_rings_cached;
+		virt_dev->ring_cache[rings_cached] =
+			virt_dev->eps[ep_index].ring;
+		xhci_dbg(xhci, "Cached old ring, "
+				"%d ring%s cached\n",
+				rings_cached,
+				(rings_cached > 1) ? "s" : "");
+	} else {
+		xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
+		xhci_dbg(xhci, "Ring cache full (%d rings), "
+				"freeing ring\n",
+				virt_dev->num_rings_cached);
+	}
+	virt_dev->eps[ep_index].ring = NULL;
+}
+
+/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
+ * pointers to the beginning of the ring.
+ */
+static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
+		struct xhci_ring *ring)
+{
+	struct xhci_segment *seg = ring->first_seg;
+	do {
+		memset(seg->trbs, 0,
+				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
+		/* All endpoint rings have link TRBs */
+		xhci_link_segments(xhci, seg, seg->next, 1);
+		seg = seg->next;
+	} while (seg != ring->first_seg);
+	xhci_initialize_ring_info(ring);
+	/* td list should be empty since all URBs have been cancelled,
+	 * but just in case...
+	 */
+	INIT_LIST_HEAD(&ring->td_list);
+}
+
 #define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
 
 struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
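Together these two helpers form a small bounded free list: a dropped endpoint's ring is parked instead of freed, and xhci_endpoint_init() further down pops and re-zeroes a parked ring when a fresh allocation fails. A simplified sketch of that pattern as a plain pointer stack (names hypothetical; the driver's bookkeeping differs in detail):

```c
#include <stddef.h>

#define CACHE_MAX 31                   /* stand-in for XHCI_MAX_RINGS_CACHED */

struct ring;                           /* opaque payload type */

struct ring_cache {
	struct ring *slot[CACHE_MAX];
	int count;
};

/* Park a ring if there is room; a non-zero return tells the caller to
 * free the ring instead. */
static int cache_put(struct ring_cache *c, struct ring *r)
{
	if (c->count >= CACHE_MAX)
		return -1;
	c->slot[c->count++] = r;
	return 0;
}

/* Pop the most recently parked ring, or NULL when empty. The caller must
 * re-initialize it (cf. xhci_reinit_cached_ring) before reuse. */
static struct ring *cache_get(struct ring_cache *c)
{
	if (c->count == 0)
		return NULL;
	return c->slot[--c->count];
}
```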
@@ -214,6 +268,8 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
 void xhci_free_container_ctx(struct xhci_hcd *xhci,
 		struct xhci_container_ctx *ctx)
 {
+	if (!ctx)
+		return;
 	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
 	kfree(ctx);
 }
@@ -248,6 +304,15 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
 		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
 }
 
+static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
+		struct xhci_virt_ep *ep)
+{
+	init_timer(&ep->stop_cmd_timer);
+	ep->stop_cmd_timer.data = (unsigned long) ep;
+	ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog;
+	ep->xhci = xhci;
+}
+
 /* All the xhci_tds in the ring's TD list should be freed at this point */
 void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 {
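xhci_init_endpoint_timer() uses the classic kernel timer idiom of its era: the object pointer travels in the timer's .data field and the handler casts it back. A hedged sketch of the receiving side (the real watchdog lives elsewhere in the driver; this handler body and the timeout constant are illustrative):

```c
/* Handler matching the .data/.function wiring above; the cast recovers
 * the xhci_virt_ep stored by xhci_init_endpoint_timer(). */
static void stop_ep_watchdog_sketch(unsigned long arg)
{
	struct xhci_virt_ep *ep = (struct xhci_virt_ep *) arg;
	struct xhci_hcd *xhci = ep->xhci;

	xhci_warn(xhci, "Stop Endpoint command timed out\n");
	/* ... assume the host controller died; reset and clean up ... */
}

/* Arming such a timer later looks like (STOP_EP_TIMEOUT hypothetical):
 *
 *	ep->stop_cmd_timer.expires = jiffies + STOP_EP_TIMEOUT * HZ;
 *	add_timer(&ep->stop_cmd_timer);
 */
```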
@@ -267,6 +332,12 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 		if (dev->eps[i].ring)
 			xhci_ring_free(xhci, dev->eps[i].ring);
 
+	if (dev->ring_cache) {
+		for (i = 0; i < dev->num_rings_cached; i++)
+			xhci_ring_free(xhci, dev->ring_cache[i]);
+		kfree(dev->ring_cache);
+	}
+
 	if (dev->in_ctx)
 		xhci_free_container_ctx(xhci, dev->in_ctx);
 	if (dev->out_ctx)
@@ -309,15 +380,25 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
 			(unsigned long long)dev->in_ctx->dma);
 
-	/* Initialize the cancellation list for each endpoint */
-	for (i = 0; i < 31; i++)
+	/* Initialize the cancellation list and watchdog timers for each ep */
+	for (i = 0; i < 31; i++) {
+		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
 		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
+	}
 
 	/* Allocate endpoint 0 ring */
 	dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
 	if (!dev->eps[0].ring)
 		goto fail;
 
+	/* Allocate pointers to the ring cache */
+	dev->ring_cache = kzalloc(
+			sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
+			flags);
+	if (!dev->ring_cache)
+		goto fail;
+	dev->num_rings_cached = 0;
+
 	init_completion(&dev->cmd_completion);
 	INIT_LIST_HEAD(&dev->cmd_list);
 
@@ -374,7 +455,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	case USB_SPEED_LOW:
 		slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
 		break;
-	case USB_SPEED_VARIABLE:
+	case USB_SPEED_WIRELESS:
 		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
 		return -EINVAL;
 		break;
@@ -418,7 +499,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	case USB_SPEED_LOW:
 		ep0_ctx->ep_info2 |= MAX_PACKET(8);
 		break;
-	case USB_SPEED_VARIABLE:
+	case USB_SPEED_WIRELESS:
 		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
 		return -EINVAL;
 		break;
@@ -486,8 +567,13 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
 			if (interval < 3)
 				interval = 3;
 			if ((1 << interval) != 8*ep->desc.bInterval)
-				dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
-						ep->desc.bEndpointAddress, 1 << interval);
+				dev_warn(&udev->dev,
+						"ep %#x - rounding interval"
+						" to %d microframes, "
+						"ep desc says %d microframes\n",
+						ep->desc.bEndpointAddress,
+						1 << interval,
+						8*ep->desc.bInterval);
 		}
 		break;
 	default:
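The reworked warning now also reports what the descriptor asked for, which makes the rounding visible: bInterval = 3 on a full-speed interrupt endpoint requests 8 * 3 = 24 microframes, and fls(24) - 1 = 4 rounds that to 1 << 4 = 16. A standalone sketch of the same arithmetic, with the kernel's fls() replaced by a portable equivalent:

```c
#include <stdio.h>

/* Portable stand-in for the kernel's fls(): 1-based index of the most
 * significant set bit, 0 for x == 0. */
static unsigned int fls32(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int bInterval = 3;              /* from the endpoint descriptor */
	unsigned int requested = 8 * bInterval;  /* 24 microframes */
	unsigned int interval = fls32(requested) - 1;  /* 4 */

	if (interval < 3)
		interval = 3;
	if ((1u << interval) != requested)       /* 16 != 24: the warning fires */
		printf("rounding interval to %u microframes, "
		       "ep desc says %u microframes\n",
		       1u << interval, requested);
	return 0;
}
```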
@@ -496,6 +582,19 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
 	return EP_INTERVAL(interval);
 }
 
+/* The "Mult" field in the endpoint context is only set for SuperSpeed devices.
+ * High speed endpoint descriptors can define "the number of additional
+ * transaction opportunities per microframe", but that goes in the Max Burst
+ * endpoint context field.
+ */
+static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
+		struct usb_host_endpoint *ep)
+{
+	if (udev->speed != USB_SPEED_SUPER || !ep->ss_ep_comp)
+		return 0;
+	return ep->ss_ep_comp->desc.bmAttributes;
+}
+
 static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
 		struct usb_host_endpoint *ep)
 {
@@ -526,6 +625,36 @@ static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
 	return type;
 }
 
+/* Return the maximum endpoint service interval time (ESIT) payload.
+ * Basically, this is the maxpacket size, multiplied by the burst size
+ * and mult size.
+ */
+static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
+		struct usb_device *udev,
+		struct usb_host_endpoint *ep)
+{
+	int max_burst;
+	int max_packet;
+
+	/* Only applies for interrupt or isochronous endpoints */
+	if (usb_endpoint_xfer_control(&ep->desc) ||
+			usb_endpoint_xfer_bulk(&ep->desc))
+		return 0;
+
+	if (udev->speed == USB_SPEED_SUPER) {
+		if (ep->ss_ep_comp)
+			return ep->ss_ep_comp->desc.wBytesPerInterval;
+		xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
+		/* Assume no bursts, no multiple opportunities to send. */
+		return ep->desc.wMaxPacketSize;
+	}
+
+	max_packet = ep->desc.wMaxPacketSize & 0x3ff;
+	max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
+	/* A 0 in max burst means 1 transfer per ESIT */
+	return max_packet * (max_burst + 1);
+}
+
 int xhci_endpoint_init(struct xhci_hcd *xhci,
 		struct xhci_virt_device *virt_dev,
 		struct usb_device *udev,
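For the high-speed case, wMaxPacketSize carries both the packet size and the number of additional transaction opportunities, using the masks shown in the code above. A worked example with a hypothetical descriptor value of 0x1200:

```c
#include <stdio.h>

int main(void)
{
	unsigned int wMaxPacketSize = 0x1200;              /* hypothetical */
	unsigned int max_packet = wMaxPacketSize & 0x3ff;  /* -> 512 bytes */
	unsigned int max_burst = (wMaxPacketSize & 0x1800) >> 11;  /* -> 2 */

	/* A 0 in max burst means 1 transfer per ESIT, hence the +1:
	 * 512 * (2 + 1) = 1536 bytes per service interval. */
	printf("max ESIT payload = %u\n", max_packet * (max_burst + 1));
	return 0;
}
```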
@@ -537,6 +666,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	struct xhci_ring *ep_ring;
 	unsigned int max_packet;
 	unsigned int max_burst;
+	u32 max_esit_payload;
 
 	ep_index = xhci_get_endpoint_index(&ep->desc);
 	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
@@ -544,12 +674,21 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	/* Set up the endpoint ring */
 	virt_dev->eps[ep_index].new_ring =
 		xhci_ring_alloc(xhci, 1, true, mem_flags);
-	if (!virt_dev->eps[ep_index].new_ring)
-		return -ENOMEM;
+	if (!virt_dev->eps[ep_index].new_ring) {
+		/* Attempt to use the ring cache */
+		if (virt_dev->num_rings_cached == 0)
+			return -ENOMEM;
+		virt_dev->eps[ep_index].new_ring =
+			virt_dev->ring_cache[virt_dev->num_rings_cached];
+		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
+		virt_dev->num_rings_cached--;
+		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
+	}
 	ep_ring = virt_dev->eps[ep_index].new_ring;
 	ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
 
 	ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
+	ep_ctx->ep_info |= EP_MULT(xhci_get_endpoint_mult(udev, ep));
 
 	/* FIXME dig Mult and streams info out of ep companion desc */
 
@@ -595,6 +734,26 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	default:
 		BUG();
 	}
+	max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
+	ep_ctx->tx_info = MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload);
+
+	/*
+	 * XXX no idea how to calculate the average TRB buffer length for bulk
+	 * endpoints, as the driver gives us no clue how big each scatter gather
+	 * list entry (or buffer) is going to be.
+	 *
+	 * For isochronous and interrupt endpoints, we set it to the max
+	 * available, until we have new API in the USB core to allow drivers to
+	 * declare how much bandwidth they actually need.
+	 *
+	 * Normally, it would be calculated by taking the total of the buffer
+	 * lengths in the TD and then dividing by the number of TRBs in a TD,
+	 * including link TRBs, No-op TRBs, and Event data TRBs. Since we don't
+	 * use Event Data TRBs, and we don't chain in a link TRB on short
+	 * transfers, we're basically dividing by 1.
+	 */
+	ep_ctx->tx_info |= AVG_TRB_LENGTH_FOR_EP(max_esit_payload);
+
 	/* FIXME Debug endpoint context */
 	return 0;
 }
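Both macros write into the same 32-bit tx_info word of the endpoint context: the average TRB length occupies the low half and the max ESIT payload the high half. A sketch of the packing; the macro definitions are reproduced here as an assumption about this era's xhci.h:

```c
#include <stdint.h>

/* Assumed to match the driver's xhci.h definitions. */
#define AVG_TRB_LENGTH_FOR_EP(p)	((p) & 0xffff)         /* bits 15:0  */
#define MAX_ESIT_PAYLOAD_FOR_EP(p)	(((p) & 0xffff) << 16) /* bits 31:16 */

static uint32_t pack_tx_info(uint32_t max_esit_payload)
{
	uint32_t tx_info = MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload);

	/* The patch reuses the max ESIT payload as the average TRB length,
	 * for the reasons given in the long comment above. */
	tx_info |= AVG_TRB_LENGTH_FOR_EP(max_esit_payload);
	return tx_info;
}
```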
@@ -758,7 +917,8 @@ static void scratchpad_free(struct xhci_hcd *xhci)
 }
 
 struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
-		bool allocate_completion, gfp_t mem_flags)
+		bool allocate_in_ctx, bool allocate_completion,
+		gfp_t mem_flags)
 {
 	struct xhci_command *command;
 
@@ -766,16 +926,22 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
 	if (!command)
 		return NULL;
 
-	command->in_ctx =
-		xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, mem_flags);
-	if (!command->in_ctx)
-		return NULL;
+	if (allocate_in_ctx) {
+		command->in_ctx =
+			xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
+					mem_flags);
+		if (!command->in_ctx) {
+			kfree(command);
+			return NULL;
+		}
+	}
 
 	if (allocate_completion) {
 		command->completion =
 			kzalloc(sizeof(struct completion), mem_flags);
 		if (!command->completion) {
 			xhci_free_container_ctx(xhci, command->in_ctx);
+			kfree(command);
 			return NULL;
 		}
 		init_completion(command->completion);
@@ -848,6 +1014,163 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	xhci->page_shift = 0;
 }
 
+static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
+		struct xhci_segment *input_seg,
+		union xhci_trb *start_trb,
+		union xhci_trb *end_trb,
+		dma_addr_t input_dma,
+		struct xhci_segment *result_seg,
+		char *test_name, int test_number)
+{
+	unsigned long long start_dma;
+	unsigned long long end_dma;
+	struct xhci_segment *seg;
+
+	start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
+	end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);
+
+	seg = trb_in_td(input_seg, start_trb, end_trb, input_dma);
+	if (seg != result_seg) {
+		xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
+				test_name, test_number);
+		xhci_warn(xhci, "Tested TRB math w/ seg %p and "
+				"input DMA 0x%llx\n",
+				input_seg,
+				(unsigned long long) input_dma);
+		xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
+				"ending TRB %p (0x%llx DMA)\n",
+				start_trb, start_dma,
+				end_trb, end_dma);
+		xhci_warn(xhci, "Expected seg %p, got seg %p\n",
+				result_seg, seg);
+		return -1;
+	}
+	return 0;
+}
+
+/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
+static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
+{
+	struct {
+		dma_addr_t input_dma;
+		struct xhci_segment *result_seg;
+	} simple_test_vector [] = {
+		/* A zeroed DMA field should fail */
+		{ 0, NULL },
+		/* One TRB before the ring start should fail */
+		{ xhci->event_ring->first_seg->dma - 16, NULL },
+		/* One byte before the ring start should fail */
+		{ xhci->event_ring->first_seg->dma - 1, NULL },
+		/* Starting TRB should succeed */
+		{ xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
+		/* Ending TRB should succeed */
+		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
+			xhci->event_ring->first_seg },
+		/* One byte after the ring end should fail */
+		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
+		/* One TRB after the ring end should fail */
+		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
+		/* An address of all ones should fail */
+		{ (dma_addr_t) (~0), NULL },
+	};
+	struct {
+		struct xhci_segment *input_seg;
+		union xhci_trb *start_trb;
+		union xhci_trb *end_trb;
+		dma_addr_t input_dma;
+		struct xhci_segment *result_seg;
+	} complex_test_vector [] = {
+		/* Test feeding a valid DMA address from a different ring */
+		{ .input_seg = xhci->event_ring->first_seg,
+			.start_trb = xhci->event_ring->first_seg->trbs,
+			.end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
+			.input_dma = xhci->cmd_ring->first_seg->dma,
+			.result_seg = NULL,
+		},
+		/* Test feeding a valid end TRB from a different ring */
+		{ .input_seg = xhci->event_ring->first_seg,
+			.start_trb = xhci->event_ring->first_seg->trbs,
+			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
+			.input_dma = xhci->cmd_ring->first_seg->dma,
+			.result_seg = NULL,
+		},
+		/* Test feeding a valid start and end TRB from a different ring */
+		{ .input_seg = xhci->event_ring->first_seg,
+			.start_trb = xhci->cmd_ring->first_seg->trbs,
+			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
+			.input_dma = xhci->cmd_ring->first_seg->dma,
+			.result_seg = NULL,
+		},
+		/* TRB in this ring, but after this TD */
+		{ .input_seg = xhci->event_ring->first_seg,
+			.start_trb = &xhci->event_ring->first_seg->trbs[0],
+			.end_trb = &xhci->event_ring->first_seg->trbs[3],
+			.input_dma = xhci->event_ring->first_seg->dma + 4*16,
+			.result_seg = NULL,
+		},
+		/* TRB in this ring, but before this TD */
+		{ .input_seg = xhci->event_ring->first_seg,
+			.start_trb = &xhci->event_ring->first_seg->trbs[3],
+			.end_trb = &xhci->event_ring->first_seg->trbs[6],
+			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
+			.result_seg = NULL,
+		},
+		/* TRB in this ring, but after this wrapped TD */
+		{ .input_seg = xhci->event_ring->first_seg,
+			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
+			.end_trb = &xhci->event_ring->first_seg->trbs[1],
+			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
+			.result_seg = NULL,
+		},
+		/* TRB in this ring, but before this wrapped TD */
+		{ .input_seg = xhci->event_ring->first_seg,
+			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
+			.end_trb = &xhci->event_ring->first_seg->trbs[1],
+			.input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
+			.result_seg = NULL,
+		},
+		/* TRB not in this ring, and we have a wrapped TD */
+		{ .input_seg = xhci->event_ring->first_seg,
+			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
+			.end_trb = &xhci->event_ring->first_seg->trbs[1],
+			.input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
+			.result_seg = NULL,
+		},
+	};
+
+	unsigned int num_tests;
+	int i, ret;
+
+	num_tests = sizeof(simple_test_vector) / sizeof(simple_test_vector[0]);
+	for (i = 0; i < num_tests; i++) {
+		ret = xhci_test_trb_in_td(xhci,
+				xhci->event_ring->first_seg,
+				xhci->event_ring->first_seg->trbs,
+				&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
+				simple_test_vector[i].input_dma,
+				simple_test_vector[i].result_seg,
+				"Simple", i);
+		if (ret < 0)
+			return ret;
+	}
+
+	num_tests = sizeof(complex_test_vector) / sizeof(complex_test_vector[0]);
+	for (i = 0; i < num_tests; i++) {
+		ret = xhci_test_trb_in_td(xhci,
+				complex_test_vector[i].input_seg,
+				complex_test_vector[i].start_trb,
+				complex_test_vector[i].end_trb,
+				complex_test_vector[i].input_dma,
+				complex_test_vector[i].result_seg,
+				"Complex", i);
+		if (ret < 0)
+			return ret;
+	}
+	xhci_dbg(xhci, "TRB math tests passed.\n");
+	return 0;
+}
+
+
 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 {
 	dma_addr_t dma;
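The simple vectors all reduce to one containment check: with 16-byte TRBs, a segment covers DMA addresses dma through dma + (TRBS_PER_SEGMENT - 1) * 16, inclusive. A minimal sketch of that check (the real trb_in_td() additionally walks segments and handles TDs that wrap across a link TRB):

```c
#include <stdbool.h>
#include <stdint.h>

#define TRB_SIZE 16
#define TRBS_PER_SEGMENT 64            /* assumption: the driver's value */

/* True iff addr points at one of the segment's TRBs. This matches the
 * simple vectors: dma - 1 and dma + TRBS_PER_SEGMENT * 16 fail, while
 * dma and dma + (TRBS_PER_SEGMENT - 1) * 16 succeed. */
static bool dma_in_segment(uint64_t seg_dma, uint64_t addr)
{
	return addr >= seg_dma &&
	       addr <= seg_dma + (TRBS_PER_SEGMENT - 1) * TRB_SIZE;
}
```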
@@ -951,6 +1274,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
 	if (!xhci->event_ring)
 		goto fail;
+	if (xhci_check_trb_in_td_math(xhci, flags) < 0)
+		goto fail;
 
 	xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
 			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);