author     Sarah Sharp <sarah.a.sharp@linux.intel.com>    2009-09-04 13:53:09 -0400
committer  Greg Kroah-Hartman <gregkh@suse.de>            2009-09-23 09:46:39 -0400
commit     63a0d9abd18cdcf5a985029c266c6bfe0511768f
tree       2ae717082d022b2a86a64b86dee48ddfb2be0627 /drivers/usb/host/xhci-mem.c
parent     9e221be815cd263480928248bfd4541497017a1b
USB: xhci: Endpoint representation refactoring.
The xhci_ring structure contained information that is really related to an
endpoint, not a ring. This will cause problems later when endpoint
streams are supported and there are multiple rings per endpoint.
Move the endpoint state and cancellation information into a new virtual
endpoint structure, xhci_virt_ep. The list of TRBs to be cancelled should
be per endpoint, not per ring, for easy access. There can be only one TRB
that the endpoint stopped on after a stop endpoint command (even with
streams enabled); move the stopped TRB information into the new virtual
endpoint structure. Also move the 31 endpoint rings and temporary ring
storage from the virtual device structure (xhci_virt_device) into the
virtual endpoint structure (xhci_virt_ep).
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
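For orientation: the new per-endpoint container is declared in drivers/usb/host/xhci.h, which this diff does not touch. The sketch below is reconstructed from the commit message and from the eps[i] accesses visible in xhci-mem.c; the exact member names and ordering in xhci.h may differ.

/*
 * Rough sketch of the new per-endpoint container, reconstructed from the
 * commit message; the real declaration lives in drivers/usb/host/xhci.h.
 */
struct xhci_virt_ep {
	/* Transfer ring, previously xhci_virt_device->ep_rings[i] */
	struct xhci_ring	*ring;
	/* Temporary ring storage, previously xhci_virt_device->new_ep_rings[i] */
	struct xhci_ring	*new_ring;
	/* Endpoint state, moved out of struct xhci_ring */
	unsigned int		ep_state;
	/* TRB/TD the endpoint stopped on after a stop endpoint command */
	union xhci_trb		*stopped_trb;
	struct xhci_td		*stopped_td;
	/* TDs cancelled on this endpoint, moved out of struct xhci_ring */
	struct list_head	cancelled_td_list;
};

struct xhci_virt_device then embeds an array of 31 of these (eps[0..30], one per xHCI endpoint index), which is what the loops over 31 entries and the eps[i] accesses in the hunks below refer to.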
Diffstat (limited to 'drivers/usb/host/xhci-mem.c')
-rw-r--r--  drivers/usb/host/xhci-mem.c | 25 +++++++++++++++----------
1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 55920b39d106..75458ecc8eab 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -144,7 +144,6 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 		return 0;
 
 	INIT_LIST_HEAD(&ring->td_list);
-	INIT_LIST_HEAD(&ring->cancelled_td_list);
 	if (num_segs == 0)
 		return ring;
 
@@ -265,8 +264,8 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 		return;
 
 	for (i = 0; i < 31; ++i)
-		if (dev->ep_rings[i])
-			xhci_ring_free(xhci, dev->ep_rings[i]);
+		if (dev->eps[i].ring)
+			xhci_ring_free(xhci, dev->eps[i].ring);
 
 	if (dev->in_ctx)
 		xhci_free_container_ctx(xhci, dev->in_ctx);
@@ -281,6 +280,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 		struct usb_device *udev, gfp_t flags)
 {
 	struct xhci_virt_device *dev;
+	int i;
 
 	/* Slot ID 0 is reserved */
 	if (slot_id == 0 || xhci->devs[slot_id]) {
@@ -309,9 +309,13 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
 			(unsigned long long)dev->in_ctx->dma);
 
+	/* Initialize the cancellation list for each endpoint */
+	for (i = 0; i < 31; i++)
+		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
+
 	/* Allocate endpoint 0 ring */
-	dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags);
-	if (!dev->ep_rings[0])
+	dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
+	if (!dev->eps[0].ring)
 		goto fail;
 
 	init_completion(&dev->cmd_completion);
@@ -428,8 +432,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	ep0_ctx->ep_info2 |= ERROR_COUNT(3);
 
 	ep0_ctx->deq =
-		dev->ep_rings[0]->first_seg->dma;
-	ep0_ctx->deq |= dev->ep_rings[0]->cycle_state;
+		dev->eps[0].ring->first_seg->dma;
+	ep0_ctx->deq |= dev->eps[0].ring->cycle_state;
 
 	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */
 
@@ -539,10 +543,11 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
 
 	/* Set up the endpoint ring */
-	virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags);
-	if (!virt_dev->new_ep_rings[ep_index])
+	virt_dev->eps[ep_index].new_ring =
+		xhci_ring_alloc(xhci, 1, true, mem_flags);
+	if (!virt_dev->eps[ep_index].new_ring)
 		return -ENOMEM;
-	ep_ring = virt_dev->new_ep_rings[ep_index];
+	ep_ring = virt_dev->eps[ep_index].new_ring;
 	ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
 
 	ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);