author	Sarah Sharp <sarah.a.sharp@linux.intel.com>	2009-09-04 13:53:09 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2009-09-23 09:46:39 -0400
commit	63a0d9abd18cdcf5a985029c266c6bfe0511768f (patch)
tree	2ae717082d022b2a86a64b86dee48ddfb2be0627 /drivers/usb/host
parent	9e221be815cd263480928248bfd4541497017a1b (diff)
USB: xhci: Endpoint representation refactoring.
The xhci_ring structure contained information that is really related to an
endpoint, not a ring. This will cause problems later when endpoint
streams are supported and there are multiple rings per endpoint.
Move the endpoint state and cancellation information into a new virtual
endpoint structure, xhci_virt_ep. The list of TRBs to be cancelled should
be per endpoint, not per ring, for easy access. There can be only one TRB
that the endpoint stopped on after a stop endpoint command (even with
streams enabled); move the stopped TRB information into the new virtual
endpoint structure. Also move the 31 endpoint rings and temporary ring
storage from the virtual device structure (xhci_virt_device) into the
virtual endpoint structure (xhci_virt_ep).
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
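
[Editor's note: to make the shape of the change concrete, here is a minimal, self-contained C sketch of the ownership move. It is illustrative only, not kernel code — the real structs in xhci.h carry many more fields — but the names ring, new_ring, ep_state, cancels_pending, and eps[31] are taken from the diff below.]

```c
/* Standalone model of the refactoring: per-endpoint state moves off the
 * ring and into a per-endpoint struct, so a later streams implementation
 * can hang several rings off one endpoint without touching ring state. */
#include <stdio.h>

#define SET_DEQ_PENDING	(1 << 0)
#define EP_HALTED	(1 << 1)

struct ring {
	int cycle_state;	/* rings keep only ring-level state */
};

struct virt_ep {		/* mirrors the new struct xhci_virt_ep */
	struct ring *ring;	/* current ring (one of several, later) */
	struct ring *new_ring;	/* staged by a configure endpoint command */
	unsigned int ep_state;	/* SET_DEQ_PENDING / EP_HALTED live here now */
	unsigned int cancels_pending;
};

struct virt_dev {		/* mirrors struct xhci_virt_device: the two
				 * 31-entry ring arrays collapse into one
				 * array of endpoint structs */
	struct virt_ep eps[31];
};

int main(void)
{
	struct ring ep0_ring = { .cycle_state = 1 };
	struct virt_dev dev = { 0 };

	/* Old pattern: dev->ep_rings[ep_index]->state |= EP_HALTED;
	 * New pattern, used throughout the patch: */
	struct virt_ep *ep = &dev.eps[0];
	ep->ring = &ep0_ring;
	ep->ep_state |= EP_HALTED;

	printf("ep_state=%#x cycle=%d\n", ep->ep_state, ep->ring->cycle_state);
	return 0;
}
```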
Diffstat (limited to 'drivers/usb/host')
-rw-r--r--	drivers/usb/host/xhci-hcd.c	59
-rw-r--r--	drivers/usb/host/xhci-mem.c	25
-rw-r--r--	drivers/usb/host/xhci-ring.c	90
-rw-r--r--	drivers/usb/host/xhci.h	41
4 files changed, 115 insertions, 100 deletions
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index e478a63488fb..3ab9090c22dc 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -351,13 +351,14 @@ void xhci_event_ring_work(unsigned long arg)
     xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
     xhci_dbg_cmd_ptrs(xhci);
     for (i = 0; i < MAX_HC_SLOTS; ++i) {
-        if (xhci->devs[i]) {
-            for (j = 0; j < 31; ++j) {
-                if (xhci->devs[i]->ep_rings[j]) {
-                    xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
-                    xhci_debug_segment(xhci, xhci->devs[i]->ep_rings[j]->deq_seg);
-                }
-            }
+        if (!xhci->devs[i])
+            continue;
+        for (j = 0; j < 31; ++j) {
+            struct xhci_ring *ring = xhci->devs[i]->eps[j].ring;
+            if (!ring)
+                continue;
+            xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
+            xhci_debug_segment(xhci, ring->deq_seg);
         }
     }
 
@@ -778,6 +779,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
     struct xhci_td *td;
     unsigned int ep_index;
     struct xhci_ring *ep_ring;
+    struct xhci_virt_ep *ep;
 
     xhci = hcd_to_xhci(hcd);
     spin_lock_irqsave(&xhci->lock, flags);
@@ -790,17 +792,18 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
     xhci_dbg(xhci, "Event ring:\n");
     xhci_debug_ring(xhci, xhci->event_ring);
     ep_index = xhci_get_endpoint_index(&urb->ep->desc);
-    ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index];
+    ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
+    ep_ring = ep->ring;
     xhci_dbg(xhci, "Endpoint ring:\n");
     xhci_debug_ring(xhci, ep_ring);
     td = (struct xhci_td *) urb->hcpriv;
 
-    ep_ring->cancels_pending++;
-    list_add_tail(&td->cancelled_td_list, &ep_ring->cancelled_td_list);
+    ep->cancels_pending++;
+    list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
     /* Queue a stop endpoint command, but only if this is
      * the first cancellation to be handled.
      */
-    if (ep_ring->cancels_pending == 1) {
+    if (ep->cancels_pending == 1) {
         xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
         xhci_ring_cmd_db(xhci);
     }
@@ -1206,10 +1209,10 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
     xhci_zero_in_ctx(xhci, virt_dev);
     /* Free any old rings */
     for (i = 1; i < 31; ++i) {
-        if (virt_dev->new_ep_rings[i]) {
-            xhci_ring_free(xhci, virt_dev->ep_rings[i]);
-            virt_dev->ep_rings[i] = virt_dev->new_ep_rings[i];
-            virt_dev->new_ep_rings[i] = NULL;
+        if (virt_dev->eps[i].new_ring) {
+            xhci_ring_free(xhci, virt_dev->eps[i].ring);
+            virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
+            virt_dev->eps[i].new_ring = NULL;
         }
     }
 
@@ -1236,9 +1239,9 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
     virt_dev = xhci->devs[udev->slot_id];
     /* Free any rings allocated for added endpoints */
     for (i = 0; i < 31; ++i) {
-        if (virt_dev->new_ep_rings[i]) {
-            xhci_ring_free(xhci, virt_dev->new_ep_rings[i]);
-            virt_dev->new_ep_rings[i] = NULL;
+        if (virt_dev->eps[i].new_ring) {
+            xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
+            virt_dev->eps[i].new_ring = NULL;
         }
     }
     xhci_zero_in_ctx(xhci, virt_dev);
@@ -1281,17 +1284,18 @@ void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
 }
 
 void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
-        struct usb_device *udev,
-        unsigned int ep_index, struct xhci_ring *ep_ring)
+        struct usb_device *udev, unsigned int ep_index)
 {
     struct xhci_dequeue_state deq_state;
+    struct xhci_virt_ep *ep;
 
     xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
+    ep = &xhci->devs[udev->slot_id]->eps[ep_index];
     /* We need to move the HW's dequeue pointer past this TD,
      * or it will attempt to resend it on the next doorbell ring.
      */
     xhci_find_new_dequeue_state(xhci, udev->slot_id,
-            ep_index, ep_ring->stopped_td,
+            ep_index, ep->stopped_td,
             &deq_state);
 
     /* HW with the reset endpoint quirk will use the saved dequeue state to
@@ -1299,8 +1303,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
      */
     if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
         xhci_dbg(xhci, "Queueing new dequeue state\n");
-        xhci_queue_new_dequeue_state(xhci, ep_ring,
-                udev->slot_id,
+        xhci_queue_new_dequeue_state(xhci, udev->slot_id,
                 ep_index, &deq_state);
     } else {
         /* Better hope no one uses the input context between now and the
@@ -1327,7 +1330,7 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
     unsigned int ep_index;
     unsigned long flags;
     int ret;
-    struct xhci_ring *ep_ring;
+    struct xhci_virt_ep *virt_ep;
 
     xhci = hcd_to_xhci(hcd);
     udev = (struct usb_device *) ep->hcpriv;
@@ -1337,8 +1340,8 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
     if (!ep->hcpriv)
         return;
     ep_index = xhci_get_endpoint_index(&ep->desc);
-    ep_ring = xhci->devs[udev->slot_id]->ep_rings[ep_index];
-    if (!ep_ring->stopped_td) {
+    virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
+    if (!virt_ep->stopped_td) {
         xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
                 ep->desc.bEndpointAddress);
         return;
@@ -1357,8 +1360,8 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
      * command.  Better hope that last command worked!
      */
     if (!ret) {
-        xhci_cleanup_stalled_ring(xhci, udev, ep_index, ep_ring);
-        kfree(ep_ring->stopped_td);
+        xhci_cleanup_stalled_ring(xhci, udev, ep_index);
+        kfree(virt_ep->stopped_td);
         xhci_ring_cmd_db(xhci);
     }
     spin_unlock_irqrestore(&xhci->lock, flags);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 55920b39d106..75458ecc8eab 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -144,7 +144,6 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
         return 0;
 
     INIT_LIST_HEAD(&ring->td_list);
-    INIT_LIST_HEAD(&ring->cancelled_td_list);
     if (num_segs == 0)
         return ring;
 
@@ -265,8 +264,8 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
         return;
 
     for (i = 0; i < 31; ++i)
-        if (dev->ep_rings[i])
-            xhci_ring_free(xhci, dev->ep_rings[i]);
+        if (dev->eps[i].ring)
+            xhci_ring_free(xhci, dev->eps[i].ring);
 
     if (dev->in_ctx)
         xhci_free_container_ctx(xhci, dev->in_ctx);
@@ -281,6 +280,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
         struct usb_device *udev, gfp_t flags)
 {
     struct xhci_virt_device *dev;
+    int i;
 
     /* Slot ID 0 is reserved */
     if (slot_id == 0 || xhci->devs[slot_id]) {
@@ -309,9 +309,13 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
     xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
             (unsigned long long)dev->in_ctx->dma);
 
+    /* Initialize the cancellation list for each endpoint */
+    for (i = 0; i < 31; i++)
+        INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
+
     /* Allocate endpoint 0 ring */
-    dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags);
-    if (!dev->ep_rings[0])
+    dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
+    if (!dev->eps[0].ring)
         goto fail;
 
     init_completion(&dev->cmd_completion);
@@ -428,8 +432,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
     ep0_ctx->ep_info2 |= ERROR_COUNT(3);
 
     ep0_ctx->deq =
-        dev->ep_rings[0]->first_seg->dma;
-    ep0_ctx->deq |= dev->ep_rings[0]->cycle_state;
+        dev->eps[0].ring->first_seg->dma;
+    ep0_ctx->deq |= dev->eps[0].ring->cycle_state;
 
     /* Steps 7 and 8 were done in xhci_alloc_virt_device() */
 
@@ -539,10 +543,11 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
     ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
 
     /* Set up the endpoint ring */
-    virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags);
-    if (!virt_dev->new_ep_rings[ep_index])
+    virt_dev->eps[ep_index].new_ring =
+        xhci_ring_alloc(xhci, 1, true, mem_flags);
+    if (!virt_dev->eps[ep_index].new_ring)
         return -ENOMEM;
-    ep_ring = virt_dev->new_ep_rings[ep_index];
+    ep_ring = virt_dev->eps[ep_index].new_ring;
     ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
 
     ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index ff5e6bc2299d..6a72d2022b45 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -296,16 +296,18 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
         unsigned int slot_id,
         unsigned int ep_index)
 {
-    struct xhci_ring *ep_ring;
+    struct xhci_virt_ep *ep;
+    unsigned int ep_state;
     u32 field;
     __u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
 
-    ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+    ep = &xhci->devs[slot_id]->eps[ep_index];
+    ep_state = ep->ep_state;
     /* Don't ring the doorbell for this endpoint if there are pending
      * cancellations because the we don't want to interrupt processing.
      */
-    if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)
-            && !(ep_ring->state & EP_HALTED)) {
+    if (!ep->cancels_pending && !(ep_state & SET_DEQ_PENDING)
+            && !(ep_state & EP_HALTED)) {
         field = xhci_readl(xhci, db_addr) & DB_MASK;
         xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
         /* Flush PCI posted writes - FIXME Matthew Wilcox says this
@@ -361,7 +363,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
         struct xhci_td *cur_td, struct xhci_dequeue_state *state)
 {
     struct xhci_virt_device *dev = xhci->devs[slot_id];
-    struct xhci_ring *ep_ring = dev->ep_rings[ep_index];
+    struct xhci_ring *ep_ring = dev->eps[ep_index].ring;
     struct xhci_generic_trb *trb;
     struct xhci_ep_ctx *ep_ctx;
     dma_addr_t addr;
@@ -369,7 +371,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
     state->new_cycle_state = 0;
     xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
     state->new_deq_seg = find_trb_seg(cur_td->start_seg,
-            ep_ring->stopped_trb,
+            dev->eps[ep_index].stopped_trb,
             &state->new_cycle_state);
     if (!state->new_deq_seg)
         BUG();
@@ -449,9 +451,11 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
         union xhci_trb *deq_ptr, u32 cycle_state);
 
 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
-        struct xhci_ring *ep_ring, unsigned int slot_id,
-        unsigned int ep_index, struct xhci_dequeue_state *deq_state)
+        unsigned int slot_id, unsigned int ep_index,
+        struct xhci_dequeue_state *deq_state)
 {
+    struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+
     xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
             "new deq ptr = %p (0x%llx dma), new cycle = %u\n",
             deq_state->new_deq_seg,
@@ -468,7 +472,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
      * if the ring is running, and ringing the doorbell starts the
      * ring running.
      */
-    ep_ring->state |= SET_DEQ_PENDING;
+    ep->ep_state |= SET_DEQ_PENDING;
 }
 
 /*
@@ -487,6 +491,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
     unsigned int slot_id;
     unsigned int ep_index;
     struct xhci_ring *ep_ring;
+    struct xhci_virt_ep *ep;
     struct list_head *entry;
     struct xhci_td *cur_td = 0;
     struct xhci_td *last_unlinked_td;
@@ -499,9 +504,10 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
     memset(&deq_state, 0, sizeof(deq_state));
     slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
     ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
-    ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+    ep = &xhci->devs[slot_id]->eps[ep_index];
+    ep_ring = ep->ring;
 
-    if (list_empty(&ep_ring->cancelled_td_list))
+    if (list_empty(&ep->cancelled_td_list))
         return;
 
     /* Fix up the ep ring first, so HW stops executing cancelled TDs.
@@ -509,7 +515,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
      * it.  We're also in the event handler, so we can't get re-interrupted
      * if another Stop Endpoint command completes
      */
-    list_for_each(entry, &ep_ring->cancelled_td_list) {
+    list_for_each(entry, &ep->cancelled_td_list) {
         cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
         xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
                 cur_td->first_trb,
@@ -518,7 +524,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
          * If we stopped on the TD we need to cancel, then we have to
          * move the xHC endpoint ring dequeue pointer past this TD.
          */
-        if (cur_td == ep_ring->stopped_td)
+        if (cur_td == ep->stopped_td)
             xhci_find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
                     &deq_state);
         else
@@ -529,13 +535,13 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
          * the cancelled TD list for URB completion later.
          */
         list_del(&cur_td->td_list);
-        ep_ring->cancels_pending--;
+        ep->cancels_pending--;
     }
     last_unlinked_td = cur_td;
 
     /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
     if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
-        xhci_queue_new_dequeue_state(xhci, ep_ring,
+        xhci_queue_new_dequeue_state(xhci,
                 slot_id, ep_index, &deq_state);
         xhci_ring_cmd_db(xhci);
     } else {
@@ -550,7 +556,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
      * So stop when we've completed the URB for the last TD we unlinked.
      */
     do {
-        cur_td = list_entry(ep_ring->cancelled_td_list.next,
+        cur_td = list_entry(ep->cancelled_td_list.next,
                 struct xhci_td, cancelled_td_list);
         list_del(&cur_td->cancelled_td_list);
 
@@ -597,7 +603,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
     slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
     ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
     dev = xhci->devs[slot_id];
-    ep_ring = dev->ep_rings[ep_index];
+    ep_ring = dev->eps[ep_index].ring;
     ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
     slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
 
@@ -641,7 +647,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
                 ep_ctx->deq);
     }
 
-    ep_ring->state &= ~SET_DEQ_PENDING;
+    dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
     ring_ep_doorbell(xhci, slot_id, ep_index);
 }
 
@@ -655,7 +661,7 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
 
     slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
     ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
-    ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+    ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
     /* This command will only fail if the endpoint wasn't halted,
      * but we don't care.
      */
@@ -673,7 +679,7 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
         xhci_ring_cmd_db(xhci);
     } else {
         /* Clear our internal halted state and restart the ring */
-        ep_ring->state &= ~EP_HALTED;
+        xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
         ring_ep_doorbell(xhci, slot_id, ep_index);
     }
 }
@@ -726,7 +732,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
                 xhci->devs[slot_id]->in_ctx);
         /* Input ctx add_flags are the endpoint index plus one */
         ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
-        ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+        ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
         if (!ep_ring) {
             /* This must have been an initial configure endpoint */
             xhci->devs[slot_id]->cmd_status =
@@ -734,13 +740,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
             complete(&xhci->devs[slot_id]->cmd_completion);
             break;
         }
-        ep_state = ep_ring->state;
+        ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
         xhci_dbg(xhci, "Completed config ep cmd - last ep index = %d, "
                 "state = %d\n", ep_index, ep_state);
         if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
                 ep_state & EP_HALTED) {
             /* Clear our internal halted state and restart ring */
-            xhci->devs[slot_id]->ep_rings[ep_index]->state &=
+            xhci->devs[slot_id]->eps[ep_index].ep_state &=
                 ~EP_HALTED;
             ring_ep_doorbell(xhci, slot_id, ep_index);
         } else {
@@ -864,6 +870,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
         struct xhci_transfer_event *event)
 {
     struct xhci_virt_device *xdev;
+    struct xhci_virt_ep *ep;
     struct xhci_ring *ep_ring;
     unsigned int slot_id;
     int ep_index;
@@ -887,7 +894,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
     /* Endpoint ID is 1 based, our index is zero based */
     ep_index = TRB_TO_EP_ID(event->flags) - 1;
     xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
-    ep_ring = xdev->ep_rings[ep_index];
+    ep = &xdev->eps[ep_index];
+    ep_ring = ep->ring;
     ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
     if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
         xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
@@ -948,7 +956,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
         break;
     case COMP_STALL:
         xhci_warn(xhci, "WARN: Stalled endpoint\n");
-        ep_ring->state |= EP_HALTED;
+        ep->ep_state |= EP_HALTED;
         status = -EPIPE;
         break;
     case COMP_TRB_ERR:
@@ -1016,12 +1024,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
             else
                 td->urb->actual_length = 0;
 
-            ep_ring->stopped_td = td;
-            ep_ring->stopped_trb = event_trb;
+            ep->stopped_td = td;
+            ep->stopped_trb = event_trb;
             xhci_queue_reset_ep(xhci, slot_id, ep_index);
-            xhci_cleanup_stalled_ring(xhci,
-                    td->urb->dev,
-                    ep_index, ep_ring);
+            xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
             xhci_ring_cmd_db(xhci);
             goto td_cleanup;
         default:
@@ -1161,8 +1167,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
          * stopped TDs.  A stopped TD may be restarted, so don't update
          * the ring dequeue pointer or take this TD off any lists yet.
          */
-        ep_ring->stopped_td = td;
-        ep_ring->stopped_trb = event_trb;
+        ep->stopped_td = td;
+        ep->stopped_trb = event_trb;
     } else {
         if (trb_comp_code == COMP_STALL ||
                 trb_comp_code == COMP_BABBLE) {
@@ -1172,8 +1178,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
              * pointer past the TD.  We can't do that here because
              * the halt condition must be cleared first.
              */
-            ep_ring->stopped_td = td;
-            ep_ring->stopped_trb = event_trb;
+            ep->stopped_td = td;
+            ep->stopped_trb = event_trb;
         } else {
             /* Update ring dequeue pointer */
             while (ep_ring->dequeue != td->last_trb)
@@ -1206,7 +1212,7 @@ td_cleanup:
         /* Was this TD slated to be cancelled but completed anyway? */
         if (!list_empty(&td->cancelled_td_list)) {
             list_del(&td->cancelled_td_list);
-            ep_ring->cancels_pending--;
+            ep->cancels_pending--;
         }
         /* Leave the TD around for the reset endpoint function to use
          * (but only if it's not a control endpoint, since we already
@@ -1369,7 +1375,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 {
     int ret;
     struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
-    ret = prepare_ring(xhci, xdev->ep_rings[ep_index],
+    ret = prepare_ring(xhci, xdev->eps[ep_index].ring,
             ep_ctx->ep_info & EP_STATE_MASK,
             num_trbs, mem_flags);
     if (ret)
@@ -1389,9 +1395,9 @@ static int prepare_transfer(struct xhci_hcd *xhci,
     (*td)->urb = urb;
     urb->hcpriv = (void *) (*td);
     /* Add this TD to the tail of the endpoint ring's TD list */
-    list_add_tail(&(*td)->td_list, &xdev->ep_rings[ep_index]->td_list);
-    (*td)->start_seg = xdev->ep_rings[ep_index]->enq_seg;
-    (*td)->first_trb = xdev->ep_rings[ep_index]->enqueue;
+    list_add_tail(&(*td)->td_list, &xdev->eps[ep_index].ring->td_list);
+    (*td)->start_seg = xdev->eps[ep_index].ring->enq_seg;
+    (*td)->first_trb = xdev->eps[ep_index].ring->enqueue;
 
     return 0;
 }
@@ -1525,7 +1531,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
     struct xhci_generic_trb *start_trb;
     int start_cycle;
 
-    ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+    ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
     num_trbs = count_sg_trbs_needed(xhci, urb);
     num_sgs = urb->num_sgs;
 
@@ -1658,7 +1664,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
     if (urb->sg)
         return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
 
-    ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+    ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
 
     num_trbs = 0;
     /* How much data is (potentially) left before the 64KB boundary? */
@@ -1769,7 +1775,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
     u32 field, length_field;
     struct xhci_td *td;
 
-    ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+    ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
 
     /*
      * Need to copy setup packet into setup TRB, so we can't use the setup
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index a7728aa91582..627092286d1b 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -625,6 +625,23 @@ struct xhci_input_control_ctx {
 /* add context bitmasks */
 #define ADD_EP(x)   (0x1 << x)
 
+struct xhci_virt_ep {
+    struct xhci_ring        *ring;
+    /* Temporary storage in case the configure endpoint command fails and we
+     * have to restore the device state to the previous state
+     */
+    struct xhci_ring        *new_ring;
+    unsigned int            ep_state;
+#define SET_DEQ_PENDING     (1 << 0)
+#define EP_HALTED           (1 << 1)
+    /* ---- Related to URB cancellation ---- */
+    struct list_head        cancelled_td_list;
+    unsigned int            cancels_pending;
+    /* The TRB that was last reported in a stopped endpoint ring */
+    union xhci_trb          *stopped_trb;
+    struct xhci_td          *stopped_td;
+};
+
 struct xhci_virt_device {
     /*
      * Commands to the hardware are passed an "input context" that
@@ -637,13 +654,7 @@ struct xhci_virt_device {
     struct xhci_container_ctx *out_ctx;
     /* Used for addressing devices and configuration changes */
     struct xhci_container_ctx *in_ctx;
-
-    /* FIXME when stream support is added */
-    struct xhci_ring *ep_rings[31];
-    /* Temporary storage in case the configure endpoint command fails and we
-     * have to restore the device state to the previous state
-     */
-    struct xhci_ring *new_ep_rings[31];
+    struct xhci_virt_ep     eps[31];
     struct completion cmd_completion;
     /* Status of the last command issued for this device */
     u32 cmd_status;
@@ -945,15 +956,6 @@ struct xhci_ring {
     struct xhci_segment *deq_seg;
     unsigned int deq_updates;
     struct list_head td_list;
-    /* ---- Related to URB cancellation ---- */
-    struct list_head cancelled_td_list;
-    unsigned int cancels_pending;
-    unsigned int state;
-#define SET_DEQ_PENDING (1 << 0)
-#define EP_HALTED (1 << 1)
-    /* The TRB that was last reported in a stopped endpoint ring */
-    union xhci_trb *stopped_trb;
-    struct xhci_td *stopped_td;
     /*
      * Write the cycle state into the TRB cycle field to give ownership of
      * the TRB to the host controller (if we are the producer), or to check
@@ -1236,11 +1238,10 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
         unsigned int slot_id, unsigned int ep_index,
         struct xhci_td *cur_td, struct xhci_dequeue_state *state);
 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
-        struct xhci_ring *ep_ring, unsigned int slot_id,
-        unsigned int ep_index, struct xhci_dequeue_state *deq_state);
+        unsigned int slot_id, unsigned int ep_index,
+        struct xhci_dequeue_state *deq_state);
 void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
-        struct usb_device *udev,
-        unsigned int ep_index, struct xhci_ring *ep_ring);
+        struct usb_device *udev, unsigned int ep_index);
 void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
         unsigned int slot_id, unsigned int ep_index,
         struct xhci_dequeue_state *deq_state);