author	Sarah Sharp <sarah.a.sharp@linux.intel.com>	2009-09-04 13:53:09 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2009-09-23 09:46:39 -0400
commit	63a0d9abd18cdcf5a985029c266c6bfe0511768f (patch)
tree	2ae717082d022b2a86a64b86dee48ddfb2be0627 /drivers/usb/host/xhci-ring.c
parent	9e221be815cd263480928248bfd4541497017a1b (diff)
USB: xhci: Endpoint representation refactoring.
The xhci_ring structure contained information that is really related to
an endpoint, not a ring. This will cause problems later when endpoint
streams are supported and there are multiple rings per endpoint.

Move the endpoint state and cancellation information into a new virtual
endpoint structure, xhci_virt_ep. The list of TRBs to be cancelled should
be per endpoint, not per ring, for easy access. There can be only one TRB
that the endpoint stopped on after a stop endpoint command (even with
streams enabled); move the stopped TRB information into the new virtual
endpoint structure. Also move the 31 endpoint rings and temporary ring
storage from the virtual device structure (xhci_virt_device) into the
virtual endpoint structure (xhci_virt_ep).

Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
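For reference, a minimal sketch of the per-endpoint structure this patch introduces. The real definition is added to xhci.h, which is outside this file's diff, so field order, exact flag values, and any extra members are assumptions here; every field shown is one the hunks below actually touch (ring, ep_state, cancelled_td_list, cancels_pending, stopped_td, stopped_trb), plus the temporary ring storage the commit message mentions:

/* Sketch only -- per-endpoint state split out of xhci_ring and
 * xhci_virt_device; see xhci.h in the full patch for the real definition.
 */
struct xhci_virt_ep {
	struct xhci_ring	*ring;
	/* Temporary ring storage while an endpoint is being reconfigured */
	struct xhci_ring	*new_ring;
	unsigned int		ep_state;	/* SET_DEQ_PENDING, EP_HALTED */
	/* URB cancellation bookkeeping, now per endpoint rather than per ring */
	struct list_head	cancelled_td_list;
	unsigned int		cancels_pending;
	/* Where the endpoint stopped after a Stop Endpoint command */
	struct xhci_td		*stopped_td;
	union xhci_trb		*stopped_trb;
};

Correspondingly, xhci_virt_device is assumed to replace its ep_rings[31] array with struct xhci_virt_ep eps[31], which is why accesses in the hunks below change from xdev->ep_rings[ep_index] to xdev->eps[ep_index].ring.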
Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r--	drivers/usb/host/xhci-ring.c	90
1 file changed, 48 insertions(+), 42 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index ff5e6bc2299d..6a72d2022b45 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -296,16 +296,18 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
 		unsigned int slot_id,
 		unsigned int ep_index)
 {
-	struct xhci_ring *ep_ring;
+	struct xhci_virt_ep *ep;
+	unsigned int ep_state;
 	u32 field;
 	__u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
 
-	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+	ep = &xhci->devs[slot_id]->eps[ep_index];
+	ep_state = ep->ep_state;
 	/* Don't ring the doorbell for this endpoint if there are pending
 	 * cancellations because the we don't want to interrupt processing.
 	 */
-	if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)
-			&& !(ep_ring->state & EP_HALTED)) {
+	if (!ep->cancels_pending && !(ep_state & SET_DEQ_PENDING)
+			&& !(ep_state & EP_HALTED)) {
 		field = xhci_readl(xhci, db_addr) & DB_MASK;
 		xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
 		/* Flush PCI posted writes - FIXME Matthew Wilcox says this
@@ -361,7 +363,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 		struct xhci_td *cur_td, struct xhci_dequeue_state *state)
 {
 	struct xhci_virt_device *dev = xhci->devs[slot_id];
-	struct xhci_ring *ep_ring = dev->ep_rings[ep_index];
+	struct xhci_ring *ep_ring = dev->eps[ep_index].ring;
 	struct xhci_generic_trb *trb;
 	struct xhci_ep_ctx *ep_ctx;
 	dma_addr_t addr;
@@ -369,7 +371,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 	state->new_cycle_state = 0;
 	xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
 	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
-			ep_ring->stopped_trb,
+			dev->eps[ep_index].stopped_trb,
 			&state->new_cycle_state);
 	if (!state->new_deq_seg)
 		BUG();
@@ -449,9 +451,11 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
 		union xhci_trb *deq_ptr, u32 cycle_state);
 
 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
-		struct xhci_ring *ep_ring, unsigned int slot_id,
-		unsigned int ep_index, struct xhci_dequeue_state *deq_state)
+		unsigned int slot_id, unsigned int ep_index,
+		struct xhci_dequeue_state *deq_state)
 {
+	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+
 	xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
 			"new deq ptr = %p (0x%llx dma), new cycle = %u\n",
 			deq_state->new_deq_seg,
@@ -468,7 +472,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
 	 * if the ring is running, and ringing the doorbell starts the
 	 * ring running.
 	 */
-	ep_ring->state |= SET_DEQ_PENDING;
+	ep->ep_state |= SET_DEQ_PENDING;
 }
 
 /*
@@ -487,6 +491,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 	unsigned int slot_id;
 	unsigned int ep_index;
 	struct xhci_ring *ep_ring;
+	struct xhci_virt_ep *ep;
 	struct list_head *entry;
 	struct xhci_td *cur_td = 0;
 	struct xhci_td *last_unlinked_td;
@@ -499,9 +504,10 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 	memset(&deq_state, 0, sizeof(deq_state));
 	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
 	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
-	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+	ep = &xhci->devs[slot_id]->eps[ep_index];
+	ep_ring = ep->ring;
 
-	if (list_empty(&ep_ring->cancelled_td_list))
+	if (list_empty(&ep->cancelled_td_list))
 		return;
 
 	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
@@ -509,7 +515,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 	 * it. We're also in the event handler, so we can't get re-interrupted
 	 * if another Stop Endpoint command completes
 	 */
-	list_for_each(entry, &ep_ring->cancelled_td_list) {
+	list_for_each(entry, &ep->cancelled_td_list) {
 		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
 		xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
 				cur_td->first_trb,
@@ -518,7 +524,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 		 * If we stopped on the TD we need to cancel, then we have to
 		 * move the xHC endpoint ring dequeue pointer past this TD.
 		 */
-		if (cur_td == ep_ring->stopped_td)
+		if (cur_td == ep->stopped_td)
 			xhci_find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
 					&deq_state);
 		else
@@ -529,13 +535,13 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 		 * the cancelled TD list for URB completion later.
 		 */
 		list_del(&cur_td->td_list);
-		ep_ring->cancels_pending--;
+		ep->cancels_pending--;
 	}
 	last_unlinked_td = cur_td;
 
 	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
 	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
-		xhci_queue_new_dequeue_state(xhci, ep_ring,
+		xhci_queue_new_dequeue_state(xhci,
 				slot_id, ep_index, &deq_state);
 		xhci_ring_cmd_db(xhci);
 	} else {
@@ -550,7 +556,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 	 * So stop when we've completed the URB for the last TD we unlinked.
 	 */
 	do {
-		cur_td = list_entry(ep_ring->cancelled_td_list.next,
+		cur_td = list_entry(ep->cancelled_td_list.next,
 				struct xhci_td, cancelled_td_list);
 		list_del(&cur_td->cancelled_td_list);
 
@@ -597,7 +603,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
 	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
 	dev = xhci->devs[slot_id];
-	ep_ring = dev->ep_rings[ep_index];
+	ep_ring = dev->eps[ep_index].ring;
 	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
 	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
 
@@ -641,7 +647,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 				ep_ctx->deq);
 	}
 
-	ep_ring->state &= ~SET_DEQ_PENDING;
+	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
 	ring_ep_doorbell(xhci, slot_id, ep_index);
 }
 
@@ -655,7 +661,7 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
 
 	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
 	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
-	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
 	/* This command will only fail if the endpoint wasn't halted,
 	 * but we don't care.
 	 */
@@ -673,7 +679,7 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
 		xhci_ring_cmd_db(xhci);
 	} else {
 		/* Clear our internal halted state and restart the ring */
-		ep_ring->state &= ~EP_HALTED;
+		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
 		ring_ep_doorbell(xhci, slot_id, ep_index);
 	}
 }
@@ -726,7 +732,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 				xhci->devs[slot_id]->in_ctx);
 		/* Input ctx add_flags are the endpoint index plus one */
 		ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
-		ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+		ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
 		if (!ep_ring) {
 			/* This must have been an initial configure endpoint */
 			xhci->devs[slot_id]->cmd_status =
@@ -734,13 +740,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 			complete(&xhci->devs[slot_id]->cmd_completion);
 			break;
 		}
-		ep_state = ep_ring->state;
+		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
 		xhci_dbg(xhci, "Completed config ep cmd - last ep index = %d, "
 				"state = %d\n", ep_index, ep_state);
 		if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
 				ep_state & EP_HALTED) {
 			/* Clear our internal halted state and restart ring */
-			xhci->devs[slot_id]->ep_rings[ep_index]->state &=
+			xhci->devs[slot_id]->eps[ep_index].ep_state &=
 				~EP_HALTED;
 			ring_ep_doorbell(xhci, slot_id, ep_index);
 		} else {
@@ -864,6 +870,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		struct xhci_transfer_event *event)
 {
 	struct xhci_virt_device *xdev;
+	struct xhci_virt_ep *ep;
 	struct xhci_ring *ep_ring;
 	unsigned int slot_id;
 	int ep_index;
@@ -887,7 +894,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	/* Endpoint ID is 1 based, our index is zero based */
 	ep_index = TRB_TO_EP_ID(event->flags) - 1;
 	xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
-	ep_ring = xdev->ep_rings[ep_index];
+	ep = &xdev->eps[ep_index];
+	ep_ring = ep->ring;
 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
 	if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
 		xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
@@ -948,7 +956,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		break;
 	case COMP_STALL:
 		xhci_warn(xhci, "WARN: Stalled endpoint\n");
-		ep_ring->state |= EP_HALTED;
+		ep->ep_state |= EP_HALTED;
 		status = -EPIPE;
 		break;
 	case COMP_TRB_ERR:
@@ -1016,12 +1024,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			else
 				td->urb->actual_length = 0;
 
-			ep_ring->stopped_td = td;
-			ep_ring->stopped_trb = event_trb;
+			ep->stopped_td = td;
+			ep->stopped_trb = event_trb;
 			xhci_queue_reset_ep(xhci, slot_id, ep_index);
-			xhci_cleanup_stalled_ring(xhci,
-					td->urb->dev,
-					ep_index, ep_ring);
+			xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
 			xhci_ring_cmd_db(xhci);
 			goto td_cleanup;
 		default:
@@ -1161,8 +1167,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			 * stopped TDs. A stopped TD may be restarted, so don't update
 			 * the ring dequeue pointer or take this TD off any lists yet.
 			 */
-			ep_ring->stopped_td = td;
-			ep_ring->stopped_trb = event_trb;
+			ep->stopped_td = td;
+			ep->stopped_trb = event_trb;
 		} else {
 			if (trb_comp_code == COMP_STALL ||
 					trb_comp_code == COMP_BABBLE) {
@@ -1172,8 +1178,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 				 * pointer past the TD. We can't do that here because
 				 * the halt condition must be cleared first.
 				 */
-				ep_ring->stopped_td = td;
-				ep_ring->stopped_trb = event_trb;
+				ep->stopped_td = td;
+				ep->stopped_trb = event_trb;
 			} else {
 				/* Update ring dequeue pointer */
 				while (ep_ring->dequeue != td->last_trb)
@@ -1206,7 +1212,7 @@ td_cleanup:
 		/* Was this TD slated to be cancelled but completed anyway? */
 		if (!list_empty(&td->cancelled_td_list)) {
 			list_del(&td->cancelled_td_list);
-			ep_ring->cancels_pending--;
+			ep->cancels_pending--;
 		}
 		/* Leave the TD around for the reset endpoint function to use
 		 * (but only if it's not a control endpoint, since we already
@@ -1369,7 +1375,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 {
 	int ret;
 	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
-	ret = prepare_ring(xhci, xdev->ep_rings[ep_index],
+	ret = prepare_ring(xhci, xdev->eps[ep_index].ring,
 			ep_ctx->ep_info & EP_STATE_MASK,
 			num_trbs, mem_flags);
 	if (ret)
@@ -1389,9 +1395,9 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 	(*td)->urb = urb;
 	urb->hcpriv = (void *) (*td);
 	/* Add this TD to the tail of the endpoint ring's TD list */
-	list_add_tail(&(*td)->td_list, &xdev->ep_rings[ep_index]->td_list);
-	(*td)->start_seg = xdev->ep_rings[ep_index]->enq_seg;
-	(*td)->first_trb = xdev->ep_rings[ep_index]->enqueue;
+	list_add_tail(&(*td)->td_list, &xdev->eps[ep_index].ring->td_list);
+	(*td)->start_seg = xdev->eps[ep_index].ring->enq_seg;
+	(*td)->first_trb = xdev->eps[ep_index].ring->enqueue;
 
 	return 0;
 }
@@ -1525,7 +1531,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	struct xhci_generic_trb *start_trb;
 	int start_cycle;
 
-	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
 	num_trbs = count_sg_trbs_needed(xhci, urb);
 	num_sgs = urb->num_sgs;
 
@@ -1658,7 +1664,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	if (urb->sg)
 		return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
 
-	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
 
 	num_trbs = 0;
 	/* How much data is (potentially) left before the 64KB boundary? */
@@ -1769,7 +1775,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	u32 field, length_field;
 	struct xhci_td *td;
 
-	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
 
 	/*
 	 * Need to copy setup packet into setup TRB, so we can't use the setup