about summary refs log tree commit diff stats
path: root/drivers/usb/host
diff options
context:
space:
mode:
authorJohn Youn <johnyoun@synopsys.com>2009-07-27 15:05:15 -0400
committerGreg Kroah-Hartman <gregkh@suse.de>2009-07-28 17:31:13 -0400
commitd115b04818e57bdbc7ccde4d0660b15e33013dc8 (patch)
tree48b699c9b81fb0570d03009f353225faad238c64 /drivers/usb/host
parent28c2d2efb48dec2f0b050affae6d5787d6449e47 (diff)
USB: xhci: Support for 64-byte contexts
Adds support for controllers that use 64-byte contexts. The following context data structures are affected by this: Device, Input, Input Control, Endpoint, and Slot. To accommodate the use of either 32 or 64-byte contexts, a Device or Input context can only be accessed through functions which look-up and return pointers to their contained contexts. Signed-off-by: John Youn <johnyoun@synopsys.com> Acked-by: Sarah Sharp <sarah.a.sharp@linux.intel.com> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/usb/host')
-rw-r--r--drivers/usb/host/xhci-dbg.c125
-rw-r--r--drivers/usb/host/xhci-hcd.c121
-rw-r--r--drivers/usb/host/xhci-mem.c121
-rw-r--r--drivers/usb/host/xhci-ring.c22
-rw-r--r--drivers/usb/host/xhci.h61
5 files changed, 287 insertions(+), 163 deletions(-)
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index d77f8de11256..705e34324156 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -393,103 +393,138 @@ void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
393 upper_32_bits(val)); 393 upper_32_bits(val));
394} 394}
395 395
396dma_addr_t xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_slot_ctx *slot, dma_addr_t dma) 396/* Print the last 32 bytes for 64-byte contexts */
397static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma)
398{
399 int i;
400 for (i = 0; i < 4; ++i) {
401 xhci_dbg(xhci, "@%p (virt) @%08llx "
402 "(dma) %#08llx - rsvd64[%d]\n",
403 &ctx[4 + i], (unsigned long long)dma,
404 ctx[4 + i], i);
405 dma += 8;
406 }
407}
408
409void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
397{ 410{
398 /* Fields are 32 bits wide, DMA addresses are in bytes */ 411 /* Fields are 32 bits wide, DMA addresses are in bytes */
399 int field_size = 32 / 8; 412 int field_size = 32 / 8;
400 int i; 413 int i;
401 414
415 struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
416 dma_addr_t dma = ctx->dma + ((unsigned long)slot_ctx - (unsigned long)ctx);
417 int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
418
402 xhci_dbg(xhci, "Slot Context:\n"); 419 xhci_dbg(xhci, "Slot Context:\n");
403 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n", 420 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
404 &slot->dev_info, 421 &slot_ctx->dev_info,
405 (unsigned long long)dma, slot->dev_info); 422 (unsigned long long)dma, slot_ctx->dev_info);
406 dma += field_size; 423 dma += field_size;
407 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n", 424 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
408 &slot->dev_info2, 425 &slot_ctx->dev_info2,
409 (unsigned long long)dma, slot->dev_info2); 426 (unsigned long long)dma, slot_ctx->dev_info2);
410 dma += field_size; 427 dma += field_size;
411 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n", 428 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
412 &slot->tt_info, 429 &slot_ctx->tt_info,
413 (unsigned long long)dma, slot->tt_info); 430 (unsigned long long)dma, slot_ctx->tt_info);
414 dma += field_size; 431 dma += field_size;
415 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n", 432 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
416 &slot->dev_state, 433 &slot_ctx->dev_state,
417 (unsigned long long)dma, slot->dev_state); 434 (unsigned long long)dma, slot_ctx->dev_state);
418 dma += field_size; 435 dma += field_size;
419 for (i = 0; i < 4; ++i) { 436 for (i = 0; i < 4; ++i) {
420 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n", 437 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
421 &slot->reserved[i], (unsigned long long)dma, 438 &slot_ctx->reserved[i], (unsigned long long)dma,
422 slot->reserved[i], i); 439 slot_ctx->reserved[i], i);
423 dma += field_size; 440 dma += field_size;
424 } 441 }
425 442
426 return dma; 443 if (csz)
444 dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
427} 445}
428 446
429dma_addr_t xhci_dbg_ep_ctx(struct xhci_hcd *xhci, struct xhci_ep_ctx *ep, dma_addr_t dma, unsigned int last_ep) 447void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
448 struct xhci_container_ctx *ctx,
449 unsigned int last_ep)
430{ 450{
431 int i, j; 451 int i, j;
432 int last_ep_ctx = 31; 452 int last_ep_ctx = 31;
433 /* Fields are 32 bits wide, DMA addresses are in bytes */ 453 /* Fields are 32 bits wide, DMA addresses are in bytes */
434 int field_size = 32 / 8; 454 int field_size = 32 / 8;
455 int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
435 456
436 if (last_ep < 31) 457 if (last_ep < 31)
437 last_ep_ctx = last_ep + 1; 458 last_ep_ctx = last_ep + 1;
438 for (i = 0; i < last_ep_ctx; ++i) { 459 for (i = 0; i < last_ep_ctx; ++i) {
460 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i);
461 dma_addr_t dma = ctx->dma +
462 ((unsigned long)ep_ctx - (unsigned long)ctx);
463
439 xhci_dbg(xhci, "Endpoint %02d Context:\n", i); 464 xhci_dbg(xhci, "Endpoint %02d Context:\n", i);
440 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n", 465 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
441 &ep[i].ep_info, 466 &ep_ctx->ep_info,
442 (unsigned long long)dma, ep[i].ep_info); 467 (unsigned long long)dma, ep_ctx->ep_info);
443 dma += field_size; 468 dma += field_size;
444 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n", 469 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
445 &ep[i].ep_info2, 470 &ep_ctx->ep_info2,
446 (unsigned long long)dma, ep[i].ep_info2); 471 (unsigned long long)dma, ep_ctx->ep_info2);
447 dma += field_size; 472 dma += field_size;
448 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08llx - deq\n", 473 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08llx - deq\n",
449 &ep[i].deq, 474 &ep_ctx->deq,
450 (unsigned long long)dma, ep[i].deq); 475 (unsigned long long)dma, ep_ctx->deq);
451 dma += 2*field_size; 476 dma += 2*field_size;
452 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n", 477 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
453 &ep[i].tx_info, 478 &ep_ctx->tx_info,
454 (unsigned long long)dma, ep[i].tx_info); 479 (unsigned long long)dma, ep_ctx->tx_info);
455 dma += field_size; 480 dma += field_size;
456 for (j = 0; j < 3; ++j) { 481 for (j = 0; j < 3; ++j) {
457 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n", 482 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
458 &ep[i].reserved[j], 483 &ep_ctx->reserved[j],
459 (unsigned long long)dma, 484 (unsigned long long)dma,
460 ep[i].reserved[j], j); 485 ep_ctx->reserved[j], j);
461 dma += field_size; 486 dma += field_size;
462 } 487 }
488
489 if (csz)
490 dbg_rsvd64(xhci, (u64 *)ep_ctx, dma);
463 } 491 }
464 return dma;
465} 492}
466 493
467void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep) 494void xhci_dbg_ctx(struct xhci_hcd *xhci,
495 struct xhci_container_ctx *ctx,
496 unsigned int last_ep)
468{ 497{
469 int i; 498 int i;
470 /* Fields are 32 bits wide, DMA addresses are in bytes */ 499 /* Fields are 32 bits wide, DMA addresses are in bytes */
471 int field_size = 32 / 8; 500 int field_size = 32 / 8;
472 501 struct xhci_slot_ctx *slot_ctx;
473 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n", 502 dma_addr_t dma = ctx->dma;
474 &ctx->drop_flags, (unsigned long long)dma, 503 int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
475 ctx->drop_flags); 504
476 dma += field_size; 505 if (ctx->type == XHCI_CTX_TYPE_INPUT) {
477 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n", 506 struct xhci_input_control_ctx *ctrl_ctx =
478 &ctx->add_flags, (unsigned long long)dma, 507 xhci_get_input_control_ctx(xhci, ctx);
479 ctx->add_flags); 508 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
480 dma += field_size; 509 &ctrl_ctx->drop_flags, (unsigned long long)dma,
481 for (i = 0; i < 6; ++i) { 510 ctrl_ctx->drop_flags);
482 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
483 &ctx->rsvd[i], (unsigned long long)dma,
484 ctx->rsvd[i], i);
485 dma += field_size; 511 dma += field_size;
512 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
513 &ctrl_ctx->add_flags, (unsigned long long)dma,
514 ctrl_ctx->add_flags);
515 dma += field_size;
516 for (i = 0; i < 6; ++i) {
517 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd2[%d]\n",
518 &ctrl_ctx->rsvd2[i], (unsigned long long)dma,
519 ctrl_ctx->rsvd2[i], i);
520 dma += field_size;
521 }
522
523 if (csz)
524 dbg_rsvd64(xhci, (u64 *)ctrl_ctx, dma);
486 } 525 }
487 dma = xhci_dbg_slot_ctx(xhci, &ctx->slot, dma);
488 dma = xhci_dbg_ep_ctx(xhci, ctx->ep, dma, last_ep);
489}
490 526
491void xhci_dbg_device_ctx(struct xhci_hcd *xhci, struct xhci_device_ctx *ctx, dma_addr_t dma, unsigned int last_ep) 527 slot_ctx = xhci_get_slot_ctx(xhci, ctx);
492{ 528 xhci_dbg_slot_ctx(xhci, ctx);
493 dma = xhci_dbg_slot_ctx(xhci, &ctx->slot, dma); 529 xhci_dbg_ep_ctx(xhci, ctx, last_ep);
494 dma = xhci_dbg_ep_ctx(xhci, ctx->ep, dma, last_ep);
495} 530}
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 921dd173d793..057a07e876be 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -722,7 +722,9 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
722 struct usb_host_endpoint *ep) 722 struct usb_host_endpoint *ep)
723{ 723{
724 struct xhci_hcd *xhci; 724 struct xhci_hcd *xhci;
725 struct xhci_device_control *in_ctx; 725 struct xhci_container_ctx *in_ctx, *out_ctx;
726 struct xhci_input_control_ctx *ctrl_ctx;
727 struct xhci_slot_ctx *slot_ctx;
726 unsigned int last_ctx; 728 unsigned int last_ctx;
727 unsigned int ep_index; 729 unsigned int ep_index;
728 struct xhci_ep_ctx *ep_ctx; 730 struct xhci_ep_ctx *ep_ctx;
@@ -750,31 +752,34 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
750 } 752 }
751 753
752 in_ctx = xhci->devs[udev->slot_id]->in_ctx; 754 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
755 out_ctx = xhci->devs[udev->slot_id]->out_ctx;
756 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
753 ep_index = xhci_get_endpoint_index(&ep->desc); 757 ep_index = xhci_get_endpoint_index(&ep->desc);
754 ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index]; 758 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
755 /* If the HC already knows the endpoint is disabled, 759 /* If the HC already knows the endpoint is disabled,
756 * or the HCD has noted it is disabled, ignore this request 760 * or the HCD has noted it is disabled, ignore this request
757 */ 761 */
758 if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED || 762 if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
759 in_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) { 763 ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
760 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", 764 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
761 __func__, ep); 765 __func__, ep);
762 return 0; 766 return 0;
763 } 767 }
764 768
765 in_ctx->drop_flags |= drop_flag; 769 ctrl_ctx->drop_flags |= drop_flag;
766 new_drop_flags = in_ctx->drop_flags; 770 new_drop_flags = ctrl_ctx->drop_flags;
767 771
768 in_ctx->add_flags = ~drop_flag; 772 ctrl_ctx->add_flags = ~drop_flag;
769 new_add_flags = in_ctx->add_flags; 773 new_add_flags = ctrl_ctx->add_flags;
770 774
771 last_ctx = xhci_last_valid_endpoint(in_ctx->add_flags); 775 last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags);
776 slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
772 /* Update the last valid endpoint context, if we deleted the last one */ 777 /* Update the last valid endpoint context, if we deleted the last one */
773 if ((in_ctx->slot.dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) { 778 if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
774 in_ctx->slot.dev_info &= ~LAST_CTX_MASK; 779 slot_ctx->dev_info &= ~LAST_CTX_MASK;
775 in_ctx->slot.dev_info |= LAST_CTX(last_ctx); 780 slot_ctx->dev_info |= LAST_CTX(last_ctx);
776 } 781 }
777 new_slot_info = in_ctx->slot.dev_info; 782 new_slot_info = slot_ctx->dev_info;
778 783
779 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); 784 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
780 785
@@ -804,9 +809,11 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
804 struct usb_host_endpoint *ep) 809 struct usb_host_endpoint *ep)
805{ 810{
806 struct xhci_hcd *xhci; 811 struct xhci_hcd *xhci;
807 struct xhci_device_control *in_ctx; 812 struct xhci_container_ctx *in_ctx, *out_ctx;
808 unsigned int ep_index; 813 unsigned int ep_index;
809 struct xhci_ep_ctx *ep_ctx; 814 struct xhci_ep_ctx *ep_ctx;
815 struct xhci_slot_ctx *slot_ctx;
816 struct xhci_input_control_ctx *ctrl_ctx;
810 u32 added_ctxs; 817 u32 added_ctxs;
811 unsigned int last_ctx; 818 unsigned int last_ctx;
812 u32 new_add_flags, new_drop_flags, new_slot_info; 819 u32 new_add_flags, new_drop_flags, new_slot_info;
@@ -839,12 +846,14 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
839 } 846 }
840 847
841 in_ctx = xhci->devs[udev->slot_id]->in_ctx; 848 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
849 out_ctx = xhci->devs[udev->slot_id]->out_ctx;
850 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
842 ep_index = xhci_get_endpoint_index(&ep->desc); 851 ep_index = xhci_get_endpoint_index(&ep->desc);
843 ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index]; 852 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
844 /* If the HCD has already noted the endpoint is enabled, 853 /* If the HCD has already noted the endpoint is enabled,
845 * ignore this request. 854 * ignore this request.
846 */ 855 */
847 if (in_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) { 856 if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
848 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", 857 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
849 __func__, ep); 858 __func__, ep);
850 return 0; 859 return 0;
@@ -862,8 +871,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
862 return -ENOMEM; 871 return -ENOMEM;
863 } 872 }
864 873
865 in_ctx->add_flags |= added_ctxs; 874 ctrl_ctx->add_flags |= added_ctxs;
866 new_add_flags = in_ctx->add_flags; 875 new_add_flags = ctrl_ctx->add_flags;
867 876
868 /* If xhci_endpoint_disable() was called for this endpoint, but the 877 /* If xhci_endpoint_disable() was called for this endpoint, but the
869 * xHC hasn't been notified yet through the check_bandwidth() call, 878 * xHC hasn't been notified yet through the check_bandwidth() call,
@@ -871,14 +880,15 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
871 * descriptors. We must drop and re-add this endpoint, so we leave the 880 * descriptors. We must drop and re-add this endpoint, so we leave the
872 * drop flags alone. 881 * drop flags alone.
873 */ 882 */
874 new_drop_flags = in_ctx->drop_flags; 883 new_drop_flags = ctrl_ctx->drop_flags;
875 884
885 slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
876 /* Update the last valid endpoint context, if we just added one past */ 886 /* Update the last valid endpoint context, if we just added one past */
877 if ((in_ctx->slot.dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) { 887 if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
878 in_ctx->slot.dev_info &= ~LAST_CTX_MASK; 888 slot_ctx->dev_info &= ~LAST_CTX_MASK;
879 in_ctx->slot.dev_info |= LAST_CTX(last_ctx); 889 slot_ctx->dev_info |= LAST_CTX(last_ctx);
880 } 890 }
881 new_slot_info = in_ctx->slot.dev_info; 891 new_slot_info = slot_ctx->dev_info;
882 892
883 /* Store the usb_device pointer for later use */ 893 /* Store the usb_device pointer for later use */
884 ep->hcpriv = udev; 894 ep->hcpriv = udev;
@@ -892,9 +902,11 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
892 return 0; 902 return 0;
893} 903}
894 904
895static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev) 905static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
896{ 906{
907 struct xhci_input_control_ctx *ctrl_ctx;
897 struct xhci_ep_ctx *ep_ctx; 908 struct xhci_ep_ctx *ep_ctx;
909 struct xhci_slot_ctx *slot_ctx;
898 int i; 910 int i;
899 911
900 /* When a device's add flag and drop flag are zero, any subsequent 912 /* When a device's add flag and drop flag are zero, any subsequent
@@ -902,13 +914,15 @@ static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev)
902 * untouched. Make sure we don't leave any old state in the input 914 * untouched. Make sure we don't leave any old state in the input
903 * endpoint contexts. 915 * endpoint contexts.
904 */ 916 */
905 virt_dev->in_ctx->drop_flags = 0; 917 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
906 virt_dev->in_ctx->add_flags = 0; 918 ctrl_ctx->drop_flags = 0;
907 virt_dev->in_ctx->slot.dev_info &= ~LAST_CTX_MASK; 919 ctrl_ctx->add_flags = 0;
920 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
921 slot_ctx->dev_info &= ~LAST_CTX_MASK;
908 /* Endpoint 0 is always valid */ 922 /* Endpoint 0 is always valid */
909 virt_dev->in_ctx->slot.dev_info |= LAST_CTX(1); 923 slot_ctx->dev_info |= LAST_CTX(1);
910 for (i = 1; i < 31; ++i) { 924 for (i = 1; i < 31; ++i) {
911 ep_ctx = &virt_dev->in_ctx->ep[i]; 925 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
912 ep_ctx->ep_info = 0; 926 ep_ctx->ep_info = 0;
913 ep_ctx->ep_info2 = 0; 927 ep_ctx->ep_info2 = 0;
914 ep_ctx->deq = 0; 928 ep_ctx->deq = 0;
@@ -934,6 +948,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
934 unsigned long flags; 948 unsigned long flags;
935 struct xhci_hcd *xhci; 949 struct xhci_hcd *xhci;
936 struct xhci_virt_device *virt_dev; 950 struct xhci_virt_device *virt_dev;
951 struct xhci_input_control_ctx *ctrl_ctx;
952 struct xhci_slot_ctx *slot_ctx;
937 953
938 ret = xhci_check_args(hcd, udev, NULL, 0, __func__); 954 ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
939 if (ret <= 0) 955 if (ret <= 0)
@@ -949,16 +965,18 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
949 virt_dev = xhci->devs[udev->slot_id]; 965 virt_dev = xhci->devs[udev->slot_id];
950 966
951 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ 967 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
952 virt_dev->in_ctx->add_flags |= SLOT_FLAG; 968 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
953 virt_dev->in_ctx->add_flags &= ~EP0_FLAG; 969 ctrl_ctx->add_flags |= SLOT_FLAG;
954 virt_dev->in_ctx->drop_flags &= ~SLOT_FLAG; 970 ctrl_ctx->add_flags &= ~EP0_FLAG;
955 virt_dev->in_ctx->drop_flags &= ~EP0_FLAG; 971 ctrl_ctx->drop_flags &= ~SLOT_FLAG;
972 ctrl_ctx->drop_flags &= ~EP0_FLAG;
956 xhci_dbg(xhci, "New Input Control Context:\n"); 973 xhci_dbg(xhci, "New Input Control Context:\n");
957 xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 974 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
958 LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info)); 975 xhci_dbg_ctx(xhci, virt_dev->in_ctx,
976 LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
959 977
960 spin_lock_irqsave(&xhci->lock, flags); 978 spin_lock_irqsave(&xhci->lock, flags);
961 ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx_dma, 979 ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx->dma,
962 udev->slot_id); 980 udev->slot_id);
963 if (ret < 0) { 981 if (ret < 0) {
964 spin_unlock_irqrestore(&xhci->lock, flags); 982 spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1013,10 +1031,10 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1013 } 1031 }
1014 1032
1015 xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); 1033 xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
1016 xhci_dbg_device_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 1034 xhci_dbg_ctx(xhci, virt_dev->out_ctx,
1017 LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info)); 1035 LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
1018 1036
1019 xhci_zero_in_ctx(virt_dev); 1037 xhci_zero_in_ctx(xhci, virt_dev);
1020 /* Free any old rings */ 1038 /* Free any old rings */
1021 for (i = 1; i < 31; ++i) { 1039 for (i = 1; i < 31; ++i) {
1022 if (virt_dev->new_ep_rings[i]) { 1040 if (virt_dev->new_ep_rings[i]) {
@@ -1054,7 +1072,7 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1054 virt_dev->new_ep_rings[i] = NULL; 1072 virt_dev->new_ep_rings[i] = NULL;
1055 } 1073 }
1056 } 1074 }
1057 xhci_zero_in_ctx(virt_dev); 1075 xhci_zero_in_ctx(xhci, virt_dev);
1058} 1076}
1059 1077
1060/* Deal with stalled endpoints. The core should have sent the control message 1078/* Deal with stalled endpoints. The core should have sent the control message
@@ -1187,6 +1205,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
1187 struct xhci_virt_device *virt_dev; 1205 struct xhci_virt_device *virt_dev;
1188 int ret = 0; 1206 int ret = 0;
1189 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 1207 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1208 struct xhci_slot_ctx *slot_ctx;
1209 struct xhci_input_control_ctx *ctrl_ctx;
1190 u64 temp_64; 1210 u64 temp_64;
1191 1211
1192 if (!udev->slot_id) { 1212 if (!udev->slot_id) {
@@ -1201,11 +1221,11 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
1201 xhci_setup_addressable_virt_dev(xhci, udev); 1221 xhci_setup_addressable_virt_dev(xhci, udev);
1202 /* Otherwise, assume the core has the device configured how it wants */ 1222 /* Otherwise, assume the core has the device configured how it wants */
1203 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); 1223 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
1204 xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2); 1224 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
1205 1225
1206 spin_lock_irqsave(&xhci->lock, flags); 1226 spin_lock_irqsave(&xhci->lock, flags);
1207 ret = xhci_queue_address_device(xhci, virt_dev->in_ctx_dma, 1227 ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
1208 udev->slot_id); 1228 udev->slot_id);
1209 if (ret) { 1229 if (ret) {
1210 spin_unlock_irqrestore(&xhci->lock, flags); 1230 spin_unlock_irqrestore(&xhci->lock, flags);
1211 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 1231 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
@@ -1246,7 +1266,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
1246 xhci_err(xhci, "ERROR: unexpected command completion " 1266 xhci_err(xhci, "ERROR: unexpected command completion "
1247 "code 0x%x.\n", virt_dev->cmd_status); 1267 "code 0x%x.\n", virt_dev->cmd_status);
1248 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); 1268 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
1249 xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2); 1269 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
1250 ret = -EINVAL; 1270 ret = -EINVAL;
1251 break; 1271 break;
1252 } 1272 }
@@ -1261,19 +1281,21 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
1261 (unsigned long long) 1281 (unsigned long long)
1262 xhci->dcbaa->dev_context_ptrs[udev->slot_id]); 1282 xhci->dcbaa->dev_context_ptrs[udev->slot_id]);
1263 xhci_dbg(xhci, "Output Context DMA address = %#08llx\n", 1283 xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
1264 (unsigned long long)virt_dev->out_ctx_dma); 1284 (unsigned long long)virt_dev->out_ctx->dma);
1265 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); 1285 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
1266 xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2); 1286 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
1267 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); 1287 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
1268 xhci_dbg_device_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2); 1288 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
1269 /* 1289 /*
1270 * USB core uses address 1 for the roothubs, so we add one to the 1290 * USB core uses address 1 for the roothubs, so we add one to the
1271 * address given back to us by the HC. 1291 * address given back to us by the HC.
1272 */ 1292 */
1273 udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1; 1293 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
1294 udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1;
1274 /* Zero the input context control for later use */ 1295 /* Zero the input context control for later use */
1275 virt_dev->in_ctx->add_flags = 0; 1296 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
1276 virt_dev->in_ctx->drop_flags = 0; 1297 ctrl_ctx->add_flags = 0;
1298 ctrl_ctx->drop_flags = 0;
1277 1299
1278 xhci_dbg(xhci, "Device address = %d\n", udev->devnum); 1300 xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
1279 /* XXX Meh, not sure if anyone else but choose_address uses this. */ 1301 /* XXX Meh, not sure if anyone else but choose_address uses this. */
@@ -1315,7 +1337,6 @@ static int __init xhci_hcd_init(void)
1315 /* xhci_device_control has eight fields, and also 1337 /* xhci_device_control has eight fields, and also
1316 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx 1338 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
1317 */ 1339 */
1318 BUILD_BUG_ON(sizeof(struct xhci_device_control) != (8+8+8*31)*32/8);
1319 BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); 1340 BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
1320 BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); 1341 BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
1321 BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8); 1342 BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 8d6bdf2f8015..e6b9a1c6002d 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -189,6 +189,63 @@ fail:
189 return 0; 189 return 0;
190} 190}
191 191
192#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
193
194struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
195 int type, gfp_t flags)
196{
197 struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
198 if (!ctx)
199 return NULL;
200
201 BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
202 ctx->type = type;
203 ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
204 if (type == XHCI_CTX_TYPE_INPUT)
205 ctx->size += CTX_SIZE(xhci->hcc_params);
206
207 ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
208 memset(ctx->bytes, 0, ctx->size);
209 return ctx;
210}
211
212void xhci_free_container_ctx(struct xhci_hcd *xhci,
213 struct xhci_container_ctx *ctx)
214{
215 dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
216 kfree(ctx);
217}
218
219struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
220 struct xhci_container_ctx *ctx)
221{
222 BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
223 return (struct xhci_input_control_ctx *)ctx->bytes;
224}
225
226struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
227 struct xhci_container_ctx *ctx)
228{
229 if (ctx->type == XHCI_CTX_TYPE_DEVICE)
230 return (struct xhci_slot_ctx *)ctx->bytes;
231
232 return (struct xhci_slot_ctx *)
233 (ctx->bytes + CTX_SIZE(xhci->hcc_params));
234}
235
236struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
237 struct xhci_container_ctx *ctx,
238 unsigned int ep_index)
239{
240 /* increment ep index by offset of start of ep ctx array */
241 ep_index++;
242 if (ctx->type == XHCI_CTX_TYPE_INPUT)
243 ep_index++;
244
245 return (struct xhci_ep_ctx *)
246 (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
247}
248
192/* All the xhci_tds in the ring's TD list should be freed at this point */ 249/* All the xhci_tds in the ring's TD list should be freed at this point */
193void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) 250void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
194{ 251{
@@ -209,11 +266,10 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
209 xhci_ring_free(xhci, dev->ep_rings[i]); 266 xhci_ring_free(xhci, dev->ep_rings[i]);
210 267
211 if (dev->in_ctx) 268 if (dev->in_ctx)
212 dma_pool_free(xhci->device_pool, 269 xhci_free_container_ctx(xhci, dev->in_ctx);
213 dev->in_ctx, dev->in_ctx_dma);
214 if (dev->out_ctx) 270 if (dev->out_ctx)
215 dma_pool_free(xhci->device_pool, 271 xhci_free_container_ctx(xhci, dev->out_ctx);
216 dev->out_ctx, dev->out_ctx_dma); 272
217 kfree(xhci->devs[slot_id]); 273 kfree(xhci->devs[slot_id]);
218 xhci->devs[slot_id] = 0; 274 xhci->devs[slot_id] = 0;
219} 275}
@@ -221,7 +277,6 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
221int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, 277int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
222 struct usb_device *udev, gfp_t flags) 278 struct usb_device *udev, gfp_t flags)
223{ 279{
224 dma_addr_t dma;
225 struct xhci_virt_device *dev; 280 struct xhci_virt_device *dev;
226 281
227 /* Slot ID 0 is reserved */ 282 /* Slot ID 0 is reserved */
@@ -235,26 +290,21 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
235 return 0; 290 return 0;
236 dev = xhci->devs[slot_id]; 291 dev = xhci->devs[slot_id];
237 292
238 /* Allocate the (output) device context that will be used in the HC. 293 /* Allocate the (output) device context that will be used in the HC. */
239 * The structure is 32 bytes smaller than the input context, but that's 294 dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
240 * fine.
241 */
242 dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
243 if (!dev->out_ctx) 295 if (!dev->out_ctx)
244 goto fail; 296 goto fail;
245 dev->out_ctx_dma = dma; 297
246 xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id, 298 xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
247 (unsigned long long)dma); 299 (unsigned long long)dev->out_ctx->dma);
248 memset(dev->out_ctx, 0, sizeof(*dev->out_ctx));
249 300
250 /* Allocate the (input) device context for address device command */ 301 /* Allocate the (input) device context for address device command */
251 dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma); 302 dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
252 if (!dev->in_ctx) 303 if (!dev->in_ctx)
253 goto fail; 304 goto fail;
254 dev->in_ctx_dma = dma; 305
255 xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id, 306 xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
256 (unsigned long long)dma); 307 (unsigned long long)dev->in_ctx->dma);
257 memset(dev->in_ctx, 0, sizeof(*dev->in_ctx));
258 308
259 /* Allocate endpoint 0 ring */ 309 /* Allocate endpoint 0 ring */
260 dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags); 310 dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags);
@@ -264,7 +314,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
264 init_completion(&dev->cmd_completion); 314 init_completion(&dev->cmd_completion);
265 315
266 /* Point to output device context in dcbaa. */ 316 /* Point to output device context in dcbaa. */
267 xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx_dma; 317 xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma;
268 xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n", 318 xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
269 slot_id, 319 slot_id,
270 &xhci->dcbaa->dev_context_ptrs[slot_id], 320 &xhci->dcbaa->dev_context_ptrs[slot_id],
@@ -282,6 +332,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
282 struct xhci_virt_device *dev; 332 struct xhci_virt_device *dev;
283 struct xhci_ep_ctx *ep0_ctx; 333 struct xhci_ep_ctx *ep0_ctx;
284 struct usb_device *top_dev; 334 struct usb_device *top_dev;
335 struct xhci_slot_ctx *slot_ctx;
336 struct xhci_input_control_ctx *ctrl_ctx;
285 337
286 dev = xhci->devs[udev->slot_id]; 338 dev = xhci->devs[udev->slot_id];
287 /* Slot ID 0 is reserved */ 339 /* Slot ID 0 is reserved */
@@ -290,27 +342,29 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
290 udev->slot_id); 342 udev->slot_id);
291 return -EINVAL; 343 return -EINVAL;
292 } 344 }
293 ep0_ctx = &dev->in_ctx->ep[0]; 345 ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
346 ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
347 slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
294 348
295 /* 2) New slot context and endpoint 0 context are valid*/ 349 /* 2) New slot context and endpoint 0 context are valid*/
296 dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG; 350 ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
297 351
298 /* 3) Only the control endpoint is valid - one endpoint context */ 352 /* 3) Only the control endpoint is valid - one endpoint context */
299 dev->in_ctx->slot.dev_info |= LAST_CTX(1); 353 slot_ctx->dev_info |= LAST_CTX(1);
300 354
301 switch (udev->speed) { 355 switch (udev->speed) {
302 case USB_SPEED_SUPER: 356 case USB_SPEED_SUPER:
303 dev->in_ctx->slot.dev_info |= (u32) udev->route; 357 slot_ctx->dev_info |= (u32) udev->route;
304 dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS; 358 slot_ctx->dev_info |= (u32) SLOT_SPEED_SS;
305 break; 359 break;
306 case USB_SPEED_HIGH: 360 case USB_SPEED_HIGH:
307 dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS; 361 slot_ctx->dev_info |= (u32) SLOT_SPEED_HS;
308 break; 362 break;
309 case USB_SPEED_FULL: 363 case USB_SPEED_FULL:
310 dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS; 364 slot_ctx->dev_info |= (u32) SLOT_SPEED_FS;
311 break; 365 break;
312 case USB_SPEED_LOW: 366 case USB_SPEED_LOW:
313 dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS; 367 slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
314 break; 368 break;
315 case USB_SPEED_VARIABLE: 369 case USB_SPEED_VARIABLE:
316 xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); 370 xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
@@ -324,7 +378,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
324 for (top_dev = udev; top_dev->parent && top_dev->parent->parent; 378 for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
325 top_dev = top_dev->parent) 379 top_dev = top_dev->parent)
326 /* Found device below root hub */; 380 /* Found device below root hub */;
327 dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum); 381 slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
328 xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum); 382 xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);
329 383
330 /* Is this a LS/FS device under a HS hub? */ 384 /* Is this a LS/FS device under a HS hub? */
@@ -334,8 +388,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
334 */ 388 */
335 if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) && 389 if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
336 udev->tt) { 390 udev->tt) {
337 dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id; 391 slot_ctx->tt_info = udev->tt->hub->slot_id;
338 dev->in_ctx->slot.tt_info |= udev->ttport << 8; 392 slot_ctx->tt_info |= udev->ttport << 8;
339 } 393 }
340 xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); 394 xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
341 xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); 395 xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
@@ -466,7 +520,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
466 unsigned int max_burst; 520 unsigned int max_burst;
467 521
468 ep_index = xhci_get_endpoint_index(&ep->desc); 522 ep_index = xhci_get_endpoint_index(&ep->desc);
469 ep_ctx = &virt_dev->in_ctx->ep[ep_index]; 523 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
470 524
471 /* Set up the endpoint ring */ 525 /* Set up the endpoint ring */
472 virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags); 526 virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags);
@@ -533,7 +587,7 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci,
533 struct xhci_ep_ctx *ep_ctx; 587 struct xhci_ep_ctx *ep_ctx;
534 588
535 ep_index = xhci_get_endpoint_index(&ep->desc); 589 ep_index = xhci_get_endpoint_index(&ep->desc);
536 ep_ctx = &virt_dev->in_ctx->ep[ep_index]; 590 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
537 591
538 ep_ctx->ep_info = 0; 592 ep_ctx->ep_info = 0;
539 ep_ctx->ep_info2 = 0; 593 ep_ctx->ep_info2 = 0;
@@ -753,11 +807,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
753 */ 807 */
754 xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, 808 xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
755 SEGMENT_SIZE, 64, xhci->page_size); 809 SEGMENT_SIZE, 64, xhci->page_size);
810
756 /* See Table 46 and Note on Figure 55 */ 811 /* See Table 46 and Note on Figure 55 */
757 /* FIXME support 64-byte contexts */
758 xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, 812 xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
759 sizeof(struct xhci_device_control), 813 2112, 64, xhci->page_size);
760 64, xhci->page_size);
761 if (!xhci->segment_pool || !xhci->device_pool) 814 if (!xhci->segment_pool || !xhci->device_pool)
762 goto fail; 815 goto fail;
763 816
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 0903e98989ec..ea31753c3137 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -362,6 +362,7 @@ static void find_new_dequeue_state(struct xhci_hcd *xhci,
362 struct xhci_virt_device *dev = xhci->devs[slot_id]; 362 struct xhci_virt_device *dev = xhci->devs[slot_id];
363 struct xhci_ring *ep_ring = dev->ep_rings[ep_index]; 363 struct xhci_ring *ep_ring = dev->ep_rings[ep_index];
364 struct xhci_generic_trb *trb; 364 struct xhci_generic_trb *trb;
365 struct xhci_ep_ctx *ep_ctx;
365 366
366 state->new_cycle_state = 0; 367 state->new_cycle_state = 0;
367 state->new_deq_seg = find_trb_seg(cur_td->start_seg, 368 state->new_deq_seg = find_trb_seg(cur_td->start_seg,
@@ -370,7 +371,8 @@ static void find_new_dequeue_state(struct xhci_hcd *xhci,
370 if (!state->new_deq_seg) 371 if (!state->new_deq_seg)
371 BUG(); 372 BUG();
372 /* Dig out the cycle state saved by the xHC during the stop ep cmd */ 373 /* Dig out the cycle state saved by the xHC during the stop ep cmd */
373 state->new_cycle_state = 0x1 & dev->out_ctx->ep[ep_index].deq; 374 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
375 state->new_cycle_state = 0x1 & ep_ctx->deq;
374 376
375 state->new_deq_ptr = cur_td->last_trb; 377 state->new_deq_ptr = cur_td->last_trb;
376 state->new_deq_seg = find_trb_seg(state->new_deq_seg, 378 state->new_deq_seg = find_trb_seg(state->new_deq_seg,
@@ -570,11 +572,15 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
570 unsigned int ep_index; 572 unsigned int ep_index;
571 struct xhci_ring *ep_ring; 573 struct xhci_ring *ep_ring;
572 struct xhci_virt_device *dev; 574 struct xhci_virt_device *dev;
575 struct xhci_ep_ctx *ep_ctx;
576 struct xhci_slot_ctx *slot_ctx;
573 577
574 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); 578 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
575 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); 579 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
576 dev = xhci->devs[slot_id]; 580 dev = xhci->devs[slot_id];
577 ep_ring = dev->ep_rings[ep_index]; 581 ep_ring = dev->ep_rings[ep_index];
582 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
583 slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
578 584
579 if (GET_COMP_CODE(event->status) != COMP_SUCCESS) { 585 if (GET_COMP_CODE(event->status) != COMP_SUCCESS) {
580 unsigned int ep_state; 586 unsigned int ep_state;
@@ -588,9 +594,9 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
588 case COMP_CTX_STATE: 594 case COMP_CTX_STATE:
589 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due " 595 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
590 "to incorrect slot or ep state.\n"); 596 "to incorrect slot or ep state.\n");
591 ep_state = dev->out_ctx->ep[ep_index].ep_info; 597 ep_state = ep_ctx->ep_info;
592 ep_state &= EP_STATE_MASK; 598 ep_state &= EP_STATE_MASK;
593 slot_state = dev->out_ctx->slot.dev_state; 599 slot_state = slot_ctx->dev_state;
594 slot_state = GET_SLOT_STATE(slot_state); 600 slot_state = GET_SLOT_STATE(slot_state);
595 xhci_dbg(xhci, "Slot state = %u, EP state = %u\n", 601 xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
596 slot_state, ep_state); 602 slot_state, ep_state);
@@ -613,7 +619,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
613 */ 619 */
614 } else { 620 } else {
615 xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n", 621 xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
616 dev->out_ctx->ep[ep_index].deq); 622 ep_ctx->deq);
617 } 623 }
618 624
619 ep_ring->state &= ~SET_DEQ_PENDING; 625 ep_ring->state &= ~SET_DEQ_PENDING;
@@ -795,6 +801,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
795 union xhci_trb *event_trb; 801 union xhci_trb *event_trb;
796 struct urb *urb = 0; 802 struct urb *urb = 0;
797 int status = -EINPROGRESS; 803 int status = -EINPROGRESS;
804 struct xhci_ep_ctx *ep_ctx;
798 805
799 xhci_dbg(xhci, "In %s\n", __func__); 806 xhci_dbg(xhci, "In %s\n", __func__);
800 xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)]; 807 xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)];
@@ -807,7 +814,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
807 ep_index = TRB_TO_EP_ID(event->flags) - 1; 814 ep_index = TRB_TO_EP_ID(event->flags) - 1;
808 xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index); 815 xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
809 ep_ring = xdev->ep_rings[ep_index]; 816 ep_ring = xdev->ep_rings[ep_index];
810 if (!ep_ring || (xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) { 817 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
818 if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
811 xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n"); 819 xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
812 return -ENODEV; 820 return -ENODEV;
813 } 821 }
@@ -1193,9 +1201,9 @@ static int prepare_transfer(struct xhci_hcd *xhci,
1193 gfp_t mem_flags) 1201 gfp_t mem_flags)
1194{ 1202{
1195 int ret; 1203 int ret;
1196 1204 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1197 ret = prepare_ring(xhci, xdev->ep_rings[ep_index], 1205 ret = prepare_ring(xhci, xdev->ep_rings[ep_index],
1198 xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK, 1206 ep_ctx->ep_info & EP_STATE_MASK,
1199 num_trbs, mem_flags); 1207 num_trbs, mem_flags);
1200 if (ret) 1208 if (ret)
1201 return ret; 1209 return ret;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index d4d3c7777fb8..9c108c632704 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -447,6 +447,27 @@ struct xhci_doorbell_array {
447 447
448 448
449/** 449/**
450 * struct xhci_container_ctx
451 * @type: Type of context. Used to calculated offsets to contained contexts.
452 * @size: Size of the context data
453 * @bytes: The raw context data given to HW
454 * @dma: dma address of the bytes
455 *
456 * Represents either a Device or Input context. Holds a pointer to the raw
457 * memory used for the context (bytes) and dma address of it (dma).
458 */
459struct xhci_container_ctx {
460 unsigned type;
461#define XHCI_CTX_TYPE_DEVICE 0x1
462#define XHCI_CTX_TYPE_INPUT 0x2
463
464 int size;
465
466 u8 *bytes;
467 dma_addr_t dma;
468};
469
470/**
450 * struct xhci_slot_ctx 471 * struct xhci_slot_ctx
451 * @dev_info: Route string, device speed, hub info, and last valid endpoint 472 * @dev_info: Route string, device speed, hub info, and last valid endpoint
452 * @dev_info2: Max exit latency for device number, root hub port number 473 * @dev_info2: Max exit latency for device number, root hub port number
@@ -583,32 +604,16 @@ struct xhci_ep_ctx {
583 604
584 605
585/** 606/**
586 * struct xhci_device_control 607 * struct xhci_input_control_context
587 * Input context; see section 6.2.5. 608 * Input control context; see section 6.2.5.
588 * 609 *
589 * @drop_context: set the bit of the endpoint context you want to disable 610 * @drop_context: set the bit of the endpoint context you want to disable
590 * @add_context: set the bit of the endpoint context you want to enable 611 * @add_context: set the bit of the endpoint context you want to enable
591 */ 612 */
592struct xhci_device_control { 613struct xhci_input_control_ctx {
593 /* Input control context */
594 u32 drop_flags; 614 u32 drop_flags;
595 u32 add_flags; 615 u32 add_flags;
596 u32 rsvd[6]; 616 u32 rsvd2[6];
597 /* Copy of device context */
598 struct xhci_slot_ctx slot;
599 struct xhci_ep_ctx ep[31];
600};
601
602/**
603 * struct xhci_device_ctx
604 * Device context; see section 6.2.1.
605 *
606 * @slot: slot context for the device.
607 * @ep: array of endpoint contexts for the device.
608 */
609struct xhci_device_ctx {
610 struct xhci_slot_ctx slot;
611 struct xhci_ep_ctx ep[31];
612}; 617};
613 618
614/* drop context bitmasks */ 619/* drop context bitmasks */
@@ -616,7 +621,6 @@ struct xhci_device_ctx {
616/* add context bitmasks */ 621/* add context bitmasks */
617#define ADD_EP(x) (0x1 << x) 622#define ADD_EP(x) (0x1 << x)
618 623
619
620struct xhci_virt_device { 624struct xhci_virt_device {
621 /* 625 /*
622 * Commands to the hardware are passed an "input context" that 626 * Commands to the hardware are passed an "input context" that
@@ -626,11 +630,10 @@ struct xhci_virt_device {
626 * track of input and output contexts separately because 630 * track of input and output contexts separately because
627 * these commands might fail and we don't trust the hardware. 631 * these commands might fail and we don't trust the hardware.
628 */ 632 */
629 struct xhci_device_ctx *out_ctx; 633 struct xhci_container_ctx *out_ctx;
630 dma_addr_t out_ctx_dma;
631 /* Used for addressing devices and configuration changes */ 634 /* Used for addressing devices and configuration changes */
632 struct xhci_device_control *in_ctx; 635 struct xhci_container_ctx *in_ctx;
633 dma_addr_t in_ctx_dma; 636
634 /* FIXME when stream support is added */ 637 /* FIXME when stream support is added */
635 struct xhci_ring *ep_rings[31]; 638 struct xhci_ring *ep_rings[31];
636 /* Temporary storage in case the configure endpoint command fails and we 639 /* Temporary storage in case the configure endpoint command fails and we
@@ -1139,8 +1142,7 @@ void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring);
1139void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst); 1142void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
1140void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci); 1143void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
1141void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring); 1144void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
1142void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep); 1145void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep);
1143void xhci_dbg_device_ctx(struct xhci_hcd *xhci, struct xhci_device_ctx *ctx, dma_addr_t dma, unsigned int last_ep);
1144 1146
1145/* xHCI memory managment */ 1147/* xHCI memory managment */
1146void xhci_mem_cleanup(struct xhci_hcd *xhci); 1148void xhci_mem_cleanup(struct xhci_hcd *xhci);
@@ -1207,4 +1209,9 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
1207 char *buf, u16 wLength); 1209 char *buf, u16 wLength);
1208int xhci_hub_status_data(struct usb_hcd *hcd, char *buf); 1210int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
1209 1211
1212/* xHCI contexts */
1213struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
1214struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
1215struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index);
1216
1210#endif /* __LINUX_XHCI_HCD_H */ 1217#endif /* __LINUX_XHCI_HCD_H */