author	Sarah Sharp <sarah.a.sharp@linux.intel.com>	2009-04-27 22:58:38 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2009-06-16 00:44:49 -0400
commit	f94e0186312b0fc39f41eed4e21836ed74b7efe1 (patch)
tree	d445d846f62c23cfbefc4958168d9cf4bacea3a4
parent	79abb1ab13cee5ba488210798b6e7bbae0b391ac (diff)
USB: xhci: Bandwidth allocation support
Since the xHCI host controller hardware (xHC) has an internal schedule, it
needs a better representation of what devices are consuming bandwidth on
the bus.  Each device is represented by a device context, with data about
the device, endpoints, and pointers to each endpoint ring.

We need to update the endpoint information for a device context before a
new configuration or alternate interface setting is selected.  We set up
an input device context with modified endpoint information and newly
allocated endpoint rings, and then submit a Configure Endpoint Command to
the hardware.

The host controller can reject the new configuration if it exceeds the bus
bandwidth, or if the host controller doesn't have enough internal
resources for the configuration.  If the command fails, we still have the
old device context with the previous configuration.  If the command
succeeds, we free the old endpoint rings.

The root hub isn't a real device, so always say yes to any bandwidth
changes for it.

The USB core will enable, disable, and then enable endpoint 0 several
times during the initialization sequence.  The device will always have an
endpoint ring for endpoint 0 and bandwidth allocated for that, unless the
device is disconnected or gets a SetAddress 0 request.  So we don't pay
attention when xhci_check_bandwidth() is called for a re-add of
endpoint 0.

Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
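The flow these hooks implement, as the USB core is expected to drive them, looks
roughly like the sketch below.  This is illustrative only and not part of the
patch: hypothetical_set_altsetting() is a stand-in for the USB core's real
configuration and alt-setting paths (the generic hc_driver hooks come from the
parent commit), and error handling is abbreviated.

static int hypothetical_set_altsetting(struct usb_hcd *hcd,
		struct usb_device *udev,
		struct usb_host_interface *cur_alt,
		struct usb_host_interface *new_alt)
{
	int i, ret;

	/* Stage the change in the input device context: drop the old
	 * endpoints, then add the new ones.  Each call only sets flags and
	 * allocates rings; nothing is issued to the hardware yet.
	 */
	for (i = 0; i < cur_alt->desc.bNumEndpoints; i++)
		hcd->driver->drop_endpoint(hcd, udev, &cur_alt->endpoint[i]);
	for (i = 0; i < new_alt->desc.bNumEndpoints; i++) {
		ret = hcd->driver->add_endpoint(hcd, udev, &new_alt->endpoint[i]);
		if (ret < 0)
			goto reset;
	}
	/* One Configure Endpoint Command covers the whole change */
	ret = hcd->driver->check_bandwidth(hcd, udev);
	if (ret == 0)
		return 0;	/* xHC accepted; old rings were freed */
reset:
	/* The xHC rejected the configuration (or add_endpoint failed):
	 * throw away the staged input context and newly allocated rings.
	 */
	hcd->driver->reset_bandwidth(hcd, udev);
	return ret;
}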
-rw-r--r--	drivers/usb/host/xhci-hcd.c	370
-rw-r--r--	drivers/usb/host/xhci-mem.c	174
-rw-r--r--	drivers/usb/host/xhci-pci.c	4
-rw-r--r--	drivers/usb/host/xhci-ring.c	11
-rw-r--r--	drivers/usb/host/xhci.h	18
5 files changed, 572 insertions(+), 5 deletions(-)
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 5d94b4ffac92..50ab525f65be 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -530,6 +530,26 @@ unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
 	return index;
 }
 
+/* Find the flag for this endpoint (for use in the control context).  Use the
+ * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
+ * bit 1, etc.
+ */
+unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
+{
+	return 1 << (xhci_get_endpoint_index(desc) + 1);
+}
+
+/* Compute the last valid endpoint context index.  Basically, this is the
+ * endpoint index plus one.  For slot contexts with more than one valid
+ * endpoint, we find the most significant bit set in the added contexts flags.
+ * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
+ * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
+ */
+static inline unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
+{
+	return fls(added_ctxs) - 1;
+}
+
 /* Returns 1 if the arguments are OK;
  * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
  */
@@ -602,6 +622,349 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	return -ENOSYS;
 }
 
+/* Drop an endpoint from a new bandwidth configuration for this device.
+ * Only one call to this function is allowed per endpoint before
+ * check_bandwidth() or reset_bandwidth() must be called.
+ * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
+ * add the endpoint to the schedule with possibly new parameters denoted by a
+ * different endpoint descriptor in usb_host_endpoint.
+ * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
+ * not allowed.
+ */
+int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
+		struct usb_host_endpoint *ep)
+{
+	unsigned long flags;
+	struct xhci_hcd *xhci;
+	struct xhci_device_control *in_ctx;
+	unsigned int last_ctx;
+	unsigned int ep_index;
+	struct xhci_ep_ctx *ep_ctx;
+	u32 drop_flag;
+	u32 new_add_flags, new_drop_flags, new_slot_info;
+	int ret;
+
+	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
+	if (ret <= 0)
+		return ret;
+	xhci = hcd_to_xhci(hcd);
+	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+
+	drop_flag = xhci_get_endpoint_flag(&ep->desc);
+	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
+		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
+				__func__, drop_flag);
+		return 0;
+	}
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
+		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
+				__func__);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return -EINVAL;
+	}
+
+	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
+	ep_index = xhci_get_endpoint_index(&ep->desc);
+	ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
+	/* If the HC already knows the endpoint is disabled,
+	 * or the HCD has noted it is disabled, ignore this request
+	 */
+	if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
+			in_ctx->drop_flags & drop_flag) {
+		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
+				__func__, ep);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return 0;
+	}
+
+	in_ctx->drop_flags |= drop_flag;
+	new_drop_flags = in_ctx->drop_flags;
+
+	in_ctx->add_flags &= ~drop_flag;
+	new_add_flags = in_ctx->add_flags;
+
+	last_ctx = xhci_last_valid_endpoint(in_ctx->add_flags);
+	/* Update the last valid endpoint context, if we deleted the last one */
+	if ((in_ctx->slot.dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
+		in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
+		in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
+	}
+	new_slot_info = in_ctx->slot.dev_info;
+
+	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
+
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
+			(unsigned int) ep->desc.bEndpointAddress,
+			udev->slot_id,
+			(unsigned int) new_drop_flags,
+			(unsigned int) new_add_flags,
+			(unsigned int) new_slot_info);
+	return 0;
+}
+
+/* Add an endpoint to a new possible bandwidth configuration for this device.
+ * Only one call to this function is allowed per endpoint before
+ * check_bandwidth() or reset_bandwidth() must be called.
+ * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
+ * add the endpoint to the schedule with possibly new parameters denoted by a
+ * different endpoint descriptor in usb_host_endpoint.
+ * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
+ * not allowed.
+ */
+int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
+		struct usb_host_endpoint *ep)
+{
+	unsigned long flags;
+	struct xhci_hcd *xhci;
+	struct xhci_device_control *in_ctx;
+	unsigned int ep_index;
+	struct xhci_ep_ctx *ep_ctx;
+	u32 added_ctxs;
+	unsigned int last_ctx;
+	u32 new_add_flags, new_drop_flags, new_slot_info;
+	int ret = 0;
+
+	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
+	if (ret <= 0)
+		return ret;
+	xhci = hcd_to_xhci(hcd);
+
+	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
+	last_ctx = xhci_last_valid_endpoint(added_ctxs);
+	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
+		/* FIXME when we have to issue an evaluate endpoint command to
+		 * deal with ep0 max packet size changing once we get the
+		 * descriptors
+		 */
+		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
+				__func__, added_ctxs);
+		return 0;
+	}
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
+		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
+				__func__);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return -EINVAL;
+	}
+
+	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
+	ep_index = xhci_get_endpoint_index(&ep->desc);
+	ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
+	/* If the HCD has already noted the endpoint is enabled,
+	 * ignore this request.
+	 */
+	if (in_ctx->add_flags & added_ctxs) {
+		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
+				__func__, ep);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return 0;
+	}
+
+	if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id], udev, ep) < 0) {
+		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
+				__func__, ep->desc.bEndpointAddress);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return -ENOMEM;
+	}
+
+	in_ctx->add_flags |= added_ctxs;
+	new_add_flags = in_ctx->add_flags;
+
+	/* If xhci_endpoint_disable() was called for this endpoint, but the
+	 * xHC hasn't been notified yet through the check_bandwidth() call,
+	 * this re-adds a new state for the endpoint from the new endpoint
+	 * descriptors.  We must drop and re-add this endpoint, so we leave the
+	 * drop flags alone.
+	 */
+	new_drop_flags = in_ctx->drop_flags;
+
+	/* Update the last valid endpoint context, if we just added one past */
+	if ((in_ctx->slot.dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
+		in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
+		in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
+	}
+	new_slot_info = in_ctx->slot.dev_info;
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
+			(unsigned int) ep->desc.bEndpointAddress,
+			udev->slot_id,
+			(unsigned int) new_drop_flags,
+			(unsigned int) new_add_flags,
+			(unsigned int) new_slot_info);
+	return 0;
+}
+
+static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev)
+{
+	struct xhci_ep_ctx *ep_ctx;
+	int i;
+
+	/* When a device's add flag and drop flag are zero, any subsequent
+	 * configure endpoint command will leave that endpoint's state
+	 * untouched.  Make sure we don't leave any old state in the input
+	 * endpoint contexts.
+	 */
+	virt_dev->in_ctx->drop_flags = 0;
+	virt_dev->in_ctx->add_flags = 0;
+	virt_dev->in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
+	/* Endpoint 0 is always valid */
+	virt_dev->in_ctx->slot.dev_info |= LAST_CTX(1);
+	for (i = 1; i < 31; ++i) {
+		ep_ctx = &virt_dev->in_ctx->ep[i];
+		ep_ctx->ep_info = 0;
+		ep_ctx->ep_info2 = 0;
+		ep_ctx->deq[0] = 0;
+		ep_ctx->deq[1] = 0;
+		ep_ctx->tx_info = 0;
+	}
+}
+
+int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+	int i;
+	int ret = 0;
+	int timeleft;
+	unsigned long flags;
+	struct xhci_hcd *xhci;
+	struct xhci_virt_device *virt_dev;
+
+	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
+	if (ret <= 0)
+		return ret;
+	xhci = hcd_to_xhci(hcd);
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) {
+		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
+				__func__);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return -EINVAL;
+	}
+	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+	virt_dev = xhci->devs[udev->slot_id];
+
+	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
+	virt_dev->in_ctx->add_flags |= SLOT_FLAG;
+	virt_dev->in_ctx->add_flags &= ~EP0_FLAG;
+	virt_dev->in_ctx->drop_flags &= ~SLOT_FLAG;
+	virt_dev->in_ctx->drop_flags &= ~EP0_FLAG;
+	xhci_dbg(xhci, "New Input Control Context:\n");
+	xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma,
+			LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
+
+	ret = queue_configure_endpoint(xhci, virt_dev->in_ctx_dma, udev->slot_id);
+	if (ret < 0) {
+		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return -ENOMEM;
+	}
+	ring_cmd_db(xhci);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	/* Wait for the configure endpoint command to complete */
+	timeleft = wait_for_completion_interruptible_timeout(
+			&virt_dev->cmd_completion,
+			USB_CTRL_SET_TIMEOUT);
+	if (timeleft <= 0) {
+		xhci_warn(xhci, "%s while waiting for configure endpoint command\n",
+				timeleft == 0 ? "Timeout" : "Signal");
+		/* FIXME cancel the configure endpoint command */
+		return -ETIME;
+	}
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	switch (virt_dev->cmd_status) {
+	case COMP_ENOMEM:
+		dev_warn(&udev->dev, "Not enough host controller resources "
+				"for new device state.\n");
+		ret = -ENOMEM;
+		/* FIXME: can we allocate more resources for the HC? */
+		break;
+	case COMP_BW_ERR:
+		dev_warn(&udev->dev, "Not enough bandwidth "
+				"for new device state.\n");
+		ret = -ENOSPC;
+		/* FIXME: can we go back to the old state? */
+		break;
+	case COMP_TRB_ERR:
+		/* the HCD set up something wrong */
+		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, add flag = 1, "
+				"and endpoint is not disabled.\n");
+		ret = -EINVAL;
+		break;
+	case COMP_SUCCESS:
+		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
+		break;
+	default:
+		xhci_err(xhci, "ERROR: unexpected command completion "
+				"code 0x%x.\n", virt_dev->cmd_status);
+		ret = -EINVAL;
+		break;
+	}
+	if (ret) {
+		/* Caller should call reset_bandwidth() */
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return ret;
+	}
+
+	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
+	xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma,
+			LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
+
+	xhci_zero_in_ctx(virt_dev);
+	/* Free any old rings */
+	for (i = 1; i < 31; ++i) {
+		if (virt_dev->new_ep_rings[i]) {
+			xhci_ring_free(xhci, virt_dev->ep_rings[i]);
+			virt_dev->ep_rings[i] = virt_dev->new_ep_rings[i];
+			virt_dev->new_ep_rings[i] = NULL;
+		}
+	}
+
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	return ret;
+}
+
+void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+	unsigned long flags;
+	struct xhci_hcd *xhci;
+	struct xhci_virt_device *virt_dev;
+	int i, ret;
+
+	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
+	if (ret <= 0)
+		return;
+	xhci = hcd_to_xhci(hcd);
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
+		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
+				__func__);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return;
+	}
+	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+	virt_dev = xhci->devs[udev->slot_id];
+	/* Free any rings allocated for added endpoints */
+	for (i = 0; i < 31; ++i) {
+		if (virt_dev->new_ep_rings[i]) {
+			xhci_ring_free(xhci, virt_dev->new_ep_rings[i]);
+			virt_dev->new_ep_rings[i] = NULL;
+		}
+	}
+	xhci_zero_in_ctx(virt_dev);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+}
+
 /*
  * At this point, the struct usb_device is about to go away, the device has
  * disconnected, and all traffic has been stopped and the endpoints have been
@@ -783,7 +1146,12 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 	 * address given back to us by the HC.
 	 */
 	udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1;
-	/* FIXME: Zero the input context control for later use? */
+	/* Zero the input context control for later use */
+	virt_dev->in_ctx->add_flags = 0;
+	virt_dev->in_ctx->drop_flags = 0;
+	/* Mirror flags in the output context for future ep enable/disable */
+	virt_dev->out_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
+	virt_dev->out_ctx->drop_flags = 0;
 	spin_unlock_irqrestore(&xhci->lock, flags);
 
 	xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
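The context-flag arithmetic above is easiest to verify with the example from
the xhci_last_valid_endpoint() comment.  Below is a standalone userspace
re-statement of that math for ep 1 IN (illustrative only, not part of the
patch; ep_flag() and last_valid_endpoint() are local stand-ins for the kernel
helpers):

#include <assert.h>

/* stand-in for xhci_get_endpoint_flag(): the slot context is bit 0,
 * endpoint context 0 is bit 1, and so on
 */
static unsigned int ep_flag(unsigned int ep_index)
{
	return 1u << (ep_index + 1);
}

/* stand-in for xhci_last_valid_endpoint(), with an open-coded fls() */
static unsigned int last_valid_endpoint(unsigned int added_ctxs)
{
	unsigned int msb = 0;

	while (added_ctxs) {
		msb++;
		added_ctxs >>= 1;
	}
	return msb - 1;
}

int main(void)
{
	/* ep 1 IN (bEndpointAddress 0x81) has endpoint context index 2 */
	unsigned int added_ctxs = ep_flag(2);

	assert(added_ctxs == 0x8);			/* 0b1000 */
	assert(last_valid_endpoint(added_ctxs) == 3);	/* fls(0b1000) - 1 */
	return 0;
}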
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 6ff2e298bff8..8cd55f03ea26 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -103,7 +103,7 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
 }
 
 /* XXX: Do we need the hcd structure in all these functions? */
-static void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
+void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
 {
 	struct xhci_segment *seg;
 	struct xhci_segment *first_seg;
@@ -257,6 +257,8 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	if (!dev->ep_rings[0])
 		goto fail;
 
+	init_completion(&dev->cmd_completion);
+
 	/*
 	 * Point to output device context in dcbaa; skip the output control
 	 * context, which is eight 32 bit fields (or 32 bytes long)
@@ -366,6 +368,176 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	return 0;
 }
 
+/* Return the polling or NAK interval.
+ *
+ * The polling interval is expressed in "microframes".  If xHCI's Interval field
+ * is set to N, it will service the endpoint every 2^(Interval)*125us.
+ *
+ * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
+ * is set to 0.
+ */
+static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
+		struct usb_host_endpoint *ep)
+{
+	unsigned int interval = 0;
+
+	switch (udev->speed) {
+	case USB_SPEED_HIGH:
+		/* Max NAK rate */
+		if (usb_endpoint_xfer_control(&ep->desc) ||
+				usb_endpoint_xfer_bulk(&ep->desc))
+			interval = ep->desc.bInterval;
+		/* Fall through - SS and HS isoc/int have same decoding */
+	case USB_SPEED_SUPER:
+		if (usb_endpoint_xfer_int(&ep->desc) ||
+				usb_endpoint_xfer_isoc(&ep->desc)) {
+			if (ep->desc.bInterval == 0)
+				interval = 0;
+			else
+				interval = ep->desc.bInterval - 1;
+			if (interval > 15)
+				interval = 15;
+			if (interval != ep->desc.bInterval - 1)
+				dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
+						ep->desc.bEndpointAddress, 1 << interval);
+		}
+		break;
+	/* Convert bInterval (in 1-255 frames) to microframes and round down to
+	 * nearest power of 2.
+	 */
+	case USB_SPEED_FULL:
+	case USB_SPEED_LOW:
+		if (usb_endpoint_xfer_int(&ep->desc) ||
+				usb_endpoint_xfer_isoc(&ep->desc)) {
+			interval = fls(8*ep->desc.bInterval) - 1;
+			if (interval > 10)
+				interval = 10;
+			if (interval < 3)
+				interval = 3;
+			if ((1 << interval) != 8*ep->desc.bInterval)
+				dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
+						ep->desc.bEndpointAddress, 1 << interval);
+		}
+		break;
+	default:
+		BUG();
+	}
+	return EP_INTERVAL(interval);
+}
+
+static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
+		struct usb_host_endpoint *ep)
+{
+	int in;
+	u32 type;
+
+	in = usb_endpoint_dir_in(&ep->desc);
+	if (usb_endpoint_xfer_control(&ep->desc)) {
+		type = EP_TYPE(CTRL_EP);
+	} else if (usb_endpoint_xfer_bulk(&ep->desc)) {
+		if (in)
+			type = EP_TYPE(BULK_IN_EP);
+		else
+			type = EP_TYPE(BULK_OUT_EP);
+	} else if (usb_endpoint_xfer_isoc(&ep->desc)) {
+		if (in)
+			type = EP_TYPE(ISOC_IN_EP);
+		else
+			type = EP_TYPE(ISOC_OUT_EP);
+	} else if (usb_endpoint_xfer_int(&ep->desc)) {
+		if (in)
+			type = EP_TYPE(INT_IN_EP);
+		else
+			type = EP_TYPE(INT_OUT_EP);
+	} else {
+		BUG();
+	}
+	return type;
+}
+
+int xhci_endpoint_init(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		struct usb_device *udev,
+		struct usb_host_endpoint *ep)
+{
+	unsigned int ep_index;
+	struct xhci_ep_ctx *ep_ctx;
+	struct xhci_ring *ep_ring;
+	unsigned int max_packet;
+	unsigned int max_burst;
+
+	ep_index = xhci_get_endpoint_index(&ep->desc);
+	ep_ctx = &virt_dev->in_ctx->ep[ep_index];
+
+	/* Set up the endpoint ring */
+	virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, GFP_KERNEL);
+	if (!virt_dev->new_ep_rings[ep_index])
+		return -ENOMEM;
+	ep_ring = virt_dev->new_ep_rings[ep_index];
+	ep_ctx->deq[1] = 0;
+	ep_ctx->deq[0] = ep_ring->first_seg->dma | ep_ring->cycle_state;
+
+	ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
+
+	/* FIXME dig Mult and streams info out of ep companion desc */
+
+	/* Allow 3 retries for everything but isoc */
+	if (!usb_endpoint_xfer_isoc(&ep->desc))
+		ep_ctx->ep_info2 = ERROR_COUNT(3);
+	else
+		ep_ctx->ep_info2 = ERROR_COUNT(0);
+
+	ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);
+
+	/* Set the max packet size and max burst */
+	switch (udev->speed) {
+	case USB_SPEED_SUPER:
+		max_packet = ep->desc.wMaxPacketSize;
+		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
+		/* FIXME dig out burst from ep companion desc */
+		break;
+	case USB_SPEED_HIGH:
+		/* bits 11:12 specify the number of additional transaction
+		 * opportunities per microframe (USB 2.0, section 9.6.6)
+		 */
+		if (usb_endpoint_xfer_isoc(&ep->desc) ||
+				usb_endpoint_xfer_int(&ep->desc)) {
+			max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
+			ep_ctx->ep_info2 |= MAX_BURST(max_burst);
+		}
+		/* Fall through */
+	case USB_SPEED_FULL:
+	case USB_SPEED_LOW:
+		max_packet = ep->desc.wMaxPacketSize & 0x3ff;
+		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
+		break;
+	default:
+		BUG();
+	}
+	/* FIXME Debug endpoint context */
+	return 0;
+}
+
+void xhci_endpoint_zero(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		struct usb_host_endpoint *ep)
+{
+	unsigned int ep_index;
+	struct xhci_ep_ctx *ep_ctx;
+
+	ep_index = xhci_get_endpoint_index(&ep->desc);
+	ep_ctx = &virt_dev->in_ctx->ep[ep_index];
+
+	ep_ctx->ep_info = 0;
+	ep_ctx->ep_info2 = 0;
+	ep_ctx->deq[1] = 0;
+	ep_ctx->deq[0] = 0;
+	ep_ctx->tx_info = 0;
+	/* Don't free the endpoint ring until the set interface or configuration
+	 * request succeeds.
+	 */
+}
+
 void xhci_mem_cleanup(struct xhci_hcd *xhci)
 {
 	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
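The full/low-speed branch of xhci_get_endpoint_interval() above converts
bInterval (in 1-255 frames) to a power-of-two number of 125us microframes,
rounding down and clamping to the 1ms-128ms range.  A standalone re-statement
of that arithmetic (illustrative only, not part of the patch; fls_u() is a
local stand-in for the kernel's fls()):

#include <stdio.h>

static unsigned int fls_u(unsigned int x)
{
	unsigned int n = 0;

	while (x) {
		n++;
		x >>= 1;
	}
	return n;
}

int main(void)
{
	unsigned int bInterval = 10;	/* FS interrupt ep requesting 10 ms */
	unsigned int interval = fls_u(8 * bInterval) - 1;	/* fls(80) - 1 = 6 */

	if (interval > 10)
		interval = 10;	/* clamp high: 2^10 * 125us = 128 ms */
	if (interval < 3)
		interval = 3;	/* clamp low: 2^3 * 125us = 1 ms */

	/* 2^6 = 64 microframes = 8 ms; since 64 != 80, the 10 ms request was
	 * rounded down, which is exactly when the patch's dev_warn() fires.
	 */
	printf("Interval field = %u (%u microframes)\n", interval, 1u << interval);
	return 0;
}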
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index ff9a4ef22338..1462709e26c0 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -115,6 +115,10 @@ static const struct hc_driver xhci_pci_hc_driver = {
 	.urb_dequeue =		xhci_urb_dequeue,
 	.alloc_dev =		xhci_alloc_dev,
 	.free_dev =		xhci_free_dev,
+	.add_endpoint =		xhci_add_endpoint,
+	.drop_endpoint =	xhci_drop_endpoint,
+	.check_bandwidth =	xhci_check_bandwidth,
+	.reset_bandwidth =	xhci_reset_bandwidth,
 	.address_device =	xhci_address_device,
 
 	/*
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index f04162ae4374..b4ccf0d72c17 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -281,6 +281,10 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 		if (xhci->devs[slot_id])
 			xhci_free_virt_device(xhci, slot_id);
 		break;
+	case TRB_TYPE(TRB_CONFIG_EP):
+		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
+		complete(&xhci->devs[slot_id]->cmd_completion);
+		break;
 	case TRB_TYPE(TRB_ADDR_DEV):
 		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
 		complete(&xhci->addr_dev);
@@ -809,3 +813,10 @@ int queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_
 	return queue_command(xhci, in_ctx_ptr, 0, 0,
 			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id));
 }
+
+/* Queue a configure endpoint command TRB */
+int queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id)
+{
+	return queue_command(xhci, in_ctx_ptr, 0, 0,
+			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id));
+}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index fc8dcd2aa770..1a6fd997c343 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -486,8 +486,6 @@ struct xhci_slot_ctx {
 #define LAST_CTX_MASK	(0x1f << 27)
 #define LAST_CTX(p)	((p) << 27)
 #define LAST_CTX_TO_EP_NUM(p)	(((p) >> 27) - 1)
-/* Plus one for the slot context flag */
-#define EPI_TO_FLAG(p)	(1 << ((p) + 1))
 #define SLOT_FLAG	(1 << 0)
 #define EP0_FLAG	(1 << 1)
 
@@ -566,7 +564,7 @@ struct xhci_ep_ctx {
 /* bits 10:14 are Max Primary Streams */
 /* bit 15 is Linear Stream Array */
 /* Interval - period between requests to an endpoint - 125u increments. */
-#define EP_INTERVAL	(0xff << 16)
+#define EP_INTERVAL(p)	(((p) & 0xff) << 16)
 
 /* ep_info2 bitmasks */
 /*
@@ -626,6 +624,11 @@ struct xhci_virt_device {
 	dma_addr_t		in_ctx_dma;
 	/* FIXME when stream support is added */
 	struct xhci_ring	*ep_rings[31];
+	/* Temporary storage in case the configure endpoint command fails and we
+	 * have to restore the device state to the previous state
+	 */
+	struct xhci_ring	*new_ep_rings[31];
+	struct completion	cmd_completion;
 	/* Status of the last command issued for this device */
 	u32			cmd_status;
 };
@@ -1075,6 +1078,10 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id);
 int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags);
 int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev);
 unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc);
+unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc);
+void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep);
+int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_device *udev, struct usb_host_endpoint *ep);
+void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
 
 #ifdef CONFIG_PCI
 /* xHCI PCI glue */
@@ -1096,6 +1103,10 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
 int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev);
 int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags);
 int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
+int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
+int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
+int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
 
 /* xHCI ring, segment, TRB, and TD functions */
 dma_addr_t trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
@@ -1106,6 +1117,7 @@ void set_hc_event_deq(struct xhci_hcd *xhci);
 int queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id);
 int queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id);
 int queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, int slot_id, unsigned int ep_index);
+int queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id);
 
 /* xHCI roothub code */
 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,