aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/usb/host/xhci-hcd.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/usb/host/xhci-hcd.c')
-rw-r--r--drivers/usb/host/xhci-hcd.c370
1 file changed, 369 insertions(+), 1 deletion(-)
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 5d94b4ffac92..50ab525f65be 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -530,6 +530,26 @@ unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
530 return index; 530 return index;
531} 531}
532 532
/* Find the flag for this endpoint (for use in the control context). Use the
 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	unsigned int ep_index = xhci_get_endpoint_index(desc);

	/* Shift by one extra to skip the slot context flag at bit 0. */
	return 1 << (ep_index + 1);
}
541
542/* Compute the last valid endpoint context index. Basically, this is the
543 * endpoint index plus one. For slot contexts with more than valid endpoint,
544 * we find the most significant bit set in the added contexts flags.
545 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
546 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
547 */
548static inline unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
549{
550 return fls(added_ctxs) - 1;
551}
552
533/* Returns 1 if the arguments are OK; 553/* Returns 1 if the arguments are OK;
534 * returns 0 this is a root hub; returns -EINVAL for NULL pointers. 554 * returns 0 this is a root hub; returns -EINVAL for NULL pointers.
535 */ 555 */
@@ -602,6 +622,349 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
602 return -ENOSYS; 622 return -ENOSYS;
603} 623}
604 624
625/* Drop an endpoint from a new bandwidth configuration for this device.
626 * Only one call to this function is allowed per endpoint before
627 * check_bandwidth() or reset_bandwidth() must be called.
628 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
629 * add the endpoint to the schedule with possibly new parameters denoted by a
630 * different endpoint descriptor in usb_host_endpoint.
631 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
632 * not allowed.
633 */
634int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
635 struct usb_host_endpoint *ep)
636{
637 unsigned long flags;
638 struct xhci_hcd *xhci;
639 struct xhci_device_control *in_ctx;
640 unsigned int last_ctx;
641 unsigned int ep_index;
642 struct xhci_ep_ctx *ep_ctx;
643 u32 drop_flag;
644 u32 new_add_flags, new_drop_flags, new_slot_info;
645 int ret;
646
647 ret = xhci_check_args(hcd, udev, ep, 1, __func__);
648 xhci_dbg(xhci, "%s called for udev %#x\n", __func__, (unsigned int) udev);
649 if (ret <= 0)
650 return ret;
651 xhci = hcd_to_xhci(hcd);
652
653 drop_flag = xhci_get_endpoint_flag(&ep->desc);
654 if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
655 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
656 __func__, drop_flag);
657 return 0;
658 }
659
660 spin_lock_irqsave(&xhci->lock, flags);
661 if (!xhci->devs || !xhci->devs[udev->slot_id]) {
662 xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
663 __func__);
664 spin_unlock_irqrestore(&xhci->lock, flags);
665 return -EINVAL;
666 }
667
668 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
669 ep_index = xhci_get_endpoint_index(&ep->desc);
670 ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
671 /* If the HC already knows the endpoint is disabled,
672 * or the HCD has noted it is disabled, ignore this request
673 */
674 if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
675 in_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
676 xhci_warn(xhci, "xHCI %s called with disabled ep %#x\n",
677 __func__, (unsigned int) ep);
678 spin_unlock_irqrestore(&xhci->lock, flags);
679 return 0;
680 }
681
682 in_ctx->drop_flags |= drop_flag;
683 new_drop_flags = in_ctx->drop_flags;
684
685 in_ctx->add_flags = ~drop_flag;
686 new_add_flags = in_ctx->add_flags;
687
688 last_ctx = xhci_last_valid_endpoint(in_ctx->add_flags);
689 /* Update the last valid endpoint context, if we deleted the last one */
690 if ((in_ctx->slot.dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
691 in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
692 in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
693 }
694 new_slot_info = in_ctx->slot.dev_info;
695
696 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
697
698 spin_unlock_irqrestore(&xhci->lock, flags);
699
700 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
701 (unsigned int) ep->desc.bEndpointAddress,
702 udev->slot_id,
703 (unsigned int) new_drop_flags,
704 (unsigned int) new_add_flags,
705 (unsigned int) new_slot_info);
706 return 0;
707}
708
709/* Add an endpoint to a new possible bandwidth configuration for this device.
710 * Only one call to this function is allowed per endpoint before
711 * check_bandwidth() or reset_bandwidth() must be called.
712 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
713 * add the endpoint to the schedule with possibly new parameters denoted by a
714 * different endpoint descriptor in usb_host_endpoint.
715 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
716 * not allowed.
717 */
718int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
719 struct usb_host_endpoint *ep)
720{
721 unsigned long flags;
722 struct xhci_hcd *xhci;
723 struct xhci_device_control *in_ctx;
724 unsigned int ep_index;
725 struct xhci_ep_ctx *ep_ctx;
726 u32 added_ctxs;
727 unsigned int last_ctx;
728 u32 new_add_flags, new_drop_flags, new_slot_info;
729 int ret = 0;
730
731 ret = xhci_check_args(hcd, udev, ep, 1, __func__);
732 if (ret <= 0)
733 return ret;
734 xhci = hcd_to_xhci(hcd);
735
736 added_ctxs = xhci_get_endpoint_flag(&ep->desc);
737 last_ctx = xhci_last_valid_endpoint(added_ctxs);
738 if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
739 /* FIXME when we have to issue an evaluate endpoint command to
740 * deal with ep0 max packet size changing once we get the
741 * descriptors
742 */
743 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
744 __func__, added_ctxs);
745 return 0;
746 }
747
748 spin_lock_irqsave(&xhci->lock, flags);
749 if (!xhci->devs || !xhci->devs[udev->slot_id]) {
750 xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
751 __func__);
752 spin_unlock_irqrestore(&xhci->lock, flags);
753 return -EINVAL;
754 }
755
756 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
757 ep_index = xhci_get_endpoint_index(&ep->desc);
758 ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
759 /* If the HCD has already noted the endpoint is enabled,
760 * ignore this request.
761 */
762 if (in_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
763 xhci_warn(xhci, "xHCI %s called with enabled ep %#x\n",
764 __func__, (unsigned int) ep);
765 spin_unlock_irqrestore(&xhci->lock, flags);
766 return 0;
767 }
768
769 if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id], udev, ep) < 0) {
770 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
771 __func__, ep->desc.bEndpointAddress);
772 spin_unlock_irqrestore(&xhci->lock, flags);
773 return -ENOMEM;
774 }
775
776 in_ctx->add_flags |= added_ctxs;
777 new_add_flags = in_ctx->add_flags;
778
779 /* If xhci_endpoint_disable() was called for this endpoint, but the
780 * xHC hasn't been notified yet through the check_bandwidth() call,
781 * this re-adds a new state for the endpoint from the new endpoint
782 * descriptors. We must drop and re-add this endpoint, so we leave the
783 * drop flags alone.
784 */
785 new_drop_flags = in_ctx->drop_flags;
786
787 /* Update the last valid endpoint context, if we just added one past */
788 if ((in_ctx->slot.dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
789 in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
790 in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
791 }
792 new_slot_info = in_ctx->slot.dev_info;
793 spin_unlock_irqrestore(&xhci->lock, flags);
794
795 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
796 (unsigned int) ep->desc.bEndpointAddress,
797 udev->slot_id,
798 (unsigned int) new_drop_flags,
799 (unsigned int) new_add_flags,
800 (unsigned int) new_slot_info);
801 return 0;
802}
803
804static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev)
805{
806 struct xhci_ep_ctx *ep_ctx;
807 int i;
808
809 /* When a device's add flag and drop flag are zero, any subsequent
810 * configure endpoint command will leave that endpoint's state
811 * untouched. Make sure we don't leave any old state in the input
812 * endpoint contexts.
813 */
814 virt_dev->in_ctx->drop_flags = 0;
815 virt_dev->in_ctx->add_flags = 0;
816 virt_dev->in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
817 /* Endpoint 0 is always valid */
818 virt_dev->in_ctx->slot.dev_info |= LAST_CTX(1);
819 for (i = 1; i < 31; ++i) {
820 ep_ctx = &virt_dev->in_ctx->ep[i];
821 ep_ctx->ep_info = 0;
822 ep_ctx->ep_info2 = 0;
823 ep_ctx->deq[0] = 0;
824 ep_ctx->deq[1] = 0;
825 ep_ctx->tx_info = 0;
826 }
827}
828
/* Issue a configure endpoint command to the xHC to commit the endpoint
 * add/drop flags accumulated in the input context, then wait for the
 * command to complete and translate its status into an errno.
 * On success the input context is zeroed and old endpoint rings are
 * replaced by the newly allocated ones; on failure the caller is
 * expected to call xhci_reset_bandwidth().
 */
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	int i;
	int ret = 0;
	int timeleft;
	unsigned long flags;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);

	spin_lock_irqsave(&xhci->lock, flags);
	/* slot_id 0 means the device was never addressed by the xHC */
	if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}
	xhci_dbg(xhci, "%s called for udev %#x\n", __func__, (unsigned int) udev);
	virt_dev = xhci->devs[udev->slot_id];

	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
	virt_dev->in_ctx->add_flags |= SLOT_FLAG;
	virt_dev->in_ctx->add_flags &= ~EP0_FLAG;
	virt_dev->in_ctx->drop_flags &= ~SLOT_FLAG;
	virt_dev->in_ctx->drop_flags &= ~EP0_FLAG;
	xhci_dbg(xhci, "New Input Control Context:\n");
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma,
			LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));

	ret = queue_configure_endpoint(xhci, virt_dev->in_ctx_dma, udev->slot_id);
	if (ret < 0) {
		/* Queueing failed, presumably because the command ring is
		 * full; the input context flags are left set for a retry.
		 */
		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENOMEM;
	}
	/* Ring the command doorbell, then drop the lock before sleeping. */
	ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the configure endpoint command to complete.
	 * NOTE(review): cmd_completion is presumably signaled by the
	 * command-completion event handler, which also sets cmd_status —
	 * confirm against the event-handling code (not visible here).
	 */
	timeleft = wait_for_completion_interruptible_timeout(
			&virt_dev->cmd_completion,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for configure endpoint command\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* FIXME cancel the configure endpoint command */
		return -ETIME;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	/* Map the xHC completion code to an errno. */
	switch (virt_dev->cmd_status) {
	case COMP_ENOMEM:
		dev_warn(&udev->dev, "Not enough host controller resources "
				"for new device state.\n");
		ret = -ENOMEM;
		/* FIXME: can we allocate more resources for the HC? */
		break;
	case COMP_BW_ERR:
		dev_warn(&udev->dev, "Not enough bandwidth "
				"for new device state.\n");
		ret = -ENOSPC;
		/* FIXME: can we go back to the old state? */
		break;
	case COMP_TRB_ERR:
		/* the HCD set up something wrong */
		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, add flag = 1, "
				"and endpoint is not disabled.\n");
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", virt_dev->cmd_status);
		ret = -EINVAL;
		break;
	}
	if (ret) {
		/* Callee should call reset_bandwidth() */
		spin_unlock_irqrestore(&xhci->lock, flags);
		return ret;
	}

	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma,
			LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));

	/* Clear the input context so the next add/drop cycle starts fresh. */
	xhci_zero_in_ctx(virt_dev);
	/* Free any old rings */
	for (i = 1; i < 31; ++i) {
		if (virt_dev->new_ep_rings[i]) {
			/* The new ring replaces the old; free the old one. */
			xhci_ring_free(xhci, virt_dev->ep_rings[i]);
			virt_dev->ep_rings[i] = virt_dev->new_ep_rings[i];
			virt_dev->new_ep_rings[i] = NULL;
		}
	}

	spin_unlock_irqrestore(&xhci->lock, flags);

	return ret;
}
935
936void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
937{
938 unsigned long flags;
939 struct xhci_hcd *xhci;
940 struct xhci_virt_device *virt_dev;
941 int i, ret;
942
943 ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
944 if (ret <= 0)
945 return;
946 xhci = hcd_to_xhci(hcd);
947
948 spin_lock_irqsave(&xhci->lock, flags);
949 if (!xhci->devs || !xhci->devs[udev->slot_id]) {
950 xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
951 __func__);
952 spin_unlock_irqrestore(&xhci->lock, flags);
953 return;
954 }
955 xhci_dbg(xhci, "%s called for udev %#x\n", __func__, (unsigned int) udev);
956 virt_dev = xhci->devs[udev->slot_id];
957 /* Free any rings allocated for added endpoints */
958 for (i = 0; i < 31; ++i) {
959 if (virt_dev->new_ep_rings[i]) {
960 xhci_ring_free(xhci, virt_dev->new_ep_rings[i]);
961 virt_dev->new_ep_rings[i] = NULL;
962 }
963 }
964 xhci_zero_in_ctx(virt_dev);
965 spin_unlock_irqrestore(&xhci->lock, flags);
966}
967
605/* 968/*
606 * At this point, the struct usb_device is about to go away, the device has 969 * At this point, the struct usb_device is about to go away, the device has
607 * disconnected, and all traffic has been stopped and the endpoints have been 970 * disconnected, and all traffic has been stopped and the endpoints have been
@@ -783,7 +1146,12 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
783 * address given back to us by the HC. 1146 * address given back to us by the HC.
784 */ 1147 */
785 udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1; 1148 udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1;
786 /* FIXME: Zero the input context control for later use? */ 1149 /* Zero the input context control for later use */
1150 virt_dev->in_ctx->add_flags = 0;
1151 virt_dev->in_ctx->drop_flags = 0;
1152 /* Mirror flags in the output context for future ep enable/disable */
1153 virt_dev->out_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
1154 virt_dev->out_ctx->drop_flags = 0;
787 spin_unlock_irqrestore(&xhci->lock, flags); 1155 spin_unlock_irqrestore(&xhci->lock, flags);
788 1156
789 xhci_dbg(xhci, "Device address = %d\n", udev->devnum); 1157 xhci_dbg(xhci, "Device address = %d\n", udev->devnum);