Diffstat (limited to 'drivers/usb/host/xhci-hcd.c')
-rw-r--r--  drivers/usb/host/xhci-hcd.c | 530
1 file changed, 439 insertions(+), 91 deletions(-)
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 816c39caca1c..99911e727e0b 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -22,12 +22,18 @@
 
 #include <linux/irq.h>
 #include <linux/module.h>
+#include <linux/moduleparam.h>
 
 #include "xhci.h"
 
 #define DRIVER_AUTHOR "Sarah Sharp"
 #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
 
+/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
+static int link_quirk;
+module_param(link_quirk, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
+
 /* TODO: copied from ehci-hcd.c - can this be refactored? */
 /*
  * handshake - spin reading hc until handshake completes or fails
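Usage note: since link_quirk is declared with S_IRUGO | S_IWUSR, it can be set when the driver loads (as a module option, link_quirk=1) and read or flipped at runtime through the module parameter file in sysfs, presumably /sys/module/xhci_hcd/parameters/link_quirk.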
@@ -214,6 +220,12 @@ int xhci_init(struct usb_hcd *hcd)
 
 	xhci_dbg(xhci, "xhci_init\n");
 	spin_lock_init(&xhci->lock);
+	if (link_quirk) {
+		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
+		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
+	} else {
+		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
+	}
 	retval = xhci_mem_init(xhci, GFP_KERNEL);
 	xhci_dbg(xhci, "Finished xhci_init\n");
 
@@ -339,13 +351,14 @@ void xhci_event_ring_work(unsigned long arg)
 	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
 	xhci_dbg_cmd_ptrs(xhci);
 	for (i = 0; i < MAX_HC_SLOTS; ++i) {
-		if (xhci->devs[i]) {
-			for (j = 0; j < 31; ++j) {
-				if (xhci->devs[i]->ep_rings[j]) {
-					xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
-					xhci_debug_segment(xhci, xhci->devs[i]->ep_rings[j]->deq_seg);
-				}
-			}
+		if (!xhci->devs[i])
+			continue;
+		for (j = 0; j < 31; ++j) {
+			struct xhci_ring *ring = xhci->devs[i]->eps[j].ring;
+			if (!ring)
+				continue;
+			xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
+			xhci_debug_segment(xhci, ring->deq_seg);
 		}
 	}
 
@@ -555,13 +568,22 @@ unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
 	return 1 << (xhci_get_endpoint_index(desc) + 1);
 }
 
+/* Find the flag for this endpoint (for use in the control context). Use the
+ * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
+ * bit 1, etc.
+ */
+unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
+{
+	return 1 << (ep_index + 1);
+}
+
 /* Compute the last valid endpoint context index. Basically, this is the
  * endpoint index plus one. For slot contexts with more than one valid endpoint,
  * we find the most significant bit set in the added contexts flags.
  * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
  * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
  */
-static inline unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
+unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
 {
 	return fls(added_ctxs) - 1;
 }
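The two helpers above are plain bit arithmetic, and the worked example in the comment can be checked standalone. A minimal userspace sketch, with a GCC builtin standing in for the kernel's fls(), and the index for ep 1 IN computed the way xhci_get_endpoint_index() would compute it for a non-control endpoint:

#include <stdio.h>

/* Kernel fls() semantics: 1-based index of the most significant set bit,
 * 0 for an all-zero input.
 */
static unsigned int fls_sketch(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	/* ep 1 IN (epnum 0x81): index = 1 * 2 + 1 - 1 = 2 */
	unsigned int ep_index = 2;
	unsigned int added_ctxs = 1 << (ep_index + 1);	/* slot ctx is bit 0 */

	printf("added_ctxs = 0x%x\n", added_ctxs);	/* prints 0x8 = 0b1000 */
	printf("last valid ep ctx index = %u\n",
			fls_sketch(added_ctxs) - 1);	/* prints 3 */
	return 0;
}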
@@ -589,6 +611,71 @@ int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
 	return 1;
 }
 
+static int xhci_configure_endpoint(struct xhci_hcd *xhci,
+		struct usb_device *udev, struct xhci_command *command,
+		bool ctx_change, bool must_succeed);
+
+/*
+ * Full speed devices may have a max packet size greater than 8 bytes, but the
+ * USB core doesn't know that until it reads the first 8 bytes of the
+ * descriptor. If the usb_device's max packet size changes after that point,
+ * we need to issue an evaluate context command and wait on it.
+ */
+static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
+		unsigned int ep_index, struct urb *urb)
+{
+	struct xhci_container_ctx *in_ctx;
+	struct xhci_container_ctx *out_ctx;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	struct xhci_ep_ctx *ep_ctx;
+	int max_packet_size;
+	int hw_max_packet_size;
+	int ret = 0;
+
+	out_ctx = xhci->devs[slot_id]->out_ctx;
+	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
+	hw_max_packet_size = MAX_PACKET_DECODED(ep_ctx->ep_info2);
+	max_packet_size = urb->dev->ep0.desc.wMaxPacketSize;
+	if (hw_max_packet_size != max_packet_size) {
+		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
+		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
+				max_packet_size);
+		xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
+				hw_max_packet_size);
+		xhci_dbg(xhci, "Issuing evaluate context command.\n");
+
+		/* Set up the modified control endpoint 0 */
+		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
+				xhci->devs[slot_id]->out_ctx, ep_index);
+		in_ctx = xhci->devs[slot_id]->in_ctx;
+		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
+		ep_ctx->ep_info2 &= ~MAX_PACKET_MASK;
+		ep_ctx->ep_info2 |= MAX_PACKET(max_packet_size);
+
+		/* Set up the input context flags for the command */
+		/* FIXME: This won't work if a non-default control endpoint
+		 * changes max packet sizes.
+		 */
+		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+		ctrl_ctx->add_flags = EP0_FLAG;
+		ctrl_ctx->drop_flags = 0;
+
+		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
+		xhci_dbg_ctx(xhci, in_ctx, ep_index);
+		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
+		xhci_dbg_ctx(xhci, out_ctx, ep_index);
+
+		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
+				true, false);
+
+		/* Clean up the input context for later use by bandwidth
+		 * functions.
+		 */
+		ctrl_ctx->add_flags = SLOT_FLAG;
+	}
+	return ret;
+}
+
 /*
  * non-error returns are a promise to giveback() the urb later
  * we drop ownership so next owner (or urb unlink) can get it
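For reference, xhci_check_maxpacket() above manipulates the max packet size field packed into bits 31:16 of ep_info2. A runnable userspace sketch of just that field handling, on the assumption that the MAX_PACKET* macros below match the xhci.h definitions:

#include <stdio.h>
#include <stdint.h>

#define MAX_PACKET(p)		(((p) & 0xffff) << 16)
#define MAX_PACKET_MASK		(0xffff << 16)
#define MAX_PACKET_DECODED(p)	(((p) >> 16) & 0xffff)

int main(void)
{
	/* Before the full descriptor is read, assume an 8-byte FS ep0 */
	uint32_t ep_info2 = MAX_PACKET(8);
	unsigned int max_packet_size = 64;	/* from the device descriptor */

	if (MAX_PACKET_DECODED(ep_info2) != max_packet_size) {
		ep_info2 &= ~MAX_PACKET_MASK;	/* clear bits 31:16 */
		ep_info2 |= MAX_PACKET(max_packet_size);
	}
	printf("hw max packet size = %u\n", MAX_PACKET_DECODED(ep_info2));
	return 0;
}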
@@ -600,13 +687,13 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 	int ret = 0;
 	unsigned int slot_id, ep_index;
 
+
 	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
 		return -EINVAL;
 
 	slot_id = urb->dev->slot_id;
 	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
 
-	spin_lock_irqsave(&xhci->lock, flags);
 	if (!xhci->devs || !xhci->devs[slot_id]) {
 		if (!in_interrupt())
 			dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n");
@@ -619,19 +706,38 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 		ret = -ESHUTDOWN;
 		goto exit;
 	}
-	if (usb_endpoint_xfer_control(&urb->ep->desc))
+	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
+		/* Check to see if the max packet size for the default control
+		 * endpoint changed during FS device enumeration
+		 */
+		if (urb->dev->speed == USB_SPEED_FULL) {
+			ret = xhci_check_maxpacket(xhci, slot_id,
+					ep_index, urb);
+			if (ret < 0)
+				return ret;
+		}
+
 		/* We have a spinlock and interrupts disabled, so we must pass
 		 * atomic context to this function, which may allocate memory.
 		 */
+		spin_lock_irqsave(&xhci->lock, flags);
 		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
 				slot_id, ep_index);
-	else if (usb_endpoint_xfer_bulk(&urb->ep->desc))
+		spin_unlock_irqrestore(&xhci->lock, flags);
+	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
+		spin_lock_irqsave(&xhci->lock, flags);
 		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
 				slot_id, ep_index);
-	else
+		spin_unlock_irqrestore(&xhci->lock, flags);
+	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
+		spin_lock_irqsave(&xhci->lock, flags);
+		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
+				slot_id, ep_index);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+	} else {
 		ret = -EINVAL;
+	}
 exit:
-	spin_unlock_irqrestore(&xhci->lock, flags);
 	return ret;
 }
 
@@ -674,6 +780,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	struct xhci_td *td;
 	unsigned int ep_index;
 	struct xhci_ring *ep_ring;
+	struct xhci_virt_ep *ep;
 
 	xhci = hcd_to_xhci(hcd);
 	spin_lock_irqsave(&xhci->lock, flags);
@@ -686,17 +793,18 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	xhci_dbg(xhci, "Event ring:\n");
 	xhci_debug_ring(xhci, xhci->event_ring);
 	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
-	ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index];
+	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
+	ep_ring = ep->ring;
 	xhci_dbg(xhci, "Endpoint ring:\n");
 	xhci_debug_ring(xhci, ep_ring);
 	td = (struct xhci_td *) urb->hcpriv;
 
-	ep_ring->cancels_pending++;
-	list_add_tail(&td->cancelled_td_list, &ep_ring->cancelled_td_list);
+	ep->cancels_pending++;
+	list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
 	/* Queue a stop endpoint command, but only if this is
 	 * the first cancellation to be handled.
 	 */
-	if (ep_ring->cancels_pending == 1) {
+	if (ep->cancels_pending == 1) {
 		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
 		xhci_ring_cmd_db(xhci);
 	}
@@ -930,6 +1038,141 @@ static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *vir
 	}
 }
 
+static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
+		struct usb_device *udev, int *cmd_status)
+{
+	int ret;
+
+	switch (*cmd_status) {
+	case COMP_ENOMEM:
+		dev_warn(&udev->dev, "Not enough host controller resources "
+				"for new device state.\n");
+		ret = -ENOMEM;
+		/* FIXME: can we allocate more resources for the HC? */
+		break;
+	case COMP_BW_ERR:
+		dev_warn(&udev->dev, "Not enough bandwidth "
+				"for new device state.\n");
+		ret = -ENOSPC;
+		/* FIXME: can we go back to the old state? */
+		break;
+	case COMP_TRB_ERR:
+		/* the HCD set up something wrong */
+		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
+				"add flag = 1, "
+				"and endpoint is not disabled.\n");
+		ret = -EINVAL;
+		break;
+	case COMP_SUCCESS:
+		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
+		ret = 0;
+		break;
+	default:
+		xhci_err(xhci, "ERROR: unexpected command completion "
+				"code 0x%x.\n", *cmd_status);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
+		struct usb_device *udev, int *cmd_status)
+{
+	int ret;
+	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
+
+	switch (*cmd_status) {
+	case COMP_EINVAL:
+		dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
+				"context command.\n");
+		ret = -EINVAL;
+		break;
+	case COMP_EBADSLT:
+		dev_warn(&udev->dev, "WARN: slot not enabled for "
+				"evaluate context command.\n");
+	case COMP_CTX_STATE:
+		dev_warn(&udev->dev, "WARN: invalid context state for "
+				"evaluate context command.\n");
+		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
+		ret = -EINVAL;
+		break;
+	case COMP_SUCCESS:
+		dev_dbg(&udev->dev, "Successful evaluate context command\n");
+		ret = 0;
+		break;
+	default:
+		xhci_err(xhci, "ERROR: unexpected command completion "
+				"code 0x%x.\n", *cmd_status);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+/* Issue a configure endpoint command or evaluate context command
+ * and wait for it to finish.
+ */
+static int xhci_configure_endpoint(struct xhci_hcd *xhci,
+		struct usb_device *udev,
+		struct xhci_command *command,
+		bool ctx_change, bool must_succeed)
+{
+	int ret;
+	int timeleft;
+	unsigned long flags;
+	struct xhci_container_ctx *in_ctx;
+	struct completion *cmd_completion;
+	int *cmd_status;
+	struct xhci_virt_device *virt_dev;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	virt_dev = xhci->devs[udev->slot_id];
+	if (command) {
+		in_ctx = command->in_ctx;
+		cmd_completion = command->completion;
+		cmd_status = &command->status;
+		command->command_trb = xhci->cmd_ring->enqueue;
+		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
+	} else {
+		in_ctx = virt_dev->in_ctx;
+		cmd_completion = &virt_dev->cmd_completion;
+		cmd_status = &virt_dev->cmd_status;
+	}
+
+	if (!ctx_change)
+		ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
+				udev->slot_id, must_succeed);
+	else
+		ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
+				udev->slot_id);
+	if (ret < 0) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
+		return -ENOMEM;
+	}
+	xhci_ring_cmd_db(xhci);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	/* Wait for the configure endpoint command to complete */
+	timeleft = wait_for_completion_interruptible_timeout(
+			cmd_completion,
+			USB_CTRL_SET_TIMEOUT);
+	if (timeleft <= 0) {
+		xhci_warn(xhci, "%s while waiting for %s command\n",
+				timeleft == 0 ? "Timeout" : "Signal",
+				ctx_change == 0 ?
+					"configure endpoint" :
+					"evaluate context");
+		/* FIXME cancel the configure endpoint command */
+		return -ETIME;
+	}
+
+	if (!ctx_change)
+		return xhci_configure_endpoint_result(xhci, udev, cmd_status);
+	return xhci_evaluate_context_result(xhci, udev, cmd_status);
+}
+
 /* Called after one or more calls to xhci_add_endpoint() or
  * xhci_drop_endpoint(). If this call fails, the USB core is expected
  * to call xhci_reset_bandwidth().
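A note on the design choice in xhci_configure_endpoint() above: callers that pass a struct xhci_command get per-command completion and status storage, plus an entry on the device's cmd_list so the command-completion handler can find them; this is what lets xhci_update_hub_device() further down wait on a command it allocated itself. Callers that pass NULL (the bandwidth and max-packet paths) fall back to the single per-device cmd_completion/cmd_status pair.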
@@ -944,8 +1187,6 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 {
 	int i;
 	int ret = 0;
-	int timeleft;
-	unsigned long flags;
 	struct xhci_hcd *xhci;
 	struct xhci_virt_device *virt_dev;
 	struct xhci_input_control_ctx *ctrl_ctx;
@@ -975,56 +1216,8 @@
 	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
 			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
 
-	spin_lock_irqsave(&xhci->lock, flags);
-	ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx->dma,
-			udev->slot_id);
-	if (ret < 0) {
-		spin_unlock_irqrestore(&xhci->lock, flags);
-		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
-		return -ENOMEM;
-	}
-	xhci_ring_cmd_db(xhci);
-	spin_unlock_irqrestore(&xhci->lock, flags);
-
-	/* Wait for the configure endpoint command to complete */
-	timeleft = wait_for_completion_interruptible_timeout(
-			&virt_dev->cmd_completion,
-			USB_CTRL_SET_TIMEOUT);
-	if (timeleft <= 0) {
-		xhci_warn(xhci, "%s while waiting for configure endpoint command\n",
-				timeleft == 0 ? "Timeout" : "Signal");
-		/* FIXME cancel the configure endpoint command */
-		return -ETIME;
-	}
-
-	switch (virt_dev->cmd_status) {
-	case COMP_ENOMEM:
-		dev_warn(&udev->dev, "Not enough host controller resources "
-				"for new device state.\n");
-		ret = -ENOMEM;
-		/* FIXME: can we allocate more resources for the HC? */
-		break;
-	case COMP_BW_ERR:
-		dev_warn(&udev->dev, "Not enough bandwidth "
-				"for new device state.\n");
-		ret = -ENOSPC;
-		/* FIXME: can we go back to the old state? */
-		break;
-	case COMP_TRB_ERR:
-		/* the HCD set up something wrong */
-		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, add flag = 1, "
-				"and endpoint is not disabled.\n");
-		ret = -EINVAL;
-		break;
-	case COMP_SUCCESS:
-		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
-		break;
-	default:
-		xhci_err(xhci, "ERROR: unexpected command completion "
-				"code 0x%x.\n", virt_dev->cmd_status);
-		ret = -EINVAL;
-		break;
-	}
+	ret = xhci_configure_endpoint(xhci, udev, NULL,
+			false, false);
 	if (ret) {
 		/* Callee should call reset_bandwidth() */
 		return ret;
@@ -1037,10 +1230,10 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 	xhci_zero_in_ctx(xhci, virt_dev);
 	/* Free any old rings */
 	for (i = 1; i < 31; ++i) {
-		if (virt_dev->new_ep_rings[i]) {
-			xhci_ring_free(xhci, virt_dev->ep_rings[i]);
-			virt_dev->ep_rings[i] = virt_dev->new_ep_rings[i];
-			virt_dev->new_ep_rings[i] = NULL;
+		if (virt_dev->eps[i].new_ring) {
+			xhci_ring_free(xhci, virt_dev->eps[i].ring);
+			virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
+			virt_dev->eps[i].new_ring = NULL;
 		}
 	}
 
@@ -1067,14 +1260,93 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 	virt_dev = xhci->devs[udev->slot_id];
 	/* Free any rings allocated for added endpoints */
 	for (i = 0; i < 31; ++i) {
-		if (virt_dev->new_ep_rings[i]) {
-			xhci_ring_free(xhci, virt_dev->new_ep_rings[i]);
-			virt_dev->new_ep_rings[i] = NULL;
+		if (virt_dev->eps[i].new_ring) {
+			xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
+			virt_dev->eps[i].new_ring = NULL;
 		}
 	}
 	xhci_zero_in_ctx(xhci, virt_dev);
 }
 
+static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx,
+		struct xhci_container_ctx *out_ctx,
+		u32 add_flags, u32 drop_flags)
+{
+	struct xhci_input_control_ctx *ctrl_ctx;
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+	ctrl_ctx->add_flags = add_flags;
+	ctrl_ctx->drop_flags = drop_flags;
+	xhci_slot_copy(xhci, in_ctx, out_ctx);
+	ctrl_ctx->add_flags |= SLOT_FLAG;
+
+	xhci_dbg(xhci, "Input Context:\n");
+	xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
+}
+
+void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
+		unsigned int slot_id, unsigned int ep_index,
+		struct xhci_dequeue_state *deq_state)
+{
+	struct xhci_container_ctx *in_ctx;
+	struct xhci_ep_ctx *ep_ctx;
+	u32 added_ctxs;
+	dma_addr_t addr;
+
+	xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
+			xhci->devs[slot_id]->out_ctx, ep_index);
+	in_ctx = xhci->devs[slot_id]->in_ctx;
+	ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
+	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
+			deq_state->new_deq_ptr);
+	if (addr == 0) {
+		xhci_warn(xhci, "WARN Cannot submit config ep after "
+				"reset ep command\n");
+		xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
+				deq_state->new_deq_seg,
+				deq_state->new_deq_ptr);
+		return;
+	}
+	ep_ctx->deq = addr | deq_state->new_cycle_state;
+
+	added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
+	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
+			xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
+}
+
+void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
+		struct usb_device *udev, unsigned int ep_index)
+{
+	struct xhci_dequeue_state deq_state;
+	struct xhci_virt_ep *ep;
+
+	xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
+	ep = &xhci->devs[udev->slot_id]->eps[ep_index];
+	/* We need to move the HW's dequeue pointer past this TD,
+	 * or it will attempt to resend it on the next doorbell ring.
+	 */
+	xhci_find_new_dequeue_state(xhci, udev->slot_id,
+			ep_index, ep->stopped_td,
+			&deq_state);
+
+	/* HW with the reset endpoint quirk will use the saved dequeue state to
+	 * issue a configure endpoint command later.
+	 */
+	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
+		xhci_dbg(xhci, "Queueing new dequeue state\n");
+		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
+				ep_index, &deq_state);
+	} else {
+		/* Better hope no one uses the input context between now and the
+		 * reset endpoint completion!
+		 */
+		xhci_dbg(xhci, "Setting up input context for "
+				"configure endpoint command\n");
+		xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
+				ep_index, &deq_state);
+	}
+}
+
 /* Deal with stalled endpoints. The core should have sent the control message
  * to clear the halt condition. However, we need to make the xHCI hardware
  * reset its sequence number, since a device will expect a sequence number of
@@ -1089,8 +1361,7 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
 	unsigned int ep_index;
 	unsigned long flags;
 	int ret;
-	struct xhci_dequeue_state deq_state;
-	struct xhci_ring *ep_ring;
+	struct xhci_virt_ep *virt_ep;
 
 	xhci = hcd_to_xhci(hcd);
 	udev = (struct usb_device *) ep->hcpriv;
@@ -1100,12 +1371,16 @@
 	if (!ep->hcpriv)
 		return;
 	ep_index = xhci_get_endpoint_index(&ep->desc);
-	ep_ring = xhci->devs[udev->slot_id]->ep_rings[ep_index];
-	if (!ep_ring->stopped_td) {
+	virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
+	if (!virt_ep->stopped_td) {
 		xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
 				ep->desc.bEndpointAddress);
 		return;
 	}
+	if (usb_endpoint_xfer_control(&ep->desc)) {
+		xhci_dbg(xhci, "Control endpoint stall already handled.\n");
+		return;
+	}
 
 	xhci_dbg(xhci, "Queueing reset endpoint command\n");
 	spin_lock_irqsave(&xhci->lock, flags);
@@ -1116,17 +1391,8 @@
 	 * command. Better hope that last command worked!
 	 */
 	if (!ret) {
-		xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
-		/* We need to move the HW's dequeue pointer past this TD,
-		 * or it will attempt to resend it on the next doorbell ring.
-		 */
-		xhci_find_new_dequeue_state(xhci, udev->slot_id,
-				ep_index, ep_ring->stopped_td, &deq_state);
-		xhci_dbg(xhci, "Queueing new dequeue state\n");
-		xhci_queue_new_dequeue_state(xhci, ep_ring,
-				udev->slot_id,
-				ep_index, &deq_state);
-		kfree(ep_ring->stopped_td);
+		xhci_cleanup_stalled_ring(xhci, udev, ep_index);
+		kfree(virt_ep->stopped_td);
 		xhci_ring_cmd_db(xhci);
 	}
 	spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1328,6 +1594,88 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 	return 0;
 }
 
+/* Once a hub descriptor is fetched for a device, we need to update the xHC's
+ * internal data structures for the device.
+ */
+int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+		struct usb_tt *tt, gfp_t mem_flags)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	struct xhci_virt_device *vdev;
+	struct xhci_command *config_cmd;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	struct xhci_slot_ctx *slot_ctx;
+	unsigned long flags;
+	unsigned think_time;
+	int ret;
+
+	/* Ignore root hubs */
+	if (!hdev->parent)
+		return 0;
+
+	vdev = xhci->devs[hdev->slot_id];
+	if (!vdev) {
+		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
+		return -EINVAL;
+	}
+	config_cmd = xhci_alloc_command(xhci, true, mem_flags);
+	if (!config_cmd) {
+		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
+		return -ENOMEM;
+	}
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
+	ctrl_ctx->add_flags |= SLOT_FLAG;
+	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
+	slot_ctx->dev_info |= DEV_HUB;
+	if (tt->multi)
+		slot_ctx->dev_info |= DEV_MTT;
+	if (xhci->hci_version > 0x95) {
+		xhci_dbg(xhci, "xHCI version %x needs hub "
+				"TT think time and number of ports\n",
+				(unsigned int) xhci->hci_version);
+		slot_ctx->dev_info2 |= XHCI_MAX_PORTS(hdev->maxchild);
+		/* Set TT think time - convert from ns to FS bit times.
+		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
+		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
+		 */
+		think_time = tt->think_time;
+		if (think_time != 0)
+			think_time = (think_time / 666) - 1;
+		slot_ctx->tt_info |= TT_THINK_TIME(think_time);
+	} else {
+		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
+				"TT think time or number of ports\n",
+				(unsigned int) xhci->hci_version);
+	}
+	slot_ctx->dev_state = 0;
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	xhci_dbg(xhci, "Set up %s for hub device.\n",
+			(xhci->hci_version > 0x95) ?
+			"configure endpoint" : "evaluate context");
+	xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
+	xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);
+
+	/* Issue and wait for the configure endpoint or
+	 * evaluate context command.
+	 */
+	if (xhci->hci_version > 0x95)
+		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
+				false, false);
+	else
+		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
+				true, false);
+
+	xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
+	xhci_dbg_ctx(xhci, vdev->out_ctx, 0);
+
+	xhci_free_command(xhci, config_cmd);
+	return ret;
+}
+
 int xhci_get_frame(struct usb_hcd *hcd)
 {
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
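The TT think-time conversion in xhci_update_hub_device() above is easy to sanity-check: the USB core stores tt->think_time in nanoseconds, and the slot context wants the hub descriptor's 2-bit encoding back (0..3 for 8/16/24/32 FS bit times, where 8 FS bit times is roughly 666 ns). A runnable sketch, assuming the usual core-side values of 666/1333/2000/2666 ns:

#include <stdio.h>

int main(void)
{
	/* Assumed ns values the USB core stores for the four hub
	 * descriptor TT think-time settings.
	 */
	unsigned int ns_values[] = { 666, 1333, 2000, 2666 };

	for (int i = 0; i < 4; i++) {
		unsigned int think_time = ns_values[i];

		/* Same conversion as the driver: ns -> 2-bit encoding */
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
		printf("%u ns -> encoding %u (%u FS bit times)\n",
				ns_values[i], think_time,
				8 * (think_time + 1));
	}
	return 0;
}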