author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2013-10-19 17:03:44 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2013-10-19 17:03:44 -0400
commit     9afcdb10ade4c3f5bedb1c7de9dd37f7061819eb (patch)
tree       907326c69cd913364da38e7e7fb7b30559cc1063 /drivers/usb/host
parent     5584cfbafc1a4c2a465f4423d351bb918c64cad0 (diff)
parent     a2cdc3432c361bb885476d1c625e22b518e0bc07 (diff)
Merge tag 'for-usb-next-2013-10-17' of git://git.kernel.org/pub/scm/linux/kernel/git/sarah/xhci into usb-next
Sarah writes:
xhci: Final patches for 3.13
Hi Greg,
Here's my pull request for usb-next and 3.13. My xHCI tree is closed
after this point, since I won't be able to run my full tests while I'm in
Scotland. After Kernel Summit, I'll be on vacation with access to email
from Oct 26th to Nov 6th.
Here's what's in this request:
- Patches to fix USB 2.0 Link PM issues that cause USB 3.0 devices to not
enumerate or misbehave when plugged into a USB 2.0 port. Those are
marked for stable.
- A msec vs jiffies bug fix from xiao jin; the bug only causes fairly harmless
  behavior, so the fix isn't marked for stable.
- Xenia's patches to refactor the xHCI command handling code, which makes
it much more readable and consistent.
- Misc cleanup patches, one by Sachin Kamat and three from Dan Williams.
Here's what's not in this request:
- Dan's two patches to allow the xHCI host to use the "Windows" or "new"
enumeration scheme. I did not have time to test those, and I want to
run them with as many USB devices as I can get a hold of. That will
have to wait for 3.14.
- Xenia's patches to remove xhci_readl in favor of readl. I'll queue
those for 3.14 after I test them.
- The xHCI streams update, UAS fixes, and usbfs streams support. I'm not
comfortable with changes and fixes to that patchset coming in this late.
I would rather wait for 3.14 and be really sure the streams support is
stable before we add new userspace API and remove CONFIG_BROKEN from the
uas driver.
- Julius' patch to clear the port reset bit on hub resume that came in
a couple days ago. It looks harmless, but I would rather take the time
to test and queue it for usb-linus and the stable trees once 3.13-rc1
is out.
Sarah Sharp
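
A quick illustration of the "msec vs jiffies" fix mentioned in the message above
(this sketch is not part of the patch, and the helper name is made up for
illustration): wait_for_completion_interruptible_timeout() takes its timeout in
jiffies, so passing a millisecond constant such as USB_CTRL_SET_TIMEOUT (5000)
makes the real wait length depend on HZ. The hunks below switch the xHCI command
waits to XHCI_CMD_DEFAULT_TIMEOUT, which the driver defines in jiffies
(5 * HZ at the time of this merge).

#include <linux/completion.h>
#include <linux/jiffies.h>

/* Hypothetical helper, shown only to contrast the two timeout units. */
static long wait_for_xhci_command(struct completion *done)
{
	/*
	 * Buggy pattern: 5000 is a millisecond value, but the API expects
	 * jiffies, so the wait would be 5 s with HZ=1000 but 50 s with HZ=100:
	 *
	 *	return wait_for_completion_interruptible_timeout(done, 5000);
	 */

	/*
	 * Correct: convert milliseconds to jiffies, or use a constant that is
	 * already expressed in jiffies (e.g. 5 * HZ).
	 */
	return wait_for_completion_interruptible_timeout(done,
						msecs_to_jiffies(5000));
}
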
Diffstat (limited to 'drivers/usb/host')
-rw-r--r-- | drivers/usb/host/xhci-hub.c  |   5
-rw-r--r-- | drivers/usb/host/xhci-mem.c  |  10
-rw-r--r-- | drivers/usb/host/xhci-ring.c | 324
-rw-r--r-- | drivers/usb/host/xhci.c      | 182
-rw-r--r-- | drivers/usb/host/xhci.h      |   3
5 files changed, 211 insertions, 313 deletions
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index e8b4c56dcf62..805f2348eeba 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -296,7 +296,7 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend) | |||
296 | /* Wait for last stop endpoint command to finish */ | 296 | /* Wait for last stop endpoint command to finish */ |
297 | timeleft = wait_for_completion_interruptible_timeout( | 297 | timeleft = wait_for_completion_interruptible_timeout( |
298 | cmd->completion, | 298 | cmd->completion, |
299 | USB_CTRL_SET_TIMEOUT); | 299 | XHCI_CMD_DEFAULT_TIMEOUT); |
300 | if (timeleft <= 0) { | 300 | if (timeleft <= 0) { |
301 | xhci_warn(xhci, "%s while waiting for stop endpoint command\n", | 301 | xhci_warn(xhci, "%s while waiting for stop endpoint command\n", |
302 | timeleft == 0 ? "Timeout" : "Signal"); | 302 | timeleft == 0 ? "Timeout" : "Signal"); |
@@ -524,7 +524,8 @@ static void xhci_hub_report_usb3_link_state(u32 *status, u32 status_reg) | |||
524 | * the compliance mode timer is deleted. A port won't enter | 524 | * the compliance mode timer is deleted. A port won't enter |
525 | * compliance mode if it has previously entered U0. | 525 | * compliance mode if it has previously entered U0. |
526 | */ | 526 | */ |
527 | void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, u16 wIndex) | 527 | static void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, |
528 | u16 wIndex) | ||
528 | { | 529 | { |
529 | u32 all_ports_seen_u0 = ((1 << xhci->num_usb3_ports)-1); | 530 | u32 all_ports_seen_u0 = ((1 << xhci->num_usb3_ports)-1); |
530 | bool port_in_u0 = ((status & PORT_PLS_MASK) == XDEV_U0); | 531 | bool port_in_u0 = ((status & PORT_PLS_MASK) == XDEV_U0); |
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 83bcd13622c3..49b8bd063fab 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1693,9 +1693,7 @@ void xhci_free_command(struct xhci_hcd *xhci, | |||
1693 | void xhci_mem_cleanup(struct xhci_hcd *xhci) | 1693 | void xhci_mem_cleanup(struct xhci_hcd *xhci) |
1694 | { | 1694 | { |
1695 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); | 1695 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); |
1696 | struct dev_info *dev_info, *next; | ||
1697 | struct xhci_cd *cur_cd, *next_cd; | 1696 | struct xhci_cd *cur_cd, *next_cd; |
1698 | unsigned long flags; | ||
1699 | int size; | 1697 | int size; |
1700 | int i, j, num_ports; | 1698 | int i, j, num_ports; |
1701 | 1699 | ||
@@ -1756,13 +1754,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) | |||
1756 | 1754 | ||
1757 | scratchpad_free(xhci); | 1755 | scratchpad_free(xhci); |
1758 | 1756 | ||
1759 | spin_lock_irqsave(&xhci->lock, flags); | ||
1760 | list_for_each_entry_safe(dev_info, next, &xhci->lpm_failed_devs, list) { | ||
1761 | list_del(&dev_info->list); | ||
1762 | kfree(dev_info); | ||
1763 | } | ||
1764 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
1765 | |||
1766 | if (!xhci->rh_bw) | 1757 | if (!xhci->rh_bw) |
1767 | goto no_bw; | 1758 | goto no_bw; |
1768 | 1759 | ||
@@ -2231,7 +2222,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
2231 | u32 page_size, temp; | 2222 | u32 page_size, temp; |
2232 | int i; | 2223 | int i; |
2233 | 2224 | ||
2234 | INIT_LIST_HEAD(&xhci->lpm_failed_devs); | ||
2235 | INIT_LIST_HEAD(&xhci->cancel_cmd_list); | 2225 | INIT_LIST_HEAD(&xhci->cancel_cmd_list); |
2236 | 2226 | ||
2237 | page_size = xhci_readl(xhci, &xhci->op_regs->page_size); | 2227 | page_size = xhci_readl(xhci, &xhci->op_regs->page_size); |
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 6bfbd80ec2b9..1e2f3f495843 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -178,7 +178,7 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring) | |||
178 | if (ring->type == TYPE_EVENT && | 178 | if (ring->type == TYPE_EVENT && |
179 | last_trb_on_last_seg(xhci, ring, | 179 | last_trb_on_last_seg(xhci, ring, |
180 | ring->deq_seg, ring->dequeue)) { | 180 | ring->deq_seg, ring->dequeue)) { |
181 | ring->cycle_state = (ring->cycle_state ? 0 : 1); | 181 | ring->cycle_state ^= 1; |
182 | } | 182 | } |
183 | ring->deq_seg = ring->deq_seg->next; | 183 | ring->deq_seg = ring->deq_seg->next; |
184 | ring->dequeue = ring->deq_seg->trbs; | 184 | ring->dequeue = ring->deq_seg->trbs; |
@@ -726,7 +726,7 @@ static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci, | |||
726 | 726 | ||
727 | /* Must be called with xhci->lock held in interrupt context */ | 727 | /* Must be called with xhci->lock held in interrupt context */ |
728 | static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci, | 728 | static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci, |
729 | struct xhci_td *cur_td, int status, char *adjective) | 729 | struct xhci_td *cur_td, int status) |
730 | { | 730 | { |
731 | struct usb_hcd *hcd; | 731 | struct usb_hcd *hcd; |
732 | struct urb *urb; | 732 | struct urb *urb; |
@@ -765,10 +765,9 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci, | |||
765 | * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain | 765 | * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain |
766 | * bit cleared) so that the HW will skip over them. | 766 | * bit cleared) so that the HW will skip over them. |
767 | */ | 767 | */ |
768 | static void handle_stopped_endpoint(struct xhci_hcd *xhci, | 768 | static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, |
769 | union xhci_trb *trb, struct xhci_event_cmd *event) | 769 | union xhci_trb *trb, struct xhci_event_cmd *event) |
770 | { | 770 | { |
771 | unsigned int slot_id; | ||
772 | unsigned int ep_index; | 771 | unsigned int ep_index; |
773 | struct xhci_virt_device *virt_dev; | 772 | struct xhci_virt_device *virt_dev; |
774 | struct xhci_ring *ep_ring; | 773 | struct xhci_ring *ep_ring; |
@@ -779,10 +778,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, | |||
779 | 778 | ||
780 | struct xhci_dequeue_state deq_state; | 779 | struct xhci_dequeue_state deq_state; |
781 | 780 | ||
782 | if (unlikely(TRB_TO_SUSPEND_PORT( | 781 | if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) { |
783 | le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])))) { | ||
784 | slot_id = TRB_TO_SLOT_ID( | ||
785 | le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])); | ||
786 | virt_dev = xhci->devs[slot_id]; | 782 | virt_dev = xhci->devs[slot_id]; |
787 | if (virt_dev) | 783 | if (virt_dev) |
788 | handle_cmd_in_cmd_wait_list(xhci, virt_dev, | 784 | handle_cmd_in_cmd_wait_list(xhci, virt_dev, |
@@ -795,7 +791,6 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, | |||
795 | } | 791 | } |
796 | 792 | ||
797 | memset(&deq_state, 0, sizeof(deq_state)); | 793 | memset(&deq_state, 0, sizeof(deq_state)); |
798 | slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3])); | ||
799 | ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); | 794 | ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); |
800 | ep = &xhci->devs[slot_id]->eps[ep_index]; | 795 | ep = &xhci->devs[slot_id]->eps[ep_index]; |
801 | 796 | ||
@@ -891,7 +886,7 @@ remove_finished_td: | |||
891 | /* Doesn't matter what we pass for status, since the core will | 886 | /* Doesn't matter what we pass for status, since the core will |
892 | * just overwrite it (because the URB has been unlinked). | 887 | * just overwrite it (because the URB has been unlinked). |
893 | */ | 888 | */ |
894 | xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled"); | 889 | xhci_giveback_urb_in_irq(xhci, cur_td, 0); |
895 | 890 | ||
896 | /* Stop processing the cancelled list if the watchdog timer is | 891 | /* Stop processing the cancelled list if the watchdog timer is |
897 | * running. | 892 | * running. |
@@ -1001,7 +996,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg) | |||
1001 | if (!list_empty(&cur_td->cancelled_td_list)) | 996 | if (!list_empty(&cur_td->cancelled_td_list)) |
1002 | list_del_init(&cur_td->cancelled_td_list); | 997 | list_del_init(&cur_td->cancelled_td_list); |
1003 | xhci_giveback_urb_in_irq(xhci, cur_td, | 998 | xhci_giveback_urb_in_irq(xhci, cur_td, |
1004 | -ESHUTDOWN, "killed"); | 999 | -ESHUTDOWN); |
1005 | } | 1000 | } |
1006 | while (!list_empty(&temp_ep->cancelled_td_list)) { | 1001 | while (!list_empty(&temp_ep->cancelled_td_list)) { |
1007 | cur_td = list_first_entry( | 1002 | cur_td = list_first_entry( |
@@ -1010,7 +1005,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg) | |||
1010 | cancelled_td_list); | 1005 | cancelled_td_list); |
1011 | list_del_init(&cur_td->cancelled_td_list); | 1006 | list_del_init(&cur_td->cancelled_td_list); |
1012 | xhci_giveback_urb_in_irq(xhci, cur_td, | 1007 | xhci_giveback_urb_in_irq(xhci, cur_td, |
1013 | -ESHUTDOWN, "killed"); | 1008 | -ESHUTDOWN); |
1014 | } | 1009 | } |
1015 | } | 1010 | } |
1016 | } | 1011 | } |
@@ -1077,11 +1072,9 @@ static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci, | |||
1077 | * endpoint doorbell to restart the ring, but only if there aren't more | 1072 | * endpoint doorbell to restart the ring, but only if there aren't more |
1078 | * cancellations pending. | 1073 | * cancellations pending. |
1079 | */ | 1074 | */ |
1080 | static void handle_set_deq_completion(struct xhci_hcd *xhci, | 1075 | static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, |
1081 | struct xhci_event_cmd *event, | 1076 | union xhci_trb *trb, u32 cmd_comp_code) |
1082 | union xhci_trb *trb) | ||
1083 | { | 1077 | { |
1084 | unsigned int slot_id; | ||
1085 | unsigned int ep_index; | 1078 | unsigned int ep_index; |
1086 | unsigned int stream_id; | 1079 | unsigned int stream_id; |
1087 | struct xhci_ring *ep_ring; | 1080 | struct xhci_ring *ep_ring; |
@@ -1089,7 +1082,6 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, | |||
1089 | struct xhci_ep_ctx *ep_ctx; | 1082 | struct xhci_ep_ctx *ep_ctx; |
1090 | struct xhci_slot_ctx *slot_ctx; | 1083 | struct xhci_slot_ctx *slot_ctx; |
1091 | 1084 | ||
1092 | slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3])); | ||
1093 | ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); | 1085 | ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); |
1094 | stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2])); | 1086 | stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2])); |
1095 | dev = xhci->devs[slot_id]; | 1087 | dev = xhci->devs[slot_id]; |
@@ -1107,11 +1099,11 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, | |||
1107 | ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); | 1099 | ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); |
1108 | slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx); | 1100 | slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx); |
1109 | 1101 | ||
1110 | if (GET_COMP_CODE(le32_to_cpu(event->status)) != COMP_SUCCESS) { | 1102 | if (cmd_comp_code != COMP_SUCCESS) { |
1111 | unsigned int ep_state; | 1103 | unsigned int ep_state; |
1112 | unsigned int slot_state; | 1104 | unsigned int slot_state; |
1113 | 1105 | ||
1114 | switch (GET_COMP_CODE(le32_to_cpu(event->status))) { | 1106 | switch (cmd_comp_code) { |
1115 | case COMP_TRB_ERR: | 1107 | case COMP_TRB_ERR: |
1116 | xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because " | 1108 | xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because " |
1117 | "of stream ID configuration\n"); | 1109 | "of stream ID configuration\n"); |
@@ -1134,7 +1126,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, | |||
1134 | default: | 1126 | default: |
1135 | xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown " | 1127 | xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown " |
1136 | "completion code of %u.\n", | 1128 | "completion code of %u.\n", |
1137 | GET_COMP_CODE(le32_to_cpu(event->status))); | 1129 | cmd_comp_code); |
1138 | break; | 1130 | break; |
1139 | } | 1131 | } |
1140 | /* OK what do we do now? The endpoint state is hosed, and we | 1132 | /* OK what do we do now? The endpoint state is hosed, and we |
@@ -1171,21 +1163,17 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, | |||
1171 | ring_doorbell_for_active_rings(xhci, slot_id, ep_index); | 1163 | ring_doorbell_for_active_rings(xhci, slot_id, ep_index); |
1172 | } | 1164 | } |
1173 | 1165 | ||
1174 | static void handle_reset_ep_completion(struct xhci_hcd *xhci, | 1166 | static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, |
1175 | struct xhci_event_cmd *event, | 1167 | union xhci_trb *trb, u32 cmd_comp_code) |
1176 | union xhci_trb *trb) | ||
1177 | { | 1168 | { |
1178 | int slot_id; | ||
1179 | unsigned int ep_index; | 1169 | unsigned int ep_index; |
1180 | 1170 | ||
1181 | slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3])); | ||
1182 | ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); | 1171 | ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); |
1183 | /* This command will only fail if the endpoint wasn't halted, | 1172 | /* This command will only fail if the endpoint wasn't halted, |
1184 | * but we don't care. | 1173 | * but we don't care. |
1185 | */ | 1174 | */ |
1186 | xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, | 1175 | xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, |
1187 | "Ignoring reset ep completion code of %u", | 1176 | "Ignoring reset ep completion code of %u", cmd_comp_code); |
1188 | GET_COMP_CODE(le32_to_cpu(event->status))); | ||
1189 | 1177 | ||
1190 | /* HW with the reset endpoint quirk needs to have a configure endpoint | 1178 | /* HW with the reset endpoint quirk needs to have a configure endpoint |
1191 | * command complete before the endpoint can be used. Queue that here | 1179 | * command complete before the endpoint can be used. Queue that here |
@@ -1386,21 +1374,149 @@ static int handle_stopped_cmd_ring(struct xhci_hcd *xhci, | |||
1386 | return cur_trb_is_good; | 1374 | return cur_trb_is_good; |
1387 | } | 1375 | } |
1388 | 1376 | ||
1377 | static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id, | ||
1378 | u32 cmd_comp_code) | ||
1379 | { | ||
1380 | if (cmd_comp_code == COMP_SUCCESS) | ||
1381 | xhci->slot_id = slot_id; | ||
1382 | else | ||
1383 | xhci->slot_id = 0; | ||
1384 | complete(&xhci->addr_dev); | ||
1385 | } | ||
1386 | |||
1387 | static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id) | ||
1388 | { | ||
1389 | struct xhci_virt_device *virt_dev; | ||
1390 | |||
1391 | virt_dev = xhci->devs[slot_id]; | ||
1392 | if (!virt_dev) | ||
1393 | return; | ||
1394 | if (xhci->quirks & XHCI_EP_LIMIT_QUIRK) | ||
1395 | /* Delete default control endpoint resources */ | ||
1396 | xhci_free_device_endpoint_resources(xhci, virt_dev, true); | ||
1397 | xhci_free_virt_device(xhci, slot_id); | ||
1398 | } | ||
1399 | |||
1400 | static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id, | ||
1401 | struct xhci_event_cmd *event, u32 cmd_comp_code) | ||
1402 | { | ||
1403 | struct xhci_virt_device *virt_dev; | ||
1404 | struct xhci_input_control_ctx *ctrl_ctx; | ||
1405 | unsigned int ep_index; | ||
1406 | unsigned int ep_state; | ||
1407 | u32 add_flags, drop_flags; | ||
1408 | |||
1409 | virt_dev = xhci->devs[slot_id]; | ||
1410 | if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event)) | ||
1411 | return; | ||
1412 | /* | ||
1413 | * Configure endpoint commands can come from the USB core | ||
1414 | * configuration or alt setting changes, or because the HW | ||
1415 | * needed an extra configure endpoint command after a reset | ||
1416 | * endpoint command or streams were being configured. | ||
1417 | * If the command was for a halted endpoint, the xHCI driver | ||
1418 | * is not waiting on the configure endpoint command. | ||
1419 | */ | ||
1420 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); | ||
1421 | if (!ctrl_ctx) { | ||
1422 | xhci_warn(xhci, "Could not get input context, bad type.\n"); | ||
1423 | return; | ||
1424 | } | ||
1425 | |||
1426 | add_flags = le32_to_cpu(ctrl_ctx->add_flags); | ||
1427 | drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); | ||
1428 | /* Input ctx add_flags are the endpoint index plus one */ | ||
1429 | ep_index = xhci_last_valid_endpoint(add_flags) - 1; | ||
1430 | |||
1431 | /* A usb_set_interface() call directly after clearing a halted | ||
1432 | * condition may race on this quirky hardware. Not worth | ||
1433 | * worrying about, since this is prototype hardware. Not sure | ||
1434 | * if this will work for streams, but streams support was | ||
1435 | * untested on this prototype. | ||
1436 | */ | ||
1437 | if (xhci->quirks & XHCI_RESET_EP_QUIRK && | ||
1438 | ep_index != (unsigned int) -1 && | ||
1439 | add_flags - SLOT_FLAG == drop_flags) { | ||
1440 | ep_state = virt_dev->eps[ep_index].ep_state; | ||
1441 | if (!(ep_state & EP_HALTED)) | ||
1442 | goto bandwidth_change; | ||
1443 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, | ||
1444 | "Completed config ep cmd - " | ||
1445 | "last ep index = %d, state = %d", | ||
1446 | ep_index, ep_state); | ||
1447 | /* Clear internal halted state and restart ring(s) */ | ||
1448 | virt_dev->eps[ep_index].ep_state &= ~EP_HALTED; | ||
1449 | ring_doorbell_for_active_rings(xhci, slot_id, ep_index); | ||
1450 | return; | ||
1451 | } | ||
1452 | bandwidth_change: | ||
1453 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, | ||
1454 | "Completed config ep cmd"); | ||
1455 | virt_dev->cmd_status = cmd_comp_code; | ||
1456 | complete(&virt_dev->cmd_completion); | ||
1457 | return; | ||
1458 | } | ||
1459 | |||
1460 | static void xhci_handle_cmd_eval_ctx(struct xhci_hcd *xhci, int slot_id, | ||
1461 | struct xhci_event_cmd *event, u32 cmd_comp_code) | ||
1462 | { | ||
1463 | struct xhci_virt_device *virt_dev; | ||
1464 | |||
1465 | virt_dev = xhci->devs[slot_id]; | ||
1466 | if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event)) | ||
1467 | return; | ||
1468 | virt_dev->cmd_status = cmd_comp_code; | ||
1469 | complete(&virt_dev->cmd_completion); | ||
1470 | } | ||
1471 | |||
1472 | static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id, | ||
1473 | u32 cmd_comp_code) | ||
1474 | { | ||
1475 | xhci->devs[slot_id]->cmd_status = cmd_comp_code; | ||
1476 | complete(&xhci->addr_dev); | ||
1477 | } | ||
1478 | |||
1479 | static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id, | ||
1480 | struct xhci_event_cmd *event) | ||
1481 | { | ||
1482 | struct xhci_virt_device *virt_dev; | ||
1483 | |||
1484 | xhci_dbg(xhci, "Completed reset device command.\n"); | ||
1485 | virt_dev = xhci->devs[slot_id]; | ||
1486 | if (virt_dev) | ||
1487 | handle_cmd_in_cmd_wait_list(xhci, virt_dev, event); | ||
1488 | else | ||
1489 | xhci_warn(xhci, "Reset device command completion " | ||
1490 | "for disabled slot %u\n", slot_id); | ||
1491 | } | ||
1492 | |||
1493 | static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci, | ||
1494 | struct xhci_event_cmd *event) | ||
1495 | { | ||
1496 | if (!(xhci->quirks & XHCI_NEC_HOST)) { | ||
1497 | xhci->error_bitmask |= 1 << 6; | ||
1498 | return; | ||
1499 | } | ||
1500 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, | ||
1501 | "NEC firmware version %2x.%02x", | ||
1502 | NEC_FW_MAJOR(le32_to_cpu(event->status)), | ||
1503 | NEC_FW_MINOR(le32_to_cpu(event->status))); | ||
1504 | } | ||
1505 | |||
1389 | static void handle_cmd_completion(struct xhci_hcd *xhci, | 1506 | static void handle_cmd_completion(struct xhci_hcd *xhci, |
1390 | struct xhci_event_cmd *event) | 1507 | struct xhci_event_cmd *event) |
1391 | { | 1508 | { |
1392 | int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); | 1509 | int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); |
1393 | u64 cmd_dma; | 1510 | u64 cmd_dma; |
1394 | dma_addr_t cmd_dequeue_dma; | 1511 | dma_addr_t cmd_dequeue_dma; |
1395 | struct xhci_input_control_ctx *ctrl_ctx; | 1512 | u32 cmd_comp_code; |
1396 | struct xhci_virt_device *virt_dev; | 1513 | union xhci_trb *cmd_trb; |
1397 | unsigned int ep_index; | 1514 | u32 cmd_type; |
1398 | struct xhci_ring *ep_ring; | ||
1399 | unsigned int ep_state; | ||
1400 | 1515 | ||
1401 | cmd_dma = le64_to_cpu(event->cmd_trb); | 1516 | cmd_dma = le64_to_cpu(event->cmd_trb); |
1517 | cmd_trb = xhci->cmd_ring->dequeue; | ||
1402 | cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, | 1518 | cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, |
1403 | xhci->cmd_ring->dequeue); | 1519 | cmd_trb); |
1404 | /* Is the command ring deq ptr out of sync with the deq seg ptr? */ | 1520 | /* Is the command ring deq ptr out of sync with the deq seg ptr? */ |
1405 | if (cmd_dequeue_dma == 0) { | 1521 | if (cmd_dequeue_dma == 0) { |
1406 | xhci->error_bitmask |= 1 << 4; | 1522 | xhci->error_bitmask |= 1 << 4; |
@@ -1412,19 +1528,17 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, | |||
1412 | return; | 1528 | return; |
1413 | } | 1529 | } |
1414 | 1530 | ||
1415 | trace_xhci_cmd_completion(&xhci->cmd_ring->dequeue->generic, | 1531 | trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event); |
1416 | (struct xhci_generic_trb *) event); | ||
1417 | 1532 | ||
1418 | if ((GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_ABORT) || | 1533 | cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status)); |
1419 | (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_STOP)) { | 1534 | if (cmd_comp_code == COMP_CMD_ABORT || cmd_comp_code == COMP_CMD_STOP) { |
1420 | /* If the return value is 0, we think the trb pointed by | 1535 | /* If the return value is 0, we think the trb pointed by |
1421 | * command ring dequeue pointer is a good trb. The good | 1536 | * command ring dequeue pointer is a good trb. The good |
1422 | * trb means we don't want to cancel the trb, but it have | 1537 | * trb means we don't want to cancel the trb, but it have |
1423 | * been stopped by host. So we should handle it normally. | 1538 | * been stopped by host. So we should handle it normally. |
1424 | * Otherwise, driver should invoke inc_deq() and return. | 1539 | * Otherwise, driver should invoke inc_deq() and return. |
1425 | */ | 1540 | */ |
1426 | if (handle_stopped_cmd_ring(xhci, | 1541 | if (handle_stopped_cmd_ring(xhci, cmd_comp_code)) { |
1427 | GET_COMP_CODE(le32_to_cpu(event->status)))) { | ||
1428 | inc_deq(xhci, xhci->cmd_ring); | 1542 | inc_deq(xhci, xhci->cmd_ring); |
1429 | return; | 1543 | return; |
1430 | } | 1544 | } |
@@ -1436,117 +1550,47 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, | |||
1436 | return; | 1550 | return; |
1437 | } | 1551 | } |
1438 | 1552 | ||
1439 | switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]) | 1553 | cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3])); |
1440 | & TRB_TYPE_BITMASK) { | 1554 | switch (cmd_type) { |
1441 | case TRB_TYPE(TRB_ENABLE_SLOT): | 1555 | case TRB_ENABLE_SLOT: |
1442 | if (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS) | 1556 | xhci_handle_cmd_enable_slot(xhci, slot_id, cmd_comp_code); |
1443 | xhci->slot_id = slot_id; | ||
1444 | else | ||
1445 | xhci->slot_id = 0; | ||
1446 | complete(&xhci->addr_dev); | ||
1447 | break; | 1557 | break; |
1448 | case TRB_TYPE(TRB_DISABLE_SLOT): | 1558 | case TRB_DISABLE_SLOT: |
1449 | if (xhci->devs[slot_id]) { | 1559 | xhci_handle_cmd_disable_slot(xhci, slot_id); |
1450 | if (xhci->quirks & XHCI_EP_LIMIT_QUIRK) | ||
1451 | /* Delete default control endpoint resources */ | ||
1452 | xhci_free_device_endpoint_resources(xhci, | ||
1453 | xhci->devs[slot_id], true); | ||
1454 | xhci_free_virt_device(xhci, slot_id); | ||
1455 | } | ||
1456 | break; | 1560 | break; |
1457 | case TRB_TYPE(TRB_CONFIG_EP): | 1561 | case TRB_CONFIG_EP: |
1458 | virt_dev = xhci->devs[slot_id]; | 1562 | xhci_handle_cmd_config_ep(xhci, slot_id, event, cmd_comp_code); |
1459 | if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event)) | ||
1460 | break; | ||
1461 | /* | ||
1462 | * Configure endpoint commands can come from the USB core | ||
1463 | * configuration or alt setting changes, or because the HW | ||
1464 | * needed an extra configure endpoint command after a reset | ||
1465 | * endpoint command or streams were being configured. | ||
1466 | * If the command was for a halted endpoint, the xHCI driver | ||
1467 | * is not waiting on the configure endpoint command. | ||
1468 | */ | ||
1469 | ctrl_ctx = xhci_get_input_control_ctx(xhci, | ||
1470 | virt_dev->in_ctx); | ||
1471 | if (!ctrl_ctx) { | ||
1472 | xhci_warn(xhci, "Could not get input context, bad type.\n"); | ||
1473 | break; | ||
1474 | } | ||
1475 | /* Input ctx add_flags are the endpoint index plus one */ | ||
1476 | ep_index = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags)) - 1; | ||
1477 | /* A usb_set_interface() call directly after clearing a halted | ||
1478 | * condition may race on this quirky hardware. Not worth | ||
1479 | * worrying about, since this is prototype hardware. Not sure | ||
1480 | * if this will work for streams, but streams support was | ||
1481 | * untested on this prototype. | ||
1482 | */ | ||
1483 | if (xhci->quirks & XHCI_RESET_EP_QUIRK && | ||
1484 | ep_index != (unsigned int) -1 && | ||
1485 | le32_to_cpu(ctrl_ctx->add_flags) - SLOT_FLAG == | ||
1486 | le32_to_cpu(ctrl_ctx->drop_flags)) { | ||
1487 | ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; | ||
1488 | ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; | ||
1489 | if (!(ep_state & EP_HALTED)) | ||
1490 | goto bandwidth_change; | ||
1491 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, | ||
1492 | "Completed config ep cmd - " | ||
1493 | "last ep index = %d, state = %d", | ||
1494 | ep_index, ep_state); | ||
1495 | /* Clear internal halted state and restart ring(s) */ | ||
1496 | xhci->devs[slot_id]->eps[ep_index].ep_state &= | ||
1497 | ~EP_HALTED; | ||
1498 | ring_doorbell_for_active_rings(xhci, slot_id, ep_index); | ||
1499 | break; | ||
1500 | } | ||
1501 | bandwidth_change: | ||
1502 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, | ||
1503 | "Completed config ep cmd"); | ||
1504 | xhci->devs[slot_id]->cmd_status = | ||
1505 | GET_COMP_CODE(le32_to_cpu(event->status)); | ||
1506 | complete(&xhci->devs[slot_id]->cmd_completion); | ||
1507 | break; | 1563 | break; |
1508 | case TRB_TYPE(TRB_EVAL_CONTEXT): | 1564 | case TRB_EVAL_CONTEXT: |
1509 | virt_dev = xhci->devs[slot_id]; | 1565 | xhci_handle_cmd_eval_ctx(xhci, slot_id, event, cmd_comp_code); |
1510 | if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event)) | ||
1511 | break; | ||
1512 | xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status)); | ||
1513 | complete(&xhci->devs[slot_id]->cmd_completion); | ||
1514 | break; | 1566 | break; |
1515 | case TRB_TYPE(TRB_ADDR_DEV): | 1567 | case TRB_ADDR_DEV: |
1516 | xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status)); | 1568 | xhci_handle_cmd_addr_dev(xhci, slot_id, cmd_comp_code); |
1517 | complete(&xhci->addr_dev); | ||
1518 | break; | 1569 | break; |
1519 | case TRB_TYPE(TRB_STOP_RING): | 1570 | case TRB_STOP_RING: |
1520 | handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue, event); | 1571 | WARN_ON(slot_id != TRB_TO_SLOT_ID( |
1572 | le32_to_cpu(cmd_trb->generic.field[3]))); | ||
1573 | xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event); | ||
1521 | break; | 1574 | break; |
1522 | case TRB_TYPE(TRB_SET_DEQ): | 1575 | case TRB_SET_DEQ: |
1523 | handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue); | 1576 | WARN_ON(slot_id != TRB_TO_SLOT_ID( |
1577 | le32_to_cpu(cmd_trb->generic.field[3]))); | ||
1578 | xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code); | ||
1524 | break; | 1579 | break; |
1525 | case TRB_TYPE(TRB_CMD_NOOP): | 1580 | case TRB_CMD_NOOP: |
1526 | break; | 1581 | break; |
1527 | case TRB_TYPE(TRB_RESET_EP): | 1582 | case TRB_RESET_EP: |
1528 | handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue); | 1583 | WARN_ON(slot_id != TRB_TO_SLOT_ID( |
1584 | le32_to_cpu(cmd_trb->generic.field[3]))); | ||
1585 | xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code); | ||
1529 | break; | 1586 | break; |
1530 | case TRB_TYPE(TRB_RESET_DEV): | 1587 | case TRB_RESET_DEV: |
1531 | xhci_dbg(xhci, "Completed reset device command.\n"); | 1588 | WARN_ON(slot_id != TRB_TO_SLOT_ID( |
1532 | slot_id = TRB_TO_SLOT_ID( | 1589 | le32_to_cpu(cmd_trb->generic.field[3]))); |
1533 | le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])); | 1590 | xhci_handle_cmd_reset_dev(xhci, slot_id, event); |
1534 | virt_dev = xhci->devs[slot_id]; | ||
1535 | if (virt_dev) | ||
1536 | handle_cmd_in_cmd_wait_list(xhci, virt_dev, event); | ||
1537 | else | ||
1538 | xhci_warn(xhci, "Reset device command completion " | ||
1539 | "for disabled slot %u\n", slot_id); | ||
1540 | break; | 1591 | break; |
1541 | case TRB_TYPE(TRB_NEC_GET_FW): | 1592 | case TRB_NEC_GET_FW: |
1542 | if (!(xhci->quirks & XHCI_NEC_HOST)) { | 1593 | xhci_handle_cmd_nec_get_fw(xhci, event); |
1543 | xhci->error_bitmask |= 1 << 6; | ||
1544 | break; | ||
1545 | } | ||
1546 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, | ||
1547 | "NEC firmware version %2x.%02x", | ||
1548 | NEC_FW_MAJOR(le32_to_cpu(event->status)), | ||
1549 | NEC_FW_MINOR(le32_to_cpu(event->status))); | ||
1550 | break; | 1594 | break; |
1551 | default: | 1595 | default: |
1552 | /* Skip over unknown commands on the event ring */ | 1596 | /* Skip over unknown commands on the event ring */ |
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 6e0d886bcce5..4265b48856f6 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3459,7 +3459,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) | |||
3459 | /* Wait for the Reset Device command to finish */ | 3459 | /* Wait for the Reset Device command to finish */ |
3460 | timeleft = wait_for_completion_interruptible_timeout( | 3460 | timeleft = wait_for_completion_interruptible_timeout( |
3461 | reset_device_cmd->completion, | 3461 | reset_device_cmd->completion, |
3462 | USB_CTRL_SET_TIMEOUT); | 3462 | XHCI_CMD_DEFAULT_TIMEOUT); |
3463 | if (timeleft <= 0) { | 3463 | if (timeleft <= 0) { |
3464 | xhci_warn(xhci, "%s while waiting for reset device command\n", | 3464 | xhci_warn(xhci, "%s while waiting for reset device command\n", |
3465 | timeleft == 0 ? "Timeout" : "Signal"); | 3465 | timeleft == 0 ? "Timeout" : "Signal"); |
@@ -3583,11 +3583,6 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) | |||
3583 | del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); | 3583 | del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); |
3584 | } | 3584 | } |
3585 | 3585 | ||
3586 | if (udev->usb2_hw_lpm_enabled) { | ||
3587 | xhci_set_usb2_hardware_lpm(hcd, udev, 0); | ||
3588 | udev->usb2_hw_lpm_enabled = 0; | ||
3589 | } | ||
3590 | |||
3591 | spin_lock_irqsave(&xhci->lock, flags); | 3586 | spin_lock_irqsave(&xhci->lock, flags); |
3592 | /* Don't disable the slot if the host controller is dead. */ | 3587 | /* Don't disable the slot if the host controller is dead. */ |
3593 | state = xhci_readl(xhci, &xhci->op_regs->status); | 3588 | state = xhci_readl(xhci, &xhci->op_regs->status); |
@@ -3721,9 +3716,6 @@ disable_slot: | |||
3721 | * the device). | 3716 | * the device). |
3722 | * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so | 3717 | * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so |
3723 | * we should only issue and wait on one address command at the same time. | 3718 | * we should only issue and wait on one address command at the same time. |
3724 | * | ||
3725 | * We add one to the device address issued by the hardware because the USB core | ||
3726 | * uses address 1 for the root hubs (even though they're not really devices). | ||
3727 | */ | 3719 | */ |
3728 | int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) | 3720 | int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) |
3729 | { | 3721 | { |
@@ -3868,16 +3860,13 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) | |||
3868 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); | 3860 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); |
3869 | trace_xhci_address_ctx(xhci, virt_dev->out_ctx, | 3861 | trace_xhci_address_ctx(xhci, virt_dev->out_ctx, |
3870 | slot_ctx->dev_info >> 27); | 3862 | slot_ctx->dev_info >> 27); |
3871 | /* Use kernel assigned address for devices; store xHC assigned | ||
3872 | * address locally. */ | ||
3873 | virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK) | ||
3874 | + 1; | ||
3875 | /* Zero the input context control for later use */ | 3863 | /* Zero the input context control for later use */ |
3876 | ctrl_ctx->add_flags = 0; | 3864 | ctrl_ctx->add_flags = 0; |
3877 | ctrl_ctx->drop_flags = 0; | 3865 | ctrl_ctx->drop_flags = 0; |
3878 | 3866 | ||
3879 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, | 3867 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
3880 | "Internal device address = %d", virt_dev->address); | 3868 | "Internal device address = %d", |
3869 | le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); | ||
3881 | 3870 | ||
3882 | return 0; | 3871 | return 0; |
3883 | } | 3872 | } |
@@ -4025,133 +4014,6 @@ static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev) | |||
4025 | return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm); | 4014 | return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm); |
4026 | } | 4015 | } |
4027 | 4016 | ||
4028 | static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd, | ||
4029 | struct usb_device *udev) | ||
4030 | { | ||
4031 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | ||
4032 | struct dev_info *dev_info; | ||
4033 | __le32 __iomem **port_array; | ||
4034 | __le32 __iomem *addr, *pm_addr; | ||
4035 | u32 temp, dev_id; | ||
4036 | unsigned int port_num; | ||
4037 | unsigned long flags; | ||
4038 | int hird; | ||
4039 | int ret; | ||
4040 | |||
4041 | if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support || | ||
4042 | !udev->lpm_capable) | ||
4043 | return -EINVAL; | ||
4044 | |||
4045 | /* we only support lpm for non-hub device connected to root hub yet */ | ||
4046 | if (!udev->parent || udev->parent->parent || | ||
4047 | udev->descriptor.bDeviceClass == USB_CLASS_HUB) | ||
4048 | return -EINVAL; | ||
4049 | |||
4050 | spin_lock_irqsave(&xhci->lock, flags); | ||
4051 | |||
4052 | /* Look for devices in lpm_failed_devs list */ | ||
4053 | dev_id = le16_to_cpu(udev->descriptor.idVendor) << 16 | | ||
4054 | le16_to_cpu(udev->descriptor.idProduct); | ||
4055 | list_for_each_entry(dev_info, &xhci->lpm_failed_devs, list) { | ||
4056 | if (dev_info->dev_id == dev_id) { | ||
4057 | ret = -EINVAL; | ||
4058 | goto finish; | ||
4059 | } | ||
4060 | } | ||
4061 | |||
4062 | port_array = xhci->usb2_ports; | ||
4063 | port_num = udev->portnum - 1; | ||
4064 | |||
4065 | if (port_num > HCS_MAX_PORTS(xhci->hcs_params1)) { | ||
4066 | xhci_dbg(xhci, "invalid port number %d\n", udev->portnum); | ||
4067 | ret = -EINVAL; | ||
4068 | goto finish; | ||
4069 | } | ||
4070 | |||
4071 | /* | ||
4072 | * Test USB 2.0 software LPM. | ||
4073 | * FIXME: some xHCI 1.0 hosts may implement a new register to set up | ||
4074 | * hardware-controlled USB 2.0 LPM. See section 5.4.11 and 4.23.5.1.1.1 | ||
4075 | * in the June 2011 errata release. | ||
4076 | */ | ||
4077 | xhci_dbg(xhci, "test port %d software LPM\n", port_num); | ||
4078 | /* | ||
4079 | * Set L1 Device Slot and HIRD/BESL. | ||
4080 | * Check device's USB 2.0 extension descriptor to determine whether | ||
4081 | * HIRD or BESL shoule be used. See USB2.0 LPM errata. | ||
4082 | */ | ||
4083 | pm_addr = port_array[port_num] + PORTPMSC; | ||
4084 | hird = xhci_calculate_hird_besl(xhci, udev); | ||
4085 | temp = PORT_L1DS(udev->slot_id) | PORT_HIRD(hird); | ||
4086 | xhci_writel(xhci, temp, pm_addr); | ||
4087 | |||
4088 | /* Set port link state to U2(L1) */ | ||
4089 | addr = port_array[port_num]; | ||
4090 | xhci_set_link_state(xhci, port_array, port_num, XDEV_U2); | ||
4091 | |||
4092 | /* wait for ACK */ | ||
4093 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
4094 | msleep(10); | ||
4095 | spin_lock_irqsave(&xhci->lock, flags); | ||
4096 | |||
4097 | /* Check L1 Status */ | ||
4098 | ret = xhci_handshake(xhci, pm_addr, | ||
4099 | PORT_L1S_MASK, PORT_L1S_SUCCESS, 125); | ||
4100 | if (ret != -ETIMEDOUT) { | ||
4101 | /* enter L1 successfully */ | ||
4102 | temp = xhci_readl(xhci, addr); | ||
4103 | xhci_dbg(xhci, "port %d entered L1 state, port status 0x%x\n", | ||
4104 | port_num, temp); | ||
4105 | ret = 0; | ||
4106 | } else { | ||
4107 | temp = xhci_readl(xhci, pm_addr); | ||
4108 | xhci_dbg(xhci, "port %d software lpm failed, L1 status %d\n", | ||
4109 | port_num, temp & PORT_L1S_MASK); | ||
4110 | ret = -EINVAL; | ||
4111 | } | ||
4112 | |||
4113 | /* Resume the port */ | ||
4114 | xhci_set_link_state(xhci, port_array, port_num, XDEV_U0); | ||
4115 | |||
4116 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
4117 | msleep(10); | ||
4118 | spin_lock_irqsave(&xhci->lock, flags); | ||
4119 | |||
4120 | /* Clear PLC */ | ||
4121 | xhci_test_and_clear_bit(xhci, port_array, port_num, PORT_PLC); | ||
4122 | |||
4123 | /* Check PORTSC to make sure the device is in the right state */ | ||
4124 | if (!ret) { | ||
4125 | temp = xhci_readl(xhci, addr); | ||
4126 | xhci_dbg(xhci, "resumed port %d status 0x%x\n", port_num, temp); | ||
4127 | if (!(temp & PORT_CONNECT) || !(temp & PORT_PE) || | ||
4128 | (temp & PORT_PLS_MASK) != XDEV_U0) { | ||
4129 | xhci_dbg(xhci, "port L1 resume fail\n"); | ||
4130 | ret = -EINVAL; | ||
4131 | } | ||
4132 | } | ||
4133 | |||
4134 | if (ret) { | ||
4135 | /* Insert dev to lpm_failed_devs list */ | ||
4136 | xhci_warn(xhci, "device LPM test failed, may disconnect and " | ||
4137 | "re-enumerate\n"); | ||
4138 | dev_info = kzalloc(sizeof(struct dev_info), GFP_ATOMIC); | ||
4139 | if (!dev_info) { | ||
4140 | ret = -ENOMEM; | ||
4141 | goto finish; | ||
4142 | } | ||
4143 | dev_info->dev_id = dev_id; | ||
4144 | INIT_LIST_HEAD(&dev_info->list); | ||
4145 | list_add(&dev_info->list, &xhci->lpm_failed_devs); | ||
4146 | } else { | ||
4147 | xhci_ring_device(xhci, udev->slot_id); | ||
4148 | } | ||
4149 | |||
4150 | finish: | ||
4151 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
4152 | return ret; | ||
4153 | } | ||
4154 | |||
4155 | int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, | 4017 | int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, |
4156 | struct usb_device *udev, int enable) | 4018 | struct usb_device *udev, int enable) |
4157 | { | 4019 | { |
@@ -4228,7 +4090,7 @@ int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, | |||
4228 | } | 4090 | } |
4229 | 4091 | ||
4230 | pm_val &= ~PORT_HIRD_MASK; | 4092 | pm_val &= ~PORT_HIRD_MASK; |
4231 | pm_val |= PORT_HIRD(hird) | PORT_RWE; | 4093 | pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id); |
4232 | xhci_writel(xhci, pm_val, pm_addr); | 4094 | xhci_writel(xhci, pm_val, pm_addr); |
4233 | pm_val = xhci_readl(xhci, pm_addr); | 4095 | pm_val = xhci_readl(xhci, pm_addr); |
4234 | pm_val |= PORT_HLE; | 4096 | pm_val |= PORT_HLE; |
@@ -4236,7 +4098,7 @@ int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, | |||
4236 | /* flush write */ | 4098 | /* flush write */ |
4237 | xhci_readl(xhci, pm_addr); | 4099 | xhci_readl(xhci, pm_addr); |
4238 | } else { | 4100 | } else { |
4239 | pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK); | 4101 | pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK); |
4240 | xhci_writel(xhci, pm_val, pm_addr); | 4102 | xhci_writel(xhci, pm_val, pm_addr); |
4241 | /* flush write */ | 4103 | /* flush write */ |
4242 | xhci_readl(xhci, pm_addr); | 4104 | xhci_readl(xhci, pm_addr); |
@@ -4279,24 +4141,26 @@ static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port, | |||
4279 | int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) | 4141 | int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) |
4280 | { | 4142 | { |
4281 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 4143 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
4282 | int ret; | ||
4283 | int portnum = udev->portnum - 1; | 4144 | int portnum = udev->portnum - 1; |
4284 | 4145 | ||
4285 | ret = xhci_usb2_software_lpm_test(hcd, udev); | 4146 | if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support || |
4286 | if (!ret) { | 4147 | !udev->lpm_capable) |
4287 | xhci_dbg(xhci, "software LPM test succeed\n"); | 4148 | return 0; |
4288 | if (xhci->hw_lpm_support == 1 && | 4149 | |
4289 | xhci_check_usb2_port_capability(xhci, portnum, XHCI_HLC)) { | 4150 | /* we only support lpm for non-hub device connected to root hub yet */ |
4290 | udev->usb2_hw_lpm_capable = 1; | 4151 | if (!udev->parent || udev->parent->parent || |
4291 | udev->l1_params.timeout = XHCI_L1_TIMEOUT; | 4152 | udev->descriptor.bDeviceClass == USB_CLASS_HUB) |
4292 | udev->l1_params.besl = XHCI_DEFAULT_BESL; | 4153 | return 0; |
4293 | if (xhci_check_usb2_port_capability(xhci, portnum, | 4154 | |
4294 | XHCI_BLC)) | 4155 | if (xhci->hw_lpm_support == 1 && |
4295 | udev->usb2_hw_lpm_besl_capable = 1; | 4156 | xhci_check_usb2_port_capability( |
4296 | ret = xhci_set_usb2_hardware_lpm(hcd, udev, 1); | 4157 | xhci, portnum, XHCI_HLC)) { |
4297 | if (!ret) | 4158 | udev->usb2_hw_lpm_capable = 1; |
4298 | udev->usb2_hw_lpm_enabled = 1; | 4159 | udev->l1_params.timeout = XHCI_L1_TIMEOUT; |
4299 | } | 4160 | udev->l1_params.besl = XHCI_DEFAULT_BESL; |
4161 | if (xhci_check_usb2_port_capability(xhci, portnum, | ||
4162 | XHCI_BLC)) | ||
4163 | udev->usb2_hw_lpm_besl_capable = 1; | ||
4300 | } | 4164 | } |
4301 | 4165 | ||
4302 | return 0; | 4166 | return 0; |
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 941d5f59e4dc..03c74b7965f8 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -383,6 +383,7 @@ struct xhci_op_regs { | |||
383 | #define PORT_RWE (1 << 3) | 383 | #define PORT_RWE (1 << 3) |
384 | #define PORT_HIRD(p) (((p) & 0xf) << 4) | 384 | #define PORT_HIRD(p) (((p) & 0xf) << 4) |
385 | #define PORT_HIRD_MASK (0xf << 4) | 385 | #define PORT_HIRD_MASK (0xf << 4) |
386 | #define PORT_L1DS_MASK (0xff << 8) | ||
386 | #define PORT_L1DS(p) (((p) & 0xff) << 8) | 387 | #define PORT_L1DS(p) (((p) & 0xff) << 8) |
387 | #define PORT_HLE (1 << 16) | 388 | #define PORT_HLE (1 << 16) |
388 | 389 | ||
@@ -934,8 +935,6 @@ struct xhci_virt_device { | |||
934 | /* Rings saved to ensure old alt settings can be re-instated */ | 935 | /* Rings saved to ensure old alt settings can be re-instated */ |
935 | struct xhci_ring **ring_cache; | 936 | struct xhci_ring **ring_cache; |
936 | int num_rings_cached; | 937 | int num_rings_cached; |
937 | /* Store xHC assigned device address */ | ||
938 | int address; | ||
939 | #define XHCI_MAX_RINGS_CACHED 31 | 938 | #define XHCI_MAX_RINGS_CACHED 31 |
940 | struct xhci_virt_ep eps[31]; | 939 | struct xhci_virt_ep eps[31]; |
941 | struct completion cmd_completion; | 940 | struct completion cmd_completion; |