author    Greg Kroah-Hartman <gregkh@suse.de>  2011-01-23 18:11:18 -0500
committer Greg Kroah-Hartman <gregkh@suse.de>  2011-01-23 18:14:07 -0500
commit    fd96d0d8d8079b1ea7a7e8943a4da9dfc9621124
tree      1d1314722847d56e3b703ec721f9534e93fa5580 /drivers
parent    3c47eb06f08eb970ea9d696bcdb57a175d37b470
parent    50d64676d132a8a72a1a1657d7b3e6efa53da1ac
Merge branch 'for-usb-linus' of master.kernel.org:/pub/scm/linux/kernel/git/sarah/xhci into usb-linus
* 'for-usb-linus' of master.kernel.org:/pub/scm/linux/kernel/git/sarah/xhci:
  xhci: Remove more doorbell-related reads
  xHCI: fix printk_ratelimit() usage
  xHCI: replace dev_dbg() with xhci_dbg()
  xHCI: fix cycle bit set in giveback_first_trb()
  xHCI: remove redundant parameter in giveback_first_trb()
  xHCI: fix queue_trb in isoc transfer
  xhci: Use GFP_NOIO during device reset.
  usb: Realloc xHCI structures after a hub is verified.
  xhci: Do not run xhci_cleanup_msix with irq disabled
  xHCI: synchronize irq in xhci_suspend()
  xhci: Resume bus on any port status change.
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/usb/core/hcd-pci.c   |  7
-rw-r--r--  drivers/usb/core/hub.c       | 21
-rw-r--r--  drivers/usb/host/xhci-ring.c | 91
-rw-r--r--  drivers/usb/host/xhci.c      | 60
-rw-r--r--  drivers/usb/host/xhci.h      | 16
5 files changed, 109 insertions(+), 86 deletions(-)
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index b55d46070a25..f71e8e307e0f 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -405,7 +405,12 @@ static int suspend_common(struct device *dev, bool do_wakeup)
 		return retval;
 	}
 
-	synchronize_irq(pci_dev->irq);
+	/* If MSI-X is enabled, the driver will have synchronized all vectors
+	 * in pci_suspend(). If MSI or legacy PCI is enabled, that will be
+	 * synchronized here.
+	 */
+	if (!hcd->msix_enabled)
+		synchronize_irq(pci_dev->irq);
 
 	/* Downstream ports from this root hub should already be quiesced, so
 	 * there will be no DMA activity.  Now we can shut down the upstream
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index b98efae6a1cf..4310cc4b1cb5 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -676,6 +676,8 @@ static void hub_init_func3(struct work_struct *ws);
 static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 {
 	struct usb_device *hdev = hub->hdev;
+	struct usb_hcd *hcd;
+	int ret;
 	int port1;
 	int status;
 	bool need_debounce_delay = false;
@@ -714,6 +716,25 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 			usb_autopm_get_interface_no_resume(
 					to_usb_interface(hub->intfdev));
 			return;		/* Continues at init2: below */
+		} else if (type == HUB_RESET_RESUME) {
+			/* The internal host controller state for the hub device
+			 * may be gone after a host power loss on system resume.
+			 * Update the device's info so the HW knows it's a hub.
+			 */
+			hcd = bus_to_hcd(hdev->bus);
+			if (hcd->driver->update_hub_device) {
+				ret = hcd->driver->update_hub_device(hcd, hdev,
+						&hub->tt, GFP_NOIO);
+				if (ret < 0) {
+					dev_err(hub->intfdev, "Host not "
+							"accepting hub info "
+							"update.\n");
+					dev_err(hub->intfdev, "LS/FS devices "
+							"and hubs may not work "
+							"under this hub\n.");
+				}
+			}
+			hub_power_on(hub, true);
 		} else {
 			hub_power_on(hub, true);
 		}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index df558f6f84e3..3e8211c1ce5a 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -308,11 +308,8 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
 /* Ring the host controller doorbell after placing a command on the ring */
 void xhci_ring_cmd_db(struct xhci_hcd *xhci)
 {
-	u32 temp;
-
 	xhci_dbg(xhci, "// Ding dong!\n");
-	temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
-	xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
+	xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
 	/* Flush PCI posted writes */
 	xhci_readl(xhci, &xhci->dba->doorbell[0]);
 }
@@ -322,26 +319,24 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
 		unsigned int ep_index,
 		unsigned int stream_id)
 {
-	struct xhci_virt_ep *ep;
-	unsigned int ep_state;
-	u32 field;
 	__u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
+	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+	unsigned int ep_state = ep->ep_state;
 
-	ep = &xhci->devs[slot_id]->eps[ep_index];
-	ep_state = ep->ep_state;
 	/* Don't ring the doorbell for this endpoint if there are pending
-	 * cancellations because the we don't want to interrupt processing.
+	 * cancellations because we don't want to interrupt processing.
 	 * We don't want to restart any stream rings if there's a set dequeue
 	 * pointer command pending because the device can choose to start any
 	 * stream once the endpoint is on the HW schedule.
 	 * FIXME - check all the stream rings for pending cancellations.
 	 */
-	if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING)
-			&& !(ep_state & EP_HALTED)) {
-		field = xhci_readl(xhci, db_addr) & DB_MASK;
-		field |= EPI_TO_DB(ep_index) | STREAM_ID_TO_DB(stream_id);
-		xhci_writel(xhci, field, db_addr);
-	}
+	if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
+	    (ep_state & EP_HALTED))
+		return;
+	xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);
+	/* The CPU has better things to do at this point than wait for a
+	 * write-posting flush.  It'll get there soon enough.
+	 */
 }
 
 /* Ring the doorbell for any rings with pending URBs */
@@ -1188,7 +1183,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
 
 	addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS * (port_id - 1);
 	temp = xhci_readl(xhci, addr);
-	if ((temp & PORT_CONNECT) && (hcd->state == HC_STATE_SUSPENDED)) {
+	if (hcd->state == HC_STATE_SUSPENDED) {
 		xhci_dbg(xhci, "resume root hub\n");
 		usb_hcd_resume_root_hub(hcd);
 	}
@@ -1710,8 +1705,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		/* Others already handled above */
 		break;
 	}
-	dev_dbg(&td->urb->dev->dev,
-			"ep %#x - asked for %d bytes, "
+	xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
 			"%d bytes untransferred\n",
 			td->urb->ep->desc.bEndpointAddress,
 			td->urb->transfer_buffer_length,
@@ -2389,7 +2383,8 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
 	}
 	xhci_dbg(xhci, "\n");
 	if (!in_interrupt())
-		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %d, sglist used, num_trbs = %d\n",
+		xhci_dbg(xhci, "ep %#x - urb len = %d, sglist used, "
+				"num_trbs = %d\n",
 				urb->ep->desc.bEndpointAddress,
 				urb->transfer_buffer_length,
 				num_trbs);
@@ -2414,14 +2409,17 @@ static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
 
 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
 		unsigned int ep_index, unsigned int stream_id, int start_cycle,
-		struct xhci_generic_trb *start_trb, struct xhci_td *td)
+		struct xhci_generic_trb *start_trb)
 {
 	/*
 	 * Pass all the TRBs to the hardware at once and make sure this write
 	 * isn't reordered.
 	 */
 	wmb();
-	start_trb->field[3] |= start_cycle;
+	if (start_cycle)
+		start_trb->field[3] |= start_cycle;
+	else
+		start_trb->field[3] &= ~0x1;
 	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
 }
 
@@ -2449,7 +2447,7 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	 * to set the polling interval (once the API is added).
 	 */
 	if (xhci_interval != ep_interval) {
-		if (!printk_ratelimit())
+		if (printk_ratelimit())
 			dev_dbg(&urb->dev->dev, "Driver uses different interval"
 					" (%d microframe%s) than xHCI "
 					"(%d microframe%s)\n",
@@ -2551,9 +2549,11 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		u32 remainder = 0;
 
 		/* Don't change the cycle bit of the first TRB until later */
-		if (first_trb)
+		if (first_trb) {
 			first_trb = false;
-		else
+			if (start_cycle == 0)
+				field |= 0x1;
+		} else
 			field |= ep_ring->cycle_state;
 
 		/* Chain all the TRBs together; clear the chain bit in the last
@@ -2625,7 +2625,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 	check_trb_math(urb, num_trbs, running_total);
 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
-			start_cycle, start_trb, td);
+			start_cycle, start_trb);
 	return 0;
 }
 
@@ -2671,7 +2671,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
 
 	if (!in_interrupt())
-		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#llx, num_trbs = %d\n",
+		xhci_dbg(xhci, "ep %#x - urb len = %#x (%d), "
+				"addr = %#llx, num_trbs = %d\n",
 				urb->ep->desc.bEndpointAddress,
 				urb->transfer_buffer_length,
 				urb->transfer_buffer_length,
@@ -2711,9 +2712,11 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		field = 0;
 
 		/* Don't change the cycle bit of the first TRB until later */
-		if (first_trb)
+		if (first_trb) {
 			first_trb = false;
-		else
+			if (start_cycle == 0)
+				field |= 0x1;
+		} else
 			field |= ep_ring->cycle_state;
 
 		/* Chain all the TRBs together; clear the chain bit in the last
@@ -2757,7 +2760,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 	check_trb_math(urb, num_trbs, running_total);
 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
-			start_cycle, start_trb, td);
+			start_cycle, start_trb);
 	return 0;
 }
 
@@ -2818,13 +2821,17 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	/* Queue setup TRB - see section 6.4.1.2.1 */
 	/* FIXME better way to translate setup_packet into two u32 fields? */
 	setup = (struct usb_ctrlrequest *) urb->setup_packet;
+	field = 0;
+	field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
+	if (start_cycle == 0)
+		field |= 0x1;
 	queue_trb(xhci, ep_ring, false, true,
 			/* FIXME endianness is probably going to bite my ass here. */
 			setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
 			setup->wIndex | setup->wLength << 16,
 			TRB_LEN(8) | TRB_INTR_TARGET(0),
 			/* Immediate data in pointer */
-			TRB_IDT | TRB_TYPE(TRB_SETUP));
+			field);
 
 	/* If there's data, queue data TRBs */
 	field = 0;
@@ -2859,7 +2866,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
 
 	giveback_first_trb(xhci, slot_id, ep_index, 0,
-			start_cycle, start_trb, td);
+			start_cycle, start_trb);
 	return 0;
 }
 
@@ -2900,6 +2907,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	int running_total, trb_buff_len, td_len, td_remain_len, ret;
 	u64 start_addr, addr;
 	int i, j;
+	bool more_trbs_coming;
 
 	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
 
@@ -2910,7 +2918,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	}
 
 	if (!in_interrupt())
-		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d),"
+		xhci_dbg(xhci, "ep %#x - urb len = %#x (%d),"
 				" addr = %#llx, num_tds = %d\n",
 				urb->ep->desc.bEndpointAddress,
 				urb->transfer_buffer_length,
@@ -2950,7 +2958,10 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			field |= TRB_TYPE(TRB_ISOC);
 			/* Assume URB_ISO_ASAP is set */
 			field |= TRB_SIA;
-			if (i > 0)
+			if (i == 0) {
+				if (start_cycle == 0)
+					field |= 0x1;
+			} else
 				field |= ep_ring->cycle_state;
 			first_trb = false;
 		} else {
@@ -2965,9 +2976,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			 */
 			if (j < trbs_per_td - 1) {
 				field |= TRB_CHAIN;
+				more_trbs_coming = true;
 			} else {
 				td->last_trb = ep_ring->enqueue;
 				field |= TRB_IOC;
+				more_trbs_coming = false;
 			}
 
 			/* Calculate TRB length */
@@ -2980,7 +2993,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			length_field = TRB_LEN(trb_buff_len) |
 				remainder |
 				TRB_INTR_TARGET(0);
-			queue_trb(xhci, ep_ring, false, false,
+			queue_trb(xhci, ep_ring, false, more_trbs_coming,
 				lower_32_bits(addr),
 				upper_32_bits(addr),
 				length_field,
@@ -3003,10 +3016,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		}
 	}
 
-	wmb();
-	start_trb->field[3] |= start_cycle;
-
-	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, urb->stream_id);
+	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
+			start_cycle, start_trb);
 	return 0;
 }
 
@@ -3064,7 +3075,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
 	 * to set the polling interval (once the API is added).
 	 */
 	if (xhci_interval != ep_interval) {
-		if (!printk_ratelimit())
+		if (printk_ratelimit())
 			dev_dbg(&urb->dev->dev, "Driver uses different interval"
 					" (%d microframe%s) than xHCI "
 					"(%d microframe%s)\n",
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 45e4a3108cc3..34cf4e165877 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -226,7 +226,8 @@ static int xhci_setup_msi(struct xhci_hcd *xhci)
 static int xhci_setup_msix(struct xhci_hcd *xhci)
 {
 	int i, ret = 0;
-	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+	struct usb_hcd *hcd = xhci_to_hcd(xhci);
+	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
 
 	/*
 	 * calculate number of msi-x vectors supported.
@@ -265,6 +266,7 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
 		goto disable_msix;
 	}
 
+	hcd->msix_enabled = 1;
 	return ret;
 
 disable_msix:
@@ -280,7 +282,8 @@ free_entries:
 /* Free any IRQs and disable MSI-X */
 static void xhci_cleanup_msix(struct xhci_hcd *xhci)
 {
-	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+	struct usb_hcd *hcd = xhci_to_hcd(xhci);
+	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
 
 	xhci_free_irq(xhci);
 
@@ -292,6 +295,7 @@ static void xhci_cleanup_msix(struct xhci_hcd *xhci)
 		pci_disable_msi(pdev);
 	}
 
+	hcd->msix_enabled = 0;
 	return;
 }
 
@@ -508,9 +512,10 @@ void xhci_stop(struct usb_hcd *hcd)
 	spin_lock_irq(&xhci->lock);
 	xhci_halt(xhci);
 	xhci_reset(xhci);
-	xhci_cleanup_msix(xhci);
 	spin_unlock_irq(&xhci->lock);
 
+	xhci_cleanup_msix(xhci);
+
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
 	/* Tell the event ring poll function not to reschedule */
 	xhci->zombie = 1;
@@ -544,9 +549,10 @@ void xhci_shutdown(struct usb_hcd *hcd)
 
 	spin_lock_irq(&xhci->lock);
 	xhci_halt(xhci);
-	xhci_cleanup_msix(xhci);
 	spin_unlock_irq(&xhci->lock);
 
+	xhci_cleanup_msix(xhci);
+
 	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
 		    xhci_readl(xhci, &xhci->op_regs->status));
 }
@@ -647,6 +653,7 @@ int xhci_suspend(struct xhci_hcd *xhci)
 	int rc = 0;
 	struct usb_hcd *hcd = xhci_to_hcd(xhci);
 	u32 command;
+	int i;
 
 	spin_lock_irq(&xhci->lock);
 	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
@@ -677,10 +684,15 @@ int xhci_suspend(struct xhci_hcd *xhci)
 		spin_unlock_irq(&xhci->lock);
 		return -ETIMEDOUT;
 	}
-	/* step 5: remove core well power */
-	xhci_cleanup_msix(xhci);
 	spin_unlock_irq(&xhci->lock);
 
+	/* step 5: remove core well power */
+	/* synchronize irq when using MSI-X */
+	if (xhci->msix_entries) {
+		for (i = 0; i < xhci->msix_count; i++)
+			synchronize_irq(xhci->msix_entries[i].vector);
+	}
+
 	return rc;
 }
 
@@ -694,7 +706,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 {
 	u32 command, temp = 0;
 	struct usb_hcd *hcd = xhci_to_hcd(xhci);
-	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
 	int old_state, retval;
 
 	old_state = hcd->state;
@@ -729,9 +740,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 		xhci_dbg(xhci, "Stop HCD\n");
 		xhci_halt(xhci);
 		xhci_reset(xhci);
-		if (hibernated)
-			xhci_cleanup_msix(xhci);
 		spin_unlock_irq(&xhci->lock);
+		xhci_cleanup_msix(xhci);
 
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
 		/* Tell the event ring poll function not to reschedule */
@@ -765,30 +775,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 		return retval;
 	}
 
-	spin_unlock_irq(&xhci->lock);
-	/* Re-setup MSI-X */
-	if (hcd->irq)
-		free_irq(hcd->irq, hcd);
-	hcd->irq = -1;
-
-	retval = xhci_setup_msix(xhci);
-	if (retval)
-		/* fall back to msi*/
-		retval = xhci_setup_msi(xhci);
-
-	if (retval) {
-		/* fall back to legacy interrupt*/
-		retval = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
-					hcd->irq_descr, hcd);
-		if (retval) {
-			xhci_err(xhci, "request interrupt %d failed\n",
-					pdev->irq);
-			return retval;
-		}
-		hcd->irq = pdev->irq;
-	}
-
-	spin_lock_irq(&xhci->lock);
 	/* step 4: set Run/Stop bit */
 	command = xhci_readl(xhci, &xhci->op_regs->command);
 	command |= CMD_RUN;
@@ -2445,8 +2431,12 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
 		xhci_err(xhci, "Error while assigning device slot ID\n");
 		return 0;
 	}
-	/* xhci_alloc_virt_device() does not touch rings; no need to lock */
-	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
+	/* xhci_alloc_virt_device() does not touch rings; no need to lock.
+	 * Use GFP_NOIO, since this function can be called from
+	 * xhci_discover_or_reset_device(), which may be called as part of
+	 * mass storage driver error handling.
+	 */
+	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
 		/* Disable slot, if we can do it without mem alloc */
 		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
 		spin_lock_irqsave(&xhci->lock, flags);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 170c367112d2..7f236fd22015 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -436,22 +436,18 @@ struct xhci_run_regs {
 /**
  * struct doorbell_array
  *
+ * Bits  0 -  7: Endpoint target
+ * Bits  8 - 15: RsvdZ
+ * Bits 16 - 31: Stream ID
+ *
  * Section 5.6
  */
 struct xhci_doorbell_array {
 	u32	doorbell[256];
 };
 
-#define	DB_TARGET_MASK		0xFFFFFF00
-#define	DB_STREAM_ID_MASK	0x0000FFFF
-#define	DB_TARGET_HOST		0x0
-#define	DB_STREAM_ID_HOST	0x0
-#define	DB_MASK			(0xff << 8)
-
-/* Endpoint Target - bits 0:7 */
-#define EPI_TO_DB(p)		(((p) + 1) & 0xff)
-#define	STREAM_ID_TO_DB(p)	(((p) & 0xffff) << 16)
-
+#define DB_VALUE(ep, stream)	((((ep) + 1) & 0xff) | ((stream) << 16))
+#define DB_VALUE_HOST		0x00000000
 
 /**
  * struct xhci_protocol_caps