author		Sarah Sharp <sarah.a.sharp@linux.intel.com>	2010-07-30 01:12:49 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2010-08-10 17:35:44 -0400
commit		c06d68b814d556cff5a4dc589215f5ed9f0b7fd5 (patch)
tree		00c8213bd42c6134d817a1476f4a654a8a51bc33 /drivers/usb/host/xhci-ring.c
parent		d6d98a4d8d2411bca7e15d9c0796bf3bc30c3f21 (diff)
USB: xhci: Minimize HW event ring dequeue pointer writes.
The xHCI specification notes that writing the hardware event ring dequeue
pointer register too often can be an expensive operation for the xHCI
hardware to manage, and it recommends minimizing the number of writes to
that register.
Originally, the driver wrote the event ring dequeue pointer after each
event was processed. Depending on how the event ring moderation register
is set up and how fast the transfers are completing, there may be several
events processed for each interrupt. With this patch, the hardware event
ring dequeue pointer is written only once per interrupt.
Make the transfer event handler and port status event handler only write
the software event ring dequeue pointer. Move the updating of the
hardware event ring dequeue pointer into the interrupt function. Move the
contents of xhci_set_hc_event_deq() into the interrupt handler. The
interrupt handler must clear the event handler busy flag, so it might as
well also write the dequeue pointer to the same register. This eliminates
two 32-bit PCI reads and two 32-bit PCI writes.
Reported-by: Andiry Xu <andiry.xu@amd.com>
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
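
To make the effect concrete, here is a small stand-alone sketch that counts how many
times the erst_dequeue register gets written when it is updated per event versus once
per interrupt. It is a toy model, not the driver code: the ERST_EHB and ERST_PTR_MASK
values mirror the driver's definitions, but the fake register, the helper
write_erst_dequeue(), and the loop variables are invented for illustration.

/* Toy model: per-event vs. per-interrupt updates of the event ring
 * dequeue register.  Stand-alone simulation only; the TRB size (16 bytes)
 * and the flag bits in the low nibble follow the xHCI driver's layout. */
#include <stdint.h>
#include <stdio.h>

#define ERST_PTR_MASK	0xfULL		/* low bits hold flags, not the pointer */
#define ERST_EHB	(1ULL << 3)	/* event handler busy, write-1-to-clear */

static uint64_t erst_dequeue;		/* pretend MMIO register */
static unsigned int mmio_writes;	/* how many times we touched it */

static void write_erst_dequeue(uint64_t val)
{
	erst_dequeue = val;
	mmio_writes++;
}

int main(void)
{
	const int events_per_irq = 5;	/* several completions before one IRQ */
	uint64_t sw_deq = 0x1000;	/* software dequeue pointer (DMA address) */

	/* Old scheme: update the hardware register after every event. */
	for (int i = 0; i < events_per_irq; i++) {
		sw_deq += 16;				/* inc_deq(): advance one TRB */
		write_erst_dequeue(sw_deq & ~ERST_PTR_MASK);
	}
	printf("per-event scheme: %u register writes\n", mmio_writes);

	/* New scheme: only advance the SW pointer per event, then issue one
	 * write at the end of the interrupt that both updates the pointer
	 * and clears the event handler busy flag. */
	mmio_writes = 0;
	uint64_t irq_entry_deq = sw_deq;
	for (int i = 0; i < events_per_irq; i++)
		sw_deq += 16;				/* inc_deq() only */

	uint64_t temp = erst_dequeue & ERST_PTR_MASK;	/* keep the flag bits */
	if (sw_deq != irq_entry_deq)			/* pointer actually moved */
		temp |= sw_deq & ~ERST_PTR_MASK;
	write_erst_dequeue(temp | ERST_EHB);		/* single write clears EHB */
	printf("per-IRQ scheme:   %u register write(s)\n", mmio_writes);
	return 0;
}

With five events per interrupt the old scheme issues five MMIO writes; the new one
issues a single write that also clears the event handler busy flag.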
Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r--	drivers/usb/host/xhci-ring.c	50
1 file changed, 37 insertions(+), 13 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 3b962ba5ef08..7dfd17707d53 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1184,7 +1184,6 @@ static void handle_port_status(struct xhci_hcd *xhci,
 
 	/* Update event ring dequeue pointer before dropping the lock */
 	inc_deq(xhci, xhci->event_ring, true);
-	xhci_set_hc_event_deq(xhci);
 
 	spin_unlock(&xhci->lock);
 	/* Pass this up to the core */
@@ -1924,7 +1923,6 @@ cleanup:
 	 */
 	if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
 		inc_deq(xhci, xhci->event_ring, true);
-		xhci_set_hc_event_deq(xhci);
 	}
 
 	if (ret) {
@@ -2022,11 +2020,10 @@ static void xhci_handle_event(struct xhci_hcd *xhci)
 		return;
 	}
 
-	if (update_ptrs) {
-		/* Update SW and HC event ring dequeue pointer */
+	if (update_ptrs)
+		/* Update SW event ring dequeue pointer */
 		inc_deq(xhci, xhci->event_ring, true);
-		xhci_set_hc_event_deq(xhci);
-	}
+
 	/* Are there more items on the event ring? */
 	xhci_handle_event(xhci);
 }
@@ -2042,6 +2039,8 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
 	u32 status, irq_pending;
 	union xhci_trb *trb;
 	u64 temp_64;
+	union xhci_trb *event_ring_deq;
+	dma_addr_t deq;
 
 	spin_lock(&xhci->lock);
 	trb = xhci->event_ring->dequeue;
@@ -2090,18 +2089,43 @@ hw_died:
 	irq_pending |= 0x3;
 	xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
 
-	if (xhci->xhc_state & XHCI_STATE_DYING)
+	if (xhci->xhc_state & XHCI_STATE_DYING) {
 		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
 				"Shouldn't IRQs be disabled?\n");
-	else
-		/* FIXME this should be a delayed service routine
-		 * that clears the EHB.
+		/* Clear the event handler busy flag (RW1C);
+		 * the event ring should be empty.
 		 */
-		xhci_handle_event(xhci);
+		temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+		xhci_write_64(xhci, temp_64 | ERST_EHB,
+				&xhci->ir_set->erst_dequeue);
+		spin_unlock(&xhci->lock);
+
+		return IRQ_HANDLED;
+	}
+
+	event_ring_deq = xhci->event_ring->dequeue;
+	/* FIXME this should be a delayed service routine
+	 * that clears the EHB.
+	 */
+	xhci_handle_event(xhci);
 
-	/* Clear the event handler busy flag (RW1C); event ring is empty. */
 	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
-	xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue);
+	/* If necessary, update the HW's version of the event ring deq ptr. */
+	if (event_ring_deq != xhci->event_ring->dequeue) {
+		deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
+				xhci->event_ring->dequeue);
+		if (deq == 0)
+			xhci_warn(xhci, "WARN something wrong with SW event "
+					"ring dequeue ptr.\n");
+		/* Update HC event ring dequeue pointer */
+		temp_64 &= ERST_PTR_MASK;
+		temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
+	}
+
+	/* Clear the event handler busy flag (RW1C); event ring is empty. */
+	temp_64 |= ERST_EHB;
+	xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
+
 	spin_unlock(&xhci->lock);
 
 	return IRQ_HANDLED;