diff options
author | Alek Du <alek.du@intel.com> | 2009-07-13 19:23:29 -0400 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@suse.de> | 2009-09-23 09:46:29 -0400 |
commit | 3807e26d69b9ad3864fe03224ebebc9610d5802e (patch) | |
tree | 3c85a5cb0686a7e72255c523b963942bbfc60b7f /drivers/usb/host/ehci-q.c | |
parent | 403dbd36739e344d2d25f56ebbe342248487bd48 (diff) |
USB: EHCI: split ehci_qh into hw and sw parts
The ehci_qh structure merged hw and sw together which is not good:
1. More and more items are being added into ehci_qh; the ehci_qh software
part does not need to be allocated in the DMA qh_pool.
2. If the HCD has local SRAM, the sw part will consume it too, without
bringing any benefit.
3. For non-cache-coherent systems, the entire ehci_qh is uncacheable, when
actually only the hw part needs to be uncacheable. Splitting them lets the sw
part be cacheable.
Signed-off-by: Alek Du <alek.du@intel.com>
Cc: David Brownell <dbrownell@users.sourceforge.net>
CC: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/usb/host/ehci-q.c')
-rw-r--r-- | drivers/usb/host/ehci-q.c | 50 |
1 files changed, 28 insertions, 22 deletions
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 7673554fa64d..377ed530b920 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c | |||
@@ -87,31 +87,33 @@ qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf, | |||
87 | static inline void | 87 | static inline void |
88 | qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd) | 88 | qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd) |
89 | { | 89 | { |
90 | struct ehci_qh_hw *hw = qh->hw; | ||
91 | |||
90 | /* writes to an active overlay are unsafe */ | 92 | /* writes to an active overlay are unsafe */ |
91 | BUG_ON(qh->qh_state != QH_STATE_IDLE); | 93 | BUG_ON(qh->qh_state != QH_STATE_IDLE); |
92 | 94 | ||
93 | qh->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma); | 95 | hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma); |
94 | qh->hw_alt_next = EHCI_LIST_END(ehci); | 96 | hw->hw_alt_next = EHCI_LIST_END(ehci); |
95 | 97 | ||
96 | /* Except for control endpoints, we make hardware maintain data | 98 | /* Except for control endpoints, we make hardware maintain data |
97 | * toggle (like OHCI) ... here (re)initialize the toggle in the QH, | 99 | * toggle (like OHCI) ... here (re)initialize the toggle in the QH, |
98 | * and set the pseudo-toggle in udev. Only usb_clear_halt() will | 100 | * and set the pseudo-toggle in udev. Only usb_clear_halt() will |
99 | * ever clear it. | 101 | * ever clear it. |
100 | */ | 102 | */ |
101 | if (!(qh->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) { | 103 | if (!(hw->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) { |
102 | unsigned is_out, epnum; | 104 | unsigned is_out, epnum; |
103 | 105 | ||
104 | is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8)); | 106 | is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8)); |
105 | epnum = (hc32_to_cpup(ehci, &qh->hw_info1) >> 8) & 0x0f; | 107 | epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f; |
106 | if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) { | 108 | if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) { |
107 | qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE); | 109 | hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE); |
108 | usb_settoggle (qh->dev, epnum, is_out, 1); | 110 | usb_settoggle (qh->dev, epnum, is_out, 1); |
109 | } | 111 | } |
110 | } | 112 | } |
111 | 113 | ||
112 | /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */ | 114 | /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */ |
113 | wmb (); | 115 | wmb (); |
114 | qh->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING); | 116 | hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING); |
115 | } | 117 | } |
116 | 118 | ||
117 | /* if it weren't for a common silicon quirk (writing the dummy into the qh | 119 | /* if it weren't for a common silicon quirk (writing the dummy into the qh |
@@ -129,7 +131,7 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
129 | qtd = list_entry (qh->qtd_list.next, | 131 | qtd = list_entry (qh->qtd_list.next, |
130 | struct ehci_qtd, qtd_list); | 132 | struct ehci_qtd, qtd_list); |
131 | /* first qtd may already be partially processed */ | 133 | /* first qtd may already be partially processed */ |
132 | if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw_current) | 134 | if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current) |
133 | qtd = NULL; | 135 | qtd = NULL; |
134 | } | 136 | } |
135 | 137 | ||
@@ -260,7 +262,7 @@ __acquires(ehci->lock) | |||
260 | struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv; | 262 | struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv; |
261 | 263 | ||
262 | /* S-mask in a QH means it's an interrupt urb */ | 264 | /* S-mask in a QH means it's an interrupt urb */ |
263 | if ((qh->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) { | 265 | if ((qh->hw->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) { |
264 | 266 | ||
265 | /* ... update hc-wide periodic stats (for usbfs) */ | 267 | /* ... update hc-wide periodic stats (for usbfs) */ |
266 | ehci_to_hcd(ehci)->self.bandwidth_int_reqs--; | 268 | ehci_to_hcd(ehci)->self.bandwidth_int_reqs--; |
@@ -315,6 +317,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
315 | unsigned count = 0; | 317 | unsigned count = 0; |
316 | u8 state; | 318 | u8 state; |
317 | __le32 halt = HALT_BIT(ehci); | 319 | __le32 halt = HALT_BIT(ehci); |
320 | struct ehci_qh_hw *hw = qh->hw; | ||
318 | 321 | ||
319 | if (unlikely (list_empty (&qh->qtd_list))) | 322 | if (unlikely (list_empty (&qh->qtd_list))) |
320 | return count; | 323 | return count; |
@@ -392,7 +395,8 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
392 | qtd->hw_token = cpu_to_hc32(ehci, | 395 | qtd->hw_token = cpu_to_hc32(ehci, |
393 | token); | 396 | token); |
394 | wmb(); | 397 | wmb(); |
395 | qh->hw_token = cpu_to_hc32(ehci, token); | 398 | hw->hw_token = cpu_to_hc32(ehci, |
399 | token); | ||
396 | goto retry_xacterr; | 400 | goto retry_xacterr; |
397 | } | 401 | } |
398 | stopped = 1; | 402 | stopped = 1; |
@@ -435,8 +439,8 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
435 | /* qh unlinked; token in overlay may be most current */ | 439 | /* qh unlinked; token in overlay may be most current */ |
436 | if (state == QH_STATE_IDLE | 440 | if (state == QH_STATE_IDLE |
437 | && cpu_to_hc32(ehci, qtd->qtd_dma) | 441 | && cpu_to_hc32(ehci, qtd->qtd_dma) |
438 | == qh->hw_current) { | 442 | == hw->hw_current) { |
439 | token = hc32_to_cpu(ehci, qh->hw_token); | 443 | token = hc32_to_cpu(ehci, hw->hw_token); |
440 | 444 | ||
441 | /* An unlink may leave an incomplete | 445 | /* An unlink may leave an incomplete |
442 | * async transaction in the TT buffer. | 446 | * async transaction in the TT buffer. |
@@ -449,9 +453,9 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
449 | * patch the qh later and so that completions can't | 453 | * patch the qh later and so that completions can't |
450 | * activate it while we "know" it's stopped. | 454 | * activate it while we "know" it's stopped. |
451 | */ | 455 | */ |
452 | if ((halt & qh->hw_token) == 0) { | 456 | if ((halt & hw->hw_token) == 0) { |
453 | halt: | 457 | halt: |
454 | qh->hw_token |= halt; | 458 | hw->hw_token |= halt; |
455 | wmb (); | 459 | wmb (); |
456 | } | 460 | } |
457 | } | 461 | } |
@@ -510,7 +514,7 @@ halt: | |||
510 | * it after fault cleanup, or recovering from silicon wrongly | 514 | * it after fault cleanup, or recovering from silicon wrongly |
511 | * overlaying the dummy qtd (which reduces DMA chatter). | 515 | * overlaying the dummy qtd (which reduces DMA chatter). |
512 | */ | 516 | */ |
513 | if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END(ehci)) { | 517 | if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci)) { |
514 | switch (state) { | 518 | switch (state) { |
515 | case QH_STATE_IDLE: | 519 | case QH_STATE_IDLE: |
516 | qh_refresh(ehci, qh); | 520 | qh_refresh(ehci, qh); |
@@ -528,7 +532,7 @@ halt: | |||
528 | * except maybe high bandwidth ... | 532 | * except maybe high bandwidth ... |
529 | */ | 533 | */ |
530 | if ((cpu_to_hc32(ehci, QH_SMASK) | 534 | if ((cpu_to_hc32(ehci, QH_SMASK) |
531 | & qh->hw_info2) != 0) { | 535 | & hw->hw_info2) != 0) { |
532 | intr_deschedule (ehci, qh); | 536 | intr_deschedule (ehci, qh); |
533 | (void) qh_schedule (ehci, qh); | 537 | (void) qh_schedule (ehci, qh); |
534 | } else | 538 | } else |
@@ -649,7 +653,7 @@ qh_urb_transaction ( | |||
649 | * (this will usually be overridden later.) | 653 | * (this will usually be overridden later.) |
650 | */ | 654 | */ |
651 | if (is_input) | 655 | if (is_input) |
652 | qtd->hw_alt_next = ehci->async->hw_alt_next; | 656 | qtd->hw_alt_next = ehci->async->hw->hw_alt_next; |
653 | 657 | ||
654 | /* qh makes control packets use qtd toggle; maybe switch it */ | 658 | /* qh makes control packets use qtd toggle; maybe switch it */ |
655 | if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0) | 659 | if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0) |
@@ -744,6 +748,7 @@ qh_make ( | |||
744 | int is_input, type; | 748 | int is_input, type; |
745 | int maxp = 0; | 749 | int maxp = 0; |
746 | struct usb_tt *tt = urb->dev->tt; | 750 | struct usb_tt *tt = urb->dev->tt; |
751 | struct ehci_qh_hw *hw; | ||
747 | 752 | ||
748 | if (!qh) | 753 | if (!qh) |
749 | return qh; | 754 | return qh; |
@@ -890,8 +895,9 @@ done: | |||
890 | 895 | ||
891 | /* init as live, toggle clear, advance to dummy */ | 896 | /* init as live, toggle clear, advance to dummy */ |
892 | qh->qh_state = QH_STATE_IDLE; | 897 | qh->qh_state = QH_STATE_IDLE; |
893 | qh->hw_info1 = cpu_to_hc32(ehci, info1); | 898 | hw = qh->hw; |
894 | qh->hw_info2 = cpu_to_hc32(ehci, info2); | 899 | hw->hw_info1 = cpu_to_hc32(ehci, info1); |
900 | hw->hw_info2 = cpu_to_hc32(ehci, info2); | ||
895 | usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1); | 901 | usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1); |
896 | qh_refresh (ehci, qh); | 902 | qh_refresh (ehci, qh); |
897 | return qh; | 903 | return qh; |
@@ -933,11 +939,11 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
933 | 939 | ||
934 | /* splice right after start */ | 940 | /* splice right after start */ |
935 | qh->qh_next = head->qh_next; | 941 | qh->qh_next = head->qh_next; |
936 | qh->hw_next = head->hw_next; | 942 | qh->hw->hw_next = head->hw->hw_next; |
937 | wmb (); | 943 | wmb (); |
938 | 944 | ||
939 | head->qh_next.qh = qh; | 945 | head->qh_next.qh = qh; |
940 | head->hw_next = dma; | 946 | head->hw->hw_next = dma; |
941 | 947 | ||
942 | qh_get(qh); | 948 | qh_get(qh); |
943 | qh->xacterrs = 0; | 949 | qh->xacterrs = 0; |
@@ -984,7 +990,7 @@ static struct ehci_qh *qh_append_tds ( | |||
984 | 990 | ||
985 | /* usb_reset_device() briefly reverts to address 0 */ | 991 | /* usb_reset_device() briefly reverts to address 0 */ |
986 | if (usb_pipedevice (urb->pipe) == 0) | 992 | if (usb_pipedevice (urb->pipe) == 0) |
987 | qh->hw_info1 &= ~qh_addr_mask; | 993 | qh->hw->hw_info1 &= ~qh_addr_mask; |
988 | } | 994 | } |
989 | 995 | ||
990 | /* just one way to queue requests: swap with the dummy qtd. | 996 | /* just one way to queue requests: swap with the dummy qtd. |
@@ -1169,7 +1175,7 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
1169 | while (prev->qh_next.qh != qh) | 1175 | while (prev->qh_next.qh != qh) |
1170 | prev = prev->qh_next.qh; | 1176 | prev = prev->qh_next.qh; |
1171 | 1177 | ||
1172 | prev->hw_next = qh->hw_next; | 1178 | prev->hw->hw_next = qh->hw->hw_next; |
1173 | prev->qh_next = qh->qh_next; | 1179 | prev->qh_next = qh->qh_next; |
1174 | wmb (); | 1180 | wmb (); |
1175 | 1181 | ||