Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
 -rw-r--r--   drivers/usb/host/xhci-ring.c   62
 1 file changed, 46 insertions(+), 16 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 9012098add6b..94e6934edb09 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -182,8 +182,12 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
  * set, but other sections talk about dealing with the chain bit set. This was
  * fixed in the 0.96 specification errata, but we have to assume that all 0.95
  * xHCI hardware can't handle the chain bit being cleared on a link TRB.
+ *
+ * @more_trbs_coming:	Will you enqueue more TRBs before calling
+ *			prepare_transfer()?
  */
-static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
+static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
+		bool consumer, bool more_trbs_coming)
 {
 	u32 chain;
 	union xhci_trb *next;
@@ -199,15 +203,28 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
 	while (last_trb(xhci, ring, ring->enq_seg, next)) {
 		if (!consumer) {
 			if (ring != xhci->event_ring) {
-				if (chain) {
-					next->link.control |= TRB_CHAIN;
-
-					/* Give this link TRB to the hardware */
-					wmb();
-					next->link.control ^= TRB_CYCLE;
-				} else {
+				/*
+				 * If the caller doesn't plan on enqueueing more
+				 * TDs before ringing the doorbell, then we
+				 * don't want to give the link TRB to the
+				 * hardware just yet. We'll give the link TRB
+				 * back in prepare_ring() just before we enqueue
+				 * the TD at the top of the ring.
+				 */
+				if (!chain && !more_trbs_coming)
 					break;
+
+				/* If we're not dealing with 0.95 hardware,
+				 * carry over the chain bit of the previous TRB
+				 * (which may mean the chain bit is cleared).
+				 */
+				if (!xhci_link_trb_quirk(xhci)) {
+					next->link.control &= ~TRB_CHAIN;
+					next->link.control |= chain;
 				}
+				/* Give this link TRB to the hardware */
+				wmb();
+				next->link.control ^= TRB_CYCLE;
 			}
 			/* Toggle the cycle bit after the last ring segment. */
 			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
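One way to read the new link-TRB handling above: the producer only stops short at a link TRB when the TD just queued is complete (chain bit clear) and the caller has promised no further TRBs before the doorbell rings; otherwise the link TRB is handed to the hardware, carrying over the previous TRB's chain bit unless the 0.95 quirk forces it to stay set. The standalone sketch below models just that decision with simplified stand-in names (handle_link_trb, link_trb_action, and the quirk flag are illustrative, not the driver's actual definitions):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the driver's link TRB control bits. */
#define TRB_CHAIN	(1u << 4)
#define TRB_CYCLE	(1u << 0)

enum link_trb_action { DEFER_LINK_TRB, GIVE_LINK_TRB_TO_HW };

/*
 * Model of the decision inc_enq() makes at a link TRB:
 *  - chain:            chain bit of the TRB just enqueued (TD still open?)
 *  - more_trbs_coming: caller promises more TRBs before ringing the doorbell
 *  - link_trb_quirk:   0.95 hardware that can't tolerate a cleared chain bit
 */
static enum link_trb_action
handle_link_trb(unsigned int *link_control, unsigned int chain,
		bool more_trbs_coming, bool link_trb_quirk)
{
	if (!chain && !more_trbs_coming)
		return DEFER_LINK_TRB;	/* handed over later, in prepare_ring() */

	if (!link_trb_quirk) {
		/* Carry over the chain bit (it may end up cleared). */
		*link_control &= ~TRB_CHAIN;
		*link_control |= chain;
	}
	/* Flip the cycle bit so the hardware owns the link TRB. */
	*link_control ^= TRB_CYCLE;
	return GIVE_LINK_TRB_TO_HW;
}

int main(void)
{
	unsigned int control = TRB_CHAIN;	/* as left by an earlier pass */

	/* Last TRB of the last TD queued for now: defer the link TRB. */
	printf("%d\n", handle_link_trb(&control, 0, false, false));
	/* Mid-TD link TRB: give it to the hardware with the chain bit set. */
	printf("%d\n", handle_link_trb(&control, TRB_CHAIN, true, false));
	return 0;
}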
@@ -1707,9 +1724,12 @@ void xhci_handle_event(struct xhci_hcd *xhci)
 /*
  * Generic function for queueing a TRB on a ring.
  * The caller must have checked to make sure there's room on the ring.
+ *
+ * @more_trbs_coming:	Will you enqueue more TRBs before calling
+ *			prepare_transfer()?
  */
 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
-		bool consumer,
+		bool consumer, bool more_trbs_coming,
 		u32 field1, u32 field2, u32 field3, u32 field4)
 {
 	struct xhci_generic_trb *trb;
@@ -1719,7 +1739,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	trb->field[1] = field2;
 	trb->field[2] = field3;
 	trb->field[3] = field4;
-	inc_enq(xhci, ring, consumer);
+	inc_enq(xhci, ring, consumer, more_trbs_coming);
 }
 
 /*
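queue_trb() itself stays a thin helper: it copies the four 32-bit fields into the enqueue slot and then advances the enqueue pointer, now forwarding the caller's more_trbs_coming hint to inc_enq(). A minimal standalone model of that flow (toy_ring, toy_queue_trb, and toy_inc_enq are illustrative stand-ins, not the xHCI structures):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; not the driver's xhci_generic_trb / xhci_ring. */
struct toy_trb {
	uint32_t field[4];
};

struct toy_ring {
	struct toy_trb trbs[8];
	unsigned int enqueue;
};

/* Stand-in for inc_enq(): advance the enqueue index, carrying the hint that
 * tells it whether the caller will queue more TRBs before the doorbell. */
static void toy_inc_enq(struct toy_ring *ring, bool more_trbs_coming)
{
	ring->enqueue = (ring->enqueue + 1) % 8;
	printf("advanced to slot %u, more_trbs_coming=%d\n",
	       ring->enqueue, more_trbs_coming);
}

/* Stand-in for queue_trb(): copy the four fields, then forward the hint. */
static void toy_queue_trb(struct toy_ring *ring, bool more_trbs_coming,
			  uint32_t f1, uint32_t f2, uint32_t f3, uint32_t f4)
{
	struct toy_trb *trb = &ring->trbs[ring->enqueue];

	trb->field[0] = f1;
	trb->field[1] = f2;
	trb->field[2] = f3;
	trb->field[3] = f4;
	toy_inc_enq(ring, more_trbs_coming);
}

int main(void)
{
	struct toy_ring ring = { .enqueue = 0 };

	toy_queue_trb(&ring, true, 0x1000, 0, 512, 0);	/* mid-TD TRB */
	toy_queue_trb(&ring, false, 0x1200, 0, 512, 0);	/* last TRB of the TD */
	return 0;
}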
@@ -1988,6 +2008,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	int trb_buff_len, this_sg_len, running_total;
 	bool first_trb;
 	u64 addr;
+	bool more_trbs_coming;
 
 	struct xhci_generic_trb *start_trb;
 	int start_cycle;
@@ -2073,7 +2094,11 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		length_field = TRB_LEN(trb_buff_len) |
 			remainder |
 			TRB_INTR_TARGET(0);
-		queue_trb(xhci, ep_ring, false,
+		if (num_trbs > 1)
+			more_trbs_coming = true;
+		else
+			more_trbs_coming = false;
+		queue_trb(xhci, ep_ring, false, more_trbs_coming,
 				lower_32_bits(addr),
 				upper_32_bits(addr),
 				length_field,
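In the scatter-gather path, num_trbs counts the TRBs still to be queued for the current TD and is decremented once per loop pass, so "num_trbs > 1" simply means the TRB being queued right now is not the last one; only the final TRB of the TD passes false, which lets inc_enq() hold back a trailing link TRB until the next TD arrives. A tiny standalone illustration of that pattern (queue_chunk and the loop shape are illustrative, not the driver's code):

#include <stdbool.h>
#include <stdio.h>

/* Toy version of the per-TRB loop in the bulk queueing paths. */
static void queue_chunk(int index, bool more_trbs_coming)
{
	printf("TRB %d: more_trbs_coming=%s\n",
	       index, more_trbs_coming ? "true" : "false");
}

int main(void)
{
	int num_trbs = 4;	/* e.g. a four-chunk scatter-gather TD */
	int i = 0;

	do {
		bool more_trbs_coming = num_trbs > 1;

		queue_chunk(i++, more_trbs_coming);
		num_trbs--;
	} while (num_trbs > 0);	/* prints true, true, true, false */
	return 0;
}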
@@ -2124,6 +2149,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	int num_trbs;
 	struct xhci_generic_trb *start_trb;
 	bool first_trb;
+	bool more_trbs_coming;
 	int start_cycle;
 	u32 field, length_field;
 
@@ -2212,7 +2238,11 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		length_field = TRB_LEN(trb_buff_len) |
 			remainder |
 			TRB_INTR_TARGET(0);
-		queue_trb(xhci, ep_ring, false,
+		if (num_trbs > 1)
+			more_trbs_coming = true;
+		else
+			more_trbs_coming = false;
+		queue_trb(xhci, ep_ring, false, more_trbs_coming,
 				lower_32_bits(addr),
 				upper_32_bits(addr),
 				length_field,
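The same num_trbs > 1 test appears in xhci_queue_bulk_tx() for non-scatter-gather URBs. The count itself comes from how the transfer buffer falls across TRB-sized chunks: a single data TRB carries at most 64 KB and may not cross a 64 KB boundary, so a contiguous buffer generally needs one TRB per 64 KB segment it touches. The hedged, standalone sketch below shows one way that count can be derived (trb_count_for_buffer is illustrative, not the driver's exact code):

#include <stdint.h>
#include <stdio.h>

#define TRB_MAX_BUFF_SIZE	(64u * 1024u)	/* one data TRB: at most 64 KB,
						 * never crossing a 64 KB boundary */

/* Illustrative count of the data TRBs a contiguous buffer would need. */
static unsigned int trb_count_for_buffer(uint64_t dma_addr, uint32_t len)
{
	unsigned int num_trbs = 0;
	/* Bytes that fit before the first 64 KB boundary. */
	uint32_t running_total = TRB_MAX_BUFF_SIZE -
			(dma_addr & (TRB_MAX_BUFF_SIZE - 1));

	if (running_total != 0 || len == 0)
		num_trbs++;		/* first chunk (or a zero-length TRB) */
	while (running_total < len) {
		num_trbs++;		/* one more TRB per further 64 KB chunk */
		running_total += TRB_MAX_BUFF_SIZE;
	}
	return num_trbs;
}

int main(void)
{
	/* A 100 KB buffer starting 4 KB below a boundary needs 3 TRBs:
	 * 4 KB + 64 KB + 32 KB. */
	printf("%u\n", trb_count_for_buffer(0xF000, 100 * 1024));
	return 0;
}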
@@ -2291,7 +2321,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	/* Queue setup TRB - see section 6.4.1.2.1 */
 	/* FIXME better way to translate setup_packet into two u32 fields? */
 	setup = (struct usb_ctrlrequest *) urb->setup_packet;
-	queue_trb(xhci, ep_ring, false,
+	queue_trb(xhci, ep_ring, false, true,
 			/* FIXME endianness is probably going to bite my ass here. */
 			setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
 			setup->wIndex | setup->wLength << 16,
@@ -2307,7 +2337,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	if (urb->transfer_buffer_length > 0) {
 		if (setup->bRequestType & USB_DIR_IN)
 			field |= TRB_DIR_IN;
-		queue_trb(xhci, ep_ring, false,
+		queue_trb(xhci, ep_ring, false, true,
 				lower_32_bits(urb->transfer_dma),
 				upper_32_bits(urb->transfer_dma),
 				length_field,
@@ -2324,7 +2354,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		field = 0;
 	else
 		field = TRB_DIR_IN;
-	queue_trb(xhci, ep_ring, false, false,
+	queue_trb(xhci, ep_ring, false, false,
 			0,
 			0,
 			TRB_INTR_TARGET(0),
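For control transfers the choice is fixed by the three-stage structure visible in the hunks above: the setup TRB and any data TRB always have at least the status TRB still to come, so they pass true, while the status TRB is the last one queued before the doorbell and passes false. A small standalone summary of that mapping (ctrl_stage and more_trbs_coming_for are illustrative names):

#include <stdbool.h>
#include <stdio.h>

/* The three control-transfer stages and the more_trbs_coming value each
 * one passes to queue_trb() in the diff above. */
enum ctrl_stage { SETUP_STAGE, DATA_STAGE, STATUS_STAGE };

static bool more_trbs_coming_for(enum ctrl_stage stage)
{
	switch (stage) {
	case SETUP_STAGE:	/* a data and/or status TRB always follows */
	case DATA_STAGE:	/* the status TRB always follows */
		return true;
	case STATUS_STAGE:	/* last TRB before the doorbell rings */
	default:
		return false;
	}
}

int main(void)
{
	printf("setup:  %d\n", more_trbs_coming_for(SETUP_STAGE));
	printf("data:   %d\n", more_trbs_coming_for(DATA_STAGE));
	printf("status: %d\n", more_trbs_coming_for(STATUS_STAGE));
	return 0;
}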
@@ -2361,7 +2391,7 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
 				"unfailable commands failed.\n");
 		return -ENOMEM;
 	}
-	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
+	queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3,
 			field4 | xhci->cmd_ring->cycle_state);
 	return 0;
 }