Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
 drivers/usb/host/xhci-ring.c | 93 +++++++++++++++++++++++++++++++----------
 1 file changed, 76 insertions(+), 17 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 36c858e5b529..94e6934edb09 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -182,8 +182,12 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
  * set, but other sections talk about dealing with the chain bit set. This was
  * fixed in the 0.96 specification errata, but we have to assume that all 0.95
  * xHCI hardware can't handle the chain bit being cleared on a link TRB.
+ *
+ * @more_trbs_coming: Will you enqueue more TRBs before calling
+ *			prepare_transfer()?
  */
-static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
+static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
+		bool consumer, bool more_trbs_coming)
 {
 	u32 chain;
 	union xhci_trb *next;
@@ -199,15 +203,28 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
 	while (last_trb(xhci, ring, ring->enq_seg, next)) {
 		if (!consumer) {
 			if (ring != xhci->event_ring) {
-				if (chain) {
-					next->link.control |= TRB_CHAIN;
-
-					/* Give this link TRB to the hardware */
-					wmb();
-					next->link.control ^= TRB_CYCLE;
-				} else {
+				/*
+				 * If the caller doesn't plan on enqueueing more
+				 * TDs before ringing the doorbell, then we
+				 * don't want to give the link TRB to the
+				 * hardware just yet. We'll give the link TRB
+				 * back in prepare_ring() just before we enqueue
+				 * the TD at the top of the ring.
+				 */
+				if (!chain && !more_trbs_coming)
 					break;
+
+				/* If we're not dealing with 0.95 hardware,
+				 * carry over the chain bit of the previous TRB
+				 * (which may mean the chain bit is cleared).
+				 */
+				if (!xhci_link_trb_quirk(xhci)) {
+					next->link.control &= ~TRB_CHAIN;
+					next->link.control |= chain;
 				}
+				/* Give this link TRB to the hardware */
+				wmb();
+				next->link.control ^= TRB_CYCLE;
 			}
 			/* Toggle the cycle bit after the last ring segment. */
 			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
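
The rewritten branch above changes who owns the link TRB at the moment the
enqueue pointer wraps. Condensed into plain C (a restatement of the diff for
readability, not a drop-in replacement), the producer-side logic is now:

	if (!chain && !more_trbs_coming)
		break;	/* keep the link TRB; prepare_ring() hands it to the
			 * hardware just before the next TD is enqueued at
			 * the top of the ring */
	if (!xhci_link_trb_quirk(xhci)) {
		/* 0.96+ hardware: carry over the previous TRB's chain bit,
		 * which may clear TRB_CHAIN on the link TRB */
		next->link.control &= ~TRB_CHAIN;
		next->link.control |= chain;
	}
	wmb();			/* order the control-field writes first */
	next->link.control ^= TRB_CYCLE;	/* flip ownership to the HC */

The wmb() matters: the cycle-bit flip is what transfers ownership of the link
TRB to the host controller, so every other write to it must be visible first.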
@@ -1071,6 +1088,15 @@ bandwidth_change:
 		xhci_warn(xhci, "Reset device command completion "
 				"for disabled slot %u\n", slot_id);
 		break;
+	case TRB_TYPE(TRB_NEC_GET_FW):
+		if (!(xhci->quirks & XHCI_NEC_HOST)) {
+			xhci->error_bitmask |= 1 << 6;
+			break;
+		}
+		xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",
+				NEC_FW_MAJOR(event->status),
+				NEC_FW_MINOR(event->status));
+		break;
 	default:
 		/* Skip over unknown commands on the event ring */
 		xhci->error_bitmask |= 1 << 6;
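
NEC_FW_MAJOR() and NEC_FW_MINOR() are defined in xhci.h rather than in this
file. A sketch of the assumed field layout, with the version packed into the
low 16 bits of the command completion event's status dword (major in bits
15:8, minor in bits 7:0):

	/* Assumed layout; see xhci.h for the authoritative definitions. */
	#define NEC_FW_MINOR(p)		(((p) >> 0) & 0xff)
	#define NEC_FW_MAJOR(p)		(((p) >> 8) & 0xff)

With that layout, a status of 0x00003028 prints as "NEC firmware version
30.28" through the "%2x.%02x" format above.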
@@ -1079,6 +1105,17 @@ bandwidth_change:
 	inc_deq(xhci, xhci->cmd_ring, false);
 }
 
+static void handle_vendor_event(struct xhci_hcd *xhci,
+		union xhci_trb *event)
+{
+	u32 trb_type;
+
+	trb_type = TRB_FIELD_TO_TYPE(event->generic.field[3]);
+	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
+	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
+		handle_cmd_completion(xhci, &event->event_cmd);
+}
+
 static void handle_port_status(struct xhci_hcd *xhci,
 		union xhci_trb *event)
 {
@@ -1659,7 +1696,10 @@ void xhci_handle_event(struct xhci_hcd *xhci)
 		update_ptrs = 0;
 		break;
 	default:
-		xhci->error_bitmask |= 1 << 3;
+		if ((event->event_cmd.flags & TRB_TYPE_BITMASK) >= TRB_TYPE(48))
+			handle_vendor_event(xhci, event);
+		else
+			xhci->error_bitmask |= 1 << 3;
 	}
 	/* Any of the above functions may drop and re-acquire the lock, so check
 	 * to make sure a watchdog timer didn't mark the host as non-responsive.
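
The magic number 48 comes from the xHCI specification: TRB types 48-63 are
reserved for vendor-defined use, so anything at or above TRB_TYPE(48) in the
default case is routed to handle_vendor_event() instead of being counted as
an error. The packing macros this test relies on, as defined in xhci.h, look
like:

	#define TRB_TYPE_BITMASK	(0xfc00)
	#define TRB_TYPE(p)		((p) << 10)
	/* TRB type lives in bits 15:10 of the TRB's fourth dword */
	#define TRB_FIELD_TO_TYPE(p)	(((p) & TRB_TYPE_BITMASK) >> 10)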
@@ -1684,9 +1724,12 @@ void xhci_handle_event(struct xhci_hcd *xhci)
 /*
  * Generic function for queueing a TRB on a ring.
  * The caller must have checked to make sure there's room on the ring.
+ *
+ * @more_trbs_coming: Will you enqueue more TRBs before calling
+ *			prepare_transfer()?
  */
 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
-		bool consumer,
+		bool consumer, bool more_trbs_coming,
 		u32 field1, u32 field2, u32 field3, u32 field4)
 {
 	struct xhci_generic_trb *trb;
@@ -1696,7 +1739,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	trb->field[1] = field2;
 	trb->field[2] = field3;
 	trb->field[3] = field4;
-	inc_enq(xhci, ring, consumer);
+	inc_enq(xhci, ring, consumer, more_trbs_coming);
 }
 
 /*
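
With the extra parameter, a caller queueing a multi-TRB TD passes true on
every queue_trb() call except the last one before it rings the doorbell. A
hypothetical two-TRB TD (field values f1..f4 and g1..g4 are placeholders):

	/* first TRB: more work follows, keep the link TRB back from HW */
	queue_trb(xhci, ep_ring, false, true, f1, f2, f3, f4);
	/* last TRB before the doorbell: the link TRB may be given to HW */
	queue_trb(xhci, ep_ring, false, false, g1, g2, g3, g4);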
@@ -1965,6 +2008,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	int trb_buff_len, this_sg_len, running_total;
 	bool first_trb;
 	u64 addr;
+	bool more_trbs_coming;
 
 	struct xhci_generic_trb *start_trb;
 	int start_cycle;
@@ -2050,7 +2094,11 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		length_field = TRB_LEN(trb_buff_len) |
 			remainder |
 			TRB_INTR_TARGET(0);
-		queue_trb(xhci, ep_ring, false,
+		if (num_trbs > 1)
+			more_trbs_coming = true;
+		else
+			more_trbs_coming = false;
+		queue_trb(xhci, ep_ring, false, more_trbs_coming,
 				lower_32_bits(addr),
 				upper_32_bits(addr),
 				length_field,
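
The four-line if/else reduces to a single assignment, noted here only for
readability (the same pattern recurs in xhci_queue_bulk_tx() below):

	more_trbs_coming = (num_trbs > 1);	/* false on the TD's last TRB */

Since num_trbs here counts the TRBs still to be queued for this TD, the flag
goes false exactly once, on the final iteration of the enqueue loop.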
@@ -2101,6 +2149,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	int num_trbs;
 	struct xhci_generic_trb *start_trb;
 	bool first_trb;
+	bool more_trbs_coming;
 	int start_cycle;
 	u32 field, length_field;
 
@@ -2189,7 +2238,11 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		length_field = TRB_LEN(trb_buff_len) |
 			remainder |
 			TRB_INTR_TARGET(0);
-		queue_trb(xhci, ep_ring, false,
+		if (num_trbs > 1)
+			more_trbs_coming = true;
+		else
+			more_trbs_coming = false;
+		queue_trb(xhci, ep_ring, false, more_trbs_coming,
 				lower_32_bits(addr),
 				upper_32_bits(addr),
 				length_field,
@@ -2268,7 +2321,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	/* Queue setup TRB - see section 6.4.1.2.1 */
 	/* FIXME better way to translate setup_packet into two u32 fields? */
 	setup = (struct usb_ctrlrequest *) urb->setup_packet;
-	queue_trb(xhci, ep_ring, false,
+	queue_trb(xhci, ep_ring, false, true,
 			/* FIXME endianness is probably going to bite my ass here. */
 			setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
 			setup->wIndex | setup->wLength << 16,
@@ -2284,7 +2337,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	if (urb->transfer_buffer_length > 0) {
 		if (setup->bRequestType & USB_DIR_IN)
 			field |= TRB_DIR_IN;
-		queue_trb(xhci, ep_ring, false,
+		queue_trb(xhci, ep_ring, false, true,
 				lower_32_bits(urb->transfer_dma),
 				upper_32_bits(urb->transfer_dma),
 				length_field,
@@ -2301,7 +2354,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		field = 0;
 	else
 		field = TRB_DIR_IN;
-	queue_trb(xhci, ep_ring, false,
+	queue_trb(xhci, ep_ring, false, false,
 			0,
 			0,
 			TRB_INTR_TARGET(0),
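
Taken together, the three xhci_queue_ctrl_tx() hunks encode the shape of a
control transfer in the new parameter:

	/*
	 * more_trbs_coming per control-transfer stage:
	 *   setup stage  -> true   (a data or status TRB still follows)
	 *   data stage   -> true   (the status TRB still follows)
	 *   status stage -> false  (the doorbell rings next)
	 */

Only the status stage may let inc_enq() give a link TRB to the hardware,
matching the rule that the TD must be complete before a ring wrap is
published to the host controller.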
@@ -2338,7 +2391,7 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
2338 "unfailable commands failed.\n"); 2391 "unfailable commands failed.\n");
2339 return -ENOMEM; 2392 return -ENOMEM;
2340 } 2393 }
2341 queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3, 2394 queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3,
2342 field4 | xhci->cmd_ring->cycle_state); 2395 field4 | xhci->cmd_ring->cycle_state);
2343 return 0; 2396 return 0;
2344} 2397}
@@ -2378,6 +2431,12 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 			false);
 }
 
+int xhci_queue_vendor_command(struct xhci_hcd *xhci,
+		u32 field1, u32 field2, u32 field3, u32 field4)
+{
+	return queue_command(xhci, field1, field2, field3, field4, false);
+}
+
 /* Queue a reset device command TRB */
 int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
 {
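
xhci_queue_vendor_command() is a thin wrapper that forwards arbitrary command
TRB fields to queue_command(). Its first user is the NEC firmware query
handled earlier in this patch; the companion call site lives outside this
file, but would look roughly like this (a sketch, assuming XHCI_NEC_HOST is
set during PCI probe):

	/* Ask NEC hosts for their firmware version; the answer arrives as
	 * the TRB_NEC_GET_FW command completion handled above. */
	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_queue_vendor_command(xhci, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));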