Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r--  drivers/usb/host/xhci-ring.c  587
1 file changed, 222 insertions(+), 365 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 7a0e3c720c00..d67ff71209f5 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -69,10 +69,6 @@
 #include "xhci.h"
 #include "xhci-trace.h"
 
-static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
-                struct xhci_virt_device *virt_dev,
-                struct xhci_event_cmd *event);
-
 /*
  * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
  * address of the TRB.
@@ -123,16 +119,6 @@ static int enqueue_is_link_trb(struct xhci_ring *ring)
         return TRB_TYPE_LINK_LE32(link->control);
 }
 
-union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring)
-{
-        /* Enqueue pointer can be left pointing to the link TRB,
-         * we must handle that
-         */
-        if (TRB_TYPE_LINK_LE32(ring->enqueue->link.control))
-                return ring->enq_seg->next->trbs;
-        return ring->enqueue;
-}
-
 /* Updates trb to point to the next TRB in the ring, and updates seg if the next
  * TRB is in a new segment. This does not skip over link TRBs, and it does not
  * effect the ring dequeue or enqueue pointers.
@@ -301,17 +287,7 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
 
         xhci_dbg(xhci, "Abort command ring\n");
 
-        if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) {
-                xhci_dbg(xhci, "The command ring isn't running, "
-                                "Have the command ring been stopped?\n");
-                return 0;
-        }
-
         temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
-        if (!(temp_64 & CMD_RING_RUNNING)) {
-                xhci_dbg(xhci, "Command ring had been stopped\n");
-                return 0;
-        }
         xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
         xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
                         &xhci->op_regs->cmd_ring);
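The surviving abort path is the xHCI Command Abort handshake: software sets the Command Abort (CMD_RING_ABORT) bit in the CRCR register and then waits for the Command Ring Running (CRR) flag to clear, per xHCI spec sections 4.6.1.1 and 4.6.1.2. A condensed sketch of the sequence this hunk leaves in place; the trailing poll via xhci_handshake() and its 5-second timeout are taken from the surrounding driver code, not from this diff:

        u64 temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);

        xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
        xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
                        &xhci->op_regs->cmd_ring);

        /* poll until the controller reports the ring stopped (assumed helper) */
        ret = xhci_handshake(xhci, &xhci->op_regs->cmd_ring,
                        CMD_RING_RUNNING, 0, 5 * 1000 * 1000);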
@@ -337,71 +313,6 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
         return 0;
 }
 
-static int xhci_queue_cd(struct xhci_hcd *xhci,
-                struct xhci_command *command,
-                union xhci_trb *cmd_trb)
-{
-        struct xhci_cd *cd;
-        cd = kzalloc(sizeof(struct xhci_cd), GFP_ATOMIC);
-        if (!cd)
-                return -ENOMEM;
-        INIT_LIST_HEAD(&cd->cancel_cmd_list);
-
-        cd->command = command;
-        cd->cmd_trb = cmd_trb;
-        list_add_tail(&cd->cancel_cmd_list, &xhci->cancel_cmd_list);
-
-        return 0;
-}
-
-/*
- * Cancel the command which has issue.
- *
- * Some commands may hang due to waiting for acknowledgement from
- * usb device. It is outside of the xHC's ability to control and
- * will cause the command ring is blocked. When it occurs software
- * should intervene to recover the command ring.
- * See Section 4.6.1.1 and 4.6.1.2
- */
-int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
-                union xhci_trb *cmd_trb)
-{
-        int retval = 0;
-        unsigned long flags;
-
-        spin_lock_irqsave(&xhci->lock, flags);
-
-        if (xhci->xhc_state & XHCI_STATE_DYING) {
-                xhci_warn(xhci, "Abort the command ring,"
-                                " but the xHCI is dead.\n");
-                retval = -ESHUTDOWN;
-                goto fail;
-        }
-
-        /* queue the cmd desriptor to cancel_cmd_list */
-        retval = xhci_queue_cd(xhci, command, cmd_trb);
-        if (retval) {
-                xhci_warn(xhci, "Queuing command descriptor failed.\n");
-                goto fail;
-        }
-
-        /* abort command ring */
-        retval = xhci_abort_cmd_ring(xhci);
-        if (retval) {
-                xhci_err(xhci, "Abort command ring failed\n");
-                if (unlikely(retval == -ESHUTDOWN)) {
-                        spin_unlock_irqrestore(&xhci->lock, flags);
-                        usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
-                        xhci_dbg(xhci, "xHCI host controller is dead.\n");
-                        return retval;
-                }
-        }
-
-fail:
-        spin_unlock_irqrestore(&xhci->lock, flags);
-        return retval;
-}
-
 void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
                 unsigned int slot_id,
                 unsigned int ep_index,
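With xhci_queue_cd()/xhci_cancel_cmd() gone, cancellation no longer needs a parallel bookkeeping list: every command queued through the reworked API below carries its own list node, status, and optional completion, and a hung command is recovered by the cmd_timer path added later in this diff. A minimal sketch of the submit-and-wait pattern the rest of the diff converts callers to (helper names as they appear in this diff; error paths abbreviated):

        struct xhci_command *cmd;
        unsigned long flags;
        int ret;

        /* third argument allocates a struct completion to wait on */
        cmd = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        spin_lock_irqsave(&xhci->lock, flags);
        ret = xhci_queue_slot_control(xhci, cmd, TRB_ENABLE_SLOT, 0);
        if (ret) {
                spin_unlock_irqrestore(&xhci->lock, flags);
                xhci_free_command(xhci, cmd);
                return ret;
        }
        xhci_ring_cmd_db(xhci);
        spin_unlock_irqrestore(&xhci->lock, flags);

        /* handle_cmd_completion() fills cmd->status and signals this */
        wait_for_completion(cmd->completion);
        ret = cmd->status;
        xhci_free_command(xhci, cmd);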
@@ -684,12 +595,14 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
         }
 }
 
-static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
+static int queue_set_tr_deq(struct xhci_hcd *xhci,
+                struct xhci_command *cmd, int slot_id,
                 unsigned int ep_index, unsigned int stream_id,
                 struct xhci_segment *deq_seg,
                 union xhci_trb *deq_ptr, u32 cycle_state);
 
 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
+                struct xhci_command *cmd,
                 unsigned int slot_id, unsigned int ep_index,
                 unsigned int stream_id,
                 struct xhci_dequeue_state *deq_state)
@@ -704,7 +617,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
                         deq_state->new_deq_ptr,
                         (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
                         deq_state->new_cycle_state);
-        queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
+        queue_set_tr_deq(xhci, cmd, slot_id, ep_index, stream_id,
                         deq_state->new_deq_seg,
                         deq_state->new_deq_ptr,
                         (u32) deq_state->new_cycle_state);
@@ -773,7 +686,6 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
                 union xhci_trb *trb, struct xhci_event_cmd *event)
 {
         unsigned int ep_index;
-        struct xhci_virt_device *virt_dev;
         struct xhci_ring *ep_ring;
         struct xhci_virt_ep *ep;
         struct list_head *entry;
@@ -783,11 +695,7 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
         struct xhci_dequeue_state deq_state;
 
         if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
-                virt_dev = xhci->devs[slot_id];
-                if (virt_dev)
-                        handle_cmd_in_cmd_wait_list(xhci, virt_dev,
-                                event);
-                else
+                if (!xhci->devs[slot_id])
                         xhci_warn(xhci, "Stop endpoint command "
                                 "completion for disabled slot %u\n",
                                 slot_id);
@@ -858,7 +766,9 @@ remove_finished_td:
 
         /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
         if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
-                xhci_queue_new_dequeue_state(xhci,
+                struct xhci_command *command;
+                command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
+                xhci_queue_new_dequeue_state(xhci, command,
                                 slot_id, ep_index,
                                 ep->stopped_td->urb->stream_id,
                                 &deq_state);
@@ -1206,9 +1116,11 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
          * because the HW can't handle two commands being queued in a row.
          */
         if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
+                struct xhci_command *command;
+                command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
                 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                 "Queueing configure endpoint command");
-                xhci_queue_configure_endpoint(xhci,
+                xhci_queue_configure_endpoint(xhci, command,
                                 xhci->devs[slot_id]->in_ctx->dma, slot_id,
                                 false);
                 xhci_ring_cmd_db(xhci);
@@ -1219,187 +1131,6 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
         }
 }
 
-/* Complete the command and detele it from the devcie's command queue.
- */
-static void xhci_complete_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
-                struct xhci_command *command, u32 status)
-{
-        command->status = status;
-        list_del(&command->cmd_list);
-        if (command->completion)
-                complete(command->completion);
-        else
-                xhci_free_command(xhci, command);
-}
-
-
-/* Check to see if a command in the device's command queue matches this one.
- * Signal the completion or free the command, and return 1. Return 0 if the
- * completed command isn't at the head of the command list.
- */
-static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
-                struct xhci_virt_device *virt_dev,
-                struct xhci_event_cmd *event)
-{
-        struct xhci_command *command;
-
-        if (list_empty(&virt_dev->cmd_list))
-                return 0;
-
-        command = list_entry(virt_dev->cmd_list.next,
-                        struct xhci_command, cmd_list);
-        if (xhci->cmd_ring->dequeue != command->command_trb)
-                return 0;
-
-        xhci_complete_cmd_in_cmd_wait_list(xhci, command,
-                        GET_COMP_CODE(le32_to_cpu(event->status)));
-        return 1;
-}
-
-/*
- * Finding the command trb need to be cancelled and modifying it to
- * NO OP command. And if the command is in device's command wait
- * list, finishing and freeing it.
- *
- * If we can't find the command trb, we think it had already been
- * executed.
- */
-static void xhci_cmd_to_noop(struct xhci_hcd *xhci, struct xhci_cd *cur_cd)
-{
-        struct xhci_segment *cur_seg;
-        union xhci_trb *cmd_trb;
-        u32 cycle_state;
-
-        if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
-                return;
-
-        /* find the current segment of command ring */
-        cur_seg = find_trb_seg(xhci->cmd_ring->first_seg,
-                        xhci->cmd_ring->dequeue, &cycle_state);
-
-        if (!cur_seg) {
-                xhci_warn(xhci, "Command ring mismatch, dequeue = %p %llx (dma)\n",
-                                xhci->cmd_ring->dequeue,
-                                (unsigned long long)
-                                xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
-                                        xhci->cmd_ring->dequeue));
-                xhci_debug_ring(xhci, xhci->cmd_ring);
-                xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
-                return;
-        }
-
-        /* find the command trb matched by cd from command ring */
-        for (cmd_trb = xhci->cmd_ring->dequeue;
-                        cmd_trb != xhci->cmd_ring->enqueue;
-                        next_trb(xhci, xhci->cmd_ring, &cur_seg, &cmd_trb)) {
-                /* If the trb is link trb, continue */
-                if (TRB_TYPE_LINK_LE32(cmd_trb->generic.field[3]))
-                        continue;
-
-                if (cur_cd->cmd_trb == cmd_trb) {
-
-                        /* If the command in device's command list, we should
-                         * finish it and free the command structure.
-                         */
-                        if (cur_cd->command)
-                                xhci_complete_cmd_in_cmd_wait_list(xhci,
-                                        cur_cd->command, COMP_CMD_STOP);
-
-                        /* get cycle state from the origin command trb */
-                        cycle_state = le32_to_cpu(cmd_trb->generic.field[3])
-                                & TRB_CYCLE;
-
-                        /* modify the command trb to NO OP command */
-                        cmd_trb->generic.field[0] = 0;
-                        cmd_trb->generic.field[1] = 0;
-                        cmd_trb->generic.field[2] = 0;
-                        cmd_trb->generic.field[3] = cpu_to_le32(
-                                        TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
-                        break;
-                }
-        }
-}
-
-static void xhci_cancel_cmd_in_cd_list(struct xhci_hcd *xhci)
-{
-        struct xhci_cd *cur_cd, *next_cd;
-
-        if (list_empty(&xhci->cancel_cmd_list))
-                return;
-
-        list_for_each_entry_safe(cur_cd, next_cd,
-                        &xhci->cancel_cmd_list, cancel_cmd_list) {
-                xhci_cmd_to_noop(xhci, cur_cd);
-                list_del(&cur_cd->cancel_cmd_list);
-                kfree(cur_cd);
-        }
-}
-
-/*
- * traversing the cancel_cmd_list. If the command descriptor according
- * to cmd_trb is found, the function free it and return 1, otherwise
- * return 0.
- */
-static int xhci_search_cmd_trb_in_cd_list(struct xhci_hcd *xhci,
-                union xhci_trb *cmd_trb)
-{
-        struct xhci_cd *cur_cd, *next_cd;
-
-        if (list_empty(&xhci->cancel_cmd_list))
-                return 0;
-
-        list_for_each_entry_safe(cur_cd, next_cd,
-                        &xhci->cancel_cmd_list, cancel_cmd_list) {
-                if (cur_cd->cmd_trb == cmd_trb) {
-                        if (cur_cd->command)
-                                xhci_complete_cmd_in_cmd_wait_list(xhci,
-                                        cur_cd->command, COMP_CMD_STOP);
-                        list_del(&cur_cd->cancel_cmd_list);
-                        kfree(cur_cd);
-                        return 1;
-                }
-        }
-
-        return 0;
-}
-
-/*
- * If the cmd_trb_comp_code is COMP_CMD_ABORT, we just check whether the
- * trb pointed by the command ring dequeue pointer is the trb we want to
- * cancel or not. And if the cmd_trb_comp_code is COMP_CMD_STOP, we will
- * traverse the cancel_cmd_list to trun the all of the commands according
- * to command descriptor to NO-OP trb.
- */
-static int handle_stopped_cmd_ring(struct xhci_hcd *xhci,
-                int cmd_trb_comp_code)
-{
-        int cur_trb_is_good = 0;
-
-        /* Searching the cmd trb pointed by the command ring dequeue
-         * pointer in command descriptor list. If it is found, free it.
-         */
-        cur_trb_is_good = xhci_search_cmd_trb_in_cd_list(xhci,
-                        xhci->cmd_ring->dequeue);
-
-        if (cmd_trb_comp_code == COMP_CMD_ABORT)
-                xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
-        else if (cmd_trb_comp_code == COMP_CMD_STOP) {
-                /* traversing the cancel_cmd_list and canceling
-                 * the command according to command descriptor
-                 */
-                xhci_cancel_cmd_in_cd_list(xhci);
-
-                xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
-                /*
-                 * ring command ring doorbell again to restart the
-                 * command ring
-                 */
-                if (xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue)
-                        xhci_ring_cmd_db(xhci);
-        }
-        return cur_trb_is_good;
-}
-
 static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
                 u32 cmd_comp_code)
 {
@@ -1407,7 +1138,6 @@ static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
                 xhci->slot_id = slot_id;
         else
                 xhci->slot_id = 0;
-        complete(&xhci->addr_dev);
 }
 
 static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
@@ -1432,9 +1162,6 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
         unsigned int ep_state;
         u32 add_flags, drop_flags;
 
-        virt_dev = xhci->devs[slot_id];
-        if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
-                return;
         /*
          * Configure endpoint commands can come from the USB core
          * configuration or alt setting changes, or because the HW
@@ -1443,6 +1170,7 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
          * If the command was for a halted endpoint, the xHCI driver
          * is not waiting on the configure endpoint command.
          */
+        virt_dev = xhci->devs[slot_id];
         ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
         if (!ctrl_ctx) {
                 xhci_warn(xhci, "Could not get input context, bad type.\n");
@@ -1465,7 +1193,7 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
                         add_flags - SLOT_FLAG == drop_flags) {
                 ep_state = virt_dev->eps[ep_index].ep_state;
                 if (!(ep_state & EP_HALTED))
-                        goto bandwidth_change;
+                        return;
                 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                 "Completed config ep cmd - "
                                 "last ep index = %d, state = %d",
@@ -1475,43 +1203,14 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
                 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
                 return;
         }
-bandwidth_change:
-        xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
-                        "Completed config ep cmd");
-        virt_dev->cmd_status = cmd_comp_code;
-        complete(&virt_dev->cmd_completion);
         return;
 }
 
-static void xhci_handle_cmd_eval_ctx(struct xhci_hcd *xhci, int slot_id,
-                struct xhci_event_cmd *event, u32 cmd_comp_code)
-{
-        struct xhci_virt_device *virt_dev;
-
-        virt_dev = xhci->devs[slot_id];
-        if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
-                return;
-        virt_dev->cmd_status = cmd_comp_code;
-        complete(&virt_dev->cmd_completion);
-}
-
-static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id,
-                u32 cmd_comp_code)
-{
-        xhci->devs[slot_id]->cmd_status = cmd_comp_code;
-        complete(&xhci->addr_dev);
-}
-
 static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
                 struct xhci_event_cmd *event)
 {
-        struct xhci_virt_device *virt_dev;
-
         xhci_dbg(xhci, "Completed reset device command.\n");
-        virt_dev = xhci->devs[slot_id];
-        if (virt_dev)
-                handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
-        else
+        if (!xhci->devs[slot_id])
                 xhci_warn(xhci, "Reset device command completion "
                                 "for disabled slot %u\n", slot_id);
 }
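xhci_handle_cmd_eval_ctx() and xhci_handle_cmd_addr_dev() can be deleted outright because their only job was signalling the per-device completions (virt_dev->cmd_completion, xhci->addr_dev); the generic completion at the end of handle_cmd_completion() now covers every command type. For orientation, the struct xhci_command fields this series leans on, as declared in xhci.h (abbreviated, listed here as an aid rather than a complete definition):

        struct xhci_command {
                struct xhci_container_ctx       *in_ctx;
                u32                             status;
                /* a NULL completion means: free the command after its event */
                struct completion               *completion;
                union xhci_trb                  *command_trb;
                struct list_head                cmd_list;
        };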
@@ -1529,6 +1228,116 @@ static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
                         NEC_FW_MINOR(le32_to_cpu(event->status)));
 }
 
+static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
+{
+        list_del(&cmd->cmd_list);
+
+        if (cmd->completion) {
+                cmd->status = status;
+                complete(cmd->completion);
+        } else {
+                kfree(cmd);
+        }
+}
+
+void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
+{
+        struct xhci_command *cur_cmd, *tmp_cmd;
+        list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
+                xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT);
+}
+
+/*
+ * Turn all commands on command ring with status set to "aborted" to no-op trbs.
+ * If there are other commands waiting then restart the ring and kick the timer.
+ * This must be called with command ring stopped and xhci->lock held.
+ */
+static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
+                struct xhci_command *cur_cmd)
+{
+        struct xhci_command *i_cmd, *tmp_cmd;
+        u32 cycle_state;
+
+        /* Turn all aborted commands in list to no-ops, then restart */
+        list_for_each_entry_safe(i_cmd, tmp_cmd, &xhci->cmd_list,
+                        cmd_list) {
+
+                if (i_cmd->status != COMP_CMD_ABORT)
+                        continue;
+
+                i_cmd->status = COMP_CMD_STOP;
+
+                xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
+                                i_cmd->command_trb);
+                /* get cycle state from the original cmd trb */
+                cycle_state = le32_to_cpu(
+                        i_cmd->command_trb->generic.field[3]) & TRB_CYCLE;
+                /* modify the command trb to no-op command */
+                i_cmd->command_trb->generic.field[0] = 0;
+                i_cmd->command_trb->generic.field[1] = 0;
+                i_cmd->command_trb->generic.field[2] = 0;
+                i_cmd->command_trb->generic.field[3] = cpu_to_le32(
+                        TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
+
+                /*
+                 * caller waiting for completion is called when command
+                 * completion event is received for these no-op commands
+                 */
+        }
+
+        xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
+
+        /* ring command ring doorbell to restart the command ring */
+        if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
+            !(xhci->xhc_state & XHCI_STATE_DYING)) {
+                xhci->current_cmd = cur_cmd;
+                mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
+                xhci_ring_cmd_db(xhci);
+        }
+        return;
+}
+
+
+void xhci_handle_command_timeout(unsigned long data)
+{
+        struct xhci_hcd *xhci;
+        int ret;
+        unsigned long flags;
+        u64 hw_ring_state;
+        struct xhci_command *cur_cmd = NULL;
+        xhci = (struct xhci_hcd *) data;
+
+        /* mark this command to be cancelled */
+        spin_lock_irqsave(&xhci->lock, flags);
+        if (xhci->current_cmd) {
+                cur_cmd = xhci->current_cmd;
+                cur_cmd->status = COMP_CMD_ABORT;
+        }
+
+
+        /* Make sure command ring is running before aborting it */
+        hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+        if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
+            (hw_ring_state & CMD_RING_RUNNING)) {
+
+                spin_unlock_irqrestore(&xhci->lock, flags);
+                xhci_dbg(xhci, "Command timeout\n");
+                ret = xhci_abort_cmd_ring(xhci);
+                if (unlikely(ret == -ESHUTDOWN)) {
+                        xhci_err(xhci, "Abort command ring failed\n");
+                        xhci_cleanup_command_queue(xhci);
+                        usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
+                        xhci_dbg(xhci, "xHCI host controller is dead.\n");
+                }
+                return;
+        }
+        /* command timeout on stopped ring, ring can't be aborted */
+        xhci_dbg(xhci, "Command timeout on stopped ring\n");
+        xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
+        spin_unlock_irqrestore(&xhci->lock, flags);
+        return;
+}
+
 static void handle_cmd_completion(struct xhci_hcd *xhci,
                 struct xhci_event_cmd *event)
 {
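xhci_handle_command_timeout() is a plain kernel timer callback: queue_command() (further down in this diff) arms xhci->cmd_timer when the first command goes on the ring, handle_cmd_completion() deletes or re-arms it, and on expiry the handler marks the current command aborted and kicks the CRCR abort handshake. The one-time wiring is expected to live in the controller init path, roughly as follows (assumed setup-side code, not part of this file):

        /* e.g. in xhci_mem_init(); names assumed from this series */
        INIT_LIST_HEAD(&xhci->cmd_list);
        setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout,
                        (unsigned long)xhci);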
@@ -1537,6 +1346,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
         dma_addr_t cmd_dequeue_dma;
         u32 cmd_comp_code;
         union xhci_trb *cmd_trb;
+        struct xhci_command *cmd;
         u32 cmd_type;
 
         cmd_dma = le64_to_cpu(event->cmd_trb);
@@ -1554,26 +1364,35 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
                 return;
         }
 
+        cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);
+
+        if (cmd->command_trb != xhci->cmd_ring->dequeue) {
+                xhci_err(xhci,
+                        "Command completion event does not match command\n");
+                return;
+        }
+
+        del_timer(&xhci->cmd_timer);
+
         trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
 
         cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
-        if (cmd_comp_code == COMP_CMD_ABORT || cmd_comp_code == COMP_CMD_STOP) {
-                /* If the return value is 0, we think the trb pointed by
-                 * command ring dequeue pointer is a good trb. The good
-                 * trb means we don't want to cancel the trb, but it have
-                 * been stopped by host. So we should handle it normally.
-                 * Otherwise, driver should invoke inc_deq() and return.
-                 */
-                if (handle_stopped_cmd_ring(xhci, cmd_comp_code)) {
-                        inc_deq(xhci, xhci->cmd_ring);
-                        return;
-                }
-                /* There is no command to handle if we get a stop event when the
-                 * command ring is empty, event->cmd_trb points to the next
-                 * unset command
-                 */
-                if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
-                        return;
-        }
+
+        /* If CMD ring stopped we own the trbs between enqueue and dequeue */
+        if (cmd_comp_code == COMP_CMD_STOP) {
+                xhci_handle_stopped_cmd_ring(xhci, cmd);
+                return;
+        }
+        /*
+         * Host aborted the command ring, check if the current command was
+         * supposed to be aborted, otherwise continue normally.
+         * The command ring is stopped now, but the xHC will issue a Command
+         * Ring Stopped event which will cause us to restart it.
+         */
+        if (cmd_comp_code == COMP_CMD_ABORT) {
+                xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+                if (cmd->status == COMP_CMD_ABORT)
+                        goto event_handled;
+        }
 
         cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
@@ -1585,13 +1404,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
                 xhci_handle_cmd_disable_slot(xhci, slot_id);
                 break;
         case TRB_CONFIG_EP:
-                xhci_handle_cmd_config_ep(xhci, slot_id, event, cmd_comp_code);
+                if (!cmd->completion)
+                        xhci_handle_cmd_config_ep(xhci, slot_id, event,
+                                        cmd_comp_code);
                 break;
         case TRB_EVAL_CONTEXT:
-                xhci_handle_cmd_eval_ctx(xhci, slot_id, event, cmd_comp_code);
                 break;
         case TRB_ADDR_DEV:
-                xhci_handle_cmd_addr_dev(xhci, slot_id, cmd_comp_code);
                 break;
         case TRB_STOP_RING:
                 WARN_ON(slot_id != TRB_TO_SLOT_ID(
@@ -1604,6 +1423,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
                 xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
                 break;
         case TRB_CMD_NOOP:
+                /* Is this an aborted command turned to NO-OP? */
+                if (cmd->status == COMP_CMD_STOP)
+                        cmd_comp_code = COMP_CMD_STOP;
                 break;
         case TRB_RESET_EP:
                 WARN_ON(slot_id != TRB_TO_SLOT_ID(
@@ -1623,6 +1445,17 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
                 xhci->error_bitmask |= 1 << 6;
                 break;
         }
+
+        /* restart timer if this wasn't the last command */
+        if (cmd->cmd_list.next != &xhci->cmd_list) {
+                xhci->current_cmd = list_entry(cmd->cmd_list.next,
+                                struct xhci_command, cmd_list);
+                mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
+        }
+
+event_handled:
+        xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);
+
         inc_deq(xhci, xhci->cmd_ring);
 }
 
@@ -1938,11 +1771,16 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
                 struct xhci_td *td, union xhci_trb *event_trb)
 {
         struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+        struct xhci_command *command;
+        command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
+        if (!command)
+                return;
+
         ep->ep_state |= EP_HALTED;
         ep->stopped_td = td;
         ep->stopped_stream = stream_id;
 
-        xhci_queue_reset_ep(xhci, slot_id, ep_index);
+        xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
         xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
 
         ep->stopped_td = NULL;
@@ -2654,7 +2492,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                  * successful event after a short transfer.
                  * Ignore it.
                  */
-                if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && 
+                if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
                         ep_ring->last_td_was_short) {
                         ep_ring->last_td_was_short = false;
                         ret = 0;
@@ -3996,11 +3834,14 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
  * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
  * because the command event handler may want to resubmit a failed command.
  */
-static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
-                u32 field3, u32 field4, bool command_must_succeed)
+static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
+                u32 field1, u32 field2,
+                u32 field3, u32 field4, bool command_must_succeed)
 {
         int reserved_trbs = xhci->cmd_ring_reserved_trbs;
         int ret;
+        if (xhci->xhc_state & XHCI_STATE_DYING)
+                return -ESHUTDOWN;
 
         if (!command_must_succeed)
                 reserved_trbs++;
@@ -4014,57 +3855,71 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
4014 "unfailable commands failed.\n"); 3855 "unfailable commands failed.\n");
4015 return ret; 3856 return ret;
4016 } 3857 }
3858
3859 cmd->command_trb = xhci->cmd_ring->enqueue;
3860 list_add_tail(&cmd->cmd_list, &xhci->cmd_list);
3861
3862 /* if there are no other commands queued we start the timeout timer */
3863 if (xhci->cmd_list.next == &cmd->cmd_list &&
3864 !timer_pending(&xhci->cmd_timer)) {
3865 xhci->current_cmd = cmd;
3866 mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
3867 }
3868
4017 queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3, 3869 queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
4018 field4 | xhci->cmd_ring->cycle_state); 3870 field4 | xhci->cmd_ring->cycle_state);
4019 return 0; 3871 return 0;
4020} 3872}
4021 3873
4022/* Queue a slot enable or disable request on the command ring */ 3874/* Queue a slot enable or disable request on the command ring */
4023int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id) 3875int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
3876 u32 trb_type, u32 slot_id)
4024{ 3877{
4025 return queue_command(xhci, 0, 0, 0, 3878 return queue_command(xhci, cmd, 0, 0, 0,
4026 TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false); 3879 TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
4027} 3880}
4028 3881
4029/* Queue an address device command TRB */ 3882/* Queue an address device command TRB */
4030int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, 3883int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
4031 u32 slot_id, enum xhci_setup_dev setup) 3884 dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
4032{ 3885{
4033 return queue_command(xhci, lower_32_bits(in_ctx_ptr), 3886 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
4034 upper_32_bits(in_ctx_ptr), 0, 3887 upper_32_bits(in_ctx_ptr), 0,
4035 TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id) 3888 TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
4036 | (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false); 3889 | (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
4037} 3890}
4038 3891
4039int xhci_queue_vendor_command(struct xhci_hcd *xhci, 3892int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
4040 u32 field1, u32 field2, u32 field3, u32 field4) 3893 u32 field1, u32 field2, u32 field3, u32 field4)
4041{ 3894{
4042 return queue_command(xhci, field1, field2, field3, field4, false); 3895 return queue_command(xhci, cmd, field1, field2, field3, field4, false);
4043} 3896}
4044 3897
4045/* Queue a reset device command TRB */ 3898/* Queue a reset device command TRB */
4046int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id) 3899int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
3900 u32 slot_id)
4047{ 3901{
4048 return queue_command(xhci, 0, 0, 0, 3902 return queue_command(xhci, cmd, 0, 0, 0,
4049 TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id), 3903 TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
4050 false); 3904 false);
4051} 3905}
4052 3906
4053/* Queue a configure endpoint command TRB */ 3907/* Queue a configure endpoint command TRB */
4054int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, 3908int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
3909 struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
4055 u32 slot_id, bool command_must_succeed) 3910 u32 slot_id, bool command_must_succeed)
4056{ 3911{
4057 return queue_command(xhci, lower_32_bits(in_ctx_ptr), 3912 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
4058 upper_32_bits(in_ctx_ptr), 0, 3913 upper_32_bits(in_ctx_ptr), 0,
4059 TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id), 3914 TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
4060 command_must_succeed); 3915 command_must_succeed);
4061} 3916}
4062 3917
4063/* Queue an evaluate context command TRB */ 3918/* Queue an evaluate context command TRB */
4064int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, 3919int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
4065 u32 slot_id, bool command_must_succeed) 3920 dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
4066{ 3921{
4067 return queue_command(xhci, lower_32_bits(in_ctx_ptr), 3922 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
4068 upper_32_bits(in_ctx_ptr), 0, 3923 upper_32_bits(in_ctx_ptr), 0,
4069 TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id), 3924 TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
4070 command_must_succeed); 3925 command_must_succeed);
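Every wrapper now threads the caller's struct xhci_command into queue_command(), which records the enqueue slot in cmd->command_trb, links the command onto the global xhci->cmd_list, and starts the timeout timer if the list was empty. Fire-and-forget callers simply skip allocating a completion; a sketch, with allocation flags as used elsewhere in this diff:

        cmd = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
        if (!cmd)
                return -ENOMEM;
        xhci_queue_stop_endpoint(xhci, cmd, slot_id, ep_index, suspend);
        xhci_ring_cmd_db(xhci);
        /* no completion allocated: xhci_complete_del_and_free_cmd()
         * frees the command when its completion event arrives */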
@@ -4074,25 +3929,26 @@ int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
  * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
  * activity on an endpoint that is about to be suspended.
  */
-int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
-                unsigned int ep_index, int suspend)
+int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
+                int slot_id, unsigned int ep_index, int suspend)
 {
         u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
         u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
         u32 type = TRB_TYPE(TRB_STOP_RING);
         u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
 
-        return queue_command(xhci, 0, 0, 0,
+        return queue_command(xhci, cmd, 0, 0, 0,
                         trb_slot_id | trb_ep_index | type | trb_suspend, false);
 }
 
 /* Set Transfer Ring Dequeue Pointer command.
  * This should not be used for endpoints that have streams enabled.
  */
-static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
-                unsigned int ep_index, unsigned int stream_id,
-                struct xhci_segment *deq_seg,
-                union xhci_trb *deq_ptr, u32 cycle_state)
+static int queue_set_tr_deq(struct xhci_hcd *xhci, struct xhci_command *cmd,
+                int slot_id,
+                unsigned int ep_index, unsigned int stream_id,
+                struct xhci_segment *deq_seg,
+                union xhci_trb *deq_ptr, u32 cycle_state)
 {
         dma_addr_t addr;
         u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
@@ -4119,18 +3975,19 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
         ep->queued_deq_ptr = deq_ptr;
         if (stream_id)
                 trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
-        return queue_command(xhci, lower_32_bits(addr) | trb_sct | cycle_state,
+        return queue_command(xhci, cmd,
+                        lower_32_bits(addr) | trb_sct | cycle_state,
                         upper_32_bits(addr), trb_stream_id,
                         trb_slot_id | trb_ep_index | type, false);
 }
 
-int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
-                unsigned int ep_index)
+int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
+                int slot_id, unsigned int ep_index)
 {
         u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
         u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
         u32 type = TRB_TYPE(TRB_RESET_EP);
 
-        return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
-                        false);
+        return queue_command(xhci, cmd, 0, 0, 0,
+                        trb_slot_id | trb_ep_index | type, false);
 }