author     Mathias Nyman <mathias.nyman@linux.intel.com>       2014-05-08 12:26:03 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>     2014-05-19 21:03:25 -0400
commit     c311e391a7efd101250c0e123286709b7e736249 (patch)
tree       5d26094a50efde190578e3f16cef2827992706ae /drivers/usb/host
parent     9ea1833e4c210ac5580f63495be15502f275c578 (diff)
xhci: rework command timeout and cancellation
Use one timer to control command timeout.

Start/kick the timer every time a command is completed and a new command is waiting, or a new command is added to an empty list. If the timer runs out, tag the current command as "aborted" and start the xhci command abortion process.

Previously each function that submitted a command had its own timer. If that command timed out, a new command structure for the command was created and put on a cancel_cmd_list, then a PCI write to abort the command ring was issued. When the ring was aborted, the driver checked whether the current command was the one to be canceled. Later, when the ring was stopped, the driver got ownership of the TRBs in the command ring, compared them to the TRBs in the cancel_cmd_list, and turned them into No-ops.

Now, instead, at timeout we tag the status of the command in the command queue as aborted and start the ring abortion. Ring abortion stops the command ring and gives control of the commands to us. All the aborted commands are then turned into No-ops. If the ring is already stopped when the command times out, it is not possible to start the ring abortion; in that case the command is turned into a No-op right away.

All these changes allow us to remove the entire cancel_cmd_list code.

The functions waiting for a command to finish no longer have their own timeouts. They will wait either until the command completes normally, or until the whole command abortion is done.

Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
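As an illustration of the scheme described above, here is a minimal userspace C sketch of the single-timer pattern. It is not the driver code: names such as queue_command, complete_command and cmd_timeout are invented for this example, and a POSIX timer plus a mutex stand in for the kernel's struct timer_list and xhci->lock. The idea it shows is the one in the commit message: arm one timer when a command is queued onto an empty list, re-arm it after each completion while commands remain pending, and let the expiry handler only tag the currently watched command as aborted.

    /* Illustrative userspace analogue of the single command-timeout timer.
     * Build with: cc -pthread sketch.c -lrt (names here are hypothetical). */
    #include <pthread.h>
    #include <signal.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    #define CMD_TIMEOUT_SEC 5

    struct command {
            struct command *next;
            bool aborted;              /* set by the timeout handler */
            const char *name;
    };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct command *cmd_head, *cmd_tail;   /* pending commands */
    static struct command *current_cmd;           /* command the timer watches */
    static timer_t cmd_timer;

    static void arm_cmd_timer(void)
    {
            struct itimerspec its = { .it_value = { .tv_sec = CMD_TIMEOUT_SEC } };
            timer_settime(cmd_timer, 0, &its, NULL);   /* one-shot timer */
    }

    /* Timeout: only tag the watched command; a real driver would now start
     * the command-ring abort sequence. */
    static void cmd_timeout(union sigval sv)
    {
            (void)sv;
            pthread_mutex_lock(&lock);
            if (current_cmd) {
                    current_cmd->aborted = true;
                    printf("command %s timed out\n", current_cmd->name);
            }
            pthread_mutex_unlock(&lock);
    }

    /* Queue a command; start the timer only if the queue was empty. */
    static void queue_command(struct command *cmd)
    {
            pthread_mutex_lock(&lock);
            if (!cmd_head) {
                    cmd_head = cmd_tail = cmd;
                    current_cmd = cmd;
                    arm_cmd_timer();
            } else {
                    cmd_tail->next = cmd;
                    cmd_tail = cmd;
            }
            pthread_mutex_unlock(&lock);
    }

    /* Completion: stop the timer, then re-arm it for the next pending command. */
    static void complete_command(void)
    {
            struct itimerspec stop = { 0 };

            pthread_mutex_lock(&lock);
            timer_settime(cmd_timer, 0, &stop, NULL);  /* disarm, like del_timer() */
            if (cmd_head)
                    cmd_head = cmd_head->next;
            current_cmd = cmd_head;
            if (current_cmd)
                    arm_cmd_timer();                   /* kick timer for next command */
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            struct sigevent sev = {
                    .sigev_notify = SIGEV_THREAD,
                    .sigev_notify_function = cmd_timeout,
            };
            struct command a = { .name = "enable slot" };
            struct command b = { .name = "address device" };

            timer_create(CLOCK_MONOTONIC, &sev, &cmd_timer);
            queue_command(&a);
            queue_command(&b);      /* queue not empty: timer left alone */
            complete_command();     /* a done: timer re-armed for b */
            complete_command();     /* b done: queue empty, timer stopped */
            return 0;
    }

The design point mirrored here is that the timeout handler never frees or dequeues anything itself; it only marks state, while cleanup stays on the completion path. In the real patch the equivalents are xhci->cmd_timer, queue_command() and xhci_handle_command_timeout(), as shown in the diff below.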
Diffstat (limited to 'drivers/usb/host')
-rw-r--r--   drivers/usb/host/xhci-hub.c     11
-rw-r--r--   drivers/usb/host/xhci-mem.c     14
-rw-r--r--   drivers/usb/host/xhci-ring.c   378
-rw-r--r--   drivers/usb/host/xhci.c         78
-rw-r--r--   drivers/usb/host/xhci.h          8
5 files changed, 169 insertions(+), 320 deletions(-)
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 12871b5d4a2e..6231ce6aa0c3 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -271,7 +271,6 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
         struct xhci_virt_device *virt_dev;
         struct xhci_command *cmd;
         unsigned long flags;
-        int timeleft;
         int ret;
         int i;
 
@@ -304,12 +303,10 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
         spin_unlock_irqrestore(&xhci->lock, flags);
 
         /* Wait for last stop endpoint command to finish */
-        timeleft = wait_for_completion_interruptible_timeout(
-                        cmd->completion,
-                        XHCI_CMD_DEFAULT_TIMEOUT);
-        if (timeleft <= 0) {
-                xhci_warn(xhci, "%s while waiting for stop endpoint command\n",
-                                timeleft == 0 ? "Timeout" : "Signal");
+        wait_for_completion(cmd->completion);
+
+        if (cmd->status == COMP_CMD_ABORT || cmd->status == COMP_CMD_STOP) {
+                xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
                 ret = -ETIME;
         }
         xhci_free_command(xhci, cmd);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 38dc721bc8bb..6a57e81c2a76 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1793,10 +1793,11 @@ void xhci_free_command(struct xhci_hcd *xhci,
 void xhci_mem_cleanup(struct xhci_hcd *xhci)
 {
         struct device *dev = xhci_to_hcd(xhci)->self.controller;
-        struct xhci_cd *cur_cd, *next_cd;
         int size;
         int i, j, num_ports;
 
+        del_timer_sync(&xhci->cmd_timer);
+
         /* Free the Event Ring Segment Table and the actual Event Ring */
         size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
         if (xhci->erst.entries)
@@ -1815,11 +1816,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
         xhci_ring_free(xhci, xhci->cmd_ring);
         xhci->cmd_ring = NULL;
         xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
-        list_for_each_entry_safe(cur_cd, next_cd,
-                &xhci->cancel_cmd_list, cancel_cmd_list) {
-                list_del(&cur_cd->cancel_cmd_list);
-                kfree(cur_cd);
-        }
         xhci_cleanup_command_queue(xhci);
 
         for (i = 1; i < MAX_HC_SLOTS; ++i)
@@ -2323,7 +2319,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
         u32 page_size, temp;
         int i;
 
-        INIT_LIST_HEAD(&xhci->cancel_cmd_list);
         INIT_LIST_HEAD(&xhci->cmd_list);
 
         page_size = readl(&xhci->op_regs->page_size);
@@ -2510,6 +2505,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2510 "Wrote ERST address to ir_set 0."); 2505 "Wrote ERST address to ir_set 0.");
2511 xhci_print_ir_set(xhci, 0); 2506 xhci_print_ir_set(xhci, 0);
2512 2507
2508 /* init command timeout timer */
2509 init_timer(&xhci->cmd_timer);
2510 xhci->cmd_timer.data = (unsigned long) xhci;
2511 xhci->cmd_timer.function = xhci_handle_command_timeout;
2512
2513 /* 2513 /*
2514 * XXX: Might need to set the Interrupter Moderation Register to 2514 * XXX: Might need to set the Interrupter Moderation Register to
2515 * something other than the default (~1ms minimum between interrupts). 2515 * something other than the default (~1ms minimum between interrupts).
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 3d60865a3d8f..d67ff71209f5 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -287,17 +287,7 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
 
         xhci_dbg(xhci, "Abort command ring\n");
 
-        if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) {
-                xhci_dbg(xhci, "The command ring isn't running, "
-                                "Have the command ring been stopped?\n");
-                return 0;
-        }
-
         temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
-        if (!(temp_64 & CMD_RING_RUNNING)) {
-                xhci_dbg(xhci, "Command ring had been stopped\n");
-                return 0;
-        }
         xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
         xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
                         &xhci->op_regs->cmd_ring);
@@ -323,71 +313,6 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
         return 0;
 }
 
-static int xhci_queue_cd(struct xhci_hcd *xhci,
-                struct xhci_command *command,
-                union xhci_trb *cmd_trb)
-{
-        struct xhci_cd *cd;
-        cd = kzalloc(sizeof(struct xhci_cd), GFP_ATOMIC);
-        if (!cd)
-                return -ENOMEM;
-        INIT_LIST_HEAD(&cd->cancel_cmd_list);
-
-        cd->command = command;
-        cd->cmd_trb = cmd_trb;
-        list_add_tail(&cd->cancel_cmd_list, &xhci->cancel_cmd_list);
-
-        return 0;
-}
-
-/*
- * Cancel the command which has issue.
- *
- * Some commands may hang due to waiting for acknowledgement from
- * usb device. It is outside of the xHC's ability to control and
- * will cause the command ring is blocked. When it occurs software
- * should intervene to recover the command ring.
- * See Section 4.6.1.1 and 4.6.1.2
- */
-int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
-                union xhci_trb *cmd_trb)
-{
-        int retval = 0;
-        unsigned long flags;
-
-        spin_lock_irqsave(&xhci->lock, flags);
-
-        if (xhci->xhc_state & XHCI_STATE_DYING) {
-                xhci_warn(xhci, "Abort the command ring,"
-                                " but the xHCI is dead.\n");
-                retval = -ESHUTDOWN;
-                goto fail;
-        }
-
-        /* queue the cmd desriptor to cancel_cmd_list */
-        retval = xhci_queue_cd(xhci, command, cmd_trb);
-        if (retval) {
-                xhci_warn(xhci, "Queuing command descriptor failed.\n");
-                goto fail;
-        }
-
-        /* abort command ring */
-        retval = xhci_abort_cmd_ring(xhci);
-        if (retval) {
-                xhci_err(xhci, "Abort command ring failed\n");
-                if (unlikely(retval == -ESHUTDOWN)) {
-                        spin_unlock_irqrestore(&xhci->lock, flags);
-                        usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
-                        xhci_dbg(xhci, "xHCI host controller is dead.\n");
-                        return retval;
-                }
-        }
-
-fail:
-        spin_unlock_irqrestore(&xhci->lock, flags);
-        return retval;
-}
-
 void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
                 unsigned int slot_id,
                 unsigned int ep_index,
@@ -1206,164 +1131,6 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
         }
 }
 
-/* Complete the command and detele it from the devcie's command queue.
- */
-static void xhci_complete_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
-                struct xhci_command *command, u32 status)
-{
-        command->status = status;
-        list_del(&command->cmd_list);
-        if (command->completion)
-                complete(command->completion);
-        else
-                xhci_free_command(xhci, command);
-}
-
-
-/*
- * Finding the command trb need to be cancelled and modifying it to
- * NO OP command. And if the command is in device's command wait
- * list, finishing and freeing it.
- *
- * If we can't find the command trb, we think it had already been
- * executed.
- */
-static void xhci_cmd_to_noop(struct xhci_hcd *xhci, struct xhci_cd *cur_cd)
-{
-        struct xhci_segment *cur_seg;
-        union xhci_trb *cmd_trb;
-        u32 cycle_state;
-
-        if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
-                return;
-
-        /* find the current segment of command ring */
-        cur_seg = find_trb_seg(xhci->cmd_ring->first_seg,
-                        xhci->cmd_ring->dequeue, &cycle_state);
-
-        if (!cur_seg) {
-                xhci_warn(xhci, "Command ring mismatch, dequeue = %p %llx (dma)\n",
-                                xhci->cmd_ring->dequeue,
-                                (unsigned long long)
-                                xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
-                                        xhci->cmd_ring->dequeue));
-                xhci_debug_ring(xhci, xhci->cmd_ring);
-                xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
-                return;
-        }
-
-        /* find the command trb matched by cd from command ring */
-        for (cmd_trb = xhci->cmd_ring->dequeue;
-                        cmd_trb != xhci->cmd_ring->enqueue;
-                        next_trb(xhci, xhci->cmd_ring, &cur_seg, &cmd_trb)) {
-                /* If the trb is link trb, continue */
-                if (TRB_TYPE_LINK_LE32(cmd_trb->generic.field[3]))
-                        continue;
-
-                if (cur_cd->cmd_trb == cmd_trb) {
-
-                        /* If the command in device's command list, we should
-                         * finish it and free the command structure.
-                         */
-                        if (cur_cd->command)
-                                xhci_complete_cmd_in_cmd_wait_list(xhci,
-                                        cur_cd->command, COMP_CMD_STOP);
-
-                        /* get cycle state from the origin command trb */
-                        cycle_state = le32_to_cpu(cmd_trb->generic.field[3])
-                                & TRB_CYCLE;
-
-                        /* modify the command trb to NO OP command */
-                        cmd_trb->generic.field[0] = 0;
-                        cmd_trb->generic.field[1] = 0;
-                        cmd_trb->generic.field[2] = 0;
-                        cmd_trb->generic.field[3] = cpu_to_le32(
-                                TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
-                        break;
-                }
-        }
-}
-
-static void xhci_cancel_cmd_in_cd_list(struct xhci_hcd *xhci)
-{
-        struct xhci_cd *cur_cd, *next_cd;
-
-        if (list_empty(&xhci->cancel_cmd_list))
-                return;
-
-        list_for_each_entry_safe(cur_cd, next_cd,
-                        &xhci->cancel_cmd_list, cancel_cmd_list) {
-                xhci_cmd_to_noop(xhci, cur_cd);
-                list_del(&cur_cd->cancel_cmd_list);
-                kfree(cur_cd);
-        }
-}
-
-/*
- * traversing the cancel_cmd_list. If the command descriptor according
- * to cmd_trb is found, the function free it and return 1, otherwise
- * return 0.
- */
-static int xhci_search_cmd_trb_in_cd_list(struct xhci_hcd *xhci,
-                union xhci_trb *cmd_trb)
-{
-        struct xhci_cd *cur_cd, *next_cd;
-
-        if (list_empty(&xhci->cancel_cmd_list))
-                return 0;
-
-        list_for_each_entry_safe(cur_cd, next_cd,
-                        &xhci->cancel_cmd_list, cancel_cmd_list) {
-                if (cur_cd->cmd_trb == cmd_trb) {
-                        if (cur_cd->command)
-                                xhci_complete_cmd_in_cmd_wait_list(xhci,
-                                        cur_cd->command, COMP_CMD_STOP);
-                        list_del(&cur_cd->cancel_cmd_list);
-                        kfree(cur_cd);
-                        return 1;
-                }
-        }
-
-        return 0;
-}
-
-/*
- * If the cmd_trb_comp_code is COMP_CMD_ABORT, we just check whether the
- * trb pointed by the command ring dequeue pointer is the trb we want to
- * cancel or not. And if the cmd_trb_comp_code is COMP_CMD_STOP, we will
- * traverse the cancel_cmd_list to trun the all of the commands according
- * to command descriptor to NO-OP trb.
- */
-static int handle_stopped_cmd_ring(struct xhci_hcd *xhci,
-                int cmd_trb_comp_code)
-{
-        int cur_trb_is_good = 0;
-
-        /* Searching the cmd trb pointed by the command ring dequeue
-         * pointer in command descriptor list. If it is found, free it.
-         */
-        cur_trb_is_good = xhci_search_cmd_trb_in_cd_list(xhci,
-                        xhci->cmd_ring->dequeue);
-
-        if (cmd_trb_comp_code == COMP_CMD_ABORT)
-                xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
-        else if (cmd_trb_comp_code == COMP_CMD_STOP) {
-                /* traversing the cancel_cmd_list and canceling
-                 * the command according to command descriptor
-                 */
-                xhci_cancel_cmd_in_cd_list(xhci);
-
-                xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
-                /*
-                 * ring command ring doorbell again to restart the
-                 * command ring
-                 */
-                if (xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue)
-                        xhci_ring_cmd_db(xhci);
-        }
-        return cur_trb_is_good;
-}
-
 static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
                 u32 cmd_comp_code)
 {
@@ -1480,6 +1247,97 @@ void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
                 xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT);
 }
 
+/*
+ * Turn all commands on command ring with status set to "aborted" to no-op trbs.
+ * If there are other commands waiting then restart the ring and kick the timer.
+ * This must be called with command ring stopped and xhci->lock held.
+ */
+static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
+                                         struct xhci_command *cur_cmd)
+{
+        struct xhci_command *i_cmd, *tmp_cmd;
+        u32 cycle_state;
+
+        /* Turn all aborted commands in list to no-ops, then restart */
+        list_for_each_entry_safe(i_cmd, tmp_cmd, &xhci->cmd_list,
+                                 cmd_list) {
+
+                if (i_cmd->status != COMP_CMD_ABORT)
+                        continue;
+
+                i_cmd->status = COMP_CMD_STOP;
+
+                xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
+                         i_cmd->command_trb);
+                /* get cycle state from the original cmd trb */
+                cycle_state = le32_to_cpu(
+                        i_cmd->command_trb->generic.field[3]) & TRB_CYCLE;
+                /* modify the command trb to no-op command */
+                i_cmd->command_trb->generic.field[0] = 0;
+                i_cmd->command_trb->generic.field[1] = 0;
+                i_cmd->command_trb->generic.field[2] = 0;
+                i_cmd->command_trb->generic.field[3] = cpu_to_le32(
+                        TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
+
+                /*
+                 * caller waiting for completion is called when command
+                 * completion event is received for these no-op commands
+                 */
+        }
+
+        xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
+
+        /* ring command ring doorbell to restart the command ring */
+        if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
+            !(xhci->xhc_state & XHCI_STATE_DYING)) {
+                xhci->current_cmd = cur_cmd;
+                mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
+                xhci_ring_cmd_db(xhci);
+        }
+        return;
+}
+
+
+void xhci_handle_command_timeout(unsigned long data)
+{
+        struct xhci_hcd *xhci;
+        int ret;
+        unsigned long flags;
+        u64 hw_ring_state;
+        struct xhci_command *cur_cmd = NULL;
+        xhci = (struct xhci_hcd *) data;
+
+        /* mark this command to be cancelled */
+        spin_lock_irqsave(&xhci->lock, flags);
+        if (xhci->current_cmd) {
+                cur_cmd = xhci->current_cmd;
+                cur_cmd->status = COMP_CMD_ABORT;
+        }
+
+
+        /* Make sure command ring is running before aborting it */
+        hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+        if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
+            (hw_ring_state & CMD_RING_RUNNING))  {
+
+                spin_unlock_irqrestore(&xhci->lock, flags);
+                xhci_dbg(xhci, "Command timeout\n");
+                ret = xhci_abort_cmd_ring(xhci);
+                if (unlikely(ret == -ESHUTDOWN)) {
+                        xhci_err(xhci, "Abort command ring failed\n");
+                        xhci_cleanup_command_queue(xhci);
+                        usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
+                        xhci_dbg(xhci, "xHCI host controller is dead.\n");
+                }
+                return;
+        }
+        /* command timeout on stopped ring, ring can't be aborted */
+        xhci_dbg(xhci, "Command timeout on stopped ring\n");
+        xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
+        spin_unlock_irqrestore(&xhci->lock, flags);
+        return;
+}
+
 static void handle_cmd_completion(struct xhci_hcd *xhci,
                 struct xhci_event_cmd *event)
 {
@@ -1513,26 +1371,28 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
1513 "Command completion event does not match command\n"); 1371 "Command completion event does not match command\n");
1514 return; 1372 return;
1515 } 1373 }
1374
1375 del_timer(&xhci->cmd_timer);
1376
1516 trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event); 1377 trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
1517 1378
1518 cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status)); 1379 cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
1519 if (cmd_comp_code == COMP_CMD_ABORT || cmd_comp_code == COMP_CMD_STOP) { 1380
1520 /* If the return value is 0, we think the trb pointed by 1381 /* If CMD ring stopped we own the trbs between enqueue and dequeue */
1521 * command ring dequeue pointer is a good trb. The good 1382 if (cmd_comp_code == COMP_CMD_STOP) {
1522 * trb means we don't want to cancel the trb, but it have 1383 xhci_handle_stopped_cmd_ring(xhci, cmd);
1523 * been stopped by host. So we should handle it normally. 1384 return;
1524 * Otherwise, driver should invoke inc_deq() and return. 1385 }
1525 */ 1386 /*
1526 if (handle_stopped_cmd_ring(xhci, cmd_comp_code)) { 1387 * Host aborted the command ring, check if the current command was
1527 inc_deq(xhci, xhci->cmd_ring); 1388 * supposed to be aborted, otherwise continue normally.
1528 return; 1389 * The command ring is stopped now, but the xHC will issue a Command
1529 } 1390 * Ring Stopped event which will cause us to restart it.
1530 /* There is no command to handle if we get a stop event when the 1391 */
1531 * command ring is empty, event->cmd_trb points to the next 1392 if (cmd_comp_code == COMP_CMD_ABORT) {
1532 * unset command 1393 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
1533 */ 1394 if (cmd->status == COMP_CMD_ABORT)
1534 if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue) 1395 goto event_handled;
1535 return;
1536 } 1396 }
1537 1397
1538 cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3])); 1398 cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
@@ -1563,6 +1423,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
                 xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
                 break;
         case TRB_CMD_NOOP:
+                /* Is this an aborted command turned to NO-OP? */
+                if (cmd->status == COMP_CMD_STOP)
+                        cmd_comp_code = COMP_CMD_STOP;
                 break;
         case TRB_RESET_EP:
                 WARN_ON(slot_id != TRB_TO_SLOT_ID(
@@ -1583,6 +1446,14 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
                 break;
         }
 
+        /* restart timer if this wasn't the last command */
+        if (cmd->cmd_list.next != &xhci->cmd_list) {
+                xhci->current_cmd = list_entry(cmd->cmd_list.next,
+                                               struct xhci_command, cmd_list);
+                mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
+        }
+
+event_handled:
         xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);
 
         inc_deq(xhci, xhci->cmd_ring);
@@ -3988,6 +3859,13 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
         cmd->command_trb = xhci->cmd_ring->enqueue;
         list_add_tail(&cmd->cmd_list, &xhci->cmd_list);
 
+        /* if there are no other commands queued we start the timeout timer */
+        if (xhci->cmd_list.next == &cmd->cmd_list &&
+            !timer_pending(&xhci->cmd_timer)) {
+                xhci->current_cmd = cmd;
+                mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
+        }
+
         queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
                         field4 | xhci->cmd_ring->cycle_state);
         return 0;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 64c1ba353856..2b8d9a24af09 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1820,6 +1820,11 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
         int ret;
 
         switch (*cmd_status) {
+        case COMP_CMD_ABORT:
+        case COMP_CMD_STOP:
+                xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
+                ret = -ETIME;
+                break;
         case COMP_ENOMEM:
                 dev_warn(&udev->dev, "Not enough host controller resources "
                                 "for new device state.\n");
@@ -1866,6 +1871,11 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
         struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
 
         switch (*cmd_status) {
+        case COMP_CMD_ABORT:
+        case COMP_CMD_STOP:
+                xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
+                ret = -ETIME;
+                break;
         case COMP_EINVAL:
                 dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
                                 "context command.\n");
@@ -2590,7 +2600,6 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
                 bool ctx_change, bool must_succeed)
 {
         int ret;
-        int timeleft;
         unsigned long flags;
         struct xhci_input_control_ctx *ctrl_ctx;
         struct xhci_virt_device *virt_dev;
@@ -2646,21 +2655,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
         spin_unlock_irqrestore(&xhci->lock, flags);
 
         /* Wait for the configure endpoint command to complete */
-        timeleft = wait_for_completion_interruptible_timeout(
-                        command->completion,
-                        XHCI_CMD_DEFAULT_TIMEOUT);
-        if (timeleft <= 0) {
-                xhci_warn(xhci, "%s while waiting for %s command\n",
-                                timeleft == 0 ? "Timeout" : "Signal",
-                                ctx_change == 0 ?
-                                        "configure endpoint" :
-                                        "evaluate context");
-                /* cancel the configure endpoint command */
-                ret = xhci_cancel_cmd(xhci, command, command->command_trb);
-                if (ret < 0)
-                        return ret;
-                return -ETIME;
-        }
+        wait_for_completion(command->completion);
 
         if (!ctx_change)
                 ret = xhci_configure_endpoint_result(xhci, udev,
@@ -3438,7 +3433,6 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
         unsigned int slot_id;
         struct xhci_virt_device *virt_dev;
         struct xhci_command *reset_device_cmd;
-        int timeleft;
         int last_freed_endpoint;
         struct xhci_slot_ctx *slot_ctx;
         int old_active_eps = 0;
@@ -3506,15 +3500,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
         spin_unlock_irqrestore(&xhci->lock, flags);
 
         /* Wait for the Reset Device command to finish */
-        timeleft = wait_for_completion_interruptible_timeout(
-                        reset_device_cmd->completion,
-                        XHCI_CMD_DEFAULT_TIMEOUT);
-        if (timeleft <= 0) {
-                xhci_warn(xhci, "%s while waiting for reset device command\n",
-                                timeleft == 0 ? "Timeout" : "Signal");
-                ret = -ETIME;
-                goto command_cleanup;
-        }
+        wait_for_completion(reset_device_cmd->completion);
 
         /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
          * unless we tried to reset a slot ID that wasn't enabled,
@@ -3522,6 +3508,11 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
          */
         ret = reset_device_cmd->status;
         switch (ret) {
+        case COMP_CMD_ABORT:
+        case COMP_CMD_STOP:
+                xhci_warn(xhci, "Timeout waiting for reset device command\n");
+                ret = -ETIME;
+                goto command_cleanup;
         case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
         case COMP_CTX_STATE: /* 0.96 completion code for same thing */
                 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
@@ -3691,7 +3682,6 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
 {
         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
         unsigned long flags;
-        int timeleft;
         int ret;
         struct xhci_command *command;
 
@@ -3711,19 +3701,9 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
         xhci_ring_cmd_db(xhci);
         spin_unlock_irqrestore(&xhci->lock, flags);
 
-        /* XXX: how much time for xHC slot assignment? */
-        timeleft = wait_for_completion_interruptible_timeout(
-                        command->completion,
-                        XHCI_CMD_DEFAULT_TIMEOUT);
-        if (timeleft <= 0) {
-                xhci_warn(xhci, "%s while waiting for a slot\n",
-                                timeleft == 0 ? "Timeout" : "Signal");
-                /* cancel the enable slot request */
-                ret = xhci_cancel_cmd(xhci, NULL, command->command_trb);
-                return ret;
-        }
+        wait_for_completion(command->completion);
 
-        if (!xhci->slot_id) {
+        if (!xhci->slot_id || command->status != COMP_SUCCESS) {
                 xhci_err(xhci, "Error while assigning device slot ID\n");
                 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
                                 HCS_MAX_SLOTS(
@@ -3792,7 +3772,6 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
 {
         const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
         unsigned long flags;
-        int timeleft;
         struct xhci_virt_device *virt_dev;
         int ret = 0;
         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -3867,23 +3846,18 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
         spin_unlock_irqrestore(&xhci->lock, flags);
 
         /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
-        timeleft = wait_for_completion_interruptible_timeout(
-                        command->completion, XHCI_CMD_DEFAULT_TIMEOUT);
+        wait_for_completion(command->completion);
+
         /* FIXME: From section 4.3.4: "Software shall be responsible for timing
          * the SetAddress() "recovery interval" required by USB and aborting the
          * command on a timeout.
          */
-        if (timeleft <= 0) {
-                xhci_warn(xhci, "%s while waiting for setup %s command\n",
-                                timeleft == 0 ? "Timeout" : "Signal", act);
-                /* cancel the address device command */
-                ret = xhci_cancel_cmd(xhci, NULL, command->command_trb);
-                if (ret < 0)
-                        return ret;
-                return -ETIME;
-        }
-
         switch (command->status) {
+        case COMP_CMD_ABORT:
+        case COMP_CMD_STOP:
+                xhci_warn(xhci, "Timeout while waiting for setup device command\n");
+                ret = -ETIME;
+                break;
         case COMP_CTX_STATE:
         case COMP_EBADSLT:
                 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index fde57b09a9bd..2774526449a6 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1295,7 +1295,6 @@ struct xhci_td {
 
 /* command descriptor */
 struct xhci_cd {
-        struct list_head        cancel_cmd_list;
         struct xhci_command     *command;
         union xhci_trb          *cmd_trb;
 };
@@ -1480,9 +1479,10 @@ struct xhci_hcd {
 #define CMD_RING_STATE_RUNNING         (1 << 0)
 #define CMD_RING_STATE_ABORTED         (1 << 1)
 #define CMD_RING_STATE_STOPPED         (1 << 2)
-        struct list_head        cancel_cmd_list;
         struct list_head        cmd_list;
         unsigned int            cmd_ring_reserved_trbs;
+        struct timer_list       cmd_timer;
+        struct xhci_command     *current_cmd;
         struct xhci_ring        *event_ring;
         struct xhci_erst        erst;
         /* Scratchpad */
@@ -1845,8 +1845,8 @@ void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
                 unsigned int slot_id, unsigned int ep_index,
                 struct xhci_dequeue_state *deq_state);
 void xhci_stop_endpoint_command_watchdog(unsigned long arg);
-int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
-                union xhci_trb *cmd_trb);
+void xhci_handle_command_timeout(unsigned long data);
+
 void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
                 unsigned int ep_index, unsigned int stream_id);
 void xhci_cleanup_command_queue(struct xhci_hcd *xhci);