Diffstat (limited to 'drivers/usb/host/xhci.c')
-rw-r--r--	drivers/usb/host/xhci.c	978
1 file changed, 816 insertions(+), 162 deletions(-)
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index d5c550ea3e68..f5fe1ac301ab 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -93,23 +93,26 @@ void xhci_quiesce(struct xhci_hcd *xhci)
  *
  * Disable any IRQs and clear the run/stop bit.
  * HC will complete any current and actively pipelined transactions, and
- * should halt within 16 microframes of the run/stop bit being cleared.
+ * should halt within 16 ms of the run/stop bit being cleared.
  * Read HC Halted bit in the status register to see when the HC is finished.
- * XXX: shouldn't we set HC_STATE_HALT here somewhere?
  */
 int xhci_halt(struct xhci_hcd *xhci)
 {
+	int ret;
 	xhci_dbg(xhci, "// Halt the HC\n");
 	xhci_quiesce(xhci);
 
-	return handshake(xhci, &xhci->op_regs->status,
+	ret = handshake(xhci, &xhci->op_regs->status,
 			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
+	if (!ret)
+		xhci->xhc_state |= XHCI_STATE_HALTED;
+	return ret;
 }
 
 /*
  * Set the run bit and wait for the host to be running.
  */
-int xhci_start(struct xhci_hcd *xhci)
+static int xhci_start(struct xhci_hcd *xhci)
 {
 	u32 temp;
 	int ret;
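Aside: the handshake() helper that xhci_halt() leans on here is a bounded register poll. A minimal sketch of its behavior, assuming the xhci_readl()/udelay() primitives used elsewhere in this file (the in-tree version additionally bails out if the register reads back as all ones, i.e. the host is gone):

	/* Sketch only: poll a register until (value & mask) == done, or the
	 * timeout (in microseconds) expires.  Mirrors how xhci_halt() waits
	 * for STS_HALT above. */
	static int handshake_sketch(struct xhci_hcd *xhci, void __iomem *ptr,
			u32 mask, u32 done, int usec)
	{
		u32 result;

		do {
			result = xhci_readl(xhci, ptr);
			if ((result & mask) == done)
				return 0;	/* bits reached the desired state */
			udelay(1);
			usec--;
		} while (usec > 0);
		return -ETIMEDOUT;	/* e.g. STS_HALT never asserted */
	}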
@@ -130,11 +133,13 @@ int xhci_start(struct xhci_hcd *xhci)
 		xhci_err(xhci, "Host took too long to start, "
 				"waited %u microseconds.\n",
 				XHCI_MAX_HALT_USEC);
+	if (!ret)
+		xhci->xhc_state &= ~XHCI_STATE_HALTED;
 	return ret;
 }
 
 /*
- * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
+ * Reset a halted HC.
  *
  * This resets pipelines, timers, counters, state machines, etc.
  * Transactions will be terminated immediately, and operational registers
@@ -156,8 +161,6 @@ int xhci_reset(struct xhci_hcd *xhci)
 	command = xhci_readl(xhci, &xhci->op_regs->command);
 	command |= CMD_RESET;
 	xhci_writel(xhci, command, &xhci->op_regs->command);
-	/* XXX: Why does EHCI set this here?  Shouldn't other code do this? */
-	xhci_to_hcd(xhci)->state = HC_STATE_HALT;
 
 	ret = handshake(xhci, &xhci->op_regs->command,
 			CMD_RESET, 0, 250 * 1000);
@@ -226,7 +229,8 @@ static int xhci_setup_msi(struct xhci_hcd *xhci)
 static int xhci_setup_msix(struct xhci_hcd *xhci)
 {
 	int i, ret = 0;
-	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+	struct usb_hcd *hcd = xhci_to_hcd(xhci);
+	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
 
 	/*
 	 * calculate number of msi-x vectors supported.
@@ -265,6 +269,7 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
 		goto disable_msix;
 	}
 
+	hcd->msix_enabled = 1;
 	return ret;
 
 disable_msix:
@@ -280,7 +285,8 @@ free_entries:
 /* Free any IRQs and disable MSI-X */
 static void xhci_cleanup_msix(struct xhci_hcd *xhci)
 {
-	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+	struct usb_hcd *hcd = xhci_to_hcd(xhci);
+	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
 
 	xhci_free_irq(xhci);
 
@@ -292,6 +298,7 @@ static void xhci_cleanup_msix(struct xhci_hcd *xhci)
 		pci_disable_msi(pdev);
 	}
 
+	hcd->msix_enabled = 0;
 	return;
 }
 
@@ -325,7 +332,7 @@ int xhci_init(struct usb_hcd *hcd)
 
 
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-void xhci_event_ring_work(unsigned long arg)
+static void xhci_event_ring_work(unsigned long arg)
 {
 	unsigned long flags;
 	int temp;
@@ -346,7 +353,6 @@ void xhci_event_ring_work(unsigned long arg)
 
 	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
 	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
-	xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled);
 	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
 	xhci->error_bitmask = 0;
 	xhci_dbg(xhci, "Event ring:\n");
@@ -366,10 +372,6 @@ void xhci_event_ring_work(unsigned long arg)
 				xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
 		}
 	}
-
-	if (xhci->noops_submitted != NUM_TEST_NOOPS)
-		if (xhci_setup_one_noop(xhci))
-			xhci_ring_cmd_db(xhci);
 	spin_unlock_irqrestore(&xhci->lock, flags);
 
 	if (!xhci->zombie)
@@ -379,6 +381,21 @@ void xhci_event_ring_work(unsigned long arg)
 }
 #endif
 
+static int xhci_run_finished(struct xhci_hcd *xhci)
+{
+	if (xhci_start(xhci)) {
+		xhci_halt(xhci);
+		return -ENODEV;
+	}
+	xhci->shared_hcd->state = HC_STATE_RUNNING;
+
+	if (xhci->quirks & XHCI_NEC_HOST)
+		xhci_ring_cmd_db(xhci);
+
+	xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
+	return 0;
+}
+
 /*
  * Start the HC after it was halted.
  *
@@ -398,9 +415,14 @@ int xhci_run(struct usb_hcd *hcd)
 	u32 ret;
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
-	void (*doorbell)(struct xhci_hcd *) = NULL;
+
+	/* Start the xHCI host controller running only after the USB 2.0 roothub
+	 * is set up.
+	 */
 
 	hcd->uses_new_polling = 1;
+	if (!usb_hcd_is_primary_hcd(hcd))
+		return xhci_run_finished(xhci);
 
 	xhci_dbg(xhci, "xhci_run\n");
 	/* unregister the legacy interrupt */
@@ -408,12 +430,19 @@ int xhci_run(struct usb_hcd *hcd)
 		free_irq(hcd->irq, hcd);
 	hcd->irq = -1;
 
+	/* Some Fresco Logic host controllers advertise MSI, but fail to
+	 * generate interrupts.  Don't even try to enable MSI.
+	 */
+	if (xhci->quirks & XHCI_BROKEN_MSI)
+		goto legacy_irq;
+
 	ret = xhci_setup_msix(xhci);
 	if (ret)
 		/* fall back to msi*/
 		ret = xhci_setup_msi(xhci);
 
 	if (ret) {
+legacy_irq:
 		/* fall back to legacy interrupt*/
 		ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
 				hcd->irq_descr, hcd);
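Aside: after this hunk the interrupt setup falls back in three steps, with the Fresco Logic quirk jumping straight to the last one. In outline (a sketch of the control flow, not the literal driver code):

	if (xhci->quirks & XHCI_BROKEN_MSI)
		goto legacy;			/* quirky hardware: skip MSI entirely */
	if (xhci_setup_msix(xhci) == 0)
		return 0;			/* per-vector MSI-X interrupts */
	if (xhci_setup_msi(xhci) == 0)
		return 0;			/* single MSI vector */
	legacy:
	return request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
			hcd->irq_descr, hcd);	/* shared PCI INTx line */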
@@ -457,7 +486,6 @@ int xhci_run(struct usb_hcd *hcd)
 	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);
 
 	/* Set the HCD state before we enable the irqs */
-	hcd->state = HC_STATE_RUNNING;
 	temp = xhci_readl(xhci, &xhci->op_regs->command);
 	temp |= (CMD_EIE);
 	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
@@ -469,26 +497,29 @@ int xhci_run(struct usb_hcd *hcd)
 			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
 	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
 			&xhci->ir_set->irq_pending);
-	xhci_print_ir_set(xhci, xhci->ir_set, 0);
+	xhci_print_ir_set(xhci, 0);
 
-	if (NUM_TEST_NOOPS > 0)
-		doorbell = xhci_setup_one_noop(xhci);
 	if (xhci->quirks & XHCI_NEC_HOST)
 		xhci_queue_vendor_command(xhci, 0, 0, 0,
 				TRB_TYPE(TRB_NEC_GET_FW));
 
-	if (xhci_start(xhci)) {
-		xhci_halt(xhci);
-		return -ENODEV;
-	}
+	xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
+	return 0;
+}
 
-	if (doorbell)
-		(*doorbell)(xhci);
-	if (xhci->quirks & XHCI_NEC_HOST)
-		xhci_ring_cmd_db(xhci);
+static void xhci_only_stop_hcd(struct usb_hcd *hcd)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 
-	xhci_dbg(xhci, "Finished xhci_run\n");
-	return 0;
+	spin_lock_irq(&xhci->lock);
+	xhci_halt(xhci);
+
+	/* The shared_hcd is going to be deallocated shortly (the USB core only
+	 * calls this function when allocation fails in usb_add_hcd(), or
+	 * usb_remove_hcd() is called).  So we need to unset xHCI's pointer.
+	 */
+	xhci->shared_hcd = NULL;
+	spin_unlock_irq(&xhci->lock);
 }
 
 /*
@@ -505,25 +536,37 @@ void xhci_stop(struct usb_hcd *hcd)
 	u32 temp;
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 
+	if (!usb_hcd_is_primary_hcd(hcd)) {
+		xhci_only_stop_hcd(xhci->shared_hcd);
+		return;
+	}
+
 	spin_lock_irq(&xhci->lock);
+	/* Make sure the xHC is halted for a USB3 roothub
+	 * (xhci_stop() could be called as part of failed init).
+	 */
 	xhci_halt(xhci);
 	xhci_reset(xhci);
-	xhci_cleanup_msix(xhci);
 	spin_unlock_irq(&xhci->lock);
 
+	xhci_cleanup_msix(xhci);
+
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
 	/* Tell the event ring poll function not to reschedule */
 	xhci->zombie = 1;
 	del_timer_sync(&xhci->event_ring_timer);
 #endif
 
+	if (xhci->quirks & XHCI_AMD_PLL_FIX)
+		usb_amd_dev_put();
+
 	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
 	temp = xhci_readl(xhci, &xhci->op_regs->status);
 	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
 	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
 	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
 			&xhci->ir_set->irq_pending);
-	xhci_print_ir_set(xhci, xhci->ir_set, 0);
+	xhci_print_ir_set(xhci, 0);
 
 	xhci_dbg(xhci, "cleaning up memory\n");
 	xhci_mem_cleanup(xhci);
@@ -537,6 +580,8 @@ void xhci_stop(struct usb_hcd *hcd)
  * This is called when the machine is rebooting or halting.  We assume that the
  * machine will be powered off, and the HC's internal state will be reset.
  * Don't bother to free memory.
+ *
+ * This will only ever be called with the main usb_hcd (the USB3 roothub).
  */
 void xhci_shutdown(struct usb_hcd *hcd)
 {
@@ -544,13 +589,284 @@ void xhci_shutdown(struct usb_hcd *hcd)
 
 	spin_lock_irq(&xhci->lock);
 	xhci_halt(xhci);
-	xhci_cleanup_msix(xhci);
 	spin_unlock_irq(&xhci->lock);
 
+	xhci_cleanup_msix(xhci);
+
 	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
 			xhci_readl(xhci, &xhci->op_regs->status));
 }
 
+#ifdef CONFIG_PM
+static void xhci_save_registers(struct xhci_hcd *xhci)
+{
+	xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
+	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
+	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
+	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
+	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
+	xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
+	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
+	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+}
+
+static void xhci_restore_registers(struct xhci_hcd *xhci)
+{
+	xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
+	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
+	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
+	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
+	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
+	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
+	xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
+	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
+}
+
+static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
+{
+	u64 val_64;
+
+	/* step 2: initialize command ring buffer */
+	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
+		 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
+				       xhci->cmd_ring->dequeue) &
+		 (u64) ~CMD_RING_RSVD_BITS) |
+		 xhci->cmd_ring->cycle_state;
+	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
+			(long unsigned long) val_64);
+	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
+}
+
+/*
+ * The whole command ring must be cleared to zero when we suspend the host.
+ *
+ * The host doesn't save the command ring pointer in the suspend well, so we
+ * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
+ * aligned, because of the reserved bits in the command ring dequeue pointer
+ * register.  Therefore, we can't just set the dequeue pointer back in the
+ * middle of the ring (TRBs are 16-byte aligned).
+ */
+static void xhci_clear_command_ring(struct xhci_hcd *xhci)
+{
+	struct xhci_ring *ring;
+	struct xhci_segment *seg;
+
+	ring = xhci->cmd_ring;
+	seg = ring->deq_seg;
+	do {
+		memset(seg->trbs, 0, SEGMENT_SIZE);
+		seg = seg->next;
+	} while (seg != ring->deq_seg);
+
+	/* Reset the software enqueue and dequeue pointers */
+	ring->deq_seg = ring->first_seg;
+	ring->dequeue = ring->first_seg->trbs;
+	ring->enq_seg = ring->deq_seg;
+	ring->enqueue = ring->dequeue;
+
+	/*
+	 * Ring is now zeroed, so the HW should look for change of ownership
+	 * when the cycle bit is set to 1.
+	 */
+	ring->cycle_state = 1;
+
+	/*
+	 * Reset the hardware dequeue pointer.
+	 * Yes, this will need to be re-written after resume, but we're paranoid
+	 * and want to make sure the hardware doesn't access bogus memory
+	 * because, say, the BIOS or an SMI started the host without changing
+	 * the command ring pointers.
+	 */
+	xhci_set_cmd_ring_deq(xhci);
+}
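Aside: the 64-byte alignment constraint described in the comment above is what forces the full zeroing. CMD_RING_RSVD_BITS reserves the low 6 bits of the command ring register for flags, so only 64-byte-aligned pointers are representable, while TRBs are 16 bytes. A worked illustration with a hypothetical address:

	u64 seg_base = 0x10000000;	/* segment start, 64-byte aligned */
	u64 deq = seg_base + 3 * 16;	/* three TRBs in = 0x10000030 */
	/* Writing deq to the register would lose the low 6 bits (0x30) to
	 * the flag field, leaving the hardware pointing at seg_base, a
	 * different TRB.  Hence: zero the whole ring and restart from
	 * seg_base instead of resuming mid-segment. */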
+
+/*
+ * Stop HC (not bus-specific)
+ *
+ * This is called when the machine transitions into S3/S4 mode.
+ */
+int xhci_suspend(struct xhci_hcd *xhci)
+{
+	int rc = 0;
+	struct usb_hcd *hcd = xhci_to_hcd(xhci);
+	u32 command;
+	int i;
+
+	spin_lock_irq(&xhci->lock);
+	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
+	/* step 1: stop endpoint */
+	/* skipped, assuming that port suspend has already been done */
+
+	/* step 2: clear Run/Stop bit */
+	command = xhci_readl(xhci, &xhci->op_regs->command);
+	command &= ~CMD_RUN;
+	xhci_writel(xhci, command, &xhci->op_regs->command);
+	if (handshake(xhci, &xhci->op_regs->status,
+		      STS_HALT, STS_HALT, 100*100)) {
+		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
+		spin_unlock_irq(&xhci->lock);
+		return -ETIMEDOUT;
+	}
+	xhci_clear_command_ring(xhci);
+
+	/* step 3: save registers */
+	xhci_save_registers(xhci);
+
+	/* step 4: set CSS flag */
+	command = xhci_readl(xhci, &xhci->op_regs->command);
+	command |= CMD_CSS;
+	xhci_writel(xhci, command, &xhci->op_regs->command);
+	if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) {
+		xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n");
+		spin_unlock_irq(&xhci->lock);
+		return -ETIMEDOUT;
+	}
+	spin_unlock_irq(&xhci->lock);
+
+	/* step 5: remove core well power */
+	/* synchronize irq when using MSI-X */
+	if (xhci->msix_entries) {
+		for (i = 0; i < xhci->msix_count; i++)
+			synchronize_irq(xhci->msix_entries[i].vector);
+	}
+
+	return rc;
+}
+
+/*
+ * start xHC (not bus-specific)
+ *
+ * This is called when the machine transitions out of S3/S4 mode.
+ */
+int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+{
+	u32 command, temp = 0;
+	struct usb_hcd *hcd = xhci_to_hcd(xhci);
+	struct usb_hcd *secondary_hcd;
+	int retval;
+
+	/* Wait a bit if either of the roothubs need to settle from the
+	 * transition into bus suspend.
+	 */
+	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
+			time_before(jiffies,
+				xhci->bus_state[1].next_statechange))
+		msleep(100);
+
+	spin_lock_irq(&xhci->lock);
+	if (xhci->quirks & XHCI_RESET_ON_RESUME)
+		hibernated = true;
+
+	if (!hibernated) {
+		/* step 1: restore register */
+		xhci_restore_registers(xhci);
+		/* step 2: initialize command ring buffer */
+		xhci_set_cmd_ring_deq(xhci);
+		/* step 3: restore state and start state */
+		/* step 3: set CRS flag */
+		command = xhci_readl(xhci, &xhci->op_regs->command);
+		command |= CMD_CRS;
+		xhci_writel(xhci, command, &xhci->op_regs->command);
+		if (handshake(xhci, &xhci->op_regs->status,
+			      STS_RESTORE, 0, 10*100)) {
+			xhci_dbg(xhci, "WARN: xHC CMD_CSS timeout\n");
+			spin_unlock_irq(&xhci->lock);
+			return -ETIMEDOUT;
+		}
+		temp = xhci_readl(xhci, &xhci->op_regs->status);
+	}
+
+	/* If restore operation fails, re-initialize the HC during resume */
+	if ((temp & STS_SRE) || hibernated) {
+		/* Let the USB core know _both_ roothubs lost power. */
+		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
+		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
+
+		xhci_dbg(xhci, "Stop HCD\n");
+		xhci_halt(xhci);
+		xhci_reset(xhci);
+		spin_unlock_irq(&xhci->lock);
+		xhci_cleanup_msix(xhci);
+
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+		/* Tell the event ring poll function not to reschedule */
+		xhci->zombie = 1;
+		del_timer_sync(&xhci->event_ring_timer);
+#endif
+
+		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
+		temp = xhci_readl(xhci, &xhci->op_regs->status);
+		xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
+		temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+		xhci_writel(xhci, ER_IRQ_DISABLE(temp),
+				&xhci->ir_set->irq_pending);
+		xhci_print_ir_set(xhci, 0);
+
+		xhci_dbg(xhci, "cleaning up memory\n");
+		xhci_mem_cleanup(xhci);
+		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
+			    xhci_readl(xhci, &xhci->op_regs->status));
+
+		/* USB core calls the PCI reinit and start functions twice:
+		 * first with the primary HCD, and then with the secondary HCD.
+		 * If we don't do the same, the host will never be started.
+		 */
+		if (!usb_hcd_is_primary_hcd(hcd))
+			secondary_hcd = hcd;
+		else
+			secondary_hcd = xhci->shared_hcd;
+
+		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
+		retval = xhci_init(hcd->primary_hcd);
+		if (retval)
+			return retval;
+		xhci_dbg(xhci, "Start the primary HCD\n");
+		retval = xhci_run(hcd->primary_hcd);
+		if (retval)
+			goto failed_restart;
+
+		xhci_dbg(xhci, "Start the secondary HCD\n");
+		retval = xhci_run(secondary_hcd);
+		if (!retval) {
+			set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+			set_bit(HCD_FLAG_HW_ACCESSIBLE,
+					&xhci->shared_hcd->flags);
+		}
+failed_restart:
+		hcd->state = HC_STATE_SUSPENDED;
+		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
+		return retval;
+	}
+
+	/* step 4: set Run/Stop bit */
+	command = xhci_readl(xhci, &xhci->op_regs->command);
+	command |= CMD_RUN;
+	xhci_writel(xhci, command, &xhci->op_regs->command);
+	handshake(xhci, &xhci->op_regs->status, STS_HALT,
+		  0, 250 * 1000);
+
+	/* step 5: walk topology and initialize portsc,
+	 * portpmsc and portli
+	 */
+	/* this is done in bus_resume */
+
+	/* step 6: restart each of the previously
+	 * Running endpoints by ringing their doorbells
+	 */
+
+	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
+
+	spin_unlock_irq(&xhci->lock);
+	return 0;
+}
+#endif	/* CONFIG_PM */
+
 /*-------------------------------------------------------------------------*/
 
 /**
@@ -606,8 +922,12 @@ unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
 /* Returns 1 if the arguments are OK;
  * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
  */
-int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
-		struct usb_host_endpoint *ep, int check_ep, const char *func) {
+static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
+		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
+		const char *func) {
+	struct xhci_hcd *xhci;
+	struct xhci_virt_device *virt_dev;
+
 	if (!hcd || (check_ep && !ep) || !udev) {
 		printk(KERN_DEBUG "xHCI %s called with invalid args\n",
 				func);
@@ -618,11 +938,24 @@ int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
 				func);
 		return 0;
 	}
-	if (!udev->slot_id) {
-		printk(KERN_DEBUG "xHCI %s called with unaddressed device\n",
-				func);
-		return -EINVAL;
+
+	if (check_virt_dev) {
+		xhci = hcd_to_xhci(hcd);
+		if (!udev->slot_id || !xhci->devs
+			|| !xhci->devs[udev->slot_id]) {
+			printk(KERN_DEBUG "xHCI %s called with unaddressed "
+						"device\n", func);
+			return -EINVAL;
+		}
+
+		virt_dev = xhci->devs[udev->slot_id];
+		if (virt_dev->udev != udev) {
+			printk(KERN_DEBUG "xHCI %s called with udev and "
+					"virt_dev does not match\n", func);
+			return -EINVAL;
+		}
 	}
+
 	return 1;
 }
 
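Aside: the tri-state return convention documented above (1 = arguments OK, 0 = root hub, negative = error) means callers must bail out on anything <= 0 before touching xhci->devs[]. The call sites changed by this patch all follow the same pattern, e.g.:

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret;	/* 0 for a root hub, -EINVAL for bad args */
	xhci = hcd_to_xhci(hcd);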
@@ -649,8 +982,8 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
 
 	out_ctx = xhci->devs[slot_id]->out_ctx;
 	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
-	hw_max_packet_size = MAX_PACKET_DECODED(ep_ctx->ep_info2);
-	max_packet_size = urb->dev->ep0.desc.wMaxPacketSize;
+	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
+	max_packet_size = le16_to_cpu(urb->dev->ep0.desc.wMaxPacketSize);
 	if (hw_max_packet_size != max_packet_size) {
 		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
 		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
@@ -664,15 +997,15 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
 				xhci->devs[slot_id]->out_ctx, ep_index);
 		in_ctx = xhci->devs[slot_id]->in_ctx;
 		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
-		ep_ctx->ep_info2 &= ~MAX_PACKET_MASK;
-		ep_ctx->ep_info2 |= MAX_PACKET(max_packet_size);
+		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
+		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
 
 		/* Set up the input context flags for the command */
 		/* FIXME: This won't work if a non-default control endpoint
 		 * changes max packet sizes.
 		 */
 		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
-		ctrl_ctx->add_flags = EP0_FLAG;
+		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
 		ctrl_ctx->drop_flags = 0;
 
 		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
@@ -686,7 +1019,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
 		/* Clean up the input context for later use by bandwidth
 		 * functions.
 		 */
-		ctrl_ctx->add_flags = SLOT_FLAG;
+		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
 	}
 	return ret;
 }
@@ -704,18 +1037,13 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 	struct urb_priv	*urb_priv;
 	int size, i;
 
-	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
+	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
+					true, true, __func__) <= 0)
 		return -EINVAL;
 
 	slot_id = urb->dev->slot_id;
 	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
 
-	if (!xhci->devs || !xhci->devs[slot_id]) {
-		if (!in_interrupt())
-			dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n");
-		ret = -EINVAL;
-		goto exit;
-	}
 	if (!HCD_HW_ACCESSIBLE(hcd)) {
 		if (!in_interrupt())
 			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
@@ -904,13 +1232,13 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	if (ret || !urb->hcpriv)
 		goto done;
 	temp = xhci_readl(xhci, &xhci->op_regs->status);
-	if (temp == 0xffffffff) {
+	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
 		xhci_dbg(xhci, "HW died, freeing TD.\n");
 		urb_priv = urb->hcpriv;
 
 		usb_hcd_unlink_urb_from_ep(hcd, urb);
 		spin_unlock_irqrestore(&xhci->lock, flags);
-		usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, -ESHUTDOWN);
+		usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
 		xhci_urb_free_priv(xhci, urb_priv);
 		return ret;
 	}
@@ -956,7 +1284,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 		ep->stop_cmd_timer.expires = jiffies +
 			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
 		add_timer(&ep->stop_cmd_timer);
-		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
+		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
 		xhci_ring_cmd_db(xhci);
 	}
 done:
@@ -991,12 +1319,14 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	u32 new_add_flags, new_drop_flags, new_slot_info;
 	int ret;
 
-	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
+	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
 	if (ret <= 0)
 		return ret;
 	xhci = hcd_to_xhci(hcd);
-	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+	if (xhci->xhc_state & XHCI_STATE_DYING)
+		return -ENODEV;
 
+	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
 	drop_flag = xhci_get_endpoint_flag(&ep->desc);
 	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
 		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
@@ -1004,12 +1334,6 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 		return 0;
 	}
 
-	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
-		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
-				__func__);
-		return -EINVAL;
-	}
-
 	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
 	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
 	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
@@ -1018,27 +1342,30 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	/* If the HC already knows the endpoint is disabled,
 	 * or the HCD has noted it is disabled, ignore this request
 	 */
-	if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
-			ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
+	if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
+	    EP_STATE_DISABLED ||
+	    le32_to_cpu(ctrl_ctx->drop_flags) &
+	    xhci_get_endpoint_flag(&ep->desc)) {
 		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
 				__func__, ep);
 		return 0;
 	}
 
-	ctrl_ctx->drop_flags |= drop_flag;
-	new_drop_flags = ctrl_ctx->drop_flags;
+	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
+	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
 
-	ctrl_ctx->add_flags &= ~drop_flag;
-	new_add_flags = ctrl_ctx->add_flags;
+	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
+	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
 
-	last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags);
+	last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
 	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
 	/* Update the last valid endpoint context, if we deleted the last one */
-	if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
-		slot_ctx->dev_info &= ~LAST_CTX_MASK;
-		slot_ctx->dev_info |= LAST_CTX(last_ctx);
+	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
+	    LAST_CTX(last_ctx)) {
+		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
+		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
 	}
-	new_slot_info = slot_ctx->dev_info;
+	new_slot_info = le32_to_cpu(slot_ctx->dev_info);
 
 	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
 
@@ -1076,15 +1403,18 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	u32 added_ctxs;
 	unsigned int last_ctx;
 	u32 new_add_flags, new_drop_flags, new_slot_info;
+	struct xhci_virt_device *virt_dev;
 	int ret = 0;
 
-	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
+	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
 	if (ret <= 0) {
 		/* So we won't queue a reset ep command for a root hub */
 		ep->hcpriv = NULL;
 		return ret;
 	}
 	xhci = hcd_to_xhci(hcd);
+	if (xhci->xhc_state & XHCI_STATE_DYING)
+		return -ENODEV;
 
 	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
 	last_ctx = xhci_last_valid_endpoint(added_ctxs);
1098 | return 0; | 1428 | return 0; |
1099 | } | 1429 | } |
1100 | 1430 | ||
1101 | if (!xhci->devs || !xhci->devs[udev->slot_id]) { | 1431 | virt_dev = xhci->devs[udev->slot_id]; |
1102 | xhci_warn(xhci, "xHCI %s called with unaddressed device\n", | 1432 | in_ctx = virt_dev->in_ctx; |
1103 | __func__); | 1433 | out_ctx = virt_dev->out_ctx; |
1104 | return -EINVAL; | ||
1105 | } | ||
1106 | |||
1107 | in_ctx = xhci->devs[udev->slot_id]->in_ctx; | ||
1108 | out_ctx = xhci->devs[udev->slot_id]->out_ctx; | ||
1109 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); | 1434 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); |
1110 | ep_index = xhci_get_endpoint_index(&ep->desc); | 1435 | ep_index = xhci_get_endpoint_index(&ep->desc); |
1111 | ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); | 1436 | ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); |
1437 | |||
1438 | /* If this endpoint is already in use, and the upper layers are trying | ||
1439 | * to add it again without dropping it, reject the addition. | ||
1440 | */ | ||
1441 | if (virt_dev->eps[ep_index].ring && | ||
1442 | !(le32_to_cpu(ctrl_ctx->drop_flags) & | ||
1443 | xhci_get_endpoint_flag(&ep->desc))) { | ||
1444 | xhci_warn(xhci, "Trying to add endpoint 0x%x " | ||
1445 | "without dropping it.\n", | ||
1446 | (unsigned int) ep->desc.bEndpointAddress); | ||
1447 | return -EINVAL; | ||
1448 | } | ||
1449 | |||
1112 | /* If the HCD has already noted the endpoint is enabled, | 1450 | /* If the HCD has already noted the endpoint is enabled, |
1113 | * ignore this request. | 1451 | * ignore this request. |
1114 | */ | 1452 | */ |
1115 | if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) { | 1453 | if (le32_to_cpu(ctrl_ctx->add_flags) & |
1454 | xhci_get_endpoint_flag(&ep->desc)) { | ||
1116 | xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", | 1455 | xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", |
1117 | __func__, ep); | 1456 | __func__, ep); |
1118 | return 0; | 1457 | return 0; |
@@ -1123,15 +1462,14 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	 * process context, not interrupt context (or so documentation
 	 * for usb_set_interface() and usb_set_configuration() claim).
 	 */
-	if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
-				udev, ep, GFP_NOIO) < 0) {
+	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
 		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
 				__func__, ep->desc.bEndpointAddress);
 		return -ENOMEM;
 	}
 
-	ctrl_ctx->add_flags |= added_ctxs;
-	new_add_flags = ctrl_ctx->add_flags;
+	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
+	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
 
 	/* If xhci_endpoint_disable() was called for this endpoint, but the
 	 * xHC hasn't been notified yet through the check_bandwidth() call,
@@ -1139,15 +1477,16 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	 * descriptors.  We must drop and re-add this endpoint, so we leave the
 	 * drop flags alone.
 	 */
-	new_drop_flags = ctrl_ctx->drop_flags;
+	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
 
 	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
 	/* Update the last valid endpoint context, if we just added one past */
-	if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
-		slot_ctx->dev_info &= ~LAST_CTX_MASK;
-		slot_ctx->dev_info |= LAST_CTX(last_ctx);
+	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) <
+	    LAST_CTX(last_ctx)) {
+		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
+		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
 	}
-	new_slot_info = slot_ctx->dev_info;
+	new_slot_info = le32_to_cpu(slot_ctx->dev_info);
 
 	/* Store the usb_device pointer for later use */
 	ep->hcpriv = udev;
@@ -1177,9 +1516,9 @@ static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *vir
 	ctrl_ctx->drop_flags = 0;
 	ctrl_ctx->add_flags = 0;
 	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
-	slot_ctx->dev_info &= ~LAST_CTX_MASK;
+	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
 	/* Endpoint 0 is always valid */
-	slot_ctx->dev_info |= LAST_CTX(1);
+	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
 	for (i = 1; i < 31; ++i) {
 		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
 		ep_ctx->ep_info = 0;
@@ -1190,7 +1529,7 @@ static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *vir
 }
 
 static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
-		struct usb_device *udev, int *cmd_status)
+		struct usb_device *udev, u32 *cmd_status)
 {
 	int ret;
 
@@ -1214,6 +1553,11 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
 				"and endpoint is not disabled.\n");
 		ret = -EINVAL;
 		break;
+	case COMP_DEV_ERR:
+		dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint "
+				"configure command.\n");
+		ret = -ENODEV;
+		break;
 	case COMP_SUCCESS:
 		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
 		ret = 0;
@@ -1228,7 +1572,7 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
 }
 
 static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
-		struct usb_device *udev, int *cmd_status)
+		struct usb_device *udev, u32 *cmd_status)
 {
 	int ret;
 	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
@@ -1248,6 +1592,16 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
 		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
 		ret = -EINVAL;
 		break;
+	case COMP_DEV_ERR:
+		dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate "
+				"context command.\n");
+		ret = -ENODEV;
+		break;
+	case COMP_MEL_ERR:
+		/* Max Exit Latency too large error */
+		dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
+		ret = -EINVAL;
+		break;
 	case COMP_SUCCESS:
 		dev_dbg(&udev->dev, "Successful evaluate context command\n");
 		ret = 0;
@@ -1261,6 +1615,113 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
 	return ret;
 }
 
+static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx)
+{
+	struct xhci_input_control_ctx *ctrl_ctx;
+	u32 valid_add_flags;
+	u32 valid_drop_flags;
+
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+	/* Ignore the slot flag (bit 0), and the default control endpoint flag
+	 * (bit 1).  The default control endpoint is added during the Address
+	 * Device command and is never removed until the slot is disabled.
+	 */
+	valid_add_flags = ctrl_ctx->add_flags >> 2;
+	valid_drop_flags = ctrl_ctx->drop_flags >> 2;
+
+	/* Use hweight32 to count the number of ones in the add flags, or
+	 * number of endpoints added.  Don't count endpoints that are changed
+	 * (both added and dropped).
+	 */
+	return hweight32(valid_add_flags) -
+		hweight32(valid_add_flags & valid_drop_flags);
+}
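Aside: hweight32() is a population count, so the subtraction above counts endpoints that are only added, not merely changed. A worked example with hypothetical flag values (already shifted right by 2):

	u32 valid_add_flags = 0x1c;	/* endpoints 2, 3 and 4 added */
	u32 valid_drop_flags = 0x04;	/* endpoint 2 dropped */
	/* hweight32(0x1c) - hweight32(0x1c & 0x04) = 3 - 1 = 2: endpoint 2
	 * is dropped and re-added ("changed"), so only two genuinely new
	 * endpoint contexts count against the host's limit. */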
+
+static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx)
+{
+	struct xhci_input_control_ctx *ctrl_ctx;
+	u32 valid_add_flags;
+	u32 valid_drop_flags;
+
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+	valid_add_flags = ctrl_ctx->add_flags >> 2;
+	valid_drop_flags = ctrl_ctx->drop_flags >> 2;
+
+	return hweight32(valid_drop_flags) -
+		hweight32(valid_add_flags & valid_drop_flags);
+}
+
+/*
+ * We need to reserve the new number of endpoints before the configure endpoint
+ * command completes.  We can't subtract the dropped endpoints from the number
+ * of active endpoints until the command completes because we can oversubscribe
+ * the host in this case:
+ *
+ *  - the first configure endpoint command drops more endpoints than it adds
+ *  - a second configure endpoint command that adds more endpoints is queued
+ *  - the first configure endpoint command fails, so the config is unchanged
+ *  - the second command may succeed, even though there aren't enough resources
+ *
+ * Must be called with xhci->lock held.
+ */
+static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx)
+{
+	u32 added_eps;
+
+	added_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
+	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
+		xhci_dbg(xhci, "Not enough ep ctxs: "
+				"%u active, need to add %u, limit is %u.\n",
+				xhci->num_active_eps, added_eps,
+				xhci->limit_active_eps);
+		return -ENOMEM;
+	}
+	xhci->num_active_eps += added_eps;
+	xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps,
+			xhci->num_active_eps);
+	return 0;
+}
+
+/*
+ * The xHC failed the configure endpoint command for some other reason, so we
+ * need to revert the resources that the failed configuration would have used.
+ *
+ * Must be called with xhci->lock held.
+ */
+static void xhci_free_host_resources(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx)
+{
+	u32 num_failed_eps;
+
+	num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
+	xhci->num_active_eps -= num_failed_eps;
+	xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",
+			num_failed_eps,
+			xhci->num_active_eps);
+}
+
+/*
+ * Now that the command has completed, clean up the active endpoint count by
+ * subtracting out the endpoints that were dropped (but not changed).
+ *
+ * Must be called with xhci->lock held.
+ */
+static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx)
+{
+	u32 num_dropped_eps;
+
+	num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx);
+	xhci->num_active_eps -= num_dropped_eps;
+	if (num_dropped_eps)
+		xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
+				num_dropped_eps,
+				xhci->num_active_eps);
+}
+
 /* Issue a configure endpoint command or evaluate context command
  * and wait for it to finish.
  */
@@ -1274,19 +1735,45 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 	unsigned long flags;
 	struct xhci_container_ctx *in_ctx;
 	struct completion *cmd_completion;
-	int *cmd_status;
+	u32 *cmd_status;
 	struct xhci_virt_device *virt_dev;
 
 	spin_lock_irqsave(&xhci->lock, flags);
 	virt_dev = xhci->devs[udev->slot_id];
 	if (command) {
 		in_ctx = command->in_ctx;
+		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
+				xhci_reserve_host_resources(xhci, in_ctx)) {
+			spin_unlock_irqrestore(&xhci->lock, flags);
+			xhci_warn(xhci, "Not enough host resources, "
+					"active endpoint contexts = %u\n",
+					xhci->num_active_eps);
+			return -ENOMEM;
+		}
+
 		cmd_completion = command->completion;
 		cmd_status = &command->status;
 		command->command_trb = xhci->cmd_ring->enqueue;
+
+		/* Enqueue pointer can be left pointing to the link TRB,
+		 * we must handle that
+		 */
+		if ((le32_to_cpu(command->command_trb->link.control)
+				& TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
+			command->command_trb =
+				xhci->cmd_ring->enq_seg->next->trbs;
+
 		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
 	} else {
 		in_ctx = virt_dev->in_ctx;
+		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
+				xhci_reserve_host_resources(xhci, in_ctx)) {
+			spin_unlock_irqrestore(&xhci->lock, flags);
+			xhci_warn(xhci, "Not enough host resources, "
+					"active endpoint contexts = %u\n",
+					xhci->num_active_eps);
+			return -ENOMEM;
+		}
 		cmd_completion = &virt_dev->cmd_completion;
 		cmd_status = &virt_dev->cmd_status;
 	}
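Aside: the saved command_trb is what the completion handler later matches against the command completion event, so it must point at the TRB the hardware will actually execute. When the enqueue pointer rests on the segment-terminating link TRB, the next real command lands at the start of the next segment. Schematically (a sketch, not driver code):

	/*
	 * seg A: [cmd][cmd] ... [LINK -> seg B]
	 *                         ^ enqueue may rest here
	 * seg B: [next real command goes here]
	 *
	 * So when enqueue points at the link TRB, the TRB to remember is
	 * cmd_ring->enq_seg->next->trbs (the first TRB of the next segment),
	 * which is exactly what the hunk above assigns.
	 */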
@@ -1301,6 +1788,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 	if (ret < 0) {
 		if (command)
 			list_del(&command->cmd_list);
+		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
+			xhci_free_host_resources(xhci, in_ctx);
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
 		return -ENOMEM;
@@ -1323,8 +1812,22 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, | |||
1323 | } | 1812 | } |
1324 | 1813 | ||
1325 | if (!ctx_change) | 1814 | if (!ctx_change) |
1326 | return xhci_configure_endpoint_result(xhci, udev, cmd_status); | 1815 | ret = xhci_configure_endpoint_result(xhci, udev, cmd_status); |
1327 | return xhci_evaluate_context_result(xhci, udev, cmd_status); | 1816 | else |
1817 | ret = xhci_evaluate_context_result(xhci, udev, cmd_status); | ||
1818 | |||
1819 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { | ||
1820 | spin_lock_irqsave(&xhci->lock, flags); | ||
1821 | /* If the command failed, remove the reserved resources. | ||
1822 | * Otherwise, clean up the estimate to include dropped eps. | ||
1823 | */ | ||
1824 | if (ret) | ||
1825 | xhci_free_host_resources(xhci, in_ctx); | ||
1826 | else | ||
1827 | xhci_finish_resource_reservation(xhci, in_ctx); | ||
1828 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
1829 | } | ||
1830 | return ret; | ||
1328 | } | 1831 | } |
1329 | 1832 | ||
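xhci_reserve_host_resources() and xhci_free_host_resources(), referenced above, are defined elsewhere in this patch; a sketch of the reserve side under the assumption that it mirrors xhci_finish_resource_reservation(), with xhci_count_num_new_endpoints() as an assumed counting helper:

/* Must be called with xhci->lock held. */
static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	u32 added_eps;

	added_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
		xhci_dbg(xhci, "Not enough ep ctxs: "
				"%u active, need to add %u, limit is %u.\n",
				xhci->num_active_eps, added_eps,
				xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps += added_eps;
	return 0;
}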
1330 | /* Called after one or more calls to xhci_add_endpoint() or | 1833 | /* Called after one or more calls to xhci_add_endpoint() or |
@@ -1346,29 +1849,25 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) | |||
1346 | struct xhci_input_control_ctx *ctrl_ctx; | 1849 | struct xhci_input_control_ctx *ctrl_ctx; |
1347 | struct xhci_slot_ctx *slot_ctx; | 1850 | struct xhci_slot_ctx *slot_ctx; |
1348 | 1851 | ||
1349 | ret = xhci_check_args(hcd, udev, NULL, 0, __func__); | 1852 | ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); |
1350 | if (ret <= 0) | 1853 | if (ret <= 0) |
1351 | return ret; | 1854 | return ret; |
1352 | xhci = hcd_to_xhci(hcd); | 1855 | xhci = hcd_to_xhci(hcd); |
1856 | if (xhci->xhc_state & XHCI_STATE_DYING) | ||
1857 | return -ENODEV; | ||
1353 | 1858 | ||
1354 | if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) { | ||
1355 | xhci_warn(xhci, "xHCI %s called with unaddressed device\n", | ||
1356 | __func__); | ||
1357 | return -EINVAL; | ||
1358 | } | ||
1359 | xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); | 1859 | xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); |
1360 | virt_dev = xhci->devs[udev->slot_id]; | 1860 | virt_dev = xhci->devs[udev->slot_id]; |
1361 | 1861 | ||
1362 | /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ | 1862 | /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ |
1363 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); | 1863 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); |
1364 | ctrl_ctx->add_flags |= SLOT_FLAG; | 1864 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
1365 | ctrl_ctx->add_flags &= ~EP0_FLAG; | 1865 | ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG); |
1366 | ctrl_ctx->drop_flags &= ~SLOT_FLAG; | 1866 | ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG)); |
1367 | ctrl_ctx->drop_flags &= ~EP0_FLAG; | ||
1368 | xhci_dbg(xhci, "New Input Control Context:\n"); | 1867 | xhci_dbg(xhci, "New Input Control Context:\n"); |
1369 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); | 1868 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); |
1370 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, | 1869 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, |
1371 | LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); | 1870 | LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info))); |
1372 | 1871 | ||
1373 | ret = xhci_configure_endpoint(xhci, udev, NULL, | 1872 | ret = xhci_configure_endpoint(xhci, udev, NULL, |
1374 | false, false); | 1873 | false, false); |
@@ -1379,10 +1878,19 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) | |||
1379 | 1878 | ||
1380 | xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); | 1879 | xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); |
1381 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, | 1880 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, |
1382 | LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); | 1881 | LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info))); |
1383 | 1882 | ||
1883 | /* Free any rings that were dropped, but not changed. */ | ||
1884 | for (i = 1; i < 31; ++i) { | ||
1885 | if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) && | ||
1886 | !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) | ||
1887 | xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); | ||
1888 | } | ||
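	/* The input control context's add/drop flags are offset by one
	 * from the endpoint index: bit 0 is the slot context flag and
	 * bit 1 is EP0, so endpoint index i maps to flag bit (i + 1),
	 * hence the (1 << (i + 1)) tests above.
	 */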
1384 | xhci_zero_in_ctx(xhci, virt_dev); | 1889 | xhci_zero_in_ctx(xhci, virt_dev); |
1385 | /* Install new rings and free or cache any old rings */ | 1890 | /* |
1891 | * Install any rings for completely new endpoints or changed endpoints, | ||
1892 | * and free or cache any old rings from changed endpoints. | ||
1893 | */ | ||
1386 | for (i = 1; i < 31; ++i) { | 1894 | for (i = 1; i < 31; ++i) { |
1387 | if (!virt_dev->eps[i].new_ring) | 1895 | if (!virt_dev->eps[i].new_ring) |
1388 | continue; | 1896 | continue; |
@@ -1405,16 +1913,11 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) | |||
1405 | struct xhci_virt_device *virt_dev; | 1913 | struct xhci_virt_device *virt_dev; |
1406 | int i, ret; | 1914 | int i, ret; |
1407 | 1915 | ||
1408 | ret = xhci_check_args(hcd, udev, NULL, 0, __func__); | 1916 | ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); |
1409 | if (ret <= 0) | 1917 | if (ret <= 0) |
1410 | return; | 1918 | return; |
1411 | xhci = hcd_to_xhci(hcd); | 1919 | xhci = hcd_to_xhci(hcd); |
1412 | 1920 | ||
1413 | if (!xhci->devs || !xhci->devs[udev->slot_id]) { | ||
1414 | xhci_warn(xhci, "xHCI %s called with unaddressed device\n", | ||
1415 | __func__); | ||
1416 | return; | ||
1417 | } | ||
1418 | xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); | 1921 | xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); |
1419 | virt_dev = xhci->devs[udev->slot_id]; | 1922 | virt_dev = xhci->devs[udev->slot_id]; |
1420 | /* Free any rings allocated for added endpoints */ | 1923 | /* Free any rings allocated for added endpoints */ |
@@ -1434,16 +1937,16 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, | |||
1434 | { | 1937 | { |
1435 | struct xhci_input_control_ctx *ctrl_ctx; | 1938 | struct xhci_input_control_ctx *ctrl_ctx; |
1436 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); | 1939 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); |
1437 | ctrl_ctx->add_flags = add_flags; | 1940 | ctrl_ctx->add_flags = cpu_to_le32(add_flags); |
1438 | ctrl_ctx->drop_flags = drop_flags; | 1941 | ctrl_ctx->drop_flags = cpu_to_le32(drop_flags); |
1439 | xhci_slot_copy(xhci, in_ctx, out_ctx); | 1942 | xhci_slot_copy(xhci, in_ctx, out_ctx); |
1440 | ctrl_ctx->add_flags |= SLOT_FLAG; | 1943 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
1441 | 1944 | ||
1442 | xhci_dbg(xhci, "Input Context:\n"); | 1945 | xhci_dbg(xhci, "Input Context:\n"); |
1443 | xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); | 1946 | xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); |
1444 | } | 1947 | } |
1445 | 1948 | ||
1446 | void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, | 1949 | static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, |
1447 | unsigned int slot_id, unsigned int ep_index, | 1950 | unsigned int slot_id, unsigned int ep_index, |
1448 | struct xhci_dequeue_state *deq_state) | 1951 | struct xhci_dequeue_state *deq_state) |
1449 | { | 1952 | { |
@@ -1466,7 +1969,7 @@ void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, | |||
1466 | deq_state->new_deq_ptr); | 1969 | deq_state->new_deq_ptr); |
1467 | return; | 1970 | return; |
1468 | } | 1971 | } |
1469 | ep_ctx->deq = addr | deq_state->new_cycle_state; | 1972 | ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state); |
1470 | 1973 | ||
1471 | added_ctxs = xhci_get_endpoint_flag_from_index(ep_index); | 1974 | added_ctxs = xhci_get_endpoint_flag_from_index(ep_index); |
1472 | xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx, | 1975 | xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx, |
@@ -1575,7 +2078,7 @@ static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, | |||
1575 | 2078 | ||
1576 | if (!ep) | 2079 | if (!ep) |
1577 | return -EINVAL; | 2080 | return -EINVAL; |
1578 | ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, __func__); | 2081 | ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); |
1579 | if (ret <= 0) | 2082 | if (ret <= 0) |
1580 | return -EINVAL; | 2083 | return -EINVAL; |
1581 | if (ep->ss_ep_comp.bmAttributes == 0) { | 2084 | if (ep->ss_ep_comp.bmAttributes == 0) { |
@@ -1771,7 +2274,7 @@ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev, | |||
1771 | return -EINVAL; | 2274 | return -EINVAL; |
1772 | } | 2275 | } |
1773 | vdev = xhci->devs[udev->slot_id]; | 2276 | vdev = xhci->devs[udev->slot_id]; |
1774 | /* Mark each endpoint as being in transistion, so | 2277 | /* Mark each endpoint as being in transition, so |
1775 | * xhci_urb_enqueue() will reject all URBs. | 2278 | * xhci_urb_enqueue() will reject all URBs. |
1776 | */ | 2279 | */ |
1777 | for (i = 0; i < num_eps; i++) { | 2280 | for (i = 0; i < num_eps; i++) { |
@@ -1942,6 +2445,34 @@ int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev, | |||
1942 | } | 2445 | } |
1943 | 2446 | ||
1944 | /* | 2447 | /* |
2448 | * Deletes endpoint resources for endpoints that were active before a Reset | ||
2449 | * Device command, or a Disable Slot command. The Reset Device command leaves | ||
2450 | * the control endpoint intact, whereas the Disable Slot command deletes it. | ||
2451 | * | ||
2452 | * Must be called with xhci->lock held. | ||
2453 | */ | ||
2454 | void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, | ||
2455 | struct xhci_virt_device *virt_dev, bool drop_control_ep) | ||
2456 | { | ||
2457 | int i; | ||
2458 | unsigned int num_dropped_eps = 0; | ||
2459 | unsigned int drop_flags = 0; | ||
2460 | |||
2461 | for (i = (drop_control_ep ? 0 : 1); i < 31; i++) { | ||
2462 | if (virt_dev->eps[i].ring) { | ||
2463 | drop_flags |= 1 << i; | ||
2464 | num_dropped_eps++; | ||
2465 | } | ||
2466 | } | ||
2467 | xhci->num_active_eps -= num_dropped_eps; | ||
2468 | if (num_dropped_eps) | ||
2469 | xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, " | ||
2470 | "%u now active.\n", | ||
2471 | num_dropped_eps, drop_flags, | ||
2472 | xhci->num_active_eps); | ||
2473 | } | ||
2474 | |||
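Note that drop_flags in this helper indexes endpoints directly (1 << i) and is only reported in the debug message; it is host-side bookkeeping, distinct from the input control context flags, which are offset by one bit as noted earlier.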
2475 | /* | ||
1945 | * This submits a Reset Device Command, which will set the device state to 0, | 2476 | * This submits a Reset Device Command, which will set the device state to 0, |
1946 | * set the device address to 0, and disable all the endpoints except the default | 2477 | * set the device address to 0, and disable all the endpoints except the default |
1947 | * control endpoint. The USB core should come back and call | 2478 | * control endpoint. The USB core should come back and call |
@@ -1953,8 +2484,13 @@ int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev, | |||
1953 | * Wait for the Reset Device command to finish. Remove all structures | 2484 | * Wait for the Reset Device command to finish. Remove all structures |
1954 | * associated with the endpoints that were disabled. Clear the input device | 2485 | * associated with the endpoints that were disabled. Clear the input device |
1955 | * structure? Cache the rings? Reset the control endpoint 0 max packet size? | 2486 | * structure? Cache the rings? Reset the control endpoint 0 max packet size? |
2487 | * | ||
2488 | * If the virt_dev to be reset does not exist or does not match the udev, | ||
2489 | * it means the device is lost, possibly due to an xHC restore error and | ||
2490 | * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to | ||
2491 | * re-allocate the device. | ||
1956 | */ | 2492 | */ |
1957 | int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev) | 2493 | int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) |
1958 | { | 2494 | { |
1959 | int ret, i; | 2495 | int ret, i; |
1960 | unsigned long flags; | 2496 | unsigned long flags; |
@@ -1964,19 +2500,45 @@ int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev) | |||
1964 | struct xhci_command *reset_device_cmd; | 2500 | struct xhci_command *reset_device_cmd; |
1965 | int timeleft; | 2501 | int timeleft; |
1966 | int last_freed_endpoint; | 2502 | int last_freed_endpoint; |
2503 | struct xhci_slot_ctx *slot_ctx; | ||
1967 | 2504 | ||
1968 | ret = xhci_check_args(hcd, udev, NULL, 0, __func__); | 2505 | ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__); |
1969 | if (ret <= 0) | 2506 | if (ret <= 0) |
1970 | return ret; | 2507 | return ret; |
1971 | xhci = hcd_to_xhci(hcd); | 2508 | xhci = hcd_to_xhci(hcd); |
1972 | slot_id = udev->slot_id; | 2509 | slot_id = udev->slot_id; |
1973 | virt_dev = xhci->devs[slot_id]; | 2510 | virt_dev = xhci->devs[slot_id]; |
1974 | if (!virt_dev) { | 2511 | if (!virt_dev) { |
1975 | xhci_dbg(xhci, "%s called with invalid slot ID %u\n", | 2512 | xhci_dbg(xhci, "The device to be reset with slot ID %u does " |
1976 | __func__, slot_id); | 2513 | "not exist. Re-allocate the device\n", slot_id); |
1977 | return -EINVAL; | 2514 | ret = xhci_alloc_dev(hcd, udev); |
2515 | if (ret == 1) | ||
2516 | return 0; | ||
2517 | else | ||
2518 | return -EINVAL; | ||
1978 | } | 2519 | } |
1979 | 2520 | ||
2521 | if (virt_dev->udev != udev) { | ||
2522 | /* If the virt_dev and the udev do not match, this virt_dev | ||
2523 | * may belong to another udev. | ||
2524 | * Re-allocate the device. | ||
2525 | */ | ||
2526 | xhci_dbg(xhci, "The device to be reset with slot ID %u does " | ||
2527 | "not match the udev. Re-allocate the device\n", | ||
2528 | slot_id); | ||
2529 | ret = xhci_alloc_dev(hcd, udev); | ||
2530 | if (ret == 1) | ||
2531 | return 0; | ||
2532 | else | ||
2533 | return -EINVAL; | ||
2534 | } | ||
2535 | |||
2536 | /* If device is not setup, there is no point in resetting it */ | ||
2537 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); | ||
2538 | if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == | ||
2539 | SLOT_STATE_DISABLED) | ||
2540 | return 0; | ||
2541 | |||
1980 | xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); | 2542 | xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); |
1981 | /* Allocate the command structure that holds the struct completion. | 2543 | /* Allocate the command structure that holds the struct completion. |
1982 | * Assume we're in process context, since the normal device reset | 2544 | * Assume we're in process context, since the normal device reset |
@@ -1993,6 +2555,15 @@ int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev) | |||
1993 | /* Attempt to submit the Reset Device command to the command ring */ | 2555 | /* Attempt to submit the Reset Device command to the command ring */ |
1994 | spin_lock_irqsave(&xhci->lock, flags); | 2556 | spin_lock_irqsave(&xhci->lock, flags); |
1995 | reset_device_cmd->command_trb = xhci->cmd_ring->enqueue; | 2557 | reset_device_cmd->command_trb = xhci->cmd_ring->enqueue; |
2558 | |||
2559 | /* Enqueue pointer can be left pointing to the link TRB, | ||
2560 | * so we must handle that case | ||
2561 | */ | ||
2562 | if ((le32_to_cpu(reset_device_cmd->command_trb->link.control) | ||
2563 | & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK)) | ||
2564 | reset_device_cmd->command_trb = | ||
2565 | xhci->cmd_ring->enq_seg->next->trbs; | ||
2566 | |||
1996 | list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list); | 2567 | list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list); |
1997 | ret = xhci_queue_reset_device(xhci, slot_id); | 2568 | ret = xhci_queue_reset_device(xhci, slot_id); |
1998 | if (ret) { | 2569 | if (ret) { |
@@ -2049,13 +2620,29 @@ int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev) | |||
2049 | goto command_cleanup; | 2620 | goto command_cleanup; |
2050 | } | 2621 | } |
2051 | 2622 | ||
2623 | /* Free up host controller endpoint resources */ | ||
2624 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { | ||
2625 | spin_lock_irqsave(&xhci->lock, flags); | ||
2626 | /* Don't delete the default control endpoint resources */ | ||
2627 | xhci_free_device_endpoint_resources(xhci, virt_dev, false); | ||
2628 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
2629 | } | ||
2630 | |||
2052 | /* Everything but endpoint 0 is disabled, so free or cache the rings. */ | 2631 | /* Everything but endpoint 0 is disabled, so free or cache the rings. */ |
2053 | last_freed_endpoint = 1; | 2632 | last_freed_endpoint = 1; |
2054 | for (i = 1; i < 31; ++i) { | 2633 | for (i = 1; i < 31; ++i) { |
2055 | if (!virt_dev->eps[i].ring) | 2634 | struct xhci_virt_ep *ep = &virt_dev->eps[i]; |
2056 | continue; | 2635 | |
2057 | xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); | 2636 | if (ep->ep_state & EP_HAS_STREAMS) { |
2058 | last_freed_endpoint = i; | 2637 | xhci_free_stream_info(xhci, ep->stream_info); |
2638 | ep->stream_info = NULL; | ||
2639 | ep->ep_state &= ~EP_HAS_STREAMS; | ||
2640 | } | ||
2641 | |||
2642 | if (ep->ring) { | ||
2643 | xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); | ||
2644 | last_freed_endpoint = i; | ||
2645 | } | ||
2059 | } | 2646 | } |
2060 | xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); | 2647 | xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); |
2061 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); | 2648 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); |
@@ -2077,13 +2664,13 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) | |||
2077 | struct xhci_virt_device *virt_dev; | 2664 | struct xhci_virt_device *virt_dev; |
2078 | unsigned long flags; | 2665 | unsigned long flags; |
2079 | u32 state; | 2666 | u32 state; |
2080 | int i; | 2667 | int i, ret; |
2081 | 2668 | ||
2082 | if (udev->slot_id == 0) | 2669 | ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); |
2670 | if (ret <= 0) | ||
2083 | return; | 2671 | return; |
2672 | |||
2084 | virt_dev = xhci->devs[udev->slot_id]; | 2673 | virt_dev = xhci->devs[udev->slot_id]; |
2085 | if (!virt_dev) | ||
2086 | return; | ||
2087 | 2674 | ||
2088 | /* Stop any wayward timer functions (which may grab the lock) */ | 2675 | /* Stop any wayward timer functions (which may grab the lock) */ |
2089 | for (i = 0; i < 31; ++i) { | 2676 | for (i = 0; i < 31; ++i) { |
@@ -2114,6 +2701,27 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) | |||
2114 | } | 2701 | } |
2115 | 2702 | ||
2116 | /* | 2703 | /* |
2704 | * Checks if we have enough host controller resources for the default control | ||
2705 | * endpoint. | ||
2706 | * | ||
2707 | * Must be called with xhci->lock held. | ||
2708 | */ | ||
2709 | static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) | ||
2710 | { | ||
2711 | if (xhci->num_active_eps + 1 > xhci->limit_active_eps) { | ||
2712 | xhci_dbg(xhci, "Not enough ep ctxs: " | ||
2713 | "%u active, need to add 1, limit is %u.\n", | ||
2714 | xhci->num_active_eps, xhci->limit_active_eps); | ||
2715 | return -ENOMEM; | ||
2716 | } | ||
2717 | xhci->num_active_eps += 1; | ||
2718 | xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n", | ||
2719 | xhci->num_active_eps); | ||
2720 | return 0; | ||
2721 | } | ||
2722 | |||
2723 | |||
2724 | /* | ||
2117 | * Returns 0 if the xHC ran out of device slots, the Enable Slot command | 2725 | * Returns 0 if the xHC ran out of device slots, the Enable Slot command |
2118 | * timed out, or allocating memory failed. Returns 1 on success. | 2726 | * timed out, or allocating memory failed. Returns 1 on success. |
2119 | */ | 2727 | */ |
@@ -2148,20 +2756,39 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) | |||
2148 | xhci_err(xhci, "Error while assigning device slot ID\n"); | 2756 | xhci_err(xhci, "Error while assigning device slot ID\n"); |
2149 | return 0; | 2757 | return 0; |
2150 | } | 2758 | } |
2151 | /* xhci_alloc_virt_device() does not touch rings; no need to lock */ | 2759 | |
2152 | if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) { | 2760 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { |
2153 | /* Disable slot, if we can do it without mem alloc */ | ||
2154 | xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); | ||
2155 | spin_lock_irqsave(&xhci->lock, flags); | 2761 | spin_lock_irqsave(&xhci->lock, flags); |
2156 | if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) | 2762 | ret = xhci_reserve_host_control_ep_resources(xhci); |
2157 | xhci_ring_cmd_db(xhci); | 2763 | if (ret) { |
2764 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
2765 | xhci_warn(xhci, "Not enough host resources, " | ||
2766 | "active endpoint contexts = %u\n", | ||
2767 | xhci->num_active_eps); | ||
2768 | goto disable_slot; | ||
2769 | } | ||
2158 | spin_unlock_irqrestore(&xhci->lock, flags); | 2770 | spin_unlock_irqrestore(&xhci->lock, flags); |
2159 | return 0; | 2771 | } |
2772 | /* Use GFP_NOIO, since this function can be called from | ||
2773 | * xhci_discover_or_reset_device(), which may be called as part of | ||
2774 | * mass storage driver error handling. | ||
2775 | */ | ||
2776 | if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) { | ||
2777 | xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); | ||
2778 | goto disable_slot; | ||
2160 | } | 2779 | } |
2161 | udev->slot_id = xhci->slot_id; | 2780 | udev->slot_id = xhci->slot_id; |
2162 | /* Is this a LS or FS device under a HS hub? */ | 2781 | /* Is this a LS or FS device under a HS hub? */ |
2163 | /* Hub or peripheral? */ | 2782 | /* Hub or peripheral? */ |
2164 | return 1; | 2783 | return 1; |
2784 | |||
2785 | disable_slot: | ||
2786 | /* Disable slot, if we can do it without mem alloc */ | ||
2787 | spin_lock_irqsave(&xhci->lock, flags); | ||
2788 | if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) | ||
2789 | xhci_ring_cmd_db(xhci); | ||
2790 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
2791 | return 0; | ||
2165 | } | 2792 | } |
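A note on the consolidated disable_slot error path: queueing a Disable Slot command only writes a TRB to the already-allocated command ring, so this cleanup cannot itself fail on memory allocation. That matters because the path is reached exactly when allocation or resource reservation has already failed.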
2166 | 2793 | ||
2167 | /* | 2794 | /* |
@@ -2191,12 +2818,28 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) | |||
2191 | 2818 | ||
2192 | virt_dev = xhci->devs[udev->slot_id]; | 2819 | virt_dev = xhci->devs[udev->slot_id]; |
2193 | 2820 | ||
2194 | /* If this is a Set Address to an unconfigured device, setup ep 0 */ | 2821 | if (WARN_ON(!virt_dev)) { |
2195 | if (!udev->config) | 2822 | /* |
2823 | * In a plug/unplug torture test with an NEC controller, | ||
2824 | * a NULL-pointer dereference was observed once because virt_dev was NULL. | ||
2825 | * Print useful debug rather than crash if it is observed again! | ||
2826 | */ | ||
2827 | xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", | ||
2828 | udev->slot_id); | ||
2829 | return -EINVAL; | ||
2830 | } | ||
2831 | |||
2832 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); | ||
2833 | /* | ||
2834 | * If this is the first Set Address since device plug-in or | ||
2835 | * virt_device realloaction after a resume with an xHCI power loss, | ||
2836 | * then set up the slot context. | ||
2837 | */ | ||
2838 | if (!slot_ctx->dev_info) | ||
2196 | xhci_setup_addressable_virt_dev(xhci, udev); | 2839 | xhci_setup_addressable_virt_dev(xhci, udev); |
2840 | /* Otherwise, update the control endpoint ring enqueue pointer. */ | ||
2197 | else | 2841 | else |
2198 | xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); | 2842 | xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); |
2199 | /* Otherwise, assume the core has the device configured how it wants */ | ||
2200 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); | 2843 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); |
2201 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); | 2844 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); |
2202 | 2845 | ||
@@ -2236,6 +2879,11 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) | |||
2236 | dev_warn(&udev->dev, "Device not responding to set address.\n"); | 2879 | dev_warn(&udev->dev, "Device not responding to set address.\n"); |
2237 | ret = -EPROTO; | 2880 | ret = -EPROTO; |
2238 | break; | 2881 | break; |
2882 | case COMP_DEV_ERR: | ||
2883 | dev_warn(&udev->dev, "ERROR: Incompatible device for address " | ||
2884 | "device command.\n"); | ||
2885 | ret = -ENODEV; | ||
2886 | break; | ||
2239 | case COMP_SUCCESS: | 2887 | case COMP_SUCCESS: |
2240 | xhci_dbg(xhci, "Successful Address Device command\n"); | 2888 | xhci_dbg(xhci, "Successful Address Device command\n"); |
2241 | break; | 2889 | break; |
@@ -2253,10 +2901,10 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) | |||
2253 | temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); | 2901 | temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); |
2254 | xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64); | 2902 | xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64); |
2255 | xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n", | 2903 | xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n", |
2256 | udev->slot_id, | 2904 | udev->slot_id, |
2257 | &xhci->dcbaa->dev_context_ptrs[udev->slot_id], | 2905 | &xhci->dcbaa->dev_context_ptrs[udev->slot_id], |
2258 | (unsigned long long) | 2906 | (unsigned long long) |
2259 | xhci->dcbaa->dev_context_ptrs[udev->slot_id]); | 2907 | le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id])); |
2260 | xhci_dbg(xhci, "Output Context DMA address = %#08llx\n", | 2908 | xhci_dbg(xhci, "Output Context DMA address = %#08llx\n", |
2261 | (unsigned long long)virt_dev->out_ctx->dma); | 2909 | (unsigned long long)virt_dev->out_ctx->dma); |
2262 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); | 2910 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); |
@@ -2268,15 +2916,16 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) | |||
2268 | * address given back to us by the HC. | 2916 | * address given back to us by the HC. |
2269 | */ | 2917 | */ |
2270 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); | 2918 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); |
2271 | udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1; | 2919 | /* Use the kernel-assigned address for devices; store the xHC-assigned |
2920 | * address locally. */ | ||
2921 | virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK) | ||
2922 | + 1; | ||
2272 | /* Zero the input context control for later use */ | 2923 | /* Zero the input context control for later use */ |
2273 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); | 2924 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); |
2274 | ctrl_ctx->add_flags = 0; | 2925 | ctrl_ctx->add_flags = 0; |
2275 | ctrl_ctx->drop_flags = 0; | 2926 | ctrl_ctx->drop_flags = 0; |
2276 | 2927 | ||
2277 | xhci_dbg(xhci, "Device address = %d\n", udev->devnum); | 2928 | xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address); |
2278 | /* XXX Meh, not sure if anyone else but choose_address uses this. */ | ||
2279 | set_bit(udev->devnum, udev->bus->devmap.devicemap); | ||
2280 | 2929 | ||
2281 | return 0; | 2930 | return 0; |
2282 | } | 2931 | } |
@@ -2314,24 +2963,29 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, | |||
2314 | spin_lock_irqsave(&xhci->lock, flags); | 2963 | spin_lock_irqsave(&xhci->lock, flags); |
2315 | xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); | 2964 | xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); |
2316 | ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx); | 2965 | ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx); |
2317 | ctrl_ctx->add_flags |= SLOT_FLAG; | 2966 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
2318 | slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); | 2967 | slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); |
2319 | slot_ctx->dev_info |= DEV_HUB; | 2968 | slot_ctx->dev_info |= cpu_to_le32(DEV_HUB); |
2320 | if (tt->multi) | 2969 | if (tt->multi) |
2321 | slot_ctx->dev_info |= DEV_MTT; | 2970 | slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); |
2322 | if (xhci->hci_version > 0x95) { | 2971 | if (xhci->hci_version > 0x95) { |
2323 | xhci_dbg(xhci, "xHCI version %x needs hub " | 2972 | xhci_dbg(xhci, "xHCI version %x needs hub " |
2324 | "TT think time and number of ports\n", | 2973 | "TT think time and number of ports\n", |
2325 | (unsigned int) xhci->hci_version); | 2974 | (unsigned int) xhci->hci_version); |
2326 | slot_ctx->dev_info2 |= XHCI_MAX_PORTS(hdev->maxchild); | 2975 | slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild)); |
2327 | /* Set TT think time - convert from ns to FS bit times. | 2976 | /* Set TT think time - convert from ns to FS bit times. |
2328 | * 0 = 8 FS bit times, 1 = 16 FS bit times, | 2977 | * 0 = 8 FS bit times, 1 = 16 FS bit times, |
2329 | * 2 = 24 FS bit times, 3 = 32 FS bit times. | 2978 | * 2 = 24 FS bit times, 3 = 32 FS bit times. |
2979 | * | ||
2980 | * xHCI 1.0: this field shall be 0 if the device is not a | ||
2981 | * High-speed hub. | ||
2330 | */ | 2982 | */ |
2331 | think_time = tt->think_time; | 2983 | think_time = tt->think_time; |
2332 | if (think_time != 0) | 2984 | if (think_time != 0) |
2333 | think_time = (think_time / 666) - 1; | 2985 | think_time = (think_time / 666) - 1; |
2334 | slot_ctx->tt_info |= TT_THINK_TIME(think_time); | 2986 | if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH) |
2987 | slot_ctx->tt_info |= | ||
2988 | cpu_to_le32(TT_THINK_TIME(think_time)); | ||
2335 | } else { | 2989 | } else { |
2336 | xhci_dbg(xhci, "xHCI version %x doesn't need hub " | 2990 | xhci_dbg(xhci, "xHCI version %x doesn't need hub " |
2337 | "TT think time or number of ports\n", | 2991 | "TT think time or number of ports\n", |
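For concreteness on the conversion above: the USB core reports tt->think_time in nanoseconds, and one full-speed bit time at 12 Mb/s is about 83.3 ns, so 8 FS bit times come to roughly 666 ns. Dividing by 666 and subtracting 1 then yields the register encoding:

/* Worked example (assumes think_time in ns, as the USB core stores it):
 *    666 ns ->  666 / 666 - 1 = 0  (8 FS bit times)
 *   1332 ns -> 1332 / 666 - 1 = 1  (16 FS bit times)
 *   1998 ns -> 1998 / 666 - 1 = 2  (24 FS bit times)
 *   2664 ns -> 2664 / 666 - 1 = 3  (32 FS bit times)
 */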