Diffstat (limited to 'drivers/usb/host/xhci.c')
-rw-r--r--	drivers/usb/host/xhci.c	| 344
1 file changed, 202 insertions(+), 142 deletions(-)
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 3998f72cd0c4..d5c550ea3e68 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -20,6 +20,7 @@
  * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include <linux/pci.h>
 #include <linux/irq.h>
 #include <linux/log2.h>
 #include <linux/module.h>
@@ -171,22 +172,84 @@ int xhci_reset(struct xhci_hcd *xhci)
 	return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
 }
 
+/*
+ * Free IRQs
+ * free all IRQs requested by the driver
+ */
+static void xhci_free_irq(struct xhci_hcd *xhci)
+{
+	int i;
+	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 
-#if 0
-/* Set up MSI-X table for entry 0 (may claim other entries later) */
-static int xhci_setup_msix(struct xhci_hcd *xhci)
+	/* return if using legacy interrupt */
+	if (xhci_to_hcd(xhci)->irq >= 0)
+		return;
+
+	if (xhci->msix_entries) {
+		for (i = 0; i < xhci->msix_count; i++)
+			if (xhci->msix_entries[i].vector)
+				free_irq(xhci->msix_entries[i].vector,
+						xhci_to_hcd(xhci));
+	} else if (pdev->irq >= 0)
+		free_irq(pdev->irq, xhci_to_hcd(xhci));
+
+	return;
+}
+
+/*
+ * Set up MSI
+ */
+static int xhci_setup_msi(struct xhci_hcd *xhci)
 {
 	int ret;
+	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+
+	ret = pci_enable_msi(pdev);
+	if (ret) {
+		xhci_err(xhci, "failed to allocate MSI entry\n");
+		return ret;
+	}
+
+	ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
+				0, "xhci_hcd", xhci_to_hcd(xhci));
+	if (ret) {
+		xhci_err(xhci, "disable MSI interrupt\n");
+		pci_disable_msi(pdev);
+	}
+
+	return ret;
+}
+
+/*
+ * Set up MSI-X
+ */
+static int xhci_setup_msix(struct xhci_hcd *xhci)
+{
+	int i, ret = 0;
 	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 
-	xhci->msix_count = 0;
-	/* XXX: did I do this right? ixgbe does kcalloc for more than one */
-	xhci->msix_entries = kmalloc(sizeof(struct msix_entry), GFP_KERNEL);
+	/*
+	 * Calculate the number of MSI-X vectors supported.
+	 * - HCS_MAX_INTRS: the maximum number of interrupts the host can
+	 *   handle, based on the xHCI HCSPARAMS1 register.
+	 * - num_online_cpus: one MSI-X vector per CPU core, plus one
+	 *   additional vector so an interrupt is always available.
+	 */
+	xhci->msix_count = min(num_online_cpus() + 1,
+				HCS_MAX_INTRS(xhci->hcs_params1));
+
+	xhci->msix_entries =
+		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
+				GFP_KERNEL);
 	if (!xhci->msix_entries) {
 		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
 		return -ENOMEM;
 	}
-	xhci->msix_entries[0].entry = 0;
+
+	for (i = 0; i < xhci->msix_count; i++) {
+		xhci->msix_entries[i].entry = i;
+		xhci->msix_entries[i].vector = 0;
+	}
 
 	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
 	if (ret) {
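As a side note on the vector-count clamp above, here is a minimal standalone sketch of the same arithmetic in ordinary user-space C. The macros are invented stand-ins, not the kernel's definitions: EXAMPLE_HCS_MAX_INTRS plays the role of HCS_MAX_INTRS(hcs_params1) and EXAMPLE_ONLINE_CPUS the role of num_online_cpus().

#include <stdio.h>

/* Invented stand-ins for the kernel-side values used above. */
#define EXAMPLE_HCS_MAX_INTRS	8	/* from HCSPARAMS1; host-dependent */
#define EXAMPLE_ONLINE_CPUS	4

static int example_msix_count(int online_cpus, int hcs_max_intrs)
{
	/* One vector per CPU plus one spare, capped by host support. */
	int want = online_cpus + 1;

	return want < hcs_max_intrs ? want : hcs_max_intrs;
}

int main(void)
{
	printf("msix_count = %d\n",
	       example_msix_count(EXAMPLE_ONLINE_CPUS, EXAMPLE_HCS_MAX_INTRS));
	return 0;	/* prints: msix_count = 5 */
}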
@@ -194,20 +257,19 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
 		goto free_entries;
 	}
 
-	/*
-	 * Pass the xhci pointer value as the request_irq "cookie".
-	 * If more irqs are added, this will need to be unique for each one.
-	 */
-	ret = request_irq(xhci->msix_entries[0].vector, &xhci_irq, 0,
-			"xHCI", xhci_to_hcd(xhci));
-	if (ret) {
-		xhci_err(xhci, "Failed to allocate MSI-X interrupt\n");
-		goto disable_msix;
+	for (i = 0; i < xhci->msix_count; i++) {
+		ret = request_irq(xhci->msix_entries[i].vector,
+				(irq_handler_t)xhci_msi_irq,
+				0, "xhci_hcd", xhci_to_hcd(xhci));
+		if (ret)
+			goto disable_msix;
 	}
-	xhci_dbg(xhci, "Finished setting up MSI-X\n");
-	return 0;
+
+	return ret;
 
 disable_msix:
+	xhci_err(xhci, "disable MSI-X interrupt\n");
+	xhci_free_irq(xhci);
 	pci_disable_msix(pdev);
 free_entries:
 	kfree(xhci->msix_entries);
@@ -215,21 +277,23 @@ free_entries:
 	return ret;
 }
 
-/* XXX: code duplication; can xhci_setup_msix call this? */
 /* Free any IRQs and disable MSI-X */
 static void xhci_cleanup_msix(struct xhci_hcd *xhci)
 {
 	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
-	if (!xhci->msix_entries)
-		return;
 
-	free_irq(xhci->msix_entries[0].vector, xhci);
-	pci_disable_msix(pdev);
-	kfree(xhci->msix_entries);
-	xhci->msix_entries = NULL;
-	xhci_dbg(xhci, "Finished cleaning up MSI-X\n");
+	xhci_free_irq(xhci);
+
+	if (xhci->msix_entries) {
+		pci_disable_msix(pdev);
+		kfree(xhci->msix_entries);
+		xhci->msix_entries = NULL;
+	} else {
+		pci_disable_msi(pdev);
+	}
+
+	return;
 }
-#endif
 
 /*
  * Initialize memory for HCD and xHC (one-time init).
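Between them, xhci_free_irq() and xhci_cleanup_msix() release exactly what the matching setup path acquired: handlers first, then the MSI/MSI-X capability, then the entry array. A toy user-space model of that dispatch follows; the enum and printf calls are inventions for illustration, since the real driver keys off hcd->irq and xhci->msix_entries instead of an explicit mode.

#include <stdio.h>

/* Toy model of which resources the cleanup path releases, depending on
 * how interrupts were set up. */
enum irq_mode { IRQ_LEGACY, IRQ_MSI, IRQ_MSIX };

static void cleanup(enum irq_mode mode, int msix_vectors)
{
	int i;

	switch (mode) {
	case IRQ_LEGACY:
		/* hcd->irq >= 0: xhci_free_irq() returns early */
		printf("legacy: leave the shared handler alone\n");
		break;
	case IRQ_MSI:
		printf("free_irq(pdev->irq); pci_disable_msi()\n");
		break;
	case IRQ_MSIX:
		for (i = 0; i < msix_vectors; i++)
			printf("free_irq(vector %d)\n", i);
		printf("pci_disable_msix(); kfree(msix_entries)\n");
		break;
	}
}

int main(void)
{
	cleanup(IRQ_MSIX, 3);	/* free vectors 0..2, then disable MSI-X */
	return 0;
}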
@@ -257,100 +321,8 @@ int xhci_init(struct usb_hcd *hcd)
 	return retval;
 }
 
-/*
- * Called in interrupt context when there might be work
- * queued on the event ring
- *
- * xhci->lock must be held by caller.
- */
-static void xhci_work(struct xhci_hcd *xhci)
-{
-	u32 temp;
-	u64 temp_64;
-
-	/*
-	 * Clear the op reg interrupt status first,
-	 * so we can receive interrupts from other MSI-X interrupters.
-	 * Write 1 to clear the interrupt status.
-	 */
-	temp = xhci_readl(xhci, &xhci->op_regs->status);
-	temp |= STS_EINT;
-	xhci_writel(xhci, temp, &xhci->op_regs->status);
-	/* FIXME when MSI-X is supported and there are multiple vectors */
-	/* Clear the MSI-X event interrupt status */
-
-	/* Acknowledge the interrupt */
-	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
-	temp |= 0x3;
-	xhci_writel(xhci, temp, &xhci->ir_set->irq_pending);
-	/* Flush posted writes */
-	xhci_readl(xhci, &xhci->ir_set->irq_pending);
-
-	if (xhci->xhc_state & XHCI_STATE_DYING)
-		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
-				"Shouldn't IRQs be disabled?\n");
-	else
-		/* FIXME this should be a delayed service routine
-		 * that clears the EHB.
-		 */
-		xhci_handle_event(xhci);
-
-	/* Clear the event handler busy flag (RW1C); the event ring should be empty. */
-	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
-	xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue);
-	/* Flush posted writes -- FIXME is this necessary? */
-	xhci_readl(xhci, &xhci->ir_set->irq_pending);
-}
-
 /*-------------------------------------------------------------------------*/
 
-/*
- * xHCI spec says we can get an interrupt, and if the HC has an error condition,
- * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
- * indicators of an event TRB error, but we check the status *first* to be safe.
- */
-irqreturn_t xhci_irq(struct usb_hcd *hcd)
-{
-	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-	u32 temp, temp2;
-	union xhci_trb *trb;
-
-	spin_lock(&xhci->lock);
-	trb = xhci->event_ring->dequeue;
-	/* Check if the xHC generated the interrupt, or the irq is shared */
-	temp = xhci_readl(xhci, &xhci->op_regs->status);
-	temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
-	if (temp == 0xffffffff && temp2 == 0xffffffff)
-		goto hw_died;
-
-	if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
-		spin_unlock(&xhci->lock);
-		return IRQ_NONE;
-	}
-	xhci_dbg(xhci, "op reg status = %08x\n", temp);
-	xhci_dbg(xhci, "ir set irq_pending = %08x\n", temp2);
-	xhci_dbg(xhci, "Event ring dequeue ptr:\n");
-	xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
-			(unsigned long long)xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
-			lower_32_bits(trb->link.segment_ptr),
-			upper_32_bits(trb->link.segment_ptr),
-			(unsigned int) trb->link.intr_target,
-			(unsigned int) trb->link.control);
-
-	if (temp & STS_FATAL) {
-		xhci_warn(xhci, "WARNING: Host System Error\n");
-		xhci_halt(xhci);
-hw_died:
-		xhci_to_hcd(xhci)->state = HC_STATE_HALT;
-		spin_unlock(&xhci->lock);
-		return -ESHUTDOWN;
-	}
-
-	xhci_work(xhci);
-	spin_unlock(&xhci->lock);
-
-	return IRQ_HANDLED;
-}
-
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
 void xhci_event_ring_work(unsigned long arg)
@@ -423,21 +395,36 @@ int xhci_run(struct usb_hcd *hcd)
 {
 	u32 temp;
 	u64 temp_64;
+	int ret;
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 	void (*doorbell)(struct xhci_hcd *) = NULL;
 
 	hcd->uses_new_polling = 1;
-	hcd->poll_rh = 0;
 
 	xhci_dbg(xhci, "xhci_run\n");
-#if 0	/* FIXME: MSI not setup yet */
-	/* Do this at the very last minute */
+	/* unregister the legacy interrupt */
+	if (hcd->irq)
+		free_irq(hcd->irq, hcd);
+	hcd->irq = -1;
+
 	ret = xhci_setup_msix(xhci);
-	if (!ret)
-		return ret;
+	if (ret)
+		/* fall back to MSI */
+		ret = xhci_setup_msi(xhci);
+
+	if (ret) {
+		/* fall back to legacy interrupt */
+		ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
+				hcd->irq_descr, hcd);
+		if (ret) {
+			xhci_err(xhci, "request interrupt %d failed\n",
+					pdev->irq);
+			return ret;
+		}
+		hcd->irq = pdev->irq;
+	}
 
-	return -ENOSYS;
-#endif
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
 	init_timer(&xhci->event_ring_timer);
 	xhci->event_ring_timer.data = (unsigned long) xhci;
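The registration sequence above amounts to a three-tier fallback: MSI-X, then MSI, then a shared legacy interrupt. A compilable toy model of just that control flow follows; the setup_* stubs are placeholders that fake failure and are not kernel APIs.

#include <stdio.h>

/* Placeholder setup functions faking failure; not kernel APIs. */
static int setup_msix(void)   { return -1; }	/* pretend MSI-X fails */
static int setup_msi(void)    { return -1; }	/* pretend MSI fails too */
static int setup_legacy(void) { return 0; }	/* shared INTx succeeds */

static int setup_interrupts(void)
{
	int ret = setup_msix();

	if (ret)
		/* fall back to MSI */
		ret = setup_msi();
	if (ret)
		/* fall back to a shared legacy interrupt */
		ret = setup_legacy();
	return ret;
}

int main(void)
{
	printf("setup_interrupts() = %d\n", setup_interrupts());
	return 0;	/* prints 0: the legacy path won */
}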
@@ -495,7 +482,6 @@ int xhci_run(struct usb_hcd *hcd)
 		return -ENODEV;
 	}
 
-	xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp);
 	if (doorbell)
 		(*doorbell)(xhci);
 	if (xhci->quirks & XHCI_NEC_HOST)
@@ -522,11 +508,9 @@ void xhci_stop(struct usb_hcd *hcd)
 	spin_lock_irq(&xhci->lock);
 	xhci_halt(xhci);
 	xhci_reset(xhci);
+	xhci_cleanup_msix(xhci);
 	spin_unlock_irq(&xhci->lock);
 
-#if 0	/* No MSI yet */
-	xhci_cleanup_msix(xhci);
-#endif
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
 	/* Tell the event ring poll function not to reschedule */
 	xhci->zombie = 1;
@@ -560,11 +544,8 @@ void xhci_shutdown(struct usb_hcd *hcd)
 
 	spin_lock_irq(&xhci->lock);
 	xhci_halt(xhci);
-	spin_unlock_irq(&xhci->lock);
-
-#if 0
 	xhci_cleanup_msix(xhci);
-#endif
+	spin_unlock_irq(&xhci->lock);
 
 	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
 		    xhci_readl(xhci, &xhci->op_regs->status));
@@ -720,7 +701,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 	unsigned long flags;
 	int ret = 0;
 	unsigned int slot_id, ep_index;
-
+	struct urb_priv	*urb_priv;
+	int size, i;
 
 	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
 		return -EINVAL;
@@ -734,12 +716,36 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 		ret = -EINVAL;
 		goto exit;
 	}
-	if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+	if (!HCD_HW_ACCESSIBLE(hcd)) {
 		if (!in_interrupt())
 			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
 		ret = -ESHUTDOWN;
 		goto exit;
 	}
+
+	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
+		size = urb->number_of_packets;
+	else
+		size = 1;
+
+	urb_priv = kzalloc(sizeof(struct urb_priv) +
+				  size * sizeof(struct xhci_td *), mem_flags);
+	if (!urb_priv)
+		return -ENOMEM;
+
+	for (i = 0; i < size; i++) {
+		urb_priv->td[i] = kzalloc(sizeof(struct xhci_td), mem_flags);
+		if (!urb_priv->td[i]) {
+			urb_priv->length = i;
+			xhci_urb_free_priv(xhci, urb_priv);
+			return -ENOMEM;
+		}
+	}
+
+	urb_priv->length = size;
+	urb_priv->td_cnt = 0;
+	urb->hcpriv = urb_priv;
+
 	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
 		/* Check to see if the max packet size for the default control
 		 * endpoint changed during FS device enumeration
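The urb_priv allocation above sizes one block for the header plus an array of per-packet TD pointers, then allocates each TD individually, unwinding on failure. Here is a user-space sketch of the same pattern; the struct layouts are invented stand-ins for the kernel's urb_priv and xhci_td, not their real definitions.

#include <stdlib.h>

/* Invented layouts standing in for the kernel's xhci_td and urb_priv. */
struct td { int dummy; };
struct priv {
	size_t length;		/* TDs allocated, like urb_priv->length */
	size_t td_cnt;		/* TDs completed, like urb_priv->td_cnt */
	struct td *td[];	/* flexible array of TD pointers */
};

static struct priv *priv_alloc(size_t size)
{
	size_t i;
	struct priv *p = calloc(1, sizeof(*p) + size * sizeof(struct td *));

	if (!p)
		return NULL;
	for (i = 0; i < size; i++) {
		p->td[i] = calloc(1, sizeof(struct td));
		if (!p->td[i]) {
			/* unwind, like xhci_urb_free_priv() with length = i */
			while (i--)
				free(p->td[i]);
			free(p);
			return NULL;
		}
	}
	p->length = size;
	return p;
}

int main(void)
{
	struct priv *p = priv_alloc(8);	/* e.g. an 8-packet isoc URB */
	size_t i;

	if (p) {
		for (i = 0; i < p->length; i++)
			free(p->td[i]);
		free(p);
	}
	return 0;
}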
@@ -788,11 +794,18 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 				slot_id, ep_index);
 		spin_unlock_irqrestore(&xhci->lock, flags);
 	} else {
-		ret = -EINVAL;
+		spin_lock_irqsave(&xhci->lock, flags);
+		if (xhci->xhc_state & XHCI_STATE_DYING)
+			goto dying;
+		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
+				slot_id, ep_index);
+		spin_unlock_irqrestore(&xhci->lock, flags);
 	}
 exit:
 	return ret;
 dying:
+	xhci_urb_free_priv(xhci, urb_priv);
+	urb->hcpriv = NULL;
 	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
 			"non-responsive xHCI host.\n",
 			urb->ep->desc.bEndpointAddress, urb);
@@ -800,6 +813,47 @@ dying:
 	return -ESHUTDOWN;
 }
 
+/* Get the right ring for the given URB.
+ * If the endpoint supports streams, boundary check the URB's stream ID.
+ * If the endpoint doesn't support streams, return the singular endpoint ring.
+ */
+static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
+		struct urb *urb)
+{
+	unsigned int slot_id;
+	unsigned int ep_index;
+	unsigned int stream_id;
+	struct xhci_virt_ep *ep;
+
+	slot_id = urb->dev->slot_id;
+	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+	stream_id = urb->stream_id;
+	ep = &xhci->devs[slot_id]->eps[ep_index];
+	/* Common case: no streams */
+	if (!(ep->ep_state & EP_HAS_STREAMS))
+		return ep->ring;
+
+	if (stream_id == 0) {
+		xhci_warn(xhci,
+				"WARN: Slot ID %u, ep index %u has streams, "
+				"but URB has no stream ID.\n",
+				slot_id, ep_index);
+		return NULL;
+	}
+
+	if (stream_id < ep->stream_info->num_streams)
+		return ep->stream_info->stream_rings[stream_id];
+
+	xhci_warn(xhci,
+			"WARN: Slot ID %u, ep index %u has "
+			"stream IDs 1 to %u allocated, "
+			"but stream ID %u is requested.\n",
+			slot_id, ep_index,
+			ep->stream_info->num_streams - 1,
+			stream_id);
+	return NULL;
+}
+
 /*
  * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
  * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
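One detail worth calling out in xhci_urb_to_transfer_ring(): for a streams endpoint, stream ID 0 is reserved, so the usable IDs run from 1 through num_streams - 1. A tiny standalone check modelling just that boundary (the num_streams value is invented):

#include <stdio.h>

/* Models the boundary check only; num_streams is an invented value. */
static int stream_id_valid(unsigned int stream_id, unsigned int num_streams)
{
	return stream_id != 0 && stream_id < num_streams;
}

int main(void)
{
	unsigned int num_streams = 4;	/* IDs 1..3 are usable */
	unsigned int id;

	for (id = 0; id <= num_streams; id++)
		printf("stream %u: %s\n", id,
		       stream_id_valid(id, num_streams) ? "ok" : "rejected");
	return 0;
}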
@@ -834,9 +888,10 @@ dying:
 int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 {
 	unsigned long flags;
-	int ret;
+	int ret, i;
 	u32 temp;
 	struct xhci_hcd *xhci;
+	struct urb_priv	*urb_priv;
 	struct xhci_td *td;
 	unsigned int ep_index;
 	struct xhci_ring *ep_ring;
@@ -851,12 +906,12 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	temp = xhci_readl(xhci, &xhci->op_regs->status);
 	if (temp == 0xffffffff) {
 		xhci_dbg(xhci, "HW died, freeing TD.\n");
-		td = (struct xhci_td *) urb->hcpriv;
+		urb_priv = urb->hcpriv;
 
 		usb_hcd_unlink_urb_from_ep(hcd, urb);
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, -ESHUTDOWN);
-		kfree(td);
+		xhci_urb_free_priv(xhci, urb_priv);
 		return ret;
 	}
 	if (xhci->xhc_state & XHCI_STATE_DYING) {
@@ -884,9 +939,14 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 
 	xhci_dbg(xhci, "Endpoint ring:\n");
 	xhci_debug_ring(xhci, ep_ring);
-	td = (struct xhci_td *) urb->hcpriv;
 
-	list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
+	urb_priv = urb->hcpriv;
+
+	for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
+		td = urb_priv->td[i];
+		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
+	}
+
 	/* Queue a stop endpoint command, but only if this is
 	 * the first cancellation to be handled.
 	 */
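The cancellation loop above starts at td_cnt rather than 0 because the first td_cnt TDs of the URB have already been given back; only the remainder need to go on the cancelled list. A toy illustration of that bookkeeping (the counts are invented):

#include <stdio.h>

/* td_cnt and length are invented example values. */
static void cancel_remaining(unsigned int td_cnt, unsigned int length)
{
	unsigned int i;

	/* TDs 0..td_cnt-1 already completed; cancel only the rest. */
	for (i = td_cnt; i < length; i++)
		printf("queue TD %u on the cancelled list\n", i);
}

int main(void)
{
	cancel_remaining(3, 8);	/* an 8-TD isoc URB, 3 TDs given back */
	return 0;
}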