Diffstat (limited to 'drivers')
-rw-r--r--  drivers/usb/host/xhci-dbg.c  |  24
-rw-r--r--  drivers/usb/host/xhci-mem.c  |  74
-rw-r--r--  drivers/usb/host/xhci-ring.c | 192
-rw-r--r--  drivers/usb/host/xhci.c      |  19
-rw-r--r--  drivers/usb/host/xhci.h      |  26
5 files changed, 280 insertions, 55 deletions
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 105fa8b025bb..fcbf4abbf381 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -364,6 +364,30 @@ void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring)
 		xhci_debug_segment(xhci, seg);
 }
 
+void xhci_dbg_ep_rings(struct xhci_hcd *xhci,
+		unsigned int slot_id, unsigned int ep_index,
+		struct xhci_virt_ep *ep)
+{
+	int i;
+	struct xhci_ring *ring;
+
+	if (ep->ep_state & EP_HAS_STREAMS) {
+		for (i = 1; i < ep->stream_info->num_streams; i++) {
+			ring = ep->stream_info->stream_rings[i];
+			xhci_dbg(xhci, "Dev %d endpoint %d stream ID %d:\n",
+				slot_id, ep_index, i);
+			xhci_debug_segment(xhci, ring->deq_seg);
+		}
+	} else {
+		ring = ep->ring;
+		if (!ring)
+			return;
+		xhci_dbg(xhci, "Dev %d endpoint ring %d:\n",
+			slot_id, ep_index);
+		xhci_debug_segment(xhci, ring->deq_seg);
+	}
+}
+
 void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
 {
 	u32 addr = (u32) erst->erst_dma_addr;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index d299ffad806b..5711048708d7 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -353,8 +353,19 @@ struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
 				mem_flags, dma);
 }
 
+struct xhci_ring *xhci_dma_to_transfer_ring(
+		struct xhci_virt_ep *ep,
+		u64 address)
+{
+	if (ep->ep_state & EP_HAS_STREAMS)
+		return radix_tree_lookup(&ep->stream_info->trb_address_map,
+				address >> SEGMENT_SHIFT);
+	return ep->ring;
+}
+
+/* Only use this when you know stream_info is valid */
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-struct xhci_ring *dma_to_stream_ring(
+static struct xhci_ring *dma_to_stream_ring(
 		struct xhci_stream_info *stream_info,
 		u64 address)
 {
@@ -363,6 +374,66 @@ struct xhci_ring *dma_to_stream_ring(
 }
 #endif	/* CONFIG_USB_XHCI_HCD_DEBUGGING */
 
+struct xhci_ring *xhci_stream_id_to_ring(
+		struct xhci_virt_device *dev,
+		unsigned int ep_index,
+		unsigned int stream_id)
+{
+	struct xhci_virt_ep *ep = &dev->eps[ep_index];
+
+	if (stream_id == 0)
+		return ep->ring;
+	if (!ep->stream_info)
+		return NULL;
+
+	if (stream_id > ep->stream_info->num_streams)
+		return NULL;
+	return ep->stream_info->stream_rings[stream_id];
+}
+
+struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
+		unsigned int slot_id, unsigned int ep_index,
+		unsigned int stream_id)
+{
+	struct xhci_virt_ep *ep;
+
+	ep = &xhci->devs[slot_id]->eps[ep_index];
+	/* Common case: no streams */
+	if (!(ep->ep_state & EP_HAS_STREAMS))
+		return ep->ring;
+
+	if (stream_id == 0) {
+		xhci_warn(xhci,
+				"WARN: Slot ID %u, ep index %u has streams, "
+				"but URB has no stream ID.\n",
+				slot_id, ep_index);
+		return NULL;
+	}
+
+	if (stream_id < ep->stream_info->num_streams)
+		return ep->stream_info->stream_rings[stream_id];
+
+	xhci_warn(xhci,
+			"WARN: Slot ID %u, ep index %u has "
+			"stream IDs 1 to %u allocated, "
+			"but stream ID %u is requested.\n",
+			slot_id, ep_index,
+			ep->stream_info->num_streams - 1,
+			stream_id);
+	return NULL;
+}
+
+/* Get the right ring for the given URB.
+ * If the endpoint supports streams, boundary check the URB's stream ID.
+ * If the endpoint doesn't support streams, return the singular endpoint ring.
+ */
+struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
+		struct urb *urb)
+{
+	return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
+		xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
+}
+
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
 static int xhci_test_radix_tree(struct xhci_hcd *xhci,
 		unsigned int num_streams,
@@ -515,6 +586,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
 		cur_ring = stream_info->stream_rings[cur_stream];
 		if (!cur_ring)
 			goto cleanup_rings;
+		cur_ring->stream_id = cur_stream;
 		/* Set deq ptr, cycle bit, and stream context type */
 		addr = cur_ring->first_seg->dma |
 			SCT_FOR_CTX(SCT_PRI_TR) |
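
The lookup in xhci_dma_to_transfer_ring() above only works if every segment of every stream ring was registered in trb_address_map when the streams were allocated. That insertion side lives in xhci_alloc_stream_info() and is not visible in this diff; the following is a sketch of what it has to do, assuming each ring is a circular list of segments starting at first_seg (example_map_ring_segments() is an illustrative name, not a function in the driver):

/* Illustrative sketch only: register each segment of a stream ring so
 * xhci_dma_to_transfer_ring() can map a TRB's DMA address back to the
 * ring that owns it.  Keys are segment DMA addresses >> SEGMENT_SHIFT,
 * matching the lookup above.
 */
static int example_map_ring_segments(struct xhci_stream_info *stream_info,
		struct xhci_ring *ring)
{
	struct xhci_segment *seg = ring->first_seg;
	int ret;

	do {
		ret = radix_tree_insert(&stream_info->trb_address_map,
				seg->dma >> SEGMENT_SHIFT, ring);
		if (ret)
			return ret;
		seg = seg->next;
	} while (seg != ring->first_seg);
	return 0;
}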
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index a14f657e279b..16ef5fd77ce2 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -312,7 +312,8 @@ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
 
 static void ring_ep_doorbell(struct xhci_hcd *xhci,
 		unsigned int slot_id,
-		unsigned int ep_index)
+		unsigned int ep_index,
+		unsigned int stream_id)
 {
 	struct xhci_virt_ep *ep;
 	unsigned int ep_state;
@@ -331,7 +332,8 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
 	if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING)
 			&& !(ep_state & EP_HALTED)) {
 		field = xhci_readl(xhci, db_addr) & DB_MASK;
-		xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
+		field |= EPI_TO_DB(ep_index) | STREAM_ID_TO_DB(stream_id);
+		xhci_writel(xhci, field, db_addr);
 		/* Flush PCI posted writes - FIXME Matthew Wilcox says this
 		 * isn't time-critical and we shouldn't make the CPU wait for
 		 * the flush.
@@ -340,6 +342,31 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
 	}
 }
 
+/* Ring the doorbell for any rings with pending URBs */
+static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
+		unsigned int slot_id,
+		unsigned int ep_index)
+{
+	unsigned int stream_id;
+	struct xhci_virt_ep *ep;
+
+	ep = &xhci->devs[slot_id]->eps[ep_index];
+
+	/* A ring has pending URBs if its TD list is not empty */
+	if (!(ep->ep_state & EP_HAS_STREAMS)) {
+		if (!(list_empty(&ep->ring->td_list)))
+			ring_ep_doorbell(xhci, slot_id, ep_index, 0);
+		return;
+	}
+
+	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
+			stream_id++) {
+		struct xhci_stream_info *stream_info = ep->stream_info;
+		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
+			ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
+	}
+}
+
 /*
  * Find the segment that trb is in.  Start searching in start_seg.
  * If we must move past a segment that has a link TRB with a toggle cycle state
@@ -382,14 +409,23 @@ static struct xhci_segment *find_trb_seg(
  */
 void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 		unsigned int slot_id, unsigned int ep_index,
-		struct xhci_td *cur_td, struct xhci_dequeue_state *state)
+		unsigned int stream_id, struct xhci_td *cur_td,
+		struct xhci_dequeue_state *state)
 {
 	struct xhci_virt_device *dev = xhci->devs[slot_id];
-	struct xhci_ring *ep_ring = dev->eps[ep_index].ring;
+	struct xhci_ring *ep_ring;
 	struct xhci_generic_trb *trb;
 	struct xhci_ep_ctx *ep_ctx;
 	dma_addr_t addr;
 
+	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
+			ep_index, stream_id);
+	if (!ep_ring) {
+		xhci_warn(xhci, "WARN can't find new dequeue state "
+				"for invalid stream ID %u.\n",
+				stream_id);
+		return;
+	}
 	state->new_cycle_state = 0;
 	xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
 	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
@@ -469,11 +505,13 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 }
 
 static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
-		unsigned int ep_index, struct xhci_segment *deq_seg,
+		unsigned int ep_index, unsigned int stream_id,
+		struct xhci_segment *deq_seg,
 		union xhci_trb *deq_ptr, u32 cycle_state);
 
 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
 		unsigned int slot_id, unsigned int ep_index,
+		unsigned int stream_id,
 		struct xhci_dequeue_state *deq_state)
 {
 	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
@@ -485,7 +523,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
 			deq_state->new_deq_ptr,
 			(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
 			deq_state->new_cycle_state);
-	queue_set_tr_deq(xhci, slot_id, ep_index,
+	queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
 			deq_state->new_deq_seg,
 			deq_state->new_deq_ptr,
 			(u32) deq_state->new_cycle_state);
@@ -553,11 +591,10 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
 	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
 	ep = &xhci->devs[slot_id]->eps[ep_index];
-	ep_ring = ep->ring;
 
 	if (list_empty(&ep->cancelled_td_list)) {
 		xhci_stop_watchdog_timer_in_irq(xhci, ep);
-		ring_ep_doorbell(xhci, slot_id, ep_index);
+		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 		return;
 	}
 
@@ -571,15 +608,36 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 		xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
 				cur_td->first_trb,
 				(unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
+		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
+		if (!ep_ring) {
+			/* This shouldn't happen unless a driver is mucking
+			 * with the stream ID after submission.  This will
+			 * leave the TD on the hardware ring, and the hardware
+			 * will try to execute it, and may access a buffer
+			 * that has already been freed.  In the best case, the
+			 * hardware will execute it, and the event handler will
+			 * ignore the completion event for that TD, since it was
+			 * removed from the td_list for that endpoint.  In
+			 * short, don't muck with the stream ID after
+			 * submission.
+			 */
+			xhci_warn(xhci, "WARN Cancelled URB %p "
+					"has invalid stream ID %u.\n",
+					cur_td->urb,
+					cur_td->urb->stream_id);
+			goto remove_finished_td;
+		}
 		/*
 		 * If we stopped on the TD we need to cancel, then we have to
 		 * move the xHC endpoint ring dequeue pointer past this TD.
 		 */
 		if (cur_td == ep->stopped_td)
-			xhci_find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
-					&deq_state);
+			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
+					cur_td->urb->stream_id,
+					cur_td, &deq_state);
 		else
 			td_to_noop(xhci, ep_ring, cur_td);
+remove_finished_td:
 		/*
 		 * The event handler won't see a completion for this TD anymore,
 		 * so remove it from the endpoint ring's TD list.  Keep it in
@@ -593,11 +651,13 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
 	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
 		xhci_queue_new_dequeue_state(xhci,
-				slot_id, ep_index, &deq_state);
+				slot_id, ep_index,
+				ep->stopped_td->urb->stream_id,
+				&deq_state);
 		xhci_ring_cmd_db(xhci);
 	} else {
-		/* Otherwise just ring the doorbell to restart the ring */
-		ring_ep_doorbell(xhci, slot_id, ep_index);
+		/* Otherwise ring the doorbell(s) to restart queued transfers */
+		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 	}
 	ep->stopped_td = NULL;
 	ep->stopped_trb = NULL;
@@ -757,6 +817,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 {
 	unsigned int slot_id;
 	unsigned int ep_index;
+	unsigned int stream_id;
 	struct xhci_ring *ep_ring;
 	struct xhci_virt_device *dev;
 	struct xhci_ep_ctx *ep_ctx;
@@ -764,8 +825,19 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 
 	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
 	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
+	stream_id = TRB_TO_STREAM_ID(trb->generic.field[2]);
 	dev = xhci->devs[slot_id];
-	ep_ring = dev->eps[ep_index].ring;
+
+	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
+	if (!ep_ring) {
+		xhci_warn(xhci, "WARN Set TR deq ptr command for "
+				"freed stream ID %u\n",
+				stream_id);
+		/* XXX: Harmless??? */
+		dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
+		return;
+	}
+
 	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
 	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
 
@@ -810,7 +882,8 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 	}
 
 	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
-	ring_ep_doorbell(xhci, slot_id, ep_index);
+	/* Restart any rings with pending URBs */
+	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 }
 
 static void handle_reset_ep_completion(struct xhci_hcd *xhci,
@@ -819,11 +892,9 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
 {
 	int slot_id;
 	unsigned int ep_index;
-	struct xhci_ring *ep_ring;
 
 	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
 	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
-	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
 	/* This command will only fail if the endpoint wasn't halted,
 	 * but we don't care.
 	 */
@@ -841,9 +912,9 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
 				false);
 		xhci_ring_cmd_db(xhci);
 	} else {
-		/* Clear our internal halted state and restart the ring */
+		/* Clear our internal halted state and restart the ring(s) */
 		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
-		ring_ep_doorbell(xhci, slot_id, ep_index);
+		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 	}
 }
 
@@ -929,8 +1000,10 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 		/* Input ctx add_flags are the endpoint index plus one */
 		ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
 		/* A usb_set_interface() call directly after clearing a halted
-		 * condition may race on this quirky hardware.
-		 * Not worth worrying about, since this is prototype hardware.
+		 * condition may race on this quirky hardware.  Not worth
+		 * worrying about, since this is prototype hardware.  Not sure
+		 * if this will work for streams, but streams support was
+		 * untested on this prototype.
 		 */
 		if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
 				ep_index != (unsigned int) -1 &&
@@ -943,10 +1016,10 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 			xhci_dbg(xhci, "Completed config ep cmd - "
 					"last ep index = %d, state = %d\n",
 					ep_index, ep_state);
-			/* Clear our internal halted state and restart ring */
+			/* Clear internal halted state and restart ring(s) */
 			xhci->devs[slot_id]->eps[ep_index].ep_state &=
 					~EP_HALTED;
-			ring_ep_doorbell(xhci, slot_id, ep_index);
+			ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 			break;
 		}
 bandwidth_change:
@@ -1079,12 +1152,14 @@ struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
 
 static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
 		unsigned int slot_id, unsigned int ep_index,
+		unsigned int stream_id,
 		struct xhci_td *td, union xhci_trb *event_trb)
 {
 	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
 	ep->ep_state |= EP_HALTED;
 	ep->stopped_td = td;
 	ep->stopped_trb = event_trb;
+	ep->stopped_stream = stream_id;
 
 	xhci_queue_reset_ep(xhci, slot_id, ep_index);
 	xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
@@ -1169,10 +1244,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	ep_index = TRB_TO_EP_ID(event->flags) - 1;
 	xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
 	ep = &xdev->eps[ep_index];
-	ep_ring = ep->ring;
+	ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
 	if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
-		xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
+		xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
+				"or incorrect stream ring\n");
 		return -ENODEV;
 	}
 
@@ -1303,7 +1379,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			td->urb->actual_length = 0;
 
 			xhci_cleanup_halted_endpoint(xhci,
-					slot_id, ep_index, td, event_trb);
+					slot_id, ep_index, 0, td, event_trb);
 			goto td_cleanup;
 		}
 		/*
@@ -1452,6 +1528,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			 */
 			ep->stopped_td = td;
 			ep->stopped_trb = event_trb;
+			ep->stopped_stream = ep_ring->stream_id;
 		} else if (xhci_requires_manual_halt_cleanup(xhci,
 					ep_ctx, trb_comp_code)) {
 			/* Other types of errors halt the endpoint, but the
@@ -1460,7 +1537,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			 * xHCI hardware manually.
 			 */
 			xhci_cleanup_halted_endpoint(xhci,
-					slot_id, ep_index, td, event_trb);
+					slot_id, ep_index, ep_ring->stream_id, td, event_trb);
 		} else {
 			/* Update ring dequeue pointer */
 			while (ep_ring->dequeue != td->last_trb)
@@ -1656,14 +1733,24 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 static int prepare_transfer(struct xhci_hcd *xhci,
 		struct xhci_virt_device *xdev,
 		unsigned int ep_index,
+		unsigned int stream_id,
 		unsigned int num_trbs,
 		struct urb *urb,
 		struct xhci_td **td,
 		gfp_t mem_flags)
 {
 	int ret;
+	struct xhci_ring *ep_ring;
 	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
-	ret = prepare_ring(xhci, xdev->eps[ep_index].ring,
+
+	ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
+	if (!ep_ring) {
+		xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
+				stream_id);
+		return -EINVAL;
+	}
+
+	ret = prepare_ring(xhci, ep_ring,
 			ep_ctx->ep_info & EP_STATE_MASK,
 			num_trbs, mem_flags);
 	if (ret)
@@ -1683,9 +1770,9 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 	(*td)->urb = urb;
 	urb->hcpriv = (void *) (*td);
 	/* Add this TD to the tail of the endpoint ring's TD list */
-	list_add_tail(&(*td)->td_list, &xdev->eps[ep_index].ring->td_list);
-	(*td)->start_seg = xdev->eps[ep_index].ring->enq_seg;
-	(*td)->first_trb = xdev->eps[ep_index].ring->enqueue;
+	list_add_tail(&(*td)->td_list, &ep_ring->td_list);
+	(*td)->start_seg = ep_ring->enq_seg;
+	(*td)->first_trb = ep_ring->enqueue;
 
 	return 0;
 }
@@ -1751,7 +1838,7 @@ static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
 }
 
 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
-		unsigned int ep_index, int start_cycle,
+		unsigned int ep_index, unsigned int stream_id, int start_cycle,
 		struct xhci_generic_trb *start_trb, struct xhci_td *td)
 {
 	/*
@@ -1760,7 +1847,7 @@ static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
 	 */
 	wmb();
 	start_trb->field[3] |= start_cycle;
-	ring_ep_doorbell(xhci, slot_id, ep_index);
+	ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
 }
 
 /*
@@ -1834,12 +1921,16 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	struct xhci_generic_trb *start_trb;
 	int start_cycle;
 
-	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+	if (!ep_ring)
+		return -EINVAL;
+
 	num_trbs = count_sg_trbs_needed(xhci, urb);
 	num_sgs = urb->num_sgs;
 
 	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
-			ep_index, num_trbs, urb, &td, mem_flags);
+			ep_index, urb->stream_id,
+			num_trbs, urb, &td, mem_flags);
 	if (trb_buff_len < 0)
 		return trb_buff_len;
 	/*
@@ -1948,7 +2039,8 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	} while (running_total < urb->transfer_buffer_length);
 
 	check_trb_math(urb, num_trbs, running_total);
-	giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
+	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
+			start_cycle, start_trb, td);
 	return 0;
 }
 
@@ -1970,7 +2062,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	if (urb->num_sgs)
 		return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
 
-	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+	if (!ep_ring)
+		return -EINVAL;
 
 	num_trbs = 0;
 	/* How much data is (potentially) left before the 64KB boundary? */
@@ -1997,7 +2091,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			(unsigned long long)urb->transfer_dma,
 			num_trbs);
 
-	ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
+	ret = prepare_transfer(xhci, xhci->devs[slot_id],
+			ep_index, urb->stream_id,
 			num_trbs, urb, &td, mem_flags);
 	if (ret < 0)
 		return ret;
@@ -2067,7 +2162,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	} while (running_total < urb->transfer_buffer_length);
 
 	check_trb_math(urb, num_trbs, running_total);
-	giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
+	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
+			start_cycle, start_trb, td);
 	return 0;
 }
 
@@ -2084,7 +2180,9 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	u32 field, length_field;
 	struct xhci_td *td;
 
-	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+	if (!ep_ring)
+		return -EINVAL;
 
 	/*
 	 * Need to copy setup packet into setup TRB, so we can't use the setup
@@ -2105,8 +2203,9 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	 */
 	if (urb->transfer_buffer_length > 0)
 		num_trbs++;
-	ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs,
-			urb, &td, mem_flags);
+	ret = prepare_transfer(xhci, xhci->devs[slot_id],
+			ep_index, urb->stream_id,
+			num_trbs, urb, &td, mem_flags);
 	if (ret < 0)
 		return ret;
 
@@ -2161,7 +2260,8 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			/* Event on completion */
 			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
 
-	giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
+	giveback_first_trb(xhci, slot_id, ep_index, 0,
+			start_cycle, start_trb, td);
 	return 0;
 }
 
@@ -2273,12 +2373,14 @@ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
  * This should not be used for endpoints that have streams enabled.
  */
 static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
-		unsigned int ep_index, struct xhci_segment *deq_seg,
+		unsigned int ep_index, unsigned int stream_id,
+		struct xhci_segment *deq_seg,
 		union xhci_trb *deq_ptr, u32 cycle_state)
 {
 	dma_addr_t addr;
 	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
 	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
+	u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
 	u32 type = TRB_TYPE(TRB_SET_DEQ);
 
 	addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
@@ -2289,7 +2391,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
 		return 0;
 	}
 	return queue_command(xhci, lower_32_bits(addr) | cycle_state,
-			upper_32_bits(addr), 0,
+			upper_32_bits(addr), trb_stream_id,
 			trb_slot_id | trb_ep_index | type, false);
 }
 
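
Every transfer path above now takes the stream ID from the URB itself, so a device driver only has to allocate the streams and tag each URB before submission; stream ID 0 stays reserved for the non-stream case. A hedged caller-side sketch, assuming the usb core streams interface (usb_alloc_streams() and urb->stream_id) that this plumbing serves; example_submit_on_stream() and the stream count of 4 are illustrative, not part of the patch:

/* Hypothetical class-driver sketch: where urb->stream_id comes from.
 * Stream teardown (usb_free_streams()) and most error handling are
 * omitted for brevity.
 */
static int example_submit_on_stream(struct usb_interface *intf,
		struct usb_host_endpoint *ep, struct urb *urb,
		unsigned int stream_id)
{
	struct usb_host_endpoint *eps[] = { ep };
	int ret;

	/* Ask the HCD to set up stream rings on this bulk endpoint;
	 * stream IDs usable for submission then start at 1. */
	ret = usb_alloc_streams(intf, eps, 1, 4, GFP_KERNEL);
	if (ret < 0)
		return ret;

	/* xhci_urb_to_transfer_ring() uses this to pick
	 * stream_rings[stream_id] instead of the single ep->ring. */
	urb->stream_id = stream_id;
	return usb_submit_urb(urb, GFP_KERNEL);
}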
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 2e370fea9590..3cac2ff8b50a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -353,11 +353,7 @@ void xhci_event_ring_work(unsigned long arg)
 		if (!xhci->devs[i])
 			continue;
 		for (j = 0; j < 31; ++j) {
-			struct xhci_ring *ring = xhci->devs[i]->eps[j].ring;
-			if (!ring)
-				continue;
-			xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
-			xhci_debug_segment(xhci, ring->deq_seg);
+			xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
 		}
 	}
 
@@ -839,7 +835,12 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	xhci_debug_ring(xhci, xhci->event_ring);
 	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
 	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
-	ep_ring = ep->ring;
+	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+	if (!ep_ring) {
+		ret = -EINVAL;
+		goto done;
+	}
+
 	xhci_dbg(xhci, "Endpoint ring:\n");
 	xhci_debug_ring(xhci, ep_ring);
 	td = (struct xhci_td *) urb->hcpriv;
@@ -1383,7 +1384,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
 	 * or it will attempt to resend it on the next doorbell ring.
 	 */
 	xhci_find_new_dequeue_state(xhci, udev->slot_id,
-			ep_index, ep->stopped_td,
+			ep_index, ep->stopped_stream, ep->stopped_td,
 			&deq_state);
 
 	/* HW with the reset endpoint quirk will use the saved dequeue state to
@@ -1392,10 +1393,12 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
 	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
 		xhci_dbg(xhci, "Queueing new dequeue state\n");
 		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
-				ep_index, &deq_state);
+				ep_index, ep->stopped_stream, &deq_state);
 	} else {
 		/* Better hope no one uses the input context between now and the
 		 * reset endpoint completion!
+		 * XXX: No idea how this hardware will react when stream rings
+		 * are enabled.
 		 */
 		xhci_dbg(xhci, "Setting up input context for "
 				"configure endpoint command\n");
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 7a9447cb6ea9..dada2fb59261 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -444,6 +444,7 @@ struct xhci_doorbell_array {
 
 /* Endpoint Target - bits 0:7 */
 #define EPI_TO_DB(p)		(((p) + 1) & 0xff)
+#define STREAM_ID_TO_DB(p)	(((p) & 0xffff) << 16)
 
 
 /**
@@ -714,6 +715,7 @@ struct xhci_virt_ep {
 	/* The TRB that was last reported in a stopped endpoint ring */
 	union xhci_trb		*stopped_trb;
 	struct xhci_td		*stopped_td;
+	unsigned int		stopped_stream;
 	/* Watchdog timer for stop endpoint command to cancel URBs */
 	struct timer_list	stop_cmd_timer;
 	int			stop_cmds_pending;
@@ -871,6 +873,10 @@ struct xhci_event_cmd {
 #define TRB_TO_EP_INDEX(p)		((((p) & (0x1f << 16)) >> 16) - 1)
 #define	EP_ID_FOR_TRB(p)		((((p) + 1) & 0x1f) << 16)
 
+/* Set TR Dequeue Pointer command TRB fields */
+#define TRB_TO_STREAM_ID(p)		((((p) & (0xffff << 16)) >> 16))
+#define STREAM_ID_FOR_TRB(p)		((((p)) & 0xffff) << 16)
+
 
 /* Port Status Change Event TRB fields */
 /* Port ID - bits 31:24 */
@@ -1040,6 +1046,7 @@ struct xhci_ring {
 	 * if we own the TRB (if we are the consumer).  See section 4.9.1.
 	 */
 	u32			cycle_state;
+	unsigned int		stream_id;
 };
 
 struct xhci_erst_entry {
@@ -1265,6 +1272,9 @@ void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
 void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep);
 char *xhci_get_slot_state(struct xhci_hcd *xhci,
 		struct xhci_container_ctx *ctx);
+void xhci_dbg_ep_rings(struct xhci_hcd *xhci,
+		unsigned int slot_id, unsigned int ep_index,
+		struct xhci_virt_ep *ep);
 
 /* xHCI memory management */
 void xhci_mem_cleanup(struct xhci_hcd *xhci);
@@ -1302,6 +1312,18 @@ void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
 void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
 		struct xhci_ep_ctx *ep_ctx,
 		struct xhci_virt_ep *ep);
+struct xhci_ring *xhci_dma_to_transfer_ring(
+		struct xhci_virt_ep *ep,
+		u64 address);
+struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
+		struct urb *urb);
+struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
+		unsigned int slot_id, unsigned int ep_index,
+		unsigned int stream_id);
+struct xhci_ring *xhci_stream_id_to_ring(
+		struct xhci_virt_device *dev,
+		unsigned int ep_index,
+		unsigned int stream_id);
 struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
 		bool allocate_in_ctx, bool allocate_completion,
 		gfp_t mem_flags);
@@ -1374,9 +1396,11 @@ int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
 int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id);
 void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 		unsigned int slot_id, unsigned int ep_index,
-		struct xhci_td *cur_td, struct xhci_dequeue_state *state);
+		unsigned int stream_id, struct xhci_td *cur_td,
+		struct xhci_dequeue_state *state);
 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
 		unsigned int slot_id, unsigned int ep_index,
+		unsigned int stream_id,
 		struct xhci_dequeue_state *deq_state);
 void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
 		struct usb_device *udev, unsigned int ep_index);
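
The new stream-ID macros all use the same 16-bit field in bits 31:16, so the TRB encode/decode pair round-trips and the doorbell encoding matches the Set TR Dequeue Pointer TRB encoding. A small illustrative check (example_stream_macro_roundtrip() is hypothetical, not part of the patch):

/* Sanity sketch: STREAM_ID_FOR_TRB()/TRB_TO_STREAM_ID() are inverses
 * over a 16-bit stream ID, and STREAM_ID_TO_DB() places the ID in the
 * same bits 31:16 of the doorbell value.
 */
static void example_stream_macro_roundtrip(void)
{
	unsigned int stream_id = 5;
	u32 field2 = STREAM_ID_FOR_TRB(stream_id);

	WARN_ON(TRB_TO_STREAM_ID(field2) != stream_id);
	WARN_ON(STREAM_ID_TO_DB(stream_id) != field2);
}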