Diffstat (limited to 'drivers')
 drivers/usb/host/xhci-hcd.c  |  13
 drivers/usb/host/xhci-mem.c  |   4
 drivers/usb/host/xhci-ring.c | 270
 drivers/usb/host/xhci.h      |   4
 4 files changed, 254 insertions(+), 37 deletions(-)
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 50ab525f65be..e5fbdcdbf676 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -589,12 +589,6 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 
 	slot_id = urb->dev->slot_id;
 	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
-	/* Only support ep 0 control transfers for now */
-	if (ep_index != 0) {
-		xhci_dbg(xhci, "WARN: urb submitted to unsupported ep %x\n",
-				urb->ep->desc.bEndpointAddress);
-		return -ENOSYS;
-	}
 
 	spin_lock_irqsave(&xhci->lock, flags);
 	if (!xhci->devs || !xhci->devs[slot_id]) {
@@ -608,7 +602,12 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 		ret = -ESHUTDOWN;
 		goto exit;
 	}
-	ret = queue_ctrl_tx(xhci, mem_flags, urb, slot_id, ep_index);
+	if (usb_endpoint_xfer_control(&urb->ep->desc))
+		ret = queue_ctrl_tx(xhci, mem_flags, urb, slot_id, ep_index);
+	else if (usb_endpoint_xfer_bulk(&urb->ep->desc))
+		ret = queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
+	else
+		ret = -EINVAL;
 exit:
 	spin_unlock_irqrestore(&xhci->lock, flags);
 	return ret;
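Which queueing routine runs is decided purely by the endpoint descriptor. For reference, the ch9 helpers used above test the transfer-type bits of bmAttributes; roughly (paraphrased from <linux/usb/ch9.h>, not part of this patch):

	/* Paraphrase of the <linux/usb/ch9.h> helper used in the dispatch above */
	static inline int usb_endpoint_xfer_bulk(const struct usb_endpoint_descriptor *epd)
	{
		return (epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
			USB_ENDPOINT_XFER_BULK;	/* control is type 0, bulk is 2 */
	}

usb_endpoint_xfer_control() is the same test against USB_ENDPOINT_XFER_CONTROL, so endpoints that are neither control nor bulk (interrupt, isochronous) still fall through to -EINVAL.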
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 8cd55f03ea26..617db9c37770 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -494,7 +494,9 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	case USB_SPEED_SUPER:
 		max_packet = ep->desc.wMaxPacketSize;
 		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
-		/* FIXME dig out burst from ep companion desc */
+		/* dig out max burst from ep companion desc */
+		max_packet = ep->ep_comp->desc.bMaxBurst;
+		ep_ctx->ep_info2 |= MAX_BURST(max_packet);
 		break;
 	case USB_SPEED_HIGH:
 		/* bits 11:12 specify the number of additional transaction
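For SuperSpeed endpoints the burst size comes from the endpoint companion descriptor rather than the endpoint descriptor itself, and it lands next to the max packet size in the endpoint context. The packing macros work along these lines (a sketch of the assumed xHCI endpoint-context layout; the real definitions live in xhci.h):

	/* ep_info2 dword (sketch): Max Burst Size in bits 15:8,
	 * Max Packet Size in bits 31:16, per the xHCI endpoint context.
	 */
	#define MAX_BURST(p)	(((p) & 0xff) << 8)
	#define MAX_PACKET(p)	(((p) & 0xffff) << 16)

Reusing the max_packet variable to hold bMaxBurst works because each value is masked down by its macro before being OR'd into ep_info2.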
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index b4ccf0d72c17..3364381ee6ca 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -395,7 +395,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	dma_addr_t event_dma;
 	struct xhci_segment *event_seg;
 	union xhci_trb *event_trb;
-	struct urb *urb = NULL;
+	struct urb *urb;
 	int status = -EINPROGRESS;
 
 	xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)];
@@ -437,7 +437,46 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		return -ESHUTDOWN;
 	}
 	event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
-
+	xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
+			(unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
+	xhci_dbg(xhci, "Offset 0x00 (buffer[0]) = 0x%x\n",
+			(unsigned int) event->buffer[0]);
+	xhci_dbg(xhci, "Offset 0x04 (buffer[1]) = 0x%x\n",
+			(unsigned int) event->buffer[1]);
+	xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
+			(unsigned int) event->transfer_len);
+	xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
+			(unsigned int) event->flags);
+
+	/* Look for common error cases */
+	switch (GET_COMP_CODE(event->transfer_len)) {
+	/* Skip codes that require special handling depending on
+	 * transfer type
+	 */
+	case COMP_SUCCESS:
+	case COMP_SHORT_TX:
+		break;
+	case COMP_STALL:
+		xhci_warn(xhci, "WARN: Stalled endpoint\n");
+		status = -EPIPE;
+		break;
+	case COMP_TRB_ERR:
+		xhci_warn(xhci, "WARN: TRB error on endpoint\n");
+		status = -EILSEQ;
+		break;
+	case COMP_TX_ERR:
+		xhci_warn(xhci, "WARN: transfer error on endpoint\n");
+		status = -EPROTO;
+		break;
+	case COMP_DB_ERR:
+		xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
+		status = -ENOSR;
+		break;
+	default:
+		xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n");
+		urb = NULL;
+		goto cleanup;
+	}
 	/* Now update the urb's actual_length and give back to the core */
 	/* Was this a control transfer? */
 	if (usb_endpoint_xfer_control(&td->urb->ep->desc)) {
@@ -459,25 +498,9 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			xhci_warn(xhci, "WARN: short transfer on control ep\n");
 			status = -EREMOTEIO;
 			break;
-		case COMP_STALL:
-			xhci_warn(xhci, "WARN: Stalled control ep\n");
-			status = -EPIPE;
-			break;
-		case COMP_TRB_ERR:
-			xhci_warn(xhci, "WARN: TRB error on control ep\n");
-			status = -EILSEQ;
-			break;
-		case COMP_TX_ERR:
-			xhci_warn(xhci, "WARN: transfer error on control ep\n");
-			status = -EPROTO;
-			break;
-		case COMP_DB_ERR:
-			xhci_warn(xhci, "WARN: HC couldn't access mem fast enough on control TX\n");
-			status = -ENOSR;
-			break;
 		default:
-			xhci_dbg(xhci, "ERROR Unknown event condition, HC probably busted\n");
-			goto cleanup;
+			/* Others already handled above */
+			break;
 		}
 		/*
 		 * Did we transfer any data, despite the errors that might have
@@ -493,21 +516,90 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 					TRB_LEN(event->transfer_len);
 			}
 		}
-		while (ep_ring->dequeue != td->last_trb)
-			inc_deq(xhci, ep_ring, false);
-		inc_deq(xhci, ep_ring, false);
-
-		/* Clean up the endpoint's TD list */
-		urb = td->urb;
-		list_del(&td->td_list);
-		kfree(td);
 	} else {
-		xhci_dbg(xhci, "FIXME do something for non-control transfers\n");
+		switch (GET_COMP_CODE(event->transfer_len)) {
+		case COMP_SUCCESS:
+			/* Double check that the HW transferred everything. */
+			if (event_trb != td->last_trb) {
+				xhci_warn(xhci, "WARN Successful completion "
+						"on short TX\n");
+				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+					status = -EREMOTEIO;
+				else
+					status = 0;
+			} else {
+				xhci_dbg(xhci, "Successful bulk transfer!\n");
+				status = 0;
+			}
+			break;
+		case COMP_SHORT_TX:
+			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+				status = -EREMOTEIO;
+			else
+				status = 0;
+			break;
+		default:
+			/* Others already handled above */
+			break;
+		}
+		dev_dbg(&td->urb->dev->dev,
+				"ep %#x - asked for %d bytes, "
+				"%d bytes untransferred\n",
+				td->urb->ep->desc.bEndpointAddress,
+				td->urb->transfer_buffer_length,
+				TRB_LEN(event->transfer_len));
+		/* Fast path - was this the last TRB in the TD for this URB? */
+		if (event_trb == td->last_trb) {
+			if (TRB_LEN(event->transfer_len) != 0) {
+				td->urb->actual_length =
+					td->urb->transfer_buffer_length -
+					TRB_LEN(event->transfer_len);
+				if (td->urb->actual_length < 0) {
+					xhci_warn(xhci, "HC gave bad length "
+							"of %d bytes left\n",
+							TRB_LEN(event->transfer_len));
+					td->urb->actual_length = 0;
+				}
+				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+					status = -EREMOTEIO;
+				else
+					status = 0;
+			} else {
+				td->urb->actual_length = td->urb->transfer_buffer_length;
+				/* Ignore a short packet completion if the
+				 * untransferred length was zero.
+				 */
+				status = 0;
+			}
+		} else {
+			/* Slow path - walk the list, starting from the first
+			 * TRB to get the actual length transferred
+			 */
+			td->urb->actual_length = 0;
+			while (ep_ring->dequeue != event_trb) {
+				td->urb->actual_length += TRB_LEN(ep_ring->dequeue->generic.field[2]);
+				inc_deq(xhci, ep_ring, false);
+			}
+			td->urb->actual_length += TRB_LEN(ep_ring->dequeue->generic.field[2]) -
+				TRB_LEN(event->transfer_len);
+
+		}
 	}
+	/* Update ring dequeue pointer */
+	while (ep_ring->dequeue != td->last_trb)
+		inc_deq(xhci, ep_ring, false);
+	inc_deq(xhci, ep_ring, false);
+
+	/* Clean up the endpoint's TD list */
+	urb = td->urb;
+	list_del(&td->td_list);
+	kfree(td);
+	urb->hcpriv = NULL;
 cleanup:
 	inc_deq(xhci, xhci->event_ring, true);
 	set_hc_event_deq(xhci);
 
+	/* FIXME for multi-TD URBs (who have buffers bigger than 64MB) */
 	if (urb) {
 		usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
 		spin_unlock(&xhci->lock);
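The length accounting above hinges on the event's residue: TRB_LEN(event->transfer_len) is the number of bytes the HC did not transfer for the TRB that generated the event. A standalone sketch of the same arithmetic (array indices stand in for ring positions; these are not the driver's types):

	/* trb_len[i] is the TRB_LEN queued in TRB i; 'residue' is
	 * TRB_LEN(event->transfer_len) from the completion event.
	 */
	static unsigned int bulk_actual_length(const unsigned int *trb_len,
			int event_trb, int last_trb,
			unsigned int requested, unsigned int residue)
	{
		unsigned int actual = 0;
		int i;

		if (event_trb == last_trb)	/* fast path */
			return requested - residue;

		/* Slow path: sum the fully-completed TRBs, then add the
		 * partial length of the TRB the event points at.
		 */
		for (i = 0; i < event_trb; i++)
			actual += trb_len[i];
		return actual + trb_len[event_trb] - residue;
	}

For example, a 96KB URB split into 64KB + 32KB TRBs that completes short on the first TRB with residue 0x100 takes the slow path: nothing is summed, and actual_length = 0x10000 - 0x100 = 0xff00.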
@@ -666,6 +758,126 @@ int xhci_prepare_transfer(struct xhci_hcd *xhci,
 	return 0;
 }
 
+/* This is very similar to what ehci-q.c qtd_fill() does */
+int queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+		struct urb *urb, int slot_id, unsigned int ep_index)
+{
+	struct xhci_ring *ep_ring;
+	struct xhci_td *td;
+	int num_trbs;
+	struct xhci_generic_trb *start_trb;
+	bool first_trb;
+	int start_cycle;
+	u32 field;
+
+	int running_total, trb_buff_len, ret;
+	u64 addr;
+
+	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+
+	num_trbs = 0;
+	/* How much data is (potentially) left before the 64KB boundary? */
+	running_total = TRB_MAX_BUFF_SIZE -
+		(urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+
+	/* If there's some data on this 64KB chunk, or we have to send a
+	 * zero-length transfer, we need at least one TRB
+	 */
+	if (running_total != 0 || urb->transfer_buffer_length == 0)
+		num_trbs++;
+	/* How many more 64KB chunks to transfer, how many more TRBs? */
+	while (running_total < urb->transfer_buffer_length) {
+		num_trbs++;
+		running_total += TRB_MAX_BUFF_SIZE;
+	}
+	/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
+
+	if (!in_interrupt())
+		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %d, addr = %#x, num_trbs = %d\n",
+				urb->ep->desc.bEndpointAddress,
+				urb->transfer_buffer_length, urb->transfer_dma,
+				num_trbs);
+	ret = xhci_prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
+			num_trbs, urb, &td, mem_flags);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
+	 * until we've finished creating all the other TRBs. The ring's cycle
+	 * state may change as we enqueue the other TRBs, so save it too.
+	 */
+	start_trb = &ep_ring->enqueue->generic;
+	start_cycle = ep_ring->cycle_state;
+
+	running_total = 0;
+	/* How much data is in the first TRB? */
+	addr = (u64) urb->transfer_dma;
+	trb_buff_len = TRB_MAX_BUFF_SIZE -
+		(urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+	if (urb->transfer_buffer_length < trb_buff_len)
+		trb_buff_len = urb->transfer_buffer_length;
+
+	first_trb = true;
+
+	/* Queue the first TRB, even if it's zero-length */
+	do {
+		field = 0;
+
+		/* Don't change the cycle bit of the first TRB until later */
+		if (first_trb)
+			first_trb = false;
+		else
+			field |= ep_ring->cycle_state;
+
+		/* Chain all the TRBs together; clear the chain bit in the last
+		 * TRB to indicate it's the last TRB in the chain.
+		 */
+		if (num_trbs > 1) {
+			field |= TRB_CHAIN;
+		} else {
+			/* FIXME - add check for ZERO_PACKET flag before this */
+			td->last_trb = ep_ring->enqueue;
+			field |= TRB_IOC;
+		}
+		queue_trb(xhci, ep_ring, false,
+				(u32) addr,
+				(u32) ((u64) addr >> 32),
+				TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0),
+				/* We always want to know if the TRB was short,
+				 * or we won't get an event when it completes.
+				 * (Unless we use event data TRBs, which are a
+				 * waste of space and HC resources.)
+				 */
+				field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
+		--num_trbs;
+		running_total += trb_buff_len;
+
+		/* Calculate length for next transfer */
+		addr += trb_buff_len;
+		trb_buff_len = urb->transfer_buffer_length - running_total;
+		if (trb_buff_len > TRB_MAX_BUFF_SIZE)
+			trb_buff_len = TRB_MAX_BUFF_SIZE;
+	} while (running_total < urb->transfer_buffer_length);
+
+	if (num_trbs != 0)
+		dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
+				"TRBs, %d left\n", __FUNCTION__,
+				urb->ep->desc.bEndpointAddress, num_trbs);
+	/*
+	 * Pass all the TRBs to the hardware at once and make sure this write
+	 * isn't reordered.
+	 */
+	wmb();
+	start_trb->field[3] |= start_cycle;
+	field = xhci_readl(xhci, &xhci->dba->doorbell[slot_id]) & DB_MASK;
+	xhci_writel(xhci, field | EPI_TO_DB(ep_index), &xhci->dba->doorbell[slot_id]);
+	/* Flush PCI posted writes */
+	xhci_readl(xhci, &xhci->dba->doorbell[slot_id]);
+
+	return 0;
+}
+
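queue_bulk_tx() sizes the TD around the rule that a TRB buffer may not cross a 64KB boundary. The counting logic is easier to check lifted out into a standalone sketch with a worked case (the values are illustrative, not from the patch):

	#include <stdio.h>

	#define TRB_MAX_BUFF_SHIFT	16
	#define TRB_MAX_BUFF_SIZE	(1 << TRB_MAX_BUFF_SHIFT)

	/* Same TRB counting as queue_bulk_tx() above */
	static int count_trbs(unsigned long long dma, unsigned int len)
	{
		int num_trbs = 0;
		/* Bytes available before the first 64KB boundary */
		unsigned int running_total = TRB_MAX_BUFF_SIZE -
				(dma & (TRB_MAX_BUFF_SIZE - 1));

		if (running_total != 0 || len == 0)
			num_trbs++;
		while (running_total < len) {
			num_trbs++;
			running_total += TRB_MAX_BUFF_SIZE;
		}
		return num_trbs;
	}

	int main(void)
	{
		/* 8448 bytes starting 256 bytes below a 64KB boundary:
		 * TRB 1 carries 256 bytes, TRB 2 the remaining 8192.
		 */
		printf("%d\n", count_trbs(0x1000ff00ULL, 8448));	/* prints 2 */
		return 0;
	}

The enqueue loop then mirrors this count: the first TRB's length is clipped to the boundary (or to the whole buffer, if smaller), every TRB but the last sets TRB_CHAIN, and the last sets TRB_IOC so exactly one completion event fires for the TD.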
 /* Caller must have locked xhci->lock */
 int queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		struct urb *urb, int slot_id, unsigned int ep_index)
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 1a6fd997c343..06e07616631f 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -889,6 +889,9 @@ union xhci_trb {
  */
 #define TRBS_PER_SEGMENT	64
 #define SEGMENT_SIZE		(TRBS_PER_SEGMENT*16)
+/* TRB buffer pointers can't cross 64KB boundaries */
+#define TRB_MAX_BUFF_SHIFT	16
+#define TRB_MAX_BUFF_SIZE	(1 << TRB_MAX_BUFF_SHIFT)
 
 struct xhci_td {
 	struct list_head td_list;
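Both constants encode the xHCI rule that a TRB's data buffer may not cross a 64KB boundary. Because TRB_MAX_BUFF_SIZE is a power of two, the offset and remaining-room arithmetic used in queue_bulk_tx() reduces to simple masking; a sketch (variable names are illustrative):

	/* Offset of a DMA address within its 64KB region, and the bytes
	 * left before the next boundary; valid because TRB_MAX_BUFF_SIZE
	 * is a power of two.
	 */
	u64 offset    = addr & (TRB_MAX_BUFF_SIZE - 1);	/* same as addr & 0xffff */
	u64 available = TRB_MAX_BUFF_SIZE - offset;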
@@ -1117,6 +1120,7 @@ void set_hc_event_deq(struct xhci_hcd *xhci);
 int queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id);
 int queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id);
 int queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, int slot_id, unsigned int ep_index);
+int queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, int slot_id, unsigned int ep_index);
 int queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id);
 
 /* xHCI roothub code */