diff options
author | Sarah Sharp <sarah.a.sharp@linux.intel.com> | 2009-04-27 22:58:50 -0400 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@suse.de> | 2009-06-16 00:44:49 -0400 |
commit | b10de142119a676552df3f0d2e3a9d647036c26a (patch) | |
tree | cd38fe5efed6776e7c9e154a05202bae4f683295 /drivers/usb/host/xhci-hcd.c | |
parent | f94e0186312b0fc39f41eed4e21836ed74b7efe1 (diff) |
USB: xhci: Bulk transfer support
Allow device drivers to submit URBs to bulk endpoints on devices under an
xHCI host controller. Share code between the control and bulk enqueueing
functions when it makes sense.
To get the best performance out of bulk transfers, SuperSpeed devices must
have the bMaxBurst size copied from their endpoint companion descriptor
into the xHCI device context. This allows the host controller to "burst"
up to 16 packets before it has to wait for the device to acknowledge the
first packet.
The buffers in Transfer Request Blocks (TRBs) can cross page boundaries,
but they cannot cross 64KB boundaries. The buffer must be broken into
multiple TRBs if a 64KB boundary is crossed.
The sum of buffer lengths in all the TRBs in a Transfer Descriptor (TD)
cannot exceed 64MB. To work around this, the enqueueing code must enqueue
multiple TDs. The transfer event handler may incorrectly give back the
URB in this case, if it gets a transfer event that points somewhere in the
first TD. FIXME later.
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/usb/host/xhci-hcd.c')
-rw-r--r-- | drivers/usb/host/xhci-hcd.c | 13 |
1 file changed, 6 insertions, 7 deletions
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c index 50ab525f65be..e5fbdcdbf676 100644 --- a/drivers/usb/host/xhci-hcd.c +++ b/drivers/usb/host/xhci-hcd.c | |||
@@ -589,12 +589,6 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) | |||
589 | 589 | ||
590 | slot_id = urb->dev->slot_id; | 590 | slot_id = urb->dev->slot_id; |
591 | ep_index = xhci_get_endpoint_index(&urb->ep->desc); | 591 | ep_index = xhci_get_endpoint_index(&urb->ep->desc); |
592 | /* Only support ep 0 control transfers for now */ | ||
593 | if (ep_index != 0) { | ||
594 | xhci_dbg(xhci, "WARN: urb submitted to unsupported ep %x\n", | ||
595 | urb->ep->desc.bEndpointAddress); | ||
596 | return -ENOSYS; | ||
597 | } | ||
598 | 592 | ||
599 | spin_lock_irqsave(&xhci->lock, flags); | 593 | spin_lock_irqsave(&xhci->lock, flags); |
600 | if (!xhci->devs || !xhci->devs[slot_id]) { | 594 | if (!xhci->devs || !xhci->devs[slot_id]) { |
@@ -608,7 +602,12 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) | |||
608 | ret = -ESHUTDOWN; | 602 | ret = -ESHUTDOWN; |
609 | goto exit; | 603 | goto exit; |
610 | } | 604 | } |
611 | ret = queue_ctrl_tx(xhci, mem_flags, urb, slot_id, ep_index); | 605 | if (usb_endpoint_xfer_control(&urb->ep->desc)) |
606 | ret = queue_ctrl_tx(xhci, mem_flags, urb, slot_id, ep_index); | ||
607 | else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) | ||
608 | ret = queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index); | ||
609 | else | ||
610 | ret = -EINVAL; | ||
612 | exit: | 611 | exit: |
613 | spin_unlock_irqrestore(&xhci->lock, flags); | 612 | spin_unlock_irqrestore(&xhci->lock, flags); |
614 | return ret; | 613 | return ret; |