author		Sarah Sharp <sarah.a.sharp@linux.intel.com>	2009-04-27 22:58:50 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2009-06-16 00:44:49 -0400
commit		b10de142119a676552df3f0d2e3a9d647036c26a
tree		cd38fe5efed6776e7c9e154a05202bae4f683295 /drivers/usb/host/xhci.h
parent		f94e0186312b0fc39f41eed4e21836ed74b7efe1
USB: xhci: Bulk transfer support
Allow device drivers to submit URBs to bulk endpoints on devices under an
xHCI host controller. Share code between the control and bulk enqueueing
functions when it makes sense.
To get the best performance out of bulk transfers, SuperSpeed devices must
have the bMaxBurst value copied from their SuperSpeed endpoint companion descriptor
into the xHCI device context. This allows the host controller to "burst"
up to 16 packets before it has to wait for the device to acknowledge the
first packet.
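
For illustration only, a minimal standalone sketch of that copy follows; the struct
layouts are simplified stand-ins rather than the kernel's xhci.h definitions,
although the MAX_BURST() bit position matches the xHCI endpoint context (max burst
size in bits 15:8 of the second context dword).

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the SuperSpeed endpoint companion descriptor */
struct ss_ep_comp_desc {
	uint8_t bMaxBurst;	/* 0 = bursts of 1 packet, 15 = bursts of 16 */
};

/* Simplified stand-in for the xHCI endpoint context */
struct ep_ctx {
	uint32_t ep_info2;	/* max packet size, max burst size, EP type, ... */
};

/* Max burst size lives in bits 15:8 of ep_info2 */
#define MAX_BURST(p)	(((p) & 0xff) << 8)

static void copy_max_burst(struct ep_ctx *ctx, const struct ss_ep_comp_desc *comp)
{
	ctx->ep_info2 &= ~MAX_BURST(0xff);
	ctx->ep_info2 |= MAX_BURST(comp->bMaxBurst);
}

int main(void)
{
	struct ss_ep_comp_desc comp = { .bMaxBurst = 15 };	/* bursts of 16 packets */
	struct ep_ctx ctx = { 0 };

	copy_max_burst(&ctx, &comp);
	printf("ep_info2 = 0x%08x\n", ctx.ep_info2);
	return 0;
}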
The buffers in Transfer Request Blocks (TRBs) can cross page boundaries,
but they cannot cross 64KB boundaries. The buffer must be broken into
multiple TRBs if a 64KB boundary is crossed.
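
A minimal userspace sketch of that split, using the TRB_MAX_BUFF_SHIFT and
TRB_MAX_BUFF_SIZE constants this patch adds to xhci.h (the count_trbs_needed
helper name is made up here for illustration, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

#define TRB_MAX_BUFF_SHIFT	16
#define TRB_MAX_BUFF_SIZE	(1 << TRB_MAX_BUFF_SHIFT)

/*
 * How many TRBs does a contiguous buffer need, given that no single TRB's
 * buffer may cross a 64KB boundary?  The first TRB only covers the bytes up
 * to the next 64KB boundary; the rest is chopped into 64KB-sized pieces.
 */
static unsigned int count_trbs_needed(uint64_t dma_addr, uint64_t len)
{
	uint64_t first_chunk;
	unsigned int num_trbs = 1;

	if (len == 0)
		return 0;

	/* Bytes available before the first 64KB boundary is hit */
	first_chunk = TRB_MAX_BUFF_SIZE - (dma_addr & (TRB_MAX_BUFF_SIZE - 1));
	if (len <= first_chunk)
		return 1;

	len -= first_chunk;
	num_trbs += (len + TRB_MAX_BUFF_SIZE - 1) / TRB_MAX_BUFF_SIZE;
	return num_trbs;
}

int main(void)
{
	/* A 100KB buffer starting 4KB below a 64KB boundary needs 3 TRBs:
	 * 4KB + 64KB + 32KB */
	printf("%u\n", count_trbs_needed(0xf000, 100 * 1024));
	return 0;
}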
The sum of buffer lengths in all the TRBs in a Transfer Descriptor (TD)
cannot exceed 64MB. To work around this, the enqueueing code must enqueue
multiple TDs. The transfer event handler may incorrectly give back the
URB in this case, if it gets a transfer event that points somewhere in the
first TD. FIXME later.
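
The 64MB cap is simple round-up arithmetic per URB; a sketch, with
TD_MAX_BUFF_SIZE and count_tds_needed being illustrative names rather than
anything defined by this patch:

#include <stdint.h>
#include <stdio.h>

/* Per-TD cap described above: TRB lengths in one TD may not sum past 64MB */
#define TD_MAX_BUFF_SIZE	(64ULL * 1024 * 1024)

/* Number of TDs an URB of 'len' bytes must be split into (round up) */
static uint64_t count_tds_needed(uint64_t len)
{
	if (len == 0)
		return 1;	/* treat a zero-length transfer as one TD */
	return (len + TD_MAX_BUFF_SIZE - 1) / TD_MAX_BUFF_SIZE;
}

int main(void)
{
	/* A 200MB transfer needs 4 TDs: 64 + 64 + 64 + 8 MB */
	printf("%llu\n", (unsigned long long)count_tds_needed(200ULL * 1024 * 1024));
	return 0;
}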
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/usb/host/xhci.h')
 drivers/usb/host/xhci.h | 4 ++++
 1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 1a6fd997c343..06e07616631f 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -889,6 +889,9 @@ union xhci_trb {
  */
 #define TRBS_PER_SEGMENT	64
 #define SEGMENT_SIZE		(TRBS_PER_SEGMENT*16)
+/* TRB buffer pointers can't cross 64KB boundaries */
+#define TRB_MAX_BUFF_SHIFT		16
+#define TRB_MAX_BUFF_SIZE	(1 << TRB_MAX_BUFF_SHIFT)
 
 struct xhci_td {
 	struct list_head	td_list;
@@ -1117,6 +1120,7 @@ void set_hc_event_deq(struct xhci_hcd *xhci);
 int queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id);
 int queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id);
 int queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, int slot_id, unsigned int ep_index);
+int queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, int slot_id, unsigned int ep_index);
 int queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id);
 
 /* xHCI roothub code */