author    Sarah Sharp <sarah.a.sharp@linux.intel.com>    2009-09-02 15:14:28 -0400
committer Greg Kroah-Hartman <gregkh@suse.de>            2009-09-23 09:46:18 -0400
commit    624defa12f304b4d11eda309bc207fa5a1900d0f (patch)
tree      fb350ade85d9f1703c28eae0b2683c0a70aec8a7
parent    2f697f6cbff155b3ce4053a50cdf00b5be4dda11 (diff)
USB: xhci: Support interrupt transfers.
Interrupt transfers are submitted to the xHCI hardware using the same TRB type as bulk transfers. Re-use the bulk transfer enqueueing code to enqueue interrupt transfers.

Interrupt transfers are a bit different from bulk transfers. When the interrupt endpoint is to be serviced, the xHC will consume (at most) one TD. A TD (comprised of sg list entries) can take several service intervals to transmit. The important thing for device drivers to note is that if they use the scatter-gather interface to submit interrupt requests, they will not get data sent from two different scatter-gather lists in the same service interval.

For now, the xHCI driver will use the service interval from the endpoint's descriptor (bInterval). Drivers will need a hook to poll at a more frequent interval. Set urb->interval to the interval that the xHCI hardware will use.

Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Cc: stable <stable@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
-rw-r--r--  drivers/usb/host/xhci-hcd.c    5
-rw-r--r--  drivers/usb/host/xhci-ring.c  48
-rw-r--r--  drivers/usb/host/xhci.h        3
3 files changed, 55 insertions, 1 deletion
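For context (not part of this patch), here is a minimal sketch of how a class driver might submit an interrupt URB that ends up in the path below; the function and buffer names are hypothetical. The point to note is that after usb_submit_urb() returns, the HCD may have rewritten urb->interval to the interval the xHC hardware will actually use:

#include <linux/usb.h>

/* Hypothetical completion handler for the example. */
static void example_intr_complete(struct urb *urb)
{
	/* process or resubmit urb->transfer_buffer here */
}

/* Sketch: queue one interrupt IN URB on endpoint ep_desc of udev. */
static int example_submit_intr(struct usb_device *udev,
		struct usb_endpoint_descriptor *ep_desc,
		void *buf, int len)
{
	struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);

	if (!urb)
		return -ENOMEM;

	usb_fill_int_urb(urb, udev,
			usb_rcvintpipe(udev, ep_desc->bEndpointAddress),
			buf, len, example_intr_complete, NULL,
			ep_desc->bInterval);

	/*
	 * The HCD is allowed to adjust urb->interval on submission; with
	 * this patch, xHCI sets it to the interval the hardware will
	 * really use (see xhci_queue_intr_tx() below).
	 */
	return usb_submit_urb(urb, GFP_KERNEL);
}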
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 1d4a1e3f9533..e478a63488fb 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -727,6 +727,11 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
 				slot_id, ep_index);
 		spin_unlock_irqrestore(&xhci->lock, flags);
+	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
+		spin_lock_irqsave(&xhci->lock, flags);
+		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
+				slot_id, ep_index);
+		spin_unlock_irqrestore(&xhci->lock, flags);
 	} else {
 		ret = -EINVAL;
 	}
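Background note, not part of this patch: the new branch in xhci_urb_enqueue() is selected by usb_endpoint_xfer_int() from the USB core headers, which simply tests the transfer-type bits of the endpoint descriptor's bmAttributes field. A rough sketch of that helper, for reference:

/* Sketch of the USB core helper used above (not verbatim kernel source). */
static inline int usb_endpoint_xfer_int(const struct usb_endpoint_descriptor *epd)
{
	return (epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
		USB_ENDPOINT_XFER_INT;
}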
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index aac379e1c883..ff5e6bc2299d 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1072,7 +1072,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			else
 				status = 0;
 		} else {
-			xhci_dbg(xhci, "Successful bulk transfer!\n");
+			if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
+				xhci_dbg(xhci, "Successful bulk "
+						"transfer!\n");
+			else
+				xhci_dbg(xhci, "Successful interrupt "
+						"transfer!\n");
 			status = 0;
 		}
 		break;
@@ -1464,6 +1469,47 @@ static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
 	ring_ep_doorbell(xhci, slot_id, ep_index);
 }
 
+/*
+ * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
+ * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
+ * (comprised of sg list entries) can take several service intervals to
+ * transmit.
+ */
+int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+		struct urb *urb, int slot_id, unsigned int ep_index)
+{
+	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
+			xhci->devs[slot_id]->out_ctx, ep_index);
+	int xhci_interval;
+	int ep_interval;
+
+	xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info);
+	ep_interval = urb->interval;
+	/* Convert to microframes */
+	if (urb->dev->speed == USB_SPEED_LOW ||
+			urb->dev->speed == USB_SPEED_FULL)
+		ep_interval *= 8;
+	/* FIXME change this to a warning and a suggestion to use the new API
+	 * to set the polling interval (once the API is added).
+	 */
+	if (xhci_interval != ep_interval) {
+		if (!printk_ratelimit())
+			dev_dbg(&urb->dev->dev, "Driver uses different interval"
+					" (%d microframe%s) than xHCI "
+					"(%d microframe%s)\n",
+					ep_interval,
+					ep_interval == 1 ? "" : "s",
+					xhci_interval,
+					xhci_interval == 1 ? "" : "s");
+		urb->interval = xhci_interval;
+		/* Convert back to frames for LS/FS devices */
+		if (urb->dev->speed == USB_SPEED_LOW ||
+				urb->dev->speed == USB_SPEED_FULL)
+			urb->interval /= 8;
+	}
+	return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
+}
+
 static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		struct urb *urb, int slot_id, unsigned int ep_index)
 {
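To make the interval handling in xhci_queue_intr_tx() concrete, here is a small stand-alone sketch of the same arithmetic. The interval exponent of 6 in ep_info and the requested 10-frame polling interval are assumed example values, not taken from the patch:

#include <stdio.h>

/* Same conversion as the new EP_INTERVAL_TO_UFRAMES() macro in xhci.h:
 * the xHC encodes the endpoint interval as a power-of-two exponent of
 * 125us microframes in bits 23:16 of ep_info.
 */
#define EP_INTERVAL_TO_UFRAMES(p)	(1 << (((p) >> 16) & 0xff))

int main(void)
{
	/* Assumed example: interval exponent 6 -> 2^6 = 64 microframes (8 ms) */
	unsigned int ep_info = 6 << 16;
	int xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_info);

	/* A full-speed driver asked for urb->interval = 10 frames */
	int ep_interval = 10 * 8;	/* frames -> microframes */

	if (xhci_interval != ep_interval)
		printf("Driver uses %d microframes, xHC will use %d\n",
				ep_interval, xhci_interval);

	/* urb->interval is rewritten and converted back to frames for a
	 * low/full-speed device: 64 / 8 = 8 frames
	 */
	printf("urb->interval becomes %d frames\n", xhci_interval / 8);
	return 0;
}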
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index bc64b500feb8..a7728aa91582 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -581,6 +581,7 @@ struct xhci_ep_ctx {
581/* bit 15 is Linear Stream Array */ 581/* bit 15 is Linear Stream Array */
582/* Interval - period between requests to an endpoint - 125u increments. */ 582/* Interval - period between requests to an endpoint - 125u increments. */
583#define EP_INTERVAL(p) ((p & 0xff) << 16) 583#define EP_INTERVAL(p) ((p & 0xff) << 16)
584#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff))
584 585
585/* ep_info2 bitmasks */ 586/* ep_info2 bitmasks */
586/* 587/*
@@ -1223,6 +1224,8 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
 		int slot_id, unsigned int ep_index);
 int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
 		int slot_id, unsigned int ep_index);
+int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
+		int slot_id, unsigned int ep_index);
 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 		u32 slot_id);
 int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,