aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/usb/host/xhci-mem.c
diff options
context:
space:
mode:
authorAndiry Xu <andiry.xu@amd.com>2012-03-05 04:49:37 -0500
committerSarah Sharp <sarah.a.sharp@linux.intel.com>2012-03-13 12:30:24 -0400
commit8dfec6140fc617b932cf9a09ba46d0ee3f3a7d87 (patch)
tree0929c51b289a54c7ac11e6919ff1f9ae737535ff /drivers/usb/host/xhci-mem.c
parent186a7ef13a8fa3bc7cca1ccd33bd469b931e46de (diff)
xHCI: dynamic ring expansion
If the room_on_ring() check fails, try to expand the ring and check again. When expanding a ring, use a cached ring or allocate new segments, link the original ring to the new ring or segments, and update the original ring's segment count and last-segment pointer. Signed-off-by: Andiry Xu <andiry.xu@amd.com> Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com> Tested-by: Paul Zimmerman <Paul.Zimmerman@synopsys.com>
Diffstat (limited to 'drivers/usb/host/xhci-mem.c')
-rw-r--r--drivers/usb/host/xhci-mem.c75
1 file changed, 63 insertions, 12 deletions
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index c1800c7582b7..c37aa1ba9126 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -119,6 +119,34 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
119 } 119 }
120} 120}
121 121
122/*
123 * Link the ring to the new segments.
124 * Set Toggle Cycle for the new ring if needed.
 *
 * Splices the already-linked segment chain [first, last] into @ring
 * immediately after the current enqueue segment, and updates the ring's
 * bookkeeping (segment count, free-TRB count, last segment / toggle bit).
 * Caller guarantees the new chain was built with the same ring type and
 * cycle state as @ring.
125 */
126static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
127 struct xhci_segment *first, struct xhci_segment *last,
128 unsigned int num_segs)
129{
130 struct xhci_segment *next;
131
 /* Nothing to do if the ring or the new chain is missing. */
132 if (!ring || !first || !last)
133 return;
134
 /* Splice the new chain in right after the enqueue segment:
  * enq_seg -> first ... last -> (old enq_seg->next). */
135 next = ring->enq_seg->next;
136 xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
137 xhci_link_segments(xhci, last, next, ring->type);
138 ring->num_segs += num_segs;
 /* Each segment contributes TRBS_PER_SEGMENT - 1 usable TRBs; the
  * final TRB of every segment is the link TRB (see .link accesses
  * below). */
139 ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
140
 /* Non-event rings carry the Toggle Cycle flag on the link TRB of the
  * last segment.  If we spliced in after the old last segment, the new
  * chain's tail becomes the ring's last segment: clear the toggle on
  * the old last segment's link TRB before setting it on the new one,
  * then move last_seg.  Order matters — the toggle must never be set
  * on two link TRBs at once. */
141 if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
142 ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
143 &= ~cpu_to_le32(LINK_TOGGLE);
144 last->trbs[TRBS_PER_SEGMENT-1].link.control
145 |= cpu_to_le32(LINK_TOGGLE);
146 ring->last_seg = last;
147 }
148}
149
122/* XXX: Do we need the hcd structure in all these functions? */ 150/* XXX: Do we need the hcd structure in all these functions? */
123void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring) 151void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
124{ 152{
@@ -287,6 +315,39 @@ static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
287 INIT_LIST_HEAD(&ring->td_list); 315 INIT_LIST_HEAD(&ring->td_list);
288} 316}
289 317
318/*
319 * Expand an existing ring.
320 * Look for a cached ring or allocate a new ring which has same segment numbers
321 * and link the two rings.
322 */
323int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
324 unsigned int num_trbs, gfp_t flags)
325{
326 struct xhci_segment *first;
327 struct xhci_segment *last;
328 unsigned int num_segs;
329 unsigned int num_segs_needed;
330 int ret;
331
332 num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
333 (TRBS_PER_SEGMENT - 1);
334
335 /* Allocate number of segments we needed, or double the ring size */
336 num_segs = ring->num_segs > num_segs_needed ?
337 ring->num_segs : num_segs_needed;
338
339 ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
340 num_segs, ring->cycle_state, ring->type, flags);
341 if (ret)
342 return -ENOMEM;
343
344 xhci_link_rings(xhci, ring, first, last, num_segs);
345 xhci_dbg(xhci, "ring expansion succeed, now has %d segments\n",
346 ring->num_segs);
347
348 return 0;
349}
350
290#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32) 351#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
291 352
292static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, 353static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
@@ -1361,18 +1422,8 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
1361 1422
1362 type = usb_endpoint_type(&ep->desc); 1423 type = usb_endpoint_type(&ep->desc);
1363 /* Set up the endpoint ring */ 1424 /* Set up the endpoint ring */
1364 /* 1425 virt_dev->eps[ep_index].new_ring =
1365 * Isochronous endpoint ring needs bigger size because one isoc URB 1426 xhci_ring_alloc(xhci, 1, 1, type, mem_flags);
1366 * carries multiple packets and it will insert multiple tds to the
1367 * ring.
1368 * This should be replaced with dynamic ring resizing in the future.
1369 */
1370 if (usb_endpoint_xfer_isoc(&ep->desc))
1371 virt_dev->eps[ep_index].new_ring =
1372 xhci_ring_alloc(xhci, 8, 1, type, mem_flags);
1373 else
1374 virt_dev->eps[ep_index].new_ring =
1375 xhci_ring_alloc(xhci, 1, 1, type, mem_flags);
1376 if (!virt_dev->eps[ep_index].new_ring) { 1427 if (!virt_dev->eps[ep_index].new_ring) {
1377 /* Attempt to use the ring cache */ 1428 /* Attempt to use the ring cache */
1378 if (virt_dev->num_rings_cached == 0) 1429 if (virt_dev->num_rings_cached == 0)