Diffstat (limited to 'drivers/usb/host/xhci-mem.c')
-rw-r--r--  drivers/usb/host/xhci-mem.c  76
 1 file changed, 63 insertions(+), 13 deletions(-)
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 9034721106d7..bffcef7a5545 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -125,6 +125,23 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
 	kfree(ring);
 }
 
+static void xhci_initialize_ring_info(struct xhci_ring *ring)
+{
+	/* The ring is empty, so the enqueue pointer == dequeue pointer */
+	ring->enqueue = ring->first_seg->trbs;
+	ring->enq_seg = ring->first_seg;
+	ring->dequeue = ring->enqueue;
+	ring->deq_seg = ring->first_seg;
+	/* The ring is initialized to 0. The producer must write 1 to the cycle
+	 * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
+	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
+	 */
+	ring->cycle_state = 1;
+	/* Not necessary for new rings, but needed for re-initialized rings */
+	ring->enq_updates = 0;
+	ring->deq_updates = 0;
+}
+
 /**
  * Create a new ring with zero or more segments.
  *
@@ -173,17 +190,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 			" segment %p (virtual), 0x%llx (DMA)\n",
 			prev, (unsigned long long)prev->dma);
 	}
-	/* The ring is empty, so the enqueue pointer == dequeue pointer */
-	ring->enqueue = ring->first_seg->trbs;
-	ring->enq_seg = ring->first_seg;
-	ring->dequeue = ring->enqueue;
-	ring->deq_seg = ring->first_seg;
-	/* The ring is initialized to 0. The producer must write 1 to the cycle
-	 * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
-	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
-	 */
-	ring->cycle_state = 1;
-
+	xhci_initialize_ring_info(ring);
 	return ring;
 
 fail:
@@ -191,6 +198,27 @@ fail:
 	return 0;
 }
 
+/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
+ * pointers to the beginning of the ring.
+ */
+static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
+		struct xhci_ring *ring)
+{
+	struct xhci_segment *seg = ring->first_seg;
+	do {
+		memset(seg->trbs, 0,
+				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
+		/* All endpoint rings have link TRBs */
+		xhci_link_segments(xhci, seg, seg->next, 1);
+		seg = seg->next;
+	} while (seg != ring->first_seg);
+	xhci_initialize_ring_info(ring);
+	/* td list should be empty since all URBs have been cancelled,
+	 * but just in case...
+	 */
+	INIT_LIST_HEAD(&ring->td_list);
+}
+
 #define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
 
 struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
@@ -276,6 +304,12 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 		if (dev->eps[i].ring)
 			xhci_ring_free(xhci, dev->eps[i].ring);
 
+	if (dev->ring_cache) {
+		for (i = 0; i < dev->num_rings_cached; i++)
+			xhci_ring_free(xhci, dev->ring_cache[i]);
+		kfree(dev->ring_cache);
+	}
+
 	if (dev->in_ctx)
 		xhci_free_container_ctx(xhci, dev->in_ctx);
 	if (dev->out_ctx)
@@ -329,6 +363,14 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	if (!dev->eps[0].ring)
 		goto fail;
 
+	/* Allocate pointers to the ring cache */
+	dev->ring_cache = kzalloc(
+			sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
+			flags);
+	if (!dev->ring_cache)
+		goto fail;
+	dev->num_rings_cached = 0;
+
 	init_completion(&dev->cmd_completion);
 	INIT_LIST_HEAD(&dev->cmd_list);
 
@@ -555,8 +597,16 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	/* Set up the endpoint ring */
 	virt_dev->eps[ep_index].new_ring =
 		xhci_ring_alloc(xhci, 1, true, mem_flags);
-	if (!virt_dev->eps[ep_index].new_ring)
-		return -ENOMEM;
+	if (!virt_dev->eps[ep_index].new_ring) {
+		/* Attempt to use the ring cache */
+		if (virt_dev->num_rings_cached == 0)
+			return -ENOMEM;
+		virt_dev->eps[ep_index].new_ring =
+			virt_dev->ring_cache[virt_dev->num_rings_cached];
+		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
+		virt_dev->num_rings_cached--;
+		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
+	}
 	ep_ring = virt_dev->eps[ep_index].new_ring;
 	ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
 
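The pattern the patch introduces is a small per-device cache of retired endpoint rings: xhci_alloc_virt_device() allocates an array of XHCI_MAX_RINGS_CACHED ring pointers (the constant's value is defined outside this file), xhci_free_virt_device() frees whatever is still cached, and xhci_endpoint_init() falls back to a cached ring, zeroed and re-linked by xhci_reinit_cached_ring(), when xhci_ring_alloc() fails. The producer side that actually parks rings in the cache is presumably in the parts of the patch outside xhci-mem.c. Below is a minimal standalone sketch of the same pop-and-reinit idea in plain C; struct ring, ring_cache_put(), ring_cache_get(), ring_reinit() and MAX_RINGS_CACHED are hypothetical stand-ins, not driver code, and the count field here is taken to be the number of valid entries, so the most recently cached ring sits at index count - 1.

#include <stdlib.h>
#include <string.h>

#define MAX_RINGS_CACHED 31          /* illustrative cache size, not the driver's constant */
#define TRBS_PER_SEG     256

struct ring {
	unsigned char trbs[TRBS_PER_SEG][16];   /* stand-in for one TRB segment */
	unsigned int enq, deq, cycle_state;
};

struct ring_cache {
	struct ring *rings[MAX_RINGS_CACHED];
	unsigned int count;              /* number of valid cached entries */
};

/* Reset a recycled ring: zero the TRBs, start with enqueue == dequeue, cycle = 1 */
static void ring_reinit(struct ring *ring)
{
	memset(ring->trbs, 0, sizeof(ring->trbs));
	ring->enq = ring->deq = 0;
	ring->cycle_state = 1;
}

/* Park a retired ring in the cache; fall back to freeing it when the cache is full */
static void ring_cache_put(struct ring_cache *rc, struct ring *ring)
{
	if (rc->count < MAX_RINGS_CACHED)
		rc->rings[rc->count++] = ring;
	else
		free(ring);
}

/* Pop the most recently cached ring, or return NULL if the cache is empty */
static struct ring *ring_cache_get(struct ring_cache *rc)
{
	struct ring *ring;

	if (rc->count == 0)
		return NULL;
	ring = rc->rings[--rc->count];   /* last valid slot is count - 1 */
	rc->rings[rc->count] = NULL;
	ring_reinit(ring);
	return ring;
}

/* Allocation path: try a fresh ring first, then the cache, then give up */
static struct ring *ring_get(struct ring_cache *rc)
{
	struct ring *ring = calloc(1, sizeof(*ring));

	if (ring) {
		ring_reinit(ring);
		return ring;
	}
	return ring_cache_get(rc);       /* may still be NULL: caller reports -ENOMEM */
}

int main(void)
{
	struct ring_cache rc = { .count = 0 };
	struct ring *r = ring_get(&rc);

	if (r)
		ring_cache_put(&rc, r);   /* recycle instead of freeing */
	while ((r = ring_cache_get(&rc)))
		free(r);
	return 0;
}

The point of the cache is the same as in the patch: once a configuration has been accepted, reverting to the old alt setting must not be able to fail on a ring allocation, so a previously used ring is recycled rather than re-allocated.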