author      Sarah Sharp <sarah.a.sharp@linux.intel.com>    2009-12-03 12:44:29 -0500
committer   Greg Kroah-Hartman <gregkh@suse.de>            2009-12-11 14:55:27 -0500
commit      74f9fe21e0440066eb337b9f644238cb3050b91c (patch)
tree        b4ab839ba1a02cf2e6834ae3013b24e59e1e76c4 /drivers/usb
parent      3342ecda3ffb059f2ffd765a71d9579f0aa036eb (diff)
USB: xhci: Make reverting an alt setting "unfailable".
When a driver wants to switch to a different alternate setting for an interface, the USB core will (soon) check whether there is enough bandwidth. Once the new alternate setting is installed in the xHCI hardware, the USB core will send a USB_REQ_SET_INTERFACE control message. That can fail in various ways, and the USB core needs to be able to reinstate the old alternate setting.

With the old code, reinstating the old alt setting could fail if there's not enough memory to allocate new endpoint rings. Keep around a cache of (at most 31) endpoint rings for this case. When we successfully switch the xHCI hardware to the new alt setting, the old alt setting's rings will be stored in the cache. Therefore we'll always have enough rings to satisfy a conversion back to a previous device setting.

Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
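The caching idea is small enough to sketch outside the driver. The standalone C program below is illustrative only, not xhci code: struct device_cache, ring_cache_push() and ring_cache_pop() are hypothetical stand-ins for the virt_dev->ring_cache / num_rings_cached fields the patch adds. The invariant it demonstrates is the one the commit relies on: a device has at most 31 non-control endpoints, so a 31-slot cache can always absorb the rings displaced by a successful switch, and a later revert can be satisfied from the cache without a fresh allocation.

/* Illustrative sketch, not kernel code: names and types are hypothetical. */
#include <stdio.h>
#include <stdlib.h>

#define MAX_RINGS_CACHED 31

struct ring {
        int id;                 /* stand-in for the real ring state */
};

struct device_cache {
        struct ring *slots[MAX_RINGS_CACHED];
        int count;
};

/* Called when a new ring has been installed and the old one is displaced. */
static void ring_cache_push(struct device_cache *c, struct ring *old_ring)
{
        if (c->count < MAX_RINGS_CACHED)
                c->slots[c->count++] = old_ring;   /* keep it for a revert */
        else
                free(old_ring);                    /* cache full: free it */
}

/* Called when allocating a fresh ring fails and the revert must not fail. */
static struct ring *ring_cache_pop(struct device_cache *c)
{
        if (c->count == 0)
                return NULL;            /* nothing cached; caller handles it */
        return c->slots[--c->count];    /* reuse a previously displaced ring */
}

int main(void)
{
        struct device_cache cache = { .count = 0 };
        struct ring *old_ring = calloc(1, sizeof(*old_ring));

        old_ring->id = 1;
        ring_cache_push(&cache, old_ring);      /* alt setting switched */

        /* Pretend the fresh allocation failed; fall back to the cache. */
        struct ring *reused = ring_cache_pop(&cache);
        printf("reused ring id=%d, rings still cached=%d\n",
               reused ? reused->id : -1, cache.count);
        free(reused);
        return 0;
}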
Diffstat (limited to 'drivers/usb')
-rw-r--r--   drivers/usb/host/xhci-hcd.c   32
-rw-r--r--   drivers/usb/host/xhci-mem.c   76
-rw-r--r--   drivers/usb/host/xhci.h        4
3 files changed, 94 insertions, 18 deletions
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 0d5a8564ed17..5e92c72df642 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -1262,13 +1262,35 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
                         LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
 
         xhci_zero_in_ctx(xhci, virt_dev);
-        /* Free any old rings */
+        /* Install new rings and free or cache any old rings */
         for (i = 1; i < 31; ++i) {
-                if (virt_dev->eps[i].new_ring) {
-                        xhci_ring_free(xhci, virt_dev->eps[i].ring);
-                        virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
-                        virt_dev->eps[i].new_ring = NULL;
+                int rings_cached;
+
+                if (!virt_dev->eps[i].new_ring)
+                        continue;
+                /* Only cache or free the old ring if it exists.
+                 * It may not if this is the first add of an endpoint.
+                 */
+                if (virt_dev->eps[i].ring) {
+                        rings_cached = virt_dev->num_rings_cached;
+                        if (rings_cached < XHCI_MAX_RINGS_CACHED) {
+                                virt_dev->num_rings_cached++;
+                                rings_cached = virt_dev->num_rings_cached;
+                                virt_dev->ring_cache[rings_cached] =
+                                        virt_dev->eps[i].ring;
+                                xhci_dbg(xhci, "Cached old ring, "
+                                                "%d ring%s cached\n",
+                                                rings_cached,
+                                                (rings_cached > 1) ? "s" : "");
+                        } else {
+                                xhci_ring_free(xhci, virt_dev->eps[i].ring);
+                                xhci_dbg(xhci, "Ring cache full (%d rings), "
+                                                "freeing ring\n",
+                                                virt_dev->num_rings_cached);
+                        }
                 }
+                virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
+                virt_dev->eps[i].new_ring = NULL;
         }
 
         return ret;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 9034721106d7..bffcef7a5545 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -125,6 +125,23 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
         kfree(ring);
 }
 
+static void xhci_initialize_ring_info(struct xhci_ring *ring)
+{
+        /* The ring is empty, so the enqueue pointer == dequeue pointer */
+        ring->enqueue = ring->first_seg->trbs;
+        ring->enq_seg = ring->first_seg;
+        ring->dequeue = ring->enqueue;
+        ring->deq_seg = ring->first_seg;
+        /* The ring is initialized to 0. The producer must write 1 to the cycle
+         * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
+         * compare CCS to the cycle bit to check ownership, so CCS = 1.
+         */
+        ring->cycle_state = 1;
+        /* Not necessary for new rings, but needed for re-initialized rings */
+        ring->enq_updates = 0;
+        ring->deq_updates = 0;
+}
+
 /**
  * Create a new ring with zero or more segments.
  *
@@ -173,17 +190,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
                         " segment %p (virtual), 0x%llx (DMA)\n",
                         prev, (unsigned long long)prev->dma);
         }
-        /* The ring is empty, so the enqueue pointer == dequeue pointer */
-        ring->enqueue = ring->first_seg->trbs;
-        ring->enq_seg = ring->first_seg;
-        ring->dequeue = ring->enqueue;
-        ring->deq_seg = ring->first_seg;
-        /* The ring is initialized to 0. The producer must write 1 to the cycle
-         * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
-         * compare CCS to the cycle bit to check ownership, so CCS = 1.
-         */
-        ring->cycle_state = 1;
-
+        xhci_initialize_ring_info(ring);
         return ring;
 
 fail:
@@ -191,6 +198,27 @@ fail:
         return 0;
 }
 
+/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
+ * pointers to the beginning of the ring.
+ */
+static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
+                struct xhci_ring *ring)
+{
+        struct xhci_segment *seg = ring->first_seg;
+        do {
+                memset(seg->trbs, 0,
+                                sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
+                /* All endpoint rings have link TRBs */
+                xhci_link_segments(xhci, seg, seg->next, 1);
+                seg = seg->next;
+        } while (seg != ring->first_seg);
+        xhci_initialize_ring_info(ring);
+        /* td list should be empty since all URBs have been cancelled,
+         * but just in case...
+         */
+        INIT_LIST_HEAD(&ring->td_list);
+}
+
 #define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
 
 struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
@@ -276,6 +304,12 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
                 if (dev->eps[i].ring)
                         xhci_ring_free(xhci, dev->eps[i].ring);
 
+        if (dev->ring_cache) {
+                for (i = 0; i < dev->num_rings_cached; i++)
+                        xhci_ring_free(xhci, dev->ring_cache[i]);
+                kfree(dev->ring_cache);
+        }
+
         if (dev->in_ctx)
                 xhci_free_container_ctx(xhci, dev->in_ctx);
         if (dev->out_ctx)
@@ -329,6 +363,14 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
         if (!dev->eps[0].ring)
                 goto fail;
 
+        /* Allocate pointers to the ring cache */
+        dev->ring_cache = kzalloc(
+                        sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
+                        flags);
+        if (!dev->ring_cache)
+                goto fail;
+        dev->num_rings_cached = 0;
+
         init_completion(&dev->cmd_completion);
         INIT_LIST_HEAD(&dev->cmd_list);
 
@@ -555,8 +597,16 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
         /* Set up the endpoint ring */
         virt_dev->eps[ep_index].new_ring =
                 xhci_ring_alloc(xhci, 1, true, mem_flags);
-        if (!virt_dev->eps[ep_index].new_ring)
-                return -ENOMEM;
+        if (!virt_dev->eps[ep_index].new_ring) {
+                /* Attempt to use the ring cache */
+                if (virt_dev->num_rings_cached == 0)
+                        return -ENOMEM;
+                virt_dev->eps[ep_index].new_ring =
+                        virt_dev->ring_cache[virt_dev->num_rings_cached];
+                virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
+                virt_dev->num_rings_cached--;
+                xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
+        }
         ep_ring = virt_dev->eps[ep_index].new_ring;
         ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
 
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index bb8e6656cca4..877813505ef2 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -677,6 +677,10 @@ struct xhci_virt_device {
         struct xhci_container_ctx       *out_ctx;
         /* Used for addressing devices and configuration changes */
         struct xhci_container_ctx       *in_ctx;
+        /* Rings saved to ensure old alt settings can be re-instated */
+        struct xhci_ring                **ring_cache;
+        int                             num_rings_cached;
+#define XHCI_MAX_RINGS_CACHED   31
         struct xhci_virt_ep             eps[31];
         struct completion               cmd_completion;
         /* Status of the last command issued for this device */