path: root/drivers/usb/host/xhci-mem.c
author	Andiry Xu <andiry.xu@amd.com>	2012-03-05 04:49:36 -0500
committer	Sarah Sharp <sarah.a.sharp@linux.intel.com>	2012-03-13 12:30:15 -0400
commit	186a7ef13a8fa3bc7cca1ccd33bd469b931e46de (patch)
tree	9416923f77f39ae4dd9f7af9a17335d106965e92 /drivers/usb/host/xhci-mem.c
parent	70d43601773b9f270b62867a51495846d746b5d4 (diff)
xHCI: set cycle state when allocate rings
In the past, all the rings were allocated with cycle state equal to 1. Now the driver may expand an existing ring, and the new segments shall be allocated with the same cycle state as the old one. This affects ring allocation and cached ring re-initialization.

Signed-off-by: Andiry Xu <andiry.xu@amd.com>
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Tested-by: Paul Zimmerman <Paul.Zimmerman@synopsys.com>
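[Editor's note: the core rule of this patch is easiest to see in isolation. The following is a minimal, self-contained user-space sketch, not the driver's code; fake_trb, fake_segment_init, FAKE_TRBS_PER_SEGMENT, and FAKE_TRB_CYCLE are illustrative stand-ins for the xHCI structures. The point it demonstrates: a freshly zeroed segment has every cycle bit at 0, which is only correct when the ring's producer cycle state is 1; a segment added to a ring currently on cycle state 0 must have its cycle bits forced to 1 so the consumer does not see the new TRBs as already handed over.]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FAKE_TRBS_PER_SEGMENT 16    /* illustrative only; not the xHCI value */
#define FAKE_TRB_CYCLE (1u << 0)    /* illustrative cycle bit in 'control' */

struct fake_trb {
	uint32_t field[3];
	uint32_t control;
};

/* Mirror of the patch's rule: zeroed TRBs suit cycle state 1;
 * for cycle state 0, start the cycle bits at 1. */
static void fake_segment_init(struct fake_trb *trbs, unsigned int cycle_state)
{
	int i;

	memset(trbs, 0, sizeof(*trbs) * FAKE_TRBS_PER_SEGMENT);
	if (cycle_state == 0)
		for (i = 0; i < FAKE_TRBS_PER_SEGMENT; i++)
			trbs[i].control |= FAKE_TRB_CYCLE;
}

int main(void)
{
	struct fake_trb seg[FAKE_TRBS_PER_SEGMENT];

	fake_segment_init(seg, 1);
	printf("cycle state 1: first cycle bit = %u\n", seg[0].control & 1u);
	fake_segment_init(seg, 0);
	printf("cycle state 0: first cycle bit = %u\n", seg[0].control & 1u);
	return 0;
}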
Diffstat (limited to 'drivers/usb/host/xhci-mem.c')
-rw-r--r--	drivers/usb/host/xhci-mem.c	56
1 file changed, 38 insertions, 18 deletions
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 47b762994ae9..c1800c7582b7 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -34,10 +34,12 @@
  * Section 4.11.1.1:
  * "All components of all Command and Transfer TRBs shall be initialized to '0'"
  */
-static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
+static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
+		unsigned int cycle_state, gfp_t flags)
 {
 	struct xhci_segment *seg;
 	dma_addr_t dma;
+	int i;
 
 	seg = kzalloc(sizeof *seg, flags);
 	if (!seg)
@@ -50,6 +52,11 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flag
 	}
 
 	memset(seg->trbs, 0, SEGMENT_SIZE);
+	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
+	if (cycle_state == 0) {
+		for (i = 0; i < TRBS_PER_SEGMENT; i++)
+			seg->trbs[i].link.control |= TRB_CYCLE;
+	}
 	seg->dma = dma;
 	seg->next = NULL;
 
@@ -124,7 +131,8 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
 	kfree(ring);
 }
 
-static void xhci_initialize_ring_info(struct xhci_ring *ring)
+static void xhci_initialize_ring_info(struct xhci_ring *ring,
+					unsigned int cycle_state)
 {
 	/* The ring is empty, so the enqueue pointer == dequeue pointer */
 	ring->enqueue = ring->first_seg->trbs;
@@ -134,8 +142,11 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring)
 	/* The ring is initialized to 0. The producer must write 1 to the cycle
 	 * bit to handover ownership of the TRB, so PCS = 1. The consumer must
 	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
+	 *
+	 * New rings are initialized with cycle state equal to 1; if we are
+	 * handling ring expansion, set the cycle state equal to the old ring.
 	 */
-	ring->cycle_state = 1;
+	ring->cycle_state = cycle_state;
 	/* Not necessary for new rings, but needed for re-initialized rings */
 	ring->enq_updates = 0;
 	ring->deq_updates = 0;
@@ -150,11 +161,12 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring)
 /* Allocate segments and link them for a ring */
 static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
 		struct xhci_segment **first, struct xhci_segment **last,
-		unsigned int num_segs, enum xhci_ring_type type, gfp_t flags)
+		unsigned int num_segs, unsigned int cycle_state,
+		enum xhci_ring_type type, gfp_t flags)
 {
 	struct xhci_segment *prev;
 
-	prev = xhci_segment_alloc(xhci, flags);
+	prev = xhci_segment_alloc(xhci, cycle_state, flags);
 	if (!prev)
 		return -ENOMEM;
 	num_segs--;
@@ -163,7 +175,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
 	while (num_segs > 0) {
 		struct xhci_segment *next;
 
-		next = xhci_segment_alloc(xhci, flags);
+		next = xhci_segment_alloc(xhci, cycle_state, flags);
 		if (!next) {
 			xhci_free_segments_for_ring(xhci, *first);
 			return -ENOMEM;
@@ -187,7 +199,8 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
  * See section 4.9.1 and figures 15 and 16.
  */
 static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
-		unsigned int num_segs, enum xhci_ring_type type, gfp_t flags)
+		unsigned int num_segs, unsigned int cycle_state,
+		enum xhci_ring_type type, gfp_t flags)
 {
 	struct xhci_ring *ring;
 	int ret;
@@ -203,7 +216,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 		return ring;
 
 	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
-			&ring->last_seg, num_segs, type, flags);
+			&ring->last_seg, num_segs, cycle_state, type, flags);
 	if (ret)
 		goto fail;
 
@@ -213,7 +226,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
 			cpu_to_le32(LINK_TOGGLE);
 	}
-	xhci_initialize_ring_info(ring);
+	xhci_initialize_ring_info(ring, cycle_state);
 	return ring;
 
 fail:
@@ -249,18 +262,25 @@ void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
  * pointers to the beginning of the ring.
  */
 static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
-			struct xhci_ring *ring, enum xhci_ring_type type)
+			struct xhci_ring *ring, unsigned int cycle_state,
+			enum xhci_ring_type type)
 {
 	struct xhci_segment *seg = ring->first_seg;
+	int i;
+
 	do {
 		memset(seg->trbs, 0,
 				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
+		if (cycle_state == 0) {
+			for (i = 0; i < TRBS_PER_SEGMENT; i++)
+				seg->trbs[i].link.control |= TRB_CYCLE;
+		}
 		/* All endpoint rings have link TRBs */
 		xhci_link_segments(xhci, seg, seg->next, type);
 		seg = seg->next;
 	} while (seg != ring->first_seg);
 	ring->type = type;
-	xhci_initialize_ring_info(ring);
+	xhci_initialize_ring_info(ring, cycle_state);
 	/* td list should be empty since all URBs have been cancelled,
 	 * but just in case...
 	 */
@@ -561,7 +581,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
 	 */
 	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
 		stream_info->stream_rings[cur_stream] =
-			xhci_ring_alloc(xhci, 1, TYPE_STREAM, mem_flags);
+			xhci_ring_alloc(xhci, 1, 1, TYPE_STREAM, mem_flags);
 		cur_ring = stream_info->stream_rings[cur_stream];
 		if (!cur_ring)
 			goto cleanup_rings;
@@ -895,7 +915,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	}
 
 	/* Allocate endpoint 0 ring */
-	dev->eps[0].ring = xhci_ring_alloc(xhci, 1, TYPE_CTRL, flags);
+	dev->eps[0].ring = xhci_ring_alloc(xhci, 1, 1, TYPE_CTRL, flags);
 	if (!dev->eps[0].ring)
 		goto fail;
 
@@ -1349,10 +1369,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	 */
 	if (usb_endpoint_xfer_isoc(&ep->desc))
 		virt_dev->eps[ep_index].new_ring =
-			xhci_ring_alloc(xhci, 8, type, mem_flags);
+			xhci_ring_alloc(xhci, 8, 1, type, mem_flags);
 	else
 		virt_dev->eps[ep_index].new_ring =
-			xhci_ring_alloc(xhci, 1, type, mem_flags);
+			xhci_ring_alloc(xhci, 1, 1, type, mem_flags);
 	if (!virt_dev->eps[ep_index].new_ring) {
 		/* Attempt to use the ring cache */
 		if (virt_dev->num_rings_cached == 0)
@@ -1362,7 +1382,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
 		virt_dev->num_rings_cached--;
 		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
-			type);
+			1, type);
 	}
 	virt_dev->eps[ep_index].skip = false;
 	ep_ring = virt_dev->eps[ep_index].new_ring;
@@ -2270,7 +2290,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 		goto fail;
 
 	/* Set up the command ring to have one segments for now. */
-	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, TYPE_COMMAND, flags);
+	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
 	if (!xhci->cmd_ring)
 		goto fail;
 	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
@@ -2301,7 +2321,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	 * the event ring segment table (ERST). Section 4.9.3.
 	 */
 	xhci_dbg(xhci, "// Allocating event ring\n");
-	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, TYPE_EVENT,
+	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
 						flags);
 	if (!xhci->event_ring)
 		goto fail;
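[Editor's note: the commit message says the new cycle_state parameter exists so ring expansion can allocate segments that match the old ring. The expansion path itself is not part of this patch; the sketch below is a hypothetical caller (example_expand_ring and its error handling are invented for illustration) showing how the reworked xhci_alloc_segments_for_ring() would be used with the old ring's cycle state, while brand-new rings keep passing 1 as seen in the call sites above.]

/* Hypothetical caller, not part of this patch: new segments inherit the
 * cycle state of the ring they will be grafted onto. */
static int example_expand_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_new_segs, gfp_t flags)
{
	struct xhci_segment *first;
	struct xhci_segment *last;
	int ret;

	/* Allocate the extra segments with the ring's current cycle state */
	ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
			num_new_segs, ring->cycle_state, ring->type, flags);
	if (ret)
		return ret;

	/* Linking the new chain into the existing ring is out of scope here */
	return 0;
}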