aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/usb/host/xhci-mem.c
diff options
context:
space:
mode:
authorSarah Sharp <sarah.a.sharp@linux.intel.com>2009-04-27 22:52:34 -0400
committerGreg Kroah-Hartman <gregkh@suse.de>2009-06-16 00:44:48 -0400
commit0ebbab37422315a5d0cb29792271085bafdf38c0 (patch)
treeb638a71f004c27b49ea09f64ed80596078cc6167 /drivers/usb/host/xhci-mem.c
parent66d4eadd8d067269ea8fead1a50fe87c2979a80d (diff)
USB: xhci: Ring allocation and initialization.
Allocate basic xHCI host controller data structures. For every xHC, there is a command ring, an event ring, and a doorbell array. The doorbell array is used to notify the host controller that work has been enqueued onto one of the rings. The host controller driver enqueues commands on the command ring. The HW enqueues command completion events on the event ring and interrupts the system (currently using PCI interrupts, although the xHCI HW will use MSI interrupts eventually). All rings and the doorbell array must be allocated by the xHCI host controller driver. Each ring is comprised of one or more segments, which consist of 16-byte Transfer Request Blocks (TRBs) that can be chained to form a Transfer Descriptor (TD) that represents a multiple-buffer request. Segments are linked into a ring using Link TRBs, which means they are dynamically growable. The producer of the ring enqueues a TD by writing one or more TRBs in the ring and toggling the TRB cycle bit for each TRB. The consumer knows it can process the TRB when the cycle bit matches its internal consumer cycle state for the ring. The consumer cycle state is toggled an odd number of times in the ring. 
An example ring (a ring must have a minimum of 16 TRBs on it, but that's too big to draw in ASCII art): chain cycle bit bit ------------------------ | TD A TRB 1 | 1 | 1 |<------------- <-- consumer dequeue ptr ------------------------ | consumer cycle state = 1 | TD A TRB 2 | 1 | 1 | | ------------------------ | | TD A TRB 3 | 0 | 1 | segment 1 | ------------------------ | | TD B TRB 1 | 1 | 1 | | ------------------------ | | TD B TRB 2 | 0 | 1 | | ------------------------ | | Link TRB | 0 | 1 |----- | ------------------------ | | | | chain cycle | | bit bit | | ------------------------ | | | TD C TRB 1 | 0 | 1 |<---- | ------------------------ | | TD D TRB 1 | 1 | 1 | | ------------------------ | | TD D TRB 2 | 1 | 1 | segment 2 | ------------------------ | | TD D TRB 3 | 1 | 1 | | ------------------------ | | TD D TRB 4 | 1 | 1 | | ------------------------ | | Link TRB | 1 | 1 |----- | ------------------------ | | | | chain cycle | | bit bit | | ------------------------ | | | TD D TRB 5 | 1 | 1 |<---- | ------------------------ | | TD D TRB 6 | 0 | 1 | | ------------------------ | | TD E TRB 1 | 0 | 1 | segment 3 | ------------------------ | | | 0 | 0 | | <-- producer enqueue ptr ------------------------ | | | 0 | 0 | | ------------------------ | | Link TRB | 0 | 0 |--------------- ------------------------ Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/usb/host/xhci-mem.c')
-rw-r--r--drivers/usb/host/xhci-mem.c305
1 files changed, 304 insertions, 1 deletions
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 0e383f9c380c..7cf15ca854be 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -21,18 +21,215 @@
21 */ 21 */
22 22
23#include <linux/usb.h> 23#include <linux/usb.h>
24#include <linux/pci.h>
24 25
25#include "xhci.h" 26#include "xhci.h"
26 27
28/*
29 * Allocates a generic ring segment from the ring pool, sets the dma address,
30 * initializes the segment to zero, and sets the private next pointer to NULL.
31 *
32 * Section 4.11.1.1:
33 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
34 */
35static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
36{
37 struct xhci_segment *seg;
38 dma_addr_t dma;
39
40 seg = kzalloc(sizeof *seg, flags);
41 if (!seg)
42 return 0;
43 xhci_dbg(xhci, "Allocating priv segment structure at 0x%x\n",
44 (unsigned int) seg);
45
46 seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
47 if (!seg->trbs) {
48 kfree(seg);
49 return 0;
50 }
51 xhci_dbg(xhci, "// Allocating segment at 0x%x (virtual) 0x%x (DMA)\n",
52 (unsigned int) seg->trbs, (u32) dma);
53
54 memset(seg->trbs, 0, SEGMENT_SIZE);
55 seg->dma = dma;
56 seg->next = NULL;
57
58 return seg;
59}
60
61static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
62{
63 if (!seg)
64 return;
65 if (seg->trbs) {
66 xhci_dbg(xhci, "Freeing DMA segment at 0x%x"
67 " (virtual) 0x%x (DMA)\n",
68 (unsigned int) seg->trbs, (u32) seg->dma);
69 dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
70 seg->trbs = NULL;
71 }
72 xhci_dbg(xhci, "Freeing priv segment structure at 0x%x\n",
73 (unsigned int) seg);
74 kfree(seg);
75}
76
77/*
78 * Make the prev segment point to the next segment.
79 *
80 * Change the last TRB in the prev segment to be a Link TRB which points to the
81 * DMA address of the next segment. The caller needs to set any Link TRB
82 * related flags, such as End TRB, Toggle Cycle, and no snoop.
83 */
84static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
85 struct xhci_segment *next, bool link_trbs)
86{
87 u32 val;
88
89 if (!prev || !next)
90 return;
91 prev->next = next;
92 if (link_trbs) {
93 prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma;
94
95 /* Set the last TRB in the segment to have a TRB type ID of Link TRB */
96 val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
97 val &= ~TRB_TYPE_BITMASK;
98 val |= TRB_TYPE(TRB_LINK);
99 prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
100 }
101 xhci_dbg(xhci, "Linking segment 0x%x to segment 0x%x (DMA)\n",
102 prev->dma, next->dma);
103}
104
105/* XXX: Do we need the hcd structure in all these functions? */
106static void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
107{
108 struct xhci_segment *seg;
109 struct xhci_segment *first_seg;
110
111 if (!ring || !ring->first_seg)
112 return;
113 first_seg = ring->first_seg;
114 seg = first_seg->next;
115 xhci_dbg(xhci, "Freeing ring at 0x%x\n", (unsigned int) ring);
116 while (seg != first_seg) {
117 struct xhci_segment *next = seg->next;
118 xhci_segment_free(xhci, seg);
119 seg = next;
120 }
121 xhci_segment_free(xhci, first_seg);
122 ring->first_seg = NULL;
123 kfree(ring);
124}
125
126/**
127 * Create a new ring with zero or more segments.
128 *
129 * Link each segment together into a ring.
130 * Set the end flag and the cycle toggle bit on the last segment.
131 * See section 4.9.1 and figures 15 and 16.
132 */
133static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
134 unsigned int num_segs, bool link_trbs, gfp_t flags)
135{
136 struct xhci_ring *ring;
137 struct xhci_segment *prev;
138
139 ring = kzalloc(sizeof *(ring), flags);
140 xhci_dbg(xhci, "Allocating ring at 0x%x\n", (unsigned int) ring);
141 if (!ring)
142 return 0;
143
144 if (num_segs == 0)
145 return ring;
146
147 ring->first_seg = xhci_segment_alloc(xhci, flags);
148 if (!ring->first_seg)
149 goto fail;
150 num_segs--;
151
152 prev = ring->first_seg;
153 while (num_segs > 0) {
154 struct xhci_segment *next;
155
156 next = xhci_segment_alloc(xhci, flags);
157 if (!next)
158 goto fail;
159 xhci_link_segments(xhci, prev, next, link_trbs);
160
161 prev = next;
162 num_segs--;
163 }
164 xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);
165
166 if (link_trbs) {
167 /* See section 4.9.2.1 and 6.4.4.1 */
168 prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
169 xhci_dbg(xhci, "Wrote link toggle flag to"
170 " segment 0x%x (virtual), 0x%x (DMA)\n",
171 (unsigned int) prev, (u32) prev->dma);
172 }
173 /* The ring is empty, so the enqueue pointer == dequeue pointer */
174 ring->enqueue = ring->first_seg->trbs;
175 ring->dequeue = ring->enqueue;
176 /* The ring is initialized to 0. The producer must write 1 to the cycle
177 * bit to handover ownership of the TRB, so PCS = 1. The consumer must
178 * compare CCS to the cycle bit to check ownership, so CCS = 1.
179 */
180 ring->cycle_state = 1;
181
182 return ring;
183
184fail:
185 xhci_ring_free(xhci, ring);
186 return 0;
187}
188
/*
 * Free everything xhci_mem_init() allocated and zero the hardware registers
 * that reference that memory.  Each register is cleared BEFORE the backing
 * memory is freed, so the controller is never left pointing at freed DMA
 * buffers.  Safe to call on a partially initialized xhci (every free is
 * guarded or NULL-tolerant).
 */
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int size;

	/* XXX: Free all the segments in the various rings */

	/* Free the Event Ring Segment Table and the actual Event Ring */
	/* Zero ERST size/base/dequeue registers first so the HW drops its
	 * references into the table we are about to free. */
	xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]);
	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]);
	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
	if (xhci->erst.entries)
		pci_free_consistent(pdev, size,
				xhci->erst.entries, xhci->erst.erst_dma_addr);
	xhci->erst.entries = NULL;
	xhci_dbg(xhci, "Freed ERST\n");
	/* xhci_ring_free() already tolerates NULL, so these guards are
	 * redundant but harmless. */
	if (xhci->event_ring)
		xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg(xhci, "Freed event ring\n");

	/* Clear the command ring control register before freeing its ring. */
	xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]);
	xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]);
	if (xhci->cmd_ring)
		xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg(xhci, "Freed command ring\n");
	/* Destroy the pool only after every segment has been returned to it
	 * by the ring frees above. */
	if (xhci->segment_pool)
		dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg(xhci, "Freed segment pool\n");
	xhci->page_size = 0;
	xhci->page_shift = 0;
}
32 226
33int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) 227int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
34{ 228{
229 dma_addr_t dma;
230 struct device *dev = xhci_to_hcd(xhci)->self.controller;
35 unsigned int val, val2; 231 unsigned int val, val2;
232 struct xhci_segment *seg;
36 u32 page_size; 233 u32 page_size;
37 int i; 234 int i;
38 235
@@ -65,7 +262,113 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
65 (unsigned int) val); 262 (unsigned int) val);
66 xhci_writel(xhci, val, &xhci->op_regs->config_reg); 263 xhci_writel(xhci, val, &xhci->op_regs->config_reg);
67 264
68 xhci->ir_set = &xhci->run_regs->ir_set[0]; 265 /*
266 * Initialize the ring segment pool. The ring must be a contiguous
267 * structure comprised of TRBs. The TRBs must be 16 byte aligned,
268 * however, the command ring segment needs 64-byte aligned segments,
269 * so we pick the greater alignment need.
270 */
271 xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
272 SEGMENT_SIZE, 64, xhci->page_size);
273 if (!xhci->segment_pool)
274 goto fail;
275
276	/* Set up the command ring to have one segment for now. */
277 xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
278 if (!xhci->cmd_ring)
279 goto fail;
280 xhci_dbg(xhci, "Allocated command ring at 0x%x\n", (unsigned int) xhci->cmd_ring);
281 xhci_dbg(xhci, "First segment DMA is 0x%x\n", (unsigned int) xhci->cmd_ring->first_seg->dma);
282
283 /* Set the address in the Command Ring Control register */
284 val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
285 val = (val & ~CMD_RING_ADDR_MASK) |
286 (xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) |
287 xhci->cmd_ring->cycle_state;
288 xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n");
289 xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]);
290 xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val);
291 xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]);
292 xhci_dbg_cmd_ptrs(xhci);
293
294 val = xhci_readl(xhci, &xhci->cap_regs->db_off);
295 val &= DBOFF_MASK;
296 xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
297 " from cap regs base addr\n", val);
298 xhci->dba = (void *) xhci->cap_regs + val;
299 xhci_dbg_regs(xhci);
300 xhci_print_run_regs(xhci);
301 /* Set ir_set to interrupt register set 0 */
302 xhci->ir_set = (void *) xhci->run_regs->ir_set;
303
304 /*
305 * Event ring setup: Allocate a normal ring, but also setup
306 * the event ring segment table (ERST). Section 4.9.3.
307 */
308 xhci_dbg(xhci, "// Allocating event ring\n");
309 xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
310 if (!xhci->event_ring)
311 goto fail;
312
313 xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
314 sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
315 if (!xhci->erst.entries)
316 goto fail;
317 xhci_dbg(xhci, "// Allocated event ring segment table at 0x%x\n", dma);
318
319 memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
320 xhci->erst.num_entries = ERST_NUM_SEGS;
321 xhci->erst.erst_dma_addr = dma;
322 xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = 0x%x, dma addr = 0x%x\n",
323 xhci->erst.num_entries,
324 (unsigned int) xhci->erst.entries,
325 xhci->erst.erst_dma_addr);
326
327 /* set ring base address and size for each segment table entry */
328 for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
329 struct xhci_erst_entry *entry = &xhci->erst.entries[val];
330 entry->seg_addr[1] = 0;
331 entry->seg_addr[0] = seg->dma;
332 entry->seg_size = TRBS_PER_SEGMENT;
333 entry->rsvd = 0;
334 seg = seg->next;
335 }
336
337 /* set ERST count with the number of entries in the segment table */
338 val = xhci_readl(xhci, &xhci->ir_set->erst_size);
339 val &= ERST_SIZE_MASK;
340 val |= ERST_NUM_SEGS;
341 xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
342 val);
343 xhci_writel(xhci, val, &xhci->ir_set->erst_size);
344
345 xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
346 /* set the segment table base address */
347 xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%x\n",
348 xhci->erst.erst_dma_addr);
349 xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
350 val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]);
351 val &= ERST_PTR_MASK;
352 val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK);
353 xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]);
354
355 /* Set the event ring dequeue address */
356 xhci_dbg(xhci, "// Set ERST dequeue address for ir_set 0 = 0x%x%x\n",
357 xhci->erst.entries[0].seg_addr[1], xhci->erst.entries[0].seg_addr[0]);
358 val = xhci_readl(xhci, &xhci->run_regs->ir_set[0].erst_dequeue[0]);
359 val &= ERST_PTR_MASK;
360 val |= (xhci->erst.entries[0].seg_addr[0] & ~ERST_PTR_MASK);
361 xhci_writel(xhci, val, &xhci->run_regs->ir_set[0].erst_dequeue[0]);
362 xhci_writel(xhci, xhci->erst.entries[0].seg_addr[1],
363 &xhci->run_regs->ir_set[0].erst_dequeue[1]);
364 xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
365 xhci_print_ir_set(xhci, xhci->ir_set, 0);
366
367 /*
368 * XXX: Might need to set the Interrupter Moderation Register to
369 * something other than the default (~1ms minimum between interrupts).
370 * See section 5.5.1.2.
371 */
69 372
70 return 0; 373 return 0;
71fail: 374fail: