Diffstat (limited to 'drivers')

 drivers/usb/host/xhci-dbg.c |  81 ++++++++
 drivers/usb/host/xhci-hcd.c |   5 +
 drivers/usb/host/xhci-mem.c | 305 +++++++++++++++++++++++++-
 drivers/usb/host/xhci.h     | 296 ++++++++++++++++++++++++++
 4 files changed, 686 insertions(+), 1 deletion(-)
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index a7798b460492..5724683cef16 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -56,6 +56,8 @@ void xhci_dbg_regs(struct xhci_hcd *xhci)
 	temp = xhci_readl(xhci, &xhci->cap_regs->db_off);
 	xhci_dbg(xhci, "// @%x = 0x%x DBOFF\n",
 			(unsigned int) &xhci->cap_regs->db_off, temp);
+	xhci_dbg(xhci, "// Doorbell array at 0x%x:\n",
+			(unsigned int) xhci->dba);
 }
 
 void xhci_print_cap_regs(struct xhci_hcd *xhci)
@@ -227,3 +229,82 @@ void xhci_print_registers(struct xhci_hcd *xhci)
 	xhci_print_cap_regs(xhci);
 	xhci_print_op_regs(xhci);
 }
+
+
+/**
+ * Debug a segment within an xHCI ring.
+ *
+ * Prints out all TRBs in the segment, even those after the Link TRB.
+ * (Every segment must end in a Link TRB; a segment without one is a bug.)
+ *
+ * XXX: should we print out TRBs that the HC owns?  As long as we don't
+ * write, that should be fine...  We shouldn't expect that the memory pointed
+ * to by the TRB is valid at all.  Do we care about ones the HC owns?
+ * Probably, for HC debugging.
+ */
+void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
+{
+	int i;
+	u32 addr = (u32) seg->dma;
+	union xhci_trb *trb;
+
+	for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
+		trb = &seg->trbs[i];
+		xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr,
+				(unsigned int) trb->link.segment_ptr[0],
+				(unsigned int) trb->link.segment_ptr[1],
+				(unsigned int) trb->link.intr_target,
+				(unsigned int) trb->link.control);
+		addr += sizeof(*trb);
+	}
+}
+
+/**
+ * Debugging for an xHCI ring, which is a queue broken into multiple segments.
+ *
+ * Print out each segment in the ring.  Check that the DMA address in
+ * each link segment actually matches the segment's stored DMA address.
+ * Check that the link end bit is only set at the end of the ring.
+ * Check that the dequeue and enqueue pointers point to real data in this ring
+ * (not some other ring).
+ */
+void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+	/* FIXME: Throw an error if any segment doesn't have a Link TRB */
+	struct xhci_segment *seg;
+	struct xhci_segment *first_seg = ring->first_seg;
+	xhci_debug_segment(xhci, first_seg);
+
+	for (seg = first_seg->next; seg != first_seg; seg = seg->next)
+		xhci_debug_segment(xhci, seg);
+}
+
+void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
+{
+	u32 addr = (u32) erst->erst_dma_addr;
+	int i;
+	struct xhci_erst_entry *entry;
+
+	for (i = 0; i < erst->num_entries; ++i) {
+		entry = &erst->entries[i];
+		xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n",
+				(unsigned int) addr,
+				(unsigned int) entry->seg_addr[0],
+				(unsigned int) entry->seg_addr[1],
+				(unsigned int) entry->seg_size,
+				(unsigned int) entry->rsvd);
+		addr += sizeof(*entry);
+	}
+}
+
+void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
+{
+	u32 val;
+
+	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
+	xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = 0x%x\n", val);
+	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[1]);
+	xhci_dbg(xhci, "// xHC command ring deq ptr high bits = 0x%x\n", val);
+}
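Note that the dump format above treats every TRB as four 32-bit words, reusing struct xhci_link_trb purely as a field overlay. A type-neutral overlay would make that intent explicit; a minimal sketch (hypothetical, not part of this patch):

	/* Sketch: a generic four-word view of a TRB for dumping, assuming
	 * the 16-byte TRB layout this patch defines in xhci.h. */
	struct xhci_generic_trb {
		u32 field[4];
	} __attribute__ ((packed));

With such an overlay added to union xhci_trb, xhci_debug_segment could print field[0] through field[3] without implying every TRB is a Link TRB.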
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 64fcc22e9d59..011f47810665 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -266,6 +266,11 @@ int xhci_run(struct usb_hcd *hcd)
 			&xhci->ir_set->irq_pending);
 	xhci_print_ir_set(xhci, xhci->ir_set, 0);
 
+	xhci_dbg(xhci, "Command ring memory map follows:\n");
+	xhci_debug_ring(xhci, xhci->cmd_ring);
+	xhci_dbg(xhci, "ERST memory map follows:\n");
+	xhci_dbg_erst(xhci, &xhci->erst);
+
 	temp = xhci_readl(xhci, &xhci->op_regs->command);
 	temp |= (CMD_RUN);
 	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 0e383f9c380c..7cf15ca854be 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -21,18 +21,215 @@
  */
 
 #include <linux/usb.h>
+#include <linux/pci.h>
 
 #include "xhci.h"
 
+/*
+ * Allocates a generic ring segment from the ring pool, sets the dma address,
+ * initializes the segment to zero, and sets the private next pointer to NULL.
+ *
+ * Section 4.11.1.1:
+ * "All components of all Command and Transfer TRBs shall be initialized to '0'"
+ */
+static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
+{
+	struct xhci_segment *seg;
+	dma_addr_t dma;
+
+	seg = kzalloc(sizeof(*seg), flags);
+	if (!seg)
+		return NULL;
+	xhci_dbg(xhci, "Allocating priv segment structure at 0x%x\n",
+			(unsigned int) seg);
+
+	seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
+	if (!seg->trbs) {
+		kfree(seg);
+		return NULL;
+	}
+	xhci_dbg(xhci, "// Allocating segment at 0x%x (virtual) 0x%x (DMA)\n",
+			(unsigned int) seg->trbs, (u32) dma);
+
+	memset(seg->trbs, 0, SEGMENT_SIZE);
+	seg->dma = dma;
+	seg->next = NULL;
+
+	return seg;
+}
+
+static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
+{
+	if (!seg)
+		return;
+	if (seg->trbs) {
+		xhci_dbg(xhci, "Freeing DMA segment at 0x%x"
+				" (virtual) 0x%x (DMA)\n",
+				(unsigned int) seg->trbs, (u32) seg->dma);
+		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
+		seg->trbs = NULL;
+	}
+	xhci_dbg(xhci, "Freeing priv segment structure at 0x%x\n",
+			(unsigned int) seg);
+	kfree(seg);
+}
+
+/*
+ * Make the prev segment point to the next segment.
+ *
+ * Change the last TRB in the prev segment to be a Link TRB which points to the
+ * DMA address of the next segment.  The caller needs to set any Link TRB
+ * related flags, such as End TRB, Toggle Cycle, and no snoop.
+ */
+static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
+		struct xhci_segment *next, bool link_trbs)
+{
+	u32 val;
+
+	if (!prev || !next)
+		return;
+	prev->next = next;
+	if (link_trbs) {
+		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma;
+
+		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
+		val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
+		val &= ~TRB_TYPE_BITMASK;
+		val |= TRB_TYPE(TRB_LINK);
+		prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
+	}
+	xhci_dbg(xhci, "Linking segment 0x%x to segment 0x%x (DMA)\n",
+			(u32) prev->dma, (u32) next->dma);
+}
+
+/* XXX: Do we need the hcd structure in all these functions? */
+static void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+	struct xhci_segment *seg;
+	struct xhci_segment *first_seg;
+
+	if (!ring)
+		return;
+	xhci_dbg(xhci, "Freeing ring at 0x%x\n", (unsigned int) ring);
+	first_seg = ring->first_seg;
+	if (first_seg) {
+		/* The ring may not be closed yet (e.g. on the allocation
+		 * failure path), so stop at a NULL next pointer too. */
+		seg = first_seg->next;
+		while (seg && seg != first_seg) {
+			struct xhci_segment *next = seg->next;
+			xhci_segment_free(xhci, seg);
+			seg = next;
+		}
+		xhci_segment_free(xhci, first_seg);
+		ring->first_seg = NULL;
+	}
+	kfree(ring);
+}
+
+/**
+ * Create a new ring with zero or more segments.
+ *
+ * Link each segment together into a ring.
+ * Set the end flag and the cycle toggle bit on the last segment.
+ * See section 4.9.1 and figures 15 and 16.
+ */
+static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
+		unsigned int num_segs, bool link_trbs, gfp_t flags)
+{
+	struct xhci_ring *ring;
+	struct xhci_segment *prev;
+
+	ring = kzalloc(sizeof(*ring), flags);
+	xhci_dbg(xhci, "Allocating ring at 0x%x\n", (unsigned int) ring);
+	if (!ring)
+		return NULL;
+
+	if (num_segs == 0)
+		return ring;
+
+	ring->first_seg = xhci_segment_alloc(xhci, flags);
+	if (!ring->first_seg)
+		goto fail;
+	num_segs--;
+
+	prev = ring->first_seg;
+	while (num_segs > 0) {
+		struct xhci_segment *next;
+
+		next = xhci_segment_alloc(xhci, flags);
+		if (!next)
+			goto fail;
+		xhci_link_segments(xhci, prev, next, link_trbs);
+
+		prev = next;
+		num_segs--;
+	}
+	xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);
+
+	if (link_trbs) {
+		/* See section 4.9.2.1 and 6.4.4.1 */
+		prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
+		xhci_dbg(xhci, "Wrote link toggle flag to"
+				" segment 0x%x (virtual), 0x%x (DMA)\n",
+				(unsigned int) prev, (u32) prev->dma);
+	}
+	/* The ring is empty, so the enqueue pointer == dequeue pointer */
+	ring->enqueue = ring->first_seg->trbs;
+	ring->dequeue = ring->enqueue;
+	/* The ring is initialized to 0.  The producer must write 1 to the
+	 * cycle bit to hand over ownership of the TRB, so PCS = 1.  The
+	 * consumer must compare CCS to the cycle bit to check ownership,
+	 * so CCS = 1.
+	 */
+	ring->cycle_state = 1;
+
+	return ring;
+
+fail:
+	xhci_ring_free(xhci, ring);
+	return NULL;
+}
+
 void xhci_mem_cleanup(struct xhci_hcd *xhci)
 {
+	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+	int size;
+
+	/* XXX: Free all the segments in the various rings */
+
+	/* Free the Event Ring Segment Table and the actual Event Ring */
+	xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
+	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
+	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]);
+	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
+	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]);
+	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
+	if (xhci->erst.entries)
+		pci_free_consistent(pdev, size,
+				xhci->erst.entries, xhci->erst.erst_dma_addr);
+	xhci->erst.entries = NULL;
+	xhci_dbg(xhci, "Freed ERST\n");
+	if (xhci->event_ring)
+		xhci_ring_free(xhci, xhci->event_ring);
+	xhci->event_ring = NULL;
+	xhci_dbg(xhci, "Freed event ring\n");
+
+	xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]);
+	xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]);
+	if (xhci->cmd_ring)
+		xhci_ring_free(xhci, xhci->cmd_ring);
+	xhci->cmd_ring = NULL;
+	xhci_dbg(xhci, "Freed command ring\n");
+	if (xhci->segment_pool)
+		dma_pool_destroy(xhci->segment_pool);
+	xhci->segment_pool = NULL;
+	xhci_dbg(xhci, "Freed segment pool\n");
 	xhci->page_size = 0;
 	xhci->page_shift = 0;
 }
 
 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 {
+	dma_addr_t dma;
+	struct device *dev = xhci_to_hcd(xhci)->self.controller;
 	unsigned int val, val2;
+	struct xhci_segment *seg;
 	u32 page_size;
 	int i;
 
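To make the linking logic above concrete: after xhci_ring_alloc(xhci, 3, true, flags), the last TRB of each segment is a Link TRB holding the next segment's DMA address, the chain wraps back to the first segment, and only the final segment carries LINK_TOGGLE. A hedged sketch of a closure check under those assumptions (the helper is hypothetical, and only meaningful for rings built with link_trbs = true):

	/* Sketch: verify a ring is circular and each Link TRB points at the
	 * next segment's DMA address, as set up by xhci_link_segments(). */
	static bool xhci_ring_is_closed(struct xhci_ring *ring)
	{
		struct xhci_segment *seg = ring->first_seg;

		do {
			if (!seg->next)
				return false;	/* chain never wrapped */
			if (seg->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0]
					!= (u32) seg->next->dma)
				return false;
			seg = seg->next;
		} while (seg != ring->first_seg);
		return true;
	}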
@@ -65,7 +262,113 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 			(unsigned int) val);
 	xhci_writel(xhci, val, &xhci->op_regs->config_reg);
 
-	xhci->ir_set = &xhci->run_regs->ir_set[0];
+	/*
+	 * Initialize the ring segment pool.  The ring must be a contiguous
+	 * structure made up of TRBs.  The TRBs must be 16-byte aligned;
+	 * however, the command ring needs 64-byte aligned segments,
+	 * so we pick the greater alignment need.
+	 */
+	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
+			SEGMENT_SIZE, 64, xhci->page_size);
+	if (!xhci->segment_pool)
+		goto fail;
+
+	/* Set up the command ring to have one segment for now. */
+	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
+	if (!xhci->cmd_ring)
+		goto fail;
+	xhci_dbg(xhci, "Allocated command ring at 0x%x\n", (unsigned int) xhci->cmd_ring);
+	xhci_dbg(xhci, "First segment DMA is 0x%x\n", (unsigned int) xhci->cmd_ring->first_seg->dma);
+
+	/* Set the address in the Command Ring Control register */
+	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
+	val = (val & ~CMD_RING_ADDR_MASK) |
+		(xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) |
+		xhci->cmd_ring->cycle_state;
+	xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n");
+	xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]);
+	xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val);
+	xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]);
+	xhci_dbg_cmd_ptrs(xhci);
+
+	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
+	val &= DBOFF_MASK;
+	xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
+			" from cap regs base addr\n", val);
+	xhci->dba = (void *) xhci->cap_regs + val;
+	xhci_dbg_regs(xhci);
+	xhci_print_run_regs(xhci);
+	/* Set ir_set to interrupt register set 0 */
+	xhci->ir_set = (void *) xhci->run_regs->ir_set;
+
+	/*
+	 * Event ring setup: Allocate a normal ring, but also set up
+	 * the event ring segment table (ERST).  Section 4.9.3.
+	 */
+	xhci_dbg(xhci, "// Allocating event ring\n");
+	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
+	if (!xhci->event_ring)
+		goto fail;
+
+	xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
+			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
+	if (!xhci->erst.entries)
+		goto fail;
+	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%x\n",
+			(u32) dma);
+
+	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
+	xhci->erst.num_entries = ERST_NUM_SEGS;
+	xhci->erst.erst_dma_addr = dma;
+	xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = 0x%x, dma addr = 0x%x\n",
+			xhci->erst.num_entries,
+			(unsigned int) xhci->erst.entries,
+			(u32) xhci->erst.erst_dma_addr);
+
+	/* set ring base address and size for each segment table entry */
+	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
+		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
+		entry->seg_addr[1] = 0;
+		entry->seg_addr[0] = seg->dma;
+		entry->seg_size = TRBS_PER_SEGMENT;
+		entry->rsvd = 0;
+		seg = seg->next;
+	}
+
+	/* set ERST count with the number of entries in the segment table */
+	val = xhci_readl(xhci, &xhci->ir_set->erst_size);
+	val &= ERST_SIZE_MASK;
+	val |= ERST_NUM_SEGS;
+	xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
+			val);
+	xhci_writel(xhci, val, &xhci->ir_set->erst_size);
+
+	xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
+	/* set the segment table base address */
+	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%x\n",
+			(u32) xhci->erst.erst_dma_addr);
+	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
+	val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]);
+	val &= ERST_PTR_MASK;
+	val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK);
+	xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]);
+
+	/* Set the event ring dequeue address */
+	xhci_dbg(xhci, "// Set ERST dequeue address for ir_set 0 = 0x%x%08x\n",
+			xhci->erst.entries[0].seg_addr[1], xhci->erst.entries[0].seg_addr[0]);
+	val = xhci_readl(xhci, &xhci->run_regs->ir_set[0].erst_dequeue[0]);
+	val &= ERST_PTR_MASK;
+	val |= (xhci->erst.entries[0].seg_addr[0] & ~ERST_PTR_MASK);
+	xhci_writel(xhci, val, &xhci->run_regs->ir_set[0].erst_dequeue[0]);
+	xhci_writel(xhci, xhci->erst.entries[0].seg_addr[1],
+			&xhci->run_regs->ir_set[0].erst_dequeue[1]);
+	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
+	xhci_print_ir_set(xhci, xhci->ir_set, 0);
+
+	/*
+	 * XXX: Might need to set the Interrupter Moderation Register to
+	 * something other than the default (~1 ms minimum between interrupts).
+	 * See section 5.5.1.2.
+	 */
 
 	return 0;
 fail:
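The cycle-state comment in xhci_ring_alloc (PCS = CCS = 1 on a freshly zeroed ring) is the heart of ring ownership, and is why xhci_mem_init above ORs cycle_state into the CRCR value. As a hedged illustration of the consumer side, using only TRB_CYCLE and the ring fields this patch defines (the helper itself is hypothetical):

	/* Sketch: a TRB is consumable when its cycle bit matches the ring's
	 * consumer cycle state (CCS).  A zeroed ring starts at CCS = 1, so
	 * only TRBs the producer has stamped with cycle = 1 test as ready. */
	static bool xhci_trb_is_ours(struct xhci_ring *ring, union xhci_trb *trb)
	{
		return (trb->link.control & TRB_CYCLE) == ring->cycle_state;
	}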
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 59fae2e5ea59..ed331310f1a8 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -241,6 +241,18 @@ struct xhci_op_regs {
  */
 #define DEV_NOTE_FWAKE		ENABLE_DEV_NOTE(1)
 
+/* CRCR - Command Ring Control Register - cmd_ring bitmasks */
+/* bit 0 is the command ring cycle state */
+/* stop ring operation after completion of the currently executing command */
+#define CMD_RING_PAUSE		(1 << 1)
+/* stop ring immediately - abort the currently executing command */
+#define CMD_RING_ABORT		(1 << 2)
+/* true: command ring is running */
+#define CMD_RING_RUNNING	(1 << 3)
+/* bits 4:5 reserved and should be preserved */
+/* Command Ring pointer - bit mask for the lower 32 bits */
+#define CMD_RING_ADDR_MASK	(0xffffffc0)
+
 /* CONFIG - Configure Register - config_reg bitmasks */
 /* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */
 #define MAX_DEVS(p)	((p) & 0xff)
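xhci_mem_init combines these masks with the first segment's DMA address when it programs CRCR; pulled out for clarity, the composition looks like this sketch (the helper name is hypothetical):

	/* Sketch: build the CRCR low word.  The low 6 bits carry flags
	 * (cycle state, pause, abort, running), which is why command ring
	 * segments must be 64-byte aligned. */
	static u32 xhci_crcr_low(u32 old, dma_addr_t ring_dma, u32 cycle_state)
	{
		return (old & ~CMD_RING_ADDR_MASK) |
			((u32) ring_dma & CMD_RING_ADDR_MASK) |
			cycle_state;
	}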
@@ -391,6 +403,7 @@ struct intr_reg {
  * a work queue (or delayed service routine)?
  */
 #define ERST_EHB		(1 << 3)
+#define ERST_PTR_MASK		(0xf)
 
 /**
  * struct xhci_run_regs
@@ -407,6 +420,275 @@ struct xhci_run_regs {
 	struct intr_reg	ir_set[128];
 } __attribute__ ((packed));
 
+/**
+ * struct xhci_doorbell_array
+ *
+ * Section 5.6
+ */
+struct xhci_doorbell_array {
+	u32	doorbell[256];
+} __attribute__ ((packed));
+
+#define	DB_TARGET_MASK		0xFFFFFF00
+#define	DB_STREAM_ID_MASK	0x0000FFFF
+#define	DB_TARGET_HOST		0x0
+#define	DB_STREAM_ID_HOST	0x0
+#define	DB_MASK			(0xff << 8)
+
+
+struct xhci_transfer_event {
+	/* 64-bit buffer address, or immediate data */
+	u32	buffer[2];
+	u32	transfer_len;
+	/* This field is interpreted differently based on the type of TRB */
+	u32	flags;
+} __attribute__ ((packed));
+
+/* Completion Code - only applicable for some types of TRBs */
+#define	COMP_CODE_MASK		(0xff << 24)
+#define GET_COMP_CODE(p)	(((p) & COMP_CODE_MASK) >> 24)
+#define COMP_SUCCESS	1
+/* Data Buffer Error */
+#define COMP_DB_ERR	2
+/* Babble Detected Error */
+#define COMP_BABBLE	3
+/* USB Transaction Error */
+#define COMP_TX_ERR	4
+/* TRB Error - some TRB field is invalid */
+#define COMP_TRB_ERR	5
+/* Stall Error - USB device is stalled */
+#define COMP_STALL	6
+/* Resource Error - HC doesn't have memory for that device configuration */
+#define COMP_ENOMEM	7
+/* Bandwidth Error - not enough room in schedule for this dev config */
+#define COMP_BW_ERR	8
+/* No Slots Available Error - HC ran out of device slots */
+#define COMP_ENOSLOTS	9
+/* Invalid Stream Type Error */
+#define COMP_STREAM_ERR	10
+/* Slot Not Enabled Error - doorbell rung for disabled device slot */
+#define COMP_EBADSLT	11
+/* Endpoint Not Enabled Error */
+#define COMP_EBADEP	12
+/* Short Packet */
+#define COMP_SHORT_TX	13
+/* Ring Underrun - doorbell rung for an empty isoc OUT ep ring */
+#define COMP_UNDERRUN	14
+/* Ring Overrun - isoc IN ep ring is empty when ep is scheduled to RX */
+#define COMP_OVERRUN	15
+/* Virtual Function Event Ring Full Error */
+#define COMP_VF_FULL	16
+/* Parameter Error - Context parameter is invalid */
+#define COMP_EINVAL	17
+/* Bandwidth Overrun Error - isoc ep exceeded its allocated bandwidth */
+#define COMP_BW_OVER	18
+/* Context State Error - illegal context state transition requested */
+#define COMP_CTX_STATE	19
+/* No Ping Response Error - HC didn't get PING_RESPONSE in time to TX */
+#define COMP_PING_ERR	20
+/* Event Ring is full */
+#define COMP_ER_FULL	21
+/* Missed Service Error - HC couldn't service an isoc ep within interval */
+#define COMP_MISSED_INT	23
+/* Successfully stopped command ring */
+#define COMP_CMD_STOP	24
+/* Successfully aborted current command and stopped command ring */
+#define COMP_CMD_ABORT	25
+/* Stopped - transfer was terminated by a stop endpoint command */
+#define COMP_STOP	26
+/* Same as COMP_STOP, but the transferred length in the event is invalid */
+#define COMP_STOP_INVAL	27
+/* Control Abort Error - Debug Capability - control pipe aborted */
+#define COMP_DBG_ABORT	28
+/* Completion codes 29 and 30 reserved */
+/* Isoc Buffer Overrun - an isoc IN ep sent more data than could fit in TD */
+#define COMP_BUFF_OVER	31
+/* Event Lost Error - xHC has an "internal event overrun condition" */
+#define COMP_ISSUES	32
+/* Undefined Error - reported when other error codes don't apply */
+#define COMP_UNKNOWN	33
+/* Invalid Stream ID Error */
+#define COMP_STRID_ERR	34
+/* Secondary Bandwidth Error - may be returned by a Configure Endpoint cmd */
+/* FIXME - check for this */
+#define COMP_2ND_BW_ERR	35
+/* Split Transaction Error */
+#define COMP_SPLIT_ERR	36
+
+struct xhci_link_trb {
+	/* 64-bit segment pointer */
+	u32 segment_ptr[2];
+	u32 intr_target;
+	u32 control;
+} __attribute__ ((packed));
+
+/* control bitfields */
+#define LINK_TOGGLE	(0x1<<1)
+
+
+union xhci_trb {
+	struct xhci_link_trb		link;
+	struct xhci_transfer_event	trans_event;
+};
+
+/* Normal TRB fields */
+/* transfer_len bitmasks - bits 0:16 */
+#define	TRB_LEN(p)		((p) & 0x1ffff)
+/* TD size - number of bytes remaining in the TD (including this TRB):
+ * bits 17 - 21.  Shift the number of bytes by 10. */
+#define TD_REMAINDER(p)		((((p) >> 10) & 0x1f) << 17)
+/* Interrupter Target - which MSI-X vector to target the completion event at */
+#define TRB_INTR_TARGET(p)	(((p) & 0x3ff) << 22)
+#define GET_INTR_TARGET(p)	(((p) >> 22) & 0x3ff)
+
+/* Cycle bit - indicates TRB ownership by HC or HCD */
+#define TRB_CYCLE		(1<<0)
+/*
+ * Force next event data TRB to be evaluated before task switch.
+ * Used to pass OS data back after a TD completes.
+ */
+#define TRB_ENT			(1<<1)
+/* Interrupt on short packet */
+#define TRB_ISP			(1<<2)
+/* Set PCIe no snoop attribute */
+#define TRB_NO_SNOOP		(1<<3)
+/* Chain multiple TRBs into a TD */
+#define TRB_CHAIN		(1<<4)
+/* Interrupt on completion */
+#define TRB_IOC			(1<<5)
+/* The buffer pointer contains immediate data */
+#define TRB_IDT			(1<<6)
+
+
+/* Control transfer TRB specific fields */
+#define TRB_DIR_IN		(1<<16)
+
+/* TRB type bit mask */
+#define TRB_TYPE_BITMASK	(0xfc00)
+#define TRB_TYPE(p)		((p) << 10)
+/* TRB type IDs */
+/* bulk, interrupt, isoc scatter/gather, and control data stage */
+#define TRB_NORMAL		1
+/* setup stage for control transfers */
+#define TRB_SETUP		2
+/* data stage for control transfers */
+#define TRB_DATA		3
+/* status stage for control transfers */
+#define TRB_STATUS		4
+/* isoc transfers */
+#define TRB_ISOC		5
+/* TRB for linking ring segments */
+#define TRB_LINK		6
+#define TRB_EVENT_DATA		7
+/* Transfer Ring No-op (not for the command ring) */
+#define TRB_TR_NOOP		8
+/* Command TRBs */
+/* Enable Slot Command */
+#define TRB_ENABLE_SLOT		9
+/* Disable Slot Command */
+#define TRB_DISABLE_SLOT	10
+/* Address Device Command */
+#define TRB_ADDR_DEV		11
+/* Configure Endpoint Command */
+#define TRB_CONFIG_EP		12
+/* Evaluate Context Command */
+#define TRB_EVAL_CONTEXT	13
+/* Reset Transfer Ring Command */
+#define TRB_RESET_RING		14
+/* Stop Transfer Ring Command */
+#define TRB_STOP_RING		15
+/* Set Transfer Ring Dequeue Pointer Command */
+#define TRB_SET_DEQ		16
+/* Reset Device Command */
+#define TRB_RESET_DEV		17
+/* Force Event Command (opt) */
+#define TRB_FORCE_EVENT		18
+/* Negotiate Bandwidth Command (opt) */
+#define TRB_NEG_BANDWIDTH	19
+/* Set Latency Tolerance Value Command (opt) */
+#define TRB_SET_LT		20
+/* Get port bandwidth Command */
+#define TRB_GET_BW		21
+/* Force Header Command - generate a transaction or link management packet */
+#define TRB_FORCE_HEADER	22
+/* No-op Command - not for transfer rings */
+#define TRB_CMD_NOOP		23
+/* TRB IDs 24-31 reserved */
+/* Event TRBs */
+/* Transfer Event */
+#define TRB_TRANSFER		32
+/* Command Completion Event */
+#define TRB_COMPLETION		33
+/* Port Status Change Event */
+#define TRB_PORT_STATUS		34
+/* Bandwidth Request Event (opt) */
+#define TRB_BANDWIDTH_EVENT	35
+/* Doorbell Event (opt) */
+#define TRB_DOORBELL		36
+/* Host Controller Event */
+#define TRB_HC_EVENT		37
+/* Device Notification Event - device sent function wake notification */
+#define TRB_DEV_NOTE		38
+/* MFINDEX Wrap Event - microframe counter wrapped */
+#define TRB_MFINDEX_WRAP	39
+/* TRB IDs 40-47 reserved, 48-63 are vendor-defined */
+
+/*
+ * TRBS_PER_SEGMENT must be a multiple of 4,
+ * since the command ring is 64-byte aligned.
+ * It must also be greater than 16.
+ */
+#define TRBS_PER_SEGMENT	64
+#define SEGMENT_SIZE		(TRBS_PER_SEGMENT*16)
+
+struct xhci_segment {
+	union xhci_trb		*trbs;
+	/* private to HCD */
+	struct xhci_segment	*next;
+	dma_addr_t		dma;
+} __attribute__ ((packed));
+
+struct xhci_ring {
+	struct xhci_segment	*first_seg;
+	union xhci_trb		*enqueue;
+	union xhci_trb		*dequeue;
+	/*
+	 * Write the cycle state into the TRB cycle field to give ownership of
+	 * the TRB to the host controller (if we are the producer), or to check
+	 * if we own the TRB (if we are the consumer).  See section 4.9.1.
+	 */
+	u32			cycle_state;
+};
+
+struct xhci_erst_entry {
+	/* 64-bit event ring segment address */
+	u32	seg_addr[2];
+	u32	seg_size;
+	/* Set to zero */
+	u32	rsvd;
+} __attribute__ ((packed));
+
+struct xhci_erst {
+	struct xhci_erst_entry	*entries;
+	unsigned int		num_entries;
+	/* xhci->event_ring keeps track of segment dma addresses */
+	dma_addr_t		erst_dma_addr;
+	/* Num entries the ERST can contain */
+	unsigned int		erst_size;
+};
+
+/*
+ * Each segment table entry is 4*32bits long.  1K seems like an ok size:
+ * (1K bytes * 8 bits/byte) / (4*32 bits) = 64 segment entries in the table,
+ * meaning 64 ring segments.
+ */
+/* Initial number of event ring segments allocated */
+#define	ERST_NUM_SEGS		1
+/* Number of ERST entries that fit in 1K (see the math above) */
+#define	ERST_SIZE		64
+/* Initial allocated size of the ERST, in number of entries */
+#define	ERST_ENTRIES		1
+/* XXX: Make these module parameters */
+
 
 /* There is one ehci_hci structure per controller */
 struct xhci_hcd {
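TRB_TYPE() above shifts a type ID into bits 15:10 of a TRB's control word; code walking a ring frequently needs the inverse. A hedged sketch of the decode (neither helper is defined by this patch):

	/* Sketch: recover the type ID from trb->link.control.
	 * TRB_TYPE_BITMASK (0xfc00) covers bits 15:10, the mirror image of
	 * TRB_TYPE(p) == (p) << 10. */
	#define GET_TRB_TYPE(ctrl)	(((ctrl) & TRB_TYPE_BITMASK) >> 10)

	/* e.g. spotting the Link TRB while walking a segment: */
	static inline bool xhci_trb_is_link(union xhci_trb *trb)
	{
		return GET_TRB_TYPE(trb->link.control) == TRB_LINK;
	}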
@@ -414,6 +696,7 @@ struct xhci_hcd {
 	struct xhci_cap_regs __iomem *cap_regs;
 	struct xhci_op_regs __iomem *op_regs;
 	struct xhci_run_regs __iomem *run_regs;
+	struct xhci_doorbell_array __iomem *dba;
 	/* Our HCD's current interrupter register set */
 	struct	intr_reg __iomem *ir_set;
 
@@ -441,6 +724,14 @@ struct xhci_hcd {
 	/* only one MSI vector for now, but might need more later */
 	int		msix_count;
 	struct msix_entry	*msix_entries;
+	/* data structures */
+	struct xhci_ring	*cmd_ring;
+	struct xhci_ring	*event_ring;
+	struct xhci_erst	erst;
+
+	/* DMA pools */
+	struct dma_pool	*device_pool;
+	struct dma_pool	*segment_pool;
 };
 
 /* convert between an HCD pointer and the corresponding EHCI_HCD */
@@ -488,6 +779,11 @@ static inline void xhci_writel(const struct xhci_hcd *xhci,
 /* xHCI debugging */
 void xhci_print_ir_set(struct xhci_hcd *xhci, struct intr_reg *ir_set, int set_num);
 void xhci_print_registers(struct xhci_hcd *xhci);
+void xhci_dbg_regs(struct xhci_hcd *xhci);
+void xhci_print_run_regs(struct xhci_hcd *xhci);
+void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring);
+void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
+void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
 
 /* xHCI memory managment */
 void xhci_mem_cleanup(struct xhci_hcd *xhci);
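As a sanity check on the ERST sizing comments in xhci.h: struct xhci_erst_entry is four u32s, i.e. 16 bytes, so the pci_alloc_consistent() in xhci_mem_init comes to 16 bytes with ERST_NUM_SEGS = 1, and a full 64-entry table would occupy exactly 1 KB. A sketch of that arithmetic:

	/* Sketch: ERST allocation arithmetic used by xhci_mem_init.
	 * sizeof(struct xhci_erst_entry) == 4 * sizeof(u32) == 16 bytes. */
	size_t erst_bytes = sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS; /* 16 */
	size_t erst_1k    = sizeof(struct xhci_erst_entry) * ERST_SIZE;     /* 1024 */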