author      Andrea Bastoni <bastoni@cs.unc.edu>   2010-05-30 19:16:45 -0400
committer   Andrea Bastoni <bastoni@cs.unc.edu>   2010-05-30 19:16:45 -0400
commit      ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree        644b88f8a71896307d71438e9b3af49126ffb22b /drivers/usb/host/xhci.c
parent      43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent      3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'drivers/usb/host/xhci.c')
 -rw-r--r--   drivers/usb/host/xhci.c   1918
 1 file changed, 1918 insertions(+), 0 deletions(-)
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
new file mode 100644
index 000000000000..7e4277273908
--- /dev/null
+++ b/drivers/usb/host/xhci.c
@@ -0,0 +1,1918 @@
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>

#include "xhci.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: "usec" microseconds have
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */
static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
                u32 mask, u32 done, int usec)
{
        u32 result;

        do {
                result = xhci_readl(xhci, ptr);
                if (result == ~(u32)0)          /* card removed */
                        return -ENODEV;
                result &= mask;
                if (result == done)
                        return 0;
                udelay(1);
                usec--;
        } while (usec > 0);
        return -ETIMEDOUT;
}
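
/*
 * Usage sketch (editor's note, not in the original file): to wait for a
 * bit to *clear*, pass 0 as "done".  xhci_reset() below uses exactly this
 * to wait up to 250 ms for the hardware to clear CMD_RESET:
 *
 *      handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000);
 */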

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
        u32 halted;
        u32 cmd;
        u32 mask;

        mask = ~(XHCI_IRQS);
        halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
        if (!halted)
                mask &= ~CMD_RUN;

        cmd = xhci_readl(xhci, &xhci->op_regs->command);
        cmd &= mask;
        xhci_writel(xhci, cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 microframes of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 * XXX: shouldn't we set HC_STATE_HALT here somewhere?
 */
int xhci_halt(struct xhci_hcd *xhci)
{
        xhci_dbg(xhci, "// Halt the HC\n");
        xhci_quiesce(xhci);

        return handshake(xhci, &xhci->op_regs->status,
                        STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
}

/*
 * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
        u32 command;
        u32 state;

        state = xhci_readl(xhci, &xhci->op_regs->status);
        if ((state & STS_HALT) == 0) {
                xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
                return 0;
        }

        xhci_dbg(xhci, "// Reset the HC\n");
        command = xhci_readl(xhci, &xhci->op_regs->command);
        command |= CMD_RESET;
        xhci_writel(xhci, command, &xhci->op_regs->command);
        /* XXX: Why does EHCI set this here?  Shouldn't other code do this? */
        xhci_to_hcd(xhci)->state = HC_STATE_HALT;

        return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000);
}

#if 0
/* Set up MSI-X table for entry 0 (may claim other entries later) */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
        int ret;
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

        xhci->msix_count = 0;
        /* XXX: did I do this right?  ixgbe does kcalloc for more than one */
        xhci->msix_entries = kmalloc(sizeof(struct msix_entry), GFP_KERNEL);
        if (!xhci->msix_entries) {
                xhci_err(xhci, "Failed to allocate MSI-X entries\n");
                return -ENOMEM;
        }
        xhci->msix_entries[0].entry = 0;

        ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
        if (ret) {
                xhci_err(xhci, "Failed to enable MSI-X\n");
                goto free_entries;
        }

        /*
         * Pass the xhci pointer value as the request_irq "cookie".
         * If more irqs are added, this will need to be unique for each one.
         */
        ret = request_irq(xhci->msix_entries[0].vector, &xhci_irq, 0,
                        "xHCI", xhci_to_hcd(xhci));
        if (ret) {
                xhci_err(xhci, "Failed to allocate MSI-X interrupt\n");
                goto disable_msix;
        }
        xhci_dbg(xhci, "Finished setting up MSI-X\n");
        return 0;

disable_msix:
        pci_disable_msix(pdev);
free_entries:
        kfree(xhci->msix_entries);
        xhci->msix_entries = NULL;
        return ret;
}

/* XXX: code duplication; can xhci_setup_msix call this? */
/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
        if (!xhci->msix_entries)
                return;

        free_irq(xhci->msix_entries[0].vector, xhci);
        pci_disable_msix(pdev);
        kfree(xhci->msix_entries);
        xhci->msix_entries = NULL;
        xhci_dbg(xhci, "Finished cleaning up MSI-X\n");
}
#endif

/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        int retval = 0;

        xhci_dbg(xhci, "xhci_init\n");
        spin_lock_init(&xhci->lock);
        if (link_quirk) {
                xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
                xhci->quirks |= XHCI_LINK_TRB_QUIRK;
        } else {
                xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
        }
        retval = xhci_mem_init(xhci, GFP_KERNEL);
        xhci_dbg(xhci, "Finished xhci_init\n");

        return retval;
}

/*
 * Called in interrupt context when there might be work
 * queued on the event ring
 *
 * xhci->lock must be held by caller.
 */
static void xhci_work(struct xhci_hcd *xhci)
{
        u32 temp;
        u64 temp_64;

        /*
         * Clear the op reg interrupt status first,
         * so we can receive interrupts from other MSI-X interrupters.
         * Write 1 to clear the interrupt status.
         */
        temp = xhci_readl(xhci, &xhci->op_regs->status);
        temp |= STS_EINT;
        xhci_writel(xhci, temp, &xhci->op_regs->status);
        /* FIXME when MSI-X is supported and there are multiple vectors */
        /* Clear the MSI-X event interrupt status */

        /* Acknowledge the interrupt */
        temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        temp |= 0x3;
        xhci_writel(xhci, temp, &xhci->ir_set->irq_pending);
        /* Flush posted writes */
        xhci_readl(xhci, &xhci->ir_set->irq_pending);

        if (xhci->xhc_state & XHCI_STATE_DYING)
                xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
                                "Shouldn't IRQs be disabled?\n");
        else
                /* FIXME this should be a delayed service routine
                 * that clears the EHB.
                 */
                xhci_handle_event(xhci);

        /* Clear the event handler busy flag (RW1C); the event ring should be empty. */
        temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
        xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue);
        /* Flush posted writes -- FIXME is this necessary? */
        xhci_readl(xhci, &xhci->ir_set->irq_pending);
}

/*-------------------------------------------------------------------------*/

/*
 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
 * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
 * indicators of an event TRB error, but we check the status *first* to be safe.
 */
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        u32 temp, temp2;
        union xhci_trb *trb;

        spin_lock(&xhci->lock);
        trb = xhci->event_ring->dequeue;
        /* Check if the xHC generated the interrupt, or the irq is shared */
        temp = xhci_readl(xhci, &xhci->op_regs->status);
        temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        if (temp == 0xffffffff && temp2 == 0xffffffff)
                goto hw_died;

        if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
                spin_unlock(&xhci->lock);
                return IRQ_NONE;
        }
        xhci_dbg(xhci, "op reg status = %08x\n", temp);
        xhci_dbg(xhci, "ir set irq_pending = %08x\n", temp2);
        xhci_dbg(xhci, "Event ring dequeue ptr:\n");
        xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
                        (unsigned long long)xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
                        lower_32_bits(trb->link.segment_ptr),
                        upper_32_bits(trb->link.segment_ptr),
                        (unsigned int) trb->link.intr_target,
                        (unsigned int) trb->link.control);

        if (temp & STS_FATAL) {
                xhci_warn(xhci, "WARNING: Host System Error\n");
                xhci_halt(xhci);
hw_died:
                xhci_to_hcd(xhci)->state = HC_STATE_HALT;
                spin_unlock(&xhci->lock);
                return -ESHUTDOWN;
        }

        xhci_work(xhci);
        spin_unlock(&xhci->lock);

        return IRQ_HANDLED;
}

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
void xhci_event_ring_work(unsigned long arg)
{
        unsigned long flags;
        int temp;
        u64 temp_64;
        struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
        int i, j;

        xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

        spin_lock_irqsave(&xhci->lock, flags);
        temp = xhci_readl(xhci, &xhci->op_regs->status);
        xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
        if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
                xhci_dbg(xhci, "HW died, polling stopped.\n");
                spin_unlock_irqrestore(&xhci->lock, flags);
                return;
        }

        temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
        xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled);
        xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
        xhci->error_bitmask = 0;
        xhci_dbg(xhci, "Event ring:\n");
        xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
        xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
        temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
        temp_64 &= ~ERST_PTR_MASK;
        xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
        xhci_dbg(xhci, "Command ring:\n");
        xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
        xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
        xhci_dbg_cmd_ptrs(xhci);
        for (i = 0; i < MAX_HC_SLOTS; ++i) {
                if (!xhci->devs[i])
                        continue;
                for (j = 0; j < 31; ++j) {
                        struct xhci_ring *ring = xhci->devs[i]->eps[j].ring;
                        if (!ring)
                                continue;
                        xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
                        xhci_debug_segment(xhci, ring->deq_seg);
                }
        }

        if (xhci->noops_submitted != NUM_TEST_NOOPS)
                if (xhci_setup_one_noop(xhci))
                        xhci_ring_cmd_db(xhci);
        spin_unlock_irqrestore(&xhci->lock, flags);

        if (!xhci->zombie)
                mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
        else
                xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
        u32 temp;
        u64 temp_64;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        void (*doorbell)(struct xhci_hcd *) = NULL;

        hcd->uses_new_polling = 1;
        hcd->poll_rh = 0;

        xhci_dbg(xhci, "xhci_run\n");
#if 0   /* FIXME: MSI not setup yet */
        /* Do this at the very last minute */
        ret = xhci_setup_msix(xhci);
        if (!ret)
                return ret;

        return -ENOSYS;
#endif
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
        init_timer(&xhci->event_ring_timer);
        xhci->event_ring_timer.data = (unsigned long) xhci;
        xhci->event_ring_timer.function = xhci_event_ring_work;
        /* Poll the event ring */
        xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
        xhci->zombie = 0;
        xhci_dbg(xhci, "Setting event ring polling timer\n");
        add_timer(&xhci->event_ring_timer);
#endif

        xhci_dbg(xhci, "Command ring memory map follows:\n");
        xhci_debug_ring(xhci, xhci->cmd_ring);
        xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
        xhci_dbg_cmd_ptrs(xhci);

        xhci_dbg(xhci, "ERST memory map follows:\n");
        xhci_dbg_erst(xhci, &xhci->erst);
        xhci_dbg(xhci, "Event ring:\n");
        xhci_debug_ring(xhci, xhci->event_ring);
        xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
        temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
        temp_64 &= ~ERST_PTR_MASK;
        xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);

        xhci_dbg(xhci, "// Set the interrupt modulation register\n");
        temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
        temp &= ~ER_IRQ_INTERVAL_MASK;
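        /*
         * Editor's note (an assumption, not from the original source): the
         * xHCI spec defines the IMOD interval in 250 ns increments, so the
         * value 160 below would moderate interrupts to at most one every
         * 160 * 250 ns = 40 us.
         */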
        temp |= (u32) 160;
        xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

        /* Set the HCD state before we enable the irqs */
        hcd->state = HC_STATE_RUNNING;
        temp = xhci_readl(xhci, &xhci->op_regs->command);
        temp |= (CMD_EIE);
        xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n", temp);
        xhci_writel(xhci, temp, &xhci->op_regs->command);

        temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
                        xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
        xhci_writel(xhci, ER_IRQ_ENABLE(temp),
                        &xhci->ir_set->irq_pending);
        xhci_print_ir_set(xhci, xhci->ir_set, 0);

        if (NUM_TEST_NOOPS > 0)
                doorbell = xhci_setup_one_noop(xhci);

        temp = xhci_readl(xhci, &xhci->op_regs->command);
        temp |= (CMD_RUN);
        xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n", temp);
        xhci_writel(xhci, temp, &xhci->op_regs->command);
        /* Flush PCI posted writes */
        temp = xhci_readl(xhci, &xhci->op_regs->command);
        xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp);
        if (doorbell)
                (*doorbell)(xhci);

        xhci_dbg(xhci, "Finished xhci_run\n");
        return 0;
}

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
        u32 temp;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        spin_lock_irq(&xhci->lock);
        xhci_halt(xhci);
        xhci_reset(xhci);
        spin_unlock_irq(&xhci->lock);

#if 0   /* No MSI yet */
        xhci_cleanup_msix(xhci);
#endif
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
        /* Tell the event ring poll function not to reschedule */
        xhci->zombie = 1;
        del_timer_sync(&xhci->event_ring_timer);
#endif

        xhci_dbg(xhci, "// Disabling event ring interrupts\n");
        temp = xhci_readl(xhci, &xhci->op_regs->status);
        xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
        temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        xhci_writel(xhci, ER_IRQ_DISABLE(temp),
                        &xhci->ir_set->irq_pending);
        xhci_print_ir_set(xhci, xhci->ir_set, 0);

        xhci_dbg(xhci, "cleaning up memory\n");
        xhci_mem_cleanup(xhci);
        xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
                        xhci_readl(xhci, &xhci->op_regs->status));
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        spin_lock_irq(&xhci->lock);
        xhci_halt(xhci);
        spin_unlock_irq(&xhci->lock);

#if 0
        xhci_cleanup_msix(xhci);
#endif

        xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
                        xhci_readl(xhci, &xhci->op_regs->status));
}

/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  To build the
 * endpoint's bitmask, left shift 1 by the return value plus one (see
 * xhci_get_endpoint_flag()).
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
        unsigned int index;
        if (usb_endpoint_xfer_control(desc))
                index = (unsigned int) (usb_endpoint_num(desc)*2);
        else
                index = (unsigned int) (usb_endpoint_num(desc)*2) +
                        (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
        return index;
}
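
/*
 * Worked examples (editor's note, not in the original file): ep 1 IN
 * (bEndpointAddress 0x81) gives index = (1 * 2) + 1 - 1 = 2; ep 2 OUT
 * (0x02) gives index = (2 * 2) + 0 - 1 = 3; the default control
 * endpoint 0 gives index = 0 * 2 = 0.
 */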

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
        return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
        return 1 << (ep_index + 1);
}
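
/*
 * Worked example (editor's note, not in the original file): ep 1 IN has
 * endpoint index 2, so its add/drop flag is 1 << (2 + 1) = 0b1000, which
 * matches the added_ctxs example in the comment below.
 */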

/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
        return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep, int check_ep, const char *func) {
        if (!hcd || (check_ep && !ep) || !udev) {
                printk(KERN_DEBUG "xHCI %s called with invalid args\n",
                                func);
                return -EINVAL;
        }
        if (!udev->parent) {
                printk(KERN_DEBUG "xHCI %s called for root hub\n",
                                func);
                return 0;
        }
        if (!udev->slot_id) {
                printk(KERN_DEBUG "xHCI %s called with unaddressed device\n",
                                func);
                return -EINVAL;
        }
        return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
                struct usb_device *udev, struct xhci_command *command,
                bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
                unsigned int ep_index, struct urb *urb)
{
        struct xhci_container_ctx *in_ctx;
        struct xhci_container_ctx *out_ctx;
        struct xhci_input_control_ctx *ctrl_ctx;
        struct xhci_ep_ctx *ep_ctx;
        int max_packet_size;
        int hw_max_packet_size;
        int ret = 0;

        out_ctx = xhci->devs[slot_id]->out_ctx;
        ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
        hw_max_packet_size = MAX_PACKET_DECODED(ep_ctx->ep_info2);
        max_packet_size = urb->dev->ep0.desc.wMaxPacketSize;
        if (hw_max_packet_size != max_packet_size) {
                xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
                xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
                                max_packet_size);
                xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
                                hw_max_packet_size);
                xhci_dbg(xhci, "Issuing evaluate context command.\n");

                /* Set up the modified control endpoint 0 */
                xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
                                xhci->devs[slot_id]->out_ctx, ep_index);
                in_ctx = xhci->devs[slot_id]->in_ctx;
                ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
                ep_ctx->ep_info2 &= ~MAX_PACKET_MASK;
                ep_ctx->ep_info2 |= MAX_PACKET(max_packet_size);

                /* Set up the input context flags for the command */
                /* FIXME: This won't work if a non-default control endpoint
                 * changes max packet sizes.
                 */
                ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
                ctrl_ctx->add_flags = EP0_FLAG;
                ctrl_ctx->drop_flags = 0;

                xhci_dbg(xhci, "Slot %d input context\n", slot_id);
                xhci_dbg_ctx(xhci, in_ctx, ep_index);
                xhci_dbg(xhci, "Slot %d output context\n", slot_id);
                xhci_dbg_ctx(xhci, out_ctx, ep_index);

                ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
                                true, false);

                /* Clean up the input context for later use by bandwidth
                 * functions.
                 */
                ctrl_ctx->add_flags = SLOT_FLAG;
        }
        return ret;
}
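
/*
 * Editor's example (an illustrative assumption, not from the original
 * source): a full-speed device whose ep0 reports wMaxPacketSize 64 after
 * the slot was initialized with a smaller assumed value would take the
 * path above on its first control URB, issuing an Evaluate Context
 * command with only EP0_FLAG set in add_flags.
 */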

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        unsigned long flags;
        int ret = 0;
        unsigned int slot_id, ep_index;

        if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
                return -EINVAL;

        slot_id = urb->dev->slot_id;
        ep_index = xhci_get_endpoint_index(&urb->ep->desc);

        if (!xhci->devs || !xhci->devs[slot_id]) {
                if (!in_interrupt())
                        dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n");
                ret = -EINVAL;
                goto exit;
        }
        if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
                if (!in_interrupt())
                        xhci_dbg(xhci, "urb submitted during PCI suspend\n");
                ret = -ESHUTDOWN;
                goto exit;
        }
        if (usb_endpoint_xfer_control(&urb->ep->desc)) {
                /* Check to see if the max packet size for the default control
                 * endpoint changed during FS device enumeration
                 */
                if (urb->dev->speed == USB_SPEED_FULL) {
                        ret = xhci_check_maxpacket(xhci, slot_id,
                                        ep_index, urb);
                        if (ret < 0)
                                return ret;
                }

                /* We have a spinlock and interrupts disabled, so we must pass
                 * atomic context to this function, which may allocate memory.
                 */
                spin_lock_irqsave(&xhci->lock, flags);
                if (xhci->xhc_state & XHCI_STATE_DYING)
                        goto dying;
                ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
                                slot_id, ep_index);
                spin_unlock_irqrestore(&xhci->lock, flags);
        } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
                spin_lock_irqsave(&xhci->lock, flags);
                if (xhci->xhc_state & XHCI_STATE_DYING)
                        goto dying;
                ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
                                slot_id, ep_index);
                spin_unlock_irqrestore(&xhci->lock, flags);
        } else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
                spin_lock_irqsave(&xhci->lock, flags);
                if (xhci->xhc_state & XHCI_STATE_DYING)
                        goto dying;
                ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
                                slot_id, ep_index);
                spin_unlock_irqrestore(&xhci->lock, flags);
        } else {
                ret = -EINVAL;
        }
exit:
        return ret;
dying:
        xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
                        "non-responsive xHCI host.\n",
                        urb->ep->desc.bEndpointAddress, urb);
        spin_unlock_irqrestore(&xhci->lock, flags);
        return -ESHUTDOWN;
}

/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
        unsigned long flags;
        int ret;
        u32 temp;
        struct xhci_hcd *xhci;
        struct xhci_td *td;
        unsigned int ep_index;
        struct xhci_ring *ep_ring;
        struct xhci_virt_ep *ep;

        xhci = hcd_to_xhci(hcd);
        spin_lock_irqsave(&xhci->lock, flags);
        /* Make sure the URB hasn't completed or been unlinked already */
        ret = usb_hcd_check_unlink_urb(hcd, urb, status);
        if (ret || !urb->hcpriv)
                goto done;
        temp = xhci_readl(xhci, &xhci->op_regs->status);
        if (temp == 0xffffffff) {
                xhci_dbg(xhci, "HW died, freeing TD.\n");
                td = (struct xhci_td *) urb->hcpriv;

                usb_hcd_unlink_urb_from_ep(hcd, urb);
                spin_unlock_irqrestore(&xhci->lock, flags);
                usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, -ESHUTDOWN);
                kfree(td);
                return ret;
        }
        if (xhci->xhc_state & XHCI_STATE_DYING) {
                xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
                                "non-responsive xHCI host.\n",
                                urb->ep->desc.bEndpointAddress, urb);
                /* Let the stop endpoint command watchdog timer (which set this
                 * state) finish cleaning up the endpoint TD lists.  We must
                 * have caught it in the middle of dropping a lock and giving
                 * back an URB.
                 */
                goto done;
        }

        xhci_dbg(xhci, "Cancel URB %p\n", urb);
        xhci_dbg(xhci, "Event ring:\n");
        xhci_debug_ring(xhci, xhci->event_ring);
        ep_index = xhci_get_endpoint_index(&urb->ep->desc);
        ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
        ep_ring = ep->ring;
        xhci_dbg(xhci, "Endpoint ring:\n");
        xhci_debug_ring(xhci, ep_ring);
        td = (struct xhci_td *) urb->hcpriv;

        list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
        /* Queue a stop endpoint command, but only if this is
         * the first cancellation to be handled.
         */
        if (!(ep->ep_state & EP_HALT_PENDING)) {
                ep->ep_state |= EP_HALT_PENDING;
                ep->stop_cmds_pending++;
                ep->stop_cmd_timer.expires = jiffies +
                        XHCI_STOP_EP_CMD_TIMEOUT * HZ;
                add_timer(&ep->stop_cmd_timer);
                xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
                xhci_ring_cmd_db(xhci);
        }
done:
        spin_unlock_irqrestore(&xhci->lock, flags);
        return ret;
}

/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep)
{
        struct xhci_hcd *xhci;
        struct xhci_container_ctx *in_ctx, *out_ctx;
        struct xhci_input_control_ctx *ctrl_ctx;
        struct xhci_slot_ctx *slot_ctx;
        unsigned int last_ctx;
        unsigned int ep_index;
        struct xhci_ep_ctx *ep_ctx;
        u32 drop_flag;
        u32 new_add_flags, new_drop_flags, new_slot_info;
        int ret;

        ret = xhci_check_args(hcd, udev, ep, 1, __func__);
        if (ret <= 0)
                return ret;
        xhci = hcd_to_xhci(hcd);
        xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);

        drop_flag = xhci_get_endpoint_flag(&ep->desc);
        if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
                xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
                                __func__, drop_flag);
                return 0;
        }

        if (!xhci->devs || !xhci->devs[udev->slot_id]) {
                xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
                                __func__);
                return -EINVAL;
        }

        in_ctx = xhci->devs[udev->slot_id]->in_ctx;
        out_ctx = xhci->devs[udev->slot_id]->out_ctx;
        ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
        ep_index = xhci_get_endpoint_index(&ep->desc);
        ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
        /* If the HC already knows the endpoint is disabled,
         * or the HCD has noted it is disabled, ignore this request
         */
        if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
                        ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
                xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
                                __func__, ep);
                return 0;
        }

        ctrl_ctx->drop_flags |= drop_flag;
        new_drop_flags = ctrl_ctx->drop_flags;

        ctrl_ctx->add_flags &= ~drop_flag;
        new_add_flags = ctrl_ctx->add_flags;

        last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags);
        slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
        /* Update the last valid endpoint context, if we deleted the last one */
        if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
                slot_ctx->dev_info &= ~LAST_CTX_MASK;
                slot_ctx->dev_info |= LAST_CTX(last_ctx);
        }
        new_slot_info = slot_ctx->dev_info;

        xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

        xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
                        (unsigned int) ep->desc.bEndpointAddress,
                        udev->slot_id,
                        (unsigned int) new_drop_flags,
                        (unsigned int) new_add_flags,
                        (unsigned int) new_slot_info);
        return 0;
}

/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep)
{
        struct xhci_hcd *xhci;
        struct xhci_container_ctx *in_ctx, *out_ctx;
        unsigned int ep_index;
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_slot_ctx *slot_ctx;
        struct xhci_input_control_ctx *ctrl_ctx;
        u32 added_ctxs;
        unsigned int last_ctx;
        u32 new_add_flags, new_drop_flags, new_slot_info;
        int ret = 0;

        ret = xhci_check_args(hcd, udev, ep, 1, __func__);
        if (ret <= 0) {
                /* So we won't queue a reset ep command for a root hub */
                ep->hcpriv = NULL;
                return ret;
        }
        xhci = hcd_to_xhci(hcd);

        added_ctxs = xhci_get_endpoint_flag(&ep->desc);
        last_ctx = xhci_last_valid_endpoint(added_ctxs);
        if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
                /* FIXME when we have to issue an evaluate endpoint command to
                 * deal with ep0 max packet size changing once we get the
                 * descriptors
                 */
                xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
                                __func__, added_ctxs);
                return 0;
        }

        if (!xhci->devs || !xhci->devs[udev->slot_id]) {
                xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
                                __func__);
                return -EINVAL;
        }

        in_ctx = xhci->devs[udev->slot_id]->in_ctx;
        out_ctx = xhci->devs[udev->slot_id]->out_ctx;
        ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
        ep_index = xhci_get_endpoint_index(&ep->desc);
        ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
        /* If the HCD has already noted the endpoint is enabled,
         * ignore this request.
         */
        if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
                xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
                                __func__, ep);
                return 0;
        }

        /*
         * Configuration and alternate setting changes must be done in
         * process context, not interrupt context (or so documentation
         * for usb_set_interface() and usb_set_configuration() claim).
         */
        if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
                                udev, ep, GFP_NOIO) < 0) {
                dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
                                __func__, ep->desc.bEndpointAddress);
                return -ENOMEM;
        }

        ctrl_ctx->add_flags |= added_ctxs;
        new_add_flags = ctrl_ctx->add_flags;

        /* If xhci_endpoint_disable() was called for this endpoint, but the
         * xHC hasn't been notified yet through the check_bandwidth() call,
         * this re-adds a new state for the endpoint from the new endpoint
         * descriptors.  We must drop and re-add this endpoint, so we leave the
         * drop flags alone.
         */
        new_drop_flags = ctrl_ctx->drop_flags;

        slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
        /* Update the last valid endpoint context, if we just added one past it */
        if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
                slot_ctx->dev_info &= ~LAST_CTX_MASK;
                slot_ctx->dev_info |= LAST_CTX(last_ctx);
        }
        new_slot_info = slot_ctx->dev_info;

        /* Store the usb_device pointer for later use */
        ep->hcpriv = udev;

        xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
                        (unsigned int) ep->desc.bEndpointAddress,
                        udev->slot_id,
                        (unsigned int) new_drop_flags,
                        (unsigned int) new_add_flags,
                        (unsigned int) new_slot_info);
        return 0;
}

static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
        struct xhci_input_control_ctx *ctrl_ctx;
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_slot_ctx *slot_ctx;
        int i;

        /* When a device's add flag and drop flag are zero, any subsequent
         * configure endpoint command will leave that endpoint's state
         * untouched.  Make sure we don't leave any old state in the input
         * endpoint contexts.
         */
        ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
        ctrl_ctx->drop_flags = 0;
        ctrl_ctx->add_flags = 0;
        slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
        slot_ctx->dev_info &= ~LAST_CTX_MASK;
        /* Endpoint 0 is always valid */
        slot_ctx->dev_info |= LAST_CTX(1);
        for (i = 1; i < 31; ++i) {
                ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
                ep_ctx->ep_info = 0;
                ep_ctx->ep_info2 = 0;
                ep_ctx->deq = 0;
                ep_ctx->tx_info = 0;
        }
}

static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
                struct usb_device *udev, int *cmd_status)
{
        int ret;

        switch (*cmd_status) {
        case COMP_ENOMEM:
                dev_warn(&udev->dev, "Not enough host controller resources "
                                "for new device state.\n");
                ret = -ENOMEM;
                /* FIXME: can we allocate more resources for the HC? */
                break;
        case COMP_BW_ERR:
                dev_warn(&udev->dev, "Not enough bandwidth "
                                "for new device state.\n");
                ret = -ENOSPC;
                /* FIXME: can we go back to the old state? */
                break;
        case COMP_TRB_ERR:
                /* the HCD set up something wrong */
                dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
                                "add flag = 1, "
                                "and endpoint is not disabled.\n");
                ret = -EINVAL;
                break;
        case COMP_SUCCESS:
                dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
                ret = 0;
                break;
        default:
                xhci_err(xhci, "ERROR: unexpected command completion "
                                "code 0x%x.\n", *cmd_status);
                ret = -EINVAL;
                break;
        }
        return ret;
}

static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
                struct usb_device *udev, int *cmd_status)
{
        int ret;
        struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];

        switch (*cmd_status) {
        case COMP_EINVAL:
                dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
                                "context command.\n");
                ret = -EINVAL;
                break;
        case COMP_EBADSLT:
                dev_warn(&udev->dev, "WARN: slot not enabled for "
                                "evaluate context command.\n");
                ret = -EINVAL;
                break;
        case COMP_CTX_STATE:
                dev_warn(&udev->dev, "WARN: invalid context state for "
                                "evaluate context command.\n");
                xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
                ret = -EINVAL;
                break;
        case COMP_SUCCESS:
                dev_dbg(&udev->dev, "Successful evaluate context command\n");
                ret = 0;
                break;
        default:
                xhci_err(xhci, "ERROR: unexpected command completion "
                                "code 0x%x.\n", *cmd_status);
                ret = -EINVAL;
                break;
        }
        return ret;
}

/* Issue a configure endpoint command or evaluate context command
 * and wait for it to finish.
 */
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
                struct usb_device *udev,
                struct xhci_command *command,
                bool ctx_change, bool must_succeed)
{
        int ret;
        int timeleft;
        unsigned long flags;
        struct xhci_container_ctx *in_ctx;
        struct completion *cmd_completion;
        int *cmd_status;
        struct xhci_virt_device *virt_dev;

        spin_lock_irqsave(&xhci->lock, flags);
        virt_dev = xhci->devs[udev->slot_id];
        if (command) {
                in_ctx = command->in_ctx;
                cmd_completion = command->completion;
                cmd_status = &command->status;
                command->command_trb = xhci->cmd_ring->enqueue;
                list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
        } else {
                in_ctx = virt_dev->in_ctx;
                cmd_completion = &virt_dev->cmd_completion;
                cmd_status = &virt_dev->cmd_status;
        }
        init_completion(cmd_completion);

        if (!ctx_change)
                ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
                                udev->slot_id, must_succeed);
        else
                ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
                                udev->slot_id);
        if (ret < 0) {
                if (command)
                        list_del(&command->cmd_list);
                spin_unlock_irqrestore(&xhci->lock, flags);
                xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
                return -ENOMEM;
        }
        xhci_ring_cmd_db(xhci);
        spin_unlock_irqrestore(&xhci->lock, flags);

        /* Wait for the configure endpoint command to complete */
        timeleft = wait_for_completion_interruptible_timeout(
                        cmd_completion,
                        USB_CTRL_SET_TIMEOUT);
        if (timeleft <= 0) {
                xhci_warn(xhci, "%s while waiting for %s command\n",
                                timeleft == 0 ? "Timeout" : "Signal",
                                ctx_change == 0 ?
                                        "configure endpoint" :
                                        "evaluate context");
                /* FIXME cancel the configure endpoint command */
                return -ETIME;
        }

        if (!ctx_change)
                return xhci_configure_endpoint_result(xhci, udev, cmd_status);
        return xhci_evaluate_context_result(xhci, udev, cmd_status);
}

/* Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint().  If this call fails, the USB core is expected
 * to call xhci_reset_bandwidth().
 *
 * Since we are in the middle of changing either configuration or
 * installing a new alt setting, the USB core won't allow URBs to be
 * enqueued for any endpoint on the old config or interface.  Nothing
 * else should be touching the xhci->devs[slot_id] structure, so we
 * don't need to take the xhci->lock for manipulating that.
 */
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
        int i;
        int ret = 0;
        struct xhci_hcd *xhci;
        struct xhci_virt_device *virt_dev;
        struct xhci_input_control_ctx *ctrl_ctx;
        struct xhci_slot_ctx *slot_ctx;

        ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
        if (ret <= 0)
                return ret;
        xhci = hcd_to_xhci(hcd);

        if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) {
                xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
                                __func__);
                return -EINVAL;
        }
        xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
        virt_dev = xhci->devs[udev->slot_id];

        /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
        ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
        ctrl_ctx->add_flags |= SLOT_FLAG;
        ctrl_ctx->add_flags &= ~EP0_FLAG;
        ctrl_ctx->drop_flags &= ~SLOT_FLAG;
        ctrl_ctx->drop_flags &= ~EP0_FLAG;
        xhci_dbg(xhci, "New Input Control Context:\n");
        slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
        xhci_dbg_ctx(xhci, virt_dev->in_ctx,
                        LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));

        ret = xhci_configure_endpoint(xhci, udev, NULL,
                        false, false);
        if (ret) {
                /* Caller should call reset_bandwidth() */
                return ret;
        }

        xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
        xhci_dbg_ctx(xhci, virt_dev->out_ctx,
                        LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));

        xhci_zero_in_ctx(xhci, virt_dev);
        /* Install new rings and free or cache any old rings */
        for (i = 1; i < 31; ++i) {
                if (!virt_dev->eps[i].new_ring)
                        continue;
                /* Only cache or free the old ring if it exists.
                 * It may not if this is the first add of an endpoint.
                 */
                if (virt_dev->eps[i].ring) {
                        xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
                }
                virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
                virt_dev->eps[i].new_ring = NULL;
        }

        return ret;
}

void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
        struct xhci_hcd *xhci;
        struct xhci_virt_device *virt_dev;
        int i, ret;

        ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
        if (ret <= 0)
                return;
        xhci = hcd_to_xhci(hcd);

        if (!xhci->devs || !xhci->devs[udev->slot_id]) {
                xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
                                __func__);
                return;
        }
        xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
        virt_dev = xhci->devs[udev->slot_id];
        /* Free any rings allocated for added endpoints */
        for (i = 0; i < 31; ++i) {
                if (virt_dev->eps[i].new_ring) {
                        xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
                        virt_dev->eps[i].new_ring = NULL;
                }
        }
        xhci_zero_in_ctx(xhci, virt_dev);
}

static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
                struct xhci_container_ctx *in_ctx,
                struct xhci_container_ctx *out_ctx,
                u32 add_flags, u32 drop_flags)
{
        struct xhci_input_control_ctx *ctrl_ctx;
        ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
        ctrl_ctx->add_flags = add_flags;
        ctrl_ctx->drop_flags = drop_flags;
        xhci_slot_copy(xhci, in_ctx, out_ctx);
        ctrl_ctx->add_flags |= SLOT_FLAG;

        xhci_dbg(xhci, "Input Context:\n");
        xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
}

void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
                unsigned int slot_id, unsigned int ep_index,
                struct xhci_dequeue_state *deq_state)
{
        struct xhci_container_ctx *in_ctx;
        struct xhci_ep_ctx *ep_ctx;
        u32 added_ctxs;
        dma_addr_t addr;

        xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
                        xhci->devs[slot_id]->out_ctx, ep_index);
        in_ctx = xhci->devs[slot_id]->in_ctx;
        ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
        addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
                        deq_state->new_deq_ptr);
        if (addr == 0) {
                xhci_warn(xhci, "WARN Cannot submit config ep after "
                                "reset ep command\n");
                xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
                                deq_state->new_deq_seg,
                                deq_state->new_deq_ptr);
                return;
        }
        ep_ctx->deq = addr | deq_state->new_cycle_state;

        added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
        xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
                        xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
}
1359 | |||
1360 | void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, | ||
1361 | struct usb_device *udev, unsigned int ep_index) | ||
1362 | { | ||
1363 | struct xhci_dequeue_state deq_state; | ||
1364 | struct xhci_virt_ep *ep; | ||
1365 | |||
1366 | xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n"); | ||
1367 | ep = &xhci->devs[udev->slot_id]->eps[ep_index]; | ||
1368 | /* We need to move the HW's dequeue pointer past this TD, | ||
1369 | * or it will attempt to resend it on the next doorbell ring. | ||
1370 | */ | ||
1371 | xhci_find_new_dequeue_state(xhci, udev->slot_id, | ||
1372 | ep_index, ep->stopped_td, | ||
1373 | &deq_state); | ||
1374 | |||
1375 | /* HW with the reset endpoint quirk will use the saved dequeue state to | ||
1376 | * issue a configure endpoint command later. | ||
1377 | */ | ||
1378 | if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { | ||
1379 | xhci_dbg(xhci, "Queueing new dequeue state\n"); | ||
1380 | xhci_queue_new_dequeue_state(xhci, udev->slot_id, | ||
1381 | ep_index, &deq_state); | ||
1382 | } else { | ||
1383 | /* Better hope no one uses the input context between now and the | ||
1384 | * reset endpoint completion! | ||
1385 | */ | ||
1386 | xhci_dbg(xhci, "Setting up input context for " | ||
1387 | "configure endpoint command\n"); | ||
1388 | xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id, | ||
1389 | ep_index, &deq_state); | ||
1390 | } | ||
1391 | } | ||
1392 | |||
1393 | /* Deal with stalled endpoints. The core should have sent the control message | ||
1394 | * to clear the halt condition. However, we need to make the xHCI hardware | ||
1395 | * reset its sequence number, since a device will expect a sequence number of | ||
1396 | * zero after the halt condition is cleared. | ||
1397 | * Context: in_interrupt | ||
1398 | */ | ||
1399 | void xhci_endpoint_reset(struct usb_hcd *hcd, | ||
1400 | struct usb_host_endpoint *ep) | ||
1401 | { | ||
1402 | struct xhci_hcd *xhci; | ||
1403 | struct usb_device *udev; | ||
1404 | unsigned int ep_index; | ||
1405 | unsigned long flags; | ||
1406 | int ret; | ||
1407 | struct xhci_virt_ep *virt_ep; | ||
1408 | |||
1409 | xhci = hcd_to_xhci(hcd); | ||
1410 | udev = (struct usb_device *) ep->hcpriv; | ||
1411 | /* Called with a root hub endpoint (or an endpoint that wasn't added | ||
1412 | * with xhci_add_endpoint()). | ||
1413 | */ | ||
1414 | if (!ep->hcpriv) | ||
1415 | return; | ||
1416 | ep_index = xhci_get_endpoint_index(&ep->desc); | ||
1417 | virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index]; | ||
1418 | if (!virt_ep->stopped_td) { | ||
1419 | xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n", | ||
1420 | ep->desc.bEndpointAddress); | ||
1421 | return; | ||
1422 | } | ||
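| /* A stall on a control endpoint is dealt with when the transfer event for | ||
| * the stalled transfer is handled, so there is nothing to queue here. | ||
| */ | ||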
1423 | if (usb_endpoint_xfer_control(&ep->desc)) { | ||
1424 | xhci_dbg(xhci, "Control endpoint stall already handled.\n"); | ||
1425 | return; | ||
1426 | } | ||
1427 | |||
1428 | xhci_dbg(xhci, "Queueing reset endpoint command\n"); | ||
1429 | spin_lock_irqsave(&xhci->lock, flags); | ||
1430 | ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index); | ||
1431 | /* | ||
1432 | * Can't change the ring dequeue pointer until it's transitioned to the | ||
1433 | * stopped state, which is only upon a successful reset endpoint | ||
1434 | * command. Better hope that last command worked! | ||
1435 | */ | ||
1436 | if (!ret) { | ||
1437 | xhci_cleanup_stalled_ring(xhci, udev, ep_index); | ||
1438 | kfree(virt_ep->stopped_td); | ||
| virt_ep->stopped_td = NULL; /* guard against a double free on re-entry */ | ||
1439 | xhci_ring_cmd_db(xhci); | ||
1440 | } | ||
1441 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
1442 | |||
1443 | if (ret) | ||
1444 | xhci_warn(xhci, "FIXME allocate a new ring segment\n"); | ||
1445 | } | ||
1446 | |||
1447 | /* | ||
1448 | * This submits a Reset Device Command, which will set the device state to 0, | ||
1449 | * set the device address to 0, and disable all the endpoints except the default | ||
1450 | * control endpoint. The USB core should come back and call | ||
1451 | * xhci_address_device(), and then re-set up the configuration. If this is | ||
1452 | * called because of a usb_reset_and_verify_device(), then the old alternate | ||
1453 | * settings will be re-installed through the normal bandwidth allocation | ||
1454 | * functions. | ||
1455 | * | ||
1456 | * Wait for the Reset Device command to finish. Remove all structures | ||
1457 | * associated with the endpoints that were disabled. Clear the input device | ||
1458 | * structure? Cache the rings? Reset the control endpoint 0 max packet size? | ||
1459 | */ | ||
1460 | int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev) | ||
1461 | { | ||
1462 | int ret, i; | ||
1463 | unsigned long flags; | ||
1464 | struct xhci_hcd *xhci; | ||
1465 | unsigned int slot_id; | ||
1466 | struct xhci_virt_device *virt_dev; | ||
1467 | struct xhci_command *reset_device_cmd; | ||
1468 | int timeleft; | ||
1469 | int last_freed_endpoint; | ||
1470 | |||
1471 | ret = xhci_check_args(hcd, udev, NULL, 0, __func__); | ||
1472 | if (ret <= 0) | ||
1473 | return ret; | ||
1474 | xhci = hcd_to_xhci(hcd); | ||
1475 | slot_id = udev->slot_id; | ||
1476 | virt_dev = xhci->devs[slot_id]; | ||
1477 | if (!virt_dev) { | ||
1478 | xhci_dbg(xhci, "%s called with invalid slot ID %u\n", | ||
1479 | __func__, slot_id); | ||
1480 | return -EINVAL; | ||
1481 | } | ||
1482 | |||
1483 | xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); | ||
1484 | /* Allocate the command structure that holds the struct completion. | ||
1485 | * Assume we're in process context, since the normal device reset | ||
1486 | * process has to wait for the device anyway. Storage devices are | ||
1487 | * reset as part of error handling, so use GFP_NOIO instead of | ||
1488 | * GFP_KERNEL. | ||
1489 | */ | ||
1490 | reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO); | ||
1491 | if (!reset_device_cmd) { | ||
1492 | xhci_dbg(xhci, "Couldn't allocate command structure.\n"); | ||
1493 | return -ENOMEM; | ||
1494 | } | ||
1495 | |||
1496 | /* Attempt to submit the Reset Device command to the command ring */ | ||
1497 | spin_lock_irqsave(&xhci->lock, flags); | ||
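| /* Remember where this command was queued, so the completion handler can | ||
| * match the command completion event back to this command. | ||
| */ | ||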
1498 | reset_device_cmd->command_trb = xhci->cmd_ring->enqueue; | ||
1499 | list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list); | ||
1500 | ret = xhci_queue_reset_device(xhci, slot_id); | ||
1501 | if (ret) { | ||
1502 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); | ||
1503 | list_del(&reset_device_cmd->cmd_list); | ||
1504 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
1505 | goto command_cleanup; | ||
1506 | } | ||
1507 | xhci_ring_cmd_db(xhci); | ||
1508 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
1509 | |||
1510 | /* Wait for the Reset Device command to finish */ | ||
1511 | timeleft = wait_for_completion_interruptible_timeout( | ||
1512 | reset_device_cmd->completion, | ||
1513 | USB_CTRL_SET_TIMEOUT); | ||
1514 | if (timeleft <= 0) { | ||
1515 | xhci_warn(xhci, "%s while waiting for reset device command\n", | ||
1516 | timeleft == 0 ? "Timeout" : "Signal"); | ||
1517 | spin_lock_irqsave(&xhci->lock, flags); | ||
1518 | /* The timeout might have raced with the event ring handler, so | ||
1519 | * only delete from the list if the item isn't poisoned. | ||
1520 | */ | ||
1521 | if (reset_device_cmd->cmd_list.next != LIST_POISON1) | ||
1522 | list_del(&reset_device_cmd->cmd_list); | ||
1523 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
1524 | ret = -ETIME; | ||
1525 | goto command_cleanup; | ||
1526 | } | ||
1527 | |||
1528 | /* The Reset Device command can't fail, according to the 0.95/0.96 spec, | ||
1529 | * unless we tried to reset a slot ID that wasn't enabled, | ||
1530 | * or the device wasn't in the addressed or configured state. | ||
1531 | */ | ||
1532 | ret = reset_device_cmd->status; | ||
1533 | switch (ret) { | ||
1534 | case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */ | ||
1535 | case COMP_CTX_STATE: /* 0.96 completion code for same thing */ | ||
1536 | xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n", | ||
1537 | slot_id, | ||
1538 | xhci_get_slot_state(xhci, virt_dev->out_ctx)); | ||
1539 | xhci_info(xhci, "Not freeing device rings.\n"); | ||
1540 | /* Don't treat this as an error. May change my mind later. */ | ||
1541 | ret = 0; | ||
1542 | goto command_cleanup; | ||
1543 | case COMP_SUCCESS: | ||
1544 | xhci_dbg(xhci, "Successful reset device command.\n"); | ||
1545 | break; | ||
1546 | default: | ||
1547 | if (xhci_is_vendor_info_code(xhci, ret)) | ||
1548 | break; | ||
1549 | xhci_warn(xhci, "Unknown completion code %u for " | ||
1550 | "reset device command.\n", ret); | ||
1551 | ret = -EINVAL; | ||
1552 | goto command_cleanup; | ||
1553 | } | ||
1554 | |||
1555 | /* Everything but endpoint 0 is disabled, so free or cache the rings. */ | ||
1556 | last_freed_endpoint = 1; | ||
1557 | for (i = 1; i < 31; ++i) { | ||
1558 | if (!virt_dev->eps[i].ring) | ||
1559 | continue; | ||
1560 | xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); | ||
1561 | last_freed_endpoint = i; | ||
1562 | } | ||
1563 | xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); | ||
1564 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); | ||
1565 | ret = 0; | ||
1566 | |||
1567 | command_cleanup: | ||
1568 | xhci_free_command(xhci, reset_device_cmd); | ||
1569 | return ret; | ||
1570 | } | ||
1571 | |||
1572 | /* | ||
1573 | * At this point, the struct usb_device is about to go away, the device has | ||
1574 | * disconnected, and all traffic has been stopped and the endpoints have been | ||
1575 | * disabled. Free any HC data structures associated with that device. | ||
1576 | */ | ||
1577 | void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) | ||
1578 | { | ||
1579 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | ||
1580 | struct xhci_virt_device *virt_dev; | ||
1581 | unsigned long flags; | ||
1582 | u32 state; | ||
1583 | int i; | ||
1584 | |||
1585 | if (udev->slot_id == 0) | ||
1586 | return; | ||
1587 | virt_dev = xhci->devs[udev->slot_id]; | ||
1588 | if (!virt_dev) | ||
1589 | return; | ||
1590 | |||
1591 | /* Stop any wayward timer functions (which may grab the lock) */ | ||
1592 | for (i = 0; i < 31; ++i) { | ||
1593 | virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING; | ||
1594 | del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); | ||
1595 | } | ||
1596 | |||
1597 | spin_lock_irqsave(&xhci->lock, flags); | ||
1598 | /* Don't disable the slot if the host controller is dead. */ | ||
1599 | state = xhci_readl(xhci, &xhci->op_regs->status); | ||
1600 | if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) { | ||
1601 | xhci_free_virt_device(xhci, udev->slot_id); | ||
1602 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
1603 | return; | ||
1604 | } | ||
1605 | |||
1606 | if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) { | ||
1607 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
1608 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); | ||
1609 | return; | ||
1610 | } | ||
1611 | xhci_ring_cmd_db(xhci); | ||
1612 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
1613 | /* | ||
1614 | * Event command completion handler will free any data structures | ||
1615 | * associated with the slot. XXX Can the free functions sleep? | ||
1616 | */ | ||
1617 | } | ||
1618 | |||
1619 | /* | ||
1620 | * Returns 0 if the xHC ran out of device slots, the Enable Slot command | ||
1621 | * timed out, or allocating memory failed. Returns 1 on success. | ||
1622 | */ | ||
1623 | int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) | ||
1624 | { | ||
1625 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | ||
1626 | unsigned long flags; | ||
1627 | int timeleft; | ||
1628 | int ret; | ||
1629 | |||
1630 | spin_lock_irqsave(&xhci->lock, flags); | ||
1631 | ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0); | ||
1632 | if (ret) { | ||
1633 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
1634 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); | ||
1635 | return 0; | ||
1636 | } | ||
1637 | xhci_ring_cmd_db(xhci); | ||
1638 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
1639 | |||
1640 | /* XXX: how much time for xHC slot assignment? */ | ||
1641 | timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, | ||
1642 | USB_CTRL_SET_TIMEOUT); | ||
1643 | if (timeleft <= 0) { | ||
1644 | xhci_warn(xhci, "%s while waiting for a slot\n", | ||
1645 | timeleft == 0 ? "Timeout" : "Signal"); | ||
1646 | /* FIXME cancel the enable slot request */ | ||
1647 | return 0; | ||
1648 | } | ||
1649 | |||
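| /* The command completion handler copies the assigned slot ID out of the | ||
| * Enable Slot event; it is zero if the xHC could not assign a slot. | ||
| */ | ||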
1650 | if (!xhci->slot_id) { | ||
1651 | xhci_err(xhci, "Error while assigning device slot ID\n"); | ||
1652 | return 0; | ||
1653 | } | ||
1654 | /* xhci_alloc_virt_device() does not touch rings; no need to lock */ | ||
1655 | if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) { | ||
1656 | /* Disable slot, if we can do it without mem alloc */ | ||
1657 | xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); | ||
1658 | spin_lock_irqsave(&xhci->lock, flags); | ||
1659 | if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) | ||
1660 | xhci_ring_cmd_db(xhci); | ||
1661 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
1662 | return 0; | ||
1663 | } | ||
1664 | udev->slot_id = xhci->slot_id; | ||
1665 | /* Is this a LS or FS device under a HS hub? */ | ||
1666 | /* Hub or peripheral? */ | ||
1667 | return 1; | ||
1668 | } | ||
1669 | |||
1670 | /* | ||
1671 | * Issue an Address Device command (which will issue a SetAddress request to | ||
1672 | * the device). | ||
1673 | * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so | ||
1674 | * we should only issue and wait on one address command at the same time. | ||
1675 | * | ||
1676 | * We add one to the device address issued by the hardware because the USB core | ||
1677 | * uses address 1 for the root hubs (even though they're not really devices). | ||
1678 | */ | ||
1679 | int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) | ||
1680 | { | ||
1681 | unsigned long flags; | ||
1682 | int timeleft; | ||
1683 | struct xhci_virt_device *virt_dev; | ||
1684 | int ret = 0; | ||
1685 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | ||
1686 | struct xhci_slot_ctx *slot_ctx; | ||
1687 | struct xhci_input_control_ctx *ctrl_ctx; | ||
1688 | u64 temp_64; | ||
1689 | |||
1690 | if (!udev->slot_id) { | ||
1691 | xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id); | ||
1692 | return -EINVAL; | ||
1693 | } | ||
1694 | |||
1695 | virt_dev = xhci->devs[udev->slot_id]; | ||
1696 | |||
1697 | /* If this is a Set Address to an unconfigured device, setup ep 0 */ | ||
1698 | if (!udev->config) | ||
1699 | xhci_setup_addressable_virt_dev(xhci, udev); | ||
1700 | /* Otherwise, assume the core has the device configured how it wants */ | ||
1701 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); | ||
1702 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); | ||
1703 | |||
1704 | spin_lock_irqsave(&xhci->lock, flags); | ||
1705 | ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma, | ||
1706 | udev->slot_id); | ||
1707 | if (ret) { | ||
1708 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
1709 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); | ||
1710 | return ret; | ||
1711 | } | ||
1712 | xhci_ring_cmd_db(xhci); | ||
1713 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
1714 | |||
1715 | /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */ | ||
1716 | timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, | ||
1717 | USB_CTRL_SET_TIMEOUT); | ||
1718 | /* FIXME: From section 4.3.4: "Software shall be responsible for timing | ||
1719 | * the SetAddress() 'recovery interval' required by USB and aborting the | ||
1720 | * command on a timeout." | ||
1721 | */ | ||
1722 | if (timeleft <= 0) { | ||
1723 | xhci_warn(xhci, "%s while waiting for address device command\n", | ||
1724 | timeleft == 0 ? "Timeout" : "Signal"); | ||
1725 | /* FIXME cancel the address device command */ | ||
1726 | return -ETIME; | ||
1727 | } | ||
1728 | |||
1729 | switch (virt_dev->cmd_status) { | ||
1730 | case COMP_CTX_STATE: | ||
1731 | case COMP_EBADSLT: | ||
1732 | xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n", | ||
1733 | udev->slot_id); | ||
1734 | ret = -EINVAL; | ||
1735 | break; | ||
1736 | case COMP_TX_ERR: | ||
1737 | dev_warn(&udev->dev, "Device not responding to set address.\n"); | ||
1738 | ret = -EPROTO; | ||
1739 | break; | ||
1740 | case COMP_SUCCESS: | ||
1741 | xhci_dbg(xhci, "Successful Address Device command\n"); | ||
1742 | break; | ||
1743 | default: | ||
1744 | xhci_err(xhci, "ERROR: unexpected command completion " | ||
1745 | "code 0x%x.\n", virt_dev->cmd_status); | ||
1746 | xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); | ||
1747 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); | ||
1748 | ret = -EINVAL; | ||
1749 | break; | ||
1750 | } | ||
1751 | if (ret) { | ||
1752 | return ret; | ||
1753 | } | ||
1754 | temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); | ||
1755 | xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64); | ||
1756 | xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n", | ||
1757 | udev->slot_id, | ||
1758 | &xhci->dcbaa->dev_context_ptrs[udev->slot_id], | ||
1759 | (unsigned long long) | ||
1760 | xhci->dcbaa->dev_context_ptrs[udev->slot_id]); | ||
1761 | xhci_dbg(xhci, "Output Context DMA address = %#08llx\n", | ||
1762 | (unsigned long long)virt_dev->out_ctx->dma); | ||
1763 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); | ||
1764 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); | ||
1765 | xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); | ||
1766 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); | ||
1767 | /* | ||
1768 | * USB core uses address 1 for the roothubs, so we add one to the | ||
1769 | * address given back to us by the HC. | ||
1770 | */ | ||
1771 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); | ||
1772 | udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1; | ||
1773 | /* Zero the input context control for later use */ | ||
1774 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); | ||
1775 | ctrl_ctx->add_flags = 0; | ||
1776 | ctrl_ctx->drop_flags = 0; | ||
1777 | |||
1778 | xhci_dbg(xhci, "Device address = %d\n", udev->devnum); | ||
1779 | /* XXX Meh, not sure if anyone else but choose_address uses this. */ | ||
1780 | set_bit(udev->devnum, udev->bus->devmap.devicemap); | ||
1781 | |||
1782 | return 0; | ||
1783 | } | ||
1784 | |||
1785 | /* Once a hub descriptor is fetched for a device, we need to update the xHC's | ||
1786 | * internal data structures for the device. | ||
1787 | */ | ||
1788 | int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, | ||
1789 | struct usb_tt *tt, gfp_t mem_flags) | ||
1790 | { | ||
1791 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | ||
1792 | struct xhci_virt_device *vdev; | ||
1793 | struct xhci_command *config_cmd; | ||
1794 | struct xhci_input_control_ctx *ctrl_ctx; | ||
1795 | struct xhci_slot_ctx *slot_ctx; | ||
1796 | unsigned long flags; | ||
1797 | unsigned think_time; | ||
1798 | int ret; | ||
1799 | |||
1800 | /* Ignore root hubs */ | ||
1801 | if (!hdev->parent) | ||
1802 | return 0; | ||
1803 | |||
1804 | vdev = xhci->devs[hdev->slot_id]; | ||
1805 | if (!vdev) { | ||
1806 | xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); | ||
1807 | return -EINVAL; | ||
1808 | } | ||
1809 | config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); | ||
1810 | if (!config_cmd) { | ||
1811 | xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); | ||
1812 | return -ENOMEM; | ||
1813 | } | ||
1814 | |||
1815 | spin_lock_irqsave(&xhci->lock, flags); | ||
1816 | xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); | ||
1817 | ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx); | ||
1818 | ctrl_ctx->add_flags |= SLOT_FLAG; | ||
1819 | slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); | ||
1820 | slot_ctx->dev_info |= DEV_HUB; | ||
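| /* A multi-TT hub has one transaction translator per downstream port. */ | ||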
1821 | if (tt->multi) | ||
1822 | slot_ctx->dev_info |= DEV_MTT; | ||
1823 | if (xhci->hci_version > 0x95) { | ||
1824 | xhci_dbg(xhci, "xHCI version %x needs hub " | ||
1825 | "TT think time and number of ports\n", | ||
1826 | (unsigned int) xhci->hci_version); | ||
1827 | slot_ctx->dev_info2 |= XHCI_MAX_PORTS(hdev->maxchild); | ||
1828 | /* Set TT think time - convert from ns to FS bit times. | ||
1829 | * 0 = 8 FS bit times, 1 = 16 FS bit times, | ||
1830 | * 2 = 24 FS bit times, 3 = 32 FS bit times. | ||
1831 | */ | ||
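| /* E.g. the USB core reports think time in ns, so 666 ns maps to 0 | ||
| * (8 FS bit times) and 1332 ns maps to 1 (16 FS bit times). | ||
| */ | ||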
1832 | think_time = tt->think_time; | ||
1833 | if (think_time != 0) | ||
1834 | think_time = (think_time / 666) - 1; | ||
1835 | slot_ctx->tt_info |= TT_THINK_TIME(think_time); | ||
1836 | } else { | ||
1837 | xhci_dbg(xhci, "xHCI version %x doesn't need hub " | ||
1838 | "TT think time or number of ports\n", | ||
1839 | (unsigned int) xhci->hci_version); | ||
1840 | } | ||
1841 | slot_ctx->dev_state = 0; | ||
1842 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
1843 | |||
1844 | xhci_dbg(xhci, "Set up %s for hub device.\n", | ||
1845 | (xhci->hci_version > 0x95) ? | ||
1846 | "configure endpoint" : "evaluate context"); | ||
1847 | xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id); | ||
1848 | xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0); | ||
1849 | |||
1850 | /* Issue and wait for the configure endpoint or | ||
1851 | * evaluate context command. | ||
1852 | */ | ||
1853 | if (xhci->hci_version > 0x95) | ||
1854 | ret = xhci_configure_endpoint(xhci, hdev, config_cmd, | ||
1855 | false, false); | ||
1856 | else | ||
1857 | ret = xhci_configure_endpoint(xhci, hdev, config_cmd, | ||
1858 | true, false); | ||
1859 | |||
1860 | xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id); | ||
1861 | xhci_dbg_ctx(xhci, vdev->out_ctx, 0); | ||
1862 | |||
1863 | xhci_free_command(xhci, config_cmd); | ||
1864 | return ret; | ||
1865 | } | ||
1866 | |||
1867 | int xhci_get_frame(struct usb_hcd *hcd) | ||
1868 | { | ||
1869 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | ||
1870 | /* EHCI mods by the periodic size. Why? */ | ||
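| /* MFINDEX counts 125 us microframes; shifting right by 3 converts it | ||
| * to the 1 ms frame number. | ||
| */ | ||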
1871 | return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3; | ||
1872 | } | ||
1873 | |||
1874 | MODULE_DESCRIPTION(DRIVER_DESC); | ||
1875 | MODULE_AUTHOR(DRIVER_AUTHOR); | ||
1876 | MODULE_LICENSE("GPL"); | ||
1877 | |||
1878 | static int __init xhci_hcd_init(void) | ||
1879 | { | ||
1880 | #ifdef CONFIG_PCI | ||
1881 | int retval = 0; | ||
1882 | |||
1883 | retval = xhci_register_pci(); | ||
1884 | |||
1885 | if (retval < 0) { | ||
1886 | printk(KERN_DEBUG "Problem registering PCI driver.\n"); | ||
1887 | return retval; | ||
1888 | } | ||
1889 | #endif | ||
1890 | /* | ||
1891 | * Check the compiler generated sizes of structures that must be laid | ||
1892 | * out in specific ways for hardware access. | ||
1893 | */ | ||
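| /* Each N*32/8 below is N 32-bit registers expressed in bytes. */ | ||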
1894 | BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8); | ||
1895 | BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8); | ||
1896 | BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8); | ||
1897 | /* xhci_device_control has eight fields, and also | ||
1898 | * embeds one xhci_slot_ctx and 31 xhci_ep_ctx | ||
1899 | */ | ||
1900 | BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); | ||
1901 | BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); | ||
1902 | BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8); | ||
1903 | BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8); | ||
1904 | BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8); | ||
1905 | /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */ | ||
1906 | BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8); | ||
1908 | return 0; | ||
1909 | } | ||
1910 | module_init(xhci_hcd_init); | ||
1911 | |||
1912 | static void __exit xhci_hcd_cleanup(void) | ||
1913 | { | ||
1914 | #ifdef CONFIG_PCI | ||
1915 | xhci_unregister_pci(); | ||
1916 | #endif | ||
1917 | } | ||
1918 | module_exit(xhci_hcd_cleanup); | ||