author	Sarah Sharp <sarah.a.sharp@linux.intel.com>	2009-04-27 22:57:38 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2009-06-16 00:44:49 -0400
commit	3ffbba9511b4148cbe1f6b6238686adaeaca8feb (patch)
tree	f69e42d07d596039e049fe2b14b720ddc6be2694
parent	c6515272b858742962c1de0f3bf497a048b9abd7 (diff)
USB: xhci: Allocate and address USB devices
xHCI needs to get a "Slot ID" from the host controller and allocate other
data structures for every USB device.  Make usb_alloc_dev() and
usb_release_dev() allocate and free these device structures.  After setting
up the xHC device structures, usb_alloc_dev() must wait for the hardware to
respond to an Enable Slot command.  usb_release_dev() fires off a Disable
Slot command and does not wait for it to complete.

When the USB core wants to choose an address for the device, the xHCI driver
must issue a Set Address command and wait for an event for that command.

Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
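[Editor's note: the command/completion handshake at the heart of this patch is easiest to see in isolation. The sketch below is illustrative only and is condensed from xhci_alloc_dev() in the diff that follows; the wrapper name is invented for this example, while queue_slot_control(), ring_cmd_db(), TRB_ENABLE_SLOT, and the addr_dev completion are the real helpers this patch introduces. Command-ring locking and the error-cleanup paths are trimmed for brevity.]

/* Illustrative sketch only -- condensed from xhci_alloc_dev() below. */
static int xhci_enable_slot_and_wait(struct xhci_hcd *xhci)
{
	/* 1) Queue an Enable Slot TRB on the command ring... */
	if (queue_slot_control(xhci, TRB_ENABLE_SLOT, 0))
		return 0;		/* command ring full */
	/* 2) ...and ring the host controller's command doorbell */
	ring_cmd_db(xhci);

	/* 3) Sleep until handle_cmd_completion() sees the Command
	 * Completion Event, stores the new ID in xhci->slot_id, and
	 * calls complete(&xhci->addr_dev).
	 */
	if (wait_for_completion_interruptible_timeout(&xhci->addr_dev,
				USB_CTRL_SET_TIMEOUT) <= 0)
		return 0;		/* timeout or signal */

	return xhci->slot_id;		/* 0 if the xHC was out of slots */
}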
-rw-r--r--	drivers/usb/host/xhci-dbg.c	79
-rw-r--r--	drivers/usb/host/xhci-hcd.c	201
-rw-r--r--	drivers/usb/host/xhci-mem.c	204
-rw-r--r--	drivers/usb/host/xhci-pci.c	7
-rw-r--r--	drivers/usb/host/xhci-ring.c	34
-rw-r--r--	drivers/usb/host/xhci.h	94
6 files changed, 590 insertions(+), 29 deletions(-)
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 570cd4820458..16ef42a0fe85 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -410,3 +410,82 @@ void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
 	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[1]);
 	xhci_dbg(xhci, "// xHC command ring deq ptr high bits = 0x%x\n", val);
 }
+
+void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep)
+{
+	int i, j;
+	int last_ep_ctx = 31;
+	/* Fields are 32 bits wide, DMA addresses are in bytes */
+	int field_size = 32 / 8;
+
+	xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - drop flags\n",
+			(unsigned int) &ctx->drop_flags,
+			dma, ctx->drop_flags);
+	dma += field_size;
+	xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - add flags\n",
+			(unsigned int) &ctx->add_flags,
+			dma, ctx->add_flags);
+	dma += field_size;
+	for (i = 0; i < 6; ++i) {
+		xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - rsvd[%d]\n",
+				(unsigned int) &ctx->rsvd[i],
+				dma, ctx->rsvd[i], i);
+		dma += field_size;
+	}
+
+	xhci_dbg(xhci, "Slot Context:\n");
+	xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - dev_info\n",
+			(unsigned int) &ctx->slot.dev_info,
+			dma, ctx->slot.dev_info);
+	dma += field_size;
+	xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - dev_info2\n",
+			(unsigned int) &ctx->slot.dev_info2,
+			dma, ctx->slot.dev_info2);
+	dma += field_size;
+	xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - tt_info\n",
+			(unsigned int) &ctx->slot.tt_info,
+			dma, ctx->slot.tt_info);
+	dma += field_size;
+	xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - dev_state\n",
+			(unsigned int) &ctx->slot.dev_state,
+			dma, ctx->slot.dev_state);
+	dma += field_size;
+	for (i = 0; i < 4; ++i) {
+		xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - rsvd[%d]\n",
+				(unsigned int) &ctx->slot.reserved[i],
+				dma, ctx->slot.reserved[i], i);
+		dma += field_size;
+	}
+
+	if (last_ep < 31)
+		last_ep_ctx = last_ep + 1;
+	for (i = 0; i < last_ep_ctx; ++i) {
+		xhci_dbg(xhci, "Endpoint %02d Context:\n", i);
+		xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - ep_info\n",
+				(unsigned int) &ctx->ep[i].ep_info,
+				dma, ctx->ep[i].ep_info);
+		dma += field_size;
+		xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - ep_info2\n",
+				(unsigned int) &ctx->ep[i].ep_info2,
+				dma, ctx->ep[i].ep_info2);
+		dma += field_size;
+		xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - deq[0]\n",
+				(unsigned int) &ctx->ep[i].deq[0],
+				dma, ctx->ep[i].deq[0]);
+		dma += field_size;
+		xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - deq[1]\n",
+				(unsigned int) &ctx->ep[i].deq[1],
+				dma, ctx->ep[i].deq[1]);
+		dma += field_size;
+		xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - tx_info\n",
+				(unsigned int) &ctx->ep[i].tx_info,
+				dma, ctx->ep[i].tx_info);
+		dma += field_size;
+		for (j = 0; j < 3; ++j) {
+			xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - rsvd[%d]\n",
+					(unsigned int) &ctx->ep[i].reserved[j],
+					dma, ctx->ep[i].reserved[j], j);
+			dma += field_size;
+		}
+	}
+}
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index d7c2fed55978..a01d2ee7435a 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -318,6 +318,16 @@ void event_ring_work(unsigned long arg)
 	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
 	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
 	xhci_dbg_cmd_ptrs(xhci);
+	for (i = 0; i < MAX_HC_SLOTS; ++i) {
+		if (xhci->devs[i]) {
+			for (j = 0; j < 31; ++j) {
+				if (xhci->devs[i]->ep_rings[j]) {
+					xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
+					xhci_debug_segment(xhci, xhci->devs[i]->ep_rings[j]->deq_seg);
+				}
+			}
+		}
+	}
 
 	if (xhci->noops_submitted != NUM_TEST_NOOPS)
 		if (setup_one_noop(xhci))
@@ -499,6 +509,197 @@ void xhci_shutdown(struct usb_hcd *hcd)
 
 /*-------------------------------------------------------------------------*/
 
+/*
+ * At this point, the struct usb_device is about to go away, the device has
+ * disconnected, and all traffic has been stopped and the endpoints have been
+ * disabled.  Free any HC data structures associated with that device.
+ */
+void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	unsigned long flags;
+
+	if (udev->slot_id == 0)
+		return;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	if (queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+		return;
+	}
+	ring_cmd_db(xhci);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	/*
+	 * Event command completion handler will free any data structures
+	 * associated with the slot
+	 */
+}
+
+/*
+ * Returns 0 if the xHC ran out of device slots, the Enable Slot command
+ * timed out, or allocating memory failed.  Returns 1 on success.
+ */
+int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	unsigned long flags;
+	int timeleft;
+	int ret;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	ret = queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
+	if (ret) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+		return 0;
+	}
+	ring_cmd_db(xhci);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	/* XXX: how much time for xHC slot assignment? */
+	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
+			USB_CTRL_SET_TIMEOUT);
+	if (timeleft <= 0) {
+		xhci_warn(xhci, "%s while waiting for a slot\n",
+				timeleft == 0 ? "Timeout" : "Signal");
+		/* FIXME cancel the enable slot request */
+		return 0;
+	}
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	if (!xhci->slot_id) {
+		xhci_err(xhci, "Error while assigning device slot ID\n");
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return 0;
+	}
+	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
+		/* Disable slot, if we can do it without mem alloc */
+		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
+		if (!queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
+			ring_cmd_db(xhci);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return 0;
+	}
+	udev->slot_id = xhci->slot_id;
+	/* Is this a LS or FS device under a HS hub? */
+	/* Hub or peripheral? */
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	return 1;
+}
+
+/*
+ * Issue an Address Device command (which will issue a SetAddress request to
+ * the device).
+ * We should be protected by the usb_address0_mutex in khubd's hub_port_init,
+ * so we should only issue and wait on one address command at the same time.
+ *
+ * We add one to the device address issued by the hardware because the USB
+ * core uses address 1 for the root hubs (even though they're not really
+ * devices).
+ */
+int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+	unsigned long flags;
+	int timeleft;
+	struct xhci_virt_device *virt_dev;
+	int ret = 0;
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	u32 temp;
+
+	if (!udev->slot_id) {
+		xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	virt_dev = xhci->devs[udev->slot_id];
+
+	/* If this is a Set Address to an unconfigured device, setup ep 0 */
+	if (!udev->config)
+		xhci_setup_addressable_virt_dev(xhci, udev);
+	/* Otherwise, assume the core has the device configured how it wants */
+
+	ret = queue_address_device(xhci, virt_dev->in_ctx_dma, udev->slot_id);
+	if (ret) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+		return ret;
+	}
+	ring_cmd_db(xhci);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
+	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
+			USB_CTRL_SET_TIMEOUT);
+	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
+	 * the SetAddress() "recovery interval" required by USB and aborting the
+	 * command on a timeout."
+	 */
+	if (timeleft <= 0) {
+		xhci_warn(xhci, "%s while waiting for address device command\n",
+				timeleft == 0 ? "Timeout" : "Signal");
+		/* FIXME cancel the address device command */
+		return -ETIME;
+	}
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	switch (virt_dev->cmd_status) {
+	case COMP_CTX_STATE:
+	case COMP_EBADSLT:
+		xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
+				udev->slot_id);
+		ret = -EINVAL;
+		break;
+	case COMP_TX_ERR:
+		dev_warn(&udev->dev, "Device not responding to set address.\n");
+		ret = -EPROTO;
+		break;
+	case COMP_SUCCESS:
+		xhci_dbg(xhci, "Successful Address Device command\n");
+		break;
+	default:
+		xhci_err(xhci, "ERROR: unexpected command completion "
+				"code 0x%x.\n", virt_dev->cmd_status);
+		ret = -EINVAL;
+		break;
+	}
+	if (ret) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return ret;
+	}
+	temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[0]);
+	xhci_dbg(xhci, "Op regs DCBAA ptr[0] = %#08x\n", temp);
+	temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[1]);
+	xhci_dbg(xhci, "Op regs DCBAA ptr[1] = %#08x\n", temp);
+	xhci_dbg(xhci, "Slot ID %d dcbaa entry[0] @%08x = %#08x\n",
+			udev->slot_id,
+			(unsigned int) &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id],
+			xhci->dcbaa->dev_context_ptrs[2*udev->slot_id]);
+	xhci_dbg(xhci, "Slot ID %d dcbaa entry[1] @%08x = %#08x\n",
+			udev->slot_id,
+			(unsigned int) &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1],
+			xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1]);
+	xhci_dbg(xhci, "Output Context DMA address = %#08x\n",
+			virt_dev->out_ctx_dma);
+	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
+	xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2);
+	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
+	xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2);
+	/*
+	 * USB core uses address 1 for the roothubs, so we add one to the
+	 * address given back to us by the HC.
+	 */
+	udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1;
+	/* FIXME: Zero the input context control for later use? */
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
+	/* XXX Meh, not sure if anyone else but choose_address uses this. */
+	set_bit(udev->devnum, udev->bus->devmap.devicemap);
+
+	return 0;
+}
+
 int xhci_get_frame(struct usb_hcd *hcd)
 {
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 005d44641d81..d34b91a135a1 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -188,12 +188,187 @@ fail:
 	return 0;
 }
 
+void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
+{
+	struct xhci_virt_device *dev;
+	int i;
+
+	/* Slot ID 0 is reserved */
+	if (slot_id == 0 || !xhci->devs[slot_id])
+		return;
+
+	dev = xhci->devs[slot_id];
+	xhci->dcbaa->dev_context_ptrs[2*slot_id] = 0;
+	xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
+	if (!dev)
+		return;
+
+	for (i = 0; i < 31; ++i)
+		if (dev->ep_rings[i])
+			xhci_ring_free(xhci, dev->ep_rings[i]);
+
+	if (dev->in_ctx)
+		dma_pool_free(xhci->device_pool,
+				dev->in_ctx, dev->in_ctx_dma);
+	if (dev->out_ctx)
+		dma_pool_free(xhci->device_pool,
+				dev->out_ctx, dev->out_ctx_dma);
+	kfree(xhci->devs[slot_id]);
+	xhci->devs[slot_id] = NULL;
+}
+
+int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
+		struct usb_device *udev, gfp_t flags)
+{
+	dma_addr_t dma;
+	struct xhci_virt_device *dev;
+
+	/* Slot ID 0 is reserved */
+	if (slot_id == 0 || xhci->devs[slot_id]) {
+		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
+		return 0;
+	}
+
+	xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
+	if (!xhci->devs[slot_id])
+		return 0;
+	dev = xhci->devs[slot_id];
+
+	/* Allocate the (output) device context that will be used in the HC */
+	dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
+	if (!dev->out_ctx)
+		goto fail;
+	dev->out_ctx_dma = dma;
+	xhci_dbg(xhci, "Slot %d output ctx = 0x%x (dma)\n", slot_id, dma);
+	memset(dev->out_ctx, 0, sizeof(*dev->out_ctx));
+
+	/* Allocate the (input) device context for address device command */
+	dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
+	if (!dev->in_ctx)
+		goto fail;
+	dev->in_ctx_dma = dma;
+	xhci_dbg(xhci, "Slot %d input ctx = 0x%x (dma)\n", slot_id, dma);
+	memset(dev->in_ctx, 0, sizeof(*dev->in_ctx));
+
+	/* Allocate endpoint 0 ring */
+	dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags);
+	if (!dev->ep_rings[0])
+		goto fail;
+
+	/*
+	 * Point to output device context in dcbaa; skip the output control
+	 * context, which is eight 32 bit fields (or 32 bytes long)
+	 */
+	xhci->dcbaa->dev_context_ptrs[2*slot_id] =
+		(u32) dev->out_ctx_dma + (32);
+	xhci_dbg(xhci, "Set slot id %d dcbaa entry 0x%x to 0x%x\n",
+			slot_id,
+			(unsigned int) &xhci->dcbaa->dev_context_ptrs[2*slot_id],
+			dev->out_ctx_dma);
+	xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
+
+	return 1;
+fail:
+	xhci_free_virt_device(xhci, slot_id);
+	return 0;
+}
+
+/* Setup an xHCI virtual device for a Set Address command */
+int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
+{
+	struct xhci_virt_device *dev;
+	struct xhci_ep_ctx *ep0_ctx;
+	struct usb_device *top_dev;
+
+	dev = xhci->devs[udev->slot_id];
+	/* Slot ID 0 is reserved */
+	if (udev->slot_id == 0 || !dev) {
+		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
+				udev->slot_id);
+		return -EINVAL;
+	}
+	ep0_ctx = &dev->in_ctx->ep[0];
+
+	/* 2) New slot context and endpoint 0 context are valid */
+	dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
+
+	/* 3) Only the control endpoint is valid - one endpoint context */
+	dev->in_ctx->slot.dev_info |= LAST_CTX(1);
+
+	switch (udev->speed) {
+	case USB_SPEED_SUPER:
+		dev->in_ctx->slot.dev_info |= (u32) udev->route;
+		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS;
+		break;
+	case USB_SPEED_HIGH:
+		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS;
+		break;
+	case USB_SPEED_FULL:
+		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS;
+		break;
+	case USB_SPEED_LOW:
+		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS;
+		break;
+	case USB_SPEED_VARIABLE:
+		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
+		return -EINVAL;
+	default:
+		/* Speed was set earlier, this shouldn't happen. */
+		BUG();
+	}
+	/* Find the root hub port this device is under */
+	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
+			top_dev = top_dev->parent)
+		/* Found device below root hub */;
+	dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
+	xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);
+
+	/* Is this a LS/FS device under a HS hub? */
+	/*
+	 * FIXME: I don't think this is right, where does the TT info for the
+	 * roothub or parent hub come from?
+	 */
+	if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
+			udev->tt) {
+		dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id;
+		dev->in_ctx->slot.tt_info |= udev->ttport << 8;
+	}
+	xhci_dbg(xhci, "udev->tt = 0x%x\n", (unsigned int) udev->tt);
+	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
+
+	/* Step 4 - ring already allocated */
+	/* Step 5 */
+	ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP);
+	/*
+	 * See section 4.3 bullet 6:
+	 * The default Max Packet size for ep0 is "8 bytes for a USB2
+	 * LS/FS/HS device or 512 bytes for a USB3 SS device"
+	 * XXX: Not sure about wireless USB devices.
+	 */
+	if (udev->speed == USB_SPEED_SUPER)
+		ep0_ctx->ep_info2 |= MAX_PACKET(512);
+	else
+		ep0_ctx->ep_info2 |= MAX_PACKET(8);
+	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
+	ep0_ctx->ep_info2 |= MAX_BURST(0);
+	ep0_ctx->ep_info2 |= ERROR_COUNT(3);
+
+	ep0_ctx->deq[0] =
+		dev->ep_rings[0]->first_seg->dma;
+	ep0_ctx->deq[0] |= dev->ep_rings[0]->cycle_state;
+	ep0_ctx->deq[1] = 0;
+
+	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */
+
+	return 0;
+}
+
 void xhci_mem_cleanup(struct xhci_hcd *xhci)
 {
 	struct pci_dev	*pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 	int size;
-
-	/* XXX: Free all the segments in the various rings */
+	int i;
 
 	/* Free the Event Ring Segment Table and the actual Event Ring */
 	xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
@@ -218,16 +393,27 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	xhci_ring_free(xhci, xhci->cmd_ring);
 	xhci->cmd_ring = NULL;
 	xhci_dbg(xhci, "Freed command ring\n");
+
+	for (i = 1; i < MAX_HC_SLOTS; ++i)
+		xhci_free_virt_device(xhci, i);
+
 	if (xhci->segment_pool)
 		dma_pool_destroy(xhci->segment_pool);
 	xhci->segment_pool = NULL;
 	xhci_dbg(xhci, "Freed segment pool\n");
+
+	if (xhci->device_pool)
+		dma_pool_destroy(xhci->device_pool);
+	xhci->device_pool = NULL;
+	xhci_dbg(xhci, "Freed device context pool\n");
+
 	xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]);
 	xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]);
 	if (xhci->dcbaa)
 		pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
 				xhci->dcbaa, xhci->dcbaa->dma);
 	xhci->dcbaa = NULL;
+
 	xhci->page_size = 0;
 	xhci->page_shift = 0;
 }
@@ -280,8 +466,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 		goto fail;
 	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
 	xhci->dcbaa->dma = dma;
-	xhci_dbg(xhci, "// Setting device context base array address to 0x%x\n",
-			xhci->dcbaa->dma);
+	xhci_dbg(xhci, "// Device context base array address = 0x%x (DMA), 0x%x (virt)\n",
+			xhci->dcbaa->dma, (unsigned int) xhci->dcbaa);
 	xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]);
 	xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]);
 
@@ -293,7 +479,12 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	 */
 	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
 			SEGMENT_SIZE, 64, xhci->page_size);
-	if (!xhci->segment_pool)
+	/* See Table 46 and Note on Figure 55 */
+	/* FIXME support 64-byte contexts */
+	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
+			sizeof(struct xhci_device_control),
+			64, xhci->page_size);
+	if (!xhci->segment_pool || !xhci->device_pool)
 		goto fail;
 
 	/* Set up the command ring to have one segment for now. */
@@ -385,6 +576,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	 * something other than the default (~1ms minimum between interrupts).
 	 * See section 5.5.1.2.
 	 */
+	init_completion(&xhci->addr_dev);
+	for (i = 0; i < MAX_HC_SLOTS; ++i)
+		xhci->devs[i] = NULL;
 
 	return 0;
 fail:
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 005c5b264a7c..7ac12b4ffe86 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -109,6 +109,13 @@ static const struct hc_driver xhci_pci_hc_driver = {
 	.shutdown =		xhci_shutdown,
 
 	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.alloc_dev =		xhci_alloc_dev,
+	.free_dev =		xhci_free_dev,
+	.address_device =	xhci_address_device,
+
+	/*
 	 * scheduling support
 	 */
 	.get_frame_number =	xhci_get_frame,
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 9d6bb3d730c4..901ce70b30b8 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -252,13 +252,10 @@ void ring_cmd_db(struct xhci_hcd *xhci)
 static void handle_cmd_completion(struct xhci_hcd *xhci,
 		struct xhci_event_cmd *event)
 {
+	int slot_id = TRB_TO_SLOT_ID(event->flags);
 	u64 cmd_dma;
 	dma_addr_t cmd_dequeue_dma;
 
-	/* Check completion code */
-	if (GET_COMP_CODE(event->status) != COMP_SUCCESS)
-		xhci_dbg(xhci, "WARN: unsuccessful no-op command\n");
-
 	cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0];
 	cmd_dequeue_dma = trb_virt_to_dma(xhci->cmd_ring->deq_seg,
 			xhci->cmd_ring->dequeue);
@@ -273,6 +270,21 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 		return;
 	}
 	switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) {
+	case TRB_TYPE(TRB_ENABLE_SLOT):
+		if (GET_COMP_CODE(event->status) == COMP_SUCCESS)
+			xhci->slot_id = slot_id;
+		else
+			xhci->slot_id = 0;
+		complete(&xhci->addr_dev);
+		break;
+	case TRB_TYPE(TRB_DISABLE_SLOT):
+		if (xhci->devs[slot_id])
+			xhci_free_virt_device(xhci, slot_id);
+		break;
+	case TRB_TYPE(TRB_ADDR_DEV):
+		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
+		complete(&xhci->addr_dev);
+		break;
 	case TRB_TYPE(TRB_CMD_NOOP):
 		++xhci->noops_handled;
 		break;
@@ -400,3 +412,17 @@ void *setup_one_noop(struct xhci_hcd *xhci)
 	xhci->noops_submitted++;
 	return ring_cmd_db;
 }
+
+/* Queue a slot enable or disable request on the command ring */
+int queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
+{
+	return queue_command(xhci, 0, 0, 0,
+			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id));
+}
+
+/* Queue an address device command TRB */
+int queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id)
+{
+	return queue_command(xhci, in_ctx_ptr, 0, 0,
+			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id));
+}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 059c659d3f39..4ef6b9e88504 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -285,12 +285,21 @@ struct xhci_op_regs {
  * 4 - super speed
  * 5-15 reserved
  */
-#define DEV_SPEED_MASK		(0xf<<10)
+#define DEV_SPEED_MASK		(0xf << 10)
+#define XDEV_FS			(0x1 << 10)
+#define XDEV_LS			(0x2 << 10)
+#define XDEV_HS			(0x3 << 10)
+#define XDEV_SS			(0x4 << 10)
 #define DEV_UNDEFSPEED(p)	(((p) & DEV_SPEED_MASK) == (0x0<<10))
-#define DEV_FULLSPEED(p)	(((p) & DEV_SPEED_MASK) == (0x1<<10))
-#define DEV_LOWSPEED(p)		(((p) & DEV_SPEED_MASK) == (0x2<<10))
-#define DEV_HIGHSPEED(p)	(((p) & DEV_SPEED_MASK) == (0x3<<10))
-#define DEV_SUPERSPEED(p)	(((p) & DEV_SPEED_MASK) == (0x4<<10))
+#define DEV_FULLSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_FS)
+#define DEV_LOWSPEED(p)		(((p) & DEV_SPEED_MASK) == XDEV_LS)
+#define DEV_HIGHSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_HS)
+#define DEV_SUPERSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_SS)
+/* Bits 20:23 in the Slot Context are the speed for the device */
+#define SLOT_SPEED_FS		(XDEV_FS << 10)
+#define SLOT_SPEED_LS		(XDEV_LS << 10)
+#define SLOT_SPEED_HS		(XDEV_HS << 10)
+#define SLOT_SPEED_SS		(XDEV_SS << 10)
 /* Port Indicator Control */
 #define PORT_LED_OFF	(0 << 14)
 #define PORT_LED_AMBER	(1 << 14)
@@ -471,14 +480,19 @@ struct xhci_slot_ctx {
 /* Set if the device is a hub - bit 26 */
 #define DEV_HUB		(0x1 << 26)
 /* Index of the last valid endpoint context in this device context - 27:31 */
-#define LAST_EP_MASK	(0x1f << 27)
-#define LAST_EP(p)	((p) << 27)
+#define LAST_CTX_MASK	(0x1f << 27)
+#define LAST_CTX(p)	((p) << 27)
+#define LAST_CTX_TO_EP_NUM(p)	(((p) >> 27) - 1)
+/* Plus one for the slot context flag */
+#define EPI_TO_FLAG(p)	(1 << ((p) + 1))
+#define SLOT_FLAG	(1 << 0)
+#define EP0_FLAG	(1 << 1)
 
 /* dev_info2 bitmasks */
 /* Max Exit Latency (ms) - worst case time to wake up all links in dev path */
 #define MAX_EXIT	(0xffff)
 /* Root hub port number that is needed to access the USB device */
-#define ROOT_HUB_PORT	(0xff << 16)
+#define ROOT_HUB_PORT(p)	(((p) & 0xff) << 16)
 
 /* tt_info bitmasks */
 /*
@@ -495,7 +509,7 @@ struct xhci_slot_ctx {
 
 /* dev_state bitmasks */
 /* USB device address - assigned by the HC */
-#define DEV_ADDR	(0xff)
+#define DEV_ADDR_MASK	(0xff)
 /* bits 8:26 reserved */
 /* Slot state */
 #define SLOT_STATE	(0x1f << 27)
@@ -507,12 +521,13 @@ struct xhci_slot_ctx {
  * @ep_info2:	information on endpoint type, max packet size, max burst size,
  *		error count, and whether the HC will force an event for all
  *		transactions.
- * @ep_ring:	64-bit ring address.  If the endpoint only defines one flow,
- *		this points to the endpoint transfer ring.  Otherwise, it points
- *		to a flow context array, which has a ring pointer for each flow.
- * @intr_target:
- *		64-bit address of the Interrupter Target that will receive
- *		events from this endpoint.
+ * @deq:	64-bit ring dequeue pointer address.  If the endpoint only
+ *		defines one stream, this points to the endpoint transfer ring.
+ *		Otherwise, it points to a stream context array, which has a
+ *		ring pointer for each flow.
+ * @tx_info:
+ *		Average TRB lengths for the endpoint ring and
+ *		max payload within an Endpoint Service Interval Time (ESIT).
  *
  * Endpoint Context - section 6.2.1.2.  This assumes the HC uses 32-byte context
  * structures.  If the HC uses 64-byte contexts, there is an additional 32 bytes
@@ -521,12 +536,10 @@ struct xhci_slot_ctx {
 struct xhci_ep_ctx {
 	u32	ep_info;
 	u32	ep_info2;
-	/* 64-bit endpoint ring address */
-	u32	ep_ring[2];
-	/* 64-bit address of the interrupter target */
-	u32	intr_target[2];
+	u32	deq[2];
+	u32	tx_info;
 	/* offset 0x14 - 0x1f reserved for HC internal use */
-	u32	reserved[2];
+	u32	reserved[3];
 } __attribute__ ((packed));
 
 /* ep_info bitmasks */
@@ -589,6 +602,28 @@ struct xhci_device_control {
 #define	ADD_EP(x)	(0x1 << x)
 
 
+struct xhci_virt_device {
+	/*
+	 * Commands to the hardware are passed an "input context" that
+	 * tells the hardware what to change in its data structures.
+	 * The hardware will return changes in an "output context" that
+	 * software must allocate for the hardware.  We need to keep
+	 * track of input and output contexts separately because
+	 * these commands might fail and we don't trust the hardware.
+	 */
+	struct xhci_device_control	*out_ctx;
+	dma_addr_t			out_ctx_dma;
+	/* Used for addressing devices and configuration changes */
+	struct xhci_device_control	*in_ctx;
+	dma_addr_t			in_ctx_dma;
+	/* FIXME when stream support is added */
+	struct xhci_ring		*ep_rings[31];
+	dma_addr_t			ep_dma[31];
+	/* Status of the last command issued for this device */
+	u32				cmd_status;
+};
+
+
 /**
  * struct xhci_device_context_array
  * @dev_context_ptr	array of 64-bit DMA addresses for device contexts
@@ -711,6 +746,11 @@ struct xhci_event_cmd {
 	u32 flags;
 } __attribute__ ((packed));
 
+/* flags bitmasks */
+/* bits 16:23 are the virtual function ID */
+/* bits 24:31 are the slot ID */
+#define TRB_TO_SLOT_ID(p)	(((p) & (0xff<<24)) >> 24)
+#define SLOT_ID_FOR_TRB(p)	(((p) & 0xff) << 24)
 
 /* Port Status Change Event TRB fields */
 /* Port ID - bits 31:24 */
@@ -931,6 +971,11 @@ struct xhci_hcd {
 	struct xhci_ring	*cmd_ring;
 	struct xhci_ring	*event_ring;
 	struct xhci_erst	erst;
+	/* slot enabling and address device helpers */
+	struct completion	addr_dev;
+	int slot_id;
+	/* Internal mirror of the HW's dcbaa */
+	struct xhci_virt_device	*devs[MAX_HC_SLOTS];
 
 	/* DMA pools */
 	struct dma_pool	*device_pool;
@@ -1002,10 +1047,14 @@ void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring);
 void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
 void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
 void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
+void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep);
 
 /* xHCI memory management */
 void xhci_mem_cleanup(struct xhci_hcd *xhci);
 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags);
+void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id);
+int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags);
+int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev);
 
 #ifdef CONFIG_PCI
 /* xHCI PCI glue */
@@ -1022,6 +1071,9 @@ void xhci_stop(struct usb_hcd *hcd);
 void xhci_shutdown(struct usb_hcd *hcd);
 int xhci_get_frame(struct usb_hcd *hcd);
 irqreturn_t xhci_irq(struct usb_hcd *hcd);
+int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev);
 
 /* xHCI ring, segment, TRB, and TD functions */
 dma_addr_t trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
@@ -1029,6 +1081,8 @@ void ring_cmd_db(struct xhci_hcd *xhci);
 void *setup_one_noop(struct xhci_hcd *xhci);
 void handle_event(struct xhci_hcd *xhci);
 void set_hc_event_deq(struct xhci_hcd *xhci);
+int queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id);
+int queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id);
 
 /* xHCI roothub code */
 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,