author	Sarah Sharp <sarah.a.sharp@linux.intel.com>	2009-04-27 22:58:01 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2009-06-16 00:44:49 -0400
commit	d0e96f5a71a032ced0c35f521c1cbd67e816922a (patch)
tree	402e4d1ce20682fd2efd3ffd2ad23ffd097b1436 /drivers/usb/host/xhci-ring.c
parent	6d65b78a093552fb42448480d4c66bf093a6d4cf (diff)
USB: xhci: Control transfer support.
Allow device drivers to enqueue URBs to control endpoints on devices
under an xHCI host controller. Each control transfer is represented by
a series of Transfer Descriptors (TDs) written to an endpoint ring.
There is one TD for the Setup phase, (optionally) one TD for the Data
phase, and one TD for the Status phase.

Enqueue these TDs onto the endpoint ring that represents the control
endpoint. The host controller hardware will return an event on the
event ring that points to the (DMA) address of one of the TDs on the
endpoint ring. If the transfer was successful, the transfer event TRB
will have a completion code of success, and it will point to the Status
phase TD. Anything else is considered an error.

This should work for control endpoints besides the default endpoint,
but that hasn't been tested.

Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
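For readers coming from the USB core side, here is a minimal sketch (not part
of this patch) of how a device driver ends up on this path. usb_control_msg()
and usb_rcvctrlpipe() are the standard USB core helpers; the wrapper function
below is hypothetical. The core builds a control URB from the request and
submits it to the HCD, and for xHCI the Setup, (optional) Data, and Status
phases become the TDs queued by queue_ctrl_tx() in the patch below.

#include <linux/usb.h>

/*
 * Hypothetical example, not part of the patch: read the device
 * descriptor over the default control endpoint.  This is an IN control
 * transfer with a data stage, i.e. the three-TD (Setup, Data, Status)
 * case on the ep0 ring.
 */
static int example_read_device_descriptor(struct usb_device *udev,
		struct usb_device_descriptor *desc)
{
	/* Returns the number of bytes transferred, or a negative errno */
	return usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			USB_REQ_GET_DESCRIPTOR,
			USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
			USB_DT_DEVICE << 8, 0,
			desc, sizeof(*desc), USB_CTRL_GET_TIMEOUT);
}

When the transfer completes, handle_tx_event() in the patch matches the
transfer event's DMA pointer to the queued TD, sets urb->status and
urb->actual_length, and gives the URB back to the core.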
Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r--	drivers/usb/host/xhci-ring.c	383
1 file changed, 383 insertions(+), 0 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 901ce70b30b8..f04162ae4374 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -321,6 +321,199 @@ static void handle_port_status(struct xhci_hcd *xhci,
 }
 
 /*
+ * This TD is defined by the TRBs starting at start_trb in start_seg and ending
+ * at end_trb, which may be in another segment. If the suspect DMA address is a
+ * TRB in this TD, this function returns that TRB's segment. Otherwise it
+ * returns 0.
+ */
+static struct xhci_segment *trb_in_td(
+		struct xhci_segment *start_seg,
+		union xhci_trb *start_trb,
+		union xhci_trb *end_trb,
+		dma_addr_t suspect_dma)
+{
+	dma_addr_t start_dma;
+	dma_addr_t end_seg_dma;
+	dma_addr_t end_trb_dma;
+	struct xhci_segment *cur_seg;
+
+	start_dma = trb_virt_to_dma(start_seg, start_trb);
+	cur_seg = start_seg;
+
+	do {
+		/*
+		 * Last TRB is a link TRB (unless we start inserting links in
+		 * the middle, FIXME if you do)
+		 */
+		end_seg_dma = trb_virt_to_dma(cur_seg, &start_seg->trbs[TRBS_PER_SEGMENT - 2]);
+		/* If the end TRB isn't in this segment, this is set to 0 */
+		end_trb_dma = trb_virt_to_dma(cur_seg, end_trb);
+
+		if (end_trb_dma > 0) {
+			/* The end TRB is in this segment, so suspect should be here */
+			if (start_dma <= end_trb_dma) {
+				if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
+					return cur_seg;
+			} else {
+				/* Case for one segment with
+				 * a TD wrapped around to the top
+				 */
+				if ((suspect_dma >= start_dma &&
+						suspect_dma <= end_seg_dma) ||
+						(suspect_dma >= cur_seg->dma &&
+						 suspect_dma <= end_trb_dma))
+					return cur_seg;
+			}
+			return 0;
+		} else {
+			/* Might still be somewhere in this segment */
+			if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
+				return cur_seg;
+		}
+		cur_seg = cur_seg->next;
+		start_dma = trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
+	} while (1);
+
+}
+
+/*
+ * If this function returns an error condition, it means it got a Transfer
+ * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
+ * At this point, the host controller is probably hosed and should be reset.
+ */
+static int handle_tx_event(struct xhci_hcd *xhci,
+		struct xhci_transfer_event *event)
+{
+	struct xhci_virt_device *xdev;
+	struct xhci_ring *ep_ring;
+	int ep_index;
+	struct xhci_td *td = 0;
+	dma_addr_t event_dma;
+	struct xhci_segment *event_seg;
+	union xhci_trb *event_trb;
+	struct urb *urb = NULL;
+	int status = -EINPROGRESS;
+
+	xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)];
+	if (!xdev) {
+		xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
+		return -ENODEV;
+	}
+
+	/* Endpoint ID is 1 based, our index is zero based */
+	ep_index = TRB_TO_EP_ID(event->flags) - 1;
+	ep_ring = xdev->ep_rings[ep_index];
+	if (!ep_ring || (xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
+		xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
+		return -ENODEV;
+	}
+
+	event_dma = event->buffer[0];
+	if (event->buffer[1] != 0)
+		xhci_warn(xhci, "WARN ignoring upper 32-bits of 64-bit TRB dma address\n");
+
+	/* This TRB should be in the TD at the head of this ring's TD list */
+	if (list_empty(&ep_ring->td_list)) {
+		xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
+				TRB_TO_SLOT_ID(event->flags), ep_index);
+		xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
+				(unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
+		xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
+		urb = NULL;
+		goto cleanup;
+	}
+	td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
+
+	/* Is this a TRB in the currently executing TD? */
+	event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
+			td->last_trb, event_dma);
+	if (!event_seg) {
+		/* HC is busted, give up! */
+		xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n");
+		return -ESHUTDOWN;
+	}
+	event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
+
+	/* Now update the urb's actual_length and give back to the core */
+	/* Was this a control transfer? */
+	if (usb_endpoint_xfer_control(&td->urb->ep->desc)) {
+		xhci_debug_trb(xhci, xhci->event_ring->dequeue);
+		switch (GET_COMP_CODE(event->transfer_len)) {
+		case COMP_SUCCESS:
+			if (event_trb == ep_ring->dequeue) {
+				xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n");
+				status = -ESHUTDOWN;
+			} else if (event_trb != td->last_trb) {
+				xhci_warn(xhci, "WARN: Success on ctrl data TRB without IOC set??\n");
+				status = -ESHUTDOWN;
+			} else {
+				xhci_dbg(xhci, "Successful control transfer!\n");
+				status = 0;
+			}
+			break;
+		case COMP_SHORT_TX:
+			xhci_warn(xhci, "WARN: short transfer on control ep\n");
+			status = -EREMOTEIO;
+			break;
+		case COMP_STALL:
+			xhci_warn(xhci, "WARN: Stalled control ep\n");
+			status = -EPIPE;
+			break;
+		case COMP_TRB_ERR:
+			xhci_warn(xhci, "WARN: TRB error on control ep\n");
+			status = -EILSEQ;
+			break;
+		case COMP_TX_ERR:
+			xhci_warn(xhci, "WARN: transfer error on control ep\n");
+			status = -EPROTO;
+			break;
+		case COMP_DB_ERR:
+			xhci_warn(xhci, "WARN: HC couldn't access mem fast enough on control TX\n");
+			status = -ENOSR;
+			break;
+		default:
+			xhci_dbg(xhci, "ERROR Unknown event condition, HC probably busted\n");
+			goto cleanup;
+		}
+		/*
+		 * Did we transfer any data, despite the errors that might have
+		 * happened?  I.e. did we get past the setup stage?
+		 */
+		if (event_trb != ep_ring->dequeue) {
+			/* The event was for the status stage */
+			if (event_trb == td->last_trb) {
+				td->urb->actual_length = td->urb->transfer_buffer_length;
+			} else {
+				/* The event was for the data stage */
+				td->urb->actual_length = td->urb->transfer_buffer_length -
+					TRB_LEN(event->transfer_len);
+			}
+		}
+		while (ep_ring->dequeue != td->last_trb)
+			inc_deq(xhci, ep_ring, false);
+		inc_deq(xhci, ep_ring, false);
+
+		/* Clean up the endpoint's TD list */
+		urb = td->urb;
+		list_del(&td->td_list);
+		kfree(td);
+	} else {
+		xhci_dbg(xhci, "FIXME do something for non-control transfers\n");
+	}
+cleanup:
+	inc_deq(xhci, xhci->event_ring, true);
+	set_hc_event_deq(xhci);
+
+	if (urb) {
+		usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
+		spin_unlock(&xhci->lock);
+		usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
+		spin_lock(&xhci->lock);
+	}
+	return 0;
+}
+
+/*
  * This function handles all OS-owned events on the event ring. It may drop
  * xhci->lock between event processing (e.g. to pass up port status changes).
  */
@@ -328,6 +521,7 @@ void handle_event(struct xhci_hcd *xhci)
 {
 	union xhci_trb *event;
 	int update_ptrs = 1;
+	int ret;
 
 	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
 		xhci->error_bitmask |= 1 << 1;
@@ -351,6 +545,13 @@ void handle_event(struct xhci_hcd *xhci)
 		handle_port_status(xhci, event);
 		update_ptrs = 0;
 		break;
+	case TRB_TYPE(TRB_TRANSFER):
+		ret = handle_tx_event(xhci, &event->trans_event);
+		if (ret < 0)
+			xhci->error_bitmask |= 1 << 9;
+		else
+			update_ptrs = 0;
+		break;
 	default:
 		xhci->error_bitmask |= 1 << 3;
 	}
@@ -364,6 +565,8 @@ void handle_event(struct xhci_hcd *xhci)
 	handle_event(xhci);
 }
 
+/**** Endpoint Ring Operations ****/
+
 /*
  * Generic function for queueing a TRB on a ring.
  * The caller must have checked to make sure there's room on the ring.
@@ -382,6 +585,186 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	inc_enq(xhci, ring, consumer);
 }
 
+/*
+ * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
+ * FIXME allocate segments if the ring is full.
+ */
+static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
+		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
+{
+	/* Make sure the endpoint has been added to xHC schedule */
+	xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
+	switch (ep_state) {
+	case EP_STATE_DISABLED:
+		/*
+		 * USB core changed config/interfaces without notifying us,
+		 * or hardware is reporting the wrong state.
+		 */
+		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
+		return -ENOENT;
+	case EP_STATE_HALTED:
+	case EP_STATE_ERROR:
+		xhci_warn(xhci, "WARN waiting for halt or error on ep "
+				"to be cleared\n");
+		/* FIXME event handling code for error needs to clear it */
+		/* XXX not sure if this should be -ENOENT or not */
+		return -EINVAL;
+	case EP_STATE_STOPPED:
+	case EP_STATE_RUNNING:
+		break;
+	default:
+		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
+		/*
+		 * FIXME issue Configure Endpoint command to try to get the HC
+		 * back into a known state.
+		 */
+		return -EINVAL;
+	}
+	if (!room_on_ring(xhci, ep_ring, num_trbs)) {
+		/* FIXME allocate more room */
+		xhci_err(xhci, "ERROR no room on ep ring\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+int xhci_prepare_transfer(struct xhci_hcd *xhci,
+		struct xhci_virt_device *xdev,
+		unsigned int ep_index,
+		unsigned int num_trbs,
+		struct urb *urb,
+		struct xhci_td **td,
+		gfp_t mem_flags)
+{
+	int ret;
+
+	ret = prepare_ring(xhci, xdev->ep_rings[ep_index],
+			xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK,
+			num_trbs, mem_flags);
+	if (ret)
+		return ret;
+	*td = kzalloc(sizeof(struct xhci_td), mem_flags);
+	if (!*td)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&(*td)->td_list);
+
+	ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
+	if (unlikely(ret)) {
+		kfree(*td);
+		return ret;
+	}
+
+	(*td)->urb = urb;
+	urb->hcpriv = (void *) (*td);
+	/* Add this TD to the tail of the endpoint ring's TD list */
+	list_add_tail(&(*td)->td_list, &xdev->ep_rings[ep_index]->td_list);
+
+	return 0;
+}
+
+/* Caller must have locked xhci->lock */
+int queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+		struct urb *urb, int slot_id, unsigned int ep_index)
+{
+	struct xhci_ring *ep_ring;
+	int num_trbs;
+	int ret;
+	struct usb_ctrlrequest *setup;
+	struct xhci_generic_trb *start_trb;
+	int start_cycle;
+	u32 field;
+	struct xhci_td *td;
+
+	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+
+	/*
+	 * Need to copy setup packet into setup TRB, so we can't use the setup
+	 * DMA address.
+	 */
+	if (!urb->setup_packet)
+		return -EINVAL;
+
+	if (!in_interrupt())
+		xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
+				slot_id, ep_index);
+	/* 1 TRB for setup, 1 for status */
+	num_trbs = 2;
+	/*
+	 * Don't need to check if we need additional event data and normal TRBs,
+	 * since data in control transfers will never get bigger than 16MB
+	 * XXX: can we get a buffer that crosses 64KB boundaries?
+	 */
+	if (urb->transfer_buffer_length > 0)
+		num_trbs++;
+	ret = xhci_prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs,
+			urb, &td, mem_flags);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
+	 * until we've finished creating all the other TRBs. The ring's cycle
+	 * state may change as we enqueue the other TRBs, so save it too.
+	 */
+	start_trb = &ep_ring->enqueue->generic;
+	start_cycle = ep_ring->cycle_state;
+
+	/* Queue setup TRB - see section 6.4.1.2.1 */
+	/* FIXME better way to translate setup_packet into two u32 fields? */
+	setup = (struct usb_ctrlrequest *) urb->setup_packet;
+	queue_trb(xhci, ep_ring, false,
+			/* FIXME endianness is probably going to bite my ass here. */
+			setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
+			setup->wIndex | setup->wLength << 16,
+			TRB_LEN(8) | TRB_INTR_TARGET(0),
+			/* Immediate data in pointer */
+			TRB_IDT | TRB_TYPE(TRB_SETUP));
+
+	/* If there's data, queue data TRBs */
+	field = 0;
+	if (urb->transfer_buffer_length > 0) {
+		if (setup->bRequestType & USB_DIR_IN)
+			field |= TRB_DIR_IN;
+		queue_trb(xhci, ep_ring, false,
+				lower_32_bits(urb->transfer_dma),
+				upper_32_bits(urb->transfer_dma),
+				TRB_LEN(urb->transfer_buffer_length) | TRB_INTR_TARGET(0),
+				/* Event on short tx */
+				field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state);
+	}
+
+	/* Save the DMA address of the last TRB in the TD */
+	td->last_trb = ep_ring->enqueue;
+
+	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
+	/* If the device sent data, the status stage is an OUT transfer */
+	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
+		field = 0;
+	else
+		field = TRB_DIR_IN;
+	queue_trb(xhci, ep_ring, false,
+			0,
+			0,
+			TRB_INTR_TARGET(0),
+			/* Event on completion */
+			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
+
+	/*
+	 * Pass all the TRBs to the hardware at once and make sure this write
+	 * isn't reordered.
+	 */
+	wmb();
+	start_trb->field[3] |= start_cycle;
+	field = xhci_readl(xhci, &xhci->dba->doorbell[slot_id]) & DB_MASK;
+	xhci_writel(xhci, field | EPI_TO_DB(ep_index), &xhci->dba->doorbell[slot_id]);
+	/* Flush PCI posted writes */
+	xhci_readl(xhci, &xhci->dba->doorbell[slot_id]);
+
+	return 0;
+}
+
+/**** Command Ring Operations ****/
+
 /* Generic function for queueing a command TRB on the command ring */
 static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, u32 field3, u32 field4)
 {