Diffstat (limited to 'drivers')

-rw-r--r--  drivers/usb/host/xhci-hcd.c    93
-rw-r--r--  drivers/usb/host/xhci-mem.c     2
-rw-r--r--  drivers/usb/host/xhci-pci.c     2
-rw-r--r--  drivers/usb/host/xhci-ring.c  383
-rw-r--r--  drivers/usb/host/xhci.h        29

5 files changed, 506 insertions(+), 3 deletions(-)
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index a01d2ee7435a..5d94b4ffac92 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -509,6 +509,99 @@ void xhci_shutdown(struct usb_hcd *hcd)
 
 /*-------------------------------------------------------------------------*/
 
+/**
+ * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
+ * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
+ * value to right shift 1 for the bitmask.
+ *
+ * Index = (epnum * 2) + direction - 1,
+ * where direction = 0 for OUT, 1 for IN.
+ * For control endpoints, the IN index is used (OUT index is unused), so
+ * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
+ */
+unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
+{
+	unsigned int index;
+	if (usb_endpoint_xfer_control(desc))
+		index = (unsigned int) (usb_endpoint_num(desc)*2);
+	else
+		index = (unsigned int) (usb_endpoint_num(desc)*2) +
+			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
+	return index;
+}
+
+/* Returns 1 if the arguments are OK;
+ * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
+ */
+int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
+		struct usb_host_endpoint *ep, int check_ep, const char *func) {
+	if (!hcd || (check_ep && !ep) || !udev) {
+		printk(KERN_DEBUG "xHCI %s called with invalid args\n",
+				func);
+		return -EINVAL;
+	}
+	if (!udev->parent) {
+		printk(KERN_DEBUG "xHCI %s called for root hub\n",
+				func);
+		return 0;
+	}
+	if (!udev->slot_id) {
+		printk(KERN_DEBUG "xHCI %s called with unaddressed device\n",
+				func);
+		return -EINVAL;
+	}
+	return 1;
+}
+
+/*
+ * non-error returns are a promise to giveback() the urb later
+ * we drop ownership so next owner (or urb unlink) can get it
+ */
+int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	unsigned long flags;
+	int ret = 0;
+	unsigned int slot_id, ep_index;
+
+	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
+		return -EINVAL;
+
+	slot_id = urb->dev->slot_id;
+	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+	/* Only support ep 0 control transfers for now */
+	if (ep_index != 0) {
+		xhci_dbg(xhci, "WARN: urb submitted to unsupported ep %x\n",
+				urb->ep->desc.bEndpointAddress);
+		return -ENOSYS;
+	}
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	if (!xhci->devs || !xhci->devs[slot_id]) {
+		if (!in_interrupt())
+			dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n");
+		/* Don't return with xhci->lock still held */
+		ret = -EINVAL;
+		goto exit;
+	}
+	if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+		if (!in_interrupt())
+			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
+		ret = -ESHUTDOWN;
+		goto exit;
+	}
+	ret = queue_ctrl_tx(xhci, mem_flags, urb, slot_id, ep_index);
+exit:
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	return ret;
+}
+
+/* Remove from hardware lists
+ * completions normally happen asynchronously
+ */
+int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+	return -ENOSYS;
+}
+
 /*
  * At this point, the struct usb_device is about to go away, the device has
  * disconnected, and all traffic has been stopped and the endpoints have been
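
A few worked values of the endpoint-index formula above, for reference (illustrative arithmetic, not part of the patch):

	/*
	 * ep0 (control) -> (0 * 2)           = 0
	 * ep1 OUT       -> (1 * 2) + 0 - 1   = 1
	 * ep1 IN        -> (1 * 2) + 1 - 1   = 2
	 * ep15 IN       -> (15 * 2) + 1 - 1  = 30
	 *
	 * Indices 0..30 therefore cover ep0 plus ep1-ep15 in both directions,
	 * which is why struct xhci_virt_device (in xhci.h below) sizes its
	 * ring array as ep_rings[31].
	 */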
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index d34b91a135a1..6ff2e298bff8 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -141,6 +141,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 	if (!ring)
 		return 0;
 
+	INIT_LIST_HEAD(&ring->td_list);
 	if (num_segs == 0)
 		return ring;
 
@@ -188,6 +189,7 @@ fail:
 	return 0;
 }
 
+/* All the xhci_tds in the ring's TD list should be freed at this point */
 void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 {
 	struct xhci_virt_device *dev;
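
A note on why the INIT_LIST_HEAD() addition matters: the transfer code in xhci-ring.c calls list_empty() and list_add_tail() on ring->td_list, and both follow the head's next/prev pointers, so the head must be set up in the ring allocator before the first TD is queued:

	/* After INIT_LIST_HEAD(&ring->td_list):
	 *   ring->td_list.next == ring->td_list.prev == &ring->td_list
	 * which is exactly the condition list_empty() tests for.
	 */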
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 7ac12b4ffe86..ff9a4ef22338 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -111,6 +111,8 @@ static const struct hc_driver xhci_pci_hc_driver = {
 	/*
 	 * managing i/o requests and associated device resources
 	 */
+	.urb_enqueue =		xhci_urb_enqueue,
+	.urb_dequeue =		xhci_urb_dequeue,
 	.alloc_dev =		xhci_alloc_dev,
 	.free_dev =		xhci_free_dev,
 	.address_device =	xhci_address_device,
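
For context (usbcore behavior of this era, not shown in the diff): usb_hcd_submit_urb() dispatches non-root-hub URBs through hcd->driver->urb_enqueue, and the unlink path ends in hcd->driver->urb_dequeue, so filling in these two hc_driver slots is what routes URB traffic into the new code in xhci-ring.c below.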
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 901ce70b30b8..f04162ae4374 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -321,6 +321,199 @@ static void handle_port_status(struct xhci_hcd *xhci,
 }
 
 /*
+ * This TD is defined by the TRBs starting at start_trb in start_seg and ending
+ * at end_trb, which may be in another segment.  If the suspect DMA address is a
+ * TRB in this TD, this function returns that TRB's segment.  Otherwise it
+ * returns 0.
+ */
+static struct xhci_segment *trb_in_td(
+		struct xhci_segment *start_seg,
+		union xhci_trb *start_trb,
+		union xhci_trb *end_trb,
+		dma_addr_t suspect_dma)
+{
+	dma_addr_t start_dma;
+	dma_addr_t end_seg_dma;
+	dma_addr_t end_trb_dma;
+	struct xhci_segment *cur_seg;
+
+	start_dma = trb_virt_to_dma(start_seg, start_trb);
+	cur_seg = start_seg;
+
+	do {
+		/*
+		 * Last TRB is a link TRB (unless we start inserting links in
+		 * the middle, FIXME if you do)
+		 */
+		end_seg_dma = trb_virt_to_dma(cur_seg,
+				&cur_seg->trbs[TRBS_PER_SEGMENT - 2]);
+		/* If the end TRB isn't in this segment, this is set to 0 */
+		end_trb_dma = trb_virt_to_dma(cur_seg, end_trb);
+
+		if (end_trb_dma > 0) {
+			/* The end TRB is in this segment, so suspect should be here */
+			if (start_dma <= end_trb_dma) {
+				if (suspect_dma >= start_dma &&
+						suspect_dma <= end_trb_dma)
+					return cur_seg;
+			} else {
+				/* Case for one segment with
+				 * a TD wrapped around to the top
+				 */
+				if ((suspect_dma >= start_dma &&
+						suspect_dma <= end_seg_dma) ||
+						(suspect_dma >= cur_seg->dma &&
+						 suspect_dma <= end_trb_dma))
+					return cur_seg;
+			}
+			return 0;
+		} else {
+			/* Might still be somewhere in this segment */
+			if (suspect_dma >= start_dma &&
+					suspect_dma <= end_seg_dma)
+				return cur_seg;
+		}
+		cur_seg = cur_seg->next;
+		start_dma = trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
+	} while (cur_seg != start_seg);
+
+	/* Walked the whole ring without finding it; don't loop forever */
+	return 0;
+}
+
+/*
+ * If this function returns an error condition, it means it got a Transfer
+ * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
+ * At this point, the host controller is probably hosed and should be reset.
+ */
+static int handle_tx_event(struct xhci_hcd *xhci,
+		struct xhci_transfer_event *event)
+{
+	struct xhci_virt_device *xdev;
+	struct xhci_ring *ep_ring;
+	int ep_index;
+	struct xhci_td *td = 0;
+	dma_addr_t event_dma;
+	struct xhci_segment *event_seg;
+	union xhci_trb *event_trb;
+	struct urb *urb = NULL;
+	int status = -EINPROGRESS;
+
+	xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)];
+	if (!xdev) {
+		xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
+		return -ENODEV;
+	}
+
+	/* Endpoint ID is 1 based, our index is zero based */
+	ep_index = TRB_TO_EP_ID(event->flags) - 1;
+	ep_ring = xdev->ep_rings[ep_index];
+	if (!ep_ring || (xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
+		xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
+		return -ENODEV;
+	}
+
+	event_dma = event->buffer[0];
+	if (event->buffer[1] != 0)
+		xhci_warn(xhci, "WARN ignoring upper 32-bits of 64-bit TRB dma address\n");
+
+	/* This TRB should be in the TD at the head of this ring's TD list */
+	if (list_empty(&ep_ring->td_list)) {
+		xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
+				TRB_TO_SLOT_ID(event->flags), ep_index);
+		xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
+				(unsigned int) (event->flags & TRB_TYPE_BITMASK) >> 10);
+		xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
+		urb = NULL;
+		goto cleanup;
+	}
+	td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
+
+	/* Is this a TRB in the currently executing TD? */
+	event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
+			td->last_trb, event_dma);
+	if (!event_seg) {
+		/* HC is busted, give up! */
+		xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n");
+		return -ESHUTDOWN;
+	}
+	event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
+
+	/* Now update the urb's actual_length and give back to the core */
+	/* Was this a control transfer? */
+	if (usb_endpoint_xfer_control(&td->urb->ep->desc)) {
+		xhci_debug_trb(xhci, xhci->event_ring->dequeue);
+		switch (GET_COMP_CODE(event->transfer_len)) {
+		case COMP_SUCCESS:
+			if (event_trb == ep_ring->dequeue) {
+				xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n");
+				status = -ESHUTDOWN;
+			} else if (event_trb != td->last_trb) {
+				xhci_warn(xhci, "WARN: Success on ctrl data TRB without IOC set??\n");
+				status = -ESHUTDOWN;
+			} else {
+				xhci_dbg(xhci, "Successful control transfer!\n");
+				status = 0;
+			}
+			break;
+		case COMP_SHORT_TX:
+			xhci_warn(xhci, "WARN: short transfer on control ep\n");
+			status = -EREMOTEIO;
+			break;
+		case COMP_STALL:
+			xhci_warn(xhci, "WARN: Stalled control ep\n");
+			status = -EPIPE;
+			break;
+		case COMP_TRB_ERR:
+			xhci_warn(xhci, "WARN: TRB error on control ep\n");
+			status = -EILSEQ;
+			break;
+		case COMP_TX_ERR:
+			xhci_warn(xhci, "WARN: transfer error on control ep\n");
+			status = -EPROTO;
+			break;
+		case COMP_DB_ERR:
+			xhci_warn(xhci, "WARN: HC couldn't access mem fast enough on control TX\n");
+			status = -ENOSR;
+			break;
+		default:
+			xhci_dbg(xhci, "ERROR Unknown event condition, HC probably busted\n");
+			goto cleanup;
+		}
+		/*
+		 * Did we transfer any data, despite the errors that might have
+		 * happened?  I.e. did we get past the setup stage?
+		 */
+		if (event_trb != ep_ring->dequeue) {
+			/* The event was for the status stage */
+			if (event_trb == td->last_trb) {
+				td->urb->actual_length = td->urb->transfer_buffer_length;
+			} else {
+				/* The event was for the data stage */
+				td->urb->actual_length = td->urb->transfer_buffer_length -
+					TRB_LEN(event->transfer_len);
+			}
+		}
+		while (ep_ring->dequeue != td->last_trb)
+			inc_deq(xhci, ep_ring, false);
+		inc_deq(xhci, ep_ring, false);
+
+		/* Clean up the endpoint's TD list */
+		urb = td->urb;
+		list_del(&td->td_list);
+		kfree(td);
+	} else {
+		xhci_dbg(xhci, "FIXME do something for non-control transfers\n");
+	}
+cleanup:
+	inc_deq(xhci, xhci->event_ring, true);
+	set_hc_event_deq(xhci);
+
+	if (urb) {
+		usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
+		spin_unlock(&xhci->lock);
+		usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
+		spin_lock(&xhci->lock);
+	}
+	return 0;
+}
+
+/*
  * This function handles all OS-owned events on the event ring.  It may drop
  * xhci->lock between event processing (e.g. to pass up port status changes).
  */
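
Distilled from trb_in_td() above, the per-segment containment test reduces to the following standalone predicate (a sketch with hypothetical names, assuming each segment's TRBs occupy one contiguous DMA range):

	static bool dma_in_td_within_seg(dma_addr_t td_start, dma_addr_t td_end,
			dma_addr_t seg_first, dma_addr_t seg_last,
			dma_addr_t suspect)
	{
		if (td_start <= td_end)
			/* TD lies flat within this segment */
			return suspect >= td_start && suspect <= td_end;
		/* TD wrapped past the link TRB back to the top of the segment */
		return (suspect >= td_start && suspect <= seg_last) ||
		       (suspect >= seg_first && suspect <= td_end);
	}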
@@ -328,6 +521,7 @@ void handle_event(struct xhci_hcd *xhci)
 {
 	union xhci_trb *event;
 	int update_ptrs = 1;
+	int ret;
 
 	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
 		xhci->error_bitmask |= 1 << 1;
@@ -351,6 +545,13 @@ void handle_event(struct xhci_hcd *xhci)
 		handle_port_status(xhci, event);
 		update_ptrs = 0;
 		break;
+	case TRB_TYPE(TRB_TRANSFER):
+		ret = handle_tx_event(xhci, &event->trans_event);
+		if (ret < 0)
+			xhci->error_bitmask |= 1 << 9;
+		else
+			update_ptrs = 0;
+		break;
 	default:
 		xhci->error_bitmask |= 1 << 3;
 	}
@@ -364,6 +565,8 @@ void handle_event(struct xhci_hcd *xhci)
 		handle_event(xhci);
 }
 
+/**** Endpoint Ring Operations ****/
+
 /*
  * Generic function for queueing a TRB on a ring.
  * The caller must have checked to make sure there's room on the ring.
@@ -382,6 +585,186 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	inc_enq(xhci, ring, consumer);
 }
 
+/*
+ * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
+ * FIXME allocate segments if the ring is full.
+ */
+static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
+		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
+{
+	/* Make sure the endpoint has been added to xHC schedule */
+	xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
+	switch (ep_state) {
+	case EP_STATE_DISABLED:
+		/*
+		 * USB core changed config/interfaces without notifying us,
+		 * or hardware is reporting the wrong state.
+		 */
+		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
+		return -ENOENT;
+	case EP_STATE_HALTED:
+	case EP_STATE_ERROR:
+		xhci_warn(xhci, "WARN waiting for halt or error on ep "
+				"to be cleared\n");
+		/* FIXME event handling code for error needs to clear it */
+		/* XXX not sure if this should be -ENOENT or not */
+		return -EINVAL;
+	case EP_STATE_STOPPED:
+	case EP_STATE_RUNNING:
+		break;
+	default:
+		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
+		/*
+		 * FIXME issue Configure Endpoint command to try to get the HC
+		 * back into a known state.
+		 */
+		return -EINVAL;
+	}
+	if (!room_on_ring(xhci, ep_ring, num_trbs)) {
+		/* FIXME allocate more room */
+		xhci_err(xhci, "ERROR no room on ep ring\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+int xhci_prepare_transfer(struct xhci_hcd *xhci,
+		struct xhci_virt_device *xdev,
+		unsigned int ep_index,
+		unsigned int num_trbs,
+		struct urb *urb,
+		struct xhci_td **td,
+		gfp_t mem_flags)
+{
+	int ret;
+
+	ret = prepare_ring(xhci, xdev->ep_rings[ep_index],
+			xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK,
+			num_trbs, mem_flags);
+	if (ret)
+		return ret;
+	*td = kzalloc(sizeof(struct xhci_td), mem_flags);
+	if (!*td)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&(*td)->td_list);
+
+	ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
+	if (unlikely(ret)) {
+		kfree(*td);
+		return ret;
+	}
+
+	(*td)->urb = urb;
+	urb->hcpriv = (void *) (*td);
+	/* Add this TD to the tail of the endpoint ring's TD list */
+	list_add_tail(&(*td)->td_list, &xdev->ep_rings[ep_index]->td_list);
+
+	return 0;
+}
+
+/* Caller must have locked xhci->lock */
+int queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+		struct urb *urb, int slot_id, unsigned int ep_index)
+{
+	struct xhci_ring *ep_ring;
+	int num_trbs;
+	int ret;
+	struct usb_ctrlrequest *setup;
+	struct xhci_generic_trb *start_trb;
+	int start_cycle;
+	u32 field;
+	struct xhci_td *td;
+
+	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+
+	/*
+	 * Need to copy setup packet into setup TRB, so we can't use the setup
+	 * DMA address.
+	 */
+	if (!urb->setup_packet)
+		return -EINVAL;
+
+	if (!in_interrupt())
+		xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
+				slot_id, ep_index);
+	/* 1 TRB for setup, 1 for status */
+	num_trbs = 2;
+	/*
+	 * Don't need to check if we need additional event data and normal TRBs,
+	 * since data in control transfers will never get bigger than 16MB
+	 * XXX: can we get a buffer that crosses 64KB boundaries?
+	 */
+	if (urb->transfer_buffer_length > 0)
+		num_trbs++;
+	ret = xhci_prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs,
+			urb, &td, mem_flags);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
+	 * until we've finished creating all the other TRBs.  The ring's cycle
+	 * state may change as we enqueue the other TRBs, so save it too.
+	 */
+	start_trb = &ep_ring->enqueue->generic;
+	start_cycle = ep_ring->cycle_state;
+
+	/* Queue setup TRB - see section 6.4.1.2.1 */
+	/* FIXME better way to translate setup_packet into two u32 fields? */
+	setup = (struct usb_ctrlrequest *) urb->setup_packet;
+	queue_trb(xhci, ep_ring, false,
+			/* FIXME endianness is probably going to bite my ass here. */
+			setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
+			setup->wIndex | setup->wLength << 16,
+			TRB_LEN(8) | TRB_INTR_TARGET(0),
+			/* Immediate data in pointer */
+			TRB_IDT | TRB_TYPE(TRB_SETUP));
+
+	/* If there's data, queue data TRBs */
+	field = 0;
+	if (urb->transfer_buffer_length > 0) {
+		if (setup->bRequestType & USB_DIR_IN)
+			field |= TRB_DIR_IN;
+		queue_trb(xhci, ep_ring, false,
+				lower_32_bits(urb->transfer_dma),
+				upper_32_bits(urb->transfer_dma),
+				TRB_LEN(urb->transfer_buffer_length) | TRB_INTR_TARGET(0),
+				/* Event on short tx */
+				field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state);
+	}
+
+	/* Save the position of the TD's last TRB (the status TRB, queued next) */
+	td->last_trb = ep_ring->enqueue;
+
+	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
+	/* If the device sent data, the status stage is an OUT transfer */
+	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
+		field = 0;
+	else
+		field = TRB_DIR_IN;
+	queue_trb(xhci, ep_ring, false,
+			0,
+			0,
+			TRB_INTR_TARGET(0),
+			/* Event on completion */
+			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
+
+	/*
+	 * Pass all the TRBs to the hardware at once and make sure this write
+	 * isn't reordered.
+	 */
+	wmb();
+	start_trb->field[3] |= start_cycle;
+	field = xhci_readl(xhci, &xhci->dba->doorbell[slot_id]) & DB_MASK;
+	xhci_writel(xhci, field | EPI_TO_DB(ep_index), &xhci->dba->doorbell[slot_id]);
+	/* Flush PCI posted writes */
+	xhci_readl(xhci, &xhci->dba->doorbell[slot_id]);
+
+	return 0;
+}
+
+/**** Command Ring Operations ****/
+
 /* Generic function for queueing a command TRB on the command ring */
 static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, u32 field3, u32 field4)
 {
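
On the "FIXME endianness" in the setup-stage queue_trb() call above: wValue, wIndex, and wLength in struct usb_ctrlrequest are __le16, so the raw shifts only yield the TRB layout of xHCI spec section 6.4.1.2.1 on little-endian hosts. A sketch of a byte-order-safe packing helper (hypothetical, not part of the patch; the resulting words would still need cpu_to_le32() when stored into ring memory on a big-endian machine):

	/* Pack the 8-byte SETUP packet into the setup TRB's two parameter
	 * words; le16_to_cpu() makes the shifts well-defined on any host. */
	static void xhci_pack_setup_words(const struct usb_ctrlrequest *setup,
			u32 *word0, u32 *word1)
	{
		*word0 = setup->bRequestType | setup->bRequest << 8 |
			 le16_to_cpu(setup->wValue) << 16;
		*word1 = le16_to_cpu(setup->wIndex) |
			 le16_to_cpu(setup->wLength) << 16;
	}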
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 4ef6b9e88504..fc8dcd2aa770 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -448,6 +448,9 @@ struct xhci_doorbell_array {
 #define DB_STREAM_ID_HOST	0x0
 #define DB_MASK			(0xff << 8)
 
+/* Endpoint Target - bits 0:7 */
+#define EPI_TO_DB(p)		(((p) + 1) & 0xff)
+
 
 /**
  * struct xhci_slot_ctx
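
EPI_TO_DB() encodes the xHCI rule that the doorbell's Endpoint Target is the 1-based Endpoint ID, one higher than the driver's 0-based endpoint index. Worked values (illustrative):

	/*
	 * EPI_TO_DB(0)  == 1   ep0, the control endpoint
	 * EPI_TO_DB(2)  == 3   ep1 IN  (see xhci_get_endpoint_index)
	 * EPI_TO_DB(30) == 31  ep15 IN, the last slot
	 */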
@@ -552,13 +555,18 @@ struct xhci_ep_ctx {
  * 4 - TRB error
  * 5-7 - reserved
  */
-#define EP_STATE	(0xf)
+#define EP_STATE_MASK		(0xf)
+#define EP_STATE_DISABLED	0
+#define EP_STATE_RUNNING	1
+#define EP_STATE_HALTED		2
+#define EP_STATE_STOPPED	3
+#define EP_STATE_ERROR		4
 /* Mult - Max number of bursts within an interval, in EP companion desc. */
 #define EP_MULT(p)		((p & 0x3) << 8)
 /* bits 10:14 are Max Primary Streams */
 /* bit 15 is Linear Stream Array */
 /* Interval - period between requests to an endpoint - 125u increments. */
 #define EP_INTERVAL	(0xff << 16)
 
 /* ep_info2 bitmasks */
 /*
@@ -618,7 +626,6 @@ struct xhci_virt_device {
 	dma_addr_t in_ctx_dma;
 	/* FIXME when stream support is added */
 	struct xhci_ring *ep_rings[31];
-	dma_addr_t ep_dma[31];
 	/* Status of the last command issued for this device */
 	u32 cmd_status;
 };
@@ -657,6 +664,9 @@ struct xhci_transfer_event {
 	u32 flags;
 } __attribute__ ((packed));
 
+/** Transfer Event bit fields **/
+#define TRB_TO_EP_ID(p)	(((p) >> 16) & 0x1f)
+
 /* Completion Code - only applicable for some types of TRBs */
 #define COMP_CODE_MASK	(0xff << 24)
 #define GET_COMP_CODE(p)	(((p) & COMP_CODE_MASK) >> 24)
@@ -877,6 +887,12 @@ union xhci_trb {
 #define TRBS_PER_SEGMENT	64
 #define SEGMENT_SIZE		(TRBS_PER_SEGMENT*16)
 
+struct xhci_td {
+	struct list_head	td_list;
+	struct urb		*urb;
+	union xhci_trb		*last_trb;
+};
+
 struct xhci_segment {
 	union xhci_trb		*trbs;
 	/* private to HCD */
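
Pulling the pieces together, the new struct gives each control URB this lifecycle in the diff above: xhci_prepare_transfer() allocates the xhci_td, points urb->hcpriv at it, and appends it to the endpoint ring's td_list; queue_ctrl_tx() records the TD's final TRB in last_trb before ringing the doorbell; handle_tx_event() then matches the completion's DMA pointer against the TD at the head of td_list, advances the ring's dequeue pointer past last_trb, and frees the TD after giving the URB back to the core.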
@@ -892,6 +908,7 @@ struct xhci_ring {
 	union xhci_trb		*dequeue;
 	struct xhci_segment	*deq_seg;
 	unsigned int		deq_updates;
+	struct list_head	td_list;
 	/*
 	 * Write the cycle state into the TRB cycle field to give ownership of
 	 * the TRB to the host controller (if we are the producer), or to check
@@ -1042,6 +1059,8 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, struct intr_reg *ir_set, int set_n
 void xhci_print_registers(struct xhci_hcd *xhci);
 void xhci_dbg_regs(struct xhci_hcd *xhci);
 void xhci_print_run_regs(struct xhci_hcd *xhci);
+void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb);
+void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb);
 void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg);
 void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring);
 void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
@@ -1055,6 +1074,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags);
 void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id);
 int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags);
 int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev);
+unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc);
 
 #ifdef CONFIG_PCI
 /* xHCI PCI glue */
@@ -1074,6 +1094,8 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd);
 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev);
 void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
 int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags);
+int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
 
 /* xHCI ring, segment, TRB, and TD functions */
 dma_addr_t trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
@@ -1083,6 +1105,7 @@ void handle_event(struct xhci_hcd *xhci);
 void set_hc_event_deq(struct xhci_hcd *xhci);
 int queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id);
 int queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id);
+int queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, int slot_id, unsigned int ep_index);
 
 /* xHCI roothub code */
 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,