author     Sarah Sharp <sarah.a.sharp@linux.intel.com>   2009-04-29 22:02:31 -0400
committer  Greg Kroah-Hartman <gregkh@suse.de>           2009-06-16 00:44:49 -0400
commit     ae636747146ea97efa18e04576acd3416e2514f5
tree       22e392df7126974c0ac4dc2fe516dc9e16a49873
parent     8a96c052283e68fe91a6c657c175b39bfed80bed
USB: xhci: URB cancellation support.
Add URB cancellation support to the xHCI host controller driver. This currently supports cancellation for endpoints that do not have streams enabled.

An URB is represented by a number of Transfer Request Blocks (TRBs) that are chained together to make one (or more) Transfer Descriptors (TDs) on an endpoint ring. The ring is comprised of contiguous segments, linked together with Link TRBs (which may or may not be chained into a TD).

To cancel an URB, we must stop the endpoint ring, make the hardware skip over the TDs in the URB (either by turning them into No-op TDs, or by moving the hardware's ring dequeue pointer past the last TRB in the last TD), and then restart the ring.

There are times when we must drop the xHCI lock during this process, like when we need to complete cancelled URBs. We must ensure that additional URBs can be marked as cancelled, and that new URBs can be enqueued (since the URB completion handlers can do either). The new endpoint ring variables cancels_pending and state (which can only be modified while holding the xHCI lock) ensure that future cancellation and enqueueing do not interrupt any pending cancellation code.

To facilitate cancellation, we must keep track of the starting ring segment, first TRB, and last TRB for each URB. We also need to keep track of the list of TDs that have been marked as cancelled, separate from the list of TDs that are queued for this endpoint. The new variables and cancellation list are stored in the xhci_td structure.

Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
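For readers following the call path: the USB core invokes the HCD's urb_dequeue hook (implemented here as xhci_urb_dequeue()) when a driver unlinks a submitted URB. A minimal, illustrative sketch of that driver-side trigger follows; it is not part of this patch, and the endpoint number, buffer, and completion handler are hypothetical.

/* Illustrative only -- not part of this patch. A typical class-driver
 * pattern that ends up in the HCD's urb_dequeue hook (xhci_urb_dequeue here).
 * The bulk endpoint number and buffer are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/usb.h>

static void example_bulk_complete(struct urb *urb)
{
	/* urb->status is -ECONNRESET for usb_unlink_urb() cancellations,
	 * -ENOENT for usb_kill_urb(). */
	pr_info("bulk urb completed, status %d\n", urb->status);
}

static int example_submit_then_cancel(struct usb_device *udev, void *buf, int len)
{
	struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
	int ret;

	if (!urb)
		return -ENOMEM;
	usb_fill_bulk_urb(urb, udev,
			usb_sndbulkpipe(udev, 1 /* hypothetical endpoint */),
			buf, len, example_bulk_complete, NULL);
	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret) {
		usb_free_urb(urb);
		return ret;
	}
	/* Asynchronous cancel: the URB is given back later, from the Stop
	 * Endpoint command handling added by this patch. */
	usb_unlink_urb(urb);
	usb_free_urb(urb);	/* drop our reference; the core holds its own */
	return 0;
}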
Diffstat (limited to 'drivers/usb')
-rw-r--r--   drivers/usb/host/xhci-hcd.c    64
-rw-r--r--   drivers/usb/host/xhci-mem.c     1
-rw-r--r--   drivers/usb/host/xhci-ring.c  491
-rw-r--r--   drivers/usb/host/xhci.h        31
4 files changed, 545 insertions, 42 deletions
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index e5fbdcdbf676..36e440ce88e5 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -613,12 +613,70 @@ exit:
613 return ret;
614}
615
616/* Remove from hardware lists
617 * completions normally happen asynchronously
616/*
617 * Remove the URB's TD from the endpoint ring. This may cause the HC to stop
618 * USB transfers, potentially stopping in the middle of a TRB buffer. The HC
619 * should pick up where it left off in the TD, unless a Set Transfer Ring
620 * Dequeue Pointer is issued.
621 *
622 * The TRBs that make up the buffers for the canceled URB will be "removed" from
623 * the ring. Since the ring is a contiguous structure, they can't be physically
624 * removed. Instead, there are two options:
625 *
626 * 1) If the HC is in the middle of processing the URB to be canceled, we
627 * simply move the ring's dequeue pointer past those TRBs using the Set
628 * Transfer Ring Dequeue Pointer command. This will be the common case,
629 * when drivers timeout on the last submitted URB and attempt to cancel.
630 *
631 * 2) If the HC is in the middle of a different TD, we turn the TRBs into a
632 * series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The
633 * HC will need to invalidate any TRBs it has cached after the stop
634 * endpoint command, as noted in the xHCI 0.95 errata.
635 *
636 * 3) The TD may have completed by the time the Stop Endpoint Command
637 * completes, so software needs to handle that case too.
638 *
639 * This function should protect against the TD enqueueing code ringing the
640 * doorbell while this code is waiting for a Stop Endpoint command to complete.
641 * It also needs to account for multiple cancellations happening at the same
642 * time for the same endpoint.
643 *
644 * Note that this function can be called in any context, or so says
645 * usb_hcd_unlink_urb()
646 */
647int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
648{
621 return -ENOSYS;
649 unsigned long flags;
650 int ret;
651 struct xhci_hcd *xhci;
652 struct xhci_td *td;
653 unsigned int ep_index;
654 struct xhci_ring *ep_ring;
655
656 xhci = hcd_to_xhci(hcd);
657 spin_lock_irqsave(&xhci->lock, flags);
658 /* Make sure the URB hasn't completed or been unlinked already */
659 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
660 if (ret || !urb->hcpriv)
661 goto done;
662
663 xhci_dbg(xhci, "Cancel URB 0x%x\n", (unsigned int) urb);
664 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
665 ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index];
666 td = (struct xhci_td *) urb->hcpriv;
667
668 ep_ring->cancels_pending++;
669 list_add_tail(&td->cancelled_td_list, &ep_ring->cancelled_td_list);
670 /* Queue a stop endpoint command, but only if this is
671 * the first cancellation to be handled.
672 */
673 if (ep_ring->cancels_pending == 1) {
674 queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
675 ring_cmd_db(xhci);
676 }
677done:
678 spin_unlock_irqrestore(&xhci->lock, flags);
679 return ret;
680}
681
682/* Drop an endpoint from a new bandwidth configuration for this device.
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 617db9c37770..e81d10a653ef 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -142,6 +142,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
142 return 0;
143
144 INIT_LIST_HEAD(&ring->td_list);
145 INIT_LIST_HEAD(&ring->cancelled_td_list);
146 if (num_segs == 0)
147 return ring;
148
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index c948288042e2..f967a6df83c7 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -112,6 +112,23 @@ static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
112 return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
113}
114
115/* Updates trb to point to the next TRB in the ring, and updates seg if the next
116 * TRB is in a new segment. This does not skip over link TRBs, and it does not
117 * affect the ring dequeue or enqueue pointers.
118 */
119static void next_trb(struct xhci_hcd *xhci,
120 struct xhci_ring *ring,
121 struct xhci_segment **seg,
122 union xhci_trb **trb)
123{
124 if (last_trb(xhci, ring, *seg, *trb)) {
125 *seg = (*seg)->next;
126 *trb = ((*seg)->trbs);
127 } else {
128 *trb = (*trb)++;
129 }
130}
131
132/*
133 * See Cycle bit rules. SW is the consumer for the event ring only.
134 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
@@ -250,6 +267,344 @@ void ring_cmd_db(struct xhci_hcd *xhci)
267 xhci_readl(xhci, &xhci->dba->doorbell[0]);
268}
269
270static void ring_ep_doorbell(struct xhci_hcd *xhci,
271 unsigned int slot_id,
272 unsigned int ep_index)
273{
274 struct xhci_ring *ep_ring;
275 u32 field;
276 __u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
277
278 ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
279 /* Don't ring the doorbell for this endpoint if there are pending
280 * cancellations because we don't want to interrupt processing.
281 */
282 if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)) {
283 field = xhci_readl(xhci, db_addr) & DB_MASK;
284 xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
285 /* Flush PCI posted writes - FIXME Matthew Wilcox says this
286 * isn't time-critical and we shouldn't make the CPU wait for
287 * the flush.
288 */
289 xhci_readl(xhci, db_addr);
290 }
291}
292
293/*
294 * Find the segment that trb is in. Start searching in start_seg.
295 * If we must move past a segment that has a link TRB with a toggle cycle state
296 * bit set, then we will toggle the value pointed at by cycle_state.
297 */
298static struct xhci_segment *find_trb_seg(
299 struct xhci_segment *start_seg,
300 union xhci_trb *trb, int *cycle_state)
301{
302 struct xhci_segment *cur_seg = start_seg;
303 struct xhci_generic_trb *generic_trb;
304
305 while (cur_seg->trbs > trb ||
306 &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
307 generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
308 if (TRB_TYPE(generic_trb->field[3]) == TRB_LINK &&
309 (generic_trb->field[3] & LINK_TOGGLE))
310 *cycle_state = ~(*cycle_state) & 0x1;
311 cur_seg = cur_seg->next;
312 if (cur_seg == start_seg)
313 /* Looped over the entire list. Oops! */
314 return 0;
315 }
316 return cur_seg;
317}
318
319struct dequeue_state {
320 struct xhci_segment *new_deq_seg;
321 union xhci_trb *new_deq_ptr;
322 int new_cycle_state;
323};
324
325/*
326 * Move the xHC's endpoint ring dequeue pointer past cur_td.
327 * Record the new state of the xHC's endpoint ring dequeue segment,
328 * dequeue pointer, and new consumer cycle state in state.
329 * Update our internal representation of the ring's dequeue pointer.
330 *
331 * We do this in three jumps:
332 * - First we update our new ring state to be the same as when the xHC stopped.
333 * - Then we traverse the ring to find the segment that contains
334 * the last TRB in the TD. We toggle the xHC's new cycle state when we pass
335 * any link TRBs with the toggle cycle bit set.
336 * - Finally we move the dequeue state one TRB further, toggling the cycle bit
337 * if we've moved it past a link TRB with the toggle cycle bit set.
338 */
339static void find_new_dequeue_state(struct xhci_hcd *xhci,
340 unsigned int slot_id, unsigned int ep_index,
341 struct xhci_td *cur_td, struct dequeue_state *state)
342{
343 struct xhci_virt_device *dev = xhci->devs[slot_id];
344 struct xhci_ring *ep_ring = dev->ep_rings[ep_index];
345 struct xhci_generic_trb *trb;
346
347 state->new_cycle_state = 0;
348 state->new_deq_seg = find_trb_seg(cur_td->start_seg,
349 ep_ring->stopped_trb,
350 &state->new_cycle_state);
351 if (!state->new_deq_seg)
352 BUG();
353 /* Dig out the cycle state saved by the xHC during the stop ep cmd */
354 state->new_cycle_state = 0x1 & dev->out_ctx->ep[ep_index].deq[0];
355
356 state->new_deq_ptr = cur_td->last_trb;
357 state->new_deq_seg = find_trb_seg(state->new_deq_seg,
358 state->new_deq_ptr,
359 &state->new_cycle_state);
360 if (!state->new_deq_seg)
361 BUG();
362
363 trb = &state->new_deq_ptr->generic;
364 if (TRB_TYPE(trb->field[3]) == TRB_LINK &&
365 (trb->field[3] & LINK_TOGGLE))
366 state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
367 next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
368
369 /* Don't update the ring cycle state for the producer (us). */
370 ep_ring->dequeue = state->new_deq_ptr;
371 ep_ring->deq_seg = state->new_deq_seg;
372}
373
374void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
375 struct xhci_td *cur_td)
376{
377 struct xhci_segment *cur_seg;
378 union xhci_trb *cur_trb;
379
380 for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
381 true;
382 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
383 if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK) ==
384 TRB_TYPE(TRB_LINK)) {
385 /* Unchain any chained Link TRBs, but
386 * leave the pointers intact.
387 */
388 cur_trb->generic.field[3] &= ~TRB_CHAIN;
389 xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
390 xhci_dbg(xhci, "Address = 0x%x (0x%x dma); "
391 "in seg 0x%x (0x%x dma)\n",
392 (unsigned int) cur_trb,
393 trb_virt_to_dma(cur_seg, cur_trb),
394 (unsigned int) cur_seg,
395 cur_seg->dma);
396 } else {
397 cur_trb->generic.field[0] = 0;
398 cur_trb->generic.field[1] = 0;
399 cur_trb->generic.field[2] = 0;
400 /* Preserve only the cycle bit of this TRB */
401 cur_trb->generic.field[3] &= TRB_CYCLE;
402 cur_trb->generic.field[3] |= TRB_TYPE(TRB_TR_NOOP);
403 xhci_dbg(xhci, "Cancel TRB 0x%x (0x%x dma) "
404 "in seg 0x%x (0x%x dma)\n",
405 (unsigned int) cur_trb,
406 trb_virt_to_dma(cur_seg, cur_trb),
407 (unsigned int) cur_seg,
408 cur_seg->dma);
409 }
410 if (cur_trb == cur_td->last_trb)
411 break;
412 }
413}
414
415static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
416 unsigned int ep_index, struct xhci_segment *deq_seg,
417 union xhci_trb *deq_ptr, u32 cycle_state);
418
419/*
420 * When we get a command completion for a Stop Endpoint Command, we need to
421 * unlink any cancelled TDs from the ring. There are two ways to do that:
422 *
423 * 1. If the HW was in the middle of processing the TD that needs to be
424 * cancelled, then we must move the ring's dequeue pointer past the last TRB
425 * in the TD with a Set Dequeue Pointer Command.
426 * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
427 * bit cleared) so that the HW will skip over them.
428 */
429static void handle_stopped_endpoint(struct xhci_hcd *xhci,
430 union xhci_trb *trb)
431{
432 unsigned int slot_id;
433 unsigned int ep_index;
434 struct xhci_ring *ep_ring;
435 struct list_head *entry;
436 struct xhci_td *cur_td = 0;
437 struct xhci_td *last_unlinked_td;
438
439 struct dequeue_state deq_state;
440#ifdef CONFIG_USB_HCD_STAT
441 ktime_t stop_time = ktime_get();
442#endif
443
444 memset(&deq_state, 0, sizeof(deq_state));
445 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
446 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
447 ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
448
449 if (list_empty(&ep_ring->cancelled_td_list))
450 return;
451
452 /* Fix up the ep ring first, so HW stops executing cancelled TDs.
453 * We have the xHCI lock, so nothing can modify this list until we drop
454 * it. We're also in the event handler, so we can't get re-interrupted
455 * if another Stop Endpoint command completes
456 */
457 list_for_each(entry, &ep_ring->cancelled_td_list) {
458 cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
459 xhci_dbg(xhci, "Cancelling TD starting at 0x%x, 0x%x (dma).\n",
460 (unsigned int) cur_td->first_trb,
461 trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
462 /*
463 * If we stopped on the TD we need to cancel, then we have to
464 * move the xHC endpoint ring dequeue pointer past this TD.
465 */
466 if (cur_td == ep_ring->stopped_td)
467 find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
468 &deq_state);
469 else
470 td_to_noop(xhci, ep_ring, cur_td);
471 /*
472 * The event handler won't see a completion for this TD anymore,
473 * so remove it from the endpoint ring's TD list. Keep it in
474 * the cancelled TD list for URB completion later.
475 */
476 list_del(&cur_td->td_list);
477 ep_ring->cancels_pending--;
478 }
479 last_unlinked_td = cur_td;
480
481 /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
482 if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
483 xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = 0x%x (0x%x dma), "
484 "new deq ptr = 0x%x (0x%x dma), new cycle = %u\n",
485 (unsigned int) deq_state.new_deq_seg,
486 deq_state.new_deq_seg->dma,
487 (unsigned int) deq_state.new_deq_ptr,
488 trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr),
489 deq_state.new_cycle_state);
490 queue_set_tr_deq(xhci, slot_id, ep_index,
491 deq_state.new_deq_seg,
492 deq_state.new_deq_ptr,
493 (u32) deq_state.new_cycle_state);
494 /* Stop the TD queueing code from ringing the doorbell until
495 * this command completes. The HC won't set the dequeue pointer
496 * if the ring is running, and ringing the doorbell starts the
497 * ring running.
498 */
499 ep_ring->state |= SET_DEQ_PENDING;
500 ring_cmd_db(xhci);
501 } else {
502 /* Otherwise just ring the doorbell to restart the ring */
503 ring_ep_doorbell(xhci, slot_id, ep_index);
504 }
505
506 /*
507 * Drop the lock and complete the URBs in the cancelled TD list.
508 * New TDs to be cancelled might be added to the end of the list before
509 * we can complete all the URBs for the TDs we already unlinked.
510 * So stop when we've completed the URB for the last TD we unlinked.
511 */
512 do {
513 cur_td = list_entry(ep_ring->cancelled_td_list.next,
514 struct xhci_td, cancelled_td_list);
515 list_del(&cur_td->cancelled_td_list);
516
517 /* Clean up the cancelled URB */
518#ifdef CONFIG_USB_HCD_STAT
519 hcd_stat_update(xhci->tp_stat, cur_td->urb->actual_length,
520 ktime_sub(stop_time, cur_td->start_time));
521#endif
522 cur_td->urb->hcpriv = NULL;
523 usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), cur_td->urb);
524
525 xhci_dbg(xhci, "Giveback cancelled URB 0x%x\n",
526 (unsigned int) cur_td->urb);
527 spin_unlock(&xhci->lock);
528 /* Doesn't matter what we pass for status, since the core will
529 * just overwrite it (because the URB has been unlinked).
530 */
531 usb_hcd_giveback_urb(xhci_to_hcd(xhci), cur_td->urb, 0);
532 kfree(cur_td);
533
534 spin_lock(&xhci->lock);
535 } while (cur_td != last_unlinked_td);
536
537 /* Return to the event handler with xhci->lock re-acquired */
538}
539
540/*
541 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
542 * we need to clear the set deq pending flag in the endpoint ring state, so that
543 * the TD queueing code can ring the doorbell again. We also need to ring the
544 * endpoint doorbell to restart the ring, but only if there aren't more
545 * cancellations pending.
546 */
547static void handle_set_deq_completion(struct xhci_hcd *xhci,
548 struct xhci_event_cmd *event,
549 union xhci_trb *trb)
550{
551 unsigned int slot_id;
552 unsigned int ep_index;
553 struct xhci_ring *ep_ring;
554 struct xhci_virt_device *dev;
555
556 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
557 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
558 dev = xhci->devs[slot_id];
559 ep_ring = dev->ep_rings[ep_index];
560
561 if (GET_COMP_CODE(event->status) != COMP_SUCCESS) {
562 unsigned int ep_state;
563 unsigned int slot_state;
564
565 switch (GET_COMP_CODE(event->status)) {
566 case COMP_TRB_ERR:
567 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
568 "of stream ID configuration\n");
569 break;
570 case COMP_CTX_STATE:
571 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
572 "to incorrect slot or ep state.\n");
573 ep_state = dev->out_ctx->ep[ep_index].ep_info;
574 ep_state &= EP_STATE_MASK;
575 slot_state = dev->out_ctx->slot.dev_state;
576 slot_state = GET_SLOT_STATE(slot_state);
577 xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
578 slot_state, ep_state);
579 break;
580 case COMP_EBADSLT:
581 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
582 "slot %u was not enabled.\n", slot_id);
583 break;
584 default:
585 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
586 "completion code of %u.\n",
587 GET_COMP_CODE(event->status));
588 break;
589 }
590 /* OK what do we do now? The endpoint state is hosed, and we
591 * should never get to this point if the synchronization between
592 * queueing and endpoint state is correct. This might happen
593 * if the device gets disconnected after we've finished
594 * cancelling URBs, which might not be an error...
595 */
596 } else {
597 xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq[0] = 0x%x, "
598 "deq[1] = 0x%x.\n",
599 dev->out_ctx->ep[ep_index].deq[0],
600 dev->out_ctx->ep[ep_index].deq[1]);
601 }
602
603 ep_ring->state &= ~SET_DEQ_PENDING;
604 ring_ep_doorbell(xhci, slot_id, ep_index);
605}
606
607
608static void handle_cmd_completion(struct xhci_hcd *xhci,
609 struct xhci_event_cmd *event)
610{
@@ -290,6 +645,12 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
645 xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
646 complete(&xhci->addr_dev);
647 break;
648 case TRB_TYPE(TRB_STOP_RING):
649 handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue);
650 break;
651 case TRB_TYPE(TRB_SET_DEQ):
652 handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
653 break;
654 case TRB_TYPE(TRB_CMD_NOOP):
655 ++xhci->noops_handled;
656 break;
@@ -346,11 +707,9 @@ static struct xhci_segment *trb_in_td(
707 cur_seg = start_seg;
708
709 do {
349 /*
350 * Last TRB is a link TRB (unless we start inserting links in
351 * the middle, FIXME if you do)
352 */
353 end_seg_dma = trb_virt_to_dma(cur_seg, &start_seg->trbs[TRBS_PER_SEGMENT - 2]);
710 /* We may get an event for a Link TRB in the middle of a TD */
711 end_seg_dma = trb_virt_to_dma(cur_seg,
712 &start_seg->trbs[TRBS_PER_SEGMENT - 1]);
713 /* If the end TRB isn't in this segment, this is set to 0 */
714 end_trb_dma = trb_virt_to_dma(cur_seg, end_trb);
715
@@ -396,7 +755,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
755 dma_addr_t event_dma;
756 struct xhci_segment *event_seg;
757 union xhci_trb *event_trb;
399 struct urb *urb;
758 struct urb *urb = 0;
759 int status = -EINPROGRESS;
760
761 xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)];
@@ -457,6 +816,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
816 case COMP_SUCCESS:
817 case COMP_SHORT_TX:
818 break;
819 case COMP_STOP:
820 xhci_dbg(xhci, "Stopped on Transfer TRB\n");
821 break;
822 case COMP_STOP_INVAL:
823 xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
824 break;
825 case COMP_STALL:
826 xhci_warn(xhci, "WARN: Stalled endpoint\n");
827 status = -EPIPE;
@@ -510,11 +875,15 @@ static int handle_tx_event(struct xhci_hcd *xhci,
875 if (event_trb != ep_ring->dequeue) {
876 /* The event was for the status stage */
877 if (event_trb == td->last_trb) {
513 td->urb->actual_length = td->urb->transfer_buffer_length;
878 td->urb->actual_length =
879 td->urb->transfer_buffer_length;
880 } else {
515 /* The event was for the data stage */
516 td->urb->actual_length = td->urb->transfer_buffer_length -
517 TRB_LEN(event->transfer_len);
881 /* Maybe the event was for the data stage? */
882 if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL)
883 /* We didn't stop on a link TRB in the middle */
884 td->urb->actual_length =
885 td->urb->transfer_buffer_length -
886 TRB_LEN(event->transfer_len);
887 }
888 }
889 } else {
@@ -573,29 +942,55 @@ static int handle_tx_event(struct xhci_hcd *xhci,
942 status = 0;
943 }
944 } else {
576 /* Slow path - walk the list, starting from the first
577 * TRB to get the actual length transferred
945 /* Slow path - walk the list, starting from the dequeue
946 * pointer, to get the actual length transferred.
947 */
948 union xhci_trb *cur_trb;
949 struct xhci_segment *cur_seg;
950
951 td->urb->actual_length = 0;
580 while (ep_ring->dequeue != event_trb) {
581 td->urb->actual_length += TRB_LEN(ep_ring->dequeue->generic.field[2]);
582 inc_deq(xhci, ep_ring, false);
952 for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
953 cur_trb != event_trb;
954 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
955 if (TRB_TYPE(cur_trb->generic.field[3]) != TRB_TR_NOOP &&
956 TRB_TYPE(cur_trb->generic.field[3]) != TRB_LINK)
957 td->urb->actual_length +=
958 TRB_LEN(cur_trb->generic.field[2]);
959 }
584 td->urb->actual_length += TRB_LEN(ep_ring->dequeue->generic.field[2]) -
585 TRB_LEN(event->transfer_len);
586
960 /* If the ring didn't stop on a Link or No-op TRB, add
961 * in the actual bytes transferred from the Normal TRB
962 */
963 if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL)
964 td->urb->actual_length +=
965 TRB_LEN(cur_trb->generic.field[2]) -
966 TRB_LEN(event->transfer_len);
967 }
968 }
589 /* Update ring dequeue pointer */
590 while (ep_ring->dequeue != td->last_trb)
969 /* The Endpoint Stop Command completion will take care of
970 * any stopped TDs. A stopped TD may be restarted, so don't update the
971 * ring dequeue pointer or take this TD off any lists yet.
972 */
973 if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL ||
974 GET_COMP_CODE(event->transfer_len) == COMP_STOP) {
975 ep_ring->stopped_td = td;
976 ep_ring->stopped_trb = event_trb;
977 } else {
978 /* Update ring dequeue pointer */
979 while (ep_ring->dequeue != td->last_trb)
980 inc_deq(xhci, ep_ring, false);
981 inc_deq(xhci, ep_ring, false);
592 inc_deq(xhci, ep_ring, false);
982
983 /* Clean up the endpoint's TD list */
984 urb = td->urb;
985 list_del(&td->td_list);
597 kfree(td);
598 urb->hcpriv = NULL;
986 /* Was this TD slated to be cancelled but completed anyway? */
987 if (!list_empty(&td->cancelled_td_list)) {
988 list_del(&td->cancelled_td_list);
989 ep_ring->cancels_pending--;
990 }
991 kfree(td);
992 urb->hcpriv = NULL;
993 }
994cleanup:
995 inc_deq(xhci, xhci->event_ring, true);
996 set_hc_event_deq(xhci);
@@ -744,6 +1139,7 @@ int xhci_prepare_transfer(struct xhci_hcd *xhci,
1139 if (!*td)
1140 return -ENOMEM;
1141 INIT_LIST_HEAD(&(*td)->td_list);
1142 INIT_LIST_HEAD(&(*td)->cancelled_td_list);
1143
1144 ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
1145 if (unlikely(ret)) {
@@ -755,6 +1151,8 @@ int xhci_prepare_transfer(struct xhci_hcd *xhci,
1151 urb->hcpriv = (void *) (*td);
1152 /* Add this TD to the tail of the endpoint ring's TD list */
1153 list_add_tail(&(*td)->td_list, &xdev->ep_rings[ep_index]->td_list);
1154 (*td)->start_seg = xdev->ep_rings[ep_index]->enq_seg;
1155 (*td)->first_trb = xdev->ep_rings[ep_index]->enqueue;
1156
1157 return 0;
1158}
@@ -823,19 +1221,13 @@ void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
1221 unsigned int ep_index, int start_cycle,
1222 struct xhci_generic_trb *start_trb, struct xhci_td *td)
1223{
826 u32 field;
827
1224 /*
1225 * Pass all the TRBs to the hardware at once and make sure this write
1226 * isn't reordered.
1227 */
1228 wmb();
1229 start_trb->field[3] |= start_cycle;
834 field = xhci_readl(xhci, &xhci->dba->doorbell[slot_id]) & DB_MASK;
835 xhci_writel(xhci, field | EPI_TO_DB(ep_index),
836 &xhci->dba->doorbell[slot_id]);
837 /* Flush PCI posted writes */
838 xhci_readl(xhci, &xhci->dba->doorbell[slot_id]);
1230 ring_ep_doorbell(xhci, slot_id, ep_index);
1231}
1232
1233int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
@@ -1221,3 +1613,36 @@ int queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 s
1613 return queue_command(xhci, in_ctx_ptr, 0, 0,
1614 TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id));
1615}
1616
1617int queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
1618 unsigned int ep_index)
1619{
1620 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
1621 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
1622 u32 type = TRB_TYPE(TRB_STOP_RING);
1623
1624 return queue_command(xhci, 0, 0, 0,
1625 trb_slot_id | trb_ep_index | type);
1626}
1627
1628/* Set Transfer Ring Dequeue Pointer command.
1629 * This should not be used for endpoints that have streams enabled.
1630 */
1631static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
1632 unsigned int ep_index, struct xhci_segment *deq_seg,
1633 union xhci_trb *deq_ptr, u32 cycle_state)
1634{
1635 dma_addr_t addr;
1636 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
1637 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
1638 u32 type = TRB_TYPE(TRB_SET_DEQ);
1639
1640 addr = trb_virt_to_dma(deq_seg, deq_ptr);
1641 if (addr == 0)
1642 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
1643 xhci_warn(xhci, "WARN deq seg = 0x%x, deq pt = 0x%x\n",
1644 (unsigned int) deq_seg,
1645 (unsigned int) deq_ptr);
1646 return queue_command(xhci, (u32) addr | cycle_state, 0, 0,
1647 trb_slot_id | trb_ep_index | type);
1648}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 06e07616631f..7b7103405c69 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -514,6 +514,7 @@ struct xhci_slot_ctx {
514/* bits 8:26 reserved */
515/* Slot state */
516#define SLOT_STATE (0x1f << 27)
517#define GET_SLOT_STATE(p) (((p) & (0x1f << 27)) >> 27)
518
519
520/**
@@ -765,6 +766,11 @@ struct xhci_event_cmd {
766#define TRB_TO_SLOT_ID(p) (((p) & (0xff<<24)) >> 24)
767#define SLOT_ID_FOR_TRB(p) (((p) & 0xff) << 24)
768
769/* Stop Endpoint TRB - ep_index to endpoint ID for this TRB */
770#define TRB_TO_EP_INDEX(p) ((((p) & (0x1f << 16)) >> 16) - 1)
771#define EP_ID_FOR_TRB(p) ((((p) + 1) & 0x1f) << 16)
772
773
774/* Port Status Change Event TRB fields */
775/* Port ID - bits 31:24 */
776#define GET_PORT_ID(p) (((p) & (0xff << 24)) >> 24)
@@ -893,12 +899,6 @@ union xhci_trb {
899#define TRB_MAX_BUFF_SHIFT 16
900#define TRB_MAX_BUFF_SIZE (1 << TRB_MAX_BUFF_SHIFT)
901
896struct xhci_td {
897 struct list_head td_list;
898 struct urb *urb;
899 union xhci_trb *last_trb;
900};
901
902struct xhci_segment {
903 union xhci_trb *trbs;
904 /* private to HCD */
@@ -906,6 +906,15 @@ struct xhci_segment {
906 dma_addr_t dma;
907} __attribute__ ((packed));
908
909struct xhci_td {
910 struct list_head td_list;
911 struct list_head cancelled_td_list;
912 struct urb *urb;
913 struct xhci_segment *start_seg;
914 union xhci_trb *first_trb;
915 union xhci_trb *last_trb;
916};
917
918struct xhci_ring {
919 struct xhci_segment *first_seg;
920 union xhci_trb *enqueue;
@@ -915,6 +924,14 @@ struct xhci_ring {
924 struct xhci_segment *deq_seg;
925 unsigned int deq_updates;
926 struct list_head td_list;
927 /* ---- Related to URB cancellation ---- */
928 struct list_head cancelled_td_list;
929 unsigned int cancels_pending;
930 unsigned int state;
931#define SET_DEQ_PENDING (1 << 0)
932 /* The TRB that was last reported in a stopped endpoint ring */
933 union xhci_trb *stopped_trb;
934 struct xhci_td *stopped_td;
935 /*
936 * Write the cycle state into the TRB cycle field to give ownership of
937 * the TRB to the host controller (if we are the producer), or to check
@@ -1119,6 +1136,8 @@ void handle_event(struct xhci_hcd *xhci);
1136void set_hc_event_deq(struct xhci_hcd *xhci);
1137int queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id);
1138int queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id);
1139int queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
1140 unsigned int ep_index);
1141int queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, int slot_id, unsigned int ep_index);
1142int queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, int slot_id, unsigned int ep_index);
1143int queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id);
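As a quick cross-check of the new Stop Endpoint TRB field macros above: the endpoint ID carried in bits 20:16 of the TRB is ep_index + 1 (the default control endpoint is ep_index 0, endpoint ID 1). Below is a small stand-alone sketch that exercises the round trip, with the two macros copied from the hunk above; the test harness itself is illustrative only.

/* Stand-alone check of the EP_ID_FOR_TRB()/TRB_TO_EP_INDEX() round trip
 * defined above: the TRB carries ep_index + 1 in bits 20:16.
 */
#include <assert.h>
#include <stdio.h>

#define TRB_TO_EP_INDEX(p)	((((p) & (0x1f << 16)) >> 16) - 1)
#define EP_ID_FOR_TRB(p)	((((p) + 1) & 0x1f) << 16)

int main(void)
{
	for (unsigned int ep_index = 0; ep_index < 31; ep_index++) {
		unsigned int field = EP_ID_FOR_TRB(ep_index);

		assert(TRB_TO_EP_INDEX(field) == ep_index);
	}
	/* ep_index 0 is the default control endpoint: endpoint ID 1 */
	printf("ep_index 0 -> TRB field 0x%08x\n", EP_ID_FOR_TRB(0));
	return 0;
}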