Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
 -rw-r--r--  drivers/usb/host/xhci-ring.c  305
 1 file changed, 218 insertions, 87 deletions
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 02d81985c45..aa88a067148 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -135,6 +135,7 @@ static void next_trb(struct xhci_hcd *xhci,
 static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
 {
 	union xhci_trb *next = ++(ring->dequeue);
+	unsigned long long addr;
 
 	ring->deq_updates++;
 	/* Update the dequeue pointer further if that was a link TRB or we're at
@@ -152,6 +153,13 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
 		ring->dequeue = ring->deq_seg->trbs;
 		next = ring->dequeue;
 	}
+	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
+	if (ring == xhci->event_ring)
+		xhci_dbg(xhci, "Event ring deq = 0x%llx (DMA)\n", addr);
+	else if (ring == xhci->cmd_ring)
+		xhci_dbg(xhci, "Command ring deq = 0x%llx (DMA)\n", addr);
+	else
+		xhci_dbg(xhci, "Ring deq = 0x%llx (DMA)\n", addr);
 }
 
 /*
@@ -171,6 +179,7 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
 {
 	u32 chain;
 	union xhci_trb *next;
+	unsigned long long addr;
 
 	chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
 	next = ++(ring->enqueue);
@@ -204,6 +213,13 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
 		ring->enqueue = ring->enq_seg->trbs;
 		next = ring->enqueue;
 	}
+	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
+	if (ring == xhci->event_ring)
+		xhci_dbg(xhci, "Event ring enq = 0x%llx (DMA)\n", addr);
+	else if (ring == xhci->cmd_ring)
+		xhci_dbg(xhci, "Command ring enq = 0x%llx (DMA)\n", addr);
+	else
+		xhci_dbg(xhci, "Ring enq = 0x%llx (DMA)\n", addr);
 }
 
 /*
@@ -237,7 +253,7 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
 
 void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
 {
-	u32 temp;
+	u64 temp;
 	dma_addr_t deq;
 
 	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
@@ -246,13 +262,15 @@ void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
 		xhci_warn(xhci, "WARN something wrong with SW event ring "
 				"dequeue ptr.\n");
 	/* Update HC event ring dequeue pointer */
-	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
 	temp &= ERST_PTR_MASK;
-	if (!in_interrupt())
-		xhci_dbg(xhci, "// Write event ring dequeue pointer\n");
-	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
-	xhci_writel(xhci, (deq & ~ERST_PTR_MASK) | temp,
-			&xhci->ir_set->erst_dequeue[0]);
+	/* Don't clear the EHB bit (which is RW1C) because
+	 * there might be more events to service.
+	 */
+	temp &= ~ERST_EHB;
+	xhci_dbg(xhci, "// Write event ring dequeue pointer, preserving EHB bit\n");
+	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
+			&xhci->ir_set->erst_dequeue);
 }
 
 /* Ring the host controller doorbell after placing a command on the ring */
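Note on the hunk above: the event ring dequeue register packs a 64-bit TRB address together with low-order flag bits, including Event Handler Busy (EHB), which is write-1-to-clear. A standalone sketch of the read-modify-write, with mask values taken from xhci.h of this era (treat them as illustrative and verify against the header):

#include <stdint.h>
#include <stdio.h>

#define ERST_PTR_MASK	0xfULL		/* low 4 bits hold flags, not address */
#define ERST_EHB	(1ULL << 3)	/* Event Handler Busy, write-1-to-clear */

/* Compose the value xhci_set_hc_event_deq() writes back: keep the flag bits
 * read from hardware, force EHB to 0 (writing 0 to a write-1-to-clear bit
 * leaves it alone, so pending events are not acknowledged early), and splice
 * in the new dequeue pointer.
 */
static uint64_t new_erst_dequeue(uint64_t hw_value, uint64_t deq_dma)
{
	uint64_t temp = hw_value & ERST_PTR_MASK;

	temp &= ~ERST_EHB;
	return (deq_dma & ~ERST_PTR_MASK) | temp;
}

int main(void)
{
	/* Example: flags 0x9 read back, new pointer 0x12340 */
	printf("0x%llx\n",
			(unsigned long long) new_erst_dequeue(0x9, 0x12340));
	return 0;
}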
@@ -279,7 +297,8 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
 	/* Don't ring the doorbell for this endpoint if there are pending
 	 * cancellations because the we don't want to interrupt processing.
 	 */
-	if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)) {
+	if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)
+			&& !(ep_ring->state & EP_HALTED)) {
 		field = xhci_readl(xhci, db_addr) & DB_MASK;
 		xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
 		/* Flush PCI posted writes - FIXME Matthew Wilcox says this
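Note: the doorbell guard above now has three conditions. A distilled predicate, with flag values borrowed from xhci.h (an assumption on my part; check the header if the exact bits matter):

#include <stdbool.h>
#include <stdint.h>

#define SET_DEQ_PENDING	(1 << 0)	/* Set TR Dequeue Pointer cmd in flight */
#define EP_HALTED	(1 << 1)	/* endpoint stalled, awaiting Reset EP */

/* Ringing the doorbell restarts the ring, which would race with TD
 * cancellation, a pending Set TR Dequeue Pointer command, or halt recovery,
 * so all three states must be clear first.
 */
static bool ok_to_ring_doorbell(unsigned int cancels_pending, uint32_t state)
{
	return !cancels_pending &&
		!(state & SET_DEQ_PENDING) &&
		!(state & EP_HALTED);
}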
@@ -316,12 +335,6 @@ static struct xhci_segment *find_trb_seg(
 	return cur_seg;
 }
 
-struct dequeue_state {
-	struct xhci_segment *new_deq_seg;
-	union xhci_trb *new_deq_ptr;
-	int new_cycle_state;
-};
-
 /*
  * Move the xHC's endpoint ring dequeue pointer past cur_td.
  * Record the new state of the xHC's endpoint ring dequeue segment,
@@ -336,24 +349,30 @@ struct dequeue_state {
  * - Finally we move the dequeue state one TRB further, toggling the cycle bit
  *   if we've moved it past a link TRB with the toggle cycle bit set.
  */
-static void find_new_dequeue_state(struct xhci_hcd *xhci,
+void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 		unsigned int slot_id, unsigned int ep_index,
-		struct xhci_td *cur_td, struct dequeue_state *state)
+		struct xhci_td *cur_td, struct xhci_dequeue_state *state)
 {
 	struct xhci_virt_device *dev = xhci->devs[slot_id];
 	struct xhci_ring *ep_ring = dev->ep_rings[ep_index];
 	struct xhci_generic_trb *trb;
+	struct xhci_ep_ctx *ep_ctx;
+	dma_addr_t addr;
 
 	state->new_cycle_state = 0;
+	xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
 	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
 			ep_ring->stopped_trb,
 			&state->new_cycle_state);
 	if (!state->new_deq_seg)
 		BUG();
 	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
-	state->new_cycle_state = 0x1 & dev->out_ctx->ep[ep_index].deq[0];
+	xhci_dbg(xhci, "Finding endpoint context\n");
+	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
+	state->new_cycle_state = 0x1 & ep_ctx->deq;
 
 	state->new_deq_ptr = cur_td->last_trb;
+	xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
 	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
 			state->new_deq_ptr,
 			&state->new_cycle_state);
@@ -367,6 +386,12 @@ static void find_new_dequeue_state(struct xhci_hcd *xhci,
 	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
 
 	/* Don't update the ring cycle state for the producer (us). */
+	xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
+			state->new_deq_seg);
+	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
+	xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
+			(unsigned long long) addr);
+	xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n");
 	ep_ring->dequeue = state->new_deq_ptr;
 	ep_ring->deq_seg = state->new_deq_seg;
 }
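Note: the comment block in xhci_find_new_dequeue_state() describes walking the ring across segment boundaries while tracking the cycle bit. A toy model of that walk (stand-in types, not the driver's structs):

#include <stdbool.h>

struct toy_trb {
	bool is_link;		/* link TRB chains to the next segment */
	bool toggles_cycle;	/* link TRB with the toggle-cycle flag set */
};

/* Advance one TRB: crossing a link TRB whose toggle-cycle flag is set flips
 * the cycle state, which is how producer and consumer agree on TRB ownership
 * after the ring wraps.
 */
static void advance(struct toy_trb **trb, int *cycle_state,
		struct toy_trb *next_seg_trbs)
{
	if ((*trb)->is_link) {
		if ((*trb)->toggles_cycle)
			*cycle_state ^= 1;
		*trb = next_seg_trbs;
	} else {
		(*trb)++;
	}
}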
@@ -416,6 +441,30 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
 		unsigned int ep_index, struct xhci_segment *deq_seg,
 		union xhci_trb *deq_ptr, u32 cycle_state);
 
+void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
+		struct xhci_ring *ep_ring, unsigned int slot_id,
+		unsigned int ep_index, struct xhci_dequeue_state *deq_state)
+{
+	xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
+			"new deq ptr = %p (0x%llx dma), new cycle = %u\n",
+			deq_state->new_deq_seg,
+			(unsigned long long)deq_state->new_deq_seg->dma,
+			deq_state->new_deq_ptr,
+			(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
+			deq_state->new_cycle_state);
+	queue_set_tr_deq(xhci, slot_id, ep_index,
+			deq_state->new_deq_seg,
+			deq_state->new_deq_ptr,
+			(u32) deq_state->new_cycle_state);
+	/* Stop the TD queueing code from ringing the doorbell until
+	 * this command completes.  The HC won't set the dequeue pointer
+	 * if the ring is running, and ringing the doorbell starts the
+	 * ring running.
+	 */
+	ep_ring->state |= SET_DEQ_PENDING;
+	xhci_ring_cmd_db(xhci);
+}
+
 /*
  * When we get a command completion for a Stop Endpoint Command, we need to
  * unlink any cancelled TDs from the ring.  There are two ways to do that:
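Note: the diff splits "compute the new dequeue state" from "issue the command", so paths other than the stop-endpoint handler (for example, stall recovery) can reuse both halves. A sketch of a caller, assuming xhci->lock is held as it is elsewhere in this file; the wrapper name is mine:

static void move_deq_past_td(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct xhci_td *td)
{
	struct xhci_dequeue_state deq_state;
	struct xhci_ring *ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];

	/* Compute where the hardware dequeue pointer should land... */
	xhci_find_new_dequeue_state(xhci, slot_id, ep_index, td, &deq_state);
	/* ...then queue the Set TR Dequeue Pointer command; this also sets
	 * SET_DEQ_PENDING so transfer doorbells stay quiet until completion.
	 */
	xhci_queue_new_dequeue_state(xhci, ep_ring, slot_id, ep_index,
			&deq_state);
}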
@@ -436,7 +485,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 	struct xhci_td *cur_td = 0;
 	struct xhci_td *last_unlinked_td;
 
-	struct dequeue_state deq_state;
+	struct xhci_dequeue_state deq_state;
 #ifdef CONFIG_USB_HCD_STAT
 	ktime_t stop_time = ktime_get();
 #endif
@@ -464,7 +513,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 		 * move the xHC endpoint ring dequeue pointer past this TD.
 		 */
 		if (cur_td == ep_ring->stopped_td)
-			find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
+			xhci_find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
 					&deq_state);
 		else
 			td_to_noop(xhci, ep_ring, cur_td);
@@ -480,24 +529,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 
 	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
 	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
-		xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
-				"new deq ptr = %p (0x%llx dma), new cycle = %u\n",
-				deq_state.new_deq_seg,
-				(unsigned long long)deq_state.new_deq_seg->dma,
-				deq_state.new_deq_ptr,
-				(unsigned long long)xhci_trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr),
-				deq_state.new_cycle_state);
-		queue_set_tr_deq(xhci, slot_id, ep_index,
-				deq_state.new_deq_seg,
-				deq_state.new_deq_ptr,
-				(u32) deq_state.new_cycle_state);
-		/* Stop the TD queueing code from ringing the doorbell until
-		 * this command completes.  The HC won't set the dequeue pointer
-		 * if the ring is running, and ringing the doorbell starts the
-		 * ring running.
-		 */
-		ep_ring->state |= SET_DEQ_PENDING;
-		xhci_ring_cmd_db(xhci);
+		xhci_queue_new_dequeue_state(xhci, ep_ring,
+				slot_id, ep_index, &deq_state);
 	} else {
 		/* Otherwise just ring the doorbell to restart the ring */
 		ring_ep_doorbell(xhci, slot_id, ep_index);
@@ -551,11 +584,15 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 	unsigned int ep_index;
 	struct xhci_ring *ep_ring;
 	struct xhci_virt_device *dev;
+	struct xhci_ep_ctx *ep_ctx;
+	struct xhci_slot_ctx *slot_ctx;
 
 	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
 	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
 	dev = xhci->devs[slot_id];
 	ep_ring = dev->ep_rings[ep_index];
+	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
+	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
 
 	if (GET_COMP_CODE(event->status) != COMP_SUCCESS) {
 		unsigned int ep_state;
@@ -569,9 +606,9 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 	case COMP_CTX_STATE:
 		xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
 				"to incorrect slot or ep state.\n");
-		ep_state = dev->out_ctx->ep[ep_index].ep_info;
+		ep_state = ep_ctx->ep_info;
 		ep_state &= EP_STATE_MASK;
-		slot_state = dev->out_ctx->slot.dev_state;
+		slot_state = slot_ctx->dev_state;
 		slot_state = GET_SLOT_STATE(slot_state);
 		xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
 				slot_state, ep_state);
@@ -593,16 +630,33 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 	 * cancelling URBs, which might not be an error...
 	 */
 	} else {
-		xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq[0] = 0x%x, "
-				"deq[1] = 0x%x.\n",
-				dev->out_ctx->ep[ep_index].deq[0],
-				dev->out_ctx->ep[ep_index].deq[1]);
+		xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
+				ep_ctx->deq);
 	}
 
 	ep_ring->state &= ~SET_DEQ_PENDING;
 	ring_ep_doorbell(xhci, slot_id, ep_index);
 }
 
+static void handle_reset_ep_completion(struct xhci_hcd *xhci,
+		struct xhci_event_cmd *event,
+		union xhci_trb *trb)
+{
+	int slot_id;
+	unsigned int ep_index;
+
+	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
+	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
+	/* This command will only fail if the endpoint wasn't halted,
+	 * but we don't care.
+	 */
+	xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
+			(unsigned int) GET_COMP_CODE(event->status));
+
+	/* Clear our internal halted state and restart the ring */
+	xhci->devs[slot_id]->ep_rings[ep_index]->state &= ~EP_HALTED;
+	ring_ep_doorbell(xhci, slot_id, ep_index);
+}
 
 static void handle_cmd_completion(struct xhci_hcd *xhci,
 		struct xhci_event_cmd *event)
@@ -611,7 +665,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 	u64 cmd_dma;
 	dma_addr_t cmd_dequeue_dma;
 
-	cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0];
+	cmd_dma = event->cmd_trb;
 	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
 			xhci->cmd_ring->dequeue);
 	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
@@ -653,6 +707,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 	case TRB_TYPE(TRB_CMD_NOOP):
 		++xhci->noops_handled;
 		break;
+	case TRB_TYPE(TRB_RESET_EP):
+		handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
+		break;
 	default:
 		/* Skip over unknown commands on the event ring */
 		xhci->error_bitmask |= 1 << 6;
@@ -756,7 +813,9 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	union xhci_trb *event_trb;
 	struct urb *urb = 0;
 	int status = -EINPROGRESS;
+	struct xhci_ep_ctx *ep_ctx;
 
+	xhci_dbg(xhci, "In %s\n", __func__);
 	xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)];
 	if (!xdev) {
 		xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
@@ -765,17 +824,17 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 
 	/* Endpoint ID is 1 based, our index is zero based */
 	ep_index = TRB_TO_EP_ID(event->flags) - 1;
+	xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
 	ep_ring = xdev->ep_rings[ep_index];
-	if (!ep_ring || (xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
+	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+	if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
 		xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
 		return -ENODEV;
 	}
 
-	event_dma = event->buffer[0];
-	if (event->buffer[1] != 0)
-		xhci_warn(xhci, "WARN ignoring upper 32-bits of 64-bit TRB dma address\n");
-
+	event_dma = event->buffer;
 	/* This TRB should be in the TD at the head of this ring's TD list */
+	xhci_dbg(xhci, "%s - checking for list empty\n", __func__);
 	if (list_empty(&ep_ring->td_list)) {
 		xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
 				TRB_TO_SLOT_ID(event->flags), ep_index);
@@ -785,11 +844,14 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		urb = NULL;
 		goto cleanup;
 	}
+	xhci_dbg(xhci, "%s - getting list entry\n", __func__);
 	td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
 
 	/* Is this a TRB in the currently executing TD? */
+	xhci_dbg(xhci, "%s - looking for TD\n", __func__);
 	event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
 			td->last_trb, event_dma);
+	xhci_dbg(xhci, "%s - found event_seg = %p\n", __func__, event_seg);
 	if (!event_seg) {
 		/* HC is busted, give up! */
 		xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n");
@@ -798,10 +860,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
 	xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
 			(unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
-	xhci_dbg(xhci, "Offset 0x00 (buffer[0]) = 0x%x\n",
-			(unsigned int) event->buffer[0]);
-	xhci_dbg(xhci, "Offset 0x04 (buffer[0]) = 0x%x\n",
-			(unsigned int) event->buffer[1]);
+	xhci_dbg(xhci, "Offset 0x00 (buffer lo) = 0x%x\n",
+			lower_32_bits(event->buffer));
+	xhci_dbg(xhci, "Offset 0x04 (buffer hi) = 0x%x\n",
+			upper_32_bits(event->buffer));
 	xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
 			(unsigned int) event->transfer_len);
 	xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
@@ -823,6 +885,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		break;
 	case COMP_STALL:
 		xhci_warn(xhci, "WARN: Stalled endpoint\n");
+		ep_ring->state |= EP_HALTED;
 		status = -EPIPE;
 		break;
 	case COMP_TRB_ERR:
@@ -833,6 +896,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		xhci_warn(xhci, "WARN: transfer error on endpoint\n");
 		status = -EPROTO;
 		break;
+	case COMP_BABBLE:
+		xhci_warn(xhci, "WARN: babble error on endpoint\n");
+		status = -EOVERFLOW;
+		break;
 	case COMP_DB_ERR:
 		xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
 		status = -ENOSR;
@@ -874,15 +941,26 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		if (event_trb != ep_ring->dequeue) {
 			/* The event was for the status stage */
 			if (event_trb == td->last_trb) {
-				td->urb->actual_length =
-					td->urb->transfer_buffer_length;
+				if (td->urb->actual_length != 0) {
+					/* Don't overwrite a previously set error code */
+					if (status == -EINPROGRESS || status == 0)
+						/* Did we already see a short data stage? */
+						status = -EREMOTEIO;
+				} else {
+					td->urb->actual_length =
+						td->urb->transfer_buffer_length;
+				}
 			} else {
 			/* Maybe the event was for the data stage? */
-				if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL)
+				if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL) {
 					/* We didn't stop on a link TRB in the middle */
 					td->urb->actual_length =
 						td->urb->transfer_buffer_length -
 						TRB_LEN(event->transfer_len);
+					xhci_dbg(xhci, "Waiting for status stage event\n");
+					urb = NULL;
+					goto cleanup;
+				}
 			}
 		}
 	} else {
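Note: the control-transfer bookkeeping above is subtle: a short data stage records the URB's actual length, and only at the status stage does that turn into -EREMOTEIO, without clobbering an earlier error. Distilled into a standalone helper (errno semantics per the USB core; the helper name is mine):

#include <errno.h>

/* Decide the URB status at a control transfer's status stage: a non-zero
 * actual_length here means the data stage came up short, which maps to
 * -EREMOTEIO unless a real error was already recorded.
 */
static int status_stage_status(int cur_status, unsigned int actual_length)
{
	if (actual_length != 0 &&
			(cur_status == -EINPROGRESS || cur_status == 0))
		return -EREMOTEIO;
	return cur_status;
}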
@@ -929,16 +1007,20 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 					TRB_LEN(event->transfer_len));
 				td->urb->actual_length = 0;
 			}
-			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
-				status = -EREMOTEIO;
-			else
-				status = 0;
+			/* Don't overwrite a previously set error code */
+			if (status == -EINPROGRESS) {
+				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+					status = -EREMOTEIO;
+				else
+					status = 0;
+			}
 		} else {
 			td->urb->actual_length = td->urb->transfer_buffer_length;
 			/* Ignore a short packet completion if the
 			 * untransferred length was zero.
 			 */
-			status = 0;
+			if (status == -EREMOTEIO)
+				status = 0;
 		}
 	} else {
 		/* Slow path - walk the list, starting from the dequeue
@@ -965,19 +1047,30 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 				TRB_LEN(event->transfer_len);
 		}
 	}
-	/* The Endpoint Stop Command completion will take care of
-	 * any stopped TDs.  A stopped TD may be restarted, so don't update the
-	 * ring dequeue pointer or take this TD off any lists yet.
-	 */
 	if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL ||
 			GET_COMP_CODE(event->transfer_len) == COMP_STOP) {
+		/* The Endpoint Stop Command completion will take care of any
+		 * stopped TDs.  A stopped TD may be restarted, so don't update
+		 * the ring dequeue pointer or take this TD off any lists yet.
+		 */
 		ep_ring->stopped_td = td;
 		ep_ring->stopped_trb = event_trb;
 	} else {
-		/* Update ring dequeue pointer */
-		while (ep_ring->dequeue != td->last_trb)
+		if (GET_COMP_CODE(event->transfer_len) == COMP_STALL) {
+			/* The transfer is completed from the driver's
+			 * perspective, but we need to issue a set dequeue
+			 * command for this stalled endpoint to move the dequeue
+			 * pointer past the TD.  We can't do that here because
+			 * the halt condition must be cleared first.
+			 */
+			ep_ring->stopped_td = td;
+			ep_ring->stopped_trb = event_trb;
+		} else {
+			/* Update ring dequeue pointer */
+			while (ep_ring->dequeue != td->last_trb)
+				inc_deq(xhci, ep_ring, false);
 			inc_deq(xhci, ep_ring, false);
-		inc_deq(xhci, ep_ring, false);
+		}
 
 		/* Clean up the endpoint's TD list */
 		urb = td->urb;
@@ -987,7 +1080,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			list_del(&td->cancelled_td_list);
 			ep_ring->cancels_pending--;
 		}
-		kfree(td);
+		/* Leave the TD around for the reset endpoint function to use */
+		if (GET_COMP_CODE(event->transfer_len) != COMP_STALL) {
+			kfree(td);
+		}
 		urb->hcpriv = NULL;
 	}
 cleanup:
@@ -997,6 +1093,8 @@ cleanup:
 	/* FIXME for multi-TD URBs (who have buffers bigger than 64MB) */
 	if (urb) {
 		usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
+		xhci_dbg(xhci, "Giveback URB %p, len = %d, status = %d\n",
+				urb, td->urb->actual_length, status);
 		spin_unlock(&xhci->lock);
 		usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
 		spin_lock(&xhci->lock);
@@ -1014,6 +1112,7 @@ void xhci_handle_event(struct xhci_hcd *xhci)
 	int update_ptrs = 1;
 	int ret;
 
+	xhci_dbg(xhci, "In %s\n", __func__);
 	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
 		xhci->error_bitmask |= 1 << 1;
 		return;
@@ -1026,18 +1125,25 @@ void xhci_handle_event(struct xhci_hcd *xhci)
 		xhci->error_bitmask |= 1 << 2;
 		return;
 	}
+	xhci_dbg(xhci, "%s - OS owns TRB\n", __func__);
 
 	/* FIXME: Handle more event types. */
 	switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
 	case TRB_TYPE(TRB_COMPLETION):
+		xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__);
 		handle_cmd_completion(xhci, &event->event_cmd);
+		xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__);
 		break;
 	case TRB_TYPE(TRB_PORT_STATUS):
+		xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__);
 		handle_port_status(xhci, event);
+		xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__);
 		update_ptrs = 0;
 		break;
 	case TRB_TYPE(TRB_TRANSFER):
+		xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__);
 		ret = handle_tx_event(xhci, &event->trans_event);
+		xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__);
 		if (ret < 0)
 			xhci->error_bitmask |= 1 << 9;
 		else
@@ -1093,13 +1199,13 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 		 */
 		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
 		return -ENOENT;
-	case EP_STATE_HALTED:
 	case EP_STATE_ERROR:
-		xhci_warn(xhci, "WARN waiting for halt or error on ep "
-				"to be cleared\n");
+		xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
 		/* FIXME event handling code for error needs to clear it */
 		/* XXX not sure if this should be -ENOENT or not */
 		return -EINVAL;
+	case EP_STATE_HALTED:
+		xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
 	case EP_STATE_STOPPED:
 	case EP_STATE_RUNNING:
 		break;
@@ -1128,9 +1234,9 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 		gfp_t mem_flags)
 {
 	int ret;
-
+	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
 	ret = prepare_ring(xhci, xdev->ep_rings[ep_index],
-			xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK,
+			ep_ctx->ep_info & EP_STATE_MASK,
 			num_trbs, mem_flags);
 	if (ret)
 		return ret;
@@ -1285,6 +1391,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	/* Queue the first TRB, even if it's zero-length */
 	do {
 		u32 field = 0;
+		u32 length_field = 0;
 
 		/* Don't change the cycle bit of the first TRB until later */
 		if (first_trb)
@@ -1314,10 +1421,13 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 					(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
 					(unsigned int) addr + trb_buff_len);
 		}
+		length_field = TRB_LEN(trb_buff_len) |
+			TD_REMAINDER(urb->transfer_buffer_length - running_total) |
+			TRB_INTR_TARGET(0);
 		queue_trb(xhci, ep_ring, false,
-				(u32) addr,
-				(u32) ((u64) addr >> 32),
-				TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0),
+				lower_32_bits(addr),
+				upper_32_bits(addr),
+				length_field,
 				/* We always want to know if the TRB was short,
 				 * or we won't get an event when it completes.
 				 * (Unless we use event data TRBs, which are a
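Note: the new length_field collects three subfields of TRB word 2. A standalone sketch, with macro definitions matching my reading of the xhci.h layout of this era (verify the shifts against the header before relying on them):

#include <stdint.h>

#define TRB_LEN(n)		((n) & 0x1ffff)			/* bits 16:0 */
#define TD_REMAINDER(n)		((((n) >> 10) & 0x1f) << 17)	/* bits 21:17 */
#define TRB_INTR_TARGET(n)	(((n) & 0x3ff) << 22)		/* bits 31:22 */

/* Build TRB word 2 the way the queueing paths above do: the length of this
 * TRB, a coarse "TD size" hint for what is still left to send after it, and
 * interrupter target 0.
 */
static uint32_t make_length_field(uint32_t trb_buff_len,
		uint32_t total_len, uint32_t running_total)
{
	return TRB_LEN(trb_buff_len) |
		TD_REMAINDER(total_len - running_total) |
		TRB_INTR_TARGET(0);
}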
@@ -1365,7 +1475,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	struct xhci_generic_trb *start_trb;
 	bool first_trb;
 	int start_cycle;
-	u32 field;
+	u32 field, length_field;
 
 	int running_total, trb_buff_len, ret;
 	u64 addr;
@@ -1443,10 +1553,13 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			td->last_trb = ep_ring->enqueue;
 			field |= TRB_IOC;
 		}
+		length_field = TRB_LEN(trb_buff_len) |
+			TD_REMAINDER(urb->transfer_buffer_length - running_total) |
+			TRB_INTR_TARGET(0);
 		queue_trb(xhci, ep_ring, false,
-				(u32) addr,
-				(u32) ((u64) addr >> 32),
-				TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0),
+				lower_32_bits(addr),
+				upper_32_bits(addr),
+				length_field,
 				/* We always want to know if the TRB was short,
 				 * or we won't get an event when it completes.
 				 * (Unless we use event data TRBs, which are a
@@ -1478,7 +1591,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	struct usb_ctrlrequest *setup;
 	struct xhci_generic_trb *start_trb;
 	int start_cycle;
-	u32 field;
+	u32 field, length_field;
 	struct xhci_td *td;
 
 	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
@@ -1528,13 +1641,16 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 	/* If there's data, queue data TRBs */
 	field = 0;
+	length_field = TRB_LEN(urb->transfer_buffer_length) |
+		TD_REMAINDER(urb->transfer_buffer_length) |
+		TRB_INTR_TARGET(0);
 	if (urb->transfer_buffer_length > 0) {
 		if (setup->bRequestType & USB_DIR_IN)
 			field |= TRB_DIR_IN;
 		queue_trb(xhci, ep_ring, false,
 				lower_32_bits(urb->transfer_dma),
 				upper_32_bits(urb->transfer_dma),
-				TRB_LEN(urb->transfer_buffer_length) | TRB_INTR_TARGET(0),
+				length_field,
 				/* Event on short tx */
 				field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state);
 	}
@@ -1603,7 +1719,8 @@ int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
 int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 		u32 slot_id)
 {
-	return queue_command(xhci, in_ctx_ptr, 0, 0,
+	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+			upper_32_bits(in_ctx_ptr), 0,
 			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id));
 }
 
@@ -1611,7 +1728,8 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 		u32 slot_id)
 {
-	return queue_command(xhci, in_ctx_ptr, 0, 0,
+	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+			upper_32_bits(in_ctx_ptr), 0,
 			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id));
 }
 
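Note: the two command-queueing hunks above split a possibly-64-bit DMA address across the first two TRB words, where the old "(u32) in_ctx_ptr, 0" code silently dropped the high half. Equivalents of the kernel's lower_32_bits()/upper_32_bits() helpers, for illustration:

#include <stdint.h>

/* A command TRB stores a 64-bit pointer little-endian across two 32-bit
 * words, so the address must be split rather than truncated.
 */
static inline uint32_t lo32(uint64_t v)
{
	return (uint32_t) v;
}

static inline uint32_t hi32(uint64_t v)
{
	return (uint32_t) (v >> 32);
}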
@@ -1639,10 +1757,23 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
 	u32 type = TRB_TYPE(TRB_SET_DEQ);
 
 	addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
-	if (addr == 0)
+	if (addr == 0) {
 		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
 		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
 				deq_seg, deq_ptr);
-	return queue_command(xhci, (u32) addr | cycle_state, 0, 0,
+		return 0;
+	}
+	return queue_command(xhci, lower_32_bits(addr) | cycle_state,
+			upper_32_bits(addr), 0,
 			trb_slot_id | trb_ep_index | type);
 }
+
+int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
+		unsigned int ep_index)
+{
+	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
+	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
+	u32 type = TRB_TYPE(TRB_RESET_EP);
+
+	return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type);
+}
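Note: xhci_queue_reset_ep() is the missing half of the stall handling added earlier in this diff (EP_HALTED set on COMP_STALL, the stopped TD kept around, and the TRB_RESET_EP completion clearing the halt). A sketch of the intended recovery sequence, assuming the usual command-ring locking; the wrapper name is mine:

/* Sketch: recover a stalled endpoint.  On the TRB_RESET_EP completion,
 * handle_reset_ep_completion() clears EP_HALTED and rings the endpoint
 * doorbell, and the kept-around stopped TD lets a later Set TR Dequeue
 * Pointer command move the hardware past the offending transfer.
 */
static void recover_stalled_ep(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index)
{
	xhci_queue_reset_ep(xhci, slot_id, ep_index);
	xhci_ring_cmd_db(xhci);
}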
