aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/usb
diff options
context:
space:
mode:
authorSarah Sharp <sarah.a.sharp@linux.intel.com>2009-04-29 22:04:32 -0400
committerGreg Kroah-Hartman <gregkh@suse.de>2009-06-16 00:44:50 -0400
commit3841d56ebb9730c786a59bf3207529c35214df26 (patch)
tree6f9bd9e5eb0d291217240e1a38af0f1afadaa836 /drivers/usb
parent045f123d9c83b9a18c9d43a9afbf52bf0799640d (diff)
USB: xhci: Fix register write order.
The 0.95 xHCI spec says that if the xHCI HW supports 64-bit addressing, you must either write the whole 64-bit address as one atomic operation, or write the low 32 bits first and then the high 32 bits. I had the register writes swapped in some places. Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/usb')
-rw-r--r--drivers/usb/host/xhci-hcd.c4
-rw-r--r--drivers/usb/host/xhci-mem.c22
2 files changed, 13 insertions, 13 deletions
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 9ffa1fa507c..13188077387 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -416,11 +416,11 @@ int xhci_run(struct usb_hcd *hcd)
416 xhci_dbg(xhci, "Event ring:\n"); 416 xhci_dbg(xhci, "Event ring:\n");
417 xhci_debug_ring(xhci, xhci->event_ring); 417 xhci_debug_ring(xhci, xhci->event_ring);
418 xhci_dbg_ring_ptrs(xhci, xhci->event_ring); 418 xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
419 temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[1]);
420 xhci_dbg(xhci, "ERST deq upper = 0x%x\n", temp);
421 temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]); 419 temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
422 temp &= ERST_PTR_MASK; 420 temp &= ERST_PTR_MASK;
423 xhci_dbg(xhci, "ERST deq = 0x%x\n", temp); 421 xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
422 temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[1]);
423 xhci_dbg(xhci, "ERST deq upper = 0x%x\n", temp);
424 424
425 temp = xhci_readl(xhci, &xhci->op_regs->command); 425 temp = xhci_readl(xhci, &xhci->op_regs->command);
426 temp |= (CMD_RUN); 426 temp |= (CMD_RUN);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 6b75ca9180e..6523e399fe7 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -475,8 +475,8 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
475 if (!virt_dev->new_ep_rings[ep_index]) 475 if (!virt_dev->new_ep_rings[ep_index])
476 return -ENOMEM; 476 return -ENOMEM;
477 ep_ring = virt_dev->new_ep_rings[ep_index]; 477 ep_ring = virt_dev->new_ep_rings[ep_index];
478 ep_ctx->deq[1] = 0;
479 ep_ctx->deq[0] = ep_ring->first_seg->dma | ep_ring->cycle_state; 478 ep_ctx->deq[0] = ep_ring->first_seg->dma | ep_ring->cycle_state;
479 ep_ctx->deq[1] = 0;
480 480
481 ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep); 481 ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
482 482
@@ -533,8 +533,8 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci,
533 533
534 ep_ctx->ep_info = 0; 534 ep_ctx->ep_info = 0;
535 ep_ctx->ep_info2 = 0; 535 ep_ctx->ep_info2 = 0;
536 ep_ctx->deq[1] = 0;
537 ep_ctx->deq[0] = 0; 536 ep_ctx->deq[0] = 0;
537 ep_ctx->deq[1] = 0;
538 ep_ctx->tx_info = 0; 538 ep_ctx->tx_info = 0;
539 /* Don't free the endpoint ring until the set interface or configuration 539 /* Don't free the endpoint ring until the set interface or configuration
540 * request succeeds. 540 * request succeeds.
@@ -549,10 +549,10 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
549 549
550 /* Free the Event Ring Segment Table and the actual Event Ring */ 550 /* Free the Event Ring Segment Table and the actual Event Ring */
551 xhci_writel(xhci, 0, &xhci->ir_set->erst_size); 551 xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
552 xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
553 xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]); 552 xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]);
554 xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]); 553 xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
555 xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]); 554 xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]);
555 xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
556 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); 556 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
557 if (xhci->erst.entries) 557 if (xhci->erst.entries)
558 pci_free_consistent(pdev, size, 558 pci_free_consistent(pdev, size,
@@ -564,8 +564,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
564 xhci->event_ring = NULL; 564 xhci->event_ring = NULL;
565 xhci_dbg(xhci, "Freed event ring\n"); 565 xhci_dbg(xhci, "Freed event ring\n");
566 566
567 xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]);
568 xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]); 567 xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]);
568 xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]);
569 if (xhci->cmd_ring) 569 if (xhci->cmd_ring)
570 xhci_ring_free(xhci, xhci->cmd_ring); 570 xhci_ring_free(xhci, xhci->cmd_ring);
571 xhci->cmd_ring = NULL; 571 xhci->cmd_ring = NULL;
@@ -584,8 +584,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
584 xhci->device_pool = NULL; 584 xhci->device_pool = NULL;
585 xhci_dbg(xhci, "Freed device context pool\n"); 585 xhci_dbg(xhci, "Freed device context pool\n");
586 586
587 xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]);
588 xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]); 587 xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]);
588 xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]);
589 if (xhci->dcbaa) 589 if (xhci->dcbaa)
590 pci_free_consistent(pdev, sizeof(*xhci->dcbaa), 590 pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
591 xhci->dcbaa, xhci->dcbaa->dma); 591 xhci->dcbaa, xhci->dcbaa->dma);
@@ -645,8 +645,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
645 xhci->dcbaa->dma = dma; 645 xhci->dcbaa->dma = dma;
646 xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n", 646 xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
647 (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); 647 (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
648 xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]);
649 xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]); 648 xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]);
649 xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]);
650 650
651 /* 651 /*
652 * Initialize the ring segment pool. The ring must be a contiguous 652 * Initialize the ring segment pool. The ring must be a contiguous
@@ -677,10 +677,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
677 val = (val & ~CMD_RING_ADDR_MASK) | 677 val = (val & ~CMD_RING_ADDR_MASK) |
678 (xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) | 678 (xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) |
679 xhci->cmd_ring->cycle_state; 679 xhci->cmd_ring->cycle_state;
680 xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n");
681 xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]);
682 xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val); 680 xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val);
683 xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]); 681 xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]);
682 xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n");
683 xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]);
684 xhci_dbg_cmd_ptrs(xhci); 684 xhci_dbg_cmd_ptrs(xhci);
685 685
686 val = xhci_readl(xhci, &xhci->cap_regs->db_off); 686 val = xhci_readl(xhci, &xhci->cap_regs->db_off);
@@ -720,8 +720,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
720 /* set ring base address and size for each segment table entry */ 720 /* set ring base address and size for each segment table entry */
721 for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) { 721 for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
722 struct xhci_erst_entry *entry = &xhci->erst.entries[val]; 722 struct xhci_erst_entry *entry = &xhci->erst.entries[val];
723 entry->seg_addr[1] = 0;
724 entry->seg_addr[0] = seg->dma; 723 entry->seg_addr[0] = seg->dma;
724 entry->seg_addr[1] = 0;
725 entry->seg_size = TRBS_PER_SEGMENT; 725 entry->seg_size = TRBS_PER_SEGMENT;
726 entry->rsvd = 0; 726 entry->rsvd = 0;
727 seg = seg->next; 727 seg = seg->next;
@@ -739,11 +739,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
739 /* set the segment table base address */ 739 /* set the segment table base address */
740 xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n", 740 xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
741 (unsigned long long)xhci->erst.erst_dma_addr); 741 (unsigned long long)xhci->erst.erst_dma_addr);
742 xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
743 val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]); 742 val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]);
744 val &= ERST_PTR_MASK; 743 val &= ERST_PTR_MASK;
745 val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK); 744 val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK);
746 xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]); 745 xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]);
746 xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
747 747
748 /* Set the event ring dequeue address */ 748 /* Set the event ring dequeue address */
749 set_hc_event_deq(xhci); 749 set_hc_event_deq(xhci);