author     Sarah Sharp <sarah.a.sharp@linux.intel.com>  2009-07-27 15:03:31 -0400
committer  Greg Kroah-Hartman <gregkh@suse.de>          2009-07-28 17:31:12 -0400
commit     8e595a5d30a5ee4bb745d4da6439d73ed7d91054
tree       0050cb2c24643b602a8b3c40adef3e7b73fe81fc /drivers/usb/host/xhci-mem.c
parent     b11069f5f6ce6e359f853e908b0917303fcdec8f
USB: xhci: Represent 64-bit addresses with one u64.
There are several xHCI data structures that use two 32-bit fields to
represent a 64-bit address. Since some architectures don't support 64-bit
PCI writes, the fields need to be written in two 32-bit writes. The xHCI
specification says that if a platform is incapable of generating 64-bit
writes, software must write the low 32 bits first, then the high 32 bits.
Hardware that supports 64-bit addressing will wait for the high 32-bit
write before consuming the new value, and hardware that only supports
32-bit writes will ignore the high 32-bit write.
Previous xHCI code represented 64-bit addresses with two u32 values. This
led to buggy code that would write the two 32-bit halves in the wrong
order, or forget to write the upper 32 bits. Change the two u32s to one
u64 and add a function that writes all 64-bit addresses in the proper
order. This new function can be modified in the future if all platforms
support 64-bit writes.
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
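[Editor's note] The hunks below replace open-coded pairs of 32-bit register
writes with xhci_read_64()/xhci_write_64() helpers. As a minimal sketch of
how such helpers might look: the names come from the diff itself, but the
readl()/writel()-based bodies here are an assumed implementation written
for illustration, not necessarily the committed one.

/*
 * Illustrative sketch (assumed implementation): access a 64-bit xHCI
 * register as two 32-bit MMIO operations, low half first, following the
 * ordering rule described in the commit message above.
 */
static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
		u64 __iomem *regs)
{
	__u32 __iomem *ptr = (__u32 __iomem *) regs;
	u64 val_lo = readl(ptr);
	u64 val_hi = readl(ptr + 1);

	return val_lo + (val_hi << 32);
}

static inline void xhci_write_64(struct xhci_hcd *xhci,
		const u64 val, u64 __iomem *regs)
{
	__u32 __iomem *ptr = (__u32 __iomem *) regs;
	u32 val_lo = lower_32_bits(val);
	u32 val_hi = upper_32_bits(val);

	/* Low half first: 64-bit-capable controllers latch the value on the
	 * high write, while 32-bit-only controllers simply ignore it.
	 */
	writel(val_lo, ptr);
	writel(val_hi, ptr + 1);
}

Because the low-then-high ordering lives in one helper instead of at every
call site, a caller can no longer write the halves in the wrong order or
forget the upper half.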
Diffstat (limited to 'drivers/usb/host/xhci-mem.c')
 drivers/usb/host/xhci-mem.c | 61 ++++++++++++++++++++++++-------------------------------------
 1 file changed, 24 insertions(+), 37 deletions(-)
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index c8a72de1c508..ec825f16dcee 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -88,7 +88,7 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
 		return;
 	prev->next = next;
 	if (link_trbs) {
-		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma;
+		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma;
 
 		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
 		val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
@@ -200,8 +200,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 		return;
 
 	dev = xhci->devs[slot_id];
-	xhci->dcbaa->dev_context_ptrs[2*slot_id] = 0;
-	xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
+	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
 	if (!dev)
 		return;
 
@@ -265,13 +264,12 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	 * Point to output device context in dcbaa; skip the output control
 	 * context, which is eight 32 bit fields (or 32 bytes long)
 	 */
-	xhci->dcbaa->dev_context_ptrs[2*slot_id] =
+	xhci->dcbaa->dev_context_ptrs[slot_id] =
 		(u32) dev->out_ctx_dma + (32);
 	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
 			slot_id,
-			&xhci->dcbaa->dev_context_ptrs[2*slot_id],
+			&xhci->dcbaa->dev_context_ptrs[slot_id],
 			(unsigned long long)dev->out_ctx_dma);
-	xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
 
 	return 1;
 fail:
@@ -360,10 +358,9 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
 	ep0_ctx->ep_info2 |= MAX_BURST(0);
 	ep0_ctx->ep_info2 |= ERROR_COUNT(3);
 
-	ep0_ctx->deq[0] =
+	ep0_ctx->deq =
 		dev->ep_rings[0]->first_seg->dma;
-	ep0_ctx->deq[0] |= dev->ep_rings[0]->cycle_state;
-	ep0_ctx->deq[1] = 0;
+	ep0_ctx->deq |= dev->ep_rings[0]->cycle_state;
 
 	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */
 
@@ -477,8 +474,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	if (!virt_dev->new_ep_rings[ep_index])
 		return -ENOMEM;
 	ep_ring = virt_dev->new_ep_rings[ep_index];
-	ep_ctx->deq[0] = ep_ring->first_seg->dma | ep_ring->cycle_state;
-	ep_ctx->deq[1] = 0;
+	ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
 
 	ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
 
@@ -535,8 +531,7 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci,
 
 	ep_ctx->ep_info = 0;
 	ep_ctx->ep_info2 = 0;
-	ep_ctx->deq[0] = 0;
-	ep_ctx->deq[1] = 0;
+	ep_ctx->deq = 0;
 	ep_ctx->tx_info = 0;
 	/* Don't free the endpoint ring until the set interface or configuration
 	 * request succeeds.
@@ -551,10 +546,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 
 	/* Free the Event Ring Segment Table and the actual Event Ring */
 	xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
-	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]);
-	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
-	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]);
-	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
+	xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
+	xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
 	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
 	if (xhci->erst.entries)
 		pci_free_consistent(pdev, size,
@@ -566,8 +559,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	xhci->event_ring = NULL;
 	xhci_dbg(xhci, "Freed event ring\n");
 
-	xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]);
-	xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]);
+	xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
 	if (xhci->cmd_ring)
 		xhci_ring_free(xhci, xhci->cmd_ring);
 	xhci->cmd_ring = NULL;
@@ -586,8 +578,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	xhci->device_pool = NULL;
 	xhci_dbg(xhci, "Freed device context pool\n");
 
-	xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]);
-	xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]);
+	xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
 	if (xhci->dcbaa)
 		pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
 				xhci->dcbaa, xhci->dcbaa->dma);
@@ -602,6 +593,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	dma_addr_t dma;
 	struct device *dev = xhci_to_hcd(xhci)->self.controller;
 	unsigned int val, val2;
+	u64 val_64;
 	struct xhci_segment *seg;
 	u32 page_size;
 	int i;
@@ -647,8 +639,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	xhci->dcbaa->dma = dma;
 	xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
 			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
-	xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]);
-	xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]);
+	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
 
 	/*
 	 * Initialize the ring segment pool. The ring must be a contiguous
@@ -675,14 +666,12 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 			(unsigned long long)xhci->cmd_ring->first_seg->dma);
 
 	/* Set the address in the Command Ring Control register */
-	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
-	val = (val & ~CMD_RING_ADDR_MASK) |
-		(xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) |
+	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
+		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
 		xhci->cmd_ring->cycle_state;
-	xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val);
-	xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]);
-	xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n");
-	xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]);
+	xhci_dbg(xhci, "// Setting command ring address to 0x%x\n", val);
+	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
 	xhci_dbg_cmd_ptrs(xhci);
 
 	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
@@ -722,8 +711,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	/* set ring base address and size for each segment table entry */
 	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
 		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
-		entry->seg_addr[0] = seg->dma;
-		entry->seg_addr[1] = 0;
+		entry->seg_addr = seg->dma;
 		entry->seg_size = TRBS_PER_SEGMENT;
 		entry->rsvd = 0;
 		seg = seg->next;
@@ -741,11 +729,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	/* set the segment table base address */
 	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
 			(unsigned long long)xhci->erst.erst_dma_addr);
-	val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]);
-	val &= ERST_PTR_MASK;
-	val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK);
-	xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]);
-	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
+	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
+	val_64 &= ERST_PTR_MASK;
+	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
+	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
 
 	/* Set the event ring dequeue address */
 	xhci_set_hc_event_deq(xhci);