author     Sarah Sharp <sarah.a.sharp@linux.intel.com>   2009-07-27 15:03:31 -0400
committer  Greg Kroah-Hartman <gregkh@suse.de>           2009-07-28 17:31:12 -0400
commit     8e595a5d30a5ee4bb745d4da6439d73ed7d91054
tree       0050cb2c24643b602a8b3c40adef3e7b73fe81fc   /drivers/usb/host/xhci-ring.c
parent     b11069f5f6ce6e359f853e908b0917303fcdec8f
USB: xhci: Represent 64-bit addresses with one u64.
There are several xHCI data structures that use two 32-bit fields to
represent a 64-bit address. Since some architectures don't support 64-bit
PCI writes, the fields need to be written in two 32-bit writes. The xHCI
specification says that if a platform is incapable of generating 64-bit
writes, software must write the low 32-bits first, then the high 32-bits.
Hardware that supports 64-bit addressing will wait for the high 32-bit
write before reading the revised value, and hardware that only supports
32-bit writes will ignore the high 32-bit write.
Previous xHCI code represented 64-bit addresses with two u32 values. This
led to buggy code that would write the 32-bits in the wrong order, or
forget to write the upper 32-bits. Change the two u32s to one u64 and
add a helper function that writes 64-bit addresses in the proper order.
This new function could be modified in the future if all platforms support
64-bit writes.
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
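
Note: the xhci_read_64()/xhci_write_64() helpers used in the hunks below are
defined elsewhere in the patch (the diffstat here is limited to xhci-ring.c),
so their bodies do not appear in this diff. A minimal sketch of the write side,
assuming the driver's existing xhci_writel() MMIO wrapper and the kernel's
lower_32_bits()/upper_32_bits() helpers, could look like this:

/*
 * Sketch only (assumed, not part of this diff): write a 64-bit xHCI
 * register as two 32-bit MMIO writes, low half first, as the spec
 * requires on platforms that cannot generate 64-bit writes.
 */
static inline void xhci_write_64(struct xhci_hcd *xhci,
                                 const u64 val, __u32 __iomem *regs)
{
        u32 val_lo = lower_32_bits(val);
        u32 val_hi = upper_32_bits(val);

        xhci_writel(xhci, val_lo, regs);     /* low 32 bits first ...      */
        xhci_writel(xhci, val_hi, regs + 1); /* ... then the high 32 bits  */
}

On a platform known to issue 64-bit MMIO writes atomically, the two 32-bit
writes could later be collapsed into a single 64-bit write, which is the
future change the commit message anticipates.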
Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r--   drivers/usb/host/xhci-ring.c   49
1 file changed, 23 insertions, 26 deletions
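
The open-coded (u32) addr and (u32) ((u64) addr >> 32) casts below are replaced
by the generic lower_32_bits()/upper_32_bits() helpers from <linux/kernel.h>,
which reduce to roughly the following (the double shift keeps upper_32_bits()
well-defined even when the argument is only 32 bits wide):

/* For reference; roughly the definitions in <linux/kernel.h>. */
#define lower_32_bits(n) ((u32)(n))
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))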
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index d672ba14ff80..588686fca471 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -237,7 +237,7 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
 
 void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
 {
-        u32 temp;
+        u64 temp;
         dma_addr_t deq;
 
         deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
@@ -246,13 +246,12 @@ void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
                 xhci_warn(xhci, "WARN something wrong with SW event ring "
                                 "dequeue ptr.\n");
         /* Update HC event ring dequeue pointer */
-        temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+        temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
         temp &= ERST_PTR_MASK;
         if (!in_interrupt())
                 xhci_dbg(xhci, "// Write event ring dequeue pointer\n");
-        xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
-        xhci_writel(xhci, (deq & ~ERST_PTR_MASK) | temp,
-                        &xhci->ir_set->erst_dequeue[0]);
+        xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
+                        &xhci->ir_set->erst_dequeue);
 }
 
 /* Ring the host controller doorbell after placing a command on the ring */
@@ -352,7 +351,7 @@ static void find_new_dequeue_state(struct xhci_hcd *xhci,
         if (!state->new_deq_seg)
                 BUG();
         /* Dig out the cycle state saved by the xHC during the stop ep cmd */
-        state->new_cycle_state = 0x1 & dev->out_ctx->ep[ep_index].deq[0];
+        state->new_cycle_state = 0x1 & dev->out_ctx->ep[ep_index].deq;
 
         state->new_deq_ptr = cur_td->last_trb;
         state->new_deq_seg = find_trb_seg(state->new_deq_seg,
@@ -594,10 +593,8 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
                  * cancelling URBs, which might not be an error...
                  */
         } else {
-                xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq[0] = 0x%x, "
-                                "deq[1] = 0x%x.\n",
-                                dev->out_ctx->ep[ep_index].deq[0],
-                                dev->out_ctx->ep[ep_index].deq[1]);
+                xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
+                                dev->out_ctx->ep[ep_index].deq);
         }
 
         ep_ring->state &= ~SET_DEQ_PENDING;
@@ -631,7 +628,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
         u64 cmd_dma;
         dma_addr_t cmd_dequeue_dma;
 
-        cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0];
+        cmd_dma = event->cmd_trb;
         cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
                         xhci->cmd_ring->dequeue);
         /* Is the command ring deq ptr out of sync with the deq seg ptr? */
@@ -794,10 +791,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                 return -ENODEV;
         }
 
-        event_dma = event->buffer[0];
-        if (event->buffer[1] != 0)
-                xhci_warn(xhci, "WARN ignoring upper 32-bits of 64-bit TRB dma address\n");
-
+        event_dma = event->buffer;
         /* This TRB should be in the TD at the head of this ring's TD list */
         if (list_empty(&ep_ring->td_list)) {
                 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
@@ -821,10 +815,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
         event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
         xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
                         (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
-        xhci_dbg(xhci, "Offset 0x00 (buffer[0]) = 0x%x\n",
-                        (unsigned int) event->buffer[0]);
-        xhci_dbg(xhci, "Offset 0x04 (buffer[1]) = 0x%x\n",
-                        (unsigned int) event->buffer[1]);
+        xhci_dbg(xhci, "Offset 0x00 (buffer lo) = 0x%x\n",
+                        lower_32_bits(event->buffer));
+        xhci_dbg(xhci, "Offset 0x04 (buffer hi) = 0x%x\n",
+                        upper_32_bits(event->buffer));
         xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
                         (unsigned int) event->transfer_len);
         xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
@@ -1343,8 +1337,8 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                         TD_REMAINDER(urb->transfer_buffer_length - running_total) |
                         TRB_INTR_TARGET(0);
                 queue_trb(xhci, ep_ring, false,
-                                (u32) addr,
-                                (u32) ((u64) addr >> 32),
+                                lower_32_bits(addr),
+                                upper_32_bits(addr),
                                 length_field,
                                 /* We always want to know if the TRB was short,
                                  * or we won't get an event when it completes.
@@ -1475,8 +1469,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                         TD_REMAINDER(urb->transfer_buffer_length - running_total) |
                         TRB_INTR_TARGET(0);
                 queue_trb(xhci, ep_ring, false,
-                                (u32) addr,
-                                (u32) ((u64) addr >> 32),
+                                lower_32_bits(addr),
+                                upper_32_bits(addr),
                                 length_field,
                                 /* We always want to know if the TRB was short,
                                  * or we won't get an event when it completes.
@@ -1637,7 +1631,8 @@ int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
 int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
                 u32 slot_id)
 {
-        return queue_command(xhci, in_ctx_ptr, 0, 0,
+        return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+                        upper_32_bits(in_ctx_ptr), 0,
                         TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id));
 }
 
@@ -1645,7 +1640,8 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
                 u32 slot_id)
 {
-        return queue_command(xhci, in_ctx_ptr, 0, 0,
+        return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+                        upper_32_bits(in_ctx_ptr), 0,
                         TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id));
 }
 
@@ -1677,7 +1673,8 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
                 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
                 xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
                                 deq_seg, deq_ptr);
-        return queue_command(xhci, (u32) addr | cycle_state, 0, 0,
+        return queue_command(xhci, lower_32_bits(addr) | cycle_state,
+                        upper_32_bits(addr), 0,
                         trb_slot_id | trb_ep_index | type);
 }
 