aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/usb
diff options
context:
space:
mode:
authorSarah Sharp <sarah.a.sharp@linux.intel.com>2009-07-27 15:03:31 -0400
committerGreg Kroah-Hartman <gregkh@suse.de>2009-07-28 17:31:12 -0400
commit8e595a5d30a5ee4bb745d4da6439d73ed7d91054 (patch)
tree0050cb2c24643b602a8b3c40adef3e7b73fe81fc /drivers/usb
parentb11069f5f6ce6e359f853e908b0917303fcdec8f (diff)
USB: xhci: Represent 64-bit addresses with one u64.
There are several xHCI data structures that use two 32-bit fields to represent a 64-bit address. Since some architectures don't support 64-bit PCI writes, the fields need to be written in two 32-bit writes. The xHCI specification says that if a platform is incapable of generating 64-bit writes, software must write the low 32-bits first, then the high 32-bits. Hardware that supports 64-bit addressing will wait for the high 32-bit write before reading the revised value, and hardware that only supports 32-bit writes will ignore the high 32-bit write. Previous xHCI code represented 64-bit addresses with two u32 values. This led to buggy code that would write the 32-bits in the wrong order, or forget to write the upper 32-bits. Change the two u32s to one u64 and create a function call to write all 64-bit addresses in the proper order. This new function could be modified in the future if all platforms support 64-bit writes. Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/usb')
-rw-r--r--drivers/usb/host/xhci-dbg.c67
-rw-r--r--drivers/usb/host/xhci-hcd.c43
-rw-r--r--drivers/usb/host/xhci-mem.c61
-rw-r--r--drivers/usb/host/xhci-ring.c49
-rw-r--r--drivers/usb/host/xhci.h65
5 files changed, 137 insertions, 148 deletions
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 56032f2d84e8..6d62e4abe3c6 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -173,6 +173,7 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int
173{ 173{
174 void *addr; 174 void *addr;
175 u32 temp; 175 u32 temp;
176 u64 temp_64;
176 177
177 addr = &ir_set->irq_pending; 178 addr = &ir_set->irq_pending;
178 temp = xhci_readl(xhci, addr); 179 temp = xhci_readl(xhci, addr);
@@ -200,25 +201,15 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int
200 xhci_dbg(xhci, " WARN: %p: ir_set.rsvd = 0x%x\n", 201 xhci_dbg(xhci, " WARN: %p: ir_set.rsvd = 0x%x\n",
201 addr, (unsigned int)temp); 202 addr, (unsigned int)temp);
202 203
203 addr = &ir_set->erst_base[0]; 204 addr = &ir_set->erst_base;
204 temp = xhci_readl(xhci, addr); 205 temp_64 = xhci_read_64(xhci, addr);
205 xhci_dbg(xhci, " %p: ir_set.erst_base[0] = 0x%x\n", 206 xhci_dbg(xhci, " %p: ir_set.erst_base = @%08llx\n",
206 addr, (unsigned int) temp); 207 addr, temp_64);
207
208 addr = &ir_set->erst_base[1];
209 temp = xhci_readl(xhci, addr);
210 xhci_dbg(xhci, " %p: ir_set.erst_base[1] = 0x%x\n",
211 addr, (unsigned int) temp);
212
213 addr = &ir_set->erst_dequeue[0];
214 temp = xhci_readl(xhci, addr);
215 xhci_dbg(xhci, " %p: ir_set.erst_dequeue[0] = 0x%x\n",
216 addr, (unsigned int) temp);
217 208
218 addr = &ir_set->erst_dequeue[1]; 209 addr = &ir_set->erst_dequeue;
219 temp = xhci_readl(xhci, addr); 210 temp_64 = xhci_read_64(xhci, addr);
220 xhci_dbg(xhci, " %p: ir_set.erst_dequeue[1] = 0x%x\n", 211 xhci_dbg(xhci, " %p: ir_set.erst_dequeue = @%08llx\n",
221 addr, (unsigned int) temp); 212 addr, temp_64);
222} 213}
223 214
224void xhci_print_run_regs(struct xhci_hcd *xhci) 215void xhci_print_run_regs(struct xhci_hcd *xhci)
@@ -268,8 +259,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
268 xhci_dbg(xhci, "Link TRB:\n"); 259 xhci_dbg(xhci, "Link TRB:\n");
269 xhci_print_trb_offsets(xhci, trb); 260 xhci_print_trb_offsets(xhci, trb);
270 261
271 address = trb->link.segment_ptr[0] + 262 address = trb->link.segment_ptr;
272 (((u64) trb->link.segment_ptr[1]) << 32);
273 xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address); 263 xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);
274 264
275 xhci_dbg(xhci, "Interrupter target = 0x%x\n", 265 xhci_dbg(xhci, "Interrupter target = 0x%x\n",
@@ -282,8 +272,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
282 (unsigned int) (trb->link.control & TRB_NO_SNOOP)); 272 (unsigned int) (trb->link.control & TRB_NO_SNOOP));
283 break; 273 break;
284 case TRB_TYPE(TRB_TRANSFER): 274 case TRB_TYPE(TRB_TRANSFER):
285 address = trb->trans_event.buffer[0] + 275 address = trb->trans_event.buffer;
286 (((u64) trb->trans_event.buffer[1]) << 32);
287 /* 276 /*
288 * FIXME: look at flags to figure out if it's an address or if 277 * FIXME: look at flags to figure out if it's an address or if
289 * the data is directly in the buffer field. 278 * the data is directly in the buffer field.
@@ -291,8 +280,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
291 xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address); 280 xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address);
292 break; 281 break;
293 case TRB_TYPE(TRB_COMPLETION): 282 case TRB_TYPE(TRB_COMPLETION):
294 address = trb->event_cmd.cmd_trb[0] + 283 address = trb->event_cmd.cmd_trb;
295 (((u64) trb->event_cmd.cmd_trb[1]) << 32);
296 xhci_dbg(xhci, "Command TRB pointer = %llu\n", address); 284 xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
297 xhci_dbg(xhci, "Completion status = %u\n", 285 xhci_dbg(xhci, "Completion status = %u\n",
298 (unsigned int) GET_COMP_CODE(trb->event_cmd.status)); 286 (unsigned int) GET_COMP_CODE(trb->event_cmd.status));
@@ -328,8 +316,8 @@ void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
328 for (i = 0; i < TRBS_PER_SEGMENT; ++i) { 316 for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
329 trb = &seg->trbs[i]; 317 trb = &seg->trbs[i];
330 xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr, 318 xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr,
331 (unsigned int) trb->link.segment_ptr[0], 319 lower_32_bits(trb->link.segment_ptr),
332 (unsigned int) trb->link.segment_ptr[1], 320 upper_32_bits(trb->link.segment_ptr),
333 (unsigned int) trb->link.intr_target, 321 (unsigned int) trb->link.intr_target,
334 (unsigned int) trb->link.control); 322 (unsigned int) trb->link.control);
335 addr += sizeof(*trb); 323 addr += sizeof(*trb);
@@ -386,8 +374,8 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
386 entry = &erst->entries[i]; 374 entry = &erst->entries[i];
387 xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", 375 xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n",
388 (unsigned int) addr, 376 (unsigned int) addr,
389 (unsigned int) entry->seg_addr[0], 377 lower_32_bits(entry->seg_addr),
390 (unsigned int) entry->seg_addr[1], 378 upper_32_bits(entry->seg_addr),
391 (unsigned int) entry->seg_size, 379 (unsigned int) entry->seg_size,
392 (unsigned int) entry->rsvd); 380 (unsigned int) entry->rsvd);
393 addr += sizeof(*entry); 381 addr += sizeof(*entry);
@@ -396,12 +384,13 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
396 384
397void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci) 385void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
398{ 386{
399 u32 val; 387 u64 val;
400 388
401 val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]); 389 val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
402 xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = 0x%x\n", val); 390 xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n",
403 val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[1]); 391 lower_32_bits(val));
404 xhci_dbg(xhci, "// xHC command ring deq ptr high bits = 0x%x\n", val); 392 xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n",
393 upper_32_bits(val));
405} 394}
406 395
407void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep) 396void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep)
@@ -462,14 +451,10 @@ void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_ad
462 &ctx->ep[i].ep_info2, 451 &ctx->ep[i].ep_info2,
463 (unsigned long long)dma, ctx->ep[i].ep_info2); 452 (unsigned long long)dma, ctx->ep[i].ep_info2);
464 dma += field_size; 453 dma += field_size;
465 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[0]\n", 454 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08llx - deq\n",
466 &ctx->ep[i].deq[0], 455 &ctx->ep[i].deq,
467 (unsigned long long)dma, ctx->ep[i].deq[0]); 456 (unsigned long long)dma, ctx->ep[i].deq);
468 dma += field_size; 457 dma += 2*field_size;
469 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[1]\n",
470 &ctx->ep[i].deq[1],
471 (unsigned long long)dma, ctx->ep[i].deq[1]);
472 dma += field_size;
473 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n", 458 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
474 &ctx->ep[i].tx_info, 459 &ctx->ep[i].tx_info,
475 (unsigned long long)dma, ctx->ep[i].tx_info); 460 (unsigned long long)dma, ctx->ep[i].tx_info);
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index ff99365cae42..e15773598e4e 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -226,6 +226,7 @@ int xhci_init(struct usb_hcd *hcd)
226static void xhci_work(struct xhci_hcd *xhci) 226static void xhci_work(struct xhci_hcd *xhci)
227{ 227{
228 u32 temp; 228 u32 temp;
229 u64 temp_64;
229 230
230 /* 231 /*
231 * Clear the op reg interrupt status first, 232 * Clear the op reg interrupt status first,
@@ -249,8 +250,8 @@ static void xhci_work(struct xhci_hcd *xhci)
249 xhci_handle_event(xhci); 250 xhci_handle_event(xhci);
250 251
251 /* Clear the event handler busy flag; the event ring should be empty. */ 252 /* Clear the event handler busy flag; the event ring should be empty. */
252 temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]); 253 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
253 xhci_writel(xhci, temp & ~ERST_EHB, &xhci->ir_set->erst_dequeue[0]); 254 xhci_write_64(xhci, temp_64 & ~ERST_EHB, &xhci->ir_set->erst_dequeue);
254 /* Flush posted writes -- FIXME is this necessary? */ 255 /* Flush posted writes -- FIXME is this necessary? */
255 xhci_readl(xhci, &xhci->ir_set->irq_pending); 256 xhci_readl(xhci, &xhci->ir_set->irq_pending);
256} 257}
@@ -295,6 +296,7 @@ void xhci_event_ring_work(unsigned long arg)
295{ 296{
296 unsigned long flags; 297 unsigned long flags;
297 int temp; 298 int temp;
299 u64 temp_64;
298 struct xhci_hcd *xhci = (struct xhci_hcd *) arg; 300 struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
299 int i, j; 301 int i, j;
300 302
@@ -311,9 +313,9 @@ void xhci_event_ring_work(unsigned long arg)
311 xhci_dbg(xhci, "Event ring:\n"); 313 xhci_dbg(xhci, "Event ring:\n");
312 xhci_debug_segment(xhci, xhci->event_ring->deq_seg); 314 xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
313 xhci_dbg_ring_ptrs(xhci, xhci->event_ring); 315 xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
314 temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]); 316 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
315 temp &= ERST_PTR_MASK; 317 temp_64 &= ~ERST_PTR_MASK;
316 xhci_dbg(xhci, "ERST deq = 0x%x\n", temp); 318 xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
317 xhci_dbg(xhci, "Command ring:\n"); 319 xhci_dbg(xhci, "Command ring:\n");
318 xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg); 320 xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
319 xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); 321 xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
@@ -356,6 +358,7 @@ void xhci_event_ring_work(unsigned long arg)
356int xhci_run(struct usb_hcd *hcd) 358int xhci_run(struct usb_hcd *hcd)
357{ 359{
358 u32 temp; 360 u32 temp;
361 u64 temp_64;
359 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 362 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
360 void (*doorbell)(struct xhci_hcd *) = NULL; 363 void (*doorbell)(struct xhci_hcd *) = NULL;
361 364
@@ -416,11 +419,9 @@ int xhci_run(struct usb_hcd *hcd)
416 xhci_dbg(xhci, "Event ring:\n"); 419 xhci_dbg(xhci, "Event ring:\n");
417 xhci_debug_ring(xhci, xhci->event_ring); 420 xhci_debug_ring(xhci, xhci->event_ring);
418 xhci_dbg_ring_ptrs(xhci, xhci->event_ring); 421 xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
419 temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]); 422 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
420 temp &= ERST_PTR_MASK; 423 temp_64 &= ~ERST_PTR_MASK;
421 xhci_dbg(xhci, "ERST deq = 0x%x\n", temp); 424 xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
422 temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[1]);
423 xhci_dbg(xhci, "ERST deq upper = 0x%x\n", temp);
424 425
425 temp = xhci_readl(xhci, &xhci->op_regs->command); 426 temp = xhci_readl(xhci, &xhci->op_regs->command);
426 temp |= (CMD_RUN); 427 temp |= (CMD_RUN);
@@ -888,8 +889,7 @@ static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev)
888 ep_ctx = &virt_dev->in_ctx->ep[i]; 889 ep_ctx = &virt_dev->in_ctx->ep[i];
889 ep_ctx->ep_info = 0; 890 ep_ctx->ep_info = 0;
890 ep_ctx->ep_info2 = 0; 891 ep_ctx->ep_info2 = 0;
891 ep_ctx->deq[0] = 0; 892 ep_ctx->deq = 0;
892 ep_ctx->deq[1] = 0;
893 ep_ctx->tx_info = 0; 893 ep_ctx->tx_info = 0;
894 } 894 }
895} 895}
@@ -1165,7 +1165,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
1165 struct xhci_virt_device *virt_dev; 1165 struct xhci_virt_device *virt_dev;
1166 int ret = 0; 1166 int ret = 0;
1167 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 1167 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1168 u32 temp; 1168 u64 temp_64;
1169 1169
1170 if (!udev->slot_id) { 1170 if (!udev->slot_id) {
1171 xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id); 1171 xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
@@ -1227,18 +1227,13 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
1227 if (ret) { 1227 if (ret) {
1228 return ret; 1228 return ret;
1229 } 1229 }
1230 temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[0]); 1230 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
1231 xhci_dbg(xhci, "Op regs DCBAA ptr[0] = %#08x\n", temp); 1231 xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
1232 temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[1]); 1232 xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
1233 xhci_dbg(xhci, "Op regs DCBAA ptr[1] = %#08x\n", temp);
1234 xhci_dbg(xhci, "Slot ID %d dcbaa entry[0] @%p = %#08x\n",
1235 udev->slot_id,
1236 &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id],
1237 xhci->dcbaa->dev_context_ptrs[2*udev->slot_id]);
1238 xhci_dbg(xhci, "Slot ID %d dcbaa entry[1] @%p = %#08x\n",
1239 udev->slot_id, 1233 udev->slot_id,
1240 &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1], 1234 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
1241 xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1]); 1235 (unsigned long long)
1236 xhci->dcbaa->dev_context_ptrs[udev->slot_id]);
1242 xhci_dbg(xhci, "Output Context DMA address = %#08llx\n", 1237 xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
1243 (unsigned long long)virt_dev->out_ctx_dma); 1238 (unsigned long long)virt_dev->out_ctx_dma);
1244 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); 1239 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index c8a72de1c508..ec825f16dcee 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -88,7 +88,7 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
88 return; 88 return;
89 prev->next = next; 89 prev->next = next;
90 if (link_trbs) { 90 if (link_trbs) {
91 prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma; 91 prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma;
92 92
93 /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ 93 /* Set the last TRB in the segment to have a TRB type ID of Link TRB */
94 val = prev->trbs[TRBS_PER_SEGMENT-1].link.control; 94 val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
@@ -200,8 +200,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
200 return; 200 return;
201 201
202 dev = xhci->devs[slot_id]; 202 dev = xhci->devs[slot_id];
203 xhci->dcbaa->dev_context_ptrs[2*slot_id] = 0; 203 xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
204 xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
205 if (!dev) 204 if (!dev)
206 return; 205 return;
207 206
@@ -265,13 +264,12 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
265 * Point to output device context in dcbaa; skip the output control 264 * Point to output device context in dcbaa; skip the output control
266 * context, which is eight 32 bit fields (or 32 bytes long) 265 * context, which is eight 32 bit fields (or 32 bytes long)
267 */ 266 */
268 xhci->dcbaa->dev_context_ptrs[2*slot_id] = 267 xhci->dcbaa->dev_context_ptrs[slot_id] =
269 (u32) dev->out_ctx_dma + (32); 268 (u32) dev->out_ctx_dma + (32);
270 xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n", 269 xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
271 slot_id, 270 slot_id,
272 &xhci->dcbaa->dev_context_ptrs[2*slot_id], 271 &xhci->dcbaa->dev_context_ptrs[slot_id],
273 (unsigned long long)dev->out_ctx_dma); 272 (unsigned long long)dev->out_ctx_dma);
274 xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
275 273
276 return 1; 274 return 1;
277fail: 275fail:
@@ -360,10 +358,9 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
360 ep0_ctx->ep_info2 |= MAX_BURST(0); 358 ep0_ctx->ep_info2 |= MAX_BURST(0);
361 ep0_ctx->ep_info2 |= ERROR_COUNT(3); 359 ep0_ctx->ep_info2 |= ERROR_COUNT(3);
362 360
363 ep0_ctx->deq[0] = 361 ep0_ctx->deq =
364 dev->ep_rings[0]->first_seg->dma; 362 dev->ep_rings[0]->first_seg->dma;
365 ep0_ctx->deq[0] |= dev->ep_rings[0]->cycle_state; 363 ep0_ctx->deq |= dev->ep_rings[0]->cycle_state;
366 ep0_ctx->deq[1] = 0;
367 364
368 /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ 365 /* Steps 7 and 8 were done in xhci_alloc_virt_device() */
369 366
@@ -477,8 +474,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
477 if (!virt_dev->new_ep_rings[ep_index]) 474 if (!virt_dev->new_ep_rings[ep_index])
478 return -ENOMEM; 475 return -ENOMEM;
479 ep_ring = virt_dev->new_ep_rings[ep_index]; 476 ep_ring = virt_dev->new_ep_rings[ep_index];
480 ep_ctx->deq[0] = ep_ring->first_seg->dma | ep_ring->cycle_state; 477 ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
481 ep_ctx->deq[1] = 0;
482 478
483 ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep); 479 ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
484 480
@@ -535,8 +531,7 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci,
535 531
536 ep_ctx->ep_info = 0; 532 ep_ctx->ep_info = 0;
537 ep_ctx->ep_info2 = 0; 533 ep_ctx->ep_info2 = 0;
538 ep_ctx->deq[0] = 0; 534 ep_ctx->deq = 0;
539 ep_ctx->deq[1] = 0;
540 ep_ctx->tx_info = 0; 535 ep_ctx->tx_info = 0;
541 /* Don't free the endpoint ring until the set interface or configuration 536 /* Don't free the endpoint ring until the set interface or configuration
542 * request succeeds. 537 * request succeeds.
@@ -551,10 +546,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
551 546
552 /* Free the Event Ring Segment Table and the actual Event Ring */ 547 /* Free the Event Ring Segment Table and the actual Event Ring */
553 xhci_writel(xhci, 0, &xhci->ir_set->erst_size); 548 xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
554 xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]); 549 xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
555 xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]); 550 xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
556 xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]);
557 xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
558 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); 551 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
559 if (xhci->erst.entries) 552 if (xhci->erst.entries)
560 pci_free_consistent(pdev, size, 553 pci_free_consistent(pdev, size,
@@ -566,8 +559,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
566 xhci->event_ring = NULL; 559 xhci->event_ring = NULL;
567 xhci_dbg(xhci, "Freed event ring\n"); 560 xhci_dbg(xhci, "Freed event ring\n");
568 561
569 xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]); 562 xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
570 xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]);
571 if (xhci->cmd_ring) 563 if (xhci->cmd_ring)
572 xhci_ring_free(xhci, xhci->cmd_ring); 564 xhci_ring_free(xhci, xhci->cmd_ring);
573 xhci->cmd_ring = NULL; 565 xhci->cmd_ring = NULL;
@@ -586,8 +578,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
586 xhci->device_pool = NULL; 578 xhci->device_pool = NULL;
587 xhci_dbg(xhci, "Freed device context pool\n"); 579 xhci_dbg(xhci, "Freed device context pool\n");
588 580
589 xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]); 581 xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
590 xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]);
591 if (xhci->dcbaa) 582 if (xhci->dcbaa)
592 pci_free_consistent(pdev, sizeof(*xhci->dcbaa), 583 pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
593 xhci->dcbaa, xhci->dcbaa->dma); 584 xhci->dcbaa, xhci->dcbaa->dma);
@@ -602,6 +593,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
602 dma_addr_t dma; 593 dma_addr_t dma;
603 struct device *dev = xhci_to_hcd(xhci)->self.controller; 594 struct device *dev = xhci_to_hcd(xhci)->self.controller;
604 unsigned int val, val2; 595 unsigned int val, val2;
596 u64 val_64;
605 struct xhci_segment *seg; 597 struct xhci_segment *seg;
606 u32 page_size; 598 u32 page_size;
607 int i; 599 int i;
@@ -647,8 +639,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
647 xhci->dcbaa->dma = dma; 639 xhci->dcbaa->dma = dma;
648 xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n", 640 xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
649 (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); 641 (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
650 xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]); 642 xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
651 xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]);
652 643
653 /* 644 /*
654 * Initialize the ring segment pool. The ring must be a contiguous 645 * Initialize the ring segment pool. The ring must be a contiguous
@@ -675,14 +666,12 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
675 (unsigned long long)xhci->cmd_ring->first_seg->dma); 666 (unsigned long long)xhci->cmd_ring->first_seg->dma);
676 667
677 /* Set the address in the Command Ring Control register */ 668 /* Set the address in the Command Ring Control register */
678 val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]); 669 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
679 val = (val & ~CMD_RING_ADDR_MASK) | 670 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
680 (xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) | 671 (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
681 xhci->cmd_ring->cycle_state; 672 xhci->cmd_ring->cycle_state;
682 xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val); 673 xhci_dbg(xhci, "// Setting command ring address to 0x%x\n", val);
683 xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]); 674 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
684 xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n");
685 xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]);
686 xhci_dbg_cmd_ptrs(xhci); 675 xhci_dbg_cmd_ptrs(xhci);
687 676
688 val = xhci_readl(xhci, &xhci->cap_regs->db_off); 677 val = xhci_readl(xhci, &xhci->cap_regs->db_off);
@@ -722,8 +711,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
722 /* set ring base address and size for each segment table entry */ 711 /* set ring base address and size for each segment table entry */
723 for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) { 712 for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
724 struct xhci_erst_entry *entry = &xhci->erst.entries[val]; 713 struct xhci_erst_entry *entry = &xhci->erst.entries[val];
725 entry->seg_addr[0] = seg->dma; 714 entry->seg_addr = seg->dma;
726 entry->seg_addr[1] = 0;
727 entry->seg_size = TRBS_PER_SEGMENT; 715 entry->seg_size = TRBS_PER_SEGMENT;
728 entry->rsvd = 0; 716 entry->rsvd = 0;
729 seg = seg->next; 717 seg = seg->next;
@@ -741,11 +729,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
741 /* set the segment table base address */ 729 /* set the segment table base address */
742 xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n", 730 xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
743 (unsigned long long)xhci->erst.erst_dma_addr); 731 (unsigned long long)xhci->erst.erst_dma_addr);
744 val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]); 732 val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
745 val &= ERST_PTR_MASK; 733 val_64 &= ERST_PTR_MASK;
746 val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK); 734 val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
747 xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]); 735 xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
748 xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
749 736
750 /* Set the event ring dequeue address */ 737 /* Set the event ring dequeue address */
751 xhci_set_hc_event_deq(xhci); 738 xhci_set_hc_event_deq(xhci);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index d672ba14ff80..588686fca471 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -237,7 +237,7 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
237 237
238void xhci_set_hc_event_deq(struct xhci_hcd *xhci) 238void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
239{ 239{
240 u32 temp; 240 u64 temp;
241 dma_addr_t deq; 241 dma_addr_t deq;
242 242
243 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, 243 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
@@ -246,13 +246,12 @@ void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
246 xhci_warn(xhci, "WARN something wrong with SW event ring " 246 xhci_warn(xhci, "WARN something wrong with SW event ring "
247 "dequeue ptr.\n"); 247 "dequeue ptr.\n");
248 /* Update HC event ring dequeue pointer */ 248 /* Update HC event ring dequeue pointer */
249 temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]); 249 temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
250 temp &= ERST_PTR_MASK; 250 temp &= ERST_PTR_MASK;
251 if (!in_interrupt()) 251 if (!in_interrupt())
252 xhci_dbg(xhci, "// Write event ring dequeue pointer\n"); 252 xhci_dbg(xhci, "// Write event ring dequeue pointer\n");
253 xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]); 253 xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
254 xhci_writel(xhci, (deq & ~ERST_PTR_MASK) | temp, 254 &xhci->ir_set->erst_dequeue);
255 &xhci->ir_set->erst_dequeue[0]);
256} 255}
257 256
258/* Ring the host controller doorbell after placing a command on the ring */ 257/* Ring the host controller doorbell after placing a command on the ring */
@@ -352,7 +351,7 @@ static void find_new_dequeue_state(struct xhci_hcd *xhci,
352 if (!state->new_deq_seg) 351 if (!state->new_deq_seg)
353 BUG(); 352 BUG();
354 /* Dig out the cycle state saved by the xHC during the stop ep cmd */ 353 /* Dig out the cycle state saved by the xHC during the stop ep cmd */
355 state->new_cycle_state = 0x1 & dev->out_ctx->ep[ep_index].deq[0]; 354 state->new_cycle_state = 0x1 & dev->out_ctx->ep[ep_index].deq;
356 355
357 state->new_deq_ptr = cur_td->last_trb; 356 state->new_deq_ptr = cur_td->last_trb;
358 state->new_deq_seg = find_trb_seg(state->new_deq_seg, 357 state->new_deq_seg = find_trb_seg(state->new_deq_seg,
@@ -594,10 +593,8 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
594 * cancelling URBs, which might not be an error... 593 * cancelling URBs, which might not be an error...
595 */ 594 */
596 } else { 595 } else {
597 xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq[0] = 0x%x, " 596 xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
598 "deq[1] = 0x%x.\n", 597 dev->out_ctx->ep[ep_index].deq);
599 dev->out_ctx->ep[ep_index].deq[0],
600 dev->out_ctx->ep[ep_index].deq[1]);
601 } 598 }
602 599
603 ep_ring->state &= ~SET_DEQ_PENDING; 600 ep_ring->state &= ~SET_DEQ_PENDING;
@@ -631,7 +628,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
631 u64 cmd_dma; 628 u64 cmd_dma;
632 dma_addr_t cmd_dequeue_dma; 629 dma_addr_t cmd_dequeue_dma;
633 630
634 cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0]; 631 cmd_dma = event->cmd_trb;
635 cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, 632 cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
636 xhci->cmd_ring->dequeue); 633 xhci->cmd_ring->dequeue);
637 /* Is the command ring deq ptr out of sync with the deq seg ptr? */ 634 /* Is the command ring deq ptr out of sync with the deq seg ptr? */
@@ -794,10 +791,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
794 return -ENODEV; 791 return -ENODEV;
795 } 792 }
796 793
797 event_dma = event->buffer[0]; 794 event_dma = event->buffer;
798 if (event->buffer[1] != 0)
799 xhci_warn(xhci, "WARN ignoring upper 32-bits of 64-bit TRB dma address\n");
800
801 /* This TRB should be in the TD at the head of this ring's TD list */ 795 /* This TRB should be in the TD at the head of this ring's TD list */
802 if (list_empty(&ep_ring->td_list)) { 796 if (list_empty(&ep_ring->td_list)) {
803 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", 797 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
@@ -821,10 +815,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
821 event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)]; 815 event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
822 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", 816 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
823 (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10); 817 (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
824 xhci_dbg(xhci, "Offset 0x00 (buffer[0]) = 0x%x\n", 818 xhci_dbg(xhci, "Offset 0x00 (buffer lo) = 0x%x\n",
825 (unsigned int) event->buffer[0]); 819 lower_32_bits(event->buffer));
826 xhci_dbg(xhci, "Offset 0x04 (buffer[0]) = 0x%x\n", 820 xhci_dbg(xhci, "Offset 0x04 (buffer hi) = 0x%x\n",
827 (unsigned int) event->buffer[1]); 821 upper_32_bits(event->buffer));
828 xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n", 822 xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
829 (unsigned int) event->transfer_len); 823 (unsigned int) event->transfer_len);
830 xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n", 824 xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
@@ -1343,8 +1337,8 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1343 TD_REMAINDER(urb->transfer_buffer_length - running_total) | 1337 TD_REMAINDER(urb->transfer_buffer_length - running_total) |
1344 TRB_INTR_TARGET(0); 1338 TRB_INTR_TARGET(0);
1345 queue_trb(xhci, ep_ring, false, 1339 queue_trb(xhci, ep_ring, false,
1346 (u32) addr, 1340 lower_32_bits(addr),
1347 (u32) ((u64) addr >> 32), 1341 upper_32_bits(addr),
1348 length_field, 1342 length_field,
1349 /* We always want to know if the TRB was short, 1343 /* We always want to know if the TRB was short,
1350 * or we won't get an event when it completes. 1344 * or we won't get an event when it completes.
@@ -1475,8 +1469,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1475 TD_REMAINDER(urb->transfer_buffer_length - running_total) | 1469 TD_REMAINDER(urb->transfer_buffer_length - running_total) |
1476 TRB_INTR_TARGET(0); 1470 TRB_INTR_TARGET(0);
1477 queue_trb(xhci, ep_ring, false, 1471 queue_trb(xhci, ep_ring, false,
1478 (u32) addr, 1472 lower_32_bits(addr),
1479 (u32) ((u64) addr >> 32), 1473 upper_32_bits(addr),
1480 length_field, 1474 length_field,
1481 /* We always want to know if the TRB was short, 1475 /* We always want to know if the TRB was short,
1482 * or we won't get an event when it completes. 1476 * or we won't get an event when it completes.
@@ -1637,7 +1631,8 @@ int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
1637int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, 1631int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
1638 u32 slot_id) 1632 u32 slot_id)
1639{ 1633{
1640 return queue_command(xhci, in_ctx_ptr, 0, 0, 1634 return queue_command(xhci, lower_32_bits(in_ctx_ptr),
1635 upper_32_bits(in_ctx_ptr), 0,
1641 TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)); 1636 TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id));
1642} 1637}
1643 1638
@@ -1645,7 +1640,8 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
1645int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, 1640int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
1646 u32 slot_id) 1641 u32 slot_id)
1647{ 1642{
1648 return queue_command(xhci, in_ctx_ptr, 0, 0, 1643 return queue_command(xhci, lower_32_bits(in_ctx_ptr),
1644 upper_32_bits(in_ctx_ptr), 0,
1649 TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id)); 1645 TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id));
1650} 1646}
1651 1647
@@ -1677,7 +1673,8 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
1677 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); 1673 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
1678 xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n", 1674 xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
1679 deq_seg, deq_ptr); 1675 deq_seg, deq_ptr);
1680 return queue_command(xhci, (u32) addr | cycle_state, 0, 0, 1676 return queue_command(xhci, lower_32_bits(addr) | cycle_state,
1677 upper_32_bits(addr), 0,
1681 trb_slot_id | trb_ep_index | type); 1678 trb_slot_id | trb_ep_index | type);
1682} 1679}
1683 1680
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index cde648a524f5..60770c89132b 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -25,6 +25,7 @@
25 25
26#include <linux/usb.h> 26#include <linux/usb.h>
27#include <linux/timer.h> 27#include <linux/timer.h>
28#include <linux/kernel.h>
28 29
29#include "../core/hcd.h" 30#include "../core/hcd.h"
30/* Code sharing between pci-quirks and xhci hcd */ 31/* Code sharing between pci-quirks and xhci hcd */
@@ -42,14 +43,6 @@
42 * xHCI register interface. 43 * xHCI register interface.
43 * This corresponds to the eXtensible Host Controller Interface (xHCI) 44 * This corresponds to the eXtensible Host Controller Interface (xHCI)
44 * Revision 0.95 specification 45 * Revision 0.95 specification
45 *
46 * Registers should always be accessed with double word or quad word accesses.
47 *
48 * Some xHCI implementations may support 64-bit address pointers. Registers
49 * with 64-bit address pointers should be written to with dword accesses by
50 * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
51 * xHCI implementations that do not support 64-bit address pointers will ignore
52 * the high dword, and write order is irrelevant.
53 */ 46 */
54 47
55/** 48/**
@@ -166,10 +159,10 @@ struct xhci_op_regs {
166 u32 reserved1; 159 u32 reserved1;
167 u32 reserved2; 160 u32 reserved2;
168 u32 dev_notification; 161 u32 dev_notification;
169 u32 cmd_ring[2]; 162 u64 cmd_ring;
170 /* rsvd: offset 0x20-2F */ 163 /* rsvd: offset 0x20-2F */
171 u32 reserved3[4]; 164 u32 reserved3[4];
172 u32 dcbaa_ptr[2]; 165 u64 dcbaa_ptr;
173 u32 config_reg; 166 u32 config_reg;
174 /* rsvd: offset 0x3C-3FF */ 167 /* rsvd: offset 0x3C-3FF */
175 u32 reserved4[241]; 168 u32 reserved4[241];
@@ -254,7 +247,7 @@ struct xhci_op_regs {
254#define CMD_RING_RUNNING (1 << 3) 247#define CMD_RING_RUNNING (1 << 3)
255/* bits 4:5 reserved and should be preserved */ 248/* bits 4:5 reserved and should be preserved */
256/* Command Ring pointer - bit mask for the lower 32 bits. */ 249/* Command Ring pointer - bit mask for the lower 32 bits. */
257#define CMD_RING_ADDR_MASK (0xffffffc0) 250#define CMD_RING_RSVD_BITS (0x3f)
258 251
259/* CONFIG - Configure Register - config_reg bitmasks */ 252/* CONFIG - Configure Register - config_reg bitmasks */
260/* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */ 253/* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */
@@ -382,8 +375,8 @@ struct xhci_intr_reg {
382 u32 irq_control; 375 u32 irq_control;
383 u32 erst_size; 376 u32 erst_size;
384 u32 rsvd; 377 u32 rsvd;
385 u32 erst_base[2]; 378 u64 erst_base;
386 u32 erst_dequeue[2]; 379 u64 erst_dequeue;
387}; 380};
388 381
389/* irq_pending bitmasks */ 382/* irq_pending bitmasks */
@@ -538,7 +531,7 @@ struct xhci_slot_ctx {
538struct xhci_ep_ctx { 531struct xhci_ep_ctx {
539 u32 ep_info; 532 u32 ep_info;
540 u32 ep_info2; 533 u32 ep_info2;
541 u32 deq[2]; 534 u64 deq;
542 u32 tx_info; 535 u32 tx_info;
543 /* offset 0x14 - 0x1f reserved for HC internal use */ 536 /* offset 0x14 - 0x1f reserved for HC internal use */
544 u32 reserved[3]; 537 u32 reserved[3];
@@ -641,7 +634,7 @@ struct xhci_virt_device {
641 */ 634 */
642struct xhci_device_context_array { 635struct xhci_device_context_array {
643 /* 64-bit device addresses; we only write 32-bit addresses */ 636 /* 64-bit device addresses; we only write 32-bit addresses */
644 u32 dev_context_ptrs[2*MAX_HC_SLOTS]; 637 u64 dev_context_ptrs[MAX_HC_SLOTS];
645 /* private xHCD pointers */ 638 /* private xHCD pointers */
646 dma_addr_t dma; 639 dma_addr_t dma;
647}; 640};
@@ -654,7 +647,7 @@ struct xhci_device_context_array {
654 647
655struct xhci_stream_ctx { 648struct xhci_stream_ctx {
656 /* 64-bit stream ring address, cycle state, and stream type */ 649 /* 64-bit stream ring address, cycle state, and stream type */
657 u32 stream_ring[2]; 650 u64 stream_ring;
658 /* offset 0x14 - 0x1f reserved for HC internal use */ 651 /* offset 0x14 - 0x1f reserved for HC internal use */
659 u32 reserved[2]; 652 u32 reserved[2];
660}; 653};
@@ -662,7 +655,7 @@ struct xhci_stream_ctx {
662 655
663struct xhci_transfer_event { 656struct xhci_transfer_event {
664 /* 64-bit buffer address, or immediate data */ 657 /* 64-bit buffer address, or immediate data */
665 u32 buffer[2]; 658 u64 buffer;
666 u32 transfer_len; 659 u32 transfer_len;
667 /* This field is interpreted differently based on the type of TRB */ 660 /* This field is interpreted differently based on the type of TRB */
668 u32 flags; 661 u32 flags;
@@ -744,7 +737,7 @@ struct xhci_transfer_event {
744 737
745struct xhci_link_trb { 738struct xhci_link_trb {
746 /* 64-bit segment pointer*/ 739 /* 64-bit segment pointer*/
747 u32 segment_ptr[2]; 740 u64 segment_ptr;
748 u32 intr_target; 741 u32 intr_target;
749 u32 control; 742 u32 control;
750}; 743};
@@ -755,7 +748,7 @@ struct xhci_link_trb {
755/* Command completion event TRB */ 748/* Command completion event TRB */
756struct xhci_event_cmd { 749struct xhci_event_cmd {
757 /* Pointer to command TRB, or the value passed by the event data trb */ 750 /* Pointer to command TRB, or the value passed by the event data trb */
758 u32 cmd_trb[2]; 751 u64 cmd_trb;
759 u32 status; 752 u32 status;
760 u32 flags; 753 u32 flags;
761}; 754};
@@ -943,7 +936,7 @@ struct xhci_ring {
943 936
944struct xhci_erst_entry { 937struct xhci_erst_entry {
945 /* 64-bit event ring segment address */ 938 /* 64-bit event ring segment address */
946 u32 seg_addr[2]; 939 u64 seg_addr;
947 u32 seg_size; 940 u32 seg_size;
948 /* Set to zero */ 941 /* Set to zero */
949 u32 rsvd; 942 u32 rsvd;
@@ -1079,6 +1072,38 @@ static inline void xhci_writel(struct xhci_hcd *xhci,
1079 writel(val, regs); 1072 writel(val, regs);
1080} 1073}
1081 1074
1075/*
1076 * Registers should always be accessed with double word or quad word accesses.
1077 *
1078 * Some xHCI implementations may support 64-bit address pointers. Registers
1079 * with 64-bit address pointers should be written to with dword accesses by
1080 * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
1081 * xHCI implementations that do not support 64-bit address pointers will ignore
1082 * the high dword, and write order is irrelevant.
1083 */
1084static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
1085 __u64 __iomem *regs)
1086{
1087 __u32 __iomem *ptr = (__u32 __iomem *) regs;
1088 u64 val_lo = readl(ptr);
1089 u64 val_hi = readl(ptr + 1);
1090 return val_lo + (val_hi << 32);
1091}
1092static inline void xhci_write_64(struct xhci_hcd *xhci,
1093 const u64 val, __u64 __iomem *regs)
1094{
1095 __u32 __iomem *ptr = (__u32 __iomem *) regs;
1096 u32 val_lo = lower_32_bits(val);
1097 u32 val_hi = upper_32_bits(val);
1098
1099 if (!in_interrupt())
1100 xhci_dbg(xhci,
1101 "`MEM_WRITE_DWORD(3'b000, 64'h%p, 64'h%0lx, 4'hf);\n",
1102 regs, (long unsigned int) val);
1103 writel(val_lo, ptr);
1104 writel(val_hi, ptr + 1);
1105}
1106
1082/* xHCI debugging */ 1107/* xHCI debugging */
1083void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num); 1108void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
1084void xhci_print_registers(struct xhci_hcd *xhci); 1109void xhci_print_registers(struct xhci_hcd *xhci);