author	Sarah Sharp <sarah.a.sharp@linux.intel.com>	2009-04-29 22:05:20 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2009-06-16 00:44:50 -0400
commit	23e3be113f42790736319c049c78e5f9a4394c02 (patch)
tree	bdbc5fe6350d60202ed04e70a756cade8c92c596 /drivers
parent	06e7a1487b61e1ae909c4a4c264b4428c55beb7e (diff)
USB: xhci: Avoid global namespace pollution.
Make all globally visible functions start with xhci_ and mark functions as
static if they're only called within the same C file.  Fix some long lines
while we're at it.

Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
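The rename-and-staticize pattern applied throughout the diff below reduces to
one rule: a helper called from a single C file becomes static, and anything
shared across files carries the subsystem prefix plus a header declaration.
A minimal, hypothetical sketch of that convention (the foo_* names are
invented for illustration and are not part of the xHCI driver):

struct foo_hcd {
	int pending;	/* commands queued but not yet handed to the HC */
};

/* Used only in this file: static keeps it out of the global namespace. */
static int foo_count_pending(struct foo_hcd *foo)
{
	return foo->pending;
}

/* Called from other files: globally visible, so it gets the foo_ prefix
 * and a matching declaration in foo.h.
 */
int foo_ring_cmd_db(struct foo_hcd *foo)
{
	return foo_count_pending(foo);
}

With every exported symbol prefixed, another driver defining its own
ring_cmd_db() or set_hc_event_deq() can no longer collide with the xHCI
driver at link time.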
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/usb/host/xhci-dbg.c	14
-rw-r--r--	drivers/usb/host/xhci-hcd.c	42
-rw-r--r--	drivers/usb/host/xhci-mem.c	2
-rw-r--r--	drivers/usb/host/xhci-ring.c	72
-rw-r--r--	drivers/usb/host/xhci.h	24
5 files changed, 82 insertions, 72 deletions
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 264c38059d4a..6473cbf329f9 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -55,7 +55,7 @@ void xhci_dbg_regs(struct xhci_hcd *xhci)
 	xhci_dbg(xhci, "// Doorbell array at %p:\n", xhci->dba);
 }
 
-void xhci_print_cap_regs(struct xhci_hcd *xhci)
+static void xhci_print_cap_regs(struct xhci_hcd *xhci)
 {
 	u32 temp;
 
@@ -106,7 +106,7 @@ void xhci_print_cap_regs(struct xhci_hcd *xhci)
 	xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK);
 }
 
-void xhci_print_command_reg(struct xhci_hcd *xhci)
+static void xhci_print_command_reg(struct xhci_hcd *xhci)
 {
 	u32 temp;
 
@@ -124,7 +124,7 @@ void xhci_print_command_reg(struct xhci_hcd *xhci)
 			(temp & CMD_LRESET) ? "not " : "");
 }
 
-void xhci_print_status(struct xhci_hcd *xhci)
+static void xhci_print_status(struct xhci_hcd *xhci)
 {
 	u32 temp;
 
@@ -138,14 +138,14 @@ void xhci_print_status(struct xhci_hcd *xhci)
 			(temp & STS_HALT) ? "halted" : "running");
 }
 
-void xhci_print_op_regs(struct xhci_hcd *xhci)
+static void xhci_print_op_regs(struct xhci_hcd *xhci)
 {
 	xhci_dbg(xhci, "xHCI operational registers at %p:\n", xhci->op_regs);
 	xhci_print_command_reg(xhci);
 	xhci_print_status(xhci);
 }
 
-void xhci_print_ports(struct xhci_hcd *xhci)
+static void xhci_print_ports(struct xhci_hcd *xhci)
 {
 	u32 __iomem *addr;
 	int i, j;
@@ -340,13 +340,13 @@ void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring)
 {
 	xhci_dbg(xhci, "Ring deq = %p (virt), 0x%llx (dma)\n",
 			ring->dequeue,
-			(unsigned long long)trb_virt_to_dma(ring->deq_seg,
+			(unsigned long long)xhci_trb_virt_to_dma(ring->deq_seg,
 			ring->dequeue));
 	xhci_dbg(xhci, "Ring deq updated %u times\n",
 			ring->deq_updates);
 	xhci_dbg(xhci, "Ring enq = %p (virt), 0x%llx (dma)\n",
 			ring->enqueue,
-			(unsigned long long)trb_virt_to_dma(ring->enq_seg,
+			(unsigned long long)xhci_trb_virt_to_dma(ring->enq_seg,
 			ring->enqueue));
 	xhci_dbg(xhci, "Ring enq updated %u times\n",
 			ring->enq_updates);
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 13188077387c..94447bcdf19f 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -291,7 +291,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
 }
 
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-void event_ring_work(unsigned long arg)
+void xhci_event_ring_work(unsigned long arg)
 {
 	unsigned long flags;
 	int temp;
@@ -330,8 +330,8 @@ void event_ring_work(unsigned long arg)
 	}
 
 	if (xhci->noops_submitted != NUM_TEST_NOOPS)
-		if (setup_one_noop(xhci))
-			ring_cmd_db(xhci);
+		if (xhci_setup_one_noop(xhci))
+			xhci_ring_cmd_db(xhci);
 	spin_unlock_irqrestore(&xhci->lock, flags);
 
 	if (!xhci->zombie)
@@ -374,7 +374,7 @@ int xhci_run(struct usb_hcd *hcd)
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
 	init_timer(&xhci->event_ring_timer);
 	xhci->event_ring_timer.data = (unsigned long) xhci;
-	xhci->event_ring_timer.function = event_ring_work;
+	xhci->event_ring_timer.function = xhci_event_ring_work;
 	/* Poll the event ring */
 	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
 	xhci->zombie = 0;
@@ -404,7 +404,7 @@ int xhci_run(struct usb_hcd *hcd)
 	xhci_print_ir_set(xhci, xhci->ir_set, 0);
 
 	if (NUM_TEST_NOOPS > 0)
-		doorbell = setup_one_noop(xhci);
+		doorbell = xhci_setup_one_noop(xhci);
 
 	xhci_dbg(xhci, "Command ring memory map follows:\n");
 	xhci_debug_ring(xhci, xhci->cmd_ring);
@@ -600,9 +600,11 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 		goto exit;
 	}
 	if (usb_endpoint_xfer_control(&urb->ep->desc))
-		ret = queue_ctrl_tx(xhci, mem_flags, urb, slot_id, ep_index);
+		ret = xhci_queue_ctrl_tx(xhci, mem_flags, urb,
+				slot_id, ep_index);
 	else if (usb_endpoint_xfer_bulk(&urb->ep->desc))
-		ret = queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
+		ret = xhci_queue_bulk_tx(xhci, mem_flags, urb,
+				slot_id, ep_index);
 	else
 		ret = -EINVAL;
 exit:
@@ -668,8 +670,8 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	 * the first cancellation to be handled.
 	 */
 	if (ep_ring->cancels_pending == 1) {
-		queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
-		ring_cmd_db(xhci);
+		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
+		xhci_ring_cmd_db(xhci);
 	}
 done:
 	spin_unlock_irqrestore(&xhci->lock, flags);
@@ -913,13 +915,14 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 	xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma,
 			LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
 
-	ret = queue_configure_endpoint(xhci, virt_dev->in_ctx_dma, udev->slot_id);
+	ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx_dma,
+			udev->slot_id);
 	if (ret < 0) {
 		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		return -ENOMEM;
 	}
-	ring_cmd_db(xhci);
+	xhci_ring_cmd_db(xhci);
 	spin_unlock_irqrestore(&xhci->lock, flags);
 
 	/* Wait for the configure endpoint command to complete */
@@ -1033,12 +1036,12 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
 		return;
 
 	spin_lock_irqsave(&xhci->lock, flags);
-	if (queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
+	if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
 		return;
 	}
-	ring_cmd_db(xhci);
+	xhci_ring_cmd_db(xhci);
 	spin_unlock_irqrestore(&xhci->lock, flags);
 	/*
 	 * Event command completion handler will free any data structures
@@ -1058,13 +1061,13 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
 	int ret;
 
 	spin_lock_irqsave(&xhci->lock, flags);
-	ret = queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
+	ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
 	if (ret) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
 		return 0;
 	}
-	ring_cmd_db(xhci);
+	xhci_ring_cmd_db(xhci);
 	spin_unlock_irqrestore(&xhci->lock, flags);
 
 	/* XXX: how much time for xHC slot assignment? */
@@ -1086,8 +1089,8 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
 	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
 		/* Disable slot, if we can do it without mem alloc */
 		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
-		if (!queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
-			ring_cmd_db(xhci);
+		if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
+			xhci_ring_cmd_db(xhci);
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		return 0;
 	}
@@ -1129,13 +1132,14 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 	xhci_setup_addressable_virt_dev(xhci, udev);
 	/* Otherwise, assume the core has the device configured how it wants */
 
-	ret = queue_address_device(xhci, virt_dev->in_ctx_dma, udev->slot_id);
+	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx_dma,
+			udev->slot_id);
 	if (ret) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
 		return ret;
 	}
-	ring_cmd_db(xhci);
+	xhci_ring_cmd_db(xhci);
 	spin_unlock_irqrestore(&xhci->lock, flags);
 
 	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 6523e399fe73..f49f280cfd43 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -746,7 +746,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
 
 	/* Set the event ring dequeue address */
-	set_hc_event_deq(xhci);
+	xhci_set_hc_event_deq(xhci);
 	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
 	xhci_print_ir_set(xhci, xhci->ir_set, 0);
 
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 8fb5d52c08c9..f692e74f269c 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -71,7 +71,7 @@
  * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
  * address of the TRB.
  */
-dma_addr_t trb_virt_to_dma(struct xhci_segment *seg,
+dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
 		union xhci_trb *trb)
 {
 	dma_addr_t offset;
@@ -235,12 +235,12 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	return 1;
 }
 
-void set_hc_event_deq(struct xhci_hcd *xhci)
+void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
 {
 	u32 temp;
 	dma_addr_t deq;
 
-	deq = trb_virt_to_dma(xhci->event_ring->deq_seg,
+	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
 			xhci->event_ring->dequeue);
 	if (deq == 0 && !in_interrupt())
 		xhci_warn(xhci, "WARN something wrong with SW event ring "
@@ -256,7 +256,7 @@ void set_hc_event_deq(struct xhci_hcd *xhci)
 }
 
 /* Ring the host controller doorbell after placing a command on the ring */
-void ring_cmd_db(struct xhci_hcd *xhci)
+void xhci_ring_cmd_db(struct xhci_hcd *xhci)
 {
 	u32 temp;
 
@@ -371,7 +371,7 @@ static void find_new_dequeue_state(struct xhci_hcd *xhci,
 	ep_ring->deq_seg = state->new_deq_seg;
 }
 
-void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
+static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 		struct xhci_td *cur_td)
 {
 	struct xhci_segment *cur_seg;
@@ -390,7 +390,7 @@ void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
 					"in seg %p (0x%llx dma)\n",
 					cur_trb,
-					(unsigned long long)trb_virt_to_dma(cur_seg, cur_trb),
+					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
 					cur_seg,
 					(unsigned long long)cur_seg->dma);
 		} else {
@@ -403,7 +403,7 @@ void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 			xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
 					"in seg %p (0x%llx dma)\n",
 					cur_trb,
-					(unsigned long long)trb_virt_to_dma(cur_seg, cur_trb),
+					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
 					cur_seg,
 					(unsigned long long)cur_seg->dma);
 		}
@@ -458,7 +458,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
 		xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
 				cur_td->first_trb,
-				(unsigned long long)trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
+				(unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
 		/*
 		 * If we stopped on the TD we need to cancel, then we have to
 		 * move the xHC endpoint ring dequeue pointer past this TD.
@@ -485,7 +485,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 				deq_state.new_deq_seg,
 				(unsigned long long)deq_state.new_deq_seg->dma,
 				deq_state.new_deq_ptr,
-				(unsigned long long)trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr),
+				(unsigned long long)xhci_trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr),
 				deq_state.new_cycle_state);
 		queue_set_tr_deq(xhci, slot_id, ep_index,
 				deq_state.new_deq_seg,
@@ -497,7 +497,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 		 * ring running.
 		 */
 		ep_ring->state |= SET_DEQ_PENDING;
-		ring_cmd_db(xhci);
+		xhci_ring_cmd_db(xhci);
 	} else {
 		/* Otherwise just ring the doorbell to restart the ring */
 		ring_ep_doorbell(xhci, slot_id, ep_index);
@@ -612,7 +612,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 	dma_addr_t cmd_dequeue_dma;
 
 	cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0];
-	cmd_dequeue_dma = trb_virt_to_dma(xhci->cmd_ring->deq_seg,
+	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
 			xhci->cmd_ring->dequeue);
 	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
 	if (cmd_dequeue_dma == 0) {
@@ -677,7 +677,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
 
 	/* Update event ring dequeue pointer before dropping the lock */
 	inc_deq(xhci, xhci->event_ring, true);
-	set_hc_event_deq(xhci);
+	xhci_set_hc_event_deq(xhci);
 
 	spin_unlock(&xhci->lock);
 	/* Pass this up to the core */
@@ -702,15 +702,15 @@ static struct xhci_segment *trb_in_td(
 	dma_addr_t end_trb_dma;
 	struct xhci_segment *cur_seg;
 
-	start_dma = trb_virt_to_dma(start_seg, start_trb);
+	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
 	cur_seg = start_seg;
 
 	do {
 		/* We may get an event for a Link TRB in the middle of a TD */
-		end_seg_dma = trb_virt_to_dma(cur_seg,
+		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
 				&start_seg->trbs[TRBS_PER_SEGMENT - 1]);
 		/* If the end TRB isn't in this segment, this is set to 0 */
-		end_trb_dma = trb_virt_to_dma(cur_seg, end_trb);
+		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
 
 		if (end_trb_dma > 0) {
 			/* The end TRB is in this segment, so suspect should be here */
@@ -734,7 +734,7 @@ static struct xhci_segment *trb_in_td(
 			return cur_seg;
 		}
 		cur_seg = cur_seg->next;
-		start_dma = trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
+		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
 	} while (1);
 
 }
@@ -992,7 +992,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	}
 cleanup:
 	inc_deq(xhci, xhci->event_ring, true);
-	set_hc_event_deq(xhci);
+	xhci_set_hc_event_deq(xhci);
 
 	/* FIXME for multi-TD URBs (who have buffers bigger than 64MB) */
 	if (urb) {
@@ -1050,7 +1050,7 @@ void xhci_handle_event(struct xhci_hcd *xhci)
 	if (update_ptrs) {
 		/* Update SW and HC event ring dequeue pointer */
 		inc_deq(xhci, xhci->event_ring, true);
-		set_hc_event_deq(xhci);
+		xhci_set_hc_event_deq(xhci);
 	}
 	/* Are there more items on the event ring? */
 	xhci_handle_event(xhci);
@@ -1119,7 +1119,7 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 	return 0;
 }
 
-int xhci_prepare_transfer(struct xhci_hcd *xhci,
+static int prepare_transfer(struct xhci_hcd *xhci,
 		struct xhci_virt_device *xdev,
 		unsigned int ep_index,
 		unsigned int num_trbs,
@@ -1156,7 +1156,7 @@ int xhci_prepare_transfer(struct xhci_hcd *xhci,
 	return 0;
 }
 
-unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
+static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
 {
 	int num_sgs, num_trbs, running_total, temp, i;
 	struct scatterlist *sg;
@@ -1200,7 +1200,7 @@ unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
 	return num_trbs;
 }
 
-void check_trb_math(struct urb *urb, int num_trbs, int running_total)
+static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
 {
 	if (num_trbs != 0)
 		dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
@@ -1216,7 +1216,7 @@ void check_trb_math(struct urb *urb, int num_trbs, int running_total)
 			urb->transfer_buffer_length);
 }
 
-void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
+static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
 		unsigned int ep_index, int start_cycle,
 		struct xhci_generic_trb *start_trb, struct xhci_td *td)
 {
@@ -1229,7 +1229,7 @@ void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
 	ring_ep_doorbell(xhci, slot_id, ep_index);
 }
 
-int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		struct urb *urb, int slot_id, unsigned int ep_index)
 {
 	struct xhci_ring *ep_ring;
@@ -1248,7 +1248,7 @@ int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	num_trbs = count_sg_trbs_needed(xhci, urb);
 	num_sgs = urb->num_sgs;
 
-	trb_buff_len = xhci_prepare_transfer(xhci, xhci->devs[slot_id],
+	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, num_trbs, urb, &td, mem_flags);
 	if (trb_buff_len < 0)
 		return trb_buff_len;
@@ -1356,7 +1356,7 @@ int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 }
 
 /* This is very similar to what ehci-q.c qtd_fill() does */
-int queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		struct urb *urb, int slot_id, unsigned int ep_index)
 {
 	struct xhci_ring *ep_ring;
@@ -1400,7 +1400,7 @@ int queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			(unsigned long long)urb->transfer_dma,
 			num_trbs);
 
-	ret = xhci_prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
+	ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
 			num_trbs, urb, &td, mem_flags);
 	if (ret < 0)
 		return ret;
@@ -1469,7 +1469,7 @@ int queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 }
 
 /* Caller must have locked xhci->lock */
-int queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		struct urb *urb, int slot_id, unsigned int ep_index)
 {
 	struct xhci_ring *ep_ring;
@@ -1502,7 +1502,7 @@ int queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	 */
 	if (urb->transfer_buffer_length > 0)
 		num_trbs++;
-	ret = xhci_prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs,
+	ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs,
 			urb, &td, mem_flags);
 	if (ret < 0)
 		return ret;
@@ -1584,36 +1584,38 @@ static int queue_cmd_noop(struct xhci_hcd *xhci)
  * Place a no-op command on the command ring to test the command and
  * event ring.
  */
-void *setup_one_noop(struct xhci_hcd *xhci)
+void *xhci_setup_one_noop(struct xhci_hcd *xhci)
 {
 	if (queue_cmd_noop(xhci) < 0)
 		return NULL;
 	xhci->noops_submitted++;
-	return ring_cmd_db;
+	return xhci_ring_cmd_db;
 }
 
 /* Queue a slot enable or disable request on the command ring */
-int queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
+int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
 {
 	return queue_command(xhci, 0, 0, 0,
 			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id));
 }
 
 /* Queue an address device command TRB */
-int queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id)
+int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+		u32 slot_id)
 {
 	return queue_command(xhci, in_ctx_ptr, 0, 0,
 			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id));
 }
 
 /* Queue a configure endpoint command TRB */
-int queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id)
+int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+		u32 slot_id)
 {
 	return queue_command(xhci, in_ctx_ptr, 0, 0,
 			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id));
 }
 
-int queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
+int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
 		unsigned int ep_index)
 {
 	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
@@ -1636,7 +1638,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
 	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
 	u32 type = TRB_TYPE(TRB_SET_DEQ);
 
-	addr = trb_virt_to_dma(deq_seg, deq_ptr);
+	addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
 	if (addr == 0)
 		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
 		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 13c9166e758a..df8778e1cfc6 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1130,18 +1130,22 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
 
 /* xHCI ring, segment, TRB, and TD functions */
-dma_addr_t trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
-void ring_cmd_db(struct xhci_hcd *xhci);
-void *setup_one_noop(struct xhci_hcd *xhci);
+dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
+void xhci_ring_cmd_db(struct xhci_hcd *xhci);
+void *xhci_setup_one_noop(struct xhci_hcd *xhci);
 void xhci_handle_event(struct xhci_hcd *xhci);
-void set_hc_event_deq(struct xhci_hcd *xhci);
-int queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id);
-int queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id);
-int queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
+void xhci_set_hc_event_deq(struct xhci_hcd *xhci);
+int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id);
+int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+		u32 slot_id);
+int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
 		unsigned int ep_index);
-int queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, int slot_id, unsigned int ep_index);
-int queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, int slot_id, unsigned int ep_index);
-int queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id);
+int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
+		int slot_id, unsigned int ep_index);
+int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
+		int slot_id, unsigned int ep_index);
+int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+		u32 slot_id);
 
 /* xHCI roothub code */
 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,