author     Sarah Sharp <sarah.a.sharp@linux.intel.com>  2009-04-29 22:05:20 -0400
committer  Greg Kroah-Hartman <gregkh@suse.de>          2009-06-16 00:44:50 -0400
commit     23e3be113f42790736319c049c78e5f9a4394c02
tree       bdbc5fe6350d60202ed04e70a756cade8c92c596 /drivers/usb/host/xhci-ring.c
parent     06e7a1487b61e1ae909c4a4c264b4428c55beb7e
USB: xhci: Avoid global namespace pollution.
Make all globally visible functions start with xhci_ and mark functions
as static if they're only called within the same C file.  Fix some long
lines while we're at it.

Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
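For readers skimming the diff below, a minimal sketch of the convention being applied (prototypes paraphrased from the renames in this file; the actual declarations live in xhci.h and may differ in detail):

    /* Before: generic names exported into the kernel's single global
     * symbol namespace, risking collisions with other drivers. */
    dma_addr_t trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
    int queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                    struct urb *urb, int slot_id, unsigned int ep_index);

    /* After: symbols that must stay visible outside xhci-ring.c carry
     * the xhci_ driver prefix... */
    dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
                    union xhci_trb *trb);
    int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                    struct urb *urb, int slot_id, unsigned int ep_index);

    /* ...while helpers called only from within this file become static,
     * giving them internal linkage so they leave the global namespace
     * entirely. */
    static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                    struct urb *urb, int slot_id, unsigned int ep_index);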
Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r--  drivers/usb/host/xhci-ring.c  72
1 file changed, 37 insertions(+), 35 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 8fb5d52c08c9..f692e74f269c 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -71,7 +71,7 @@
  * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
  * address of the TRB.
  */
-dma_addr_t trb_virt_to_dma(struct xhci_segment *seg,
+dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
 		union xhci_trb *trb)
 {
 	dma_addr_t offset;
@@ -235,12 +235,12 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	return 1;
 }
 
-void set_hc_event_deq(struct xhci_hcd *xhci)
+void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
 {
 	u32 temp;
 	dma_addr_t deq;
 
-	deq = trb_virt_to_dma(xhci->event_ring->deq_seg,
+	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
 			xhci->event_ring->dequeue);
 	if (deq == 0 && !in_interrupt())
 		xhci_warn(xhci, "WARN something wrong with SW event ring "
@@ -256,7 +256,7 @@ void set_hc_event_deq(struct xhci_hcd *xhci)
 }
 
 /* Ring the host controller doorbell after placing a command on the ring */
-void ring_cmd_db(struct xhci_hcd *xhci)
+void xhci_ring_cmd_db(struct xhci_hcd *xhci)
 {
 	u32 temp;
 
@@ -371,7 +371,7 @@ static void find_new_dequeue_state(struct xhci_hcd *xhci,
 	ep_ring->deq_seg = state->new_deq_seg;
 }
 
-void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
+static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 		struct xhci_td *cur_td)
 {
 	struct xhci_segment *cur_seg;
@@ -390,7 +390,7 @@ void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
 					"in seg %p (0x%llx dma)\n",
 					cur_trb,
-					(unsigned long long)trb_virt_to_dma(cur_seg, cur_trb),
+					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
 					cur_seg,
 					(unsigned long long)cur_seg->dma);
 		} else {
@@ -403,7 +403,7 @@ void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 			xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
 					"in seg %p (0x%llx dma)\n",
 					cur_trb,
-					(unsigned long long)trb_virt_to_dma(cur_seg, cur_trb),
+					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
 					cur_seg,
 					(unsigned long long)cur_seg->dma);
 		}
@@ -458,7 +458,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
 		xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
 				cur_td->first_trb,
-				(unsigned long long)trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
+				(unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
 		/*
 		 * If we stopped on the TD we need to cancel, then we have to
 		 * move the xHC endpoint ring dequeue pointer past this TD.
@@ -485,7 +485,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 				deq_state.new_deq_seg,
 				(unsigned long long)deq_state.new_deq_seg->dma,
 				deq_state.new_deq_ptr,
-				(unsigned long long)trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr),
+				(unsigned long long)xhci_trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr),
 				deq_state.new_cycle_state);
 		queue_set_tr_deq(xhci, slot_id, ep_index,
 				deq_state.new_deq_seg,
@@ -497,7 +497,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 		 * ring running.
 		 */
 		ep_ring->state |= SET_DEQ_PENDING;
-		ring_cmd_db(xhci);
+		xhci_ring_cmd_db(xhci);
 	} else {
 		/* Otherwise just ring the doorbell to restart the ring */
 		ring_ep_doorbell(xhci, slot_id, ep_index);
@@ -612,7 +612,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 	dma_addr_t cmd_dequeue_dma;
 
 	cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0];
-	cmd_dequeue_dma = trb_virt_to_dma(xhci->cmd_ring->deq_seg,
+	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
 			xhci->cmd_ring->dequeue);
 	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
 	if (cmd_dequeue_dma == 0) {
@@ -677,7 +677,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
 
 	/* Update event ring dequeue pointer before dropping the lock */
 	inc_deq(xhci, xhci->event_ring, true);
-	set_hc_event_deq(xhci);
+	xhci_set_hc_event_deq(xhci);
 
 	spin_unlock(&xhci->lock);
 	/* Pass this up to the core */
@@ -702,15 +702,15 @@ static struct xhci_segment *trb_in_td(
 	dma_addr_t end_trb_dma;
 	struct xhci_segment *cur_seg;
 
-	start_dma = trb_virt_to_dma(start_seg, start_trb);
+	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
 	cur_seg = start_seg;
 
 	do {
 		/* We may get an event for a Link TRB in the middle of a TD */
-		end_seg_dma = trb_virt_to_dma(cur_seg,
+		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
 				&start_seg->trbs[TRBS_PER_SEGMENT - 1]);
 		/* If the end TRB isn't in this segment, this is set to 0 */
-		end_trb_dma = trb_virt_to_dma(cur_seg, end_trb);
+		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
 
 		if (end_trb_dma > 0) {
 			/* The end TRB is in this segment, so suspect should be here */
@@ -734,7 +734,7 @@ static struct xhci_segment *trb_in_td(
 			return cur_seg;
 		}
 		cur_seg = cur_seg->next;
-		start_dma = trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
+		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
 	} while (1);
 
 }
@@ -992,7 +992,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	}
 cleanup:
 	inc_deq(xhci, xhci->event_ring, true);
-	set_hc_event_deq(xhci);
+	xhci_set_hc_event_deq(xhci);
 
 	/* FIXME for multi-TD URBs (who have buffers bigger than 64MB) */
 	if (urb) {
@@ -1050,7 +1050,7 @@ void xhci_handle_event(struct xhci_hcd *xhci)
 	if (update_ptrs) {
 		/* Update SW and HC event ring dequeue pointer */
 		inc_deq(xhci, xhci->event_ring, true);
-		set_hc_event_deq(xhci);
+		xhci_set_hc_event_deq(xhci);
 	}
 	/* Are there more items on the event ring? */
 	xhci_handle_event(xhci);
@@ -1119,7 +1119,7 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 	return 0;
 }
 
-int xhci_prepare_transfer(struct xhci_hcd *xhci,
+static int prepare_transfer(struct xhci_hcd *xhci,
 		struct xhci_virt_device *xdev,
 		unsigned int ep_index,
 		unsigned int num_trbs,
@@ -1156,7 +1156,7 @@ int xhci_prepare_transfer(struct xhci_hcd *xhci,
 	return 0;
 }
 
-unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
+static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
 {
 	int num_sgs, num_trbs, running_total, temp, i;
 	struct scatterlist *sg;
@@ -1200,7 +1200,7 @@ unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
 	return num_trbs;
 }
 
-void check_trb_math(struct urb *urb, int num_trbs, int running_total)
+static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
 {
 	if (num_trbs != 0)
 		dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
@@ -1216,7 +1216,7 @@ void check_trb_math(struct urb *urb, int num_trbs, int running_total)
 			urb->transfer_buffer_length);
 }
 
-void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
+static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
 		unsigned int ep_index, int start_cycle,
 		struct xhci_generic_trb *start_trb, struct xhci_td *td)
 {
@@ -1229,7 +1229,7 @@ void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
 	ring_ep_doorbell(xhci, slot_id, ep_index);
 }
 
-int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		struct urb *urb, int slot_id, unsigned int ep_index)
 {
 	struct xhci_ring *ep_ring;
@@ -1248,7 +1248,7 @@ int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	num_trbs = count_sg_trbs_needed(xhci, urb);
 	num_sgs = urb->num_sgs;
 
-	trb_buff_len = xhci_prepare_transfer(xhci, xhci->devs[slot_id],
+	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, num_trbs, urb, &td, mem_flags);
 	if (trb_buff_len < 0)
 		return trb_buff_len;
@@ -1356,7 +1356,7 @@ int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 }
 
 /* This is very similar to what ehci-q.c qtd_fill() does */
-int queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		struct urb *urb, int slot_id, unsigned int ep_index)
 {
 	struct xhci_ring *ep_ring;
@@ -1400,7 +1400,7 @@ int queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			(unsigned long long)urb->transfer_dma,
 			num_trbs);
 
-	ret = xhci_prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
+	ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
 			num_trbs, urb, &td, mem_flags);
 	if (ret < 0)
 		return ret;
@@ -1469,7 +1469,7 @@ int queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 }
 
 /* Caller must have locked xhci->lock */
-int queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		struct urb *urb, int slot_id, unsigned int ep_index)
 {
 	struct xhci_ring *ep_ring;
@@ -1502,7 +1502,7 @@ int queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	 */
 	if (urb->transfer_buffer_length > 0)
 		num_trbs++;
-	ret = xhci_prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs,
+	ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs,
 			urb, &td, mem_flags);
 	if (ret < 0)
 		return ret;
@@ -1584,36 +1584,38 @@ static int queue_cmd_noop(struct xhci_hcd *xhci)
  * Place a no-op command on the command ring to test the command and
  * event ring.
  */
-void *setup_one_noop(struct xhci_hcd *xhci)
+void *xhci_setup_one_noop(struct xhci_hcd *xhci)
 {
 	if (queue_cmd_noop(xhci) < 0)
 		return NULL;
 	xhci->noops_submitted++;
-	return ring_cmd_db;
+	return xhci_ring_cmd_db;
 }
 
 /* Queue a slot enable or disable request on the command ring */
-int queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
+int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
 {
 	return queue_command(xhci, 0, 0, 0,
 			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id));
 }
 
 /* Queue an address device command TRB */
-int queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id)
+int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+		u32 slot_id)
 {
 	return queue_command(xhci, in_ctx_ptr, 0, 0,
 			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id));
 }
 
 /* Queue a configure endpoint command TRB */
-int queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id)
+int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+		u32 slot_id)
 {
 	return queue_command(xhci, in_ctx_ptr, 0, 0,
 			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id));
 }
 
-int queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
+int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
 		unsigned int ep_index)
 {
 	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
@@ -1636,7 +1638,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
 	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
 	u32 type = TRB_TYPE(TRB_SET_DEQ);
 
-	addr = trb_virt_to_dma(deq_seg, deq_ptr);
+	addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
 	if (addr == 0)
 		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
 		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",