Diffstat (limited to 'drivers/usb/host/xhci-hcd.c')

 drivers/usb/host/xhci-hcd.c | 42 +++++++++++++++++++++-------------------
 1 file changed, 23 insertions(+), 19 deletions(-)
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 13188077387c..94447bcdf19f 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -291,7 +291,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
 }
 
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-void event_ring_work(unsigned long arg)
+void xhci_event_ring_work(unsigned long arg)
 {
 	unsigned long flags;
 	int temp;
@@ -330,8 +330,8 @@ void event_ring_work(unsigned long arg)
 	}
 
 	if (xhci->noops_submitted != NUM_TEST_NOOPS)
-		if (setup_one_noop(xhci))
-			ring_cmd_db(xhci);
+		if (xhci_setup_one_noop(xhci))
+			xhci_ring_cmd_db(xhci);
 	spin_unlock_irqrestore(&xhci->lock, flags);
 
 	if (!xhci->zombie)
@@ -374,7 +374,7 @@ int xhci_run(struct usb_hcd *hcd)
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
 	init_timer(&xhci->event_ring_timer);
 	xhci->event_ring_timer.data = (unsigned long) xhci;
-	xhci->event_ring_timer.function = event_ring_work;
+	xhci->event_ring_timer.function = xhci_event_ring_work;
 	/* Poll the event ring */
 	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
 	xhci->zombie = 0;
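For reference, the poll timer armed in the hunk above uses the old kernel timer API (init_timer() plus open-coded .data/.function/.expires fields, replaced by timer_setup() in later kernels). A minimal self-contained sketch of the same pattern; the my_dev/my_poll names and the poll interval are hypothetical, not taken from this driver:

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	#define MY_POLL_SECS 60			/* hypothetical poll interval */

	struct my_dev {				/* hypothetical device state */
		struct timer_list timer;
	};

	static void my_poll(unsigned long arg)
	{
		struct my_dev *dev = (struct my_dev *) arg;

		/* ... examine hardware state here ... */

		/* re-arm so the poll repeats, as xhci_event_ring_work() does */
		mod_timer(&dev->timer, jiffies + MY_POLL_SECS * HZ);
	}

	static void my_start_polling(struct my_dev *dev)
	{
		init_timer(&dev->timer);
		dev->timer.data = (unsigned long) dev;	/* handed to my_poll() */
		dev->timer.function = my_poll;
		dev->timer.expires = jiffies + MY_POLL_SECS * HZ;
		add_timer(&dev->timer);
	}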
@@ -404,7 +404,7 @@ int xhci_run(struct usb_hcd *hcd)
 	xhci_print_ir_set(xhci, xhci->ir_set, 0);
 
 	if (NUM_TEST_NOOPS > 0)
-		doorbell = setup_one_noop(xhci);
+		doorbell = xhci_setup_one_noop(xhci);
 
 	xhci_dbg(xhci, "Command ring memory map follows:\n");
 	xhci_debug_ring(xhci, xhci->cmd_ring);
@@ -600,9 +600,11 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 		goto exit;
 	}
 	if (usb_endpoint_xfer_control(&urb->ep->desc))
-		ret = queue_ctrl_tx(xhci, mem_flags, urb, slot_id, ep_index);
+		ret = xhci_queue_ctrl_tx(xhci, mem_flags, urb,
+				slot_id, ep_index);
 	else if (usb_endpoint_xfer_bulk(&urb->ep->desc))
-		ret = queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
+		ret = xhci_queue_bulk_tx(xhci, mem_flags, urb,
+				slot_id, ep_index);
 	else
 		ret = -EINVAL;
 exit:
@@ -668,8 +670,8 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	 * the first cancellation to be handled.
 	 */
 	if (ep_ring->cancels_pending == 1) {
-		queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
-		ring_cmd_db(xhci);
+		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
+		xhci_ring_cmd_db(xhci);
 	}
 done:
 	spin_unlock_irqrestore(&xhci->lock, flags);
@@ -913,13 +915,14 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 	xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma,
 			LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
 
-	ret = queue_configure_endpoint(xhci, virt_dev->in_ctx_dma, udev->slot_id);
+	ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx_dma,
+			udev->slot_id);
 	if (ret < 0) {
 		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		return -ENOMEM;
 	}
-	ring_cmd_db(xhci);
+	xhci_ring_cmd_db(xhci);
 	spin_unlock_irqrestore(&xhci->lock, flags);
 
 	/* Wait for the configure endpoint command to complete */
@@ -1033,12 +1036,12 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
 		return;
 
 	spin_lock_irqsave(&xhci->lock, flags);
-	if (queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
+	if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
 		return;
 	}
-	ring_cmd_db(xhci);
+	xhci_ring_cmd_db(xhci);
 	spin_unlock_irqrestore(&xhci->lock, flags);
 	/*
 	 * Event command completion handler will free any data structures
@@ -1058,13 +1061,13 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
 	int ret;
 
 	spin_lock_irqsave(&xhci->lock, flags);
-	ret = queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
+	ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
 	if (ret) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
 		return 0;
 	}
-	ring_cmd_db(xhci);
+	xhci_ring_cmd_db(xhci);
 	spin_unlock_irqrestore(&xhci->lock, flags);
 
 	/* XXX: how much time for xHC slot assignment? */
@@ -1086,8 +1089,8 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
 	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
 		/* Disable slot, if we can do it without mem alloc */
 		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
-		if (!queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
-			ring_cmd_db(xhci);
+		if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
+			xhci_ring_cmd_db(xhci);
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		return 0;
 	}
@@ -1129,13 +1132,14 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 		xhci_setup_addressable_virt_dev(xhci, udev);
 	/* Otherwise, assume the core has the device configured how it wants */
 
-	ret = queue_address_device(xhci, virt_dev->in_ctx_dma, udev->slot_id);
+	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx_dma,
+			udev->slot_id);
 	if (ret) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
 		return ret;
 	}
-	ring_cmd_db(xhci);
+	xhci_ring_cmd_db(xhci);
 	spin_unlock_irqrestore(&xhci->lock, flags);
 
 	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
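Every hunk above follows the same command-submission idiom: queue a command TRB while holding xhci->lock, ring the command doorbell, then drop the lock. A condensed sketch of that flow, using only the signatures visible in this patch (error path abbreviated; the return value is illustrative):

	unsigned long flags;
	int ret;

	spin_lock_irqsave(&xhci->lock, flags);
	/* queue a command TRB; nonzero means no room on the command ring
	 * (the FIXMEs above note a ring segment should be allocated) */
	ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return ret;
	}
	/* tell the host controller that new commands are pending */
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);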