author | Sarah Sharp <sarah.a.sharp@linux.intel.com> | 2009-09-04 13:53:13 -0400
committer | Greg Kroah-Hartman <gregkh@suse.de> | 2009-09-23 09:46:39 -0400
commit | 913a8a344ffcaf0b4a586d6662a2c66a7106557d (patch)
tree | 07a2a10118ab15bd4c597c1b1460c8028a3b1adc /drivers/usb/host/xhci-hcd.c
parent | 5270b951b9cd5e50aea55cb52684a171fb10381c (diff)
USB: xhci: Change how xHCI commands are handled.
Some commands to the xHCI hardware cannot be allowed to fail due to
out-of-memory conditions or the command ring being full.
Add a way to reserve a TRB on the command ring, and make all command
queueing functions indicate whether they are using a reserved TRB.
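For illustration, the reservation can be thought of as a counter of set-aside
TRBs that only must-succeed commands may draw from, with every queueing path
taking a flag saying whether it is using the reservation. The following is a
minimal sketch of that idea; the struct and helper names (cmd_ring_sketch,
reserve_command_trb, room_for_command) are invented for the example and are
not the driver's actual identifiers.

    /* Sketch only: hypothetical names, not the xHCI driver's real fields. */
    struct cmd_ring_sketch {
            unsigned int num_free_trbs;     /* unused TRBs left on the command ring */
            unsigned int num_reserved_trbs; /* TRBs set aside for must-succeed commands */
    };

    /* Reserve one TRB ahead of time so a later must-succeed command cannot
     * find the ring full.
     */
    static int reserve_command_trb(struct cmd_ring_sketch *ring)
    {
            if (ring->num_free_trbs <= ring->num_reserved_trbs)
                    return -1;      /* no free TRB left to set aside */
            ring->num_reserved_trbs++;
            return 0;
    }

    /* Each command-queueing function would check this, passing a flag (like
     * the must_succeed argument threaded through the calls below) that says
     * whether the command may consume one of the reserved TRBs.
     */
    static int room_for_command(struct cmd_ring_sketch *ring, int use_reserved)
    {
            if (use_reserved && ring->num_reserved_trbs) {
                    ring->num_reserved_trbs--;
                    return 1;
            }
            return ring->num_free_trbs > ring->num_reserved_trbs;
    }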
Add a way to pre-allocate all the memory a command might need. A command
needs an input context, a variable to store the status, and (optionally) a
completion for the caller to wait on. Change all code that assumes the
input device context, status, and completion for a command are stored in
the xhci virtual USB device structure (xhci_virt_device).
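Judging from the fields this diff touches (command->in_ctx, command->status,
command->completion, command->command_trb, command->cmd_list), the
pre-allocated state travels in a structure shaped roughly as follows; this is
a sketch reconstructed from the diff, not the exact definition in xhci.h.

    /* Rough shape of the per-command allocation, inferred from the diff. */
    struct xhci_command {
            /* Input context submitted with the configure/evaluate endpoint command */
            struct xhci_container_ctx       *in_ctx;
            /* Completion code filled in by the command event handler */
            int                             status;
            /* The caller sleeps on this until the command finishes; may be
             * NULL when no one needs to wait.
             */
            struct completion               *completion;
            /* Command TRB the command was queued on, so the event handler can
             * match a completion event back to this command.
             */
            union xhci_trb                  *command_trb;
            /* Entry in the xhci_virt_device pending-command FIFO (cmd_list) */
            struct list_head                cmd_list;
    };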
Store pending completions in a FIFO in xhci_virt_device. Make the event
handler for a configure endpoint command check to see whether a pending
command in the list has completed. We need separate input device contexts
for some configure endpoint commands, since multiple drivers can
simultaneously submit requests that require a configure endpoint command.
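The event-handler side is outside this file's diff, but its intent can be
sketched as follows. This is illustrative only; the real handler lives in the
ring-handling code and its name and exact logic may differ.

    /* Illustrative sketch, not the driver's actual handler.  On a command
     * completion event, check whether the completed TRB belongs to the
     * command at the head of the device's pending-command FIFO; if so,
     * record the status and wake any waiter.
     */
    static int handle_pending_cmd_completion(struct xhci_virt_device *virt_dev,
                    union xhci_trb *completed_trb, int status)
    {
            struct xhci_command *command;

            if (list_empty(&virt_dev->cmd_list))
                    return 0;
            command = list_entry(virt_dev->cmd_list.next,
                            struct xhci_command, cmd_list);
            if (command->command_trb != completed_trb)
                    return 0;

            command->status = status;
            list_del(&command->cmd_list);
            if (command->completion)
                    complete(command->completion);
            return 1;
    }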
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/usb/host/xhci-hcd.c')
-rw-r--r-- | drivers/usb/host/xhci-hcd.c | 82
1 file changed, 52 insertions, 30 deletions
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 9c985d1245f3..2fcc360f0648 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -612,8 +612,8 @@ int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
 }
 
 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
-		struct usb_device *udev, struct xhci_virt_device *virt_dev,
-		bool ctx_change);
+		struct usb_device *udev, struct xhci_command *command,
+		bool ctx_change, bool must_succeed);
 
 /*
  * Full speed devices may have a max packet size greater than 8 bytes, but the
@@ -645,7 +645,8 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
 	xhci_dbg(xhci, "Issuing evaluate context command.\n");
 
 	/* Set up the modified control endpoint 0 */
-	xhci_endpoint_copy(xhci, xhci->devs[slot_id], ep_index);
+	xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
+			xhci->devs[slot_id]->out_ctx, ep_index);
 	in_ctx = xhci->devs[slot_id]->in_ctx;
 	ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
 	ep_ctx->ep_info2 &= ~MAX_PACKET_MASK;
@@ -664,8 +665,8 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
 	xhci_dbg(xhci, "Slot %d output context\n", slot_id);
 	xhci_dbg_ctx(xhci, out_ctx, ep_index);
 
-	ret = xhci_configure_endpoint(xhci, urb->dev,
-			xhci->devs[slot_id], true);
+	ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
+			true, false);
 
 	/* Clean up the input context for later use by bandwidth
 	 * functions.
@@ -1038,11 +1039,11 @@ static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *vir
 }
 
 static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
-		struct usb_device *udev, struct xhci_virt_device *virt_dev)
+		struct usb_device *udev, int *cmd_status)
 {
 	int ret;
 
-	switch (virt_dev->cmd_status) {
+	switch (*cmd_status) {
 	case COMP_ENOMEM:
 		dev_warn(&udev->dev, "Not enough host controller resources "
 				"for new device state.\n");
@@ -1068,7 +1069,7 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
 		break;
 	default:
 		xhci_err(xhci, "ERROR: unexpected command completion "
-				"code 0x%x.\n", virt_dev->cmd_status);
+				"code 0x%x.\n", *cmd_status);
 		ret = -EINVAL;
 		break;
 	}
@@ -1076,11 +1077,12 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
 }
 
 static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
-		struct usb_device *udev, struct xhci_virt_device *virt_dev)
+		struct usb_device *udev, int *cmd_status)
 {
 	int ret;
+	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
 
-	switch (virt_dev->cmd_status) {
+	switch (*cmd_status) {
 	case COMP_EINVAL:
 		dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
 				"context command.\n");
@@ -1101,7 +1103,7 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
 		break;
 	default:
 		xhci_err(xhci, "ERROR: unexpected command completion "
-				"code 0x%x.\n", virt_dev->cmd_status);
+				"code 0x%x.\n", *cmd_status);
 		ret = -EINVAL;
 		break;
 	}
@@ -1112,19 +1114,37 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
  * and wait for it to finish.
  */
 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
-		struct usb_device *udev, struct xhci_virt_device *virt_dev,
-		bool ctx_change)
+		struct usb_device *udev,
+		struct xhci_command *command,
+		bool ctx_change, bool must_succeed)
 {
 	int ret;
 	int timeleft;
 	unsigned long flags;
+	struct xhci_container_ctx *in_ctx;
+	struct completion *cmd_completion;
+	int *cmd_status;
+	struct xhci_virt_device *virt_dev;
 
 	spin_lock_irqsave(&xhci->lock, flags);
+	virt_dev = xhci->devs[udev->slot_id];
+	if (command) {
+		in_ctx = command->in_ctx;
+		cmd_completion = command->completion;
+		cmd_status = &command->status;
+		command->command_trb = xhci->cmd_ring->enqueue;
+		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
+	} else {
+		in_ctx = virt_dev->in_ctx;
+		cmd_completion = &virt_dev->cmd_completion;
+		cmd_status = &virt_dev->cmd_status;
+	}
+
 	if (!ctx_change)
-		ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx->dma,
-				udev->slot_id);
+		ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
+				udev->slot_id, must_succeed);
 	else
-		ret = xhci_queue_evaluate_context(xhci, virt_dev->in_ctx->dma,
+		ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
 				udev->slot_id);
 	if (ret < 0) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1136,7 +1156,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 
 	/* Wait for the configure endpoint command to complete */
 	timeleft = wait_for_completion_interruptible_timeout(
-			&virt_dev->cmd_completion,
+			cmd_completion,
 			USB_CTRL_SET_TIMEOUT);
 	if (timeleft <= 0) {
 		xhci_warn(xhci, "%s while waiting for %s command\n",
@@ -1149,8 +1169,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 	}
 
 	if (!ctx_change)
-		return xhci_configure_endpoint_result(xhci, udev, virt_dev);
-	return xhci_evaluate_context_result(xhci, udev, virt_dev);
+		return xhci_configure_endpoint_result(xhci, udev, cmd_status);
+	return xhci_evaluate_context_result(xhci, udev, cmd_status);
 }
 
 /* Called after one or more calls to xhci_add_endpoint() or
@@ -1196,7 +1216,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
 			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
 
-	ret = xhci_configure_endpoint(xhci, udev, virt_dev, false);
+	ret = xhci_configure_endpoint(xhci, udev, NULL,
+			false, false);
 	if (ret) {
 		/* Callee should call reset_bandwidth() */
 		return ret;
@@ -1248,19 +1269,19 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 }
 
 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
-		unsigned int slot_id, u32 add_flags, u32 drop_flags)
+		struct xhci_container_ctx *in_ctx,
+		struct xhci_container_ctx *out_ctx,
+		u32 add_flags, u32 drop_flags)
 {
 	struct xhci_input_control_ctx *ctrl_ctx;
-	ctrl_ctx = xhci_get_input_control_ctx(xhci,
-			xhci->devs[slot_id]->in_ctx);
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
 	ctrl_ctx->add_flags = add_flags;
 	ctrl_ctx->drop_flags = drop_flags;
-	xhci_slot_copy(xhci, xhci->devs[slot_id]);
+	xhci_slot_copy(xhci, in_ctx, out_ctx);
 	ctrl_ctx->add_flags |= SLOT_FLAG;
 
-	xhci_dbg(xhci, "Slot ID %d Input Context:\n", slot_id);
-	xhci_dbg_ctx(xhci, xhci->devs[slot_id]->in_ctx,
-			xhci_last_valid_endpoint(add_flags));
+	xhci_dbg(xhci, "Input Context:\n");
+	xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
 }
 
 void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
@@ -1272,7 +1293,8 @@ void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
 	u32 added_ctxs;
 	dma_addr_t addr;
 
-	xhci_endpoint_copy(xhci, xhci->devs[slot_id], ep_index);
+	xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
+			xhci->devs[slot_id]->out_ctx, ep_index);
 	in_ctx = xhci->devs[slot_id]->in_ctx;
 	ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
 	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
@@ -1288,8 +1310,8 @@ void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
 	ep_ctx->deq = addr | deq_state->new_cycle_state;
 
 	added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
-	xhci_setup_input_ctx_for_config_ep(xhci, slot_id,
-			added_ctxs, added_ctxs);
+	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
+			xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
 }
 
 void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,