about summary refs log tree commit diff stats
path: root/drivers/usb/host/xhci-hcd.c
diff options
context:
space:
mode:
authorSarah Sharp <sarah.a.sharp@linux.intel.com>2009-09-04 13:53:09 -0400
committerGreg Kroah-Hartman <gregkh@suse.de>2009-09-23 09:46:39 -0400
commit63a0d9abd18cdcf5a985029c266c6bfe0511768f (patch)
tree2ae717082d022b2a86a64b86dee48ddfb2be0627 /drivers/usb/host/xhci-hcd.c
parent9e221be815cd263480928248bfd4541497017a1b (diff)
USB: xhci: Endpoint representation refactoring.
The xhci_ring structure contained information that is really related to an endpoint, not a ring. This will cause problems later when endpoint streams are supported and there are multiple rings per endpoint. Move the endpoint state and cancellation information into a new virtual endpoint structure, xhci_virt_ep. The list of TRBs to be cancelled should be per endpoint, not per ring, for easy access. There can be only one TRB that the endpoint stopped on after a stop endpoint command (even with streams enabled); move the stopped TRB information into the new virtual endpoint structure. Also move the 31 endpoint rings and temporary ring storage from the virtual device structure (xhci_virt_device) into the virtual endpoint structure (xhci_virt_ep). Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/usb/host/xhci-hcd.c')
-rw-r--r--drivers/usb/host/xhci-hcd.c59
1 file changed, 31 insertions, 28 deletions
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index e478a63488fb..3ab9090c22dc 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -351,13 +351,14 @@ void xhci_event_ring_work(unsigned long arg)
351 xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); 351 xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
352 xhci_dbg_cmd_ptrs(xhci); 352 xhci_dbg_cmd_ptrs(xhci);
353 for (i = 0; i < MAX_HC_SLOTS; ++i) { 353 for (i = 0; i < MAX_HC_SLOTS; ++i) {
354 if (xhci->devs[i]) { 354 if (!xhci->devs[i])
355 for (j = 0; j < 31; ++j) { 355 continue;
356 if (xhci->devs[i]->ep_rings[j]) { 356 for (j = 0; j < 31; ++j) {
357 xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j); 357 struct xhci_ring *ring = xhci->devs[i]->eps[j].ring;
358 xhci_debug_segment(xhci, xhci->devs[i]->ep_rings[j]->deq_seg); 358 if (!ring)
359 } 359 continue;
360 } 360 xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
361 xhci_debug_segment(xhci, ring->deq_seg);
361 } 362 }
362 } 363 }
363 364
@@ -778,6 +779,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
778 struct xhci_td *td; 779 struct xhci_td *td;
779 unsigned int ep_index; 780 unsigned int ep_index;
780 struct xhci_ring *ep_ring; 781 struct xhci_ring *ep_ring;
782 struct xhci_virt_ep *ep;
781 783
782 xhci = hcd_to_xhci(hcd); 784 xhci = hcd_to_xhci(hcd);
783 spin_lock_irqsave(&xhci->lock, flags); 785 spin_lock_irqsave(&xhci->lock, flags);
@@ -790,17 +792,18 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
790 xhci_dbg(xhci, "Event ring:\n"); 792 xhci_dbg(xhci, "Event ring:\n");
791 xhci_debug_ring(xhci, xhci->event_ring); 793 xhci_debug_ring(xhci, xhci->event_ring);
792 ep_index = xhci_get_endpoint_index(&urb->ep->desc); 794 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
793 ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index]; 795 ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
796 ep_ring = ep->ring;
794 xhci_dbg(xhci, "Endpoint ring:\n"); 797 xhci_dbg(xhci, "Endpoint ring:\n");
795 xhci_debug_ring(xhci, ep_ring); 798 xhci_debug_ring(xhci, ep_ring);
796 td = (struct xhci_td *) urb->hcpriv; 799 td = (struct xhci_td *) urb->hcpriv;
797 800
798 ep_ring->cancels_pending++; 801 ep->cancels_pending++;
799 list_add_tail(&td->cancelled_td_list, &ep_ring->cancelled_td_list); 802 list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
800 /* Queue a stop endpoint command, but only if this is 803 /* Queue a stop endpoint command, but only if this is
801 * the first cancellation to be handled. 804 * the first cancellation to be handled.
802 */ 805 */
803 if (ep_ring->cancels_pending == 1) { 806 if (ep->cancels_pending == 1) {
804 xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index); 807 xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
805 xhci_ring_cmd_db(xhci); 808 xhci_ring_cmd_db(xhci);
806 } 809 }
@@ -1206,10 +1209,10 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1206 xhci_zero_in_ctx(xhci, virt_dev); 1209 xhci_zero_in_ctx(xhci, virt_dev);
1207 /* Free any old rings */ 1210 /* Free any old rings */
1208 for (i = 1; i < 31; ++i) { 1211 for (i = 1; i < 31; ++i) {
1209 if (virt_dev->new_ep_rings[i]) { 1212 if (virt_dev->eps[i].new_ring) {
1210 xhci_ring_free(xhci, virt_dev->ep_rings[i]); 1213 xhci_ring_free(xhci, virt_dev->eps[i].ring);
1211 virt_dev->ep_rings[i] = virt_dev->new_ep_rings[i]; 1214 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
1212 virt_dev->new_ep_rings[i] = NULL; 1215 virt_dev->eps[i].new_ring = NULL;
1213 } 1216 }
1214 } 1217 }
1215 1218
@@ -1236,9 +1239,9 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1236 virt_dev = xhci->devs[udev->slot_id]; 1239 virt_dev = xhci->devs[udev->slot_id];
1237 /* Free any rings allocated for added endpoints */ 1240 /* Free any rings allocated for added endpoints */
1238 for (i = 0; i < 31; ++i) { 1241 for (i = 0; i < 31; ++i) {
1239 if (virt_dev->new_ep_rings[i]) { 1242 if (virt_dev->eps[i].new_ring) {
1240 xhci_ring_free(xhci, virt_dev->new_ep_rings[i]); 1243 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
1241 virt_dev->new_ep_rings[i] = NULL; 1244 virt_dev->eps[i].new_ring = NULL;
1242 } 1245 }
1243 } 1246 }
1244 xhci_zero_in_ctx(xhci, virt_dev); 1247 xhci_zero_in_ctx(xhci, virt_dev);
@@ -1281,17 +1284,18 @@ void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
1281} 1284}
1282 1285
1283void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, 1286void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
1284 struct usb_device *udev, 1287 struct usb_device *udev, unsigned int ep_index)
1285 unsigned int ep_index, struct xhci_ring *ep_ring)
1286{ 1288{
1287 struct xhci_dequeue_state deq_state; 1289 struct xhci_dequeue_state deq_state;
1290 struct xhci_virt_ep *ep;
1288 1291
1289 xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n"); 1292 xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
1293 ep = &xhci->devs[udev->slot_id]->eps[ep_index];
1290 /* We need to move the HW's dequeue pointer past this TD, 1294 /* We need to move the HW's dequeue pointer past this TD,
1291 * or it will attempt to resend it on the next doorbell ring. 1295 * or it will attempt to resend it on the next doorbell ring.
1292 */ 1296 */
1293 xhci_find_new_dequeue_state(xhci, udev->slot_id, 1297 xhci_find_new_dequeue_state(xhci, udev->slot_id,
1294 ep_index, ep_ring->stopped_td, 1298 ep_index, ep->stopped_td,
1295 &deq_state); 1299 &deq_state);
1296 1300
1297 /* HW with the reset endpoint quirk will use the saved dequeue state to 1301 /* HW with the reset endpoint quirk will use the saved dequeue state to
@@ -1299,8 +1303,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
1299 */ 1303 */
1300 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { 1304 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
1301 xhci_dbg(xhci, "Queueing new dequeue state\n"); 1305 xhci_dbg(xhci, "Queueing new dequeue state\n");
1302 xhci_queue_new_dequeue_state(xhci, ep_ring, 1306 xhci_queue_new_dequeue_state(xhci, udev->slot_id,
1303 udev->slot_id,
1304 ep_index, &deq_state); 1307 ep_index, &deq_state);
1305 } else { 1308 } else {
1306 /* Better hope no one uses the input context between now and the 1309 /* Better hope no one uses the input context between now and the
@@ -1327,7 +1330,7 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
1327 unsigned int ep_index; 1330 unsigned int ep_index;
1328 unsigned long flags; 1331 unsigned long flags;
1329 int ret; 1332 int ret;
1330 struct xhci_ring *ep_ring; 1333 struct xhci_virt_ep *virt_ep;
1331 1334
1332 xhci = hcd_to_xhci(hcd); 1335 xhci = hcd_to_xhci(hcd);
1333 udev = (struct usb_device *) ep->hcpriv; 1336 udev = (struct usb_device *) ep->hcpriv;
@@ -1337,8 +1340,8 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
1337 if (!ep->hcpriv) 1340 if (!ep->hcpriv)
1338 return; 1341 return;
1339 ep_index = xhci_get_endpoint_index(&ep->desc); 1342 ep_index = xhci_get_endpoint_index(&ep->desc);
1340 ep_ring = xhci->devs[udev->slot_id]->ep_rings[ep_index]; 1343 virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
1341 if (!ep_ring->stopped_td) { 1344 if (!virt_ep->stopped_td) {
1342 xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n", 1345 xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
1343 ep->desc.bEndpointAddress); 1346 ep->desc.bEndpointAddress);
1344 return; 1347 return;
@@ -1357,8 +1360,8 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
1357 * command. Better hope that last command worked! 1360 * command. Better hope that last command worked!
1358 */ 1361 */
1359 if (!ret) { 1362 if (!ret) {
1360 xhci_cleanup_stalled_ring(xhci, udev, ep_index, ep_ring); 1363 xhci_cleanup_stalled_ring(xhci, udev, ep_index);
1361 kfree(ep_ring->stopped_td); 1364 kfree(virt_ep->stopped_td);
1362 xhci_ring_cmd_db(xhci); 1365 xhci_ring_cmd_db(xhci);
1363 } 1366 }
1364 spin_unlock_irqrestore(&xhci->lock, flags); 1367 spin_unlock_irqrestore(&xhci->lock, flags);