Diffstat (limited to 'drivers/usb/host/uhci-q.c')
-rw-r--r--	drivers/usb/host/uhci-q.c | 168
1 file changed, 128 insertions, 40 deletions
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 12af6fb05a30..2be84b3b40fe 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -37,6 +37,46 @@ static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
 	uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
 }
 
+
+/*
+ * Full-Speed Bandwidth Reclamation (FSBR).
+ * We turn on FSBR whenever a queue that wants it is advancing,
+ * and leave it on for a short time thereafter.
+ */
+static void uhci_fsbr_on(struct uhci_hcd *uhci)
+{
+	uhci->fsbr_is_on = 1;
+	uhci->skel_term_qh->link = cpu_to_le32(
+			uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
+}
+
+static void uhci_fsbr_off(struct uhci_hcd *uhci)
+{
+	uhci->fsbr_is_on = 0;
+	uhci->skel_term_qh->link = UHCI_PTR_TERM;
+}
+
+static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
+{
+	struct urb_priv *urbp = urb->hcpriv;
+
+	if (!(urb->transfer_flags & URB_NO_FSBR))
+		urbp->fsbr = 1;
+}
+
+static void uhci_qh_wants_fsbr(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	struct urb_priv *urbp =
+			list_entry(qh->queue.next, struct urb_priv, node);
+
+	if (urbp->fsbr) {
+		uhci->fsbr_jiffies = jiffies;
+		if (!uhci->fsbr_is_on)
+			uhci_fsbr_on(uhci);
+	}
+}
+
+
 static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
 {
 	dma_addr_t dma_handle;
@@ -331,6 +371,10 @@ static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
 		qh->element = cpu_to_le32(td->dma_handle);
 	}
 
+	/* Treat the queue as if it has just advanced */
+	qh->wait_expired = 0;
+	qh->advance_jiffies = jiffies;
+
 	if (qh->state == QH_STATE_ACTIVE)
 		return;
 	qh->state = QH_STATE_ACTIVE;
@@ -445,28 +489,6 @@ static void uhci_free_urb_priv(struct uhci_hcd *uhci,
 	kmem_cache_free(uhci_up_cachep, urbp);
 }
 
-static void uhci_inc_fsbr(struct uhci_hcd *uhci, struct urb *urb)
-{
-	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
-
-	if ((!(urb->transfer_flags & URB_NO_FSBR)) && !urbp->fsbr) {
-		urbp->fsbr = 1;
-		if (!uhci->fsbr++ && !uhci->fsbrtimeout)
-			uhci->skel_term_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
-	}
-}
-
-static void uhci_dec_fsbr(struct uhci_hcd *uhci, struct urb *urb)
-{
-	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
-
-	if ((!(urb->transfer_flags & URB_NO_FSBR)) && urbp->fsbr) {
-		urbp->fsbr = 0;
-		if (!--uhci->fsbr)
-			uhci->fsbrtimeout = jiffies + FSBR_DELAY;
-	}
-}
-
 /*
  * Map status to standard result codes
  *
@@ -613,7 +635,7 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
 		qh->skel = uhci->skel_ls_control_qh;
 	else {
 		qh->skel = uhci->skel_fs_control_qh;
-		uhci_inc_fsbr(uhci, urb);
+		uhci_add_fsbr(uhci, urb);
 	}
 
 	urb->actual_length = -8;	/* Account for the SETUP packet */
@@ -756,7 +778,7 @@ static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
 	qh->skel = uhci->skel_bulk_qh;
 	ret = uhci_submit_common(uhci, urb, qh);
 	if (ret == 0)
-		uhci_inc_fsbr(uhci, urb);
+		uhci_add_fsbr(uhci, urb);
 	return ret;
 }
 
@@ -1075,8 +1097,10 @@ static int uhci_urb_enqueue(struct usb_hcd *hcd,
 	 * the QH is new and idle or else it's unlinked and waiting to
 	 * become idle, so we can activate it right away. But only if the
 	 * queue isn't stopped. */
-	if (qh->queue.next == &urbp->node && !qh->is_stopped)
+	if (qh->queue.next == &urbp->node && !qh->is_stopped) {
 		uhci_activate_qh(uhci, qh);
+		uhci_qh_wants_fsbr(uhci, qh);
+	}
 	goto done;
 
 err_submit_failed:
@@ -1135,7 +1159,6 @@ __acquires(uhci->lock)
 		qh->needs_fixup = 0;
 	}
 
-	uhci_dec_fsbr(uhci, urb);	/* Safe since it checks */
 	uhci_free_urb_priv(uhci, urbp);
 
 	switch (qh->type) {
@@ -1239,6 +1262,18 @@ restart:
 	if (!list_empty(&qh->queue)) {
 		if (qh->needs_fixup)
 			uhci_fixup_toggles(qh, 0);
+
+		/* If the first URB on the queue wants FSBR but its time
+		 * limit has expired, set the next TD to interrupt on
+		 * completion before reactivating the QH. */
+		urbp = list_entry(qh->queue.next, struct urb_priv, node);
+		if (urbp->fsbr && qh->wait_expired) {
+			struct uhci_td *td = list_entry(urbp->td_list.next,
+					struct uhci_td, list);
+
+			td->status |= __cpu_to_le32(TD_CTRL_IOC);
+		}
+
 		uhci_activate_qh(uhci, qh);
 	}
 
@@ -1249,6 +1284,62 @@ restart:
 }
 
 /*
+ * Check for queues that have made some forward progress.
+ * Returns 0 if the queue is not Isochronous, is ACTIVE, and
+ * has not advanced since last examined; 1 otherwise.
+ */
+static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	struct urb_priv *urbp = NULL;
+	struct uhci_td *td;
+	int ret = 1;
+	unsigned status;
+
+	if (qh->type == USB_ENDPOINT_XFER_ISOC)
+		return ret;
+
+	/* Treat an UNLINKING queue as though it hasn't advanced.
+	 * This is okay because reactivation will treat it as though
+	 * it has advanced, and if it is going to become IDLE then
+	 * this doesn't matter anyway. Furthermore it's possible
+	 * for an UNLINKING queue not to have any URBs at all, or
+	 * for its first URB not to have any TDs (if it was dequeued
+	 * just as it completed). So it's not easy in any case to
+	 * test whether such queues have advanced. */
+	if (qh->state != QH_STATE_ACTIVE) {
+		urbp = NULL;
+		status = 0;
+
+	} else {
+		urbp = list_entry(qh->queue.next, struct urb_priv, node);
+		td = list_entry(urbp->td_list.next, struct uhci_td, list);
+		status = td_status(td);
+		if (!(status & TD_CTRL_ACTIVE)) {
+
+			/* We're okay, the queue has advanced */
+			qh->wait_expired = 0;
+			qh->advance_jiffies = jiffies;
+			return ret;
+		}
+		ret = 0;
+	}
+
+	/* The queue hasn't advanced; check for timeout */
+	if (!qh->wait_expired && time_after(jiffies,
+			qh->advance_jiffies + QH_WAIT_TIMEOUT)) {
+		qh->wait_expired = 1;
+
+		/* If the current URB wants FSBR, unlink it temporarily
+		 * so that we can safely set the next TD to interrupt on
+		 * completion. That way we'll know as soon as the queue
+		 * starts moving again. */
+		if (urbp && urbp->fsbr && !(status & TD_CTRL_IOC))
+			uhci_unlink_qh(uhci, qh);
+	}
+	return ret;
+}
+
+/*
  * Process events in the schedule, but only in one thread at a time
  */
 static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
@@ -1262,7 +1353,7 @@ static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
 		return;
 	}
 	uhci->scan_in_progress = 1;
- rescan:
+rescan:
 	uhci->need_rescan = 0;
 
 	uhci_clear_next_interrupt(uhci);
@@ -1275,7 +1366,12 @@ static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
 		while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
 			uhci->next_qh = list_entry(qh->node.next,
 					struct uhci_qh, node);
-			uhci_scan_qh(uhci, qh, regs);
+
+			if (uhci_advance_check(uhci, qh)) {
+				uhci_scan_qh(uhci, qh, regs);
+				if (qh->state == QH_STATE_ACTIVE)
+					uhci_qh_wants_fsbr(uhci, qh);
+			}
 		}
 	}
 
@@ -1283,20 +1379,12 @@ static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
 		goto rescan;
 	uhci->scan_in_progress = 0;
 
+	if (uhci->fsbr_is_on && time_after(jiffies,
+			uhci->fsbr_jiffies + FSBR_OFF_DELAY))
+		uhci_fsbr_off(uhci);
+
 	if (list_empty(&uhci->skel_unlink_qh->node))
 		uhci_clear_next_interrupt(uhci);
 	else
 		uhci_set_next_interrupt(uhci);
 }
-
-static void check_fsbr(struct uhci_hcd *uhci)
-{
-	/* For now, don't scan URBs for FSBR timeouts.
-	 * Add it back in later... */
-
-	/* Really disable FSBR */
-	if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies, uhci->fsbrtimeout)) {
-		uhci->fsbrtimeout = 0;
-		uhci->skel_term_qh->link = UHCI_PTR_TERM;
-	}
-}
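
The patch replaces the old fsbr reference count and fsbrtimeout with a purely time-based policy: uhci_qh_wants_fsbr() refreshes fsbr_jiffies whenever the first URB of an advancing queue has urbp->fsbr set, uhci_scan_schedule() switches FSBR off once FSBR_OFF_DELAY passes without such a refresh, and uhci_advance_check() marks queues that stall for longer than QH_WAIT_TIMEOUT so an IOC interrupt can be armed on their next TD. The following is a minimal standalone C sketch of that timing policy, not kernel code; the constant values, the helper names, and the 'now' parameter are illustrative assumptions.

/*
 * Illustrative sketch of the FSBR timing policy in the patch above.
 * Standalone C, not taken from the kernel: FSBR_OFF_DELAY and
 * QH_WAIT_TIMEOUT values are assumptions, and 'now' stands in for jiffies.
 */
#include <stdbool.h>

#define FSBR_OFF_DELAY		5	/* assumed value, illustration only */
#define QH_WAIT_TIMEOUT		20	/* assumed value, illustration only */

struct fsbr_state {
	bool fsbr_is_on;
	unsigned long fsbr_jiffies;	/* last time an FSBR queue advanced */
};

struct qh_progress {
	bool wait_expired;
	unsigned long advance_jiffies;	/* last time this queue advanced */
};

/* Mirrors uhci_qh_wants_fsbr(): called when an advancing queue's
 * first URB wants FSBR. */
static void queue_wants_fsbr(struct fsbr_state *s, unsigned long now)
{
	s->fsbr_jiffies = now;
	s->fsbr_is_on = true;			/* uhci_fsbr_on() */
}

/* Mirrors the check at the end of uhci_scan_schedule(): FSBR is
 * switched off after FSBR_OFF_DELAY with no advancing FSBR queue. */
static void maybe_turn_fsbr_off(struct fsbr_state *s, unsigned long now)
{
	if (s->fsbr_is_on && now - s->fsbr_jiffies > FSBR_OFF_DELAY)
		s->fsbr_is_on = false;		/* uhci_fsbr_off() */
}

/* Mirrors uhci_advance_check() for a non-isochronous, active queue:
 * returns true if the queue's first TD has completed (the queue has
 * advanced); otherwise flags a timeout after QH_WAIT_TIMEOUT. */
static bool queue_advanced(struct qh_progress *q, bool first_td_active,
		unsigned long now)
{
	if (!first_td_active) {
		q->wait_expired = false;
		q->advance_jiffies = now;
		return true;
	}
	if (!q->wait_expired && now - q->advance_jiffies > QH_WAIT_TIMEOUT)
		q->wait_expired = true;	/* the patch also arms an IOC interrupt here */
	return false;
}

The sketch uses plain subtraction for clarity; the real code compares jiffies with time_after() so the checks remain correct across counter wraparound.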