author    | Alan Stern <stern@rowland.harvard.edu>  | 2007-01-16 11:56:32 -0500
committer | Greg Kroah-Hartman <gregkh@suse.de>     | 2007-02-07 18:44:37 -0500
commit    | 3ca2a3211ee5078d49b04fe7149ff2a76473be51 (patch)
tree      | 53f6df56c8d24c7750fced8aca7678867bdfc63d /drivers/usb/host/uhci-q.c
parent    | 6a6c957eba20814456bc4bffbd4ec42406f9eb02 (diff)
UHCI: fix bandwidth allocation
This patch (as840) fixes the bandwidth allocation mechanism in
uhci-hcd. It has never worked correctly.
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
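
The new mechanism keeps a per-frame load table inside uhci-hcd (uhci->load[], one entry per slot of the 32-frame schedule) and picks, for each periodic QH, the phase whose busiest slot is least loaded; a transfer is admitted only if that minimax load plus the QH's own load stays within 900 us per 1 ms frame. Below is a minimal standalone sketch of that search, assuming the patch's MAX_PHASE of 32 and 900 us limit; the array, helper names, and main() are illustrative only, not the kernel code.

```c
/* Standalone illustration of the minimax phase search added by this
 * patch.  MAX_PHASE and the 900 us limit come from the patch; the
 * helpers and main() below are invented for the demo. */
#include <stdio.h>

#define MAX_PHASE 32			/* schedule repeats every 32 frames */

static int load[MAX_PHASE];		/* us of periodic traffic per frame slot */

/* Worst-case (highest) load among the slots hit by this phase/period */
static int highest_load(int phase, int period)
{
	int highest = load[phase];

	for (phase += period; phase < MAX_PHASE; phase += period)
		if (load[phase] > highest)
			highest = load[phase];
	return highest;
}

/* Pick the phase whose busiest slot is least loaded (minimax) */
static int best_phase(int period)
{
	int phase, best = 0;
	int max_phase = period < MAX_PHASE ? period : MAX_PHASE;
	int minimax = highest_load(0, period);

	for (phase = 1; phase < max_phase; ++phase) {
		int l = highest_load(phase, period);

		if (l < minimax) {
			minimax = l;
			best = phase;
		}
	}
	return best;
}

/* Admission test: no slot may exceed 900 us (90% of a 1 ms frame) */
static int fits(int phase, int period, int qh_load)
{
	return highest_load(phase, period) + qh_load <= 900;
}

int main(void)
{
	int phase;

	load[0] = 500;			/* pretend slot 0 already carries 500 us */
	phase = best_phase(8);		/* a period-8 endpoint avoids slot 0 */
	printf("period 8: phase %d, admit 300 us: %s\n",
	       phase, fits(phase, 8, 300) ? "yes" : "no");
	return 0;
}
```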
Diffstat (limited to 'drivers/usb/host/uhci-q.c')
-rw-r--r-- | drivers/usb/host/uhci-q.c | 241
1 file changed, 175 insertions(+), 66 deletions(-)
```diff
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 5afcc5227284..2cbb239e63f8 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -261,6 +261,14 @@ static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
 		qh->udev = udev;
 		hep->hcpriv = qh;
 
+		if (qh->type == USB_ENDPOINT_XFER_INT ||
+				qh->type == USB_ENDPOINT_XFER_ISOC)
+			qh->load = usb_calc_bus_time(udev->speed,
+					usb_endpoint_dir_in(&hep->desc),
+					qh->type == USB_ENDPOINT_XFER_ISOC,
+					le16_to_cpu(hep->desc.wMaxPacketSize))
+				/ 1000 + 1;
+
 	} else {		/* Skeleton QH */
 		qh->state = QH_STATE_ACTIVE;
 		qh->type = -1;
@@ -496,6 +504,121 @@ static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
 	wake_up_all(&uhci->waitqh);
 }
 
+/*
+ * Find the highest existing bandwidth load for a given phase and period.
+ */
+static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period)
+{
+	int highest_load = uhci->load[phase];
+
+	for (phase += period; phase < MAX_PHASE; phase += period)
+		highest_load = max_t(int, highest_load, uhci->load[phase]);
+	return highest_load;
+}
+
+/*
+ * Set qh->phase to the optimal phase for a periodic transfer and
+ * check whether the bandwidth requirement is acceptable.
+ */
+static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	int minimax_load;
+
+	/* Find the optimal phase (unless it is already set) and get
+	 * its load value. */
+	if (qh->phase >= 0)
+		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
+	else {
+		int phase, load;
+		int max_phase = min_t(int, MAX_PHASE, qh->period);
+
+		qh->phase = 0;
+		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
+		for (phase = 1; phase < max_phase; ++phase) {
+			load = uhci_highest_load(uhci, phase, qh->period);
+			if (load < minimax_load) {
+				minimax_load = load;
+				qh->phase = phase;
+			}
+		}
+	}
+
+	/* Maximum allowable periodic bandwidth is 90%, or 900 us per frame */
+	if (minimax_load + qh->load > 900) {
+		dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: "
+				"period %d, phase %d, %d + %d us\n",
+				qh->period, qh->phase, minimax_load, qh->load);
+		return -ENOSPC;
+	}
+	return 0;
+}
+
+/*
+ * Reserve a periodic QH's bandwidth in the schedule
+ */
+static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	int i;
+	int load = qh->load;
+	char *p = "??";
+
+	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
+		uhci->load[i] += load;
+		uhci->total_load += load;
+	}
+	uhci_to_hcd(uhci)->self.bandwidth_allocated =
+			uhci->total_load / MAX_PHASE;
+	switch (qh->type) {
+	case USB_ENDPOINT_XFER_INT:
+		++uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
+		p = "INT";
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
+		p = "ISO";
+		break;
+	}
+	qh->bandwidth_reserved = 1;
+	dev_dbg(uhci_dev(uhci),
+			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
+			"reserve", qh->udev->devnum,
+			qh->hep->desc.bEndpointAddress, p,
+			qh->period, qh->phase, load);
+}
+
+/*
+ * Release a periodic QH's bandwidth reservation
+ */
+static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	int i;
+	int load = qh->load;
+	char *p = "??";
+
+	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
+		uhci->load[i] -= load;
+		uhci->total_load -= load;
+	}
+	uhci_to_hcd(uhci)->self.bandwidth_allocated =
+			uhci->total_load / MAX_PHASE;
+	switch (qh->type) {
+	case USB_ENDPOINT_XFER_INT:
+		--uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
+		p = "INT";
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		--uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
+		p = "ISO";
+		break;
+	}
+	qh->bandwidth_reserved = 0;
+	dev_dbg(uhci_dev(uhci),
+			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
+			"release", qh->udev->devnum,
+			qh->hep->desc.bEndpointAddress, p,
+			qh->period, qh->phase, load);
+}
+
 static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
 		struct urb *urb)
 {
@@ -799,7 +922,6 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
 	wmb();
 	qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
 	qh->dummy_td = td;
-	qh->period = urb->interval;
 
 	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
 			usb_pipeout(urb->pipe), toggle);
@@ -830,28 +952,42 @@ static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
 static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
 		struct uhci_qh *qh)
 {
-	int exponent;
+	int ret;
 
 	/* USB 1.1 interrupt transfers only involve one packet per interval.
 	 * Drivers can submit URBs of any length, but longer ones will need
 	 * multiple intervals to complete.
 	 */
 
-	/* Figure out which power-of-two queue to use */
-	for (exponent = 7; exponent >= 0; --exponent) {
-		if ((1 << exponent) <= urb->interval)
-			break;
-	}
-	if (exponent < 0)
-		return -EINVAL;
-	urb->interval = 1 << exponent;
+	if (!qh->bandwidth_reserved) {
+		int exponent;
 
-	if (qh->period == 0)
+		/* Figure out which power-of-two queue to use */
+		for (exponent = 7; exponent >= 0; --exponent) {
+			if ((1 << exponent) <= urb->interval)
+				break;
+		}
+		if (exponent < 0)
+			return -EINVAL;
+		qh->period = 1 << exponent;
 		qh->skel = uhci->skelqh[UHCI_SKEL_INDEX(exponent)];
-	else if (qh->period != urb->interval)
-		return -EINVAL;		/* Can't change the period */
 
-	return uhci_submit_common(uhci, urb, qh);
+		/* For now, interrupt phase is fixed by the layout
+		 * of the QH lists. */
+		qh->phase = (qh->period / 2) & (MAX_PHASE - 1);
+		ret = uhci_check_bandwidth(uhci, qh);
+		if (ret)
+			return ret;
+	} else if (qh->period > urb->interval)
+		return -EINVAL;		/* Can't decrease the period */
+
+	ret = uhci_submit_common(uhci, urb, qh);
+	if (ret == 0) {
+		urb->interval = qh->period;
+		if (!qh->bandwidth_reserved)
+			uhci_reserve_bandwidth(uhci, qh);
+	}
+	return ret;
 }
 
 /*
@@ -998,15 +1134,32 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
 		return -EFBIG;
 
 	/* Check the period and figure out the starting frame number */
-	if (qh->period == 0) {
+	if (!qh->bandwidth_reserved) {
+		qh->period = urb->interval;
 		if (urb->transfer_flags & URB_ISO_ASAP) {
+			qh->phase = -1;		/* Find the best phase */
+			i = uhci_check_bandwidth(uhci, qh);
+			if (i)
+				return i;
+
+			/* Allow a little time to allocate the TDs */
 			uhci_get_current_frame_number(uhci);
-			urb->start_frame = uhci->frame_number + 10;
+			frame = uhci->frame_number + 10;
+
+			/* Move forward to the first frame having the
+			 * correct phase */
+			urb->start_frame = frame + ((qh->phase - frame) &
+					(qh->period - 1));
 		} else {
 			i = urb->start_frame - uhci->last_iso_frame;
 			if (i <= 0 || i >= UHCI_NUMFRAMES)
 				return -EINVAL;
+			qh->phase = urb->start_frame & (qh->period - 1);
+			i = uhci_check_bandwidth(uhci, qh);
+			if (i)
+				return i;
 		}
+
 	} else if (qh->period != urb->interval) {
 		return -EINVAL;		/* Can't change the period */
 
@@ -1052,9 +1205,6 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
 	/* Set the interrupt-on-completion flag on the last packet. */
 	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);
 
-	qh->skel = uhci->skel_iso_qh;
-	qh->period = urb->interval;
-
 	/* Add the TDs to the frame list */
 	frame = urb->start_frame;
 	list_for_each_entry(td, &urbp->td_list, list) {
@@ -1068,6 +1218,9 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
 		qh->iso_status = 0;
 	}
 
+	qh->skel = uhci->skel_iso_qh;
+	if (!qh->bandwidth_reserved)
+		uhci_reserve_bandwidth(uhci, qh);
 	return 0;
 }
 
@@ -1122,7 +1275,6 @@ static int uhci_urb_enqueue(struct usb_hcd *hcd,
 	unsigned long flags;
 	struct urb_priv *urbp;
 	struct uhci_qh *qh;
-	int bustime;
 
 	spin_lock_irqsave(&uhci->lock, flags);
 
@@ -1152,35 +1304,11 @@ static int uhci_urb_enqueue(struct usb_hcd *hcd,
 		ret = uhci_submit_bulk(uhci, urb, qh);
 		break;
 	case USB_ENDPOINT_XFER_INT:
-		if (list_empty(&qh->queue)) {
-			bustime = usb_check_bandwidth(urb->dev, urb);
-			if (bustime < 0)
-				ret = bustime;
-			else {
-				ret = uhci_submit_interrupt(uhci, urb, qh);
-				if (ret == 0)
-					usb_claim_bandwidth(urb->dev, urb, bustime, 0);
-			}
-		} else {	/* inherit from parent */
-			struct urb_priv *eurbp;
-
-			eurbp = list_entry(qh->queue.prev, struct urb_priv,
-					node);
-			urb->bandwidth = eurbp->urb->bandwidth;
-			ret = uhci_submit_interrupt(uhci, urb, qh);
-		}
+		ret = uhci_submit_interrupt(uhci, urb, qh);
 		break;
 	case USB_ENDPOINT_XFER_ISOC:
 		urb->error_count = 0;
-		bustime = usb_check_bandwidth(urb->dev, urb);
-		if (bustime < 0) {
-			ret = bustime;
-			break;
-		}
-
 		ret = uhci_submit_isochronous(uhci, urb, qh);
-		if (ret == 0)
-			usb_claim_bandwidth(urb->dev, urb, bustime, 1);
 		break;
 	}
 	if (ret != 0)
@@ -1277,24 +1405,6 @@ __acquires(uhci->lock)
 
 	uhci_free_urb_priv(uhci, urbp);
 
-	switch (qh->type) {
-	case USB_ENDPOINT_XFER_ISOC:
-		/* Release bandwidth for Interrupt or Isoc. transfers */
-		if (urb->bandwidth)
-			usb_release_bandwidth(urb->dev, urb, 1);
-		break;
-	case USB_ENDPOINT_XFER_INT:
-		/* Release bandwidth for Interrupt or Isoc. transfers */
-		/* Make sure we don't release if we have a queued URB */
-		if (list_empty(&qh->queue) && urb->bandwidth)
-			usb_release_bandwidth(urb->dev, urb, 0);
-		else
-			/* bandwidth was passed on to queued URB, */
-			/* so don't let usb_unlink_urb() release it */
-			urb->bandwidth = 0;
-		break;
-	}
-
 	spin_unlock(&uhci->lock);
 	usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb);
 	spin_lock(&uhci->lock);
@@ -1303,9 +1413,8 @@ __acquires(uhci->lock)
 	 * reserved bandwidth. */
 	if (list_empty(&qh->queue)) {
 		uhci_unlink_qh(uhci, qh);
-
-		/* Bandwidth stuff not yet implemented */
-		qh->period = 0;
+		if (qh->bandwidth_reserved)
+			uhci_release_bandwidth(uhci, qh);
 	}
 }
 
```
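
A subtle step in uhci_submit_isochronous() above is how URB_ISO_ASAP submissions land on the right frame: because qh->period is always a power of two, `(qh->phase - frame) & (qh->period - 1)` is the non-negative distance, modulo the period, from the candidate frame to the next frame with the chosen phase. A minimal demonstration with invented numbers:

```c
/* Demonstrates the power-of-two phase alignment used in
 * uhci_submit_isochronous() above; the values here are made up. */
#include <stdio.h>

int main(void)
{
	unsigned int frame = 1003;	/* e.g. current frame number + 10 */
	unsigned int period = 8;	/* always a power of two */
	unsigned int phase = 5;		/* as chosen by uhci_check_bandwidth() */
	unsigned int start;

	/* (phase - frame) mod period, computed with a mask */
	start = frame + ((phase - frame) & (period - 1));

	/* Prints: start_frame = 1005 (start % 8 == 5) */
	printf("start_frame = %u (start %% %u == %u)\n",
	       start, period, start % period);
	return 0;
}
```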