author     Alan Stern <stern@rowland.harvard.edu>              2013-03-22 13:31:58 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2013-03-25 16:36:32 -0400
commit     214ac7a0771d95d2f66d01bca5afeb2c9e8ac3c8 (patch)
tree       11355668006ea8d2c2ebbb25770d11a8d397cb5b /drivers/usb/host/ehci-q.c
parent     6e018751a35f6ef7ad04eb8006b5886b6a7c47f5 (diff)
USB: EHCI: improve end_unlink_async()
This patch (as1665) changes the way ehci-hcd's end_unlink_async() routine
works in order to avoid recursive execution and to be more efficient:

Now when an IAA cycle ends, a new one gets started up right away (if it
is needed) instead of waiting until the just-unlinked QH has been
processed.

The async_iaa list is renamed to async_idle, which better expresses its
new purpose: It is now the list of QHs which are completely idle and are
waiting to be processed by end_unlink_async().

A new flag is added to track whether an IAA cycle is in progress, because
the list formerly known as async_iaa no longer stores the QHs waiting for
the IAA to finish.

The decision about how many QHs to process when an IAA cycle ends is now
made at the end of the cycle, when we know the current state of the
hardware, rather than at the beginning.  This means a bunch of logic got
moved from start_iaa_cycle() to end_unlink_async().

Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
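The reworked state machine is easier to see in isolation. Below is a
compact userspace model of the new control flow; it is a sketch rather
than kernel code: struct ctrl, struct qh, doorbell_rung, and the printf()
cleanup are illustrative stand-ins for struct ehci_hcd, struct ehci_qh,
the IAAD register/interrupt handshake, and real QH teardown, and the
suspended-controller path (which splices every waiting QH to the idle
list at once) is omitted.

/* iaa_model.c: toy userspace model of the reworked async-unlink path */
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

enum qh_state { QH_UNLINK_WAIT, QH_UNLINK, QH_IDLE };

struct qh {
	const char	*name;
	enum qh_state	state;
	struct qh	*next;		/* simple singly linked queue */
};

struct ctrl {
	struct qh	*async_unlink;	/* QHs waiting for an IAA cycle */
	struct qh	*async_idle;	/* completely idle QHs, to be processed */
	bool		iaa_in_progress;
	bool		async_unlinking;
};

static bool doorbell_rung;		/* pending "IAA interrupt" */

static void start_iaa_cycle(struct ctrl *c)
{
	if (c->iaa_in_progress)		/* a cycle is already running */
		return;
	c->iaa_in_progress = true;
	doorbell_rung = true;		/* real code sets IAAD and waits for the IRQ */
}

static void end_unlink_async(struct ctrl *c)
{
	struct qh *qh = c->async_unlink;
	bool early_exit;

	c->iaa_in_progress = false;	/* the current IAA cycle has ended */
	if (!qh)
		return;
	early_exit = c->async_unlinking;	/* already running elsewhere? */

	if (qh->state == QH_UNLINK_WAIT) {
		/* first cycle done: insist on a second one for this QH */
		qh->state = QH_UNLINK;
		early_exit = true;
	} else {
		/* second cycle done: the QH is now completely idle */
		c->async_unlink = qh->next;
		qh->state = QH_IDLE;
		qh->next = c->async_idle;
		c->async_idle = qh;
	}

	if (c->async_unlink)		/* restart right away if QHs still wait */
		start_iaa_cycle(c);
	if (early_exit)			/* no nesting or concurrent processing */
		return;

	/* process the idle QHs */
	c->async_unlinking = true;
	while (c->async_idle) {
		struct qh *done = c->async_idle;

		c->async_idle = done->next;
		printf("%s: fully unlinked\n", done->name);
	}
	c->async_unlinking = false;
}

int main(void)
{
	struct qh b = { "qh-b", QH_UNLINK_WAIT, NULL };
	struct qh a = { "qh-a", QH_UNLINK_WAIT, &b };
	struct ctrl c = { &a, NULL, false, false };

	start_iaa_cycle(&c);
	while (doorbell_rung) {		/* deliver the fake IAA interrupts */
		doorbell_rung = false;
		end_unlink_async(&c);
	}
	return 0;
}

Running the model reports each QH idle only after two doorbell
deliveries: the first end_unlink_async() call merely advances it from
QH_UNLINK_WAIT to QH_UNLINK and restarts the cycle, mirroring the
two-IAA-cycles-per-QH defense described in the patch's comments.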
Diffstat (limited to 'drivers/usb/host/ehci-q.c')
-rw-r--r--  drivers/usb/host/ehci-q.c | 97 +++++++++++++++++++++++++++++++++++++++++++++++++++++++------------------------------------------
1 file changed, 55 insertions(+), 42 deletions(-)
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 4a01367bb2a0..820583bfb5ee 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -960,7 +960,7 @@ static void disable_async(struct ehci_hcd *ehci)
 
 	/* The async schedule and unlink lists are supposed to be empty */
 	WARN_ON(ehci->async->qh_next.qh || !list_empty(&ehci->async_unlink) ||
-			!list_empty(&ehci->async_iaa));
+			!list_empty(&ehci->async_idle));
 
 	/* Don't turn off the schedule until ASS is 1 */
 	ehci_poll_ASS(ehci);
@@ -1164,41 +1164,19 @@ static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
 		ehci->qh_scan_next = qh->qh_next.qh;
 }
 
-static void start_iaa_cycle(struct ehci_hcd *ehci, bool nested)
+static void start_iaa_cycle(struct ehci_hcd *ehci)
 {
-	/*
-	 * Do nothing if an IAA cycle is already running or
-	 * if one will be started shortly.
-	 */
-	if (!list_empty(&ehci->async_iaa) || ehci->async_unlinking)
+	/* Do nothing if an IAA cycle is already running */
+	if (ehci->iaa_in_progress)
 		return;
+	ehci->iaa_in_progress = true;
 
 	/* If the controller isn't running, we don't have to wait for it */
 	if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) {
-
-		/* Do all the waiting QHs */
-		list_splice_tail_init(&ehci->async_unlink, &ehci->async_iaa);
-
-		if (!nested)		/* Avoid recursion */
-			end_unlink_async(ehci);
+		end_unlink_async(ehci);
 
 	/* Otherwise start a new IAA cycle */
 	} else if (likely(ehci->rh_state == EHCI_RH_RUNNING)) {
-		struct ehci_qh		*qh;
-
-		/* Do only the first waiting QH (nVidia bug?) */
-		qh = list_first_entry(&ehci->async_unlink, struct ehci_qh,
-				unlink_node);
-
-		/*
-		 * Intel (?) bug: The HC can write back the overlay region
-		 * even after the IAA interrupt occurs.  In self-defense,
-		 * always go through two IAA cycles for each QH.
-		 */
-		if (qh->qh_state == QH_STATE_UNLINK_WAIT)
-			qh->qh_state = QH_STATE_UNLINK;
-		else
-			list_move_tail(&qh->unlink_node, &ehci->async_iaa);
 
 		/* Make sure the unlinks are all visible to the hardware */
 		wmb();
@@ -1215,16 +1193,59 @@ static void start_iaa_cycle(struct ehci_hcd *ehci, bool nested)
 static void end_unlink_async(struct ehci_hcd *ehci)
 {
 	struct ehci_qh		*qh;
+	bool			early_exit;
 
 	if (ehci->has_synopsys_hc_bug)
 		ehci_writel(ehci, (u32) ehci->async->qh_dma,
 			    &ehci->regs->async_next);
 
+	/* The current IAA cycle has ended */
+	ehci->iaa_in_progress = false;
+
+	if (list_empty(&ehci->async_unlink))
+		return;
+	qh = list_first_entry(&ehci->async_unlink, struct ehci_qh,
+			unlink_node);	/* QH whose IAA cycle just ended */
+
+	/*
+	 * If async_unlinking is set then this routine is already running,
+	 * either on the stack or on another CPU.
+	 */
+	early_exit = ehci->async_unlinking;
+
+	/* If the controller isn't running, process all the waiting QHs */
+	if (ehci->rh_state < EHCI_RH_RUNNING)
+		list_splice_tail_init(&ehci->async_unlink, &ehci->async_idle);
+
+	/*
+	 * Intel (?) bug: The HC can write back the overlay region even
+	 * after the IAA interrupt occurs.  In self-defense, always go
+	 * through two IAA cycles for each QH.
+	 */
+	else if (qh->qh_state == QH_STATE_UNLINK_WAIT) {
+		qh->qh_state = QH_STATE_UNLINK;
+		early_exit = true;
+	}
+
+	/* Otherwise process only the first waiting QH (NVIDIA bug?) */
+	else
+		list_move_tail(&qh->unlink_node, &ehci->async_idle);
+
+	/* Start a new IAA cycle if any QHs are waiting for it */
+	if (!list_empty(&ehci->async_unlink))
+		start_iaa_cycle(ehci);
+
+	/*
+	 * Don't allow nesting or concurrent calls,
+	 * or wait for the second IAA cycle for the next QH.
+	 */
+	if (early_exit)
+		return;
+
 	/* Process the idle QHs */
- restart:
 	ehci->async_unlinking = true;
-	while (!list_empty(&ehci->async_iaa)) {
-		qh = list_first_entry(&ehci->async_iaa, struct ehci_qh,
+	while (!list_empty(&ehci->async_idle)) {
+		qh = list_first_entry(&ehci->async_idle, struct ehci_qh,
 				unlink_node);
 		list_del(&qh->unlink_node);
 
@@ -1239,13 +1260,6 @@ static void end_unlink_async(struct ehci_hcd *ehci)
 			disable_async(ehci);
 	}
 	ehci->async_unlinking = false;
-
-	/* Start a new IAA cycle if any QHs are waiting for it */
-	if (!list_empty(&ehci->async_unlink)) {
-		start_iaa_cycle(ehci, true);
-		if (unlikely(ehci->rh_state < EHCI_RH_RUNNING))
-			goto restart;
-	}
 }
 
 static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
@@ -1270,8 +1284,7 @@ static void unlink_empty_async(struct ehci_hcd *ehci)
 	}
 
 	/* If nothing else is being unlinked, unlink the last empty QH */
-	if (list_empty(&ehci->async_iaa) && list_empty(&ehci->async_unlink) &&
-			qh_to_unlink) {
+	if (list_empty(&ehci->async_unlink) && qh_to_unlink) {
 		start_unlink_async(ehci, qh_to_unlink);
 		--count;
 	}
@@ -1293,7 +1306,7 @@ static void unlink_empty_async_suspended(struct ehci_hcd *ehci)
 		WARN_ON(!list_empty(&qh->qtd_list));
 		single_unlink_async(ehci, qh);
 	}
-	start_iaa_cycle(ehci, false);
+	start_iaa_cycle(ehci);
 }
 
 /* makes sure the async qh will become idle */
@@ -1306,7 +1319,7 @@ static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
 		return;
 
 	single_unlink_async(ehci, qh);
-	start_iaa_cycle(ehci, false);
+	start_iaa_cycle(ehci);
 }
 
 /*-------------------------------------------------------------------------*/