-rw-r--r--  drivers/usb/host/ehci-hcd.c    |  4
-rw-r--r--  drivers/usb/host/ehci-q.c      | 97
-rw-r--r--  drivers/usb/host/ehci-timer.c  |  2
-rw-r--r--  drivers/usb/host/ehci.h        |  3
4 files changed, 60 insertions, 46 deletions
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index b32323ca07d3..037a4729d549 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -483,7 +483,7 @@ static int ehci_init(struct usb_hcd *hcd)
 	 */
 	ehci->periodic_size = DEFAULT_I_TDPS;
 	INIT_LIST_HEAD(&ehci->async_unlink);
-	INIT_LIST_HEAD(&ehci->async_iaa);
+	INIT_LIST_HEAD(&ehci->async_idle);
 	INIT_LIST_HEAD(&ehci->intr_unlink);
 	INIT_LIST_HEAD(&ehci->intr_qh_list);
 	INIT_LIST_HEAD(&ehci->cached_itd_list);
@@ -752,7 +752,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
 		/* guard against (alleged) silicon errata */
 		if (cmd & CMD_IAAD)
 			ehci_dbg(ehci, "IAA with IAAD still set?\n");
-		if (!list_empty(&ehci->async_iaa))
+		if (ehci->iaa_in_progress)
 			COUNT(ehci->stats.iaa);
 		end_unlink_async(ehci);
 	}
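
The two ehci-hcd.c hunks above track the rename of async_iaa to async_idle and the new iaa_in_progress flag: the IAA statistic is now credited when a cycle was actually requested, rather than by peeking at a list. Below is a minimal user-space model of that interrupt-side check; struct hc_model, irq_iaa_path(), and the counter are stand-ins invented for illustration, not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

struct hc_model {
	bool iaa_in_progress;	/* set by start_iaa_cycle(), cleared on completion */
	unsigned iaa_stat;	/* stands in for ehci->stats.iaa */
};

static void irq_iaa_path(struct hc_model *hc)
{
	/* was: if (!list_empty(&ehci->async_iaa)) */
	if (hc->iaa_in_progress)
		hc->iaa_stat++;
	/* end_unlink_async() still runs unconditionally after this */
}

int main(void)
{
	struct hc_model hc = { .iaa_in_progress = true };

	irq_iaa_path(&hc);
	printf("iaa stat = %u\n", hc.iaa_stat);	/* prints 1 */
	return 0;
}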
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 4a01367bb2a0..820583bfb5ee 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -960,7 +960,7 @@ static void disable_async(struct ehci_hcd *ehci)
 
 	/* The async schedule and unlink lists are supposed to be empty */
 	WARN_ON(ehci->async->qh_next.qh || !list_empty(&ehci->async_unlink) ||
-			!list_empty(&ehci->async_iaa));
+			!list_empty(&ehci->async_idle));
 
 	/* Don't turn off the schedule until ASS is 1 */
 	ehci_poll_ASS(ehci);
@@ -1164,41 +1164,19 @@ static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
 	ehci->qh_scan_next = qh->qh_next.qh;
 }
 
-static void start_iaa_cycle(struct ehci_hcd *ehci, bool nested)
+static void start_iaa_cycle(struct ehci_hcd *ehci)
 {
-	/*
-	 * Do nothing if an IAA cycle is already running or
-	 * if one will be started shortly.
-	 */
-	if (!list_empty(&ehci->async_iaa) || ehci->async_unlinking)
+	/* Do nothing if an IAA cycle is already running */
+	if (ehci->iaa_in_progress)
 		return;
+	ehci->iaa_in_progress = true;
 
 	/* If the controller isn't running, we don't have to wait for it */
 	if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) {
-
-		/* Do all the waiting QHs */
-		list_splice_tail_init(&ehci->async_unlink, &ehci->async_iaa);
-
-		if (!nested)		/* Avoid recursion */
-			end_unlink_async(ehci);
+		end_unlink_async(ehci);
 
 	/* Otherwise start a new IAA cycle */
 	} else if (likely(ehci->rh_state == EHCI_RH_RUNNING)) {
-		struct ehci_qh	*qh;
-
-		/* Do only the first waiting QH (nVidia bug?) */
-		qh = list_first_entry(&ehci->async_unlink, struct ehci_qh,
-				unlink_node);
-
-		/*
-		 * Intel (?) bug: The HC can write back the overlay region
-		 * even after the IAA interrupt occurs.  In self-defense,
-		 * always go through two IAA cycles for each QH.
-		 */
-		if (qh->qh_state == QH_STATE_UNLINK_WAIT)
-			qh->qh_state = QH_STATE_UNLINK;
-		else
-			list_move_tail(&qh->unlink_node, &ehci->async_iaa);
 
 		/* Make sure the unlinks are all visible to the hardware */
 		wmb();
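
As rewritten, start_iaa_cycle() is safe to call at any time: the iaa_in_progress flag alone serializes cycles, which is why the old nested parameter and the list_empty() test could go. Below is a stand-alone sketch of that guard; start_iaa_cycle_model() and the counters are invented names, not kernel code.

#include <stdbool.h>
#include <stdio.h>

static bool iaa_in_progress;	/* mirrors the new ehci_hcd bitfield */
static int doorbell_rings;	/* stands in for setting CMD_IAAD */

static void start_iaa_cycle_model(void)
{
	if (iaa_in_progress)	/* a cycle is already running: nothing to do */
		return;
	iaa_in_progress = true;
	doorbell_rings++;	/* ring the IAA doorbell once */
}

int main(void)
{
	start_iaa_cycle_model();
	start_iaa_cycle_model();	/* second call is a no-op */
	printf("rings = %d\n", doorbell_rings);	/* prints 1 */
	return 0;
}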
@@ -1215,16 +1193,59 @@ static void start_iaa_cycle(struct ehci_hcd *ehci, bool nested)
 static void end_unlink_async(struct ehci_hcd *ehci)
 {
 	struct ehci_qh		*qh;
+	bool			early_exit;
 
 	if (ehci->has_synopsys_hc_bug)
 		ehci_writel(ehci, (u32) ehci->async->qh_dma,
 			    &ehci->regs->async_next);
 
+	/* The current IAA cycle has ended */
+	ehci->iaa_in_progress = false;
+
+	if (list_empty(&ehci->async_unlink))
+		return;
+	qh = list_first_entry(&ehci->async_unlink, struct ehci_qh,
+			unlink_node);	/* QH whose IAA cycle just ended */
+
+	/*
+	 * If async_unlinking is set then this routine is already running,
+	 * either on the stack or on another CPU.
+	 */
+	early_exit = ehci->async_unlinking;
+
+	/* If the controller isn't running, process all the waiting QHs */
+	if (ehci->rh_state < EHCI_RH_RUNNING)
+		list_splice_tail_init(&ehci->async_unlink, &ehci->async_idle);
+
+	/*
+	 * Intel (?) bug: The HC can write back the overlay region even
+	 * after the IAA interrupt occurs.  In self-defense, always go
+	 * through two IAA cycles for each QH.
+	 */
+	else if (qh->qh_state == QH_STATE_UNLINK_WAIT) {
+		qh->qh_state = QH_STATE_UNLINK;
+		early_exit = true;
+	}
+
+	/* Otherwise process only the first waiting QH (NVIDIA bug?) */
+	else
+		list_move_tail(&qh->unlink_node, &ehci->async_idle);
+
+	/* Start a new IAA cycle if any QHs are waiting for it */
+	if (!list_empty(&ehci->async_unlink))
+		start_iaa_cycle(ehci);
+
+	/*
+	 * Don't allow nesting or concurrent calls,
+	 * or wait for the second IAA cycle for the next QH.
+	 */
+	if (early_exit)
+		return;
+
 	/* Process the idle QHs */
- restart:
 	ehci->async_unlinking = true;
-	while (!list_empty(&ehci->async_iaa)) {
-		qh = list_first_entry(&ehci->async_iaa, struct ehci_qh,
+	while (!list_empty(&ehci->async_idle)) {
+		qh = list_first_entry(&ehci->async_idle, struct ehci_qh,
 				unlink_node);
 		list_del(&qh->unlink_node);
 
@@ -1239,13 +1260,6 @@ static void end_unlink_async(struct ehci_hcd *ehci)
 		disable_async(ehci);
 	}
 	ehci->async_unlinking = false;
-
-	/* Start a new IAA cycle if any QHs are waiting for it */
-	if (!list_empty(&ehci->async_unlink)) {
-		start_iaa_cycle(ehci, true);
-		if (unlikely(ehci->rh_state < EHCI_RH_RUNNING))
-			goto restart;
-	}
 }
 
 static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
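
This is the heart of the patch: per-QH sequencing moves from start_iaa_cycle() into end_unlink_async(), which now ends one cycle, advances the first waiting QH, and restarts the doorbell itself. The Intel overlay-writeback defense still costs two IAA cycles per QH; the toy state machine below demonstrates that invariant (the enum values merely mirror the kernel's QH_STATE_* names; everything else is invented for the demo).

#include <stdio.h>

enum { QH_STATE_UNLINK_WAIT, QH_STATE_UNLINK, QH_STATE_IDLE };

int main(void)
{
	int state = QH_STATE_UNLINK_WAIT;	/* QH just removed from the schedule */
	int iaa_cycles = 0;

	while (state != QH_STATE_IDLE) {
		iaa_cycles++;			/* one doorbell ring + IAA irq */
		if (state == QH_STATE_UNLINK_WAIT)
			state = QH_STATE_UNLINK;	/* first cycle: early_exit path */
		else
			state = QH_STATE_IDLE;		/* second cycle: moved to async_idle */
	}
	printf("QH idled after %d IAA cycles\n", iaa_cycles);	/* prints 2 */
	return 0;
}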
@@ -1270,8 +1284,7 @@ static void unlink_empty_async(struct ehci_hcd *ehci)
 	}
 
 	/* If nothing else is being unlinked, unlink the last empty QH */
-	if (list_empty(&ehci->async_iaa) && list_empty(&ehci->async_unlink) &&
-			qh_to_unlink) {
+	if (list_empty(&ehci->async_unlink) && qh_to_unlink) {
 		start_unlink_async(ehci, qh_to_unlink);
 		--count;
 	}
@@ -1293,7 +1306,7 @@ static void unlink_empty_async_suspended(struct ehci_hcd *ehci)
 		WARN_ON(!list_empty(&qh->qtd_list));
 		single_unlink_async(ehci, qh);
 	}
-	start_iaa_cycle(ehci, false);
+	start_iaa_cycle(ehci);
 }
 
 /* makes sure the async qh will become idle */
@@ -1306,7 +1319,7 @@ static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
 		return;
 
 	single_unlink_async(ehci, qh);
-	start_iaa_cycle(ehci, false);
+	start_iaa_cycle(ehci);
 }
 
 /*-------------------------------------------------------------------------*/
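
With start_iaa_cycle() reentrancy-safe, its callers simply drop the nested flag, and the old goto restart loop disappears: a nested end_unlink_async() (reachable when a completion unlinks another QH on a stopped controller) only splices new work onto the idle list and returns early, leaving the outermost call's drain loop to pick it up. A contrived but runnable model of that nesting discipline, using plain counters in place of the lists (all names invented):

#include <stdbool.h>
#include <stdio.h>

static bool async_unlinking;	/* true while the outermost call drains */
static bool requeued;		/* lets the demo queue more work exactly once */
static int waiting, idle;	/* list lengths as plain counters */

static void end_unlink_async_model(void)
{
	bool early_exit = async_unlinking;

	idle += waiting;	/* controller stopped: splice all waiting QHs */
	waiting = 0;
	if (early_exit)
		return;		/* nested call: no drain loop, no deep recursion */

	async_unlinking = true;
	while (idle > 0) {
		idle--;		/* "process" one idle QH */
		if (!requeued) {
			requeued = true;
			waiting++;			/* a completion unlinks another QH */
			end_unlink_async_model();	/* nested: splices and exits */
		}
	}
	async_unlinking = false;
}

int main(void)
{
	waiting = 1;
	end_unlink_async_model();
	printf("waiting=%d idle=%d\n", waiting, idle);	/* prints 0 0 */
	return 0;
}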
diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c
index f63a98353efd..11e5b32f73e9 100644
--- a/drivers/usb/host/ehci-timer.c
+++ b/drivers/usb/host/ehci-timer.c
@@ -304,7 +304,7 @@ static void ehci_iaa_watchdog(struct ehci_hcd *ehci)
 	 * (a) SMP races against real IAA firing and retriggering, and
 	 * (b) clean HC shutdown, when IAA watchdog was pending.
 	 */
-	if (ehci->rh_state != EHCI_RH_RUNNING)
+	if (!ehci->iaa_in_progress || ehci->rh_state != EHCI_RH_RUNNING)
 		return;
 
 	/* If we get here, IAA is *REALLY* late.  It's barely
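
The watchdog gains the matching guard: it now bails out unless an IAA cycle is genuinely outstanding on a running controller, so a stale timer can no longer fake a completion. A small model of the new predicate (the struct and enum here are simplified stand-ins for the kernel's):

#include <stdbool.h>
#include <stdio.h>

enum rh_state { RH_HALTED, RH_RUNNING };	/* stand-in for ehci_rh_state */

struct hc_model {
	bool iaa_in_progress;
	enum rh_state rh_state;
};

static bool watchdog_should_act(const struct hc_model *hc)
{
	if (!hc->iaa_in_progress || hc->rh_state != RH_RUNNING)
		return false;	/* nothing pending, or HC not running */
	return true;		/* IAA is really late: do the irq's work */
}

int main(void)
{
	struct hc_model hc = { false, RH_RUNNING };

	printf("%d\n", watchdog_should_act(&hc));	/* 0: no cycle pending */
	hc.iaa_in_progress = true;
	printf("%d\n", watchdog_should_act(&hc));	/* 1: watchdog acts */
	return 0;
}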
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 13f67041502e..e66699950997 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -121,6 +121,7 @@ struct ehci_hcd { /* one per controller */
 	bool			scanning:1;
 	bool			need_rescan:1;
 	bool			intr_unlinking:1;
+	bool			iaa_in_progress:1;
 	bool			async_unlinking:1;
 	bool			shutdown:1;
 	struct ehci_qh		*qh_scan_next;
@@ -129,7 +130,7 @@ struct ehci_hcd { /* one per controller */
 	struct ehci_qh		*async;
 	struct ehci_qh		*dummy;		/* For AMD quirk use */
 	struct list_head	async_unlink;
-	struct list_head	async_iaa;
+	struct list_head	async_idle;
 	unsigned		async_unlink_cycle;
 	unsigned		async_count;	/* async activity count */
 
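
Finally, the header: the new flag slots into the existing run of bool bitfields, and the renamed list makes its role explicit, since QHs on async_idle are done with the hardware and only await software processing. On typical ABIs the extra :1 field costs no storage, as this stand-alone illustration (an invented struct, not the kernel's) suggests:

#include <stdbool.h>
#include <stdio.h>

struct unlink_flags {		/* invented name; mirrors the ehci_hcd flags */
	bool scanning:1;
	bool need_rescan:1;
	bool intr_unlinking:1;
	bool iaa_in_progress:1;	/* new in this patch */
	bool async_unlinking:1;
	bool shutdown:1;
};

int main(void)
{
	/* six one-bit flags typically still pack into a single byte */
	printf("sizeof = %zu\n", sizeof(struct unlink_flags));
	return 0;
}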