Diffstat (limited to 'drivers/usb/host/ehci-timer.c')
 -rw-r--r--   drivers/usb/host/ehci-timer.c | 401
 1 file changed, 401 insertions, 0 deletions
diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c
new file mode 100644
index 000000000000..eb896a2c8f2e
--- /dev/null
+++ b/drivers/usb/host/ehci-timer.c
@@ -0,0 +1,401 @@
/*
 * Copyright (C) 2012 by Alan Stern
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */

/* This file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/* Set a bit in the USBCMD register */
static void ehci_set_command_bit(struct ehci_hcd *ehci, u32 bit)
{
        ehci->command |= bit;
        ehci_writel(ehci, ehci->command, &ehci->regs->command);

        /* unblock posted write */
        ehci_readl(ehci, &ehci->regs->command);
}

/* Clear a bit in the USBCMD register */
static void ehci_clear_command_bit(struct ehci_hcd *ehci, u32 bit)
{
        ehci->command &= ~bit;
        ehci_writel(ehci, ehci->command, &ehci->regs->command);

        /* unblock posted write */
        ehci_readl(ehci, &ehci->regs->command);
}

/*-------------------------------------------------------------------------*/

/*
 * EHCI timer support... Now using hrtimers.
 *
 * Lots of different events are triggered from ehci->hrtimer. Whenever
 * the timer routine runs, it checks each possible event; events that are
 * currently enabled and whose expiration time has passed get handled.
 * The set of enabled events is stored as a collection of bitflags in
 * ehci->enabled_hrtimer_events, and they are numbered in order of
 * increasing delay values (ranging between 1 ms and 100 ms).
 *
 * Rather than implementing a sorted list or tree of all pending events,
 * we keep track only of the lowest-numbered pending event, in
 * ehci->next_hrtimer_event. Whenever ehci->hrtimer gets restarted, its
 * expiration time is set to the timeout value for this event.
 *
 * As a result, events might not get handled right away; the actual delay
 * could be anywhere up to twice the requested delay. This doesn't
 * matter, because none of the events are especially time-critical. The
 * ones that matter most all have a delay of 1 ms, so they will be
 * handled after 2 ms at most, which is okay. In addition to this, we
 * allow for an expiration range of 1 ms.
 */

/*
 * Delay lengths for the hrtimer event types.
 * Keep this list sorted by delay length, in the same order as
 * the event types indexed by enum ehci_hrtimer_event in ehci.h.
 */
static unsigned event_delays_ns[] = {
        1 * NSEC_PER_MSEC,      /* EHCI_HRTIMER_POLL_ASS */
        1 * NSEC_PER_MSEC,      /* EHCI_HRTIMER_POLL_PSS */
        1 * NSEC_PER_MSEC,      /* EHCI_HRTIMER_POLL_DEAD */
        1125 * NSEC_PER_USEC,   /* EHCI_HRTIMER_UNLINK_INTR */
        2 * NSEC_PER_MSEC,      /* EHCI_HRTIMER_FREE_ITDS */
        6 * NSEC_PER_MSEC,      /* EHCI_HRTIMER_ASYNC_UNLINKS */
        10 * NSEC_PER_MSEC,     /* EHCI_HRTIMER_IAA_WATCHDOG */
        10 * NSEC_PER_MSEC,     /* EHCI_HRTIMER_DISABLE_PERIODIC */
        15 * NSEC_PER_MSEC,     /* EHCI_HRTIMER_DISABLE_ASYNC */
        100 * NSEC_PER_MSEC,    /* EHCI_HRTIMER_IO_WATCHDOG */
};

/* Enable a pending hrtimer event */
static void ehci_enable_event(struct ehci_hcd *ehci, unsigned event,
                bool resched)
{
        ktime_t *timeout = &ehci->hr_timeouts[event];

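        /*
         * When resched is false, reuse the expiration time stored earlier;
         * the timer handler does this when it re-enables an event that
         * hasn't expired yet.
         */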
        if (resched)
                *timeout = ktime_add(ktime_get(),
                                ktime_set(0, event_delays_ns[event]));
        ehci->enabled_hrtimer_events |= (1 << event);

        /* Track only the lowest-numbered pending event */
        if (event < ehci->next_hrtimer_event) {
                ehci->next_hrtimer_event = event;
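                /* Allow 1 ms of slack, matching the expiration range noted above */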
                hrtimer_start_range_ns(&ehci->hrtimer, *timeout,
                                NSEC_PER_MSEC, HRTIMER_MODE_ABS);
        }
}


/* Poll the STS_ASS status bit; see when it agrees with CMD_ASE */
static void ehci_poll_ASS(struct ehci_hcd *ehci)
{
        unsigned actual, want;

        /* Don't enable anything if the controller isn't running (e.g., died) */
        if (ehci->rh_state != EHCI_RH_RUNNING)
                return;

        want = (ehci->command & CMD_ASE) ? STS_ASS : 0;
        actual = ehci_readl(ehci, &ehci->regs->status) & STS_ASS;

        if (want != actual) {

                /* Poll again later, but give up after about 20 ms */
                if (ehci->ASS_poll_count++ < 20) {
                        ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true);
                        return;
                }
                ehci_warn(ehci, "Waited too long for the async schedule status, giving up\n");
        }
        ehci->ASS_poll_count = 0;

        /* The status is up-to-date; restart or stop the schedule as needed */
        if (want == 0) {        /* Stopped */
                if (ehci->async_count > 0)
                        ehci_set_command_bit(ehci, CMD_ASE);

        } else {                /* Running */
                if (ehci->async_count == 0) {

                        /* Turn off the schedule after a while */
                        ehci_enable_event(ehci, EHCI_HRTIMER_DISABLE_ASYNC,
                                        true);
                }
        }
}

/* Turn off the async schedule after a brief delay */
static void ehci_disable_ASE(struct ehci_hcd *ehci)
{
        ehci_clear_command_bit(ehci, CMD_ASE);
}


/* Poll the STS_PSS status bit; see when it agrees with CMD_PSE */
static void ehci_poll_PSS(struct ehci_hcd *ehci)
{
        unsigned actual, want;

        /* Don't do anything if the controller isn't running (e.g., died) */
        if (ehci->rh_state != EHCI_RH_RUNNING)
                return;

        want = (ehci->command & CMD_PSE) ? STS_PSS : 0;
        actual = ehci_readl(ehci, &ehci->regs->status) & STS_PSS;

        if (want != actual) {

                /* Poll again later, but give up after about 20 ms */
                if (ehci->PSS_poll_count++ < 20) {
                        ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true);
                        return;
                }
                ehci_warn(ehci, "Waited too long for the periodic schedule status, giving up\n");
        }
        ehci->PSS_poll_count = 0;

        /* The status is up-to-date; restart or stop the schedule as needed */
        if (want == 0) {        /* Stopped */
                if (ehci->periodic_count > 0)
                        ehci_set_command_bit(ehci, CMD_PSE);

        } else {                /* Running */
                if (ehci->periodic_count == 0) {

                        /* Turn off the schedule after a while */
                        ehci_enable_event(ehci, EHCI_HRTIMER_DISABLE_PERIODIC,
                                        true);
                }
        }
}

/* Turn off the periodic schedule after a brief delay */
static void ehci_disable_PSE(struct ehci_hcd *ehci)
{
        ehci_clear_command_bit(ehci, CMD_PSE);
}


/* Poll the STS_HALT status bit; see when a dead controller stops */
static void ehci_handle_controller_death(struct ehci_hcd *ehci)
{
        if (!(ehci_readl(ehci, &ehci->regs->status) & STS_HALT)) {

                /* Give up after a few milliseconds */
                if (ehci->died_poll_count++ < 5) {
                        /* Try again later */
                        ehci_enable_event(ehci, EHCI_HRTIMER_POLL_DEAD, true);
                        return;
                }
                ehci_warn(ehci, "Waited too long for the controller to stop, giving up\n");
        }

        /* Clean up the mess */
        ehci->rh_state = EHCI_RH_HALTED;
        ehci_writel(ehci, 0, &ehci->regs->configured_flag);
        ehci_writel(ehci, 0, &ehci->regs->intr_enable);
        ehci_work(ehci);
        end_unlink_async(ehci);

        /* Not in process context, so don't try to reset the controller */
}


/* Handle unlinked interrupt QHs once they are gone from the hardware */
static void ehci_handle_intr_unlinks(struct ehci_hcd *ehci)
{
        bool stopped = (ehci->rh_state < EHCI_RH_RUNNING);

        /*
         * Process all the QHs on the intr_unlink list that were added
         * before the current unlink cycle began. The list is in
         * temporal order, so stop when we reach the first entry in the
         * current cycle. But if the root hub isn't running then
         * process all the QHs on the list.
         */
        ehci->intr_unlinking = true;
        while (ehci->intr_unlink) {
                struct ehci_qh *qh = ehci->intr_unlink;

                if (!stopped && qh->unlink_cycle == ehci->intr_unlink_cycle)
                        break;
                ehci->intr_unlink = qh->unlink_next;
                qh->unlink_next = NULL;
                end_unlink_intr(ehci, qh);
        }

        /* Handle remaining entries later */
        if (ehci->intr_unlink) {
                ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
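                /*
                 * The remaining QHs were added during the current cycle;
                 * advancing the counter makes them eligible on the next pass.
                 */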
                ++ehci->intr_unlink_cycle;
        }
        ehci->intr_unlinking = false;
}


/* Start another free-iTDs/siTDs cycle */
static void start_free_itds(struct ehci_hcd *ehci)
{
        if (!(ehci->enabled_hrtimer_events & BIT(EHCI_HRTIMER_FREE_ITDS))) {
                ehci->last_itd_to_free = list_entry(
                                ehci->cached_itd_list.prev,
                                struct ehci_itd, itd_list);
                ehci->last_sitd_to_free = list_entry(
                                ehci->cached_sitd_list.prev,
                                struct ehci_sitd, sitd_list);
                ehci_enable_event(ehci, EHCI_HRTIMER_FREE_ITDS, true);
        }
}

/* Wait for controller to stop using old iTDs and siTDs */
static void end_free_itds(struct ehci_hcd *ehci)
{
        struct ehci_itd *itd, *n;
        struct ehci_sitd *sitd, *sn;

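        /*
         * If the controller has stopped, nothing can still be caching these
         * descriptors, so clear the end markers and free both lists in full.
         */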
        if (ehci->rh_state < EHCI_RH_RUNNING) {
                ehci->last_itd_to_free = NULL;
                ehci->last_sitd_to_free = NULL;
        }

        list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) {
                list_del(&itd->itd_list);
                dma_pool_free(ehci->itd_pool, itd, itd->itd_dma);
                if (itd == ehci->last_itd_to_free)
                        break;
        }
        list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) {
                list_del(&sitd->sitd_list);
                dma_pool_free(ehci->sitd_pool, sitd, sitd->sitd_dma);
                if (sitd == ehci->last_sitd_to_free)
                        break;
        }

        if (!list_empty(&ehci->cached_itd_list) ||
                        !list_empty(&ehci->cached_sitd_list))
                start_free_itds(ehci);
}


/* Handle lost (or very late) IAA interrupts */
static void ehci_iaa_watchdog(struct ehci_hcd *ehci)
{
        if (ehci->rh_state != EHCI_RH_RUNNING)
                return;

        /*
         * Lost IAA irqs wedge things badly; seen first with a vt8235.
         * So we need this watchdog, but must protect it against both
         * (a) SMP races against real IAA firing and retriggering, and
         * (b) clean HC shutdown, when IAA watchdog was pending.
         */
        if (ehci->async_iaa) {
                u32 cmd, status;

                /* If we get here, IAA is *REALLY* late. It's barely
                 * conceivable that the system is so busy that CMD_IAAD
                 * is still legitimately set, so let's be sure it's
                 * clear before we read STS_IAA. (The HC should clear
                 * CMD_IAAD when it sets STS_IAA.)
                 */
                cmd = ehci_readl(ehci, &ehci->regs->command);

                /*
                 * If IAA is set here it either legitimately triggered
                 * after the watchdog timer expired (_way_ late, so we'll
                 * still count it as lost) ... or a silicon erratum:
                 * - VIA seems to set IAA without triggering the IRQ;
                 * - IAAD potentially cleared without setting IAA.
                 */
                status = ehci_readl(ehci, &ehci->regs->status);
                if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
                        COUNT(ehci->stats.lost_iaa);
                        ehci_writel(ehci, STS_IAA, &ehci->regs->status);
                }

                ehci_vdbg(ehci, "IAA watchdog: status %x cmd %x\n",
                                status, cmd);
                end_unlink_async(ehci);
        }
}


/* Enable the I/O watchdog, if appropriate */
static void turn_on_io_watchdog(struct ehci_hcd *ehci)
{
        /* Not needed if the controller isn't running or it's already enabled */
        if (ehci->rh_state != EHCI_RH_RUNNING ||
                        (ehci->enabled_hrtimer_events &
                                BIT(EHCI_HRTIMER_IO_WATCHDOG)))
                return;

        /*
         * Isochronous transfers always need the watchdog.
         * For other sorts we use it only if the flag is set.
         */
        if (ehci->isoc_count > 0 || (ehci->need_io_watchdog &&
                        ehci->async_count + ehci->intr_count > 0))
                ehci_enable_event(ehci, EHCI_HRTIMER_IO_WATCHDOG, true);
}


/*
 * Handler functions for the hrtimer event types.
 * Keep this array in the same order as the event types indexed by
 * enum ehci_hrtimer_event in ehci.h.
 */
static void (*event_handlers[])(struct ehci_hcd *) = {
        ehci_poll_ASS,                  /* EHCI_HRTIMER_POLL_ASS */
        ehci_poll_PSS,                  /* EHCI_HRTIMER_POLL_PSS */
        ehci_handle_controller_death,   /* EHCI_HRTIMER_POLL_DEAD */
        ehci_handle_intr_unlinks,       /* EHCI_HRTIMER_UNLINK_INTR */
        end_free_itds,                  /* EHCI_HRTIMER_FREE_ITDS */
        unlink_empty_async,             /* EHCI_HRTIMER_ASYNC_UNLINKS */
        ehci_iaa_watchdog,              /* EHCI_HRTIMER_IAA_WATCHDOG */
        ehci_disable_PSE,               /* EHCI_HRTIMER_DISABLE_PERIODIC */
        ehci_disable_ASE,               /* EHCI_HRTIMER_DISABLE_ASYNC */
        ehci_work,                      /* EHCI_HRTIMER_IO_WATCHDOG */
};

static enum hrtimer_restart ehci_hrtimer_func(struct hrtimer *t)
{
        struct ehci_hcd *ehci = container_of(t, struct ehci_hcd, hrtimer);
        ktime_t now;
        unsigned long events;
        unsigned long flags;
        unsigned e;

        spin_lock_irqsave(&ehci->lock, flags);

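        /*
         * Snapshot and clear the enabled events: handlers may re-enable
         * events (via ehci_enable_event()) while we dispatch, and resetting
         * next_hrtimer_event lets the timer be restarted for them.
         */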
        events = ehci->enabled_hrtimer_events;
        ehci->enabled_hrtimer_events = 0;
        ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;

        /*
         * Check each pending event. If its time has expired, handle
         * the event; otherwise re-enable it.
         */
        now = ktime_get();
        for_each_set_bit(e, &events, EHCI_HRTIMER_NUM_EVENTS) {
                if (now.tv64 >= ehci->hr_timeouts[e].tv64)
                        event_handlers[e](ehci);
                else
                        ehci_enable_event(ehci, e, false);
        }

        spin_unlock_irqrestore(&ehci->lock, flags);
        return HRTIMER_NORESTART;
}
