author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-08-21 02:38:44 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-08-21 02:38:44 -0400
commit		8eb891fc809b2300137bcd247025628c06c95a63 (patch)
tree		5f7da9517835fef971a2f0c8ed69ca86c3f8099d /drivers
parent		848c4dd5153c7a0de55470ce99a8e13a63b4703f (diff)
Revert "USB: EHCI cpufreq fix"
This reverts commit 196705c9bbc03540429b0f7cf9ee35c2f928a534.

It was reported to cause a regression by Daniel Exner, and Arjan van de Ven points out that we actually already have infrastructure in place for setting limits on acceptable DMA latency, which would be the much more correct fix for the problem with some Broadcom EHCI controllers.

Fixed up trivial conflicts due to the changes to support big-endian host controller descriptors in drivers/usb/host/{ehci-sched.c,ehci.h}.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
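The "infrastructure in place for setting limits on acceptable DMA latency" refers to the acceptable-latency mechanism that existed in the kernel at the time (kernel/latency.c). Below is a minimal sketch of how an EHCI quirk might use it instead of a cpufreq notifier, assuming the <linux/latency.h> interface of that era (set_acceptable_latency()/remove_acceptable_latency()); the identifier string and the 100-microsecond bound are illustrative assumptions, not values taken from any driver.

/*
 * Sketch only, not part of this commit: the acceptable-DMA-latency approach
 * the commit message alludes to, assuming the kernel/latency.c API of that
 * era (set_acceptable_latency() / remove_acceptable_latency() from
 * <linux/latency.h>).  The "ehci-hcd" identifier and the 100 us bound are
 * illustrative assumptions.
 */
#include <linux/latency.h>

static void ehci_limit_dma_latency(void)
{
	/* Keep the platform out of states that add more than ~100 us of
	 * memory-access latency, so split transactions are not starved
	 * during CPU frequency/power transitions. */
	set_acceptable_latency("ehci-hcd", 100);
}

static void ehci_release_dma_latency(void)
{
	remove_acceptable_latency("ehci-hcd");
}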
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/usb/host/ehci-hcd.c	 67
-rw-r--r--	drivers/usb/host/ehci-mem.c	  3
-rw-r--r--	drivers/usb/host/ehci-q.c	  4
-rw-r--r--	drivers/usb/host/ehci-sched.c	127
-rw-r--r--	drivers/usb/host/ehci.h		 14
5 files changed, 0 insertions, 215 deletions
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index c4e15ed1405a..35cdba10411b 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -275,58 +275,6 @@ static void ehci_work(struct ehci_hcd *ehci);
 
 /*-------------------------------------------------------------------------*/
 
-#ifdef CONFIG_CPU_FREQ
-
-#include <linux/cpufreq.h>
-
-static void ehci_cpufreq_pause (struct ehci_hcd *ehci)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&ehci->lock, flags);
-	if (!ehci->cpufreq_changing++)
-		qh_inactivate_split_intr_qhs(ehci);
-	spin_unlock_irqrestore(&ehci->lock, flags);
-}
-
-static void ehci_cpufreq_unpause (struct ehci_hcd *ehci)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&ehci->lock, flags);
-	if (!--ehci->cpufreq_changing)
-		qh_reactivate_split_intr_qhs(ehci);
-	spin_unlock_irqrestore(&ehci->lock, flags);
-}
-
-/*
- * ehci_cpufreq_notifier is needed to avoid MMF errors that occur when
- * EHCI controllers that don't cache many uframes get delayed trying to
- * read main memory during CPU frequency transitions.  This can cause
- * split interrupt transactions to not be completed in the required uframe.
- * This has been observed on the Broadcom/ServerWorks HT1000 controller.
- */
-static int ehci_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
-				 void *data)
-{
-	struct ehci_hcd *ehci = container_of(nb, struct ehci_hcd,
-					     cpufreq_transition);
-
-	switch (val) {
-	case CPUFREQ_PRECHANGE:
-		ehci_cpufreq_pause(ehci);
-		break;
-	case CPUFREQ_POSTCHANGE:
-		ehci_cpufreq_unpause(ehci);
-		break;
-	}
-	return 0;
-}
-
-#endif
-
-/*-------------------------------------------------------------------------*/
-
 static void ehci_watchdog (unsigned long param)
 {
 	struct ehci_hcd		*ehci = (struct ehci_hcd *) param;
@@ -460,10 +408,6 @@ static void ehci_stop (struct usb_hcd *hcd)
 	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
 	spin_unlock_irq(&ehci->lock);
 
-#ifdef CONFIG_CPU_FREQ
-	cpufreq_unregister_notifier(&ehci->cpufreq_transition,
-				    CPUFREQ_TRANSITION_NOTIFIER);
-#endif
 	/* let companion controllers work when we aren't */
 	ehci_writel(ehci, 0, &ehci->regs->configured_flag);
 
@@ -569,17 +513,6 @@ static int ehci_init(struct usb_hcd *hcd)
 	}
 	ehci->command = temp;
 
-#ifdef CONFIG_CPU_FREQ
-	INIT_LIST_HEAD(&ehci->split_intr_qhs);
-	/*
-	 * If the EHCI controller caches enough uframes, this probably
-	 * isn't needed unless there are so many low/full speed devices
-	 * that the controller's can't cache it all.
-	 */
-	ehci->cpufreq_transition.notifier_call = ehci_cpufreq_notifier;
-	cpufreq_register_notifier(&ehci->cpufreq_transition,
-				  CPUFREQ_TRANSITION_NOTIFIER);
-#endif
 	return 0;
 }
 
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
index 8816d09903d0..0431397836f6 100644
--- a/drivers/usb/host/ehci-mem.c
+++ b/drivers/usb/host/ehci-mem.c
@@ -94,9 +94,6 @@ static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
 	qh->qh_dma = dma;
 	// INIT_LIST_HEAD (&qh->qh_list);
 	INIT_LIST_HEAD (&qh->qtd_list);
-#ifdef CONFIG_CPU_FREQ
-	INIT_LIST_HEAD (&qh->split_intr_qhs);
-#endif
 
 	/* dummy td enables safe urb queuing */
 	qh->dummy = ehci_qtd_alloc (ehci, flags);
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 2284028f8aa5..140bfa423e07 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -312,10 +312,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
 		struct urb	*urb;
 		u32		token = 0;
 
-		/* ignore QHs that are currently inactive */
-		if (qh->hw_info1 & __constant_cpu_to_le32(QH_INACTIVATE))
-			break;
-
 		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
 		urb = qtd->urb;
 
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index d4a8ace49676..e682f2342ef8 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -479,109 +479,6 @@ static int disable_periodic (struct ehci_hcd *ehci)
 }
 
 /*-------------------------------------------------------------------------*/
-#ifdef CONFIG_CPU_FREQ
-
-static int safe_to_modify_i (struct ehci_hcd *ehci, struct ehci_qh *qh)
-{
-	int now;			/* current (frame * 8) + uframe */
-	int prev_start, next_start;	/* uframes from/to split start */
-	int start_uframe = ffs(le32_to_cpup (&qh->hw_info2) & QH_SMASK);
-	int end_uframe = fls((le32_to_cpup (&qh->hw_info2) & QH_CMASK) >> 8);
-	int split_duration = end_uframe - start_uframe;
-
-	now = readl(&ehci->regs->frame_index) % (ehci->periodic_size << 3);
-
-	next_start = ((1024 << 3) + (qh->start << 3) + start_uframe - now)
-			% (qh->period << 3);
-	prev_start = (qh->period << 3) - next_start;
-
-	/*
-	 * Make sure there will be at least one uframe when qh is safe.
-	 */
-	if ((qh->period << 3) <= (ehci->i_thresh + 2 + split_duration))
-		/* never safe */
-		return -EINVAL;
-
-	/*
-	 * Wait 1 uframe after transaction should have started, to make
-	 * sure controller has time to write back overlay, so we can
-	 * check QTD_STS_STS to see if transaction is in progress.
-	 */
-	if ((next_start > ehci->i_thresh) && (prev_start > 1))
-		/* safe to set "i" bit if split isn't in progress */
-		return (qh->hw_token & STATUS_BIT(ehci)) ? 0 : 1;
-	else
-		return 0;
-}
-
-/* Set inactivate bit for all the split interrupt QHs. */
-static void qh_inactivate_split_intr_qhs (struct ehci_hcd *ehci)
-{
-	struct ehci_qh	*qh;
-	int		not_done, safe;
-	u32		inactivate = INACTIVATE_BIT(ehci);
-	u32		active = ACTIVE_BIT(ehci);
-
-	do {
-		not_done = 0;
-		list_for_each_entry(qh, &ehci->split_intr_qhs,
-				split_intr_qhs) {
-			if (qh->hw_info1 & inactivate)
-				/* already off */
-				continue;
-			/*
-			 * To avoid setting "I" after the start split happens,
-			 * don't set it if the QH might be cached in the
-			 * controller.  Some HCs (Broadcom/ServerWorks HT1000)
-			 * will stop in the middle of a split transaction when
-			 * the "I" bit is set.
-			 */
-			safe = safe_to_modify_i(ehci, qh);
-			if (safe == 0) {
-				not_done = 1;
-			} else if (safe > 0) {
-				qh->was_active = qh->hw_token & active;
-				qh->hw_info1 |= inactivate;
-			}
-		}
-	} while (not_done);
-	wmb();
-}
-
-static void qh_reactivate_split_intr_qhs (struct ehci_hcd *ehci)
-{
-	struct ehci_qh	*qh;
-	u32		token;
-	int		not_done, safe;
-	u32		inactivate = INACTIVATE_BIT(ehci);
-	u32		active = ACTIVE_BIT(ehci);
-	u32		halt = HALT_BIT(ehci);
-
-	do {
-		not_done = 0;
-		list_for_each_entry(qh, &ehci->split_intr_qhs, split_intr_qhs) {
-			if (!(qh->hw_info1 & inactivate)) /* already on */
-				continue;
-			/*
-			 * Don't reactivate if cached, or controller might
-			 * overwrite overlay after we modify it!
-			 */
-			safe = safe_to_modify_i(ehci, qh);
-			if (safe == 0) {
-				not_done = 1;
-			} else if (safe > 0) {
-				/* See EHCI 1.0 section 4.15.2.4. */
-				token = qh->hw_token;
-				qh->hw_token = (token | halt) & ~active;
-				wmb();
-				qh->hw_info1 &= ~inactivate;
-				wmb();
-				qh->hw_token = (token & ~halt) | qh->was_active;
-			}
-		}
-	} while (not_done);
-}
-#endif
 
 /* periodic schedule slots have iso tds (normal or split) first, then a
  * sparse tree for active interrupt transfers.
@@ -599,17 +496,6 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
 		period, hc32_to_cpup(ehci, &qh->hw_info2) & (QH_CMASK | QH_SMASK),
 		qh, qh->start, qh->usecs, qh->c_usecs);
 
-#ifdef CONFIG_CPU_FREQ
-	/*
-	 * If low/full speed interrupt QHs are inactive (because of
-	 * cpufreq changing processor speeds), start QH with I flag set--
-	 * it will automatically be cleared when cpufreq is done.
-	 */
-	if (ehci->cpufreq_changing)
-		if (!(qh->hw_info1 & (cpu_to_le32(1 << 13))))
-			qh->hw_info1 |= INACTIVATE_BIT(ehci);
-#endif
-
 	/* high bandwidth, or otherwise every microframe */
 	if (period == 0)
 		period = 1;
@@ -658,12 +544,6 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
 			? ((qh->usecs + qh->c_usecs) / qh->period)
 			: (qh->usecs * 8);
 
-#ifdef CONFIG_CPU_FREQ
-	/* add qh to list of low/full speed interrupt QHs, if applicable */
-	if (!(qh->hw_info1 & (cpu_to_le32(1 << 13)))) {
-		list_add(&qh->split_intr_qhs, &ehci->split_intr_qhs);
-	}
-#endif
 	/* maybe enable periodic schedule processing */
 	if (!ehci->periodic_sched++)
 		return enable_periodic (ehci);
@@ -683,13 +563,6 @@ static void qh_unlink_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
 	// THEN
 	//	qh->hw_info1 |= __constant_cpu_to_hc32(1 << 7 /* "ignore" */);
 
-#ifdef CONFIG_CPU_FREQ
-	/* remove qh from list of low/full speed interrupt QHs */
-	if (!(qh->hw_info1 & (cpu_to_le32(1 << 13)))) {
-		list_del_init(&qh->split_intr_qhs);
-	}
-#endif
-
 	/* high bandwidth, or otherwise part of every microframe */
 	if ((period = qh->period) == 0)
 		period = 1;
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 2c68a04230c1..951d69fec513 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -71,12 +71,6 @@ struct ehci_hcd { /* one per controller */
 	__u32			hcs_params;	/* cached register copy */
 	spinlock_t		lock;
 
-#ifdef CONFIG_CPU_FREQ
-	struct notifier_block	cpufreq_transition;
-	int			cpufreq_changing;
-	struct list_head	split_intr_qhs;
-#endif
-
 	/* async schedule support */
 	struct ehci_qh		*async;
 	struct ehci_qh		*reclaim;
@@ -439,10 +433,6 @@ struct ehci_qh {
 	__hc32			hw_next;	/* see EHCI 3.6.1 */
 	__hc32			hw_info1;	/* see EHCI 3.6.2 */
 #define	QH_HEAD		0x00008000
-#define	QH_INACTIVATE	0x00000080
-
-#define INACTIVATE_BIT(ehci)	cpu_to_hc32(ehci, QH_INACTIVATE)
-
 	__hc32			hw_info2;	/* see EHCI 3.6.2 */
 #define	QH_SMASK	0x000000ff
 #define	QH_CMASK	0x0000ff00
@@ -492,10 +482,6 @@ struct ehci_qh {
 	unsigned short		start;		/* where polling starts */
 #define NO_FRAME ((unsigned short)~0)		/* pick new start */
 	struct usb_device	*dev;		/* access to TT */
-#ifdef CONFIG_CPU_FREQ
-	struct list_head	split_intr_qhs;	/* list of split qhs */
-	__le32			was_active;	/* active bit before "i" set */
-#endif
 } __attribute__ ((aligned (32)));
 
 /*-------------------------------------------------------------------------*/