author	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-02-26 12:21:46 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-02-26 12:21:46 -0500
commit	ea3d5226f52ef30f52aa0a04f47f5919c7facacf (patch)
tree	090a48a6f6b2934d32a641ec6e61b3dc59a48514 /arch/i386/kernel
parent	9654640d0af8f2de40ff3807d3695109d3463f54 (diff)
Revert "[PATCH] i386: add idle notifier"
This reverts commit 2ff2d3d74705d34ab71b21f54634fcf50d57bdd5.
Uwe Bugla reports that he cannot mount a floppy drive any more, and Jiri
Slaby bisected it down to this commit.
Benjamin LaHaise also points out that this is a big hot-path, and that
interrupt delivery while idle is very common and should not go through
all these expensive gyrations.
Fix up conflicts in arch/i386/kernel/apic.c and arch/i386/kernel/irq.c
due to other unrelated irq changes.
Cc: Stephane Eranian <eranian@hpl.hp.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Andrew Morton <akpm@osdl.org>
Cc: Uwe Bugla <uwe.bugla@gmx.de>
Cc: Jiri Slaby <jirislaby@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
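
For context, a minimal self-contained C sketch of the reverted idle-notifier pattern follows. It is a userspace model reconstructed from the code removed in process.c below, not the kernel implementation; names such as fake_interrupt() and example_cb() are illustrative only. The point it shows: with the notifier in place, every interrupt taken while idle first runs exit_idle(), which clears a per-CPU flag and walks a callback chain before the real handler runs, which is the extra hot-path work the message above objects to.

#include <stdio.h>

typedef void (*idle_cb)(int event);

enum { IDLE_START, IDLE_END };

#define MAX_CBS 8
static idle_cb callbacks[MAX_CBS];
static int nr_cbs;
static int cpu_is_idle;	/* stands in for the per-CPU idle_state bit */

static void idle_notifier_register(idle_cb cb)
{
	if (nr_cbs < MAX_CBS)
		callbacks[nr_cbs++] = cb;
}

static void notify(int event)
{
	int i;

	for (i = 0; i < nr_cbs; i++)
		callbacks[i](event);
}

/* Called when the CPU enters its idle loop. */
static void enter_idle(void)
{
	cpu_is_idle = 1;
	notify(IDLE_START);
}

/* With the notifier in tree, every interrupt handler started with this. */
static void exit_idle(void)
{
	if (!cpu_is_idle)
		return;		/* already out of idle: cheap early return */
	cpu_is_idle = 0;
	notify(IDLE_END);	/* chain walk paid on the interrupt hot path */
}

static void example_cb(int event)
{
	printf("idle event: %s\n", event == IDLE_START ? "start" : "end");
}

/* Stand-in for do_IRQ()/smp_apic_timer_interrupt(): the hot path in question. */
static void fake_interrupt(void)
{
	exit_idle();
	/* irq_enter(); ... handle the interrupt ... irq_exit(); */
}

int main(void)
{
	idle_notifier_register(example_cb);
	enter_idle();
	fake_interrupt();	/* first interrupt after idle pays the notifier cost */
	fake_interrupt();	/* later ones take the early-return path */
	return 0;
}

In the kernel version this pattern also forced cpu_idle() to disable interrupts around enter_idle() before calling the idle routine; the process.c hunk below removes that as well.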
Diffstat (limited to 'arch/i386/kernel')
-rw-r--r--	arch/i386/kernel/apic.c           |  4
-rw-r--r--	arch/i386/kernel/cpu/mcheck/p4.c  |  2
-rw-r--r--	arch/i386/kernel/irq.c            |  3
-rw-r--r--	arch/i386/kernel/process.c        | 53
-rw-r--r--	arch/i386/kernel/smp.c            |  2
5 files changed, 1 insertions, 63 deletions
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 9655c233e6f1..7a2c9cbdb511 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -38,7 +38,6 @@
 #include <asm/hpet.h>
 #include <asm/i8253.h>
 #include <asm/nmi.h>
-#include <asm/idle.h>
 
 #include <mach_apic.h>
 #include <mach_apicdef.h>
@@ -561,7 +560,6 @@ void fastcall smp_apic_timer_interrupt(struct pt_regs *regs)
 	 * Besides, if we don't timer interrupts ignore the global
 	 * interrupt lock, which is the WrongThing (tm) to do.
 	 */
-	exit_idle();
 	irq_enter();
 	local_apic_timer_interrupt();
 	irq_exit();
@@ -1221,7 +1219,6 @@ void smp_spurious_interrupt(struct pt_regs *regs)
 {
 	unsigned long v;
 
-	exit_idle();
 	irq_enter();
 	/*
 	 * Check if this really is a spurious interrupt and ACK it
@@ -1245,7 +1242,6 @@ void smp_error_interrupt(struct pt_regs *regs)
 {
 	unsigned long v, v1;
 
-	exit_idle();
 	irq_enter();
 	/* First tickle the hardware, only then report what went on. -- REW */
 	v = apic_read(APIC_ESR);
diff --git a/arch/i386/kernel/cpu/mcheck/p4.c b/arch/i386/kernel/cpu/mcheck/p4.c
index 8359c19d3a23..504434a46011 100644
--- a/arch/i386/kernel/cpu/mcheck/p4.c
+++ b/arch/i386/kernel/cpu/mcheck/p4.c
@@ -12,7 +12,6 @@
 #include <asm/system.h>
 #include <asm/msr.h>
 #include <asm/apic.h>
-#include <asm/idle.h>
 
 #include <asm/therm_throt.h>
 
@@ -60,7 +59,6 @@ static void (*vendor_thermal_interrupt)(struct pt_regs *regs) = unexpected_therm
 
 fastcall void smp_thermal_interrupt(struct pt_regs *regs)
 {
-	exit_idle();
 	irq_enter();
 	vendor_thermal_interrupt(regs);
 	irq_exit();
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index 0f2ca590bf23..8db8d514c9c0 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -18,8 +18,6 @@
 #include <linux/cpu.h>
 #include <linux/delay.h>
 
-#include <asm/idle.h>
-
 #include <asm/apic.h>
 #include <asm/uaccess.h>
 
@@ -77,7 +75,6 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
 	union irq_ctx *curctx, *irqctx;
 	u32 *isp;
 #endif
-	exit_idle();
 
 	if (unlikely((unsigned)irq >= NR_IRQS)) {
 		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index bea304d48cdb..393a67d5d943 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -49,7 +49,6 @@
 #include <asm/i387.h>
 #include <asm/desc.h>
 #include <asm/vm86.h>
-#include <asm/idle.h>
 #ifdef CONFIG_MATH_EMULATION
 #include <asm/math_emu.h>
 #endif
@@ -82,42 +81,6 @@ void (*pm_idle)(void);
 EXPORT_SYMBOL(pm_idle);
 static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
 
-static ATOMIC_NOTIFIER_HEAD(idle_notifier);
-
-void idle_notifier_register(struct notifier_block *n)
-{
-	atomic_notifier_chain_register(&idle_notifier, n);
-}
-
-void idle_notifier_unregister(struct notifier_block *n)
-{
-	atomic_notifier_chain_unregister(&idle_notifier, n);
-}
-
-static DEFINE_PER_CPU(volatile unsigned long, idle_state);
-
-void enter_idle(void)
-{
-	/* needs to be atomic w.r.t. interrupts, not against other CPUs */
-	__set_bit(0, &__get_cpu_var(idle_state));
-	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
-}
-
-static void __exit_idle(void)
-{
-	/* needs to be atomic w.r.t. interrupts, not against other CPUs */
-	if (__test_and_clear_bit(0, &__get_cpu_var(idle_state)) == 0)
-		return;
-	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
-}
-
-void exit_idle(void)
-{
-	if (current->pid)
-		return;
-	__exit_idle();
-}
-
 void disable_hlt(void)
 {
 	hlt_counter++;
@@ -168,7 +131,6 @@ EXPORT_SYMBOL(default_idle);
  */
static void poll_idle (void)
{
-	local_irq_enable();
 	cpu_relax();
}
 
@@ -229,16 +191,7 @@ void cpu_idle(void)
 				play_dead();
 
 			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
-
-			/*
-			 * Idle routines should keep interrupts disabled
-			 * from here on, until they go to idle.
-			 * Otherwise, idle callbacks can misfire.
-			 */
-			local_irq_disable();
-			enter_idle();
 			idle();
-			__exit_idle();
 		}
 		tick_nohz_restart_sched_tick();
 		preempt_enable_no_resched();
@@ -293,11 +246,7 @@ void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
 		smp_mb();
 		if (!need_resched())
-			__sti_mwait(eax, ecx);
-		else
-			local_irq_enable();
-	} else {
-		local_irq_enable();
+			__mwait(eax, ecx);
 	}
 }
 
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 9bd9637ae692..0e8977871b1f 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -23,7 +23,6 @@
 
 #include <asm/mtrr.h>
 #include <asm/tlbflush.h>
-#include <asm/idle.h>
 #include <mach_apic.h>
 
 /*
@@ -624,7 +623,6 @@ fastcall void smp_call_function_interrupt(struct pt_regs *regs)
 	/*
 	 * At this point the info structure may be out of scope unless wait==1
 	 */
-	exit_idle();
 	irq_enter();
 	(*func)(info);
 	irq_exit();