author     Thomas Gleixner <tglx@linutronix.de>                  2007-02-16 04:27:58 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-02-16 11:13:58 -0500
commit     e05d723f98595b2f4d368f63636a997d98703304 (patch)
tree       53642dafc66ff9b61d2162b879860b6a038ce4ba /arch
parent     d66bea57e779cd592657cca6e61345ae899b78d9 (diff)
[PATCH] i386, apic: clean up the APIC code
The apic code is quite unstructured and missing a lot of comments.
- Restructure the code into helper functions and into timer, setup/shutdown,
  interrupt and power management blocks.
- Fix up comments.
- Namespace fixups.
- Add inline helpers for the APIC version and the is_integrated check
  (see the sketch after the changelog).
- Combine the ack_bad_irq functions.
No functional changes.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Zachary Amsden <zach@vmware.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Rohit Seth <rohitseth@google.com>
Cc: Andi Kleen <ak@suse.de>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Roman Zippel <zippel@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
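
For orientation, here is what the changelog's "inline helpers for version and is_integrated" look like in the reworked apic.c, condensed from the hunks below (GET_APIC_VERSION, APIC_INTEGRATED and apic_read() are the pre-existing i386 APIC macros/accessors):

static inline int lapic_get_version(void)
{
        /* The low byte of the APIC_LVR register holds the version */
        return GET_APIC_VERSION(apic_read(APIC_LVR));
}

/* True for an integrated local APIC, false for a discrete 82489DX */
static inline int lapic_is_integrated(void)
{
        return APIC_INTEGRATED(lapic_get_version());
}

Callers such as modern_apic(), __setup_APIC_LVTT() and clear_local_APIC() then use these helpers instead of open-coding GET_APIC_VERSION(apic_read(APIC_LVR)) at each site.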
Diffstat (limited to 'arch')
-rw-r--r--  arch/i386/kernel/apic.c    | 1553
-rw-r--r--  arch/i386/kernel/io_apic.c |    2
-rw-r--r--  arch/i386/kernel/irq.c     |   22
-rw-r--r--  arch/i386/kernel/smpboot.c |    4
4 files changed, 815 insertions, 766 deletions
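
The "combine the ack_bad_irq functions" item refers to the ack_bad_irq() that the first apic.c hunk below deletes; going by the diffstat it is presumably consolidated into arch/i386/kernel/irq.c, which is not shown in this apic.c-only excerpt. Based on the body being removed from apic.c, the combined helper plausibly looks like this:

void ack_bad_irq(unsigned int irq)
{
        printk("unexpected IRQ trap at vector %02x\n", irq);
        /*
         * Currently unexpected vectors happen only on SMP and APIC.
         * We _must_ ack these because every local APIC has only N
         * irq slots per priority level, and a 'hanging, unacked' IRQ
         * holds up an irq slot - in excessive cases (when multiple
         * unexpected vectors occur) that might lock up the APIC
         * completely. But only ack when the APIC is enabled -AK
         */
        if (cpu_has_apic)
                ack_APIC_irq();
}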
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index f4159e0a7ae9..b56448f214a7 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -45,6 +45,13 @@
45 | #include "io_ports.h" | 45 | #include "io_ports.h" |
46 | 46 | ||
47 | /* | 47 | /* |
48 | * Sanity check | ||
49 | */ | ||
50 | #if (SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F | ||
51 | # error SPURIOUS_APIC_VECTOR definition error | ||
52 | #endif | ||
53 | |||
54 | /* | ||
48 | * cpu_mask that denotes the CPUs that needs timer interrupt coming in as | 55 | * cpu_mask that denotes the CPUs that needs timer interrupt coming in as |
49 | * IPIs in place of local APIC timers | 56 | * IPIs in place of local APIC timers |
50 | */ | 57 | */ |
@@ -52,121 +59,472 @@ static cpumask_t timer_bcast_ipi;
52 | 59 | ||
53 | /* | 60 | /* |
54 | * Knob to control our willingness to enable the local APIC. | 61 | * Knob to control our willingness to enable the local APIC. |
62 | * | ||
63 | * -1=force-disable, +1=force-enable | ||
55 | */ | 64 | */ |
56 | static int enable_local_apic __initdata = 0; /* -1=force-disable, +1=force-enable */ | 65 | static int enable_local_apic __initdata = 0; |
66 | |||
67 | /* | ||
68 | * Debug level, exported for io_apic.c | ||
69 | */ | ||
70 | int apic_verbosity; | ||
71 | |||
72 | static void apic_pm_activate(void); | ||
57 | 73 | ||
58 | static inline void lapic_disable(void) | 74 | |
75 | /* Using APIC to generate smp_local_timer_interrupt? */ | ||
76 | int using_apic_timer __read_mostly = 0; | ||
77 | |||
78 | /* Local APIC was disabled by the BIOS and enabled by the kernel */ | ||
79 | static int enabled_via_apicbase; | ||
80 | |||
81 | /* | ||
82 | * Get the LAPIC version | ||
83 | */ | ||
84 | static inline int lapic_get_version(void) | ||
59 | { | 85 | { |
60 | enable_local_apic = -1; | 86 | return GET_APIC_VERSION(apic_read(APIC_LVR)); |
61 | clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability); | ||
62 | } | 87 | } |
63 | 88 | ||
64 | static inline void lapic_enable(void) | 89 | /* |
90 | * Check, if the APIC is integrated or a seperate chip | ||
91 | */ | ||
92 | static inline int lapic_is_integrated(void) | ||
65 | { | 93 | { |
66 | enable_local_apic = 1; | 94 | return APIC_INTEGRATED(lapic_get_version()); |
67 | } | 95 | } |
68 | 96 | ||
69 | /* | 97 | /* |
70 | * Debug level | 98 | * Check, whether this is a modern or a first generation APIC |
71 | */ | 99 | */ |
72 | int apic_verbosity; | ||
73 | |||
74 | |||
75 | static void apic_pm_activate(void); | ||
76 | |||
77 | static int modern_apic(void) | 100 | static int modern_apic(void) |
78 | { | 101 | { |
79 | unsigned int lvr, version; | ||
80 | /* AMD systems use old APIC versions, so check the CPU */ | 102 | /* AMD systems use old APIC versions, so check the CPU */ |
81 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && | 103 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && |
82 | boot_cpu_data.x86 >= 0xf) | 104 | boot_cpu_data.x86 >= 0xf) |
83 | return 1; | 105 | return 1; |
84 | lvr = apic_read(APIC_LVR); | 106 | return lapic_get_version() >= 0x14; |
85 | version = GET_APIC_VERSION(lvr); | 107 | } |
86 | return version >= 0x14; | 108 | |
109 | /** | ||
110 | * enable_NMI_through_LVT0 - enable NMI through local vector table 0 | ||
111 | */ | ||
112 | void enable_NMI_through_LVT0 (void * dummy) | ||
113 | { | ||
114 | unsigned int v = APIC_DM_NMI; | ||
115 | |||
116 | /* Level triggered for 82489DX */ | ||
117 | if (!lapic_is_integrated()) | ||
118 | v |= APIC_LVT_LEVEL_TRIGGER; | ||
119 | apic_write_around(APIC_LVT0, v); | ||
120 | } | ||
121 | |||
122 | /** | ||
123 | * get_physical_broadcast - Get number of physical broadcast IDs | ||
124 | */ | ||
125 | int get_physical_broadcast(void) | ||
126 | { | ||
127 | return modern_apic() ? 0xff : 0xf; | ||
128 | } | ||
129 | |||
130 | /** | ||
131 | * lapic_get_maxlvt - get the maximum number of local vector table entries | ||
132 | */ | ||
133 | int lapic_get_maxlvt(void) | ||
134 | { | ||
135 | unsigned int v = apic_read(APIC_LVR); | ||
136 | |||
137 | /* 82489DXs do not report # of LVT entries. */ | ||
138 | return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2; | ||
87 | } | 139 | } |
88 | 140 | ||
89 | /* | 141 | /* |
90 | * 'what should we do if we get a hw irq event on an illegal vector'. | 142 | * Local APIC timer |
91 | * each architecture has to answer this themselves. | 143 | */ |
144 | |||
145 | /* | ||
146 | * This part sets up the APIC 32 bit clock in LVTT1, with HZ interrupts | ||
147 | * per second. We assume that the caller has already set up the local | ||
148 | * APIC. | ||
149 | * | ||
150 | * The APIC timer is not exactly sync with the external timer chip, it | ||
151 | * closely follows bus clocks. | ||
152 | */ | ||
153 | |||
154 | /* | ||
155 | * The timer chip is already set up at HZ interrupts per second here, | ||
156 | * but we do not accept timer interrupts yet. We only allow the BP | ||
157 | * to calibrate. | ||
92 | */ | 158 | */ |
93 | void ack_bad_irq(unsigned int irq) | 159 | static unsigned int __devinit get_8254_timer_count(void) |
160 | { | ||
161 | unsigned long flags; | ||
162 | |||
163 | unsigned int count; | ||
164 | |||
165 | spin_lock_irqsave(&i8253_lock, flags); | ||
166 | |||
167 | outb_p(0x00, PIT_MODE); | ||
168 | count = inb_p(PIT_CH0); | ||
169 | count |= inb_p(PIT_CH0) << 8; | ||
170 | |||
171 | spin_unlock_irqrestore(&i8253_lock, flags); | ||
172 | |||
173 | return count; | ||
174 | } | ||
175 | |||
176 | /* next tick in 8254 can be caught by catching timer wraparound */ | ||
177 | static void __devinit wait_8254_wraparound(void) | ||
94 | { | 178 | { |
95 | printk("unexpected IRQ trap at vector %02x\n", irq); | 179 | unsigned int curr_count, prev_count; |
180 | |||
181 | curr_count = get_8254_timer_count(); | ||
182 | do { | ||
183 | prev_count = curr_count; | ||
184 | curr_count = get_8254_timer_count(); | ||
185 | |||
186 | /* workaround for broken Mercury/Neptune */ | ||
187 | if (prev_count >= curr_count + 0x100) | ||
188 | curr_count = get_8254_timer_count(); | ||
189 | |||
190 | } while (prev_count >= curr_count); | ||
191 | } | ||
192 | |||
193 | /* | ||
194 | * Default initialization for 8254 timers. If we use other timers like HPET, | ||
195 | * we override this later | ||
196 | */ | ||
197 | void (*wait_timer_tick)(void) __devinitdata = wait_8254_wraparound; | ||
198 | |||
199 | /* | ||
200 | * This function sets up the local APIC timer, with a timeout of | ||
201 | * 'clocks' APIC bus clock. During calibration we actually call | ||
202 | * this function twice on the boot CPU, once with a bogus timeout | ||
203 | * value, second time for real. The other (noncalibrating) CPUs | ||
204 | * call this function only once, with the real, calibrated value. | ||
205 | * | ||
206 | * We do reads before writes even if unnecessary, to get around the | ||
207 | * P5 APIC double write bug. | ||
208 | */ | ||
209 | |||
210 | #define APIC_DIVISOR 16 | ||
211 | |||
212 | static void __setup_APIC_LVTT(unsigned int clocks) | ||
213 | { | ||
214 | unsigned int lvtt_value, tmp_value; | ||
215 | int cpu = smp_processor_id(); | ||
216 | |||
217 | lvtt_value = APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR; | ||
218 | if (!lapic_is_integrated()) | ||
219 | lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV); | ||
220 | |||
221 | if (cpu_isset(cpu, timer_bcast_ipi)) | ||
222 | lvtt_value |= APIC_LVT_MASKED; | ||
223 | |||
224 | apic_write_around(APIC_LVTT, lvtt_value); | ||
225 | |||
96 | /* | 226 | /* |
97 | * Currently unexpected vectors happen only on SMP and APIC. | 227 | * Divide PICLK by 16 |
98 | * We _must_ ack these because every local APIC has only N | ||
99 | * irq slots per priority level, and a 'hanging, unacked' IRQ | ||
100 | * holds up an irq slot - in excessive cases (when multiple | ||
101 | * unexpected vectors occur) that might lock up the APIC | ||
102 | * completely. | ||
103 | * But only ack when the APIC is enabled -AK | ||
104 | */ | 228 | */ |
105 | if (cpu_has_apic) | 229 | tmp_value = apic_read(APIC_TDCR); |
106 | ack_APIC_irq(); | 230 | apic_write_around(APIC_TDCR, (tmp_value |
231 | & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) | ||
232 | | APIC_TDR_DIV_16); | ||
233 | |||
234 | apic_write_around(APIC_TMICT, clocks/APIC_DIVISOR); | ||
107 | } | 235 | } |
108 | 236 | ||
109 | void __init apic_intr_init(void) | 237 | static void __devinit setup_APIC_timer(unsigned int clocks) |
110 | { | 238 | { |
111 | #ifdef CONFIG_SMP | 239 | unsigned long flags; |
112 | smp_intr_init(); | ||
113 | #endif | ||
114 | /* self generated IPI for local APIC timer */ | ||
115 | set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt); | ||
116 | 240 | ||
117 | /* IPI vectors for APIC spurious and error interrupts */ | 241 | local_irq_save(flags); |
118 | set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); | ||
119 | set_intr_gate(ERROR_APIC_VECTOR, error_interrupt); | ||
120 | 242 | ||
121 | /* thermal monitor LVT interrupt */ | 243 | /* |
122 | #ifdef CONFIG_X86_MCE_P4THERMAL | 244 | * Wait for IRQ0's slice: |
123 | set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); | 245 | */ |
124 | #endif | 246 | wait_timer_tick(); |
247 | |||
248 | __setup_APIC_LVTT(clocks); | ||
249 | |||
250 | local_irq_restore(flags); | ||
125 | } | 251 | } |
126 | 252 | ||
127 | /* Using APIC to generate smp_local_timer_interrupt? */ | 253 | /* |
128 | int using_apic_timer __read_mostly = 0; | 254 | * In this function we calibrate APIC bus clocks to the external |
255 | * timer. Unfortunately we cannot use jiffies and the timer irq | ||
256 | * to calibrate, since some later bootup code depends on getting | ||
257 | * the first irq? Ugh. | ||
258 | * | ||
259 | * We want to do the calibration only once since we | ||
260 | * want to have local timer irqs syncron. CPUs connected | ||
261 | * by the same APIC bus have the very same bus frequency. | ||
262 | * And we want to have irqs off anyways, no accidental | ||
263 | * APIC irq that way. | ||
264 | */ | ||
129 | 265 | ||
130 | static int enabled_via_apicbase; | 266 | static int __init calibrate_APIC_clock(void) |
267 | { | ||
268 | unsigned long long t1 = 0, t2 = 0; | ||
269 | long tt1, tt2; | ||
270 | long result; | ||
271 | int i; | ||
272 | const int LOOPS = HZ/10; | ||
131 | 273 | ||
132 | void enable_NMI_through_LVT0 (void * dummy) | 274 | apic_printk(APIC_VERBOSE, "calibrating APIC timer ...\n"); |
275 | |||
276 | /* | ||
277 | * Put whatever arbitrary (but long enough) timeout | ||
278 | * value into the APIC clock, we just want to get the | ||
279 | * counter running for calibration. | ||
280 | */ | ||
281 | __setup_APIC_LVTT(1000000000); | ||
282 | |||
283 | /* | ||
284 | * The timer chip counts down to zero. Let's wait | ||
285 | * for a wraparound to start exact measurement: | ||
286 | * (the current tick might have been already half done) | ||
287 | */ | ||
288 | |||
289 | wait_timer_tick(); | ||
290 | |||
291 | /* | ||
292 | * We wrapped around just now. Let's start: | ||
293 | */ | ||
294 | if (cpu_has_tsc) | ||
295 | rdtscll(t1); | ||
296 | tt1 = apic_read(APIC_TMCCT); | ||
297 | |||
298 | /* | ||
299 | * Let's wait LOOPS wraprounds: | ||
300 | */ | ||
301 | for (i = 0; i < LOOPS; i++) | ||
302 | wait_timer_tick(); | ||
303 | |||
304 | tt2 = apic_read(APIC_TMCCT); | ||
305 | if (cpu_has_tsc) | ||
306 | rdtscll(t2); | ||
307 | |||
308 | /* | ||
309 | * The APIC bus clock counter is 32 bits only, it | ||
310 | * might have overflown, but note that we use signed | ||
311 | * longs, thus no extra care needed. | ||
312 | * | ||
313 | * underflown to be exact, as the timer counts down ;) | ||
314 | */ | ||
315 | |||
316 | result = (tt1-tt2)*APIC_DIVISOR/LOOPS; | ||
317 | |||
318 | if (cpu_has_tsc) | ||
319 | apic_printk(APIC_VERBOSE, "..... CPU clock speed is " | ||
320 | "%ld.%04ld MHz.\n", | ||
321 | ((long)(t2-t1)/LOOPS)/(1000000/HZ), | ||
322 | ((long)(t2-t1)/LOOPS)%(1000000/HZ)); | ||
323 | |||
324 | apic_printk(APIC_VERBOSE, "..... host bus clock speed is " | ||
325 | "%ld.%04ld MHz.\n", | ||
326 | result/(1000000/HZ), | ||
327 | result%(1000000/HZ)); | ||
328 | |||
329 | return result; | ||
330 | } | ||
331 | |||
332 | static unsigned int calibration_result; | ||
333 | |||
334 | void __init setup_boot_APIC_clock(void) | ||
133 | { | 335 | { |
134 | unsigned int v, ver; | 336 | unsigned long flags; |
337 | apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"); | ||
338 | using_apic_timer = 1; | ||
135 | 339 | ||
136 | ver = apic_read(APIC_LVR); | 340 | local_irq_save(flags); |
137 | ver = GET_APIC_VERSION(ver); | 341 | |
138 | v = APIC_DM_NMI; /* unmask and set to NMI */ | 342 | calibration_result = calibrate_APIC_clock(); |
139 | if (!APIC_INTEGRATED(ver)) /* 82489DX */ | 343 | /* |
140 | v |= APIC_LVT_LEVEL_TRIGGER; | 344 | * Now set up the timer for real. |
141 | apic_write_around(APIC_LVT0, v); | 345 | */ |
346 | setup_APIC_timer(calibration_result); | ||
347 | |||
348 | local_irq_restore(flags); | ||
142 | } | 349 | } |
143 | 350 | ||
144 | int get_physical_broadcast(void) | 351 | void __devinit setup_secondary_APIC_clock(void) |
145 | { | 352 | { |
146 | if (modern_apic()) | 353 | setup_APIC_timer(calibration_result); |
147 | return 0xff; | ||
148 | else | ||
149 | return 0xf; | ||
150 | } | 354 | } |
151 | 355 | ||
152 | int get_maxlvt(void) | 356 | void disable_APIC_timer(void) |
153 | { | 357 | { |
154 | unsigned int v, ver, maxlvt; | 358 | if (using_apic_timer) { |
359 | unsigned long v; | ||
155 | 360 | ||
156 | v = apic_read(APIC_LVR); | 361 | v = apic_read(APIC_LVTT); |
157 | ver = GET_APIC_VERSION(v); | 362 | /* |
158 | /* 82489DXs do not report # of LVT entries. */ | 363 | * When an illegal vector value (0-15) is written to an LVT |
159 | maxlvt = APIC_INTEGRATED(ver) ? GET_APIC_MAXLVT(v) : 2; | 364 | * entry and delivery mode is Fixed, the APIC may signal an |
160 | return maxlvt; | 365 | * illegal vector error, with out regard to whether the mask |
366 | * bit is set or whether an interrupt is actually seen on | ||
367 | * input. | ||
368 | * | ||
369 | * Boot sequence might call this function when the LVTT has | ||
370 | * '0' vector value. So make sure vector field is set to | ||
371 | * valid value. | ||
372 | */ | ||
373 | v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); | ||
374 | apic_write_around(APIC_LVTT, v); | ||
375 | } | ||
376 | } | ||
377 | |||
378 | void enable_APIC_timer(void) | ||
379 | { | ||
380 | int cpu = smp_processor_id(); | ||
381 | |||
382 | if (using_apic_timer && !cpu_isset(cpu, timer_bcast_ipi)) { | ||
383 | unsigned long v; | ||
384 | |||
385 | v = apic_read(APIC_LVTT); | ||
386 | apic_write_around(APIC_LVTT, v & ~APIC_LVT_MASKED); | ||
387 | } | ||
388 | } | ||
389 | |||
390 | void switch_APIC_timer_to_ipi(void *cpumask) | ||
391 | { | ||
392 | cpumask_t mask = *(cpumask_t *)cpumask; | ||
393 | int cpu = smp_processor_id(); | ||
394 | |||
395 | if (cpu_isset(cpu, mask) && | ||
396 | !cpu_isset(cpu, timer_bcast_ipi)) { | ||
397 | disable_APIC_timer(); | ||
398 | cpu_set(cpu, timer_bcast_ipi); | ||
399 | } | ||
400 | } | ||
401 | EXPORT_SYMBOL(switch_APIC_timer_to_ipi); | ||
402 | |||
403 | void switch_ipi_to_APIC_timer(void *cpumask) | ||
404 | { | ||
405 | cpumask_t mask = *(cpumask_t *)cpumask; | ||
406 | int cpu = smp_processor_id(); | ||
407 | |||
408 | if (cpu_isset(cpu, mask) && | ||
409 | cpu_isset(cpu, timer_bcast_ipi)) { | ||
410 | cpu_clear(cpu, timer_bcast_ipi); | ||
411 | enable_APIC_timer(); | ||
412 | } | ||
413 | } | ||
414 | EXPORT_SYMBOL(switch_ipi_to_APIC_timer); | ||
415 | |||
416 | /* | ||
417 | * Local timer interrupt handler. It does both profiling and | ||
418 | * process statistics/rescheduling. | ||
419 | */ | ||
420 | inline void smp_local_timer_interrupt(void) | ||
421 | { | ||
422 | profile_tick(CPU_PROFILING); | ||
423 | #ifdef CONFIG_SMP | ||
424 | update_process_times(user_mode_vm(get_irq_regs())); | ||
425 | #endif | ||
426 | |||
427 | /* | ||
428 | * We take the 'long' return path, and there every subsystem | ||
429 | * grabs the apropriate locks (kernel lock/ irq lock). | ||
430 | * | ||
431 | * we might want to decouple profiling from the 'long path', | ||
432 | * and do the profiling totally in assembly. | ||
433 | * | ||
434 | * Currently this isn't too much of an issue (performance wise), | ||
435 | * we can take more than 100K local irqs per second on a 100 MHz P5. | ||
436 | */ | ||
437 | } | ||
438 | |||
439 | /* | ||
440 | * Local APIC timer interrupt. This is the most natural way for doing | ||
441 | * local interrupts, but local timer interrupts can be emulated by | ||
442 | * broadcast interrupts too. [in case the hw doesn't support APIC timers] | ||
443 | * | ||
444 | * [ if a single-CPU system runs an SMP kernel then we call the local | ||
445 | * interrupt as well. Thus we cannot inline the local irq ... ] | ||
446 | */ | ||
447 | |||
448 | fastcall void smp_apic_timer_interrupt(struct pt_regs *regs) | ||
449 | { | ||
450 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
451 | int cpu = smp_processor_id(); | ||
452 | |||
453 | /* | ||
454 | * the NMI deadlock-detector uses this. | ||
455 | */ | ||
456 | per_cpu(irq_stat, cpu).apic_timer_irqs++; | ||
457 | |||
458 | /* | ||
459 | * NOTE! We'd better ACK the irq immediately, | ||
460 | * because timer handling can be slow. | ||
461 | */ | ||
462 | ack_APIC_irq(); | ||
463 | /* | ||
464 | * update_process_times() expects us to have done irq_enter(). | ||
465 | * Besides, if we don't timer interrupts ignore the global | ||
466 | * interrupt lock, which is the WrongThing (tm) to do. | ||
467 | */ | ||
468 | exit_idle(); | ||
469 | irq_enter(); | ||
470 | smp_local_timer_interrupt(); | ||
471 | irq_exit(); | ||
472 | set_irq_regs(old_regs); | ||
473 | } | ||
474 | |||
475 | #ifndef CONFIG_SMP | ||
476 | static void up_apic_timer_interrupt_call(void) | ||
477 | { | ||
478 | int cpu = smp_processor_id(); | ||
479 | |||
480 | /* | ||
481 | * the NMI deadlock-detector uses this. | ||
482 | */ | ||
483 | per_cpu(irq_stat, cpu).apic_timer_irqs++; | ||
484 | |||
485 | smp_local_timer_interrupt(); | ||
486 | } | ||
487 | #endif | ||
488 | |||
489 | void smp_send_timer_broadcast_ipi(void) | ||
490 | { | ||
491 | cpumask_t mask; | ||
492 | |||
493 | cpus_and(mask, cpu_online_map, timer_bcast_ipi); | ||
494 | if (!cpus_empty(mask)) { | ||
495 | #ifdef CONFIG_SMP | ||
496 | send_IPI_mask(mask, LOCAL_TIMER_VECTOR); | ||
497 | #else | ||
498 | /* | ||
499 | * We can directly call the apic timer interrupt handler | ||
500 | * in UP case. Minus all irq related functions | ||
501 | */ | ||
502 | up_apic_timer_interrupt_call(); | ||
503 | #endif | ||
504 | } | ||
505 | } | ||
506 | |||
507 | int setup_profiling_timer(unsigned int multiplier) | ||
508 | { | ||
509 | return -EINVAL; | ||
161 | } | 510 | } |
162 | 511 | ||
512 | /* | ||
513 | * Local APIC start and shutdown | ||
514 | */ | ||
515 | |||
516 | /** | ||
517 | * clear_local_APIC - shutdown the local APIC | ||
518 | * | ||
519 | * This is called, when a CPU is disabled and before rebooting, so the state of | ||
520 | * the local APIC has no dangling leftovers. Also used to cleanout any BIOS | ||
521 | * leftovers during boot. | ||
522 | */ | ||
163 | void clear_local_APIC(void) | 523 | void clear_local_APIC(void) |
164 | { | 524 | { |
165 | int maxlvt; | 525 | int maxlvt = lapic_get_maxlvt(); |
166 | unsigned long v; | 526 | unsigned long v; |
167 | 527 | ||
168 | maxlvt = get_maxlvt(); | ||
169 | |||
170 | /* | 528 | /* |
171 | * Masking an LVT entry can trigger a local APIC error | 529 | * Masking an LVT entry can trigger a local APIC error |
172 | * if the vector is zero. Mask LVTERR first to prevent this. | 530 | * if the vector is zero. Mask LVTERR first to prevent this. |
@@ -190,7 +548,7 @@ void clear_local_APIC(void)
190 | apic_write_around(APIC_LVTPC, v | APIC_LVT_MASKED); | 548 | apic_write_around(APIC_LVTPC, v | APIC_LVT_MASKED); |
191 | } | 549 | } |
192 | 550 | ||
193 | /* lets not touch this if we didn't frob it */ | 551 | /* lets not touch this if we didn't frob it */ |
194 | #ifdef CONFIG_X86_MCE_P4THERMAL | 552 | #ifdef CONFIG_X86_MCE_P4THERMAL |
195 | if (maxlvt >= 5) { | 553 | if (maxlvt >= 5) { |
196 | v = apic_read(APIC_LVTTHMR); | 554 | v = apic_read(APIC_LVTTHMR); |
@@ -212,85 +570,18 @@ void clear_local_APIC(void)
212 | if (maxlvt >= 5) | 570 | if (maxlvt >= 5) |
213 | apic_write_around(APIC_LVTTHMR, APIC_LVT_MASKED); | 571 | apic_write_around(APIC_LVTTHMR, APIC_LVT_MASKED); |
214 | #endif | 572 | #endif |
215 | v = GET_APIC_VERSION(apic_read(APIC_LVR)); | 573 | /* Integrated APIC (!82489DX) ? */ |
216 | if (APIC_INTEGRATED(v)) { /* !82489DX */ | 574 | if (lapic_is_integrated()) { |
217 | if (maxlvt > 3) /* Due to Pentium errata 3AP and 11AP. */ | 575 | if (maxlvt > 3) |
576 | /* Clear ESR due to Pentium errata 3AP and 11AP */ | ||
218 | apic_write(APIC_ESR, 0); | 577 | apic_write(APIC_ESR, 0); |
219 | apic_read(APIC_ESR); | 578 | apic_read(APIC_ESR); |
220 | } | 579 | } |
221 | } | 580 | } |
222 | 581 | ||
223 | void __init connect_bsp_APIC(void) | 582 | /** |
224 | { | 583 | * disable_local_APIC - clear and disable the local APIC |
225 | if (pic_mode) { | 584 | */ |
226 | /* | ||
227 | * Do not trust the local APIC being empty at bootup. | ||
228 | */ | ||
229 | clear_local_APIC(); | ||
230 | /* | ||
231 | * PIC mode, enable APIC mode in the IMCR, i.e. | ||
232 | * connect BSP's local APIC to INT and NMI lines. | ||
233 | */ | ||
234 | apic_printk(APIC_VERBOSE, "leaving PIC mode, " | ||
235 | "enabling APIC mode.\n"); | ||
236 | outb(0x70, 0x22); | ||
237 | outb(0x01, 0x23); | ||
238 | } | ||
239 | enable_apic_mode(); | ||
240 | } | ||
241 | |||
242 | void disconnect_bsp_APIC(int virt_wire_setup) | ||
243 | { | ||
244 | if (pic_mode) { | ||
245 | /* | ||
246 | * Put the board back into PIC mode (has an effect | ||
247 | * only on certain older boards). Note that APIC | ||
248 | * interrupts, including IPIs, won't work beyond | ||
249 | * this point! The only exception are INIT IPIs. | ||
250 | */ | ||
251 | apic_printk(APIC_VERBOSE, "disabling APIC mode, " | ||
252 | "entering PIC mode.\n"); | ||
253 | outb(0x70, 0x22); | ||
254 | outb(0x00, 0x23); | ||
255 | } | ||
256 | else { | ||
257 | /* Go back to Virtual Wire compatibility mode */ | ||
258 | unsigned long value; | ||
259 | |||
260 | /* For the spurious interrupt use vector F, and enable it */ | ||
261 | value = apic_read(APIC_SPIV); | ||
262 | value &= ~APIC_VECTOR_MASK; | ||
263 | value |= APIC_SPIV_APIC_ENABLED; | ||
264 | value |= 0xf; | ||
265 | apic_write_around(APIC_SPIV, value); | ||
266 | |||
267 | if (!virt_wire_setup) { | ||
268 | /* For LVT0 make it edge triggered, active high, external and enabled */ | ||
269 | value = apic_read(APIC_LVT0); | ||
270 | value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING | | ||
271 | APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR | | ||
272 | APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED ); | ||
273 | value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING; | ||
274 | value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT); | ||
275 | apic_write_around(APIC_LVT0, value); | ||
276 | } | ||
277 | else { | ||
278 | /* Disable LVT0 */ | ||
279 | apic_write_around(APIC_LVT0, APIC_LVT_MASKED); | ||
280 | } | ||
281 | |||
282 | /* For LVT1 make it edge triggered, active high, nmi and enabled */ | ||
283 | value = apic_read(APIC_LVT1); | ||
284 | value &= ~( | ||
285 | APIC_MODE_MASK | APIC_SEND_PENDING | | ||
286 | APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR | | ||
287 | APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED); | ||
288 | value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING; | ||
289 | value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI); | ||
290 | apic_write_around(APIC_LVT1, value); | ||
291 | } | ||
292 | } | ||
293 | |||
294 | void disable_local_APIC(void) | 585 | void disable_local_APIC(void) |
295 | { | 586 | { |
296 | unsigned long value; | 587 | unsigned long value; |
@@ -305,8 +596,13 @@ void disable_local_APIC(void)
305 | value &= ~APIC_SPIV_APIC_ENABLED; | 596 | value &= ~APIC_SPIV_APIC_ENABLED; |
306 | apic_write_around(APIC_SPIV, value); | 597 | apic_write_around(APIC_SPIV, value); |
307 | 598 | ||
599 | /* | ||
600 | * When LAPIC was disabled by the BIOS and enabled by the kernel, | ||
601 | * restore the disabled state. | ||
602 | */ | ||
308 | if (enabled_via_apicbase) { | 603 | if (enabled_via_apicbase) { |
309 | unsigned int l, h; | 604 | unsigned int l, h; |
605 | |||
310 | rdmsr(MSR_IA32_APICBASE, l, h); | 606 | rdmsr(MSR_IA32_APICBASE, l, h); |
311 | l &= ~MSR_IA32_APICBASE_ENABLE; | 607 | l &= ~MSR_IA32_APICBASE_ENABLE; |
312 | wrmsr(MSR_IA32_APICBASE, l, h); | 608 | wrmsr(MSR_IA32_APICBASE, l, h); |
@@ -314,6 +610,28 @@ void disable_local_APIC(void)
314 | } | 610 | } |
315 | 611 | ||
316 | /* | 612 | /* |
613 | * If Linux enabled the LAPIC against the BIOS default disable it down before | ||
614 | * re-entering the BIOS on shutdown. Otherwise the BIOS may get confused and | ||
615 | * not power-off. Additionally clear all LVT entries before disable_local_APIC | ||
616 | * for the case where Linux didn't enable the LAPIC. | ||
617 | */ | ||
618 | void lapic_shutdown(void) | ||
619 | { | ||
620 | unsigned long flags; | ||
621 | |||
622 | if (!cpu_has_apic) | ||
623 | return; | ||
624 | |||
625 | local_irq_save(flags); | ||
626 | clear_local_APIC(); | ||
627 | |||
628 | if (enabled_via_apicbase) | ||
629 | disable_local_APIC(); | ||
630 | |||
631 | local_irq_restore(flags); | ||
632 | } | ||
633 | |||
634 | /* | ||
317 | * This is to verify that we're looking at a real local APIC. | 635 | * This is to verify that we're looking at a real local APIC. |
318 | * Check these against your board if the CPUs aren't getting | 636 | * Check these against your board if the CPUs aren't getting |
319 | * started for no apparent reason. | 637 | * started for no apparent reason. |
@@ -345,7 +663,7 @@ int __init verify_local_APIC(void)
345 | reg1 = GET_APIC_VERSION(reg0); | 663 | reg1 = GET_APIC_VERSION(reg0); |
346 | if (reg1 == 0x00 || reg1 == 0xff) | 664 | if (reg1 == 0x00 || reg1 == 0xff) |
347 | return 0; | 665 | return 0; |
348 | reg1 = get_maxlvt(); | 666 | reg1 = lapic_get_maxlvt(); |
349 | if (reg1 < 0x02 || reg1 == 0xff) | 667 | if (reg1 < 0x02 || reg1 == 0xff) |
350 | return 0; | 668 | return 0; |
351 | 669 | ||
@@ -368,10 +686,15 @@ int __init verify_local_APIC(void)
368 | return 1; | 686 | return 1; |
369 | } | 687 | } |
370 | 688 | ||
689 | /** | ||
690 | * sync_Arb_IDs - synchronize APIC bus arbitration IDs | ||
691 | */ | ||
371 | void __init sync_Arb_IDs(void) | 692 | void __init sync_Arb_IDs(void) |
372 | { | 693 | { |
373 | /* Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 | 694 | /* |
374 | And not needed on AMD */ | 695 | * Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 And not |
696 | * needed on AMD. | ||
697 | */ | ||
375 | if (modern_apic()) | 698 | if (modern_apic()) |
376 | return; | 699 | return; |
377 | /* | 700 | /* |
@@ -384,14 +707,12 @@ void __init sync_Arb_IDs(void)
384 | | APIC_DM_INIT); | 707 | | APIC_DM_INIT); |
385 | } | 708 | } |
386 | 709 | ||
387 | extern void __error_in_apic_c (void); | ||
388 | |||
389 | /* | 710 | /* |
390 | * An initial setup of the virtual wire mode. | 711 | * An initial setup of the virtual wire mode. |
391 | */ | 712 | */ |
392 | void __init init_bsp_APIC(void) | 713 | void __init init_bsp_APIC(void) |
393 | { | 714 | { |
394 | unsigned long value, ver; | 715 | unsigned long value; |
395 | 716 | ||
396 | /* | 717 | /* |
397 | * Don't do the setup now if we have a SMP BIOS as the | 718 | * Don't do the setup now if we have a SMP BIOS as the |
@@ -400,9 +721,6 @@ void __init init_bsp_APIC(void)
400 | if (smp_found_config || !cpu_has_apic) | 721 | if (smp_found_config || !cpu_has_apic) |
401 | return; | 722 | return; |
402 | 723 | ||
403 | value = apic_read(APIC_LVR); | ||
404 | ver = GET_APIC_VERSION(value); | ||
405 | |||
406 | /* | 724 | /* |
407 | * Do not trust the local APIC being empty at bootup. | 725 | * Do not trust the local APIC being empty at bootup. |
408 | */ | 726 | */ |
@@ -414,9 +732,10 @@ void __init init_bsp_APIC(void)
414 | value = apic_read(APIC_SPIV); | 732 | value = apic_read(APIC_SPIV); |
415 | value &= ~APIC_VECTOR_MASK; | 733 | value &= ~APIC_VECTOR_MASK; |
416 | value |= APIC_SPIV_APIC_ENABLED; | 734 | value |= APIC_SPIV_APIC_ENABLED; |
417 | 735 | ||
418 | /* This bit is reserved on P4/Xeon and should be cleared */ | 736 | /* This bit is reserved on P4/Xeon and should be cleared */ |
419 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 15)) | 737 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && |
738 | (boot_cpu_data.x86 == 15)) | ||
420 | value &= ~APIC_SPIV_FOCUS_DISABLED; | 739 | value &= ~APIC_SPIV_FOCUS_DISABLED; |
421 | else | 740 | else |
422 | value |= APIC_SPIV_FOCUS_DISABLED; | 741 | value |= APIC_SPIV_FOCUS_DISABLED; |
@@ -428,14 +747,17 @@ void __init init_bsp_APIC(void)
428 | */ | 747 | */ |
429 | apic_write_around(APIC_LVT0, APIC_DM_EXTINT); | 748 | apic_write_around(APIC_LVT0, APIC_DM_EXTINT); |
430 | value = APIC_DM_NMI; | 749 | value = APIC_DM_NMI; |
431 | if (!APIC_INTEGRATED(ver)) /* 82489DX */ | 750 | if (!lapic_is_integrated()) /* 82489DX */ |
432 | value |= APIC_LVT_LEVEL_TRIGGER; | 751 | value |= APIC_LVT_LEVEL_TRIGGER; |
433 | apic_write_around(APIC_LVT1, value); | 752 | apic_write_around(APIC_LVT1, value); |
434 | } | 753 | } |
435 | 754 | ||
755 | /** | ||
756 | * setup_local_APIC - setup the local APIC | ||
757 | */ | ||
436 | void __devinit setup_local_APIC(void) | 758 | void __devinit setup_local_APIC(void) |
437 | { | 759 | { |
438 | unsigned long oldvalue, value, ver, maxlvt; | 760 | unsigned long oldvalue, value, maxlvt, integrated; |
439 | int i, j; | 761 | int i, j; |
440 | 762 | ||
441 | /* Pound the ESR really hard over the head with a big hammer - mbligh */ | 763 | /* Pound the ESR really hard over the head with a big hammer - mbligh */ |
@@ -446,11 +768,7 @@ void __devinit setup_local_APIC(void)
446 | apic_write(APIC_ESR, 0); | 768 | apic_write(APIC_ESR, 0); |
447 | } | 769 | } |
448 | 770 | ||
449 | value = apic_read(APIC_LVR); | 771 | integrated = lapic_is_integrated(); |
450 | ver = GET_APIC_VERSION(value); | ||
451 | |||
452 | if ((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f) | ||
453 | __error_in_apic_c(); | ||
454 | 772 | ||
455 | /* | 773 | /* |
456 | * Double-check whether this APIC is really registered. | 774 | * Double-check whether this APIC is really registered. |
@@ -521,13 +839,10 @@ void __devinit setup_local_APIC(void)
521 | * like LRU than MRU (the short-term load is more even across CPUs). | 839 | * like LRU than MRU (the short-term load is more even across CPUs). |
522 | * See also the comment in end_level_ioapic_irq(). --macro | 840 | * See also the comment in end_level_ioapic_irq(). --macro |
523 | */ | 841 | */ |
524 | #if 1 | 842 | |
525 | /* Enable focus processor (bit==0) */ | 843 | /* Enable focus processor (bit==0) */ |
526 | value &= ~APIC_SPIV_FOCUS_DISABLED; | 844 | value &= ~APIC_SPIV_FOCUS_DISABLED; |
527 | #else | 845 | |
528 | /* Disable focus processor (bit==1) */ | ||
529 | value |= APIC_SPIV_FOCUS_DISABLED; | ||
530 | #endif | ||
531 | /* | 846 | /* |
532 | * Set spurious IRQ vector | 847 | * Set spurious IRQ vector |
533 | */ | 848 | */ |
@@ -563,17 +878,18 @@ void __devinit setup_local_APIC(void)
563 | value = APIC_DM_NMI; | 878 | value = APIC_DM_NMI; |
564 | else | 879 | else |
565 | value = APIC_DM_NMI | APIC_LVT_MASKED; | 880 | value = APIC_DM_NMI | APIC_LVT_MASKED; |
566 | if (!APIC_INTEGRATED(ver)) /* 82489DX */ | 881 | if (!integrated) /* 82489DX */ |
567 | value |= APIC_LVT_LEVEL_TRIGGER; | 882 | value |= APIC_LVT_LEVEL_TRIGGER; |
568 | apic_write_around(APIC_LVT1, value); | 883 | apic_write_around(APIC_LVT1, value); |
569 | 884 | ||
570 | if (APIC_INTEGRATED(ver) && !esr_disable) { /* !82489DX */ | 885 | if (integrated && !esr_disable) { /* !82489DX */ |
571 | maxlvt = get_maxlvt(); | 886 | maxlvt = lapic_get_maxlvt(); |
572 | if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ | 887 | if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ |
573 | apic_write(APIC_ESR, 0); | 888 | apic_write(APIC_ESR, 0); |
574 | oldvalue = apic_read(APIC_ESR); | 889 | oldvalue = apic_read(APIC_ESR); |
575 | 890 | ||
576 | value = ERROR_APIC_VECTOR; // enables sending errors | 891 | /* enables sending errors */ |
892 | value = ERROR_APIC_VECTOR; | ||
577 | apic_write_around(APIC_LVTERR, value); | 893 | apic_write_around(APIC_LVTERR, value); |
578 | /* | 894 | /* |
579 | * spec says clear errors after enabling vector. | 895 | * spec says clear errors after enabling vector. |
@@ -586,16 +902,16 @@ void __devinit setup_local_APIC(void)
586 | "vector: 0x%08lx after: 0x%08lx\n", | 902 | "vector: 0x%08lx after: 0x%08lx\n", |
587 | oldvalue, value); | 903 | oldvalue, value); |
588 | } else { | 904 | } else { |
589 | if (esr_disable) | 905 | if (esr_disable) |
590 | /* | 906 | /* |
591 | * Something untraceble is creating bad interrupts on | 907 | * Something untraceble is creating bad interrupts on |
592 | * secondary quads ... for the moment, just leave the | 908 | * secondary quads ... for the moment, just leave the |
593 | * ESR disabled - we can't do anything useful with the | 909 | * ESR disabled - we can't do anything useful with the |
594 | * errors anyway - mbligh | 910 | * errors anyway - mbligh |
595 | */ | 911 | */ |
596 | printk("Leaving ESR disabled.\n"); | 912 | printk(KERN_INFO "Leaving ESR disabled.\n"); |
597 | else | 913 | else |
598 | printk("No ESR for 82489DX.\n"); | 914 | printk(KERN_INFO "No ESR for 82489DX.\n"); |
599 | } | 915 | } |
600 | 916 | ||
601 | setup_apic_nmi_watchdog(NULL); | 917 | setup_apic_nmi_watchdog(NULL); |
@@ -603,190 +919,8 @@ void __devinit setup_local_APIC(void)
603 | } | 919 | } |
604 | 920 | ||
605 | /* | 921 | /* |
606 | * If Linux enabled the LAPIC against the BIOS default | 922 | * Detect and initialize APIC |
607 | * disable it down before re-entering the BIOS on shutdown. | ||
608 | * Otherwise the BIOS may get confused and not power-off. | ||
609 | * Additionally clear all LVT entries before disable_local_APIC | ||
610 | * for the case where Linux didn't enable the LAPIC. | ||
611 | */ | ||
612 | void lapic_shutdown(void) | ||
613 | { | ||
614 | unsigned long flags; | ||
615 | |||
616 | if (!cpu_has_apic) | ||
617 | return; | ||
618 | |||
619 | local_irq_save(flags); | ||
620 | clear_local_APIC(); | ||
621 | |||
622 | if (enabled_via_apicbase) | ||
623 | disable_local_APIC(); | ||
624 | |||
625 | local_irq_restore(flags); | ||
626 | } | ||
627 | |||
628 | #ifdef CONFIG_PM | ||
629 | |||
630 | static struct { | ||
631 | int active; | ||
632 | /* r/w apic fields */ | ||
633 | unsigned int apic_id; | ||
634 | unsigned int apic_taskpri; | ||
635 | unsigned int apic_ldr; | ||
636 | unsigned int apic_dfr; | ||
637 | unsigned int apic_spiv; | ||
638 | unsigned int apic_lvtt; | ||
639 | unsigned int apic_lvtpc; | ||
640 | unsigned int apic_lvt0; | ||
641 | unsigned int apic_lvt1; | ||
642 | unsigned int apic_lvterr; | ||
643 | unsigned int apic_tmict; | ||
644 | unsigned int apic_tdcr; | ||
645 | unsigned int apic_thmr; | ||
646 | } apic_pm_state; | ||
647 | |||
648 | static int lapic_suspend(struct sys_device *dev, pm_message_t state) | ||
649 | { | ||
650 | unsigned long flags; | ||
651 | int maxlvt; | ||
652 | |||
653 | if (!apic_pm_state.active) | ||
654 | return 0; | ||
655 | |||
656 | maxlvt = get_maxlvt(); | ||
657 | |||
658 | apic_pm_state.apic_id = apic_read(APIC_ID); | ||
659 | apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI); | ||
660 | apic_pm_state.apic_ldr = apic_read(APIC_LDR); | ||
661 | apic_pm_state.apic_dfr = apic_read(APIC_DFR); | ||
662 | apic_pm_state.apic_spiv = apic_read(APIC_SPIV); | ||
663 | apic_pm_state.apic_lvtt = apic_read(APIC_LVTT); | ||
664 | if (maxlvt >= 4) | ||
665 | apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC); | ||
666 | apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0); | ||
667 | apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1); | ||
668 | apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR); | ||
669 | apic_pm_state.apic_tmict = apic_read(APIC_TMICT); | ||
670 | apic_pm_state.apic_tdcr = apic_read(APIC_TDCR); | ||
671 | #ifdef CONFIG_X86_MCE_P4THERMAL | ||
672 | if (maxlvt >= 5) | ||
673 | apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR); | ||
674 | #endif | ||
675 | |||
676 | local_irq_save(flags); | ||
677 | disable_local_APIC(); | ||
678 | local_irq_restore(flags); | ||
679 | return 0; | ||
680 | } | ||
681 | |||
682 | static int lapic_resume(struct sys_device *dev) | ||
683 | { | ||
684 | unsigned int l, h; | ||
685 | unsigned long flags; | ||
686 | int maxlvt; | ||
687 | |||
688 | if (!apic_pm_state.active) | ||
689 | return 0; | ||
690 | |||
691 | maxlvt = get_maxlvt(); | ||
692 | |||
693 | local_irq_save(flags); | ||
694 | |||
695 | /* | ||
696 | * Make sure the APICBASE points to the right address | ||
697 | * | ||
698 | * FIXME! This will be wrong if we ever support suspend on | ||
699 | * SMP! We'll need to do this as part of the CPU restore! | ||
700 | */ | ||
701 | rdmsr(MSR_IA32_APICBASE, l, h); | ||
702 | l &= ~MSR_IA32_APICBASE_BASE; | ||
703 | l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr; | ||
704 | wrmsr(MSR_IA32_APICBASE, l, h); | ||
705 | |||
706 | apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED); | ||
707 | apic_write(APIC_ID, apic_pm_state.apic_id); | ||
708 | apic_write(APIC_DFR, apic_pm_state.apic_dfr); | ||
709 | apic_write(APIC_LDR, apic_pm_state.apic_ldr); | ||
710 | apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri); | ||
711 | apic_write(APIC_SPIV, apic_pm_state.apic_spiv); | ||
712 | apic_write(APIC_LVT0, apic_pm_state.apic_lvt0); | ||
713 | apic_write(APIC_LVT1, apic_pm_state.apic_lvt1); | ||
714 | #ifdef CONFIG_X86_MCE_P4THERMAL | ||
715 | if (maxlvt >= 5) | ||
716 | apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr); | ||
717 | #endif | ||
718 | if (maxlvt >= 4) | ||
719 | apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc); | ||
720 | apic_write(APIC_LVTT, apic_pm_state.apic_lvtt); | ||
721 | apic_write(APIC_TDCR, apic_pm_state.apic_tdcr); | ||
722 | apic_write(APIC_TMICT, apic_pm_state.apic_tmict); | ||
723 | apic_write(APIC_ESR, 0); | ||
724 | apic_read(APIC_ESR); | ||
725 | apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr); | ||
726 | apic_write(APIC_ESR, 0); | ||
727 | apic_read(APIC_ESR); | ||
728 | local_irq_restore(flags); | ||
729 | return 0; | ||
730 | } | ||
731 | |||
732 | /* | ||
733 | * This device has no shutdown method - fully functioning local APICs | ||
734 | * are needed on every CPU up until machine_halt/restart/poweroff. | ||
735 | */ | ||
736 | |||
737 | static struct sysdev_class lapic_sysclass = { | ||
738 | set_kset_name("lapic"), | ||
739 | .resume = lapic_resume, | ||
740 | .suspend = lapic_suspend, | ||
741 | }; | ||
742 | |||
743 | static struct sys_device device_lapic = { | ||
744 | .id = 0, | ||
745 | .cls = &lapic_sysclass, | ||
746 | }; | ||
747 | |||
748 | static void __devinit apic_pm_activate(void) | ||
749 | { | ||
750 | apic_pm_state.active = 1; | ||
751 | } | ||
752 | |||
753 | static int __init init_lapic_sysfs(void) | ||
754 | { | ||
755 | int error; | ||
756 | |||
757 | if (!cpu_has_apic) | ||
758 | return 0; | ||
759 | /* XXX: remove suspend/resume procs if !apic_pm_state.active? */ | ||
760 | |||
761 | error = sysdev_class_register(&lapic_sysclass); | ||
762 | if (!error) | ||
763 | error = sysdev_register(&device_lapic); | ||
764 | return error; | ||
765 | } | ||
766 | device_initcall(init_lapic_sysfs); | ||
767 | |||
768 | #else /* CONFIG_PM */ | ||
769 | |||
770 | static void apic_pm_activate(void) { } | ||
771 | |||
772 | #endif /* CONFIG_PM */ | ||
773 | |||
774 | /* | ||
775 | * Detect and enable local APICs on non-SMP boards. | ||
776 | * Original code written by Keir Fraser. | ||
777 | */ | 923 | */ |
778 | |||
779 | static int __init apic_set_verbosity(char *str) | ||
780 | { | ||
781 | if (strcmp("debug", str) == 0) | ||
782 | apic_verbosity = APIC_DEBUG; | ||
783 | else if (strcmp("verbose", str) == 0) | ||
784 | apic_verbosity = APIC_VERBOSE; | ||
785 | return 1; | ||
786 | } | ||
787 | |||
788 | __setup("apic=", apic_set_verbosity); | ||
789 | |||
790 | static int __init detect_init_APIC (void) | 924 | static int __init detect_init_APIC (void) |
791 | { | 925 | { |
792 | u32 h, l, features; | 926 | u32 h, l, features; |
@@ -798,7 +932,7 @@ static int __init detect_init_APIC (void)
798 | switch (boot_cpu_data.x86_vendor) { | 932 | switch (boot_cpu_data.x86_vendor) { |
799 | case X86_VENDOR_AMD: | 933 | case X86_VENDOR_AMD: |
800 | if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) || | 934 | if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) || |
801 | (boot_cpu_data.x86 == 15)) | 935 | (boot_cpu_data.x86 == 15)) |
802 | break; | 936 | break; |
803 | goto no_apic; | 937 | goto no_apic; |
804 | case X86_VENDOR_INTEL: | 938 | case X86_VENDOR_INTEL: |
@@ -812,23 +946,23 @@ static int __init detect_init_APIC (void)
812 | 946 | ||
813 | if (!cpu_has_apic) { | 947 | if (!cpu_has_apic) { |
814 | /* | 948 | /* |
815 | * Over-ride BIOS and try to enable the local | 949 | * Over-ride BIOS and try to enable the local APIC only if |
816 | * APIC only if "lapic" specified. | 950 | * "lapic" specified. |
817 | */ | 951 | */ |
818 | if (enable_local_apic <= 0) { | 952 | if (enable_local_apic <= 0) { |
819 | printk("Local APIC disabled by BIOS -- " | 953 | printk(KERN_INFO "Local APIC disabled by BIOS -- " |
820 | "you can enable it with \"lapic\"\n"); | 954 | "you can enable it with \"lapic\"\n"); |
821 | return -1; | 955 | return -1; |
822 | } | 956 | } |
823 | /* | 957 | /* |
824 | * Some BIOSes disable the local APIC in the | 958 | * Some BIOSes disable the local APIC in the APIC_BASE |
825 | * APIC_BASE MSR. This can only be done in | 959 | * MSR. This can only be done in software for Intel P6 or later |
826 | * software for Intel P6 or later and AMD K7 | 960 | * and AMD K7 (Model > 1) or later. |
827 | * (Model > 1) or later. | ||
828 | */ | 961 | */ |
829 | rdmsr(MSR_IA32_APICBASE, l, h); | 962 | rdmsr(MSR_IA32_APICBASE, l, h); |
830 | if (!(l & MSR_IA32_APICBASE_ENABLE)) { | 963 | if (!(l & MSR_IA32_APICBASE_ENABLE)) { |
831 | printk("Local APIC disabled by BIOS -- reenabling.\n"); | 964 | printk(KERN_INFO |
965 | "Local APIC disabled by BIOS -- reenabling.\n"); | ||
832 | l &= ~MSR_IA32_APICBASE_BASE; | 966 | l &= ~MSR_IA32_APICBASE_BASE; |
833 | l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE; | 967 | l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE; |
834 | wrmsr(MSR_IA32_APICBASE, l, h); | 968 | wrmsr(MSR_IA32_APICBASE, l, h); |
@@ -841,7 +975,7 @@ static int __init detect_init_APIC (void)
841 | */ | 975 | */ |
842 | features = cpuid_edx(1); | 976 | features = cpuid_edx(1); |
843 | if (!(features & (1 << X86_FEATURE_APIC))) { | 977 | if (!(features & (1 << X86_FEATURE_APIC))) { |
844 | printk("Could not enable APIC!\n"); | 978 | printk(KERN_WARNING "Could not enable APIC!\n"); |
845 | return -1; | 979 | return -1; |
846 | } | 980 | } |
847 | set_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability); | 981 | set_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability); |
@@ -855,17 +989,20 @@ static int __init detect_init_APIC (void)
855 | if (nmi_watchdog != NMI_NONE) | 989 | if (nmi_watchdog != NMI_NONE) |
856 | nmi_watchdog = NMI_LOCAL_APIC; | 990 | nmi_watchdog = NMI_LOCAL_APIC; |
857 | 991 | ||
858 | printk("Found and enabled local APIC!\n"); | 992 | printk(KERN_INFO "Found and enabled local APIC!\n"); |
859 | 993 | ||
860 | apic_pm_activate(); | 994 | apic_pm_activate(); |
861 | 995 | ||
862 | return 0; | 996 | return 0; |
863 | 997 | ||
864 | no_apic: | 998 | no_apic: |
865 | printk("No local APIC present or hardware disabled\n"); | 999 | printk(KERN_INFO "No local APIC present or hardware disabled\n"); |
866 | return -1; | 1000 | return -1; |
867 | } | 1001 | } |
868 | 1002 | ||
1003 | /** | ||
1004 | * init_apic_mappings - initialize APIC mappings | ||
1005 | */ | ||
869 | void __init init_apic_mappings(void) | 1006 | void __init init_apic_mappings(void) |
870 | { | 1007 | { |
871 | unsigned long apic_phys; | 1008 | unsigned long apic_phys; |
@@ -925,381 +1062,88 @@ fake_ioapic_page:
925 | } | 1062 | } |
926 | 1063 | ||
927 | /* | 1064 | /* |
928 | * This part sets up the APIC 32 bit clock in LVTT1, with HZ interrupts | 1065 | * This initializes the IO-APIC and APIC hardware if this is |
929 | * per second. We assume that the caller has already set up the local | 1066 | * a UP kernel. |
930 | * APIC. | ||
931 | * | ||
932 | * The APIC timer is not exactly sync with the external timer chip, it | ||
933 | * closely follows bus clocks. | ||
934 | */ | ||
935 | |||
936 | /* | ||
937 | * The timer chip is already set up at HZ interrupts per second here, | ||
938 | * but we do not accept timer interrupts yet. We only allow the BP | ||
939 | * to calibrate. | ||
940 | */ | ||
941 | static unsigned int __devinit get_8254_timer_count(void) | ||
942 | { | ||
943 | unsigned long flags; | ||
944 | |||
945 | unsigned int count; | ||
946 | |||
947 | spin_lock_irqsave(&i8253_lock, flags); | ||
948 | |||
949 | outb_p(0x00, PIT_MODE); | ||
950 | count = inb_p(PIT_CH0); | ||
951 | count |= inb_p(PIT_CH0) << 8; | ||
952 | |||
953 | spin_unlock_irqrestore(&i8253_lock, flags); | ||
954 | |||
955 | return count; | ||
956 | } | ||
957 | |||
958 | /* next tick in 8254 can be caught by catching timer wraparound */ | ||
959 | static void __devinit wait_8254_wraparound(void) | ||
960 | { | ||
961 | unsigned int curr_count, prev_count; | ||
962 | |||
963 | curr_count = get_8254_timer_count(); | ||
964 | do { | ||
965 | prev_count = curr_count; | ||
966 | curr_count = get_8254_timer_count(); | ||
967 | |||
968 | /* workaround for broken Mercury/Neptune */ | ||
969 | if (prev_count >= curr_count + 0x100) | ||
970 | curr_count = get_8254_timer_count(); | ||
971 | |||
972 | } while (prev_count >= curr_count); | ||
973 | } | ||
974 | |||
975 | /* | ||
976 | * Default initialization for 8254 timers. If we use other timers like HPET, | ||
977 | * we override this later | ||
978 | */ | ||
979 | void (*wait_timer_tick)(void) __devinitdata = wait_8254_wraparound; | ||
980 | |||
981 | /* | ||
982 | * This function sets up the local APIC timer, with a timeout of | ||
983 | * 'clocks' APIC bus clock. During calibration we actually call | ||
984 | * this function twice on the boot CPU, once with a bogus timeout | ||
985 | * value, second time for real. The other (noncalibrating) CPUs | ||
986 | * call this function only once, with the real, calibrated value. | ||
987 | * | ||
988 | * We do reads before writes even if unnecessary, to get around the | ||
989 | * P5 APIC double write bug. | ||
990 | */ | 1067 | */ |
991 | 1068 | int __init APIC_init_uniprocessor (void) | |
992 | #define APIC_DIVISOR 16 | ||
993 | |||
994 | static void __setup_APIC_LVTT(unsigned int clocks) | ||
995 | { | 1069 | { |
996 | unsigned int lvtt_value, tmp_value, ver; | 1070 | if (enable_local_apic < 0) |
997 | int cpu = smp_processor_id(); | 1071 | clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability); |
998 | |||
999 | ver = GET_APIC_VERSION(apic_read(APIC_LVR)); | ||
1000 | lvtt_value = APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR; | ||
1001 | if (!APIC_INTEGRATED(ver)) | ||
1002 | lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV); | ||
1003 | |||
1004 | if (cpu_isset(cpu, timer_bcast_ipi)) | ||
1005 | lvtt_value |= APIC_LVT_MASKED; | ||
1006 | 1072 | ||
1007 | apic_write_around(APIC_LVTT, lvtt_value); | 1073 | if (!smp_found_config && !cpu_has_apic) |
1074 | return -1; | ||
1008 | 1075 | ||
1009 | /* | 1076 | /* |
1010 | * Divide PICLK by 16 | 1077 | * Complain if the BIOS pretends there is one. |
1011 | */ | 1078 | */ |
1012 | tmp_value = apic_read(APIC_TDCR); | 1079 | if (!cpu_has_apic && |
1013 | apic_write_around(APIC_TDCR, (tmp_value | 1080 | APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) { |
1014 | & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) | 1081 | printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", |
1015 | | APIC_TDR_DIV_16); | 1082 | boot_cpu_physical_apicid); |
1016 | 1083 | clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability); | |
1017 | apic_write_around(APIC_TMICT, clocks/APIC_DIVISOR); | 1084 | return -1; |
1018 | } | 1085 | } |
1019 | 1086 | ||
1020 | static void __devinit setup_APIC_timer(unsigned int clocks) | 1087 | verify_local_APIC(); |
1021 | { | ||
1022 | unsigned long flags; | ||
1023 | 1088 | ||
1024 | local_irq_save(flags); | 1089 | connect_bsp_APIC(); |
1025 | 1090 | ||
1026 | /* | 1091 | /* |
1027 | * Wait for IRQ0's slice: | 1092 | * Hack: In case of kdump, after a crash, kernel might be booting |
1093 | * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid | ||
1094 | * might be zero if read from MP tables. Get it from LAPIC. | ||
1028 | */ | 1095 | */ |
1029 | wait_timer_tick(); | 1096 | #ifdef CONFIG_CRASH_DUMP |
1097 | boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); | ||
1098 | #endif | ||
1099 | phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); | ||
1030 | 1100 | ||
1031 | __setup_APIC_LVTT(clocks); | 1101 | setup_local_APIC(); |
1032 | 1102 | ||
1033 | local_irq_restore(flags); | 1103 | #ifdef CONFIG_X86_IO_APIC |
1104 | if (smp_found_config) | ||
1105 | if (!skip_ioapic_setup && nr_ioapics) | ||
1106 | setup_IO_APIC(); | ||
1107 | #endif | ||
1108 | setup_boot_clock(); | ||
1109 | |||
1110 | return 0; | ||
1034 | } | 1111 | } |
1035 | 1112 | ||
1036 | /* | 1113 | /* |
1037 | * In this function we calibrate APIC bus clocks to the external | 1114 | * APIC command line parameters |
1038 | * timer. Unfortunately we cannot use jiffies and the timer irq | ||
1039 | * to calibrate, since some later bootup code depends on getting | ||
1040 | * the first irq? Ugh. | ||
1041 | * | ||
1042 | * We want to do the calibration only once since we | ||
1043 | * want to have local timer irqs syncron. CPUs connected | ||
1044 | * by the same APIC bus have the very same bus frequency. | ||
1045 | * And we want to have irqs off anyways, no accidental | ||
1046 | * APIC irq that way. | ||
1047 | */ | 1115 | */ |
1048 | 1116 | static int __init parse_lapic(char *arg) | |
1049 | static int __init calibrate_APIC_clock(void) | ||
1050 | { | ||
1051 | unsigned long long t1 = 0, t2 = 0; | ||
1052 | long tt1, tt2; | ||
1053 | long result; | ||
1054 | int i; | ||
1055 | const int LOOPS = HZ/10; | ||
1056 | |||
1057 | apic_printk(APIC_VERBOSE, "calibrating APIC timer ...\n"); | ||
1058 | |||
1059 | /* | ||
1060 | * Put whatever arbitrary (but long enough) timeout | ||
1061 | * value into the APIC clock, we just want to get the | ||
1062 | * counter running for calibration. | ||
1063 | */ | ||
1064 | __setup_APIC_LVTT(1000000000); | ||
1065 | |||
1066 | /* | ||
1067 | * The timer chip counts down to zero. Let's wait | ||
1068 | * for a wraparound to start exact measurement: | ||
1069 | * (the current tick might have been already half done) | ||
1070 | */ | ||
1071 | |||
1072 | wait_timer_tick(); | ||
1073 | |||
1074 | /* | ||
1075 | * We wrapped around just now. Let's start: | ||
1076 | */ | ||
1077 | if (cpu_has_tsc) | ||
1078 | rdtscll(t1); | ||
1079 | tt1 = apic_read(APIC_TMCCT); | ||
1080 | |||
1081 | /* | ||
1082 | * Let's wait LOOPS wraprounds: | ||
1083 | */ | ||
1084 | for (i = 0; i < LOOPS; i++) | ||
1085 | wait_timer_tick(); | ||
1086 | |||
1087 | tt2 = apic_read(APIC_TMCCT); | ||
1088 | if (cpu_has_tsc) | ||
1089 | rdtscll(t2); | ||
1090 | |||
1091 | /* | ||
1092 | * The APIC bus clock counter is 32 bits only, it | ||
1093 | * might have overflown, but note that we use signed | ||
1094 | * longs, thus no extra care needed. | ||
1095 | * | ||
1096 | * underflown to be exact, as the timer counts down ;) | ||
1097 | */ | ||
1098 | |||
1099 | result = (tt1-tt2)*APIC_DIVISOR/LOOPS; | ||
1100 | |||
1101 | if (cpu_has_tsc) | ||
1102 | apic_printk(APIC_VERBOSE, "..... CPU clock speed is " | ||
1103 | "%ld.%04ld MHz.\n", | ||
1104 | ((long)(t2-t1)/LOOPS)/(1000000/HZ), | ||
1105 | ((long)(t2-t1)/LOOPS)%(1000000/HZ)); | ||
1106 | |||
1107 | apic_printk(APIC_VERBOSE, "..... host bus clock speed is " | ||
1108 | "%ld.%04ld MHz.\n", | ||
1109 | result/(1000000/HZ), | ||
1110 | result%(1000000/HZ)); | ||
1111 | |||
1112 | return result; | ||
1113 | } | ||
1114 | |||
1115 | static unsigned int calibration_result; | ||
1116 | |||
1117 | void __init setup_boot_APIC_clock(void) | ||
1118 | { | ||
1119 | unsigned long flags; | ||
1120 | apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"); | ||
1121 | using_apic_timer = 1; | ||
1122 | |||
1123 | local_irq_save(flags); | ||
1124 | |||
1125 | calibration_result = calibrate_APIC_clock(); | ||
1126 | /* | ||
1127 | * Now set up the timer for real. | ||
1128 | */ | ||
1129 | setup_APIC_timer(calibration_result); | ||
1130 | |||
1131 | local_irq_restore(flags); | ||
1132 | } | ||
1133 | |||
1134 | void __devinit setup_secondary_APIC_clock(void) | ||
1135 | { | ||
1136 | setup_APIC_timer(calibration_result); | ||
1137 | } | ||
1138 | |||
1139 | void disable_APIC_timer(void) | ||
1140 | { | ||
1141 | if (using_apic_timer) { | ||
1142 | unsigned long v; | ||
1143 | |||
1144 | v = apic_read(APIC_LVTT); | ||
1145 | /* | ||
1146 | * When an illegal vector value (0-15) is written to an LVT | ||
1147 | * entry and delivery mode is Fixed, the APIC may signal an | ||
1148 | * illegal vector error, with out regard to whether the mask | ||
1149 | * bit is set or whether an interrupt is actually seen on input. | ||
1150 | * | ||
1151 | * Boot sequence might call this function when the LVTT has | ||
1152 | * '0' vector value. So make sure vector field is set to | ||
1153 | * valid value. | ||
1154 | */ | ||
1155 | v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); | ||
1156 | apic_write_around(APIC_LVTT, v); | ||
1157 | } | ||
1158 | } | ||
1159 | |||
1160 | void enable_APIC_timer(void) | ||
1161 | { | 1117 | { |
1162 | int cpu = smp_processor_id(); | 1118 | enable_local_apic = 1; |
1163 | 1119 | return 0; | |
1164 | if (using_apic_timer && | ||
1165 | !cpu_isset(cpu, timer_bcast_ipi)) { | ||
1166 | unsigned long v; | ||
1167 | |||
1168 | v = apic_read(APIC_LVTT); | ||
1169 | apic_write_around(APIC_LVTT, v & ~APIC_LVT_MASKED); | ||
1170 | } | ||
1171 | } | 1120 | } |
1121 | early_param("lapic", parse_lapic); | ||
1172 | 1122 | ||
1173 | void switch_APIC_timer_to_ipi(void *cpumask) | 1123 | static int __init parse_nolapic(char *arg) |
1174 | { | 1124 | { |
1175 | cpumask_t mask = *(cpumask_t *)cpumask; | 1125 | enable_local_apic = -1; |
1176 | int cpu = smp_processor_id(); | 1126 | clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability); |
1177 | 1127 | return 0; | |
1178 | if (cpu_isset(cpu, mask) && | ||
1179 | !cpu_isset(cpu, timer_bcast_ipi)) { | ||
1180 | disable_APIC_timer(); | ||
1181 | cpu_set(cpu, timer_bcast_ipi); | ||
1182 | } | ||
1183 | } | 1128 | } |
1184 | EXPORT_SYMBOL(switch_APIC_timer_to_ipi); | 1129 | early_param("nolapic", parse_nolapic); |
1185 | 1130 | ||
1186 | void switch_ipi_to_APIC_timer(void *cpumask) | 1131 | static int __init apic_set_verbosity(char *str) |
1187 | { | 1132 | { |
1188 | cpumask_t mask = *(cpumask_t *)cpumask; | 1133 | if (strcmp("debug", str) == 0) |
1189 | int cpu = smp_processor_id(); | 1134 | apic_verbosity = APIC_DEBUG; |
1190 | 1135 | else if (strcmp("verbose", str) == 0) | |
1191 | if (cpu_isset(cpu, mask) && | 1136 | apic_verbosity = APIC_VERBOSE; |
1192 | cpu_isset(cpu, timer_bcast_ipi)) { | 1137 | return 1; |
1193 | cpu_clear(cpu, timer_bcast_ipi); | ||
1194 | enable_APIC_timer(); | ||
1195 | } | ||
1196 | } | 1138 | } |
1197 | EXPORT_SYMBOL(switch_ipi_to_APIC_timer); | ||
1198 | 1139 | ||
1199 | #undef APIC_DIVISOR | 1140 | __setup("apic=", apic_set_verbosity); |
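apic_set_verbosity() only raises apic_verbosity for "apic=verbose" or "apic=debug" on the kernel command line; every apic_printk(APIC_VERBOSE, ...) call in this file is gated on that value. A minimal sketch of the gating idea follows - the real macro lives in the APIC header, so treat this exact form as an assumption:

/* Sketch only: print when the requested level is at or below apic_verbosity. */
#define apic_printk(level, fmt, args...)		\
do {							\
	if ((level) <= apic_verbosity)			\
		printk(fmt, ##args);			\
} while (0)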
1200 | |||
1201 | /* | ||
1202 | * Local timer interrupt handler. It does both profiling and | ||
1203 | * process statistics/rescheduling. | ||
1204 | * | ||
1205 | * We do profiling on every local tick; statistics/rescheduling | ||
1206 | * happen only every 'profiling multiplier' ticks. The default | ||
1207 | * multiplier is 1 and it can be changed by writing the new multiplier | ||
1208 | * value into /proc/profile. | ||
1209 | */ | ||
1210 | |||
1211 | inline void smp_local_timer_interrupt(void) | ||
1212 | { | ||
1213 | profile_tick(CPU_PROFILING); | ||
1214 | #ifdef CONFIG_SMP | ||
1215 | update_process_times(user_mode_vm(get_irq_regs())); | ||
1216 | #endif | ||
1217 | 1141 | ||
1218 | /* | ||
1219 | * We take the 'long' return path, and there every subsystem | ||
1220 | * grabs the appropriate locks (kernel lock / irq lock). | ||
1221 | * | ||
1222 | * we might want to decouple profiling from the 'long path', | ||
1223 | * and do the profiling totally in assembly. | ||
1224 | * | ||
1225 | * Currently this isn't too much of an issue (performance-wise); | ||
1226 | * we can take more than 100K local irqs per second on a 100 MHz P5. | ||
1227 | */ | ||
1228 | } | ||
1229 | 1142 | ||
1230 | /* | 1143 | /* |
1231 | * Local APIC timer interrupt. This is the most natural way for doing | 1144 | * Local APIC interrupts |
1232 | * local interrupts, but local timer interrupts can be emulated by | ||
1233 | * broadcast interrupts too. [in case the hw doesn't support APIC timers] | ||
1234 | * | ||
1235 | * [ if a single-CPU system runs an SMP kernel then we call the local | ||
1236 | * interrupt as well. Thus we cannot inline the local irq ... ] | ||
1237 | */ | 1145 | */ |
1238 | 1146 | ||
1239 | fastcall void smp_apic_timer_interrupt(struct pt_regs *regs) | ||
1240 | { | ||
1241 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
1242 | int cpu = smp_processor_id(); | ||
1243 | |||
1244 | /* | ||
1245 | * the NMI deadlock-detector uses this. | ||
1246 | */ | ||
1247 | per_cpu(irq_stat, cpu).apic_timer_irqs++; | ||
1248 | |||
1249 | /* | ||
1250 | * NOTE! We'd better ACK the irq immediately, | ||
1251 | * because timer handling can be slow. | ||
1252 | */ | ||
1253 | ack_APIC_irq(); | ||
1254 | /* | ||
1255 | * update_process_times() expects us to have done irq_enter(). | ||
1256 | * Besides, if we don't, timer interrupts ignore the global | ||
1257 | * interrupt lock, which is the WrongThing (tm) to do. | ||
1258 | */ | ||
1259 | exit_idle(); | ||
1260 | irq_enter(); | ||
1261 | smp_local_timer_interrupt(); | ||
1262 | irq_exit(); | ||
1263 | set_irq_regs(old_regs); | ||
1264 | } | ||
1265 | |||
1266 | #ifndef CONFIG_SMP | ||
1267 | static void up_apic_timer_interrupt_call(void) | ||
1268 | { | ||
1269 | int cpu = smp_processor_id(); | ||
1270 | |||
1271 | /* | ||
1272 | * the NMI deadlock-detector uses this. | ||
1273 | */ | ||
1274 | per_cpu(irq_stat, cpu).apic_timer_irqs++; | ||
1275 | |||
1276 | smp_local_timer_interrupt(); | ||
1277 | } | ||
1278 | #endif | ||
1279 | |||
1280 | void smp_send_timer_broadcast_ipi(void) | ||
1281 | { | ||
1282 | cpumask_t mask; | ||
1283 | |||
1284 | cpus_and(mask, cpu_online_map, timer_bcast_ipi); | ||
1285 | if (!cpus_empty(mask)) { | ||
1286 | #ifdef CONFIG_SMP | ||
1287 | send_IPI_mask(mask, LOCAL_TIMER_VECTOR); | ||
1288 | #else | ||
1289 | /* | ||
1290 | * We can call the apic timer interrupt handler directly | ||
1291 | * in the UP case, minus all the irq-related functions. | ||
1292 | */ | ||
1293 | up_apic_timer_interrupt_call(); | ||
1294 | #endif | ||
1295 | } | ||
1296 | } | ||
1297 | |||
1298 | int setup_profiling_timer(unsigned int multiplier) | ||
1299 | { | ||
1300 | return -EINVAL; | ||
1301 | } | ||
1302 | |||
1303 | /* | 1147 | /* |
1304 | * This interrupt should _never_ happen with our APIC/SMP architecture | 1148 | * This interrupt should _never_ happen with our APIC/SMP architecture |
1305 | */ | 1149 | */ |
@@ -1319,15 +1163,14 @@ fastcall void smp_spurious_interrupt(struct pt_regs *regs) | |||
1319 | ack_APIC_irq(); | 1163 | ack_APIC_irq(); |
1320 | 1164 | ||
1321 | /* see sw-dev-man vol 3, chapter 7.4.13.5 */ | 1165 | /* see sw-dev-man vol 3, chapter 7.4.13.5 */ |
1322 | printk(KERN_INFO "spurious APIC interrupt on CPU#%d, should never happen.\n", | 1166 | printk(KERN_INFO "spurious APIC interrupt on CPU#%d, " |
1323 | smp_processor_id()); | 1167 | "should never happen.\n", smp_processor_id()); |
1324 | irq_exit(); | 1168 | irq_exit(); |
1325 | } | 1169 | } |
1326 | 1170 | ||
1327 | /* | 1171 | /* |
1328 | * This interrupt should never happen with our APIC/SMP architecture | 1172 | * This interrupt should never happen with our APIC/SMP architecture |
1329 | */ | 1173 | */ |
1330 | |||
1331 | fastcall void smp_error_interrupt(struct pt_regs *regs) | 1174 | fastcall void smp_error_interrupt(struct pt_regs *regs) |
1332 | { | 1175 | { |
1333 | unsigned long v, v1; | 1176 | unsigned long v, v1; |
@@ -1352,69 +1195,261 @@ fastcall void smp_error_interrupt(struct pt_regs *regs) | |||
1352 | 7: Illegal register address | 1195 | 7: Illegal register address |
1353 | */ | 1196 | */ |
1354 | printk (KERN_DEBUG "APIC error on CPU%d: %02lx(%02lx)\n", | 1197 | printk (KERN_DEBUG "APIC error on CPU%d: %02lx(%02lx)\n", |
1355 | smp_processor_id(), v , v1); | 1198 | smp_processor_id(), v , v1); |
1356 | irq_exit(); | 1199 | irq_exit(); |
1357 | } | 1200 | } |
1358 | 1201 | ||
1359 | /* | 1202 | /* |
1360 | * This initializes the IO-APIC and APIC hardware if this is | 1203 | * Initialize APIC interrupts |
1361 | * a UP kernel. | ||
1362 | */ | 1204 | */ |
1363 | int __init APIC_init_uniprocessor (void) | 1205 | void __init apic_intr_init(void) |
1364 | { | 1206 | { |
1365 | if (enable_local_apic < 0) | 1207 | #ifdef CONFIG_SMP |
1366 | clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability); | 1208 | smp_intr_init(); |
1209 | #endif | ||
1210 | /* self generated IPI for local APIC timer */ | ||
1211 | set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt); | ||
1367 | 1212 | ||
1368 | if (!smp_found_config && !cpu_has_apic) | 1213 | /* IPI vectors for APIC spurious and error interrupts */ |
1369 | return -1; | 1214 | set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); |
1215 | set_intr_gate(ERROR_APIC_VECTOR, error_interrupt); | ||
1370 | 1216 | ||
1371 | /* | 1217 | /* thermal monitor LVT interrupt */ |
1372 | * Complain if the BIOS pretends there is one. | 1218 | #ifdef CONFIG_X86_MCE_P4THERMAL |
1373 | */ | 1219 | set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); |
1374 | if (!cpu_has_apic && APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) { | 1220 | #endif |
1375 | printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", | 1221 | } |
1376 | boot_cpu_physical_apicid); | 1222 | |
1377 | clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability); | 1223 | /** |
1378 | return -1; | 1224 | * connect_bsp_APIC - attach the APIC to the interrupt system |
1225 | */ | ||
1226 | void __init connect_bsp_APIC(void) | ||
1227 | { | ||
1228 | if (pic_mode) { | ||
1229 | /* | ||
1230 | * Do not trust the local APIC being empty at bootup. | ||
1231 | */ | ||
1232 | clear_local_APIC(); | ||
1233 | /* | ||
1234 | * PIC mode, enable APIC mode in the IMCR, i.e. connect BSP's | ||
1235 | * local APIC to INT and NMI lines. | ||
1236 | */ | ||
1237 | apic_printk(APIC_VERBOSE, "leaving PIC mode, " | ||
1238 | "enabling APIC mode.\n"); | ||
1239 | outb(0x70, 0x22); | ||
1240 | outb(0x01, 0x23); | ||
1379 | } | 1241 | } |
1242 | enable_apic_mode(); | ||
1243 | } | ||
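The paired outb() calls in connect_bsp_APIC() and disconnect_bsp_APIC() follow the IMCR access protocol from the MP specification: writing 0x70 to port 0x22 selects the Interrupt Mode Configuration Register, and the value then written to port 0x23 picks the routing - 0x01 sends INTR/NMI through the local APIC, 0x00 sends them straight to the PIC. A hypothetical helper (not part of this patch) that names the ports could look like:

/* Hypothetical helper: program the IMCR with the given routing value. */
static inline void imcr_set(unsigned char value)
{
	outb(0x70, 0x22);	/* select the IMCR via the register-select port */
	outb(value, 0x23);	/* 0x01: route INTR/NMI to the APIC, 0x00: to the PIC */
}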
1380 | 1244 | ||
1381 | verify_local_APIC(); | 1245 | /** |
1246 | * disconnect_bsp_APIC - detach the APIC from the interrupt system | ||
1247 | * @virt_wire_setup: indicates whether virtual wire mode is selected | ||
1248 | * | ||
1249 | * Virtual wire mode is necessary to deliver legacy interrupts even when the | ||
1250 | * APIC is disabled. | ||
1251 | */ | ||
1252 | void disconnect_bsp_APIC(int virt_wire_setup) | ||
1253 | { | ||
1254 | if (pic_mode) { | ||
1255 | /* | ||
1256 | * Put the board back into PIC mode (has an effect only on | ||
1257 | * certain older boards). Note that APIC interrupts, including | ||
1258 | * IPIs, won't work beyond this point! The only exceptions are | ||
1259 | * INIT IPIs. | ||
1260 | */ | ||
1261 | apic_printk(APIC_VERBOSE, "disabling APIC mode, " | ||
1262 | "entering PIC mode.\n"); | ||
1263 | outb(0x70, 0x22); | ||
1264 | outb(0x00, 0x23); | ||
1265 | } else { | ||
1266 | /* Go back to Virtual Wire compatibility mode */ | ||
1267 | unsigned long value; | ||
1382 | 1268 | ||
1383 | connect_bsp_APIC(); | 1269 | /* For the spurious interrupt use vector F, and enable it */ |
1270 | value = apic_read(APIC_SPIV); | ||
1271 | value &= ~APIC_VECTOR_MASK; | ||
1272 | value |= APIC_SPIV_APIC_ENABLED; | ||
1273 | value |= 0xf; | ||
1274 | apic_write_around(APIC_SPIV, value); | ||
1384 | 1275 | ||
1385 | /* | 1276 | if (!virt_wire_setup) { |
1386 | * Hack: In case of kdump, after a crash, kernel might be booting | 1277 | /* |
1387 | * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid | 1278 | * For LVT0 make it edge triggered, active high, |
1388 | * might be zero if read from MP tables. Get it from LAPIC. | 1279 | * external and enabled |
1389 | */ | 1280 | */ |
1390 | #ifdef CONFIG_CRASH_DUMP | 1281 | value = apic_read(APIC_LVT0); |
1391 | boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); | 1282 | value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING | |
1392 | #endif | 1283 | APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR | |
1393 | phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); | 1284 | APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED ); |
1285 | value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING; | ||
1286 | value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT); | ||
1287 | apic_write_around(APIC_LVT0, value); | ||
1288 | } else { | ||
1289 | /* Disable LVT0 */ | ||
1290 | apic_write_around(APIC_LVT0, APIC_LVT_MASKED); | ||
1291 | } | ||
1394 | 1292 | ||
1395 | setup_local_APIC(); | 1293 | /* |
1294 | * For LVT1 make it edge triggered, active high, nmi and | ||
1295 | * enabled | ||
1296 | */ | ||
1297 | value = apic_read(APIC_LVT1); | ||
1298 | value &= ~( | ||
1299 | APIC_MODE_MASK | APIC_SEND_PENDING | | ||
1300 | APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR | | ||
1301 | APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED); | ||
1302 | value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING; | ||
1303 | value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI); | ||
1304 | apic_write_around(APIC_LVT1, value); | ||
1305 | } | ||
1306 | } | ||
1396 | 1307 | ||
1397 | #ifdef CONFIG_X86_IO_APIC | 1308 | /* |
1398 | if (smp_found_config) | 1309 | * Power management |
1399 | if (!skip_ioapic_setup && nr_ioapics) | 1310 | */ |
1400 | setup_IO_APIC(); | 1311 | #ifdef CONFIG_PM |
1312 | |||
1313 | static struct { | ||
1314 | int active; | ||
1315 | /* r/w apic fields */ | ||
1316 | unsigned int apic_id; | ||
1317 | unsigned int apic_taskpri; | ||
1318 | unsigned int apic_ldr; | ||
1319 | unsigned int apic_dfr; | ||
1320 | unsigned int apic_spiv; | ||
1321 | unsigned int apic_lvtt; | ||
1322 | unsigned int apic_lvtpc; | ||
1323 | unsigned int apic_lvt0; | ||
1324 | unsigned int apic_lvt1; | ||
1325 | unsigned int apic_lvterr; | ||
1326 | unsigned int apic_tmict; | ||
1327 | unsigned int apic_tdcr; | ||
1328 | unsigned int apic_thmr; | ||
1329 | } apic_pm_state; | ||
1330 | |||
1331 | static int lapic_suspend(struct sys_device *dev, pm_message_t state) | ||
1332 | { | ||
1333 | unsigned long flags; | ||
1334 | int maxlvt; | ||
1335 | |||
1336 | if (!apic_pm_state.active) | ||
1337 | return 0; | ||
1338 | |||
1339 | maxlvt = lapic_get_maxlvt(); | ||
1340 | |||
1341 | apic_pm_state.apic_id = apic_read(APIC_ID); | ||
1342 | apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI); | ||
1343 | apic_pm_state.apic_ldr = apic_read(APIC_LDR); | ||
1344 | apic_pm_state.apic_dfr = apic_read(APIC_DFR); | ||
1345 | apic_pm_state.apic_spiv = apic_read(APIC_SPIV); | ||
1346 | apic_pm_state.apic_lvtt = apic_read(APIC_LVTT); | ||
1347 | if (maxlvt >= 4) | ||
1348 | apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC); | ||
1349 | apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0); | ||
1350 | apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1); | ||
1351 | apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR); | ||
1352 | apic_pm_state.apic_tmict = apic_read(APIC_TMICT); | ||
1353 | apic_pm_state.apic_tdcr = apic_read(APIC_TDCR); | ||
1354 | #ifdef CONFIG_X86_MCE_P4THERMAL | ||
1355 | if (maxlvt >= 5) | ||
1356 | apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR); | ||
1401 | #endif | 1357 | #endif |
1402 | setup_boot_clock(); | ||
1403 | 1358 | ||
1359 | local_irq_save(flags); | ||
1360 | disable_local_APIC(); | ||
1361 | local_irq_restore(flags); | ||
1404 | return 0; | 1362 | return 0; |
1405 | } | 1363 | } |
1406 | 1364 | ||
1407 | static int __init parse_lapic(char *arg) | 1365 | static int lapic_resume(struct sys_device *dev) |
1408 | { | 1366 | { |
1409 | lapic_enable(); | 1367 | unsigned int l, h; |
1368 | unsigned long flags; | ||
1369 | int maxlvt; | ||
1370 | |||
1371 | if (!apic_pm_state.active) | ||
1372 | return 0; | ||
1373 | |||
1374 | maxlvt = lapic_get_maxlvt(); | ||
1375 | |||
1376 | local_irq_save(flags); | ||
1377 | |||
1378 | /* | ||
1379 | * Make sure the APICBASE points to the right address | ||
1380 | * | ||
1381 | * FIXME! This will be wrong if we ever support suspend on | ||
1382 | * SMP! We'll need to do this as part of the CPU restore! | ||
1383 | */ | ||
1384 | rdmsr(MSR_IA32_APICBASE, l, h); | ||
1385 | l &= ~MSR_IA32_APICBASE_BASE; | ||
1386 | l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr; | ||
1387 | wrmsr(MSR_IA32_APICBASE, l, h); | ||
1388 | |||
1389 | apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED); | ||
1390 | apic_write(APIC_ID, apic_pm_state.apic_id); | ||
1391 | apic_write(APIC_DFR, apic_pm_state.apic_dfr); | ||
1392 | apic_write(APIC_LDR, apic_pm_state.apic_ldr); | ||
1393 | apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri); | ||
1394 | apic_write(APIC_SPIV, apic_pm_state.apic_spiv); | ||
1395 | apic_write(APIC_LVT0, apic_pm_state.apic_lvt0); | ||
1396 | apic_write(APIC_LVT1, apic_pm_state.apic_lvt1); | ||
1397 | #ifdef CONFIG_X86_MCE_P4THERMAL | ||
1398 | if (maxlvt >= 5) | ||
1399 | apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr); | ||
1400 | #endif | ||
1401 | if (maxlvt >= 4) | ||
1402 | apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc); | ||
1403 | apic_write(APIC_LVTT, apic_pm_state.apic_lvtt); | ||
1404 | apic_write(APIC_TDCR, apic_pm_state.apic_tdcr); | ||
1405 | apic_write(APIC_TMICT, apic_pm_state.apic_tmict); | ||
1406 | apic_write(APIC_ESR, 0); | ||
1407 | apic_read(APIC_ESR); | ||
1408 | apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr); | ||
1409 | apic_write(APIC_ESR, 0); | ||
1410 | apic_read(APIC_ESR); | ||
1411 | local_irq_restore(flags); | ||
1410 | return 0; | 1412 | return 0; |
1411 | } | 1413 | } |
1412 | early_param("lapic", parse_lapic); | ||
1413 | 1414 | ||
1414 | static int __init parse_nolapic(char *arg) | 1415 | /* |
1416 | * This device has no shutdown method - fully functioning local APICs | ||
1417 | * are needed on every CPU up until machine_halt/restart/poweroff. | ||
1418 | */ | ||
1419 | |||
1420 | static struct sysdev_class lapic_sysclass = { | ||
1421 | set_kset_name("lapic"), | ||
1422 | .resume = lapic_resume, | ||
1423 | .suspend = lapic_suspend, | ||
1424 | }; | ||
1425 | |||
1426 | static struct sys_device device_lapic = { | ||
1427 | .id = 0, | ||
1428 | .cls = &lapic_sysclass, | ||
1429 | }; | ||
1430 | |||
1431 | static void __devinit apic_pm_activate(void) | ||
1415 | { | 1432 | { |
1416 | lapic_disable(); | 1433 | apic_pm_state.active = 1; |
1417 | return 0; | ||
1418 | } | 1434 | } |
1419 | early_param("nolapic", parse_nolapic); | ||
1420 | 1435 | ||
1436 | static int __init init_lapic_sysfs(void) | ||
1437 | { | ||
1438 | int error; | ||
1439 | |||
1440 | if (!cpu_has_apic) | ||
1441 | return 0; | ||
1442 | /* XXX: remove suspend/resume procs if !apic_pm_state.active? */ | ||
1443 | |||
1444 | error = sysdev_class_register(&lapic_sysclass); | ||
1445 | if (!error) | ||
1446 | error = sysdev_register(&device_lapic); | ||
1447 | return error; | ||
1448 | } | ||
1449 | device_initcall(init_lapic_sysfs); | ||
1450 | |||
1451 | #else /* CONFIG_PM */ | ||
1452 | |||
1453 | static void apic_pm_activate(void) { } | ||
1454 | |||
1455 | #endif /* CONFIG_PM */ | ||
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c index c76be1110922..f003a4ce0a93 100644 --- a/arch/i386/kernel/io_apic.c +++ b/arch/i386/kernel/io_apic.c | |||
@@ -1588,7 +1588,7 @@ void /*__init*/ print_local_APIC(void * dummy) | |||
1588 | v = apic_read(APIC_LVR); | 1588 | v = apic_read(APIC_LVR); |
1589 | printk(KERN_INFO "... APIC VERSION: %08x\n", v); | 1589 | printk(KERN_INFO "... APIC VERSION: %08x\n", v); |
1590 | ver = GET_APIC_VERSION(v); | 1590 | ver = GET_APIC_VERSION(v); |
1591 | maxlvt = get_maxlvt(); | 1591 | maxlvt = lapic_get_maxlvt(); |
1592 | 1592 | ||
1593 | v = apic_read(APIC_TASKPRI); | 1593 | v = apic_read(APIC_TASKPRI); |
1594 | printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK); | 1594 | printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK); |
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c index 5785d84103a6..0f2ca590bf23 100644 --- a/arch/i386/kernel/irq.c +++ b/arch/i386/kernel/irq.c | |||
@@ -10,7 +10,6 @@ | |||
10 | * io_apic.c.) | 10 | * io_apic.c.) |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <asm/uaccess.h> | ||
14 | #include <linux/module.h> | 13 | #include <linux/module.h> |
15 | #include <linux/seq_file.h> | 14 | #include <linux/seq_file.h> |
16 | #include <linux/interrupt.h> | 15 | #include <linux/interrupt.h> |
@@ -21,19 +20,34 @@ | |||
21 | 20 | ||
22 | #include <asm/idle.h> | 21 | #include <asm/idle.h> |
23 | 22 | ||
23 | #include <asm/apic.h> | ||
24 | #include <asm/uaccess.h> | ||
25 | |||
24 | DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp; | 26 | DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp; |
25 | EXPORT_PER_CPU_SYMBOL(irq_stat); | 27 | EXPORT_PER_CPU_SYMBOL(irq_stat); |
26 | 28 | ||
27 | #ifndef CONFIG_X86_LOCAL_APIC | ||
28 | /* | 29 | /* |
29 | * 'what should we do if we get a hw irq event on an illegal vector'. | 30 | * 'what should we do if we get a hw irq event on an illegal vector'. |
30 | * each architecture has to answer this themselves. | 31 | * each architecture has to answer this themselves. |
31 | */ | 32 | */ |
32 | void ack_bad_irq(unsigned int irq) | 33 | void ack_bad_irq(unsigned int irq) |
33 | { | 34 | { |
34 | printk("unexpected IRQ trap at vector %02x\n", irq); | 35 | printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq); |
35 | } | 36 | |
37 | #ifdef CONFIG_X86_LOCAL_APIC | ||
38 | /* | ||
39 | * Currently unexpected vectors happen only on SMP and APIC. | ||
40 | * We _must_ ack these because every local APIC has only N | ||
41 | * irq slots per priority level, and a 'hanging, unacked' IRQ | ||
42 | * holds up an irq slot - in excessive cases (when multiple | ||
43 | * unexpected vectors occur) that might lock up the APIC | ||
44 | * completely. | ||
45 | * But only ack when the APIC is enabled -AK | ||
46 | */ | ||
47 | if (cpu_has_apic) | ||
48 | ack_APIC_irq(); | ||
36 | #endif | 49 | #endif |
50 | } | ||
37 | 51 | ||
38 | #ifdef CONFIG_4KSTACKS | 52 | #ifdef CONFIG_4KSTACKS |
39 | /* | 53 | /* |
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c index 6ddffe8aabb2..6cdd941fc2f2 100644 --- a/arch/i386/kernel/smpboot.c +++ b/arch/i386/kernel/smpboot.c | |||
@@ -600,7 +600,7 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip) | |||
600 | /* | 600 | /* |
601 | * Due to the Pentium erratum 3AP. | 601 | * Due to the Pentium erratum 3AP. |
602 | */ | 602 | */ |
603 | maxlvt = get_maxlvt(); | 603 | maxlvt = lapic_get_maxlvt(); |
604 | if (maxlvt > 3) { | 604 | if (maxlvt > 3) { |
605 | apic_read_around(APIC_SPIV); | 605 | apic_read_around(APIC_SPIV); |
606 | apic_write(APIC_ESR, 0); | 606 | apic_write(APIC_ESR, 0); |
@@ -697,7 +697,7 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) | |||
697 | */ | 697 | */ |
698 | Dprintk("#startup loops: %d.\n", num_starts); | 698 | Dprintk("#startup loops: %d.\n", num_starts); |
699 | 699 | ||
700 | maxlvt = get_maxlvt(); | 700 | maxlvt = lapic_get_maxlvt(); |
701 | 701 | ||
702 | for (j = 1; j <= num_starts; j++) { | 702 | for (j = 1; j <= num_starts; j++) { |
703 | Dprintk("Sending STARTUP #%d.\n",j); | 703 | Dprintk("Sending STARTUP #%d.\n",j); |