aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel/apic_32.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86/kernel/apic_32.c')
-rw-r--r--arch/x86/kernel/apic_32.c445
1 files changed, 257 insertions, 188 deletions
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index 0ff576d026a4..21c831d96af3 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -60,10 +60,8 @@ unsigned long mp_lapic_addr;
60static int force_enable_local_apic; 60static int force_enable_local_apic;
61int disable_apic; 61int disable_apic;
62 62
63/* Local APIC timer verification ok */
64static int local_apic_timer_verify_ok;
65/* Disable local APIC timer from the kernel commandline or via dmi quirk */ 63/* Disable local APIC timer from the kernel commandline or via dmi quirk */
66static int local_apic_timer_disabled; 64static int disable_apic_timer __cpuinitdata;
67/* Local APIC timer works in C2 */ 65/* Local APIC timer works in C2 */
68int local_apic_timer_c2_ok; 66int local_apic_timer_c2_ok;
69EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok); 67EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
@@ -130,7 +128,11 @@ static inline int lapic_get_version(void)
130 */ 128 */
131static inline int lapic_is_integrated(void) 129static inline int lapic_is_integrated(void)
132{ 130{
131#ifdef CONFIG_X86_64
132 return 1;
133#else
133 return APIC_INTEGRATED(lapic_get_version()); 134 return APIC_INTEGRATED(lapic_get_version());
135#endif
134} 136}
135 137
136/* 138/*
@@ -145,13 +147,18 @@ static int modern_apic(void)
145 return lapic_get_version() >= 0x14; 147 return lapic_get_version() >= 0x14;
146} 148}
147 149
148void apic_wait_icr_idle(void) 150/*
151 * Paravirt kernels also might be using these below ops. So we still
152 * use generic apic_read()/apic_write(), which might be pointing to different
153 * ops in PARAVIRT case.
154 */
155void xapic_wait_icr_idle(void)
149{ 156{
150 while (apic_read(APIC_ICR) & APIC_ICR_BUSY) 157 while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
151 cpu_relax(); 158 cpu_relax();
152} 159}
153 160
154u32 safe_apic_wait_icr_idle(void) 161u32 safe_xapic_wait_icr_idle(void)
155{ 162{
156 u32 send_status; 163 u32 send_status;
157 int timeout; 164 int timeout;
@@ -167,16 +174,48 @@ u32 safe_apic_wait_icr_idle(void)
167 return send_status; 174 return send_status;
168} 175}
169 176
177void xapic_icr_write(u32 low, u32 id)
178{
179 apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
180 apic_write(APIC_ICR, low);
181}
182
183u64 xapic_icr_read(void)
184{
185 u32 icr1, icr2;
186
187 icr2 = apic_read(APIC_ICR2);
188 icr1 = apic_read(APIC_ICR);
189
190 return icr1 | ((u64)icr2 << 32);
191}
192
193static struct apic_ops xapic_ops = {
194 .read = native_apic_mem_read,
195 .write = native_apic_mem_write,
196 .icr_read = xapic_icr_read,
197 .icr_write = xapic_icr_write,
198 .wait_icr_idle = xapic_wait_icr_idle,
199 .safe_wait_icr_idle = safe_xapic_wait_icr_idle,
200};
201
202struct apic_ops __read_mostly *apic_ops = &xapic_ops;
203EXPORT_SYMBOL_GPL(apic_ops);
204
170/** 205/**
171 * enable_NMI_through_LVT0 - enable NMI through local vector table 0 206 * enable_NMI_through_LVT0 - enable NMI through local vector table 0
172 */ 207 */
173void __cpuinit enable_NMI_through_LVT0(void) 208void __cpuinit enable_NMI_through_LVT0(void)
174{ 209{
175 unsigned int v = APIC_DM_NMI; 210 unsigned int v;
211
212 /* unmask and set to NMI */
213 v = APIC_DM_NMI;
176 214
177 /* Level triggered for 82489DX */ 215 /* Level triggered for 82489DX (32bit mode) */
178 if (!lapic_is_integrated()) 216 if (!lapic_is_integrated())
179 v |= APIC_LVT_LEVEL_TRIGGER; 217 v |= APIC_LVT_LEVEL_TRIGGER;
218
180 apic_write(APIC_LVT0, v); 219 apic_write(APIC_LVT0, v);
181} 220}
182 221
@@ -193,9 +232,13 @@ int get_physical_broadcast(void)
193 */ 232 */
194int lapic_get_maxlvt(void) 233int lapic_get_maxlvt(void)
195{ 234{
196 unsigned int v = apic_read(APIC_LVR); 235 unsigned int v;
197 236
198 /* 82489DXs do not report # of LVT entries. */ 237 v = apic_read(APIC_LVR);
238 /*
239 * - we always have APIC integrated on 64bit mode
240 * - 82489DXs do not report # of LVT entries
241 */
199 return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2; 242 return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2;
200} 243}
201 244
@@ -203,8 +246,12 @@ int lapic_get_maxlvt(void)
203 * Local APIC timer 246 * Local APIC timer
204 */ 247 */
205 248
206/* Clock divisor is set to 16 */ 249/* Clock divisor */
250#ifdef CONFIG_X86_64
251#define APIC_DIVISOR 1
252#else
207#define APIC_DIVISOR 16 253#define APIC_DIVISOR 16
254#endif
208 255
209/* 256/*
210 * This function sets up the local APIC timer, with a timeout of 257 * This function sets up the local APIC timer, with a timeout of
@@ -212,6 +259,9 @@ int lapic_get_maxlvt(void)
212 * this function twice on the boot CPU, once with a bogus timeout 259 * this function twice on the boot CPU, once with a bogus timeout
213 * value, second time for real. The other (noncalibrating) CPUs 260 * value, second time for real. The other (noncalibrating) CPUs
214 * call this function only once, with the real, calibrated value. 261 * call this function only once, with the real, calibrated value.
262 *
263 * We do reads before writes even if unnecessary, to get around the
264 * P5 APIC double write bug.
215 */ 265 */
216static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen) 266static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
217{ 267{
@@ -233,14 +283,48 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
233 */ 283 */
234 tmp_value = apic_read(APIC_TDCR); 284 tmp_value = apic_read(APIC_TDCR);
235 apic_write(APIC_TDCR, 285 apic_write(APIC_TDCR,
236 (tmp_value & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) | 286 (tmp_value & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) |
237 APIC_TDR_DIV_16); 287 APIC_TDR_DIV_16);
238 288
239 if (!oneshot) 289 if (!oneshot)
240 apic_write(APIC_TMICT, clocks / APIC_DIVISOR); 290 apic_write(APIC_TMICT, clocks / APIC_DIVISOR);
241} 291}
242 292
243/* 293/*
294 * Setup extended LVT, AMD specific (K8, family 10h)
295 *
296 * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
297 * MCE interrupts are supported. Thus MCE offset must be set to 0.
298 *
299 * If mask=1, the LVT entry does not generate interrupts while mask=0
300 * enables the vector. See also the BKDGs.
301 */
302
303#define APIC_EILVT_LVTOFF_MCE 0
304#define APIC_EILVT_LVTOFF_IBS 1
305
306static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
307{
308 unsigned long reg = (lvt_off << 4) + APIC_EILVT0;
309 unsigned int v = (mask << 16) | (msg_type << 8) | vector;
310
311 apic_write(reg, v);
312}
313
314u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask)
315{
316 setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask);
317 return APIC_EILVT_LVTOFF_MCE;
318}
319
320u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
321{
322 setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
323 return APIC_EILVT_LVTOFF_IBS;
324}
325EXPORT_SYMBOL_GPL(setup_APIC_eilvt_ibs);
326
327/*
244 * Program the next event, relative to now 328 * Program the next event, relative to now
245 */ 329 */
246static int lapic_next_event(unsigned long delta, 330static int lapic_next_event(unsigned long delta,
@@ -259,8 +343,8 @@ static void lapic_timer_setup(enum clock_event_mode mode,
259 unsigned long flags; 343 unsigned long flags;
260 unsigned int v; 344 unsigned int v;
261 345
262 /* Lapic used for broadcast ? */ 346 /* Lapic used as dummy for broadcast ? */
263 if (!local_apic_timer_verify_ok) 347 if (evt->features & CLOCK_EVT_FEAT_DUMMY)
264 return; 348 return;
265 349
266 local_irq_save(flags); 350 local_irq_save(flags);
@@ -473,7 +557,7 @@ static int __init calibrate_APIC_clock(void)
473 return -1; 557 return -1;
474 } 558 }
475 559
476 local_apic_timer_verify_ok = 1; 560 levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
477 561
478 /* We trust the pm timer based calibration */ 562 /* We trust the pm timer based calibration */
479 if (!pm_referenced) { 563 if (!pm_referenced) {
@@ -507,11 +591,11 @@ static int __init calibrate_APIC_clock(void)
507 if (deltaj >= LAPIC_CAL_LOOPS-2 && deltaj <= LAPIC_CAL_LOOPS+2) 591 if (deltaj >= LAPIC_CAL_LOOPS-2 && deltaj <= LAPIC_CAL_LOOPS+2)
508 apic_printk(APIC_VERBOSE, "... jiffies result ok\n"); 592 apic_printk(APIC_VERBOSE, "... jiffies result ok\n");
509 else 593 else
510 local_apic_timer_verify_ok = 0; 594 levt->features |= CLOCK_EVT_FEAT_DUMMY;
511 } else 595 } else
512 local_irq_enable(); 596 local_irq_enable();
513 597
514 if (!local_apic_timer_verify_ok) { 598 if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
515 printk(KERN_WARNING 599 printk(KERN_WARNING
516 "APIC timer disabled due to verification failure.\n"); 600 "APIC timer disabled due to verification failure.\n");
517 return -1; 601 return -1;
@@ -533,7 +617,8 @@ void __init setup_boot_APIC_clock(void)
533 * timer as a dummy clock event source on SMP systems, so the 617 * timer as a dummy clock event source on SMP systems, so the
534 * broadcast mechanism is used. On UP systems simply ignore it. 618 * broadcast mechanism is used. On UP systems simply ignore it.
535 */ 619 */
536 if (local_apic_timer_disabled) { 620 if (disable_apic_timer) {
621 printk(KERN_INFO "Disabling APIC timer\n");
537 /* No broadcast on UP ! */ 622 /* No broadcast on UP ! */
538 if (num_possible_cpus() > 1) { 623 if (num_possible_cpus() > 1) {
539 lapic_clockevent.mult = 1; 624 lapic_clockevent.mult = 1;
@@ -602,7 +687,11 @@ static void local_apic_timer_interrupt(void)
602 /* 687 /*
603 * the NMI deadlock-detector uses this. 688 * the NMI deadlock-detector uses this.
604 */ 689 */
690#ifdef CONFIG_X86_64
691 add_pda(apic_timer_irqs, 1);
692#else
605 per_cpu(irq_stat, cpu).apic_timer_irqs++; 693 per_cpu(irq_stat, cpu).apic_timer_irqs++;
694#endif
606 695
607 evt->event_handler(evt); 696 evt->event_handler(evt);
608} 697}
@@ -642,39 +731,6 @@ int setup_profiling_timer(unsigned int multiplier)
642} 731}
643 732
644/* 733/*
645 * Setup extended LVT, AMD specific (K8, family 10h)
646 *
647 * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
648 * MCE interrupts are supported. Thus MCE offset must be set to 0.
649 *
650 * If mask=1, the LVT entry does not generate interrupts while mask=0
651 * enables the vector. See also the BKDGs.
652 */
653
654#define APIC_EILVT_LVTOFF_MCE 0
655#define APIC_EILVT_LVTOFF_IBS 1
656
657static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
658{
659 unsigned long reg = (lvt_off << 4) + APIC_EILVT0;
660 unsigned int v = (mask << 16) | (msg_type << 8) | vector;
661 apic_write(reg, v);
662}
663
664u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask)
665{
666 setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask);
667 return APIC_EILVT_LVTOFF_MCE;
668}
669
670u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
671{
672 setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
673 return APIC_EILVT_LVTOFF_IBS;
674}
675EXPORT_SYMBOL_GPL(setup_APIC_eilvt_ibs);
676
677/*
678 * Local APIC start and shutdown 734 * Local APIC start and shutdown
679 */ 735 */
680 736
@@ -719,7 +775,7 @@ void clear_local_APIC(void)
719 } 775 }
720 776
721 /* lets not touch this if we didn't frob it */ 777 /* lets not touch this if we didn't frob it */
722#ifdef CONFIG_X86_MCE_P4THERMAL 778#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
723 if (maxlvt >= 5) { 779 if (maxlvt >= 5) {
724 v = apic_read(APIC_LVTTHMR); 780 v = apic_read(APIC_LVTTHMR);
725 apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED); 781 apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
@@ -736,10 +792,6 @@ void clear_local_APIC(void)
736 if (maxlvt >= 4) 792 if (maxlvt >= 4)
737 apic_write(APIC_LVTPC, APIC_LVT_MASKED); 793 apic_write(APIC_LVTPC, APIC_LVT_MASKED);
738 794
739#ifdef CONFIG_X86_MCE_P4THERMAL
740 if (maxlvt >= 5)
741 apic_write(APIC_LVTTHMR, APIC_LVT_MASKED);
742#endif
743 /* Integrated APIC (!82489DX) ? */ 795 /* Integrated APIC (!82489DX) ? */
744 if (lapic_is_integrated()) { 796 if (lapic_is_integrated()) {
745 if (maxlvt > 3) 797 if (maxlvt > 3)
@@ -754,7 +806,7 @@ void clear_local_APIC(void)
754 */ 806 */
755void disable_local_APIC(void) 807void disable_local_APIC(void)
756{ 808{
757 unsigned long value; 809 unsigned int value;
758 810
759 clear_local_APIC(); 811 clear_local_APIC();
760 812
@@ -766,6 +818,7 @@ void disable_local_APIC(void)
766 value &= ~APIC_SPIV_APIC_ENABLED; 818 value &= ~APIC_SPIV_APIC_ENABLED;
767 apic_write(APIC_SPIV, value); 819 apic_write(APIC_SPIV, value);
768 820
821#ifdef CONFIG_X86_32
769 /* 822 /*
770 * When LAPIC was disabled by the BIOS and enabled by the kernel, 823 * When LAPIC was disabled by the BIOS and enabled by the kernel,
771 * restore the disabled state. 824 * restore the disabled state.
@@ -777,6 +830,7 @@ void disable_local_APIC(void)
777 l &= ~MSR_IA32_APICBASE_ENABLE; 830 l &= ~MSR_IA32_APICBASE_ENABLE;
778 wrmsr(MSR_IA32_APICBASE, l, h); 831 wrmsr(MSR_IA32_APICBASE, l, h);
779 } 832 }
833#endif
780} 834}
781 835
782/* 836/*
@@ -793,11 +847,15 @@ void lapic_shutdown(void)
793 return; 847 return;
794 848
795 local_irq_save(flags); 849 local_irq_save(flags);
796 clear_local_APIC();
797 850
798 if (enabled_via_apicbase) 851#ifdef CONFIG_X86_32
852 if (!enabled_via_apicbase)
853 clear_local_APIC();
854 else
855#endif
799 disable_local_APIC(); 856 disable_local_APIC();
800 857
858
801 local_irq_restore(flags); 859 local_irq_restore(flags);
802} 860}
803 861
@@ -842,6 +900,12 @@ int __init verify_local_APIC(void)
842 */ 900 */
843 reg0 = apic_read(APIC_ID); 901 reg0 = apic_read(APIC_ID);
844 apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0); 902 apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
903 apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
904 reg1 = apic_read(APIC_ID);
905 apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
906 apic_write(APIC_ID, reg0);
907 if (reg1 != (reg0 ^ APIC_ID_MASK))
908 return 0;
845 909
846 /* 910 /*
847 * The next two are just to see if we have sane values. 911 * The next two are just to see if we have sane values.
@@ -867,14 +931,15 @@ void __init sync_Arb_IDs(void)
867 */ 931 */
868 if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD) 932 if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
869 return; 933 return;
934
870 /* 935 /*
871 * Wait for idle. 936 * Wait for idle.
872 */ 937 */
873 apic_wait_icr_idle(); 938 apic_wait_icr_idle();
874 939
875 apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n"); 940 apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
876 apic_write(APIC_ICR, 941 apic_write(APIC_ICR, APIC_DEST_ALLINC |
877 APIC_DEST_ALLINC | APIC_INT_LEVELTRIG | APIC_DM_INIT); 942 APIC_INT_LEVELTRIG | APIC_DM_INIT);
878} 943}
879 944
880/* 945/*
@@ -882,7 +947,7 @@ void __init sync_Arb_IDs(void)
882 */ 947 */
883void __init init_bsp_APIC(void) 948void __init init_bsp_APIC(void)
884{ 949{
885 unsigned long value; 950 unsigned int value;
886 951
887 /* 952 /*
888 * Don't do the setup now if we have a SMP BIOS as the 953 * Don't do the setup now if we have a SMP BIOS as the
@@ -903,11 +968,13 @@ void __init init_bsp_APIC(void)
903 value &= ~APIC_VECTOR_MASK; 968 value &= ~APIC_VECTOR_MASK;
904 value |= APIC_SPIV_APIC_ENABLED; 969 value |= APIC_SPIV_APIC_ENABLED;
905 970
971#ifdef CONFIG_X86_32
906 /* This bit is reserved on P4/Xeon and should be cleared */ 972 /* This bit is reserved on P4/Xeon and should be cleared */
907 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && 973 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
908 (boot_cpu_data.x86 == 15)) 974 (boot_cpu_data.x86 == 15))
909 value &= ~APIC_SPIV_FOCUS_DISABLED; 975 value &= ~APIC_SPIV_FOCUS_DISABLED;
910 else 976 else
977#endif
911 value |= APIC_SPIV_FOCUS_DISABLED; 978 value |= APIC_SPIV_FOCUS_DISABLED;
912 value |= SPURIOUS_APIC_VECTOR; 979 value |= SPURIOUS_APIC_VECTOR;
913 apic_write(APIC_SPIV, value); 980 apic_write(APIC_SPIV, value);
@@ -926,6 +993,16 @@ static void __cpuinit lapic_setup_esr(void)
926{ 993{
927 unsigned long oldvalue, value, maxlvt; 994 unsigned long oldvalue, value, maxlvt;
928 if (lapic_is_integrated() && !esr_disable) { 995 if (lapic_is_integrated() && !esr_disable) {
996 if (esr_disable) {
997 /*
998 * Something untraceable is creating bad interrupts on
999 * secondary quads ... for the moment, just leave the
1000 * ESR disabled - we can't do anything useful with the
1001 * errors anyway - mbligh
1002 */
1003 printk(KERN_INFO "Leaving ESR disabled.\n");
1004 return;
1005 }
929 /* !82489DX */ 1006 /* !82489DX */
930 maxlvt = lapic_get_maxlvt(); 1007 maxlvt = lapic_get_maxlvt();
931 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ 1008 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
@@ -946,16 +1023,7 @@ static void __cpuinit lapic_setup_esr(void)
946 "vector: 0x%08lx after: 0x%08lx\n", 1023 "vector: 0x%08lx after: 0x%08lx\n",
947 oldvalue, value); 1024 oldvalue, value);
948 } else { 1025 } else {
949 if (esr_disable) 1026 printk(KERN_INFO "No ESR for 82489DX.\n");
950 /*
951 * Something untraceable is creating bad interrupts on
952 * secondary quads ... for the moment, just leave the
953 * ESR disabled - we can't do anything useful with the
954 * errors anyway - mbligh
955 */
956 printk(KERN_INFO "Leaving ESR disabled.\n");
957 else
958 printk(KERN_INFO "No ESR for 82489DX.\n");
959 } 1027 }
960} 1028}
961 1029
@@ -1093,13 +1161,17 @@ void __cpuinit setup_local_APIC(void)
1093 1161
1094void __cpuinit end_local_APIC_setup(void) 1162void __cpuinit end_local_APIC_setup(void)
1095{ 1163{
1096 unsigned long value;
1097
1098 lapic_setup_esr(); 1164 lapic_setup_esr();
1099 /* Disable the local apic timer */ 1165
1100 value = apic_read(APIC_LVTT); 1166#ifdef CONFIG_X86_32
1101 value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); 1167 {
1102 apic_write(APIC_LVTT, value); 1168 unsigned int value;
1169 /* Disable the local apic timer */
1170 value = apic_read(APIC_LVTT);
1171 value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
1172 apic_write(APIC_LVTT, value);
1173 }
1174#endif
1103 1175
1104 setup_apic_nmi_watchdog(NULL); 1176 setup_apic_nmi_watchdog(NULL);
1105 apic_pm_activate(); 1177 apic_pm_activate();
@@ -1209,7 +1281,7 @@ void __init init_apic_mappings(void)
1209 * default configuration (or the MP table is broken). 1281 * default configuration (or the MP table is broken).
1210 */ 1282 */
1211 if (boot_cpu_physical_apicid == -1U) 1283 if (boot_cpu_physical_apicid == -1U)
1212 boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); 1284 boot_cpu_physical_apicid = read_apic_id();
1213 1285
1214} 1286}
1215 1287
@@ -1246,7 +1318,7 @@ int __init APIC_init_uniprocessor(void)
1246 * might be zero if read from MP tables. Get it from LAPIC. 1318 * might be zero if read from MP tables. Get it from LAPIC.
1247 */ 1319 */
1248#ifdef CONFIG_CRASH_DUMP 1320#ifdef CONFIG_CRASH_DUMP
1249 boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); 1321 boot_cpu_physical_apicid = read_apic_id();
1250#endif 1322#endif
1251 physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map); 1323 physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
1252 1324
@@ -1325,59 +1397,12 @@ void smp_error_interrupt(struct pt_regs *regs)
1325 irq_exit(); 1397 irq_exit();
1326} 1398}
1327 1399
1328#ifdef CONFIG_SMP
1329void __init smp_intr_init(void)
1330{
1331 /*
1332 * IRQ0 must be given a fixed assignment and initialized,
1333 * because it's used before the IO-APIC is set up.
1334 */
1335 set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);
1336
1337 /*
1338 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
1339 * IPI, driven by wakeup.
1340 */
1341 alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
1342
1343 /* IPI for invalidation */
1344 alloc_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
1345
1346 /* IPI for generic function call */
1347 alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
1348
1349 /* IPI for single call function */
1350 set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
1351 call_function_single_interrupt);
1352}
1353#endif
1354
1355/*
1356 * Initialize APIC interrupts
1357 */
1358void __init apic_intr_init(void)
1359{
1360#ifdef CONFIG_SMP
1361 smp_intr_init();
1362#endif
1363 /* self generated IPI for local APIC timer */
1364 alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
1365
1366 /* IPI vectors for APIC spurious and error interrupts */
1367 alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
1368 alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
1369
1370 /* thermal monitor LVT interrupt */
1371#ifdef CONFIG_X86_MCE_P4THERMAL
1372 alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
1373#endif
1374}
1375
1376/** 1400/**
1377 * connect_bsp_APIC - attach the APIC to the interrupt system 1401 * connect_bsp_APIC - attach the APIC to the interrupt system
1378 */ 1402 */
1379void __init connect_bsp_APIC(void) 1403void __init connect_bsp_APIC(void)
1380{ 1404{
1405#ifdef CONFIG_X86_32
1381 if (pic_mode) { 1406 if (pic_mode) {
1382 /* 1407 /*
1383 * Do not trust the local APIC being empty at bootup. 1408 * Do not trust the local APIC being empty at bootup.
@@ -1392,6 +1417,7 @@ void __init connect_bsp_APIC(void)
1392 outb(0x70, 0x22); 1417 outb(0x70, 0x22);
1393 outb(0x01, 0x23); 1418 outb(0x01, 0x23);
1394 } 1419 }
1420#endif
1395 enable_apic_mode(); 1421 enable_apic_mode();
1396} 1422}
1397 1423
@@ -1404,6 +1430,9 @@ void __init connect_bsp_APIC(void)
1404 */ 1430 */
1405void disconnect_bsp_APIC(int virt_wire_setup) 1431void disconnect_bsp_APIC(int virt_wire_setup)
1406{ 1432{
1433 unsigned int value;
1434
1435#ifdef CONFIG_X86_32
1407 if (pic_mode) { 1436 if (pic_mode) {
1408 /* 1437 /*
1409 * Put the board back into PIC mode (has an effect only on 1438 * Put the board back into PIC mode (has an effect only on
@@ -1415,54 +1444,53 @@ void disconnect_bsp_APIC(int virt_wire_setup)
1415 "entering PIC mode.\n"); 1444 "entering PIC mode.\n");
1416 outb(0x70, 0x22); 1445 outb(0x70, 0x22);
1417 outb(0x00, 0x23); 1446 outb(0x00, 0x23);
1418 } else { 1447 return;
1419 /* Go back to Virtual Wire compatibility mode */ 1448 }
1420 unsigned long value; 1449#endif
1421 1450
1422 /* For the spurious interrupt use vector F, and enable it */ 1451 /* Go back to Virtual Wire compatibility mode */
1423 value = apic_read(APIC_SPIV);
1424 value &= ~APIC_VECTOR_MASK;
1425 value |= APIC_SPIV_APIC_ENABLED;
1426 value |= 0xf;
1427 apic_write(APIC_SPIV, value);
1428 1452
1429 if (!virt_wire_setup) { 1453 /* For the spurious interrupt use vector F, and enable it */
1430 /* 1454 value = apic_read(APIC_SPIV);
1431 * For LVT0 make it edge triggered, active high, 1455 value &= ~APIC_VECTOR_MASK;
1432 * external and enabled 1456 value |= APIC_SPIV_APIC_ENABLED;
1433 */ 1457 value |= 0xf;
1434 value = apic_read(APIC_LVT0); 1458 apic_write(APIC_SPIV, value);
1435 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
1436 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
1437 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
1438 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
1439 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
1440 apic_write(APIC_LVT0, value);
1441 } else {
1442 /* Disable LVT0 */
1443 apic_write(APIC_LVT0, APIC_LVT_MASKED);
1444 }
1445 1459
1460 if (!virt_wire_setup) {
1446 /* 1461 /*
1447 * For LVT1 make it edge triggered, active high, nmi and 1462 * For LVT0 make it edge triggered, active high,
1448 * enabled 1463 * external and enabled
1449 */ 1464 */
1450 value = apic_read(APIC_LVT1); 1465 value = apic_read(APIC_LVT0);
1451 value &= ~( 1466 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
1452 APIC_MODE_MASK | APIC_SEND_PENDING |
1453 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR | 1467 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
1454 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED); 1468 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
1455 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING; 1469 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
1456 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI); 1470 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
1457 apic_write(APIC_LVT1, value); 1471 apic_write(APIC_LVT0, value);
1472 } else {
1473 /* Disable LVT0 */
1474 apic_write(APIC_LVT0, APIC_LVT_MASKED);
1458 } 1475 }
1476
1477 /*
1478 * For LVT1 make it edge triggered, active high,
1479 * nmi and enabled
1480 */
1481 value = apic_read(APIC_LVT1);
1482 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
1483 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
1484 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
1485 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
1486 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
1487 apic_write(APIC_LVT1, value);
1459} 1488}
1460 1489
1461void __cpuinit generic_processor_info(int apicid, int version) 1490void __cpuinit generic_processor_info(int apicid, int version)
1462{ 1491{
1463 int cpu; 1492 int cpu;
1464 cpumask_t tmp_map; 1493 cpumask_t tmp_map;
1465 physid_mask_t phys_cpu;
1466 1494
1467 /* 1495 /*
1468 * Validate version 1496 * Validate version
@@ -1475,9 +1503,6 @@ void __cpuinit generic_processor_info(int apicid, int version)
1475 } 1503 }
1476 apic_version[apicid] = version; 1504 apic_version[apicid] = version;
1477 1505
1478 phys_cpu = apicid_to_cpu_present(apicid);
1479 physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu);
1480
1481 if (num_processors >= NR_CPUS) { 1506 if (num_processors >= NR_CPUS) {
1482 printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached." 1507 printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
1483 " Processor ignored.\n", NR_CPUS); 1508 " Processor ignored.\n", NR_CPUS);
@@ -1488,17 +1513,19 @@ void __cpuinit generic_processor_info(int apicid, int version)
1488 cpus_complement(tmp_map, cpu_present_map); 1513 cpus_complement(tmp_map, cpu_present_map);
1489 cpu = first_cpu(tmp_map); 1514 cpu = first_cpu(tmp_map);
1490 1515
1491 if (apicid == boot_cpu_physical_apicid) 1516 physid_set(apicid, phys_cpu_present_map);
1517 if (apicid == boot_cpu_physical_apicid) {
1492 /* 1518 /*
1493 * x86_bios_cpu_apicid is required to have processors listed 1519 * x86_bios_cpu_apicid is required to have processors listed
1494 * in same order as logical cpu numbers. Hence the first 1520 * in same order as logical cpu numbers. Hence the first
1495 * entry is BSP, and so on. 1521 * entry is BSP, and so on.
1496 */ 1522 */
1497 cpu = 0; 1523 cpu = 0;
1498 1524 }
1499 if (apicid > max_physical_apicid) 1525 if (apicid > max_physical_apicid)
1500 max_physical_apicid = apicid; 1526 max_physical_apicid = apicid;
1501 1527
1528#ifdef CONFIG_X86_32
1502 /* 1529 /*
1503 * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y 1530 * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
1504 * but we need to work other dependencies like SMP_SUSPEND etc 1531 * but we need to work other dependencies like SMP_SUSPEND etc
@@ -1518,7 +1545,9 @@ void __cpuinit generic_processor_info(int apicid, int version)
1518 def_to_bigsmp = 1; 1545 def_to_bigsmp = 1;
1519 } 1546 }
1520 } 1547 }
1521#ifdef CONFIG_SMP 1548#endif
1549
1550#if defined(CONFIG_X86_SMP) || defined(CONFIG_X86_64)
1522 /* are we being called early in kernel startup? */ 1551 /* are we being called early in kernel startup? */
1523 if (early_per_cpu_ptr(x86_cpu_to_apicid)) { 1552 if (early_per_cpu_ptr(x86_cpu_to_apicid)) {
1524 u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid); 1553 u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
@@ -1531,6 +1560,7 @@ void __cpuinit generic_processor_info(int apicid, int version)
1531 per_cpu(x86_bios_cpu_apicid, cpu) = apicid; 1560 per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
1532 } 1561 }
1533#endif 1562#endif
1563
1534 cpu_set(cpu, cpu_possible_map); 1564 cpu_set(cpu, cpu_possible_map);
1535 cpu_set(cpu, cpu_present_map); 1565 cpu_set(cpu, cpu_present_map);
1536} 1566}
@@ -1541,6 +1571,11 @@ void __cpuinit generic_processor_info(int apicid, int version)
1541#ifdef CONFIG_PM 1571#ifdef CONFIG_PM
1542 1572
1543static struct { 1573static struct {
1574 /*
1575 * 'active' is true if the local APIC was enabled by us and
1576 * not the BIOS; this signifies that we are also responsible
1577 * for disabling it before entering apm/acpi suspend
1578 */
1544 int active; 1579 int active;
1545 /* r/w apic fields */ 1580 /* r/w apic fields */
1546 unsigned int apic_id; 1581 unsigned int apic_id;
@@ -1581,7 +1616,7 @@ static int lapic_suspend(struct sys_device *dev, pm_message_t state)
1581 apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR); 1616 apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
1582 apic_pm_state.apic_tmict = apic_read(APIC_TMICT); 1617 apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
1583 apic_pm_state.apic_tdcr = apic_read(APIC_TDCR); 1618 apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
1584#ifdef CONFIG_X86_MCE_P4THERMAL 1619#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
1585 if (maxlvt >= 5) 1620 if (maxlvt >= 5)
1586 apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR); 1621 apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
1587#endif 1622#endif
@@ -1605,16 +1640,23 @@ static int lapic_resume(struct sys_device *dev)
1605 1640
1606 local_irq_save(flags); 1641 local_irq_save(flags);
1607 1642
1608 /* 1643#ifdef CONFIG_X86_64
1609 * Make sure the APICBASE points to the right address 1644 if (x2apic)
1610 * 1645 enable_x2apic();
1611 * FIXME! This will be wrong if we ever support suspend on 1646 else
1612 * SMP! We'll need to do this as part of the CPU restore! 1647#endif
1613 */ 1648 {
1614 rdmsr(MSR_IA32_APICBASE, l, h); 1649 /*
1615 l &= ~MSR_IA32_APICBASE_BASE; 1650 * Make sure the APICBASE points to the right address
1616 l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr; 1651 *
1617 wrmsr(MSR_IA32_APICBASE, l, h); 1652 * FIXME! This will be wrong if we ever support suspend on
1653 * SMP! We'll need to do this as part of the CPU restore!
1654 */
1655 rdmsr(MSR_IA32_APICBASE, l, h);
1656 l &= ~MSR_IA32_APICBASE_BASE;
1657 l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
1658 wrmsr(MSR_IA32_APICBASE, l, h);
1659 }
1618 1660
1619 apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED); 1661 apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
1620 apic_write(APIC_ID, apic_pm_state.apic_id); 1662 apic_write(APIC_ID, apic_pm_state.apic_id);
@@ -1624,7 +1666,7 @@ static int lapic_resume(struct sys_device *dev)
1624 apic_write(APIC_SPIV, apic_pm_state.apic_spiv); 1666 apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
1625 apic_write(APIC_LVT0, apic_pm_state.apic_lvt0); 1667 apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
1626 apic_write(APIC_LVT1, apic_pm_state.apic_lvt1); 1668 apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
1627#ifdef CONFIG_X86_MCE_P4THERMAL 1669#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
1628 if (maxlvt >= 5) 1670 if (maxlvt >= 5)
1629 apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr); 1671 apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
1630#endif 1672#endif
@@ -1638,7 +1680,9 @@ static int lapic_resume(struct sys_device *dev)
1638 apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr); 1680 apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
1639 apic_write(APIC_ESR, 0); 1681 apic_write(APIC_ESR, 0);
1640 apic_read(APIC_ESR); 1682 apic_read(APIC_ESR);
1683
1641 local_irq_restore(flags); 1684 local_irq_restore(flags);
1685
1642 return 0; 1686 return 0;
1643} 1687}
1644 1688
@@ -1694,20 +1738,20 @@ static int __init parse_lapic(char *arg)
1694} 1738}
1695early_param("lapic", parse_lapic); 1739early_param("lapic", parse_lapic);
1696 1740
1697static int __init parse_nolapic(char *arg) 1741static int __init setup_disableapic(char *arg)
1698{ 1742{
1699 disable_apic = 1; 1743 disable_apic = 1;
1700 setup_clear_cpu_cap(X86_FEATURE_APIC); 1744 setup_clear_cpu_cap(X86_FEATURE_APIC);
1701 return 0; 1745 return 0;
1702} 1746}
1703early_param("nolapic", parse_nolapic); 1747early_param("disableapic", setup_disableapic);
1704 1748
1705static int __init parse_disable_lapic_timer(char *arg) 1749/* same as disableapic, for compatibility */
1750static int __init setup_nolapic(char *arg)
1706{ 1751{
1707 local_apic_timer_disabled = 1; 1752 return setup_disableapic(arg);
1708 return 0;
1709} 1753}
1710early_param("nolapic_timer", parse_disable_lapic_timer); 1754early_param("nolapic", setup_nolapic);
1711 1755
1712static int __init parse_lapic_timer_c2_ok(char *arg) 1756static int __init parse_lapic_timer_c2_ok(char *arg)
1713{ 1757{
@@ -1716,15 +1760,40 @@ static int __init parse_lapic_timer_c2_ok(char *arg)
1716} 1760}
1717early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok); 1761early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
1718 1762
1763static int __init parse_disable_apic_timer(char *arg)
1764{
1765 disable_apic_timer = 1;
1766 return 0;
1767}
1768early_param("noapictimer", parse_disable_apic_timer);
1769
1770static int __init parse_nolapic_timer(char *arg)
1771{
1772 disable_apic_timer = 1;
1773 return 0;
1774}
1775early_param("nolapic_timer", parse_nolapic_timer);
1776
1719static int __init apic_set_verbosity(char *arg) 1777static int __init apic_set_verbosity(char *arg)
1720{ 1778{
1721 if (!arg) 1779 if (!arg) {
1780#ifdef CONFIG_X86_64
1781 skip_ioapic_setup = 0;
1782 ioapic_force = 1;
1783 return 0;
1784#endif
1722 return -EINVAL; 1785 return -EINVAL;
1786 }
1723 1787
1724 if (strcmp(arg, "debug") == 0) 1788 if (strcmp("debug", arg) == 0)
1725 apic_verbosity = APIC_DEBUG; 1789 apic_verbosity = APIC_DEBUG;
1726 else if (strcmp(arg, "verbose") == 0) 1790 else if (strcmp("verbose", arg) == 0)
1727 apic_verbosity = APIC_VERBOSE; 1791 apic_verbosity = APIC_VERBOSE;
1792 else {
1793 printk(KERN_WARNING "APIC Verbosity level %s not recognised"
1794 " use apic=verbose or apic=debug\n", arg);
1795 return -EINVAL;
1796 }
1728 1797
1729 return 0; 1798 return 0;
1730} 1799}