Diffstat (limited to 'arch/i386/kernel/mpparse.c')
-rw-r--r--  arch/i386/kernel/mpparse.c  1109
1 file changed, 1109 insertions, 0 deletions
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
new file mode 100644
index 000000000000..1347ab4939e7
--- /dev/null
+++ b/arch/i386/kernel/mpparse.c
@@ -0,0 +1,1109 @@
1/*
2 * Intel Multiprocessor Specification 1.1 and 1.4
3 * compliant MP-table parsing routines.
4 *
5 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
6 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
7 *
8 * Fixes
9 * Erich Boleyn : MP v1.4 and additional changes.
10 * Alan Cox : Added EBDA scanning
11 * Ingo Molnar : various cleanups and rewrites
12 * Maciej W. Rozycki: Bits for default MP configurations
13 * Paul Diefenbaugh: Added full ACPI support
14 */
15
16#include <linux/mm.h>
17#include <linux/irq.h>
18#include <linux/init.h>
19#include <linux/acpi.h>
20#include <linux/delay.h>
21#include <linux/config.h>
22#include <linux/bootmem.h>
23#include <linux/smp_lock.h>
24#include <linux/kernel_stat.h>
25#include <linux/mc146818rtc.h>
26#include <linux/bitops.h>
27
28#include <asm/smp.h>
29#include <asm/acpi.h>
30#include <asm/mtrr.h>
31#include <asm/mpspec.h>
32#include <asm/io_apic.h>
33
34#include <mach_apic.h>
35#include <mach_mpparse.h>
36#include <bios_ebda.h>
37
38/* Have we found an MP table */
39int smp_found_config;
40unsigned int __initdata maxcpus = NR_CPUS;
41
42/*
43 * Various Linux-internal data structures created from the
44 * MP-table.
45 */
46int apic_version [MAX_APICS];
47int mp_bus_id_to_type [MAX_MP_BUSSES];
48int mp_bus_id_to_node [MAX_MP_BUSSES];
49int mp_bus_id_to_local [MAX_MP_BUSSES];
50int quad_local_to_mp_bus_id [NR_CPUS/4][4];
51int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
52static int mp_current_pci_id;
53
54/* I/O APIC entries */
55struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
56
57/* MP IRQ source entries */
58struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
59
60/* # of MP IRQ source entries */
61int mp_irq_entries;
62
63int nr_ioapics;
64
65int pic_mode;
66unsigned long mp_lapic_addr;
67
68/* Processor that is doing the boot up */
69unsigned int boot_cpu_physical_apicid = -1U;
70unsigned int boot_cpu_logical_apicid = -1U;
71/* Internal processor count */
72static unsigned int __initdata num_processors;
73
74/* Bitmask of physically existing CPUs */
75physid_mask_t phys_cpu_present_map;
76
77u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
78
79/*
80 * Intel MP BIOS table parsing routines:
81 */
82
83
84/*
85 * Checksum an MP configuration block.
86 */
87
88static int __init mpf_checksum(unsigned char *mp, int len)
89{
90 int sum = 0;
91
92 while (len--)
93 sum += *mp++;
94
95 return sum & 0xFF;
96}
97
98/*
99 * Have to match translation table entries to main table entries by counter
100 * hence the mpc_record variable .... can't see a less disgusting way of
101 * doing this ....
102 */
103
104static int mpc_record;
105static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __initdata;
106
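/*
 * Sanity check an APIC ID from the MP table.  On NUMA-Q the low nibble
 * (the CPU within the quad) must have exactly one bit set and the high
 * nibble (the quad number) must not be 0xf; otherwise IDs up to 0xfe
 * are accepted for APIC version 1.4 and later, and only up to 0xe for
 * older APIC versions.
 */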
107#ifdef CONFIG_X86_NUMAQ
108static int MP_valid_apicid(int apicid, int version)
109{
110 return hweight_long(apicid & 0xf) == 1 && (apicid >> 4) != 0xf;
111}
112#else
113static int MP_valid_apicid(int apicid, int version)
114{
115 if (version >= 0x14)
116 return apicid < 0xff;
117 else
118 return apicid < 0xf;
119}
120#endif
121
122static void __init MP_processor_info (struct mpc_config_processor *m)
123{
124 int ver, apicid;
125 physid_mask_t tmp;
126
127 if (!(m->mpc_cpuflag & CPU_ENABLED))
128 return;
129
130 apicid = mpc_apic_id(m, translation_table[mpc_record]);
131
132 if (m->mpc_featureflag&(1<<0))
133 Dprintk(" Floating point unit present.\n");
134 if (m->mpc_featureflag&(1<<7))
135 Dprintk(" Machine Exception supported.\n");
136 if (m->mpc_featureflag&(1<<8))
137 Dprintk(" 64 bit compare & exchange supported.\n");
138 if (m->mpc_featureflag&(1<<9))
139 Dprintk(" Internal APIC present.\n");
140 if (m->mpc_featureflag&(1<<11))
141 Dprintk(" SEP present.\n");
142 if (m->mpc_featureflag&(1<<12))
143 Dprintk(" MTRR present.\n");
144 if (m->mpc_featureflag&(1<<13))
145 Dprintk(" PGE present.\n");
146 if (m->mpc_featureflag&(1<<14))
147 Dprintk(" MCA present.\n");
148 if (m->mpc_featureflag&(1<<15))
149 Dprintk(" CMOV present.\n");
150 if (m->mpc_featureflag&(1<<16))
151 Dprintk(" PAT present.\n");
152 if (m->mpc_featureflag&(1<<17))
153 Dprintk(" PSE present.\n");
154 if (m->mpc_featureflag&(1<<18))
155 Dprintk(" PSN present.\n");
156 if (m->mpc_featureflag&(1<<19))
157 Dprintk(" Cache Line Flush Instruction present.\n");
158 /* 20 Reserved */
159 if (m->mpc_featureflag&(1<<21))
160 Dprintk(" Debug Trace and EMON Store present.\n");
161 if (m->mpc_featureflag&(1<<22))
162 Dprintk(" ACPI Thermal Throttle Registers present.\n");
163 if (m->mpc_featureflag&(1<<23))
164 Dprintk(" MMX present.\n");
165 if (m->mpc_featureflag&(1<<24))
166 Dprintk(" FXSR present.\n");
167 if (m->mpc_featureflag&(1<<25))
168 Dprintk(" XMM present.\n");
169 if (m->mpc_featureflag&(1<<26))
170 Dprintk(" Willamette New Instructions present.\n");
171 if (m->mpc_featureflag&(1<<27))
172 Dprintk(" Self Snoop present.\n");
173 if (m->mpc_featureflag&(1<<28))
174 Dprintk(" HT present.\n");
175 if (m->mpc_featureflag&(1<<29))
176 Dprintk(" Thermal Monitor present.\n");
177 /* 30, 31 Reserved */
178
179
180 if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
181 Dprintk(" Bootup CPU\n");
182 boot_cpu_physical_apicid = m->mpc_apicid;
183 boot_cpu_logical_apicid = apicid;
184 }
185
186 if (num_processors >= NR_CPUS) {
187 printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
188 " Processor ignored.\n", NR_CPUS);
189 return;
190 }
191
192 if (num_processors >= maxcpus) {
193 printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
194 " Processor ignored.\n", maxcpus);
195 return;
196 }
197 num_processors++;
198 ver = m->mpc_apicver;
199
200 if (!MP_valid_apicid(apicid, ver)) {
201 printk(KERN_WARNING "Processor #%d INVALID. (Max ID: %d).\n",
202 m->mpc_apicid, MAX_APICS);
203 --num_processors;
204 return;
205 }
206
207 tmp = apicid_to_cpu_present(apicid);
208 physids_or(phys_cpu_present_map, phys_cpu_present_map, tmp);
209
210 /*
211 * Validate version
212 */
213 if (ver == 0x0) {
214 printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! fixing up to 0x10. (tell your hw vendor)\n", m->mpc_apicid);
215 ver = 0x10;
216 }
217 apic_version[m->mpc_apicid] = ver;
218 bios_cpu_apicid[num_processors - 1] = m->mpc_apicid;
219}
220
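/*
 * Record the type of each bus declared in the MP table; PCI buses are
 * additionally assigned a sequential logical PCI bus number in
 * mp_bus_id_to_pci_bus[].
 */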
221static void __init MP_bus_info (struct mpc_config_bus *m)
222{
223 char str[7];
224
225 memcpy(str, m->mpc_bustype, 6);
226 str[6] = 0;
227
228 mpc_oem_bus_info(m, str, translation_table[mpc_record]);
229
230 if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
231 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
232 } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) {
233 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
234 } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) {
235 mpc_oem_pci_bus(m, translation_table[mpc_record]);
236 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
237 mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
238 mp_current_pci_id++;
239 } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
240 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
241 } else if (strncmp(str, BUSTYPE_NEC98, sizeof(BUSTYPE_NEC98)-1) == 0) {
242 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_NEC98;
243 } else {
244 printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
245 }
246}
247
248static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
249{
250 if (!(m->mpc_flags & MPC_APIC_USABLE))
251 return;
252
253 printk(KERN_INFO "I/O APIC #%d Version %d at 0x%lX.\n",
254 m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
255 if (nr_ioapics >= MAX_IO_APICS) {
256 printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n",
257 MAX_IO_APICS, nr_ioapics);
258		panic("Recompile kernel with bigger MAX_IO_APICS!\n");
259 }
260 if (!m->mpc_apicaddr) {
261 printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
262 " found in MP table, skipping!\n");
263 return;
264 }
265 mp_ioapics[nr_ioapics] = *m;
266 nr_ioapics++;
267}
268
269static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
270{
271 mp_irqs [mp_irq_entries] = *m;
272 Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
273 " IRQ %02x, APIC ID %x, APIC INT %02x\n",
274 m->mpc_irqtype, m->mpc_irqflag & 3,
275 (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
276 m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
277 if (++mp_irq_entries == MAX_IRQ_SOURCES)
278 panic("Max # of irq sources exceeded!!\n");
279}
280
281static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
282{
283 Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
284 " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
285 m->mpc_irqtype, m->mpc_irqflag & 3,
286 (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
287 m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
288 /*
289 * Well it seems all SMP boards in existence
290 * use ExtINT/LVT1 == LINT0 and
291 * NMI/LVT2 == LINT1 - the following check
292	 * will show us if this assumption is false.
293 * Until then we do not have to add baggage.
294 */
295 if ((m->mpc_irqtype == mp_ExtINT) &&
296 (m->mpc_destapiclint != 0))
297 BUG();
298 if ((m->mpc_irqtype == mp_NMI) &&
299 (m->mpc_destapiclint != 1))
300 BUG();
301}
302
303#ifdef CONFIG_X86_NUMAQ
304static void __init MP_translation_info (struct mpc_config_translation *m)
305{
306 printk(KERN_INFO "Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local);
307
308 if (mpc_record >= MAX_MPC_ENTRY)
309 printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
310 else
311 translation_table[mpc_record] = m; /* stash this for later */
312 if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
313 node_set_online(m->trans_quad);
314}
315
316/*
317 * Read/parse the MPC oem tables
318 */
319
320static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, \
321 unsigned short oemsize)
322{
323 int count = sizeof (*oemtable); /* the header size */
324 unsigned char *oemptr = ((unsigned char *)oemtable)+count;
325
326 mpc_record = 0;
327 printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", oemtable);
328 if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4))
329 {
330 printk(KERN_WARNING "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
331 oemtable->oem_signature[0],
332 oemtable->oem_signature[1],
333 oemtable->oem_signature[2],
334 oemtable->oem_signature[3]);
335 return;
336 }
337 if (mpf_checksum((unsigned char *)oemtable,oemtable->oem_length))
338 {
339 printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
340 return;
341 }
342 while (count < oemtable->oem_length) {
343 switch (*oemptr) {
344 case MP_TRANSLATION:
345 {
346 struct mpc_config_translation *m=
347 (struct mpc_config_translation *)oemptr;
348 MP_translation_info(m);
349 oemptr += sizeof(*m);
350 count += sizeof(*m);
351 ++mpc_record;
352 break;
353 }
354 default:
355 {
356 printk(KERN_WARNING "Unrecognised OEM table entry type! - %d\n", (int) *oemptr);
357 return;
358 }
359 }
360 }
361}
362
363static inline void mps_oem_check(struct mp_config_table *mpc, char *oem,
364 char *productid)
365{
366 if (strncmp(oem, "IBM NUMA", 8))
367 printk("Warning! May not be a NUMA-Q system!\n");
368 if (mpc->mpc_oemptr)
369 smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr,
370 mpc->mpc_oemsize);
371}
372#endif /* CONFIG_X86_NUMAQ */
373
374/*
375 * Read/parse the MPC
376 */
377
378static int __init smp_read_mpc(struct mp_config_table *mpc)
379{
380 char str[16];
381 char oem[10];
382 int count=sizeof(*mpc);
383 unsigned char *mpt=((unsigned char *)mpc)+count;
384
385 if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
386 printk(KERN_ERR "SMP mptable: bad signature [0x%x]!\n",
387 *(u32 *)mpc->mpc_signature);
388 return 0;
389 }
390 if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
391 printk(KERN_ERR "SMP mptable: checksum error!\n");
392 return 0;
393 }
394 if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
395 printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
396 mpc->mpc_spec);
397 return 0;
398 }
399 if (!mpc->mpc_lapic) {
400 printk(KERN_ERR "SMP mptable: null local APIC address!\n");
401 return 0;
402 }
403 memcpy(oem,mpc->mpc_oem,8);
404 oem[8]=0;
405 printk(KERN_INFO "OEM ID: %s ",oem);
406
407 memcpy(str,mpc->mpc_productid,12);
408 str[12]=0;
409 printk("Product ID: %s ",str);
410
411 mps_oem_check(mpc, oem, str);
412
413 printk("APIC at: 0x%lX\n",mpc->mpc_lapic);
414
415 /*
416 * Save the local APIC address (it might be non-default) -- but only
417 * if we're not using ACPI.
418 */
419 if (!acpi_lapic)
420 mp_lapic_addr = mpc->mpc_lapic;
421
422 /*
423 * Now process the configuration blocks.
424 */
425 mpc_record = 0;
426 while (count < mpc->mpc_length) {
427 switch(*mpt) {
428 case MP_PROCESSOR:
429 {
430 struct mpc_config_processor *m=
431 (struct mpc_config_processor *)mpt;
432 /* ACPI may have already provided this data */
433 if (!acpi_lapic)
434 MP_processor_info(m);
435 mpt += sizeof(*m);
436 count += sizeof(*m);
437 break;
438 }
439 case MP_BUS:
440 {
441 struct mpc_config_bus *m=
442 (struct mpc_config_bus *)mpt;
443 MP_bus_info(m);
444 mpt += sizeof(*m);
445 count += sizeof(*m);
446 break;
447 }
448 case MP_IOAPIC:
449 {
450 struct mpc_config_ioapic *m=
451 (struct mpc_config_ioapic *)mpt;
452 MP_ioapic_info(m);
453 mpt+=sizeof(*m);
454 count+=sizeof(*m);
455 break;
456 }
457 case MP_INTSRC:
458 {
459 struct mpc_config_intsrc *m=
460 (struct mpc_config_intsrc *)mpt;
461
462 MP_intsrc_info(m);
463 mpt+=sizeof(*m);
464 count+=sizeof(*m);
465 break;
466 }
467 case MP_LINTSRC:
468 {
469 struct mpc_config_lintsrc *m=
470 (struct mpc_config_lintsrc *)mpt;
471 MP_lintsrc_info(m);
472 mpt+=sizeof(*m);
473 count+=sizeof(*m);
474 break;
475 }
476 default:
477 {
478 count = mpc->mpc_length;
479 break;
480 }
481 }
482 ++mpc_record;
483 }
484 clustered_apic_check();
485 if (!num_processors)
486 printk(KERN_ERR "SMP mptable: no processors registered!\n");
487 return num_processors;
488}
489
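/*
 * Read the ELCR (edge/level control registers at I/O ports 0x4d0 and
 * 0x4d1): one bit per ISA IRQ, set when that IRQ is level triggered.
 */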
490static int __init ELCR_trigger(unsigned int irq)
491{
492 unsigned int port;
493
494 port = 0x4d0 + (irq >> 3);
495 return (inb(port) >> (irq & 7)) & 1;
496}
497
498static void __init construct_default_ioirq_mptable(int mpc_default_type)
499{
500 struct mpc_config_intsrc intsrc;
501 int i;
502 int ELCR_fallback = 0;
503
504 intsrc.mpc_type = MP_INTSRC;
505 intsrc.mpc_irqflag = 0; /* conforming */
506 intsrc.mpc_srcbus = 0;
507 intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
508
509 intsrc.mpc_irqtype = mp_INT;
510
511 /*
512 * If true, we have an ISA/PCI system with no IRQ entries
513 * in the MP table. To prevent the PCI interrupts from being set up
514 * incorrectly, we try to use the ELCR. The sanity check to see if
515 * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
516 * never be level sensitive, so we simply see if the ELCR agrees.
517 * If it does, we assume it's valid.
518 */
519 if (mpc_default_type == 5) {
520 printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
521
522 if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
523 printk(KERN_WARNING "ELCR contains invalid data... not using ELCR\n");
524 else {
525 printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
526 ELCR_fallback = 1;
527 }
528 }
529
530 for (i = 0; i < 16; i++) {
531 switch (mpc_default_type) {
532 case 2:
533 if (i == 0 || i == 13)
534 continue; /* IRQ0 & IRQ13 not connected */
535 /* fall through */
536 default:
537 if (i == 2)
538 continue; /* IRQ2 is never connected */
539 }
540
541 if (ELCR_fallback) {
542 /*
543 * If the ELCR indicates a level-sensitive interrupt, we
544 * copy that information over to the MP table in the
545 * irqflag field (level sensitive, active high polarity).
546 */
547 if (ELCR_trigger(i))
548 intsrc.mpc_irqflag = 13;
549 else
550 intsrc.mpc_irqflag = 0;
551 }
552
553 intsrc.mpc_srcbusirq = i;
554 intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
555 MP_intsrc_info(&intsrc);
556 }
557
558 intsrc.mpc_irqtype = mp_ExtINT;
559 intsrc.mpc_srcbusirq = 0;
560 intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */
561 MP_intsrc_info(&intsrc);
562}
563
564static inline void __init construct_default_ISA_mptable(int mpc_default_type)
565{
566 struct mpc_config_processor processor;
567 struct mpc_config_bus bus;
568 struct mpc_config_ioapic ioapic;
569 struct mpc_config_lintsrc lintsrc;
570 int linttypes[2] = { mp_ExtINT, mp_NMI };
571 int i;
572
573 /*
574 * local APIC has default address
575 */
576 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
577
578 /*
579 * 2 CPUs, numbered 0 & 1.
580 */
581 processor.mpc_type = MP_PROCESSOR;
582 /* Either an integrated APIC or a discrete 82489DX. */
583 processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
584 processor.mpc_cpuflag = CPU_ENABLED;
585 processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
586 (boot_cpu_data.x86_model << 4) |
587 boot_cpu_data.x86_mask;
588 processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
589 processor.mpc_reserved[0] = 0;
590 processor.mpc_reserved[1] = 0;
591 for (i = 0; i < 2; i++) {
592 processor.mpc_apicid = i;
593 MP_processor_info(&processor);
594 }
595
596 bus.mpc_type = MP_BUS;
597 bus.mpc_busid = 0;
598 switch (mpc_default_type) {
599 default:
600 printk("???\n");
601 printk(KERN_ERR "Unknown standard configuration %d\n",
602 mpc_default_type);
603 /* fall through */
604 case 1:
605 case 5:
606 memcpy(bus.mpc_bustype, "ISA ", 6);
607 break;
608 case 2:
609 case 6:
610 case 3:
611 memcpy(bus.mpc_bustype, "EISA ", 6);
612 break;
613 case 4:
614 case 7:
615 memcpy(bus.mpc_bustype, "MCA ", 6);
616 }
617 MP_bus_info(&bus);
618 if (mpc_default_type > 4) {
619 bus.mpc_busid = 1;
620 memcpy(bus.mpc_bustype, "PCI ", 6);
621 MP_bus_info(&bus);
622 }
623
624 ioapic.mpc_type = MP_IOAPIC;
625 ioapic.mpc_apicid = 2;
626 ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
627 ioapic.mpc_flags = MPC_APIC_USABLE;
628 ioapic.mpc_apicaddr = 0xFEC00000;
629 MP_ioapic_info(&ioapic);
630
631 /*
632 * We set up most of the low 16 IO-APIC pins according to MPS rules.
633 */
634 construct_default_ioirq_mptable(mpc_default_type);
635
636 lintsrc.mpc_type = MP_LINTSRC;
637 lintsrc.mpc_irqflag = 0; /* conforming */
638 lintsrc.mpc_srcbusid = 0;
639 lintsrc.mpc_srcbusirq = 0;
640 lintsrc.mpc_destapic = MP_APIC_ALL;
641 for (i = 0; i < 2; i++) {
642 lintsrc.mpc_irqtype = linttypes[i];
643 lintsrc.mpc_destapiclint = i;
644 MP_lintsrc_info(&lintsrc);
645 }
646}
647
648static struct intel_mp_floating *mpf_found;
649
650/*
651 * Scan the memory blocks for an SMP configuration block.
652 */
653void __init get_smp_config (void)
654{
655 struct intel_mp_floating *mpf = mpf_found;
656
657 /*
658 * ACPI may be used to obtain the entire SMP configuration or just to
659 * enumerate/configure processors (CONFIG_ACPI_BOOT). Note that
660 * ACPI supports both logical (e.g. Hyper-Threading) and physical
661 * processors, where MPS only supports physical.
662 */
663 if (acpi_lapic && acpi_ioapic) {
664 printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
665 return;
666 }
667 else if (acpi_lapic)
668 printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
669
670 printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
671 if (mpf->mpf_feature2 & (1<<7)) {
672 printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
673 pic_mode = 1;
674 } else {
675 printk(KERN_INFO " Virtual Wire compatibility mode.\n");
676 pic_mode = 0;
677 }
678
679 /*
680 * Now see if we need to read further.
681 */
682 if (mpf->mpf_feature1 != 0) {
683
684 printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
685 construct_default_ISA_mptable(mpf->mpf_feature1);
686
687 } else if (mpf->mpf_physptr) {
688
689 /*
690 * Read the physical hardware table. Anything here will
691 * override the defaults.
692 */
693 if (!smp_read_mpc((void *)mpf->mpf_physptr)) {
694 smp_found_config = 0;
695 printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
696 printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
697 return;
698 }
699 /*
700 * If there are no explicit MP IRQ entries, then we are
701 * broken. We set up most of the low 16 IO-APIC pins to
702 * ISA defaults and hope it will work.
703 */
704 if (!mp_irq_entries) {
705 struct mpc_config_bus bus;
706
707 printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
708
709 bus.mpc_type = MP_BUS;
710 bus.mpc_busid = 0;
711 memcpy(bus.mpc_bustype, "ISA ", 6);
712 MP_bus_info(&bus);
713
714 construct_default_ioirq_mptable(0);
715 }
716
717 } else
718 BUG();
719
720 printk(KERN_INFO "Processors: %d\n", num_processors);
721 /*
722 * Only use the first configuration found.
723 */
724}
725
726static int __init smp_scan_config (unsigned long base, unsigned long length)
727{
728 unsigned long *bp = phys_to_virt(base);
729 struct intel_mp_floating *mpf;
730
731 Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
732 if (sizeof(*mpf) != 16)
733 printk("Error: MPF size\n");
734
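	/*
	 * The floating pointer structure is 16 bytes long, so step through
	 * the region 16 bytes at a time looking for the "_MP_" signature
	 * with a valid checksum and a supported spec revision.
	 */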
735 while (length > 0) {
736 mpf = (struct intel_mp_floating *)bp;
737 if ((*bp == SMP_MAGIC_IDENT) &&
738 (mpf->mpf_length == 1) &&
739 !mpf_checksum((unsigned char *)bp, 16) &&
740 ((mpf->mpf_specification == 1)
741 || (mpf->mpf_specification == 4)) ) {
742
743 smp_found_config = 1;
744 printk(KERN_INFO "found SMP MP-table at %08lx\n",
745 virt_to_phys(mpf));
746 reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE);
747 if (mpf->mpf_physptr) {
748 /*
749			 * We cannot access the MPC table to compute
750			 * its size yet, as only a few megabytes from
751			 * the bottom of memory are mapped at this point.
752			 * The PC-9800 places its MPC table at the very end
753			 * of physical memory, so simply reserving
754			 * PAGE_SIZE from mpf->mpf_physptr would trigger BUG()
755			 * in reserve_bootmem.
756 */
757 unsigned long size = PAGE_SIZE;
758 unsigned long end = max_low_pfn * PAGE_SIZE;
759 if (mpf->mpf_physptr + size > end)
760 size = end - mpf->mpf_physptr;
761 reserve_bootmem(mpf->mpf_physptr, size);
762 }
763
764 mpf_found = mpf;
765 return 1;
766 }
767 bp += 4;
768 length -= 16;
769 }
770 return 0;
771}
772
773void __init find_smp_config (void)
774{
775 unsigned int address;
776
777 /*
778	 * FIXME: Linux assumes you have 640K of base RAM...
779	 * this continues the error...
780	 *
781	 * 1) Scan the bottom 1K for a signature
782	 * 2) Scan the top 1K of base RAM
783	 * 3) Scan the 64K of BIOS
784 */
785 if (smp_scan_config(0x0,0x400) ||
786 smp_scan_config(639*0x400,0x400) ||
787 smp_scan_config(0xF0000,0x10000))
788 return;
789 /*
790 * If it is an SMP machine we should know now, unless the
791 * configuration is in an EISA/MCA bus machine with an
792	 * extended BIOS data area.
793 *
794	 * There is a real-mode segmented pointer pointing to the
795	 * 4K EBDA area at 0x40E; calculate its address and scan it here.
796 *
797 * NOTE! There are Linux loaders that will corrupt the EBDA
798 * area, and as such this kind of SMP config may be less
799 * trustworthy, simply because the SMP table may have been
800 * stomped on during early boot. These loaders are buggy and
801 * should be fixed.
802 *
803	 * The MP 1.4 spec says to scan only the first 1K of the 4K EBDA.
804 */
805
806 address = get_bios_ebda();
807 if (address)
808 smp_scan_config(address, 0x400);
809}
810
811/* --------------------------------------------------------------------------
812 ACPI-based MP Configuration
813 -------------------------------------------------------------------------- */
814
815#ifdef CONFIG_ACPI_BOOT
816
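/*
 * Record the local APIC base address reported by ACPI (MADT), map it
 * through the fixmap, and read the boot CPU's physical APIC ID if it
 * is not known yet.
 */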
817void __init mp_register_lapic_address (
818 u64 address)
819{
820 mp_lapic_addr = (unsigned long) address;
821
822 set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
823
824 if (boot_cpu_physical_apicid == -1U)
825 boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
826
827 Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
828}
829
830
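/*
 * Register a processor enumerated via ACPI (a MADT local APIC entry)
 * by synthesizing an MP-table processor entry and feeding it to
 * MP_processor_info().
 */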
831void __init mp_register_lapic (
832 u8 id,
833 u8 enabled)
834{
835 struct mpc_config_processor processor;
836 int boot_cpu = 0;
837
838 if (MAX_APICS - id <= 0) {
839 printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
840 id, MAX_APICS);
841 return;
842 }
843
844 if (id == boot_cpu_physical_apicid)
845 boot_cpu = 1;
846
847 processor.mpc_type = MP_PROCESSOR;
848 processor.mpc_apicid = id;
849 processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
850 processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
851 processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
852 processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
853 (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
854 processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
855 processor.mpc_reserved[0] = 0;
856 processor.mpc_reserved[1] = 0;
857
858 MP_processor_info(&processor);
859}
860
861#if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_ACPI_INTERPRETER) || defined(CONFIG_ACPI_BOOT))
862
863#define MP_ISA_BUS 0
864#define MP_MAX_IOAPIC_PIN 127
865
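/*
 * Per-IOAPIC bookkeeping for ACPI: the GSI range each IOAPIC serves
 * and a bitmap of pins that have already been programmed.
 */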
866static struct mp_ioapic_routing {
867 int apic_id;
868 int gsi_base;
869 int gsi_end;
870 u32 pin_programmed[4];
871} mp_ioapic_routing[MAX_IO_APICS];
872
873
874static int mp_find_ioapic (
875 int gsi)
876{
877 int i = 0;
878
879 /* Find the IOAPIC that manages this GSI. */
880 for (i = 0; i < nr_ioapics; i++) {
881 if ((gsi >= mp_ioapic_routing[i].gsi_base)
882 && (gsi <= mp_ioapic_routing[i].gsi_end))
883 return i;
884 }
885
886 printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
887
888 return -1;
889}
890
891
892void __init mp_register_ioapic (
893 u8 id,
894 u32 address,
895 u32 gsi_base)
896{
897 int idx = 0;
898
899 if (nr_ioapics >= MAX_IO_APICS) {
900 printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
901 "(found %d)\n", MAX_IO_APICS, nr_ioapics);
902 panic("Recompile kernel with bigger MAX_IO_APICS!\n");
903 }
904 if (!address) {
905 printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
906 " found in MADT table, skipping!\n");
907 return;
908 }
909
910 idx = nr_ioapics++;
911
912 mp_ioapics[idx].mpc_type = MP_IOAPIC;
913 mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
914 mp_ioapics[idx].mpc_apicaddr = address;
915
916 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
917 mp_ioapics[idx].mpc_apicid = io_apic_get_unique_id(idx, id);
918 mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
919
920 /*
921 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
922 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
923 */
924 mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
925 mp_ioapic_routing[idx].gsi_base = gsi_base;
926 mp_ioapic_routing[idx].gsi_end = gsi_base +
927 io_apic_get_redir_entries(idx);
928
929 printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
930 "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
931 mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
932 mp_ioapic_routing[idx].gsi_base,
933 mp_ioapic_routing[idx].gsi_end);
934
935 return;
936}
937
938
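/*
 * Handle an ACPI (MADT) interrupt source override: route the given
 * ISA IRQ to the IOAPIC pin that serves 'gsi', using the specified
 * polarity and trigger mode.
 */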
939void __init mp_override_legacy_irq (
940 u8 bus_irq,
941 u8 polarity,
942 u8 trigger,
943 u32 gsi)
944{
945 struct mpc_config_intsrc intsrc;
946 int ioapic = -1;
947 int pin = -1;
948
949 /*
950 * Convert 'gsi' to 'ioapic.pin'.
951 */
952 ioapic = mp_find_ioapic(gsi);
953 if (ioapic < 0)
954 return;
955 pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
956
957 /*
958 * TBD: This check is for faulty timer entries, where the override
959 * erroneously sets the trigger to level, resulting in a HUGE
960 * increase of timer interrupts!
961 */
962 if ((bus_irq == 0) && (trigger == 3))
963 trigger = 1;
964
965 intsrc.mpc_type = MP_INTSRC;
966 intsrc.mpc_irqtype = mp_INT;
967 intsrc.mpc_irqflag = (trigger << 2) | polarity;
968 intsrc.mpc_srcbus = MP_ISA_BUS;
969 intsrc.mpc_srcbusirq = bus_irq; /* IRQ */
970 intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */
971 intsrc.mpc_dstirq = pin; /* INTIN# */
972
973 Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
974 intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
975 (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
976 intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
977
978 mp_irqs[mp_irq_entries] = intsrc;
979 if (++mp_irq_entries == MAX_IRQ_SOURCES)
980 panic("Max # of irq sources exceeded!\n");
981
982 return;
983}
984
985int es7000_plat;
986
987void __init mp_config_acpi_legacy_irqs (void)
988{
989 struct mpc_config_intsrc intsrc;
990 int i = 0;
991 int ioapic = -1;
992
993 /*
994	 * Fabricate the legacy ISA bus (bus MP_ISA_BUS).
995 */
996 mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
997 Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
998
999 /*
1000 * Older generations of ES7000 have no legacy identity mappings
1001 */
1002 if (es7000_plat == 1)
1003 return;
1004
1005 /*
1006 * Locate the IOAPIC that manages the ISA IRQs (0-15).
1007 */
1008 ioapic = mp_find_ioapic(0);
1009 if (ioapic < 0)
1010 return;
1011
1012 intsrc.mpc_type = MP_INTSRC;
1013 intsrc.mpc_irqflag = 0; /* Conforming */
1014 intsrc.mpc_srcbus = MP_ISA_BUS;
1015 intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
1016
1017 /*
1018	 * Use the default configuration for IRQs 0-15, unless
1019	 * overridden by (MADT) interrupt source override entries.
1020 */
1021 for (i = 0; i < 16; i++) {
1022 int idx;
1023
1024 for (idx = 0; idx < mp_irq_entries; idx++) {
1025 struct mpc_config_intsrc *irq = mp_irqs + idx;
1026
1027 /* Do we already have a mapping for this ISA IRQ? */
1028 if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
1029 break;
1030
1031			/* Do we already have a mapping for this IOAPIC pin? */
1032 if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
1033 (irq->mpc_dstirq == i))
1034 break;
1035 }
1036
1037 if (idx != mp_irq_entries) {
1038 printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
1039 continue; /* IRQ already used */
1040 }
1041
1042 intsrc.mpc_irqtype = mp_INT;
1043 intsrc.mpc_srcbusirq = i; /* Identity mapped */
1044 intsrc.mpc_dstirq = i;
1045
1046 Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
1047 "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
1048 (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
1049 intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
1050 intsrc.mpc_dstirq);
1051
1052 mp_irqs[mp_irq_entries] = intsrc;
1053 if (++mp_irq_entries == MAX_IRQ_SOURCES)
1054 panic("Max # of irq sources exceeded!\n");
1055 }
1056}
1057
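/*
 * Route a GSI reported by ACPI (e.g. from a PCI routing table entry):
 * program the IOAPIC pin that serves it, but only once per pin, and
 * return the (possibly renumbered) GSI.
 */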
1058int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
1059{
1060 int ioapic = -1;
1061 int ioapic_pin = 0;
1062 int idx, bit = 0;
1063
1064#ifdef CONFIG_ACPI_BUS
1065 /* Don't set up the ACPI SCI because it's already set up */
1066 if (acpi_fadt.sci_int == gsi)
1067 return gsi;
1068#endif
1069
1070 ioapic = mp_find_ioapic(gsi);
1071 if (ioapic < 0) {
1072 printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
1073 return gsi;
1074 }
1075
1076 ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
1077
1078 if (ioapic_renumber_irq)
1079 gsi = ioapic_renumber_irq(ioapic, gsi);
1080
1081 /*
1082 * Avoid pin reprogramming. PRTs typically include entries
1083 * with redundant pin->gsi mappings (but unique PCI devices);
1084 * we only program the IOAPIC on the first.
1085 */
1086 bit = ioapic_pin % 32;
1087 idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
1088 if (idx > 3) {
1089 printk(KERN_ERR "Invalid reference to IOAPIC pin "
1090 "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
1091 ioapic_pin);
1092 return gsi;
1093 }
1094 if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
1095 Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
1096 mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
1097 return gsi;
1098 }
1099
1100 mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
1101
1102 io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
1103 edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
1104 active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
1105 return gsi;
1106}
1107
1108#endif /*CONFIG_X86_IO_APIC && (CONFIG_ACPI_INTERPRETER || CONFIG_ACPI_BOOT)*/
1109#endif /*CONFIG_ACPI_BOOT*/