Diffstat (limited to 'arch/i386')
-rw-r--r--  arch/i386/Kconfig                                  |  56
-rw-r--r--  arch/i386/defconfig                                |   2
-rw-r--r--  arch/i386/kernel/acpi/Makefile                     |   2
-rw-r--r--  arch/i386/kernel/acpi/boot.c                       |  19
-rw-r--r--  arch/i386/kernel/acpi/cstate.c                     |  58
-rw-r--r--  arch/i386/kernel/acpi/processor.c                  |  75
-rw-r--r--  arch/i386/kernel/apic.c                            |   5
-rw-r--r--  arch/i386/kernel/cpu/amd.c                         |   8
-rw-r--r--  arch/i386/kernel/cpu/centaur.c                     |  12
-rw-r--r--  arch/i386/kernel/cpu/common.c                      |  11
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c        |  71
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c  |  12
-rw-r--r--  arch/i386/kernel/cpu/cyrix.c                       |  18
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c             |  12
-rw-r--r--  arch/i386/kernel/cpu/mtrr/main.c                   |  15
-rw-r--r--  arch/i386/kernel/cpu/nexgen.c                      |   8
-rw-r--r--  arch/i386/kernel/cpu/rise.c                        |   8
-rw-r--r--  arch/i386/kernel/cpu/transmeta.c                   |  10
-rw-r--r--  arch/i386/kernel/cpu/umc.c                         |   8
-rw-r--r--  arch/i386/kernel/head.S                            |   4
-rw-r--r--  arch/i386/kernel/mpparse.c                         |   8
-rw-r--r--  arch/i386/kernel/nmi.c                             |   2
-rw-r--r--  arch/i386/kernel/process.c                         |   6
-rw-r--r--  arch/i386/kernel/quirks.c                          |   9
-rw-r--r--  arch/i386/kernel/signal.c                          | 109
-rw-r--r--  arch/i386/kernel/smpboot.c                         |   4
-rw-r--r--  arch/i386/kernel/syscall_table.S                   |  16
-rw-r--r--  arch/i386/kernel/timers/timer_tsc.c                |  14
-rw-r--r--  arch/i386/kernel/traps.c                           |   9
-rw-r--r--  arch/i386/mach-voyager/voyager_smp.c               |   2
-rw-r--r--  arch/i386/oprofile/backtrace.c                     |  19
-rw-r--r--  arch/i386/pci/irq.c                                |   5
-rw-r--r--  arch/i386/pci/mmconfig.c                           |  15
33 files changed, 371 insertions, 261 deletions
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index cbde675bc95c..0afec8566e7b 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -47,15 +47,6 @@ config DMI
 
 source "init/Kconfig"
 
-config DOUBLEFAULT
-	default y
-	bool "Enable doublefault exception handler" if EMBEDDED
-	help
-	  This option allows trapping of rare doublefault exceptions that
-	  would otherwise cause a system to silently reboot. Disabling this
-	  option saves about 4k and might cause you much additional grey
-	  hair.
-
 menu "Processor type and features"
 
 choice
@@ -451,12 +442,50 @@ config HIGHMEM4G
 
 config HIGHMEM64G
 	bool "64GB"
+	depends on X86_CMPXCHG64
 	help
 	  Select this if you have a 32-bit processor and more than 4
 	  gigabytes of physical RAM.
 
 endchoice
 
+choice
+	depends on EXPERIMENTAL && !X86_PAE
+	prompt "Memory split"
+	default VMSPLIT_3G
+	help
+	  Select the desired split between kernel and user memory.
+
+	  If the address range available to the kernel is less than the
+	  physical memory installed, the remaining memory will be available
+	  as "high memory". Accessing high memory is a little more costly
+	  than low memory, as it needs to be mapped into the kernel first.
+	  Note that increasing the kernel address space limits the range
+	  available to user programs, making the address space there
+	  tighter. Selecting anything other than the default 3G/1G split
+	  will also likely make your kernel incompatible with binary-only
+	  kernel modules.
+
+	  If you are not absolutely sure what you are doing, leave this
+	  option alone!
+
+	config VMSPLIT_3G
+		bool "3G/1G user/kernel split"
+	config VMSPLIT_3G_OPT
+		bool "3G/1G user/kernel split (for full 1G low memory)"
+	config VMSPLIT_2G
+		bool "2G/2G user/kernel split"
+	config VMSPLIT_1G
+		bool "1G/3G user/kernel split"
+endchoice
+
+config PAGE_OFFSET
+	hex
+	default 0xB0000000 if VMSPLIT_3G_OPT
+	default 0x78000000 if VMSPLIT_2G
+	default 0x40000000 if VMSPLIT_1G
+	default 0xC0000000
+
 config HIGHMEM
 	bool
 	depends on HIGHMEM64G || HIGHMEM4G
@@ -711,6 +740,15 @@ config HOTPLUG_CPU
 
 	  Say N.
 
+config DOUBLEFAULT
+	default y
+	bool "Enable doublefault exception handler" if EMBEDDED
+	help
+	  This option allows trapping of rare doublefault exceptions that
+	  would otherwise cause a system to silently reboot. Disabling this
+	  option saves about 4k and might cause you much additional grey
+	  hair.
+
 endmenu
 
 
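The VMSPLIT choice added above only selects the PAGE_OFFSET constant: addresses below it belong to user space, and the kernel maps low memory linearly from PAGE_OFFSET upward. A minimal standalone sketch of the arithmetic behind the four options (plain userspace C, not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            static const struct { const char *opt; unsigned long long off; } splits[] = {
                    { "VMSPLIT_3G (default)", 0xC0000000ULL },
                    { "VMSPLIT_3G_OPT",       0xB0000000ULL },
                    { "VMSPLIT_2G",           0x78000000ULL },
                    { "VMSPLIT_1G",           0x40000000ULL },
            };
            for (int i = 0; i < 4; i++)
                    printf("%-22s user %4llu MB, kernel window %4llu MB\n",
                           splits[i].opt, splits[i].off >> 20,
                           (0x100000000ULL - splits[i].off) >> 20);
            return 0;
    }

The 0xB0000000 variant trades user address space for a kernel window large enough to keep a full 1G of RAM directly mapped, which is what the "for full 1G low memory" prompt refers to.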
diff --git a/arch/i386/defconfig b/arch/i386/defconfig
index 6a431b926019..3cbe6e9cb9fc 100644
--- a/arch/i386/defconfig
+++ b/arch/i386/defconfig
@@ -644,6 +644,8 @@ CONFIG_8139TOO_PIO=y
 # CONFIG_ACENIC is not set
 # CONFIG_DL2K is not set
 # CONFIG_E1000 is not set
+# CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
diff --git a/arch/i386/kernel/acpi/Makefile b/arch/i386/kernel/acpi/Makefile
index 267ca48e1b6c..d51c7313cae8 100644
--- a/arch/i386/kernel/acpi/Makefile
+++ b/arch/i386/kernel/acpi/Makefile
@@ -3,6 +3,6 @@ obj-$(CONFIG_X86_IO_APIC) += earlyquirk.o
 obj-$(CONFIG_ACPI_SLEEP)	+= sleep.o wakeup.o
 
 ifneq ($(CONFIG_ACPI_PROCESSOR),)
-obj-y				+= cstate.o
+obj-y				+= cstate.o processor.o
 endif
 
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index 2111529dea77..79577f0ace98 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -248,10 +248,17 @@ acpi_parse_lapic(acpi_table_entry_header * header, const unsigned long end)
 
 	acpi_table_print_madt_entry(header);
 
-	/* Register even disabled CPUs for cpu hotplug */
-
-	x86_acpiid_to_apicid[processor->acpi_id] = processor->id;
+	/* Record local apic id only when enabled */
+	if (processor->flags.enabled)
+		x86_acpiid_to_apicid[processor->acpi_id] = processor->id;
 
+	/*
+	 * We need to register disabled CPU as well to permit
+	 * counting disabled CPUs. This allows us to size
+	 * cpus_possible_map more accurately, to permit
+	 * to not preallocating memory for all NR_CPUS
+	 * when we use CPU hotplug.
+	 */
 	mp_register_lapic(processor->id,	/* APIC ID */
 			  processor->flags.enabled);	/* Enabled? */
 
@@ -464,7 +471,7 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
  * success: return IRQ number (>=0)
  * failure: return < 0
  */
-int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
+int acpi_register_gsi(u32 gsi, int triggering, int polarity)
 {
 	unsigned int irq;
 	unsigned int plat_gsi = gsi;
@@ -476,14 +483,14 @@ int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
 	if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
 		extern void eisa_set_level_irq(unsigned int irq);
 
-		if (edge_level == ACPI_LEVEL_SENSITIVE)
+		if (triggering == ACPI_LEVEL_SENSITIVE)
 			eisa_set_level_irq(gsi);
 	}
 #endif
 
 #ifdef CONFIG_X86_IO_APIC
 	if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
-		plat_gsi = mp_register_gsi(gsi, edge_level, active_high_low);
+		plat_gsi = mp_register_gsi(gsi, triggering, polarity);
 	}
 #endif
 	acpi_gsi_to_irq(plat_gsi, &irq);
diff --git a/arch/i386/kernel/acpi/cstate.c b/arch/i386/kernel/acpi/cstate.c
index 4c3036ba65df..25db49ef1770 100644
--- a/arch/i386/kernel/acpi/cstate.c
+++ b/arch/i386/kernel/acpi/cstate.c
@@ -14,64 +14,6 @@
 #include <acpi/processor.h>
 #include <asm/acpi.h>
 
-static void acpi_processor_power_init_intel_pdc(struct acpi_processor_power
-						*pow)
-{
-	struct acpi_object_list *obj_list;
-	union acpi_object *obj;
-	u32 *buf;
-
-	/* allocate and initialize pdc. It will be used later. */
-	obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
-	if (!obj_list) {
-		printk(KERN_ERR "Memory allocation error\n");
-		return;
-	}
-
-	obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
-	if (!obj) {
-		printk(KERN_ERR "Memory allocation error\n");
-		kfree(obj_list);
-		return;
-	}
-
-	buf = kmalloc(12, GFP_KERNEL);
-	if (!buf) {
-		printk(KERN_ERR "Memory allocation error\n");
-		kfree(obj);
-		kfree(obj_list);
-		return;
-	}
-
-	buf[0] = ACPI_PDC_REVISION_ID;
-	buf[1] = 1;
-	buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
-
-	obj->type = ACPI_TYPE_BUFFER;
-	obj->buffer.length = 12;
-	obj->buffer.pointer = (u8 *) buf;
-	obj_list->count = 1;
-	obj_list->pointer = obj;
-	pow->pdc = obj_list;
-
-	return;
-}
-
-/* Initialize _PDC data based on the CPU vendor */
-void acpi_processor_power_init_pdc(struct acpi_processor_power *pow,
-				   unsigned int cpu)
-{
-	struct cpuinfo_x86 *c = cpu_data + cpu;
-
-	pow->pdc = NULL;
-	if (c->x86_vendor == X86_VENDOR_INTEL)
-		acpi_processor_power_init_intel_pdc(pow);
-
-	return;
-}
-
-EXPORT_SYMBOL(acpi_processor_power_init_pdc);
-
 /*
  * Initialize bm_flags based on the CPU cache properties
  * On SMP it depends on cache configuration
diff --git a/arch/i386/kernel/acpi/processor.c b/arch/i386/kernel/acpi/processor.c
new file mode 100644
index 000000000000..9f4cc02717ec
--- /dev/null
+++ b/arch/i386/kernel/acpi/processor.c
@@ -0,0 +1,75 @@
+/*
+ * arch/i386/kernel/acpi/processor.c
+ *
+ * Copyright (C) 2005 Intel Corporation
+ * 	Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ * 	- Added _PDC for platforms with Intel CPUs
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+
+#include <acpi/processor.h>
+#include <asm/acpi.h>
+
+static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
+{
+	struct acpi_object_list *obj_list;
+	union acpi_object *obj;
+	u32 *buf;
+
+	/* allocate and initialize pdc. It will be used later. */
+	obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
+	if (!obj_list) {
+		printk(KERN_ERR "Memory allocation error\n");
+		return;
+	}
+
+	obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
+	if (!obj) {
+		printk(KERN_ERR "Memory allocation error\n");
+		kfree(obj_list);
+		return;
+	}
+
+	buf = kmalloc(12, GFP_KERNEL);
+	if (!buf) {
+		printk(KERN_ERR "Memory allocation error\n");
+		kfree(obj);
+		kfree(obj_list);
+		return;
+	}
+
+	buf[0] = ACPI_PDC_REVISION_ID;
+	buf[1] = 1;
+	buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
+
+	if (cpu_has(c, X86_FEATURE_EST))
+		buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
+
+	obj->type = ACPI_TYPE_BUFFER;
+	obj->buffer.length = 12;
+	obj->buffer.pointer = (u8 *) buf;
+	obj_list->count = 1;
+	obj_list->pointer = obj;
+	pr->pdc = obj_list;
+
+	return;
+}
+
+/* Initialize _PDC data based on the CPU vendor */
+void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
+{
+	unsigned int cpu = pr->id;
+	struct cpuinfo_x86 *c = cpu_data + cpu;
+
+	pr->pdc = NULL;
+	if (c->x86_vendor == X86_VENDOR_INTEL)
+		init_intel_pdc(pr, c);
+
+	return;
+}
+
+EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
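For reference, the 12-byte buffer handed to _PDC above is three dwords; a hedged sketch of the layout (field names are illustrative, only the dword order is taken from the code):

    #include <stdint.h>

    struct pdc_buffer {
            uint32_t revision;      /* ACPI_PDC_REVISION_ID */
            uint32_t count;         /* capability dwords that follow: 1 */
            uint32_t capabilities;  /* C-state SMP bit, OR'ed with the EST
                                       bit when the CPU has that feature */
    };

Centralizing this in one arch hook is what allows the per-driver _PDC copies to be deleted from cstate.c above and from the two cpufreq drivers further down.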
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index acd3f1e34ca6..f39e09ef64ec 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -75,8 +75,10 @@ void ack_bad_irq(unsigned int irq)
 	 * holds up an irq slot - in excessive cases (when multiple
 	 * unexpected vectors occur) that might lock up the APIC
 	 * completely.
+	 * But only ack when the APIC is enabled -AK
 	 */
-	ack_APIC_irq();
+	if (cpu_has_apic)
+		ack_APIC_irq();
 }
 
 void __init apic_intr_init(void)
@@ -1303,6 +1305,7 @@ int __init APIC_init_uniprocessor (void)
 	if (!cpu_has_apic && APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
 		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
 			boot_cpu_physical_apicid);
+		clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
 		return -1;
 	}
 
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index 333578a4e91a..0810f81f2a05 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -282,3 +282,11 @@ int __init amd_init_cpu(void)
 }
 
 //early_arch_initcall(amd_init_cpu);
+
+static int __init amd_exit_cpu(void)
+{
+	cpu_devs[X86_VENDOR_AMD] = NULL;
+	return 0;
+}
+
+late_initcall(amd_exit_cpu);
diff --git a/arch/i386/kernel/cpu/centaur.c b/arch/i386/kernel/cpu/centaur.c
index 394814e57672..f52669ecb93f 100644
--- a/arch/i386/kernel/cpu/centaur.c
+++ b/arch/i386/kernel/cpu/centaur.c
@@ -405,10 +405,6 @@ static void __init init_centaur(struct cpuinfo_x86 *c)
 		winchip2_protect_mcr();
 #endif
 		break;
-	case 10:
-		name="4";
-		/* no info on the WC4 yet */
-		break;
 	default:
 		name="??";
 	}
@@ -474,3 +470,11 @@ int __init centaur_init_cpu(void)
 }
 
 //early_arch_initcall(centaur_init_cpu);
+
+static int __init centaur_exit_cpu(void)
+{
+	cpu_devs[X86_VENDOR_CENTAUR] = NULL;
+	return 0;
+}
+
+late_initcall(centaur_exit_cpu);
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 15aee26ec2b6..7eb9213734a3 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -44,6 +44,7 @@ static void default_init(struct cpuinfo_x86 * c)
 
 static struct cpu_dev default_cpu = {
 	.c_init	= default_init,
+	.c_vendor = "Unknown",
 };
 static struct cpu_dev * this_cpu = &default_cpu;
 
@@ -150,6 +151,7 @@ static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
 {
 	char *v = c->x86_vendor_id;
 	int i;
+	static int printed;
 
 	for (i = 0; i < X86_VENDOR_NUM; i++) {
 		if (cpu_devs[i]) {
@@ -159,10 +161,17 @@ static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
 			c->x86_vendor = i;
 			if (!early)
 				this_cpu = cpu_devs[i];
-			break;
+			return;
 			}
 		}
 	}
+	if (!printed) {
+		printed++;
+		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
+		printk(KERN_ERR "CPU: Your system may be unstable.\n");
+	}
+	c->x86_vendor = X86_VENDOR_UNKNOWN;
+	this_cpu = &default_cpu;
 }
167 176
168 177
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 7975e79d5fa4..3852d0a4c1b5 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -295,68 +295,6 @@ acpi_cpufreq_guess_freq (
 }
 
 
-/*
- * acpi_processor_cpu_init_pdc_est - let BIOS know about the SMP capabilities
- * of this driver
- * @perf: processor-specific acpi_io_data struct
- * @cpu: CPU being initialized
- *
- * To avoid issues with legacy OSes, some BIOSes require to be informed of
- * the SMP capabilities of OS P-state driver. Here we set the bits in _PDC
- * accordingly, for Enhanced Speedstep. Actual call to _PDC is done in
- * driver/acpi/processor.c
- */
-static void
-acpi_processor_cpu_init_pdc_est(
-		struct acpi_processor_performance *perf,
-		unsigned int cpu,
-		struct acpi_object_list *obj_list
-		)
-{
-	union acpi_object *obj;
-	u32 *buf;
-	struct cpuinfo_x86 *c = cpu_data + cpu;
-	dprintk("acpi_processor_cpu_init_pdc_est\n");
-
-	if (!cpu_has(c, X86_FEATURE_EST))
-		return;
-
-	/* Initialize pdc. It will be used later. */
-	if (!obj_list)
-		return;
-
-	if (!(obj_list->count && obj_list->pointer))
-		return;
-
-	obj = obj_list->pointer;
-	if ((obj->buffer.length == 12) && obj->buffer.pointer) {
-		buf = (u32 *)obj->buffer.pointer;
-		buf[0] = ACPI_PDC_REVISION_ID;
-		buf[1] = 1;
-		buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
-		perf->pdc = obj_list;
-	}
-	return;
-}
-
-
-/* CPU specific PDC initialization */
-static void
-acpi_processor_cpu_init_pdc(
-		struct acpi_processor_performance *perf,
-		unsigned int cpu,
-		struct acpi_object_list *obj_list
-		)
-{
-	struct cpuinfo_x86 *c = cpu_data + cpu;
-	dprintk("acpi_processor_cpu_init_pdc\n");
-	perf->pdc = NULL;
-	if (cpu_has(c, X86_FEATURE_EST))
-		acpi_processor_cpu_init_pdc_est(perf, cpu, obj_list);
-	return;
-}
-
-
 static int
 acpi_cpufreq_cpu_init (
 	struct cpufreq_policy *policy)
@@ -367,14 +305,7 @@ acpi_cpufreq_cpu_init (
 	unsigned int result = 0;
 	struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
 
-	union acpi_object		arg0 = {ACPI_TYPE_BUFFER};
-	u32				arg0_buf[3];
-	struct acpi_object_list		arg_list = {1, &arg0};
-
 	dprintk("acpi_cpufreq_cpu_init\n");
-	/* setup arg_list for _PDC settings */
-	arg0.buffer.length = 12;
-	arg0.buffer.pointer = (u8 *) arg0_buf;
 
 	data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
 	if (!data)
@@ -382,9 +313,7 @@ acpi_cpufreq_cpu_init (
 
 	acpi_io_data[cpu] = data;
 
-	acpi_processor_cpu_init_pdc(&data->acpi_data, cpu, &arg_list);
 	result = acpi_processor_register_performance(&data->acpi_data, cpu);
-	data->acpi_data.pdc = NULL;
 
 	if (result)
 		goto err_free;
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index 9a826cde4fd1..c173c0fa117a 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -362,22 +362,10 @@ static struct acpi_processor_performance p;
  */
 static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
 {
-	union acpi_object		arg0 = {ACPI_TYPE_BUFFER};
-	u32				arg0_buf[3];
-	struct acpi_object_list		arg_list = {1, &arg0};
 	unsigned long			cur_freq;
 	int				result = 0, i;
 	unsigned int			cpu = policy->cpu;
 
-	/* _PDC settings */
-	arg0.buffer.length = 12;
-	arg0.buffer.pointer = (u8 *) arg0_buf;
-	arg0_buf[0] = ACPI_PDC_REVISION_ID;
-	arg0_buf[1] = 1;
-	arg0_buf[2] = ACPI_PDC_EST_CAPABILITY_SMP_MSR;
-
-	p.pdc = &arg_list;
-
 	/* register with ACPI core */
 	if (acpi_processor_register_performance(&p, cpu)) {
 		dprintk(KERN_INFO PFX "obtaining ACPI data failed\n");
diff --git a/arch/i386/kernel/cpu/cyrix.c b/arch/i386/kernel/cpu/cyrix.c
index 75015975d038..00f2e058797c 100644
--- a/arch/i386/kernel/cpu/cyrix.c
+++ b/arch/i386/kernel/cpu/cyrix.c
@@ -345,7 +345,7 @@ static void __init init_cyrix(struct cpuinfo_x86 *c)
 /*
  * Handle National Semiconductor branded processors
  */
-static void __devinit init_nsc(struct cpuinfo_x86 *c)
+static void __init init_nsc(struct cpuinfo_x86 *c)
 {
 	/* There may be GX1 processors in the wild that are branded
 	 * NSC and not Cyrix.
@@ -444,6 +444,14 @@ int __init cyrix_init_cpu(void)
 
 //early_arch_initcall(cyrix_init_cpu);
 
+static int __init cyrix_exit_cpu(void)
+{
+	cpu_devs[X86_VENDOR_CYRIX] = NULL;
+	return 0;
+}
+
+late_initcall(cyrix_exit_cpu);
+
 static struct cpu_dev nsc_cpu_dev __initdata = {
 	.c_vendor = "NSC",
 	.c_ident = { "Geode by NSC" },
@@ -458,3 +466,11 @@ int __init nsc_init_cpu(void)
 }
 
 //early_arch_initcall(nsc_init_cpu);
+
+static int __init nsc_exit_cpu(void)
+{
+	cpu_devs[X86_VENDOR_NSC] = NULL;
+	return 0;
+}
+
+late_initcall(nsc_exit_cpu);
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index fbfd374aa336..ffe58cee0c48 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -43,13 +43,23 @@ static struct _cache_table cache_table[] __cpuinitdata =
 	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
 	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
 	{ 0x39, LVL_2,      128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
+	{ 0x3a, LVL_2,      192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
 	{ 0x3b, LVL_2,      128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
 	{ 0x3c, LVL_2,      256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
+	{ 0x3d, LVL_2,      384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
+	{ 0x3e, LVL_2,      512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
 	{ 0x41, LVL_2,      128 },	/* 4-way set assoc, 32 byte line size */
 	{ 0x42, LVL_2,      256 },	/* 4-way set assoc, 32 byte line size */
 	{ 0x43, LVL_2,      512 },	/* 4-way set assoc, 32 byte line size */
 	{ 0x44, LVL_2,      1024 },	/* 4-way set assoc, 32 byte line size */
 	{ 0x45, LVL_2,      2048 },	/* 4-way set assoc, 32 byte line size */
+	{ 0x46, LVL_3,      4096 },	/* 4-way set assoc, 64 byte line size */
+	{ 0x47, LVL_3,      8192 },	/* 8-way set assoc, 64 byte line size */
+	{ 0x49, LVL_3,      4096 },	/* 16-way set assoc, 64 byte line size */
+	{ 0x4a, LVL_3,      6144 },	/* 12-way set assoc, 64 byte line size */
+	{ 0x4b, LVL_3,      8192 },	/* 16-way set assoc, 64 byte line size */
+	{ 0x4c, LVL_3,      12288 },	/* 12-way set assoc, 64 byte line size */
+	{ 0x4d, LVL_3,      16384 },	/* 16-way set assoc, 64 byte line size */
 	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
 	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
 	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
@@ -57,6 +67,7 @@ static struct _cache_table cache_table[] __cpuinitdata =
 	{ 0x70, LVL_TRACE,  12 },	/* 8-way set assoc */
 	{ 0x71, LVL_TRACE,  16 },	/* 8-way set assoc */
 	{ 0x72, LVL_TRACE,  32 },	/* 8-way set assoc */
+	{ 0x73, LVL_TRACE,  64 },	/* 8-way set assoc */
 	{ 0x78, LVL_2,      1024 },	/* 4-way set assoc, 64 byte line size */
 	{ 0x79, LVL_2,      128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
 	{ 0x7a, LVL_2,      256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
@@ -141,6 +152,7 @@ static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_le
 		return 0;
 }
 
+/* will only be called once; __init is safe here */
 static int __init find_num_cache_leaves(void)
 {
 	unsigned int eax, ebx, ecx, edx;
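The cache_table above maps descriptor bytes reported by CPUID leaf 2 to a cache level and size; the added entries cover later L2/L3 and trace-cache descriptors. A standalone sketch of the lookup idea (simplified types, not the kernel code):

    #include <stdint.h>
    #include <stdio.h>

    struct cache_desc { uint8_t descriptor; const char *kind; int size_kb; };

    static const struct cache_desc table[] = {
            { 0x3a, "L2",    192 },         /* one of the entries added here */
            { 0x49, "L3",   4096 },
            { 0x73, "trace",  64 },
    };

    static const struct cache_desc *lookup(uint8_t desc)
    {
            for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                    if (table[i].descriptor == desc)
                            return &table[i];
            return 0;                       /* unknown descriptor: ignore */
    }

    int main(void)
    {
            const struct cache_desc *d = lookup(0x49);
            if (d)
                    printf("%s cache, %d KB\n", d->kind, d->size_kb);
            return 0;
    }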
diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c
index 1e9db198c440..3b4618bed70d 100644
--- a/arch/i386/kernel/cpu/mtrr/main.c
+++ b/arch/i386/kernel/cpu/mtrr/main.c
@@ -44,12 +44,10 @@
 #include <asm/msr.h>
 #include "mtrr.h"
 
-#define MTRR_VERSION	"2.0 (20020519)"
-
 u32 num_var_ranges = 0;
 
 unsigned int *usage_table;
-static DECLARE_MUTEX(main_lock);
+static DECLARE_MUTEX(mtrr_sem);
 
 u32 size_or_mask, size_and_mask;
 
@@ -335,7 +333,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
 	/* No CPU hotplug when we change MTRR entries */
 	lock_cpu_hotplug();
 	/* Search for existing MTRR */
-	down(&main_lock);
+	down(&mtrr_sem);
 	for (i = 0; i < num_var_ranges; ++i) {
 		mtrr_if->get(i, &lbase, &lsize, &ltype);
 		if (base >= lbase + lsize)
@@ -373,7 +371,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
 		printk(KERN_INFO "mtrr: no more MTRRs available\n");
 	error = i;
  out:
-	up(&main_lock);
+	up(&mtrr_sem);
 	unlock_cpu_hotplug();
 	return error;
 }
@@ -466,7 +464,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
 	max = num_var_ranges;
 	/* No CPU hotplug when we change MTRR entries */
 	lock_cpu_hotplug();
-	down(&main_lock);
+	down(&mtrr_sem);
 	if (reg < 0) {
 		/* Search for existing MTRR */
 		for (i = 0; i < max; ++i) {
@@ -505,7 +503,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
 		set_mtrr(reg, 0, 0, 0);
 	error = reg;
  out:
-	up(&main_lock);
+	up(&mtrr_sem);
 	unlock_cpu_hotplug();
 	return error;
 }
@@ -671,7 +669,6 @@ void __init mtrr_bp_init(void)
 			break;
 		}
 	}
-	printk(KERN_INFO "mtrr: v%s\n",MTRR_VERSION);
 
 	if (mtrr_if) {
 		set_num_var_ranges();
@@ -688,7 +685,7 @@ void mtrr_ap_init(void)
 	if (!mtrr_if || !use_intel())
 		return;
 	/*
-	 * Ideally we should hold main_lock here to avoid mtrr entries changed,
+	 * Ideally we should hold mtrr_sem here to avoid mtrr entries changed,
 	 * but this routine will be called in cpu boot time, holding the lock
 	 * breaks it. This routine is called in two cases: 1.very earily time
 	 * of software resume, when there absolutely isn't mtrr entry changes;
diff --git a/arch/i386/kernel/cpu/nexgen.c b/arch/i386/kernel/cpu/nexgen.c
index 30898a260a5c..ad87fa58058d 100644
--- a/arch/i386/kernel/cpu/nexgen.c
+++ b/arch/i386/kernel/cpu/nexgen.c
@@ -61,3 +61,11 @@ int __init nexgen_init_cpu(void)
 }
 
 //early_arch_initcall(nexgen_init_cpu);
+
+static int __init nexgen_exit_cpu(void)
+{
+	cpu_devs[X86_VENDOR_NEXGEN] = NULL;
+	return 0;
+}
+
+late_initcall(nexgen_exit_cpu);
diff --git a/arch/i386/kernel/cpu/rise.c b/arch/i386/kernel/cpu/rise.c
index 8602425628ca..d08d5a2811c8 100644
--- a/arch/i386/kernel/cpu/rise.c
+++ b/arch/i386/kernel/cpu/rise.c
@@ -51,3 +51,11 @@ int __init rise_init_cpu(void)
 }
 
 //early_arch_initcall(rise_init_cpu);
+
+static int __init rise_exit_cpu(void)
+{
+	cpu_devs[X86_VENDOR_RISE] = NULL;
+	return 0;
+}
+
+late_initcall(rise_exit_cpu);
diff --git a/arch/i386/kernel/cpu/transmeta.c b/arch/i386/kernel/cpu/transmeta.c
index fc426380366b..bdbeb77f4e22 100644
--- a/arch/i386/kernel/cpu/transmeta.c
+++ b/arch/i386/kernel/cpu/transmeta.c
@@ -84,7 +84,7 @@ static void __init init_transmeta(struct cpuinfo_x86 *c)
 #endif
 }
 
-static void transmeta_identify(struct cpuinfo_x86 * c)
+static void __init transmeta_identify(struct cpuinfo_x86 * c)
 {
 	u32 xlvl;
 	generic_identify(c);
@@ -111,3 +111,11 @@ int __init transmeta_init_cpu(void)
 }
 
 //early_arch_initcall(transmeta_init_cpu);
+
+static int __init transmeta_exit_cpu(void)
+{
+	cpu_devs[X86_VENDOR_TRANSMETA] = NULL;
+	return 0;
+}
+
+late_initcall(transmeta_exit_cpu);
diff --git a/arch/i386/kernel/cpu/umc.c b/arch/i386/kernel/cpu/umc.c
index 264fcad559d5..2cd988f6dc55 100644
--- a/arch/i386/kernel/cpu/umc.c
+++ b/arch/i386/kernel/cpu/umc.c
@@ -31,3 +31,11 @@ int __init umc_init_cpu(void)
 }
 
 //early_arch_initcall(umc_init_cpu);
+
+static int __init umc_exit_cpu(void)
+{
+	cpu_devs[X86_VENDOR_UMC] = NULL;
+	return 0;
+}
+
+late_initcall(umc_exit_cpu);
diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S
index 5884469f6bfe..2bee6499edd9 100644
--- a/arch/i386/kernel/head.S
+++ b/arch/i386/kernel/head.S
@@ -398,7 +398,11 @@ ignore_int:
 	pushl 32(%esp)
 	pushl 40(%esp)
 	pushl $int_msg
+#ifdef CONFIG_EARLY_PRINTK
+	call early_printk
+#else
 	call printk
+#endif
 	addl $(5*4),%esp
 	popl %ds
 	popl %es
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index 91a64016956e..0102f3d50e57 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -1080,7 +1080,7 @@ void __init mp_config_acpi_legacy_irqs (void)
 
 #define MAX_GSI_NUM	4096
 
-int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
+int mp_register_gsi (u32 gsi, int triggering, int polarity)
 {
 	int			ioapic = -1;
 	int			ioapic_pin = 0;
@@ -1129,7 +1129,7 @@ int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
 
 	mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
 
-	if (edge_level) {
+	if (triggering == ACPI_LEVEL_SENSITIVE) {
 		/*
 		 * For PCI devices assign IRQs in order, avoiding gaps
 		 * due to unused I/O APIC pins.
@@ -1151,8 +1151,8 @@ int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
 	}
 
 	io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
-		edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
-		active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
+		triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
+		polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
 	return gsi;
 }
 
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index d661703ac1cb..63f39a7e2c96 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -138,7 +138,7 @@ static int __init check_nmi_watchdog(void)
 	if (nmi_watchdog == NMI_LOCAL_APIC)
 		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++)
+	for_each_cpu(cpu)
 		prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
 	local_irq_enable();
 	mdelay((10*1000)/nmi_hz); // wait 10 ticks
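for_each_cpu() visits only the CPUs set in cpu_possible_map, which this patch now sizes from the MADT instead of assuming CPU_MASK_ALL (see the boot.c and smpboot.c changes). An illustrative userspace analog of the loop change, with a plain bitmask standing in for the cpumask:

    #include <stdio.h>

    #define NR_SLOTS 32                     /* stand-in for NR_CPUS */

    int main(void)
    {
            unsigned int possible_mask = 0x0f;      /* say, four possible CPUs */

            for (int cpu = 0; cpu < NR_SLOTS; cpu++)
                    if (possible_mask & (1u << cpu))
                            printf("sampling NMI count for cpu %d\n", cpu);
            return 0;
    }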
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 2185377fdde1..0480454ebffa 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -297,8 +297,10 @@ void show_regs(struct pt_regs * regs)
 
 	if (user_mode(regs))
 		printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
-	printk(" EFLAGS: %08lx    %s  (%s)\n",
-	       regs->eflags, print_tainted(), system_utsname.release);
+	printk(" EFLAGS: %08lx    %s  (%s %.*s)\n",
+	       regs->eflags, print_tainted(), system_utsname.release,
+	       (int)strcspn(system_utsname.version, " "),
+	       system_utsname.version);
 	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
 		regs->eax,regs->ebx,regs->ecx,regs->edx);
 	printk("ESI: %08lx EDI: %08lx EBP: %08lx",
diff --git a/arch/i386/kernel/quirks.c b/arch/i386/kernel/quirks.c
index aaf89cb2bc51..87ccdac84928 100644
--- a/arch/i386/kernel/quirks.c
+++ b/arch/i386/kernel/quirks.c
@@ -25,8 +25,7 @@ static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
 
 	/* enable access to config space*/
 	pci_read_config_byte(dev, 0xf4, &config);
-	config |= 0x2;
-	pci_write_config_byte(dev, 0xf4, config);
+	pci_write_config_byte(dev, 0xf4, config|0x2);
 
 	/* read xTPR register */
 	raw_pci_ops->read(0, 0, 0x40, 0x4c, 2, &word);
@@ -42,9 +41,9 @@ static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
 #endif
 	}
 
-	config &= ~0x2;
-	/* disable access to config space*/
-	pci_write_config_byte(dev, 0xf4, config);
+	/* put back the original value for config space*/
+	if (!(config & 0x2))
+		pci_write_config_byte(dev, 0xf4, config);
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_intel_irqbalance);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance);
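The rewritten quirk keeps the register's original value and restores it only when the access-enable bit was clear to begin with, so a bit already set by firmware is left alone. A standalone sketch of that save/restore pattern (read_cfg/write_cfg are stand-ins, not kernel APIs):

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t cfg_shadow = 0x01;       /* pretend register; bit 1 clear */
    static uint8_t read_cfg(void) { return cfg_shadow; }
    static void write_cfg(uint8_t v) { cfg_shadow = v; }

    static void quirk(void)
    {
            uint8_t config = read_cfg();    /* remember the original value */

            write_cfg(config | 0x2);        /* enable config-space access */
            /* ... read/write other registers here ... */

            /* restore only if firmware had not already set the enable bit */
            if (!(config & 0x2))
                    write_cfg(config);
    }

    int main(void)
    {
            quirk();
            printf("register after quirk: 0x%02x\n", cfg_shadow);  /* 0x01 */
            return 0;
    }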
diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c
index adcd069db91e..963616d364ec 100644
--- a/arch/i386/kernel/signal.c
+++ b/arch/i386/kernel/signal.c
@@ -37,51 +37,17 @@
 asmlinkage int
 sys_sigsuspend(int history0, int history1, old_sigset_t mask)
 {
-	struct pt_regs * regs = (struct pt_regs *) &history0;
-	sigset_t saveset;
-
 	mask &= _BLOCKABLE;
 	spin_lock_irq(&current->sighand->siglock);
-	saveset = current->blocked;
+	current->saved_sigmask = current->blocked;
 	siginitset(&current->blocked, mask);
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
-	regs->eax = -EINTR;
-	while (1) {
-		current->state = TASK_INTERRUPTIBLE;
-		schedule();
-		if (do_signal(regs, &saveset))
-			return -EINTR;
-	}
-}
-
-asmlinkage int
-sys_rt_sigsuspend(struct pt_regs regs)
-{
-	sigset_t saveset, newset;
-
-	/* XXX: Don't preclude handling different sized sigset_t's. */
-	if (regs.ecx != sizeof(sigset_t))
-		return -EINVAL;
-
-	if (copy_from_user(&newset, (sigset_t __user *)regs.ebx, sizeof(newset)))
-		return -EFAULT;
-	sigdelsetmask(&newset, ~_BLOCKABLE);
-
-	spin_lock_irq(&current->sighand->siglock);
-	saveset = current->blocked;
-	current->blocked = newset;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-
-	regs.eax = -EINTR;
-	while (1) {
-		current->state = TASK_INTERRUPTIBLE;
-		schedule();
-		if (do_signal(&regs, &saveset))
-			return -EINTR;
-	}
+	current->state = TASK_INTERRUPTIBLE;
+	schedule();
+	set_thread_flag(TIF_RESTORE_SIGMASK);
+	return -ERESTARTNOHAND;
 }
 
 asmlinkage int
@@ -433,11 +399,11 @@ static int setup_frame(int sig, struct k_sigaction *ka,
 		current->comm, current->pid, frame, regs->eip, frame->pretcode);
 #endif
 
-	return 1;
+	return 0;
 
 give_sigsegv:
 	force_sigsegv(sig, current);
-	return 0;
+	return -EFAULT;
 }
 
 static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
@@ -527,11 +493,11 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		current->comm, current->pid, frame, regs->eip, frame->pretcode);
 #endif
 
-	return 1;
+	return 0;
 
 give_sigsegv:
 	force_sigsegv(sig, current);
-	return 0;
+	return -EFAULT;
 }
 
 /*
@@ -581,7 +547,7 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
 	else
 		ret = setup_frame(sig, ka, oldset, regs);
 
-	if (ret) {
+	if (ret == 0) {
 		spin_lock_irq(&current->sighand->siglock);
 		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
 		if (!(ka->sa.sa_flags & SA_NODEFER))
@@ -598,11 +564,12 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
  * want to handle. Thus you cannot kill init even with a SIGKILL even by
  * mistake.
  */
-int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
+static void fastcall do_signal(struct pt_regs *regs)
 {
 	siginfo_t info;
 	int signr;
 	struct k_sigaction ka;
+	sigset_t *oldset;
 
 	/*
 	 * We want the common case to go fast, which
@@ -613,12 +580,14 @@ int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
 	 * CS suffices.
 	 */
 	if (!user_mode(regs))
-		return 1;
+		return;
 
 	if (try_to_freeze())
 		goto no_signal;
 
-	if (!oldset)
+	if (test_thread_flag(TIF_RESTORE_SIGMASK))
+		oldset = &current->saved_sigmask;
+	else
 		oldset = &current->blocked;
 
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
@@ -628,38 +597,55 @@ int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
 		 * have been cleared if the watchpoint triggered
 		 * inside the kernel.
 		 */
-		if (unlikely(current->thread.debugreg[7])) {
+		if (unlikely(current->thread.debugreg[7]))
 			set_debugreg(current->thread.debugreg[7], 7);
-		}
 
 		/* Whee!  Actually deliver the signal.  */
-		return handle_signal(signr, &info, &ka, oldset, regs);
+		if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
+			/* a signal was successfully delivered; the saved
+			 * sigmask will have been stored in the signal frame,
+			 * and will be restored by sigreturn, so we can simply
+			 * clear the TIF_RESTORE_SIGMASK flag */
+			if (test_thread_flag(TIF_RESTORE_SIGMASK))
+				clear_thread_flag(TIF_RESTORE_SIGMASK);
+		}
+
+		return;
 	}
 
- no_signal:
+no_signal:
 	/* Did we come from a system call? */
 	if (regs->orig_eax >= 0) {
 		/* Restart the system call - no handlers present */
-		if (regs->eax == -ERESTARTNOHAND ||
-		    regs->eax == -ERESTARTSYS ||
-		    regs->eax == -ERESTARTNOINTR) {
+		switch (regs->eax) {
+		case -ERESTARTNOHAND:
+		case -ERESTARTSYS:
+		case -ERESTARTNOINTR:
 			regs->eax = regs->orig_eax;
 			regs->eip -= 2;
-		}
-		if (regs->eax == -ERESTART_RESTARTBLOCK){
+			break;
+
+		case -ERESTART_RESTARTBLOCK:
 			regs->eax = __NR_restart_syscall;
 			regs->eip -= 2;
+			break;
 		}
 	}
-	return 0;
+
+	/* if there's no signal to deliver, we just put the saved sigmask
+	 * back */
+	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+		clear_thread_flag(TIF_RESTORE_SIGMASK);
+		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+	}
 }
 
 /*
  * notification of userspace execution resumption
- * - triggered by current->work.notify_resume
+ * - triggered by the TIF_WORK_MASK flags
  */
 __attribute__((regparm(3)))
-void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
+void do_notify_resume(struct pt_regs *regs, void *_unused,
 		      __u32 thread_info_flags)
 {
 	/* Pending single-step? */
@@ -667,9 +653,10 @@ void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
 		regs->eflags |= TF_MASK;
 		clear_thread_flag(TIF_SINGLESTEP);
 	}
+
 	/* deal with pending signal delivery */
-	if (thread_info_flags & _TIF_SIGPENDING)
-		do_signal(regs,oldset);
+	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
+		do_signal(regs);
 
 	clear_thread_flag(TIF_IRET);
 }
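The saved_sigmask/TIF_RESTORE_SIGMASK machinery implements the POSIX contract of sigsuspend(): swap the signal mask and sleep atomically, and put the old mask back only once a handler has run. A runnable userspace demonstration of the semantics the kernel side must provide:

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static volatile sig_atomic_t got_usr1;
    static void handler(int sig) { (void)sig; got_usr1 = 1; }

    int main(void)
    {
            sigset_t block, waitmask;

            signal(SIGUSR1, handler);
            sigemptyset(&block);
            sigaddset(&block, SIGUSR1);
            sigprocmask(SIG_BLOCK, &block, NULL);   /* SIGUSR1 now blocked */

            kill(getpid(), SIGUSR1);                /* stays pending for now */

            sigemptyset(&waitmask);
            sigsuspend(&waitmask);  /* atomically: install empty mask, sleep,
                                       run handler, then restore old mask */

            printf("handler ran: %d\n", got_usr1);  /* prints 1 */
            return 0;
    }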
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 255adb498268..fb00ab7b7612 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -87,11 +87,7 @@ EXPORT_SYMBOL(cpu_online_map);
 cpumask_t cpu_callin_map;
 cpumask_t cpu_callout_map;
 EXPORT_SYMBOL(cpu_callout_map);
-#ifdef CONFIG_HOTPLUG_CPU
-cpumask_t cpu_possible_map = CPU_MASK_ALL;
-#else
 cpumask_t cpu_possible_map;
-#endif
 EXPORT_SYMBOL(cpu_possible_map);
 static cpumask_t smp_commenced_mask;
 
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S
index 6ff3e5243226..ac687d00a1ce 100644
--- a/arch/i386/kernel/syscall_table.S
+++ b/arch/i386/kernel/syscall_table.S
@@ -294,3 +294,19 @@ ENTRY(sys_call_table)
 	.long sys_inotify_add_watch
 	.long sys_inotify_rm_watch
 	.long sys_migrate_pages
+	.long sys_openat		/* 295 */
+	.long sys_mkdirat
+	.long sys_mknodat
+	.long sys_fchownat
+	.long sys_futimesat
+	.long sys_fstatat64		/* 300 */
+	.long sys_unlinkat
+	.long sys_renameat
+	.long sys_linkat
+	.long sys_symlinkat
+	.long sys_readlinkat		/* 305 */
+	.long sys_fchmodat
+	.long sys_faccessat
+	.long sys_pselect6
+	.long sys_ppoll
+	.long sys_unshare		/* 310 */
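The new entries are reachable by number as soon as the kernel is booted, even before libc grows wrappers. A small runnable example driving sys_openat through syscall(2) (assuming the libc headers define SYS_openat; it resolves to 295 on i386 per the table above, and /etc/hostname is just an example path):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    int main(void)
    {
            int fd = syscall(SYS_openat, AT_FDCWD, "/etc/hostname", O_RDONLY);

            if (fd < 0) {
                    perror("openat");
                    return 1;
            }
            printf("openat returned fd %d\n", fd);
            close(fd);
            return 0;
    }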
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c
index 591a642af884..a7f5a2aceba2 100644
--- a/arch/i386/kernel/timers/timer_tsc.c
+++ b/arch/i386/kernel/timers/timer_tsc.c
@@ -45,6 +45,15 @@ static unsigned long last_tsc_high; /* msb 32 bits of Time Stamp Counter */
 static unsigned long long monotonic_base;
 static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
 
+/* Avoid compensating for lost ticks before TSCs are synched */
+static int detect_lost_ticks;
+static int __init start_lost_tick_compensation(void)
+{
+	detect_lost_ticks = 1;
+	return 0;
+}
+late_initcall(start_lost_tick_compensation);
+
 /* convert from cycles(64bits) => nanoseconds (64bits)
  *  basic equation:
  *	ns = cycles / (freq / ns_per_sec)
@@ -196,7 +205,8 @@ static void mark_offset_tsc_hpet(void)
 
 	/* lost tick compensation */
 	offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
-	if (unlikely(((offset - hpet_last) > hpet_tick) && (hpet_last != 0))) {
+	if (unlikely(((offset - hpet_last) > hpet_tick) && (hpet_last != 0))
+					&& detect_lost_ticks) {
 		int lost_ticks = (offset - hpet_last) / hpet_tick;
 		jiffies_64 += lost_ticks;
 	}
@@ -426,7 +436,7 @@ static void mark_offset_tsc(void)
 	delta += delay_at_last_interrupt;
 	lost = delta/(1000000/HZ);
 	delay = delta%(1000000/HZ);
-	if (lost >= 2) {
+	if (lost >= 2 && detect_lost_ticks) {
 		jiffies_64 += lost-1;
 
 		/* sanity check to ensure we're not always losing ticks */
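detect_lost_ticks stays zero until late_initcall time, so apparent "lost ticks" seen while the TSCs are still being synchronized are never compensated. A standalone sketch of the gated estimate (HZ value and the delta are illustrative):

    #include <stdio.h>

    #define HZ 1000
    static int detect_lost_ticks;

    static long lost_ticks(long delta_usec)
    {
            long lost = delta_usec / (1000000 / HZ);

            if (lost >= 2 && detect_lost_ticks)
                    return lost - 1;        /* jiffies credit for missed ticks */
            return 0;
    }

    int main(void)
    {
            printf("before initcall: %ld\n", lost_ticks(3500));      /* 0 */
            detect_lost_ticks = 1;
            printf("after initcall:  %ld\n", lost_ticks(3500));      /* 2 */
            return 0;
    }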
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 0aaebf3e1cfa..b814dbdcc91e 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -166,7 +166,8 @@ static void show_trace_log_lvl(struct task_struct *task,
 		stack = (unsigned long*)context->previous_esp;
 		if (!stack)
 			break;
-		printk(KERN_EMERG " =======================\n");
+		printk(log_lvl);
+		printk(" =======================\n");
 	}
 }
 
@@ -239,9 +240,11 @@ void show_registers(struct pt_regs *regs)
 	}
 	print_modules();
 	printk(KERN_EMERG "CPU:    %d\nEIP:    %04x:[<%08lx>]    %s VLI\n"
-			"EFLAGS: %08lx   (%s) \n",
+			"EFLAGS: %08lx   (%s %.*s) \n",
 		smp_processor_id(), 0xffff & regs->xcs, regs->eip,
-		print_tainted(), regs->eflags, system_utsname.release);
+		print_tainted(), regs->eflags, system_utsname.release,
+		(int)strcspn(system_utsname.version, " "),
+		system_utsname.version);
 	print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
 	printk(KERN_EMERG "eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
 		regs->eax, regs->ebx, regs->ecx, regs->edx);
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index 72a1b9cae2e4..6e4c3baef6cc 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -240,7 +240,7 @@ static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 cpumask_t cpu_callin_map = CPU_MASK_NONE;
 cpumask_t cpu_callout_map = CPU_MASK_NONE;
 EXPORT_SYMBOL(cpu_callout_map);
-cpumask_t cpu_possible_map = CPU_MASK_ALL;
+cpumask_t cpu_possible_map = CPU_MASK_NONE;
 EXPORT_SYMBOL(cpu_possible_map);
 
 /* The per processor IRQ masks (these are usually kept in sync) */
diff --git a/arch/i386/oprofile/backtrace.c b/arch/i386/oprofile/backtrace.c
index 21654be3f73f..acc18138fb22 100644
--- a/arch/i386/oprofile/backtrace.c
+++ b/arch/i386/oprofile/backtrace.c
@@ -49,7 +49,9 @@ dump_backtrace(struct frame_head * head)
  * |    stack    |
  * --------------- saved regs->ebp value if valid (frame_head address)
  * .             .
- * --------------- struct pt_regs stored on stack (struct pt_regs *)
+ * --------------- saved regs->rsp value if x86_64
+ * |             |
+ * --------------- struct pt_regs * stored on stack if 32-bit
  * |             |
  * .             .
  * |             |
@@ -57,13 +59,26 @@ dump_backtrace(struct frame_head * head)
  * |             |
  * |             | \/ Lower addresses
  *
- * Thus, &pt_regs <-> stack base restricts the valid(ish) ebp values
+ * Thus, regs (or regs->rsp for x86_64) <-> stack base restricts the
+ * valid(ish) ebp values.  Note: (1) for x86_64, NMI and several other
+ * exceptions use special stacks, maintained by the interrupt stack table
+ * (IST).  These stacks are set up in trap_init() in
+ * arch/x86_64/kernel/traps.c.  Thus, for x86_64, regs now does not point
+ * to the kernel stack; instead, it points to some location on the NMI
+ * stack.  On the other hand, regs->rsp is the stack pointer saved when the
+ * NMI occurred.  (2) For 32-bit, regs->esp is not valid because the
+ * processor does not save %esp on the kernel stack when interrupts occur
+ * in the kernel mode.
  */
 #ifdef CONFIG_FRAME_POINTER
 static int valid_kernel_stack(struct frame_head * head, struct pt_regs * regs)
 {
 	unsigned long headaddr = (unsigned long)head;
+#ifdef CONFIG_X86_64
+	unsigned long stack = (unsigned long)regs->rsp;
+#else
 	unsigned long stack = (unsigned long)regs;
+#endif
 	unsigned long stack_base = (stack & ~(THREAD_SIZE - 1)) + THREAD_SIZE;
 
 	return headaddr > stack && headaddr < stack_base;
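valid_kernel_stack() relies on THREAD_SIZE being a power of two: masking any in-stack address yields the stack base, so a candidate frame pointer is accepted only between the current pointer and base + THREAD_SIZE. A runnable sketch of the bounds test (THREAD_SIZE value assumed):

    #include <stdio.h>

    #define THREAD_SIZE 8192UL              /* two pages, as on i386 */

    static int valid_frame(unsigned long stack, unsigned long headaddr)
    {
            unsigned long stack_base = (stack & ~(THREAD_SIZE - 1)) + THREAD_SIZE;

            return headaddr > stack && headaddr < stack_base;
    }

    int main(void)
    {
            printf("%d\n", valid_frame(0xc2345e70UL, 0xc2345f00UL));  /* 1 */
            printf("%d\n", valid_frame(0xc2345e70UL, 0xc9999000UL));  /* 0 */
            return 0;
    }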
diff --git a/arch/i386/pci/irq.c b/arch/i386/pci/irq.c
index e715aa930036..3ca59cad05f3 100644
--- a/arch/i386/pci/irq.c
+++ b/arch/i386/pci/irq.c
@@ -539,6 +539,11 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
 		case PCI_DEVICE_ID_INTEL_ICH7_30:
 		case PCI_DEVICE_ID_INTEL_ICH7_31:
 		case PCI_DEVICE_ID_INTEL_ESB2_0:
+		case PCI_DEVICE_ID_INTEL_ICH8_0:
+		case PCI_DEVICE_ID_INTEL_ICH8_1:
+		case PCI_DEVICE_ID_INTEL_ICH8_2:
+		case PCI_DEVICE_ID_INTEL_ICH8_3:
+		case PCI_DEVICE_ID_INTEL_ICH8_4:
 			r->name = "PIIX/ICH";
 			r->get = pirq_piix_get;
 			r->set = pirq_piix_set;
diff --git a/arch/i386/pci/mmconfig.c b/arch/i386/pci/mmconfig.c
index 4bb4d4b0f73a..0ee8a983708c 100644
--- a/arch/i386/pci/mmconfig.c
+++ b/arch/i386/pci/mmconfig.c
@@ -36,8 +36,7 @@ static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
 	while (1) {
 		++cfg_num;
 		if (cfg_num >= pci_mmcfg_config_num) {
-			/* Not found - fallback to type 1 */
-			return 0;
+			break;
 		}
 		cfg = &pci_mmcfg_config[cfg_num];
 		if (cfg->pci_segment_group_number != seg)
@@ -46,6 +45,18 @@ static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
 		    (cfg->end_bus_number >= bus))
 			return cfg->base_address;
 	}
+
+	/* Handle more broken MCFG tables on Asus etc.
+	   They only contain a single entry for bus 0-0. Assume
+	   this applies to all busses. */
+	cfg = &pci_mmcfg_config[0];
+	if (pci_mmcfg_config_num == 1 &&
+		cfg->pci_segment_group_number == 0 &&
+		(cfg->start_bus_number | cfg->end_bus_number) == 0)
+		return cfg->base_address;
+
+	/* Fall back to type 0 */
+	return 0;
 }
 
 static inline void pci_exp_set_dev_base(unsigned int base, int bus, int devfn)
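Turning the early return into a break lets a final fixup run after the scan. A standalone sketch of the resulting lookup, single-entry bus 0-0 fallback included (types simplified, not the kernel code):

    #include <stdint.h>
    #include <stddef.h>

    struct mcfg_entry {
            uint32_t base_address;
            uint16_t segment;
            uint8_t  start_bus, end_bus;
    };

    static uint32_t mcfg_base(const struct mcfg_entry *t, size_t n,
                              unsigned seg, unsigned bus)
    {
            for (size_t i = 0; i < n; i++)
                    if (t[i].segment == seg &&
                        t[i].start_bus <= bus && bus <= t[i].end_bus)
                            return t[i].base_address;

            /* broken BIOS: one entry claiming bus 0-0 covers all busses */
            if (n == 1 && t[0].segment == 0 &&
                (t[0].start_bus | t[0].end_bus) == 0)
                    return t[0].base_address;

            return 0;       /* caller falls back to legacy config cycles */
    }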