Diffstat (limited to 'arch/i386')
-rw-r--r--  arch/i386/Kconfig | 8
-rw-r--r--  arch/i386/Kconfig.cpu | 14
-rw-r--r--  arch/i386/Kconfig.debug | 10
-rw-r--r--  arch/i386/kernel/apic.c | 2
-rw-r--r--  arch/i386/kernel/apm.c | 97
-rw-r--r--  arch/i386/kernel/cpu/amd.c | 7
-rw-r--r--  arch/i386/kernel/cpu/common.c | 8
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/cpufreq-nforce2.c | 3
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k8.c | 50
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k8.h | 9
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-ich.c | 47
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-lib.c | 32
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-lib.h | 1
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-smi.c | 1
-rw-r--r--  arch/i386/kernel/cpu/cyrix.c | 27
-rw-r--r--  arch/i386/kernel/cpu/proc.c | 6
-rw-r--r--  arch/i386/kernel/cpuid.c | 3
-rw-r--r--  arch/i386/kernel/entry.S | 1
-rw-r--r--  arch/i386/kernel/head.S | 27
-rw-r--r--  arch/i386/kernel/i386_ksyms.c | 3
-rw-r--r--  arch/i386/kernel/io_apic.c | 4
-rw-r--r--  arch/i386/kernel/kprobes.c | 2
-rw-r--r--  arch/i386/kernel/mpparse.c | 26
-rw-r--r--  arch/i386/kernel/msr.c | 3
-rw-r--r--  arch/i386/kernel/process.c | 20
-rw-r--r--  arch/i386/kernel/ptrace.c | 9
-rw-r--r--  arch/i386/kernel/reboot.c | 6
-rw-r--r--  arch/i386/kernel/setup.c | 8
-rw-r--r--  arch/i386/kernel/smpboot.c | 9
-rw-r--r--  arch/i386/kernel/syscall_table.S | 1
-rw-r--r--  arch/i386/kernel/timers/timer_tsc.c | 2
-rw-r--r--  arch/i386/kernel/traps.c | 42
-rw-r--r--  arch/i386/mm/init.c | 24
-rw-r--r--  arch/i386/mm/ioremap.c | 37
-rw-r--r--  arch/i386/mm/pageattr.c | 27
-rw-r--r--  arch/i386/pci/Makefile | 2
-rw-r--r--  arch/i386/pci/direct.c | 4
-rw-r--r--  arch/i386/pci/irq.c | 2
-rw-r--r--  arch/i386/pci/mmconfig.c | 65
-rw-r--r--  arch/i386/pci/pci.h | 7
40 files changed, 444 insertions(+), 212 deletions(-)
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 6004bb0795e0..968fabd8723f 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -464,7 +464,6 @@ config NUMA
 	depends on SMP && HIGHMEM64G && (X86_NUMAQ || X86_GENERICARCH || (X86_SUMMIT && ACPI))
 	default n if X86_PC
 	default y if (X86_NUMAQ || X86_SUMMIT)
-	select SPARSEMEM_STATIC
 
 # Need comments to help the hapless user trying to turn on NUMA support
 comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support"
@@ -493,6 +492,10 @@ config HAVE_ARCH_ALLOC_REMAP
 	depends on NUMA
 	default y
 
+config ARCH_FLATMEM_ENABLE
+	def_bool y
+	depends on (ARCH_SELECT_MEMORY_MODEL && X86_PC)
+
 config ARCH_DISCONTIGMEM_ENABLE
 	def_bool y
 	depends on NUMA
@@ -503,7 +506,8 @@ config ARCH_DISCONTIGMEM_DEFAULT
 
 config ARCH_SPARSEMEM_ENABLE
 	def_bool y
-	depends on NUMA
+	depends on (NUMA || (X86_PC && EXPERIMENTAL))
+	select SPARSEMEM_STATIC
 
 config ARCH_SELECT_MEMORY_MODEL
 	def_bool y
diff --git a/arch/i386/Kconfig.cpu b/arch/i386/Kconfig.cpu
index 53bbb3c008ee..79603b3471f9 100644
--- a/arch/i386/Kconfig.cpu
+++ b/arch/i386/Kconfig.cpu
@@ -39,6 +39,7 @@ config M386
 	  - "Winchip-2" for IDT Winchip 2.
 	  - "Winchip-2A" for IDT Winchips with 3dNow! capabilities.
 	  - "GeodeGX1" for Geode GX1 (Cyrix MediaGX).
+	  - "Geode GX/LX" For AMD Geode GX and LX processors.
 	  - "CyrixIII/VIA C3" for VIA Cyrix III or VIA C3.
 	  - "VIA C3-2 for VIA C3-2 "Nehemiah" (model 9 and above).
 
@@ -171,6 +172,11 @@ config MGEODEGX1
 	help
 	  Select this for a Geode GX1 (Cyrix MediaGX) chip.
 
+config MGEODE_LX
+	bool "Geode GX/LX"
+	help
+	  Select this for AMD Geode GX and LX processors.
+
 config MCYRIXIII
 	bool "CyrixIII/VIA-C3"
 	help
@@ -220,8 +226,8 @@ config X86_XADD
 config X86_L1_CACHE_SHIFT
 	int
 	default "7" if MPENTIUM4 || X86_GENERIC
-	default "4" if X86_ELAN || M486 || M386
-	default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODEGX1
+	default "4" if X86_ELAN || M486 || M386 || MGEODEGX1
+	default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
 	default "6" if MK7 || MK8 || MPENTIUMM
 
 config RWSEM_GENERIC_SPINLOCK
@@ -290,12 +296,12 @@ config X86_INTEL_USERCOPY
 
 config X86_USE_PPRO_CHECKSUM
 	bool
-	depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON
+	depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON || MGEODE_LX
 	default y
 
 config X86_USE_3DNOW
 	bool
-	depends on MCYRIXIII || MK7
+	depends on MCYRIXIII || MK7 || MGEODE_LX
 	default y
 
 config X86_OOSTORE
diff --git a/arch/i386/Kconfig.debug b/arch/i386/Kconfig.debug
index c48b424dd640..bf32ecc9ad04 100644
--- a/arch/i386/Kconfig.debug
+++ b/arch/i386/Kconfig.debug
@@ -42,6 +42,16 @@ config DEBUG_PAGEALLOC
 	  This results in a large slowdown, but helps to find certain types
 	  of memory corruptions.
 
+config DEBUG_RODATA
+	bool "Write protect kernel read-only data structures"
+	depends on DEBUG_KERNEL
+	help
+	  Mark the kernel read-only data as write-protected in the pagetables,
+	  in order to catch accidental (and incorrect) writes to such const
+	  data. This option may have a slight performance impact because a
+	  portion of the kernel code won't be covered by a 2MB TLB anymore.
+	  If in doubt, say "N".
+
 config 4KSTACKS
 	bool "Use 4Kb for kernel stacks instead of 8Kb"
 	depends on DEBUG_KERNEL
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 496a2c9909fe..d8f94e78de8a 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -721,7 +721,7 @@ static int __init apic_set_verbosity(char *str)
 		apic_verbosity = APIC_VERBOSE;
 	else
 		printk(KERN_WARNING "APIC Verbosity level %s not recognised"
-			" use apic=verbose or apic=debug", str);
+			" use apic=verbose or apic=debug\n", str);
 
 	return 0;
 }
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index 1e60acbed3c1..2d793d4aef1a 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -303,17 +303,6 @@ extern int (*console_blank_hook)(int);
 #include "apm.h"
 
 /*
- * Define to make all _set_limit calls use 64k limits. The APM 1.1 BIOS is
- * supposed to provide limit information that it recognizes. Many machines
- * do this correctly, but many others do not restrict themselves to their
- * claimed limit. When this happens, they will cause a segmentation
- * violation in the kernel at boot time. Most BIOS's, however, will
- * respect a 64k limit, so we use that. If you want to be pedantic and
- * hold your BIOS to its claims, then undefine this.
- */
-#define APM_RELAX_SEGMENTS
-
-/*
  * Define to re-initialize the interrupt 0 timer to 100 Hz after a suspend.
  * This patched by Chad Miller <cmiller@surfsouth.com>, original code by
  * David Chen <chen@ctpa04.mit.edu>
@@ -1075,22 +1064,23 @@ static int apm_engage_power_management(u_short device, int enable)
 
 static int apm_console_blank(int blank)
 {
-	int error;
+	int error, i;
 	u_short state;
+	static const u_short dev[3] = { 0x100, 0x1FF, 0x101 };
 
 	state = blank ? APM_STATE_STANDBY : APM_STATE_READY;
-	/* Blank the first display device */
-	error = set_power_state(0x100, state);
-	if ((error != APM_SUCCESS) && (error != APM_NO_ERROR)) {
-		/* try to blank them all instead */
-		error = set_power_state(0x1ff, state);
-		if ((error != APM_SUCCESS) && (error != APM_NO_ERROR))
-			/* try to blank device one instead */
-			error = set_power_state(0x101, state);
+
+	for (i = 0; i < ARRAY_SIZE(dev); i++) {
+		error = set_power_state(dev[i], state);
+
+		if ((error == APM_SUCCESS) || (error == APM_NO_ERROR))
+			return 1;
+
+		if (error == APM_NOT_ENGAGED)
+			break;
 	}
-	if ((error == APM_SUCCESS) || (error == APM_NO_ERROR))
-		return 1;
-	if (error == APM_NOT_ENGAGED) {
+
+	if (error == APM_NOT_ENGAGED && state != APM_STATE_READY) {
 		static int tried;
 		int eng_error;
 		if (tried++ == 0) {
@@ -2233,8 +2223,8 @@ static int __init apm_init(void)
 static int __init apm_init(void)
 {
 	struct proc_dir_entry *apm_proc;
+	struct desc_struct *gdt;
 	int ret;
-	int i;
 
 	dmi_check_system(apm_dmi_table);
 
@@ -2312,45 +2302,30 @@ static int __init apm_init(void)
 	set_base(bad_bios_desc, __va((unsigned long)0x40 << 4));
 	_set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4));
 
+	/*
+	 * Set up the long jump entry point to the APM BIOS, which is called
+	 * from inline assembly.
+	 */
 	apm_bios_entry.offset = apm_info.bios.offset;
 	apm_bios_entry.segment = APM_CS;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		struct desc_struct *gdt = get_cpu_gdt_table(i);
-		set_base(gdt[APM_CS >> 3],
-			 __va((unsigned long)apm_info.bios.cseg << 4));
-		set_base(gdt[APM_CS_16 >> 3],
-			 __va((unsigned long)apm_info.bios.cseg_16 << 4));
-		set_base(gdt[APM_DS >> 3],
-			 __va((unsigned long)apm_info.bios.dseg << 4));
-#ifndef APM_RELAX_SEGMENTS
-		if (apm_info.bios.version == 0x100) {
-#endif
-			/* For ASUS motherboard, Award BIOS rev 110 (and others?) */
-			_set_limit((char *)&gdt[APM_CS >> 3], 64 * 1024 - 1);
-			/* For some unknown machine. */
-			_set_limit((char *)&gdt[APM_CS_16 >> 3], 64 * 1024 - 1);
-			/* For the DEC Hinote Ultra CT475 (and others?) */
-			_set_limit((char *)&gdt[APM_DS >> 3], 64 * 1024 - 1);
-#ifndef APM_RELAX_SEGMENTS
-		} else {
-			_set_limit((char *)&gdt[APM_CS >> 3],
-				(apm_info.bios.cseg_len - 1) & 0xffff);
-			_set_limit((char *)&gdt[APM_CS_16 >> 3],
-				(apm_info.bios.cseg_16_len - 1) & 0xffff);
-			_set_limit((char *)&gdt[APM_DS >> 3],
-				(apm_info.bios.dseg_len - 1) & 0xffff);
-			/* workaround for broken BIOSes */
-			if (apm_info.bios.cseg_len <= apm_info.bios.offset)
-				_set_limit((char *)&gdt[APM_CS >> 3], 64 * 1024 -1);
-			if (apm_info.bios.dseg_len <= 0x40) { /* 0x40 * 4kB == 64kB */
-				/* for the BIOS that assumes granularity = 1 */
-				gdt[APM_DS >> 3].b |= 0x800000;
-				printk(KERN_NOTICE "apm: we set the granularity of dseg.\n");
-			}
-		}
-#endif
-	}
+	/*
+	 * The APM 1.1 BIOS is supposed to provide limit information that it
+	 * recognizes. Many machines do this correctly, but many others do
+	 * not restrict themselves to their claimed limit. When this happens,
+	 * they will cause a segmentation violation in the kernel at boot time.
+	 * Most BIOS's, however, will respect a 64k limit, so we use that.
+	 *
+	 * Note we only set APM segments on CPU zero, since we pin the APM
+	 * code to that CPU.
+	 */
+	gdt = get_cpu_gdt_table(0);
+	set_base(gdt[APM_CS >> 3],
+		 __va((unsigned long)apm_info.bios.cseg << 4));
+	set_base(gdt[APM_CS_16 >> 3],
+		 __va((unsigned long)apm_info.bios.cseg_16 << 4));
+	set_base(gdt[APM_DS >> 3],
+		 __va((unsigned long)apm_info.bios.dseg << 4));
 
 	apm_proc = create_proc_info_entry("apm", 0, NULL, apm_get_info);
 	if (apm_proc)
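
For reference, the rewritten apm_console_blank() above simply walks a fixed list of APM device IDs (0x100 = first display, 0x1FF = all displays, 0x101 = second display) and stops at the first call the BIOS accepts, or as soon as power management is reported as not engaged. A minimal standalone sketch of that retry pattern follows; try_blank() and the numeric error codes are stand-ins for the real set_power_state() BIOS call and the APM_* codes, not the driver's actual values.

#include <stdio.h>

/* Illustrative codes only -- not the real APM return values. */
#define APM_SUCCESS	0
#define APM_NO_ERROR	1
#define APM_NOT_ENGAGED	2
#define APM_FAILED	3

/* Stand-in for set_power_state(): pretend only "all displays" works. */
static int try_blank(unsigned short device, unsigned short state)
{
	(void)state;
	return (device == 0x1FF) ? APM_SUCCESS : APM_FAILED;
}

static int console_blank(unsigned short state)
{
	static const unsigned short dev[] = { 0x100, 0x1FF, 0x101 };
	int error, i;

	for (i = 0; i < 3; i++) {
		error = try_blank(dev[i], state);
		if (error == APM_SUCCESS || error == APM_NO_ERROR)
			return 1;	/* this target accepted the request */
		if (error == APM_NOT_ENGAGED)
			break;		/* power management off: stop trying */
	}
	return 0;
}

int main(void)
{
	printf("blanked: %d\n", console_blank(1));
	return 0;
}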
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index e344ef88cfcd..e7697e077f6b 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -161,8 +161,13 @@ static void __init init_amd(struct cpuinfo_x86 *c)
 			set_bit(X86_FEATURE_K6_MTRR, c->x86_capability);
 			break;
 		}
-		break;
 
+		if (c->x86_model == 10) {
+			/* AMD Geode LX is model 10 */
+			/* placeholder for any needed mods */
+			break;
+		}
+		break;
 	case 6: /* An Athlon/Duron */
 
 		/* Bit 15 of Athlon specific MSR 15, needs to be 0
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 31e344b26bae..cca655688ffc 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -18,9 +18,6 @@
 
 #include "cpu.h"
 
-DEFINE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
-EXPORT_PER_CPU_SYMBOL(cpu_gdt_table);
-
 DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
 EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
 
@@ -599,11 +596,6 @@ void __devinit cpu_init(void)
 	load_idt(&idt_descr);
 
 	/*
-	 * Delete NT
-	 */
-	__asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
-
-	/*
 	 * Set up and load the per-CPU TSS and LDT
 	 */
 	atomic_inc(&init_mm.mm_count);
diff --git a/arch/i386/kernel/cpu/cpufreq/cpufreq-nforce2.c b/arch/i386/kernel/cpu/cpufreq/cpufreq-nforce2.c
index 04a405345203..2b62dee35c6c 100644
--- a/arch/i386/kernel/cpu/cpufreq/cpufreq-nforce2.c
+++ b/arch/i386/kernel/cpu/cpufreq/cpufreq-nforce2.c
@@ -177,9 +177,10 @@ static unsigned int nforce2_fsb_read(int bootfsb)
  */
 static int nforce2_set_fsb(unsigned int fsb)
 {
-	u32 pll, temp = 0;
+	u32 temp = 0;
 	unsigned int tfsb;
 	int diff;
+	int pll = 0;
 
 	if ((fsb > max_fsb) || (fsb < NFORCE2_MIN_FSB)) {
 		printk(KERN_ERR "cpufreq: FSB %d is out of range!\n", fsb);
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index 68a1fc87f4ca..0fbbd4c1072e 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -45,7 +45,7 @@
 
 #define PFX "powernow-k8: "
 #define BFX PFX "BIOS error: "
-#define VERSION "version 1.50.4"
+#define VERSION "version 1.60.0"
 #include "powernow-k8.h"
 
 /* serialize freq changes */
@@ -216,10 +216,10 @@ static int write_new_vid(struct powernow_k8_data *data, u32 vid)
 
 	do {
 		wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS);
 		if (i++ > 100) {
 			printk(KERN_ERR PFX "internal error - pending bit very stuck - no further pstate changes possible\n");
 			return 1;
 		}
 	} while (query_current_values_with_pending_wait(data));
 
 	if (savefid != data->currfid) {
@@ -336,7 +336,7 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid
 /* Phase 2 - core frequency transition */
 static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid)
 {
-	u32 vcoreqfid, vcocurrfid, vcofiddiff, savevid = data->currvid;
+	u32 vcoreqfid, vcocurrfid, vcofiddiff, fid_interval, savevid = data->currvid;
 
 	if ((reqfid < HI_FID_TABLE_BOTTOM) && (data->currfid < HI_FID_TABLE_BOTTOM)) {
 		printk(KERN_ERR PFX "ph2: illegal lo-lo transition 0x%x 0x%x\n",
@@ -359,9 +359,11 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid)
 		: vcoreqfid - vcocurrfid;
 
 	while (vcofiddiff > 2) {
+		(data->currfid & 1) ? (fid_interval = 1) : (fid_interval = 2);
+
 		if (reqfid > data->currfid) {
 			if (data->currfid > LO_FID_TABLE_TOP) {
-				if (write_new_fid(data, data->currfid + 2)) {
+				if (write_new_fid(data, data->currfid + fid_interval)) {
 					return 1;
 				}
 			} else {
@@ -371,7 +373,7 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid)
 				}
 			}
 		} else {
-			if (write_new_fid(data, data->currfid - 2))
+			if (write_new_fid(data, data->currfid - fid_interval))
 				return 1;
 		}
 
@@ -464,7 +466,7 @@ static int check_supported_cpu(unsigned int cpu)
 	set_cpus_allowed(current, cpumask_of_cpu(cpu));
 
 	if (smp_processor_id() != cpu) {
-		printk(KERN_ERR "limiting to cpu %u failed\n", cpu);
+		printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu);
 		goto out;
 	}
 
@@ -474,7 +476,7 @@ static int check_supported_cpu(unsigned int cpu)
 	eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
 	if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) ||
 	    ((eax & CPUID_XFAM) != CPUID_XFAM_K8) ||
-	    ((eax & CPUID_XMOD) > CPUID_XMOD_REV_F)) {
+	    ((eax & CPUID_XMOD) > CPUID_XMOD_REV_G)) {
 		printk(KERN_INFO PFX "Processor cpuid %x not supported\n", eax);
 		goto out;
 	}
@@ -517,22 +519,24 @@ static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, u8
 			printk(KERN_ERR BFX "maxvid exceeded with pstate %d\n", j);
 			return -ENODEV;
 		}
-		if ((pst[j].fid > MAX_FID)
-		    || (pst[j].fid & 1)
-		    || (j && (pst[j].fid < HI_FID_TABLE_BOTTOM))) {
+		if (pst[j].fid > MAX_FID) {
+			printk(KERN_ERR BFX "maxfid exceeded with pstate %d\n", j);
+			return -ENODEV;
+		}
+		if (j && (pst[j].fid < HI_FID_TABLE_BOTTOM)) {
 			/* Only first fid is allowed to be in "low" range */
-			printk(KERN_ERR PFX "two low fids - %d : 0x%x\n", j, pst[j].fid);
+			printk(KERN_ERR BFX "two low fids - %d : 0x%x\n", j, pst[j].fid);
 			return -EINVAL;
 		}
 		if (pst[j].fid < lastfid)
 			lastfid = pst[j].fid;
 	}
 	if (lastfid & 1) {
-		printk(KERN_ERR PFX "lastfid invalid\n");
+		printk(KERN_ERR BFX "lastfid invalid\n");
 		return -EINVAL;
 	}
 	if (lastfid > LO_FID_TABLE_TOP)
-		printk(KERN_INFO PFX "first fid not from lo freq table\n");
+		printk(KERN_INFO BFX "first fid not from lo freq table\n");
 
 	return 0;
 }
@@ -631,7 +635,7 @@ static int find_psb_table(struct powernow_k8_data *data)
 
 	dprintk("table vers: 0x%x\n", psb->tableversion);
 	if (psb->tableversion != PSB_VERSION_1_4) {
-		printk(KERN_INFO BFX "PSB table is not v1.4\n");
+		printk(KERN_ERR BFX "PSB table is not v1.4\n");
 		return -ENODEV;
 	}
 
@@ -689,7 +693,7 @@ static int find_psb_table(struct powernow_k8_data *data)
 	 * BIOS and Kernel Developer's Guide, which is available on
 	 * www.amd.com
 	 */
-	printk(KERN_INFO PFX "BIOS error - no PSB or ACPI _PSS objects\n");
+	printk(KERN_ERR PFX "BIOS error - no PSB or ACPI _PSS objects\n");
 	return -ENODEV;
 }
 
@@ -912,7 +916,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
 	set_cpus_allowed(current, cpumask_of_cpu(pol->cpu));
 
 	if (smp_processor_id() != pol->cpu) {
-		printk(KERN_ERR "limiting to cpu %u failed\n", pol->cpu);
+		printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
 		goto err_out;
 	}
 
@@ -982,6 +986,9 @@ static int __init powernowk8_cpu_init(struct cpufreq_policy *pol)
 	cpumask_t oldmask = CPU_MASK_ALL;
 	int rc, i;
 
+	if (!cpu_online(pol->cpu))
+		return -ENODEV;
+
 	if (!check_supported_cpu(pol->cpu))
 		return -ENODEV;
 
@@ -1021,7 +1028,7 @@ static int __init powernowk8_cpu_init(struct cpufreq_policy *pol)
 	set_cpus_allowed(current, cpumask_of_cpu(pol->cpu));
 
 	if (smp_processor_id() != pol->cpu) {
-		printk(KERN_ERR "limiting to cpu %u failed\n", pol->cpu);
+		printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
 		goto err_out;
 	}
 
@@ -1162,10 +1169,9 @@ static void __exit powernowk8_exit(void)
 	cpufreq_unregister_driver(&cpufreq_amd64_driver);
 }
 
-MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and Mark Langsdorf <mark.langsdorf@amd.com.");
+MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and Mark Langsdorf <mark.langsdorf@amd.com>");
 MODULE_DESCRIPTION("AMD Athlon 64 and Opteron processor frequency driver.");
 MODULE_LICENSE("GPL");
 
 late_initcall(powernowk8_init);
 module_exit(powernowk8_exit);
-
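
The fid_interval logic added in core_frequency_transition() is the visible part of revision G support: fids may now be odd, and an odd current fid is first stepped by 1 before the usual steps of 2. A toy walk of that stepping rule is sketched below; the fid values are made up and the driver's VCO conversion and bounds checks are omitted.

#include <stdio.h>

int main(void)
{
	unsigned int currfid = 0x9;	/* odd start fid, possible on rev G parts */
	unsigned int reqfid = 0x2;	/* target fid */

	while (currfid != reqfid) {
		/* odd fid: step by 1 (rev G); otherwise step by 2 as before */
		unsigned int fid_interval = (currfid & 1) ? 1 : 2;

		if (reqfid > currfid)
			currfid += fid_interval;
		else
			currfid -= fid_interval;
		printf("stepped to fid 0x%x\n", currfid);
	}
	return 0;
}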
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
index b1e85bb36396..d0de37d58e9a 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
@@ -42,7 +42,7 @@ struct powernow_k8_data {
 #define CPUID_XFAM			0x0ff00000	/* extended family */
 #define CPUID_XFAM_K8			0
 #define CPUID_XMOD			0x000f0000	/* extended model */
-#define CPUID_XMOD_REV_F		0x00040000
+#define CPUID_XMOD_REV_G		0x00060000
 #define CPUID_USE_XFAM_XMOD		0x00000f00
 #define CPUID_GET_MAX_CAPABILITIES	0x80000000
 #define CPUID_FREQ_VOLT_CAPABILITIES	0x80000007
@@ -86,13 +86,14 @@ struct powernow_k8_data {
  * low fid table
  * - lowest entry in the high fid table must be a <= 200MHz + 2 * the entry
  *   in the low fid table
- * - the parts can only step at 200 MHz intervals, so 1.9 GHz is never valid
+ * - the parts can only step at <= 200 MHz intervals, odd fid values are
+ *   supported in revision G and later revisions.
  * - lowest frequency must be >= interprocessor hypertransport link speed
  *   (only applies to MP systems obviously)
  */
 
 /* fids (frequency identifiers) are arranged in 2 tables - lo and hi */
-#define LO_FID_TABLE_TOP     6	/* fid values marking the boundary */
+#define LO_FID_TABLE_TOP     7	/* fid values marking the boundary */
 #define HI_FID_TABLE_BOTTOM  8	/* between the low and high tables */
 
 #define LO_VCOFREQ_TABLE_TOP 1400	/* corresponding vco frequency values */
@@ -106,7 +107,7 @@ struct powernow_k8_data {
 #define MIN_FREQ 800	/* Min and max freqs, per spec */
 #define MAX_FREQ 5000
 
-#define INVALID_FID_MASK 0xffffffc1	/* not a valid fid if these bits are set */
+#define INVALID_FID_MASK 0xffffffc0	/* not a valid fid if these bits are set */
 #define INVALID_VID_MASK 0xffffffc0	/* not a valid vid if these bits are set */
 
 #define VID_OFF 0x3f
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-ich.c b/arch/i386/kernel/cpu/cpufreq/speedstep-ich.c
index 5b7d18a06afa..b425cd3d1838 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-ich.c
@@ -40,6 +40,7 @@ static struct pci_dev *speedstep_chipset_dev;
  */
 static unsigned int speedstep_processor = 0;
 
+static u32 pmbase;
 
 /*
  * There are only two frequency states for each processor. Values
@@ -56,34 +57,47 @@ static struct cpufreq_frequency_table speedstep_freqs[] = {
 
 
 /**
- * speedstep_set_state - set the SpeedStep state
- * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
+ * speedstep_find_register - read the PMBASE address
  *
- * Tries to change the SpeedStep state.
+ * Returns: -ENODEV if no register could be found
  */
-static void speedstep_set_state (unsigned int state)
+static int speedstep_find_register (void)
 {
-	u32 pmbase;
-	u8 pm2_blk;
-	u8 value;
-	unsigned long flags;
-
-	if (!speedstep_chipset_dev || (state > 0x1))
-		return;
+	if (!speedstep_chipset_dev)
+		return -ENODEV;
 
 	/* get PMBASE */
 	pci_read_config_dword(speedstep_chipset_dev, 0x40, &pmbase);
 	if (!(pmbase & 0x01)) {
 		printk(KERN_ERR "speedstep-ich: could not find speedstep register\n");
-		return;
+		return -ENODEV;
 	}
 
 	pmbase &= 0xFFFFFFFE;
 	if (!pmbase) {
 		printk(KERN_ERR "speedstep-ich: could not find speedstep register\n");
-		return;
+		return -ENODEV;
 	}
 
+	dprintk("pmbase is 0x%x\n", pmbase);
+	return 0;
+}
+
+/**
+ * speedstep_set_state - set the SpeedStep state
+ * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
+ *
+ * Tries to change the SpeedStep state.
+ */
+static void speedstep_set_state (unsigned int state)
+{
+	u8 pm2_blk;
+	u8 value;
+	unsigned long flags;
+
+	if (state > 0x1)
+		return;
+
 	/* Disable IRQs */
 	local_irq_save(flags);
 
@@ -315,10 +329,11 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
 	cpus_allowed = current->cpus_allowed;
 	set_cpus_allowed(current, policy->cpus);
 
-	/* detect low and high frequency */
+	/* detect low and high frequency and transition latency */
 	result = speedstep_get_freqs(speedstep_processor,
 				     &speedstep_freqs[SPEEDSTEP_LOW].frequency,
 				     &speedstep_freqs[SPEEDSTEP_HIGH].frequency,
+				     &policy->cpuinfo.transition_latency,
 				     &speedstep_set_state);
 	set_cpus_allowed(current, cpus_allowed);
 	if (result)
@@ -335,7 +350,6 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
 
 	/* cpuinfo and default policy values */
 	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
-	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
 	policy->cur = speed;
 
 	result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs);
@@ -400,6 +414,9 @@ static int __init speedstep_init(void)
 		return -EINVAL;
 	}
 
+	if (speedstep_find_register())
+		return -ENODEV;
+
 	return cpufreq_register_driver(&speedstep_driver);
 }
 
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
index d368b3f5fce8..7c47005a1805 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
@@ -320,11 +320,13 @@ EXPORT_SYMBOL_GPL(speedstep_detect_processor);
 unsigned int speedstep_get_freqs(unsigned int processor,
 				  unsigned int *low_speed,
 				  unsigned int *high_speed,
+				  unsigned int *transition_latency,
 				  void (*set_state) (unsigned int state))
 {
 	unsigned int prev_speed;
 	unsigned int ret = 0;
 	unsigned long flags;
+	struct timeval tv1, tv2;
 
 	if ((!processor) || (!low_speed) || (!high_speed) || (!set_state))
 		return -EINVAL;
@@ -337,7 +339,7 @@ unsigned int speedstep_get_freqs(unsigned int processor,
 		return -EIO;
 
 	dprintk("previous speed is %u\n", prev_speed);
 
 	local_irq_save(flags);
 
 	/* switch to low state */
@@ -350,8 +352,17 @@ unsigned int speedstep_get_freqs(unsigned int processor,
 
 	dprintk("low speed is %u\n", *low_speed);
 
+	/* start latency measurement */
+	if (transition_latency)
+		do_gettimeofday(&tv1);
+
 	/* switch to high state */
 	set_state(SPEEDSTEP_HIGH);
+
+	/* end latency measurement */
+	if (transition_latency)
+		do_gettimeofday(&tv2);
+
 	*high_speed = speedstep_get_processor_frequency(processor);
 	if (!*high_speed) {
 		ret = -EIO;
@@ -369,6 +380,25 @@ unsigned int speedstep_get_freqs(unsigned int processor,
 	if (*high_speed != prev_speed)
 		set_state(SPEEDSTEP_LOW);
 
+	if (transition_latency) {
+		*transition_latency = (tv2.tv_sec - tv1.tv_sec) * USEC_PER_SEC +
+			tv2.tv_usec - tv1.tv_usec;
+		dprintk("transition latency is %u uSec\n", *transition_latency);
+
+		/* convert uSec to nSec and add 20% for safety reasons */
+		*transition_latency *= 1200;
+
+		/* check if the latency measurement is too high or too low
+		 * and set it to a safe value (500uSec) in that case
+		 */
+		if (*transition_latency > 10000000 || *transition_latency < 50000) {
+			printk (KERN_WARNING "speedstep: frequency transition measured seems out of "
+				"range (%u nSec), falling back to a safe one of %u nSec.\n",
+				*transition_latency, 500000);
+			*transition_latency = 500000;
+		}
+	}
+
 out:
 	local_irq_restore(flags);
 	return (ret);
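
The added latency path above times the low-to-high switch with do_gettimeofday(), scales the result from microseconds to nanoseconds with a 20% margin, and falls back to 500 usec when the measurement looks implausible. A user-space sketch of the same arithmetic, with usleep() standing in for the actual frequency transition:

#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>

int main(void)
{
	struct timeval tv1, tv2;
	unsigned int latency;

	gettimeofday(&tv1, NULL);
	usleep(200);			/* stands in for set_state(SPEEDSTEP_HIGH) */
	gettimeofday(&tv2, NULL);

	latency = (tv2.tv_sec - tv1.tv_sec) * 1000000 + tv2.tv_usec - tv1.tv_usec;
	latency *= 1200;		/* usec -> nsec, plus a 20% safety margin */

	if (latency > 10000000 || latency < 50000)
		latency = 500000;	/* clamp to a safe 500 usec default */

	printf("transition latency: %u ns\n", latency);
	return 0;
}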
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h
index 261a2c9b7f6b..6a727fd3a77e 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h
@@ -44,4 +44,5 @@ extern unsigned int speedstep_get_processor_frequency(unsigned int processor);
 extern unsigned int speedstep_get_freqs(unsigned int processor,
 	unsigned int *low_speed,
 	unsigned int *high_speed,
+	unsigned int *transition_latency,
 	void (*set_state) (unsigned int state));
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
index 2718fb6f6aba..28cc5d524afc 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
@@ -269,6 +269,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
 	result = speedstep_get_freqs(speedstep_processor,
 				&speedstep_freqs[SPEEDSTEP_LOW].frequency,
 				&speedstep_freqs[SPEEDSTEP_HIGH].frequency,
+				NULL,
 				&speedstep_set_state);
 
 	if (result) {
diff --git a/arch/i386/kernel/cpu/cyrix.c b/arch/i386/kernel/cpu/cyrix.c
index ff87cc22b323..75015975d038 100644
--- a/arch/i386/kernel/cpu/cyrix.c
+++ b/arch/i386/kernel/cpu/cyrix.c
@@ -343,6 +343,31 @@ static void __init init_cyrix(struct cpuinfo_x86 *c)
 }
 
 /*
+ * Handle National Semiconductor branded processors
+ */
+static void __devinit init_nsc(struct cpuinfo_x86 *c)
+{
+	/* There may be GX1 processors in the wild that are branded
+	 * NSC and not Cyrix.
+	 *
+	 * This function only handles the GX processor, and kicks every
+	 * thing else to the Cyrix init function above - that should
+	 * cover any processors that might have been branded differently
+	 * after NSC aquired Cyrix.
+	 *
+	 * If this breaks your GX1 horribly, please e-mail
+	 * info-linux@ldcmail.amd.com to tell us.
+	 */
+
+	/* Handle the GX (Formally known as the GX2) */
+
+	if (c->x86 == 5 && c->x86_model == 5)
+		display_cacheinfo(c);
+	else
+		init_cyrix(c);
+}
+
+/*
  * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
  * by the fact that they preserve the flags across the division of 5/2.
  * PII and PPro exhibit this behavior too, but they have cpuid available.
@@ -422,7 +447,7 @@ int __init cyrix_init_cpu(void)
 static struct cpu_dev nsc_cpu_dev __initdata = {
 	.c_vendor	= "NSC",
 	.c_ident	= { "Geode by NSC" },
-	.c_init		= init_cyrix,
+	.c_init		= init_nsc,
 	.c_identify	= generic_identify,
 };
 
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index e7921315ae9d..6d91b274589c 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -3,6 +3,7 @@
 #include <linux/string.h>
 #include <asm/semaphore.h>
 #include <linux/seq_file.h>
+#include <linux/cpufreq.h>
 
 /*
  * Get CPU information for use by the procfs.
@@ -86,8 +87,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		seq_printf(m, "stepping\t: unknown\n");
 
 	if ( cpu_has(c, X86_FEATURE_TSC) ) {
+		unsigned int freq = cpufreq_quick_get(n);
+		if (!freq)
+			freq = cpu_khz;
 		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
-			cpu_khz / 1000, (cpu_khz % 1000));
+			freq / 1000, (freq % 1000));
 	}
 
 	/* Cache size */
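
/proc/cpuinfo now prefers the current cpufreq-scaled frequency and only reports the boot-time cpu_khz when no cpufreq driver is active. A one-function sketch of that fallback (names are illustrative, not kernel symbols):

unsigned int reported_khz(unsigned int scaled_khz, unsigned int boot_khz)
{
	unsigned int freq = scaled_khz;	/* cpufreq_quick_get() may return 0 */

	if (!freq)
		freq = boot_khz;	/* no cpufreq driver: fall back to cpu_khz */
	return freq;			/* printed as freq/1000 . freq%1000 MHz */
}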
diff --git a/arch/i386/kernel/cpuid.c b/arch/i386/kernel/cpuid.c
index 13bae799e626..006141d1c12a 100644
--- a/arch/i386/kernel/cpuid.c
+++ b/arch/i386/kernel/cpuid.c
@@ -117,14 +117,13 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
 {
 	char __user *tmp = buf;
 	u32 data[4];
-	size_t rv;
 	u32 reg = *ppos;
 	int cpu = iminor(file->f_dentry->d_inode);
 
 	if (count % 16)
 		return -EINVAL;	/* Invalid chunk size */
 
-	for (rv = 0; count; count -= 16) {
+	for (; count; count -= 16) {
 		do_cpuid(cpu, reg, data);
 		if (copy_to_user(tmp, &data, 16))
 			return -EFAULT;
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index e50b93155249..607c06007508 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -657,6 +657,7 @@ ENTRY(spurious_interrupt_bug)
 	pushl $do_spurious_interrupt_bug
 	jmp error_code
 
+.section .rodata,"a"
 #include "syscall_table.S"
 
 syscall_table_size=(.-sys_call_table)
diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S
index e437fb367498..5884469f6bfe 100644
--- a/arch/i386/kernel/head.S
+++ b/arch/i386/kernel/head.S
@@ -504,19 +504,24 @@ ENTRY(cpu_gdt_table)
 	.quad 0x0000000000000000	/* 0x80 TSS descriptor */
 	.quad 0x0000000000000000	/* 0x88 LDT descriptor */
 
-	/* Segments used for calling PnP BIOS */
-	.quad 0x00c09a0000000000	/* 0x90 32-bit code */
-	.quad 0x00809a0000000000	/* 0x98 16-bit code */
-	.quad 0x0080920000000000	/* 0xa0 16-bit data */
-	.quad 0x0080920000000000	/* 0xa8 16-bit data */
-	.quad 0x0080920000000000	/* 0xb0 16-bit data */
+	/*
+	 * Segments used for calling PnP BIOS have byte granularity.
+	 * They code segments and data segments have fixed 64k limits,
+	 * the transfer segment sizes are set at run time.
+	 */
+	.quad 0x00409a000000ffff	/* 0x90 32-bit code */
+	.quad 0x00009a000000ffff	/* 0x98 16-bit code */
+	.quad 0x000092000000ffff	/* 0xa0 16-bit data */
+	.quad 0x0000920000000000	/* 0xa8 16-bit data */
+	.quad 0x0000920000000000	/* 0xb0 16-bit data */
+
 	/*
 	 * The APM segments have byte granularity and their bases
-	 * and limits are set at run time.
+	 * are set at run time. All have 64k limits.
 	 */
-	.quad 0x00409a0000000000	/* 0xb8 APM CS    code */
-	.quad 0x00009a0000000000	/* 0xc0 APM CS 16 code (16 bit) */
-	.quad 0x0040920000000000	/* 0xc8 APM DS    data */
+	.quad 0x00409a000000ffff	/* 0xb8 APM CS    code */
+	.quad 0x00009a000000ffff	/* 0xc0 APM CS 16 code (16 bit) */
+	.quad 0x004092000000ffff	/* 0xc8 APM DS    data */
 
 	.quad 0x0000920000000000	/* 0xd0 - ESPFIX 16-bit SS */
 	.quad 0x0000000000000000	/* 0xd8 - unused */
@@ -525,3 +530,5 @@ ENTRY(cpu_gdt_table)
 	.quad 0x0000000000000000	/* 0xf0 - unused */
 	.quad 0x0000000000000000	/* 0xf8 - GDT entry 31: double-fault TSS */
 
+	/* Be sure this is zeroed to avoid false validations in Xen */
+	.fill PAGE_SIZE_asm / 8 - GDT_ENTRIES,8,0
diff --git a/arch/i386/kernel/i386_ksyms.c b/arch/i386/kernel/i386_ksyms.c
index 180f070d03cb..3999bec50c33 100644
--- a/arch/i386/kernel/i386_ksyms.c
+++ b/arch/i386/kernel/i386_ksyms.c
@@ -3,8 +3,7 @@
 #include <asm/checksum.h>
 #include <asm/desc.h>
 
-/* This is definitely a GPL-only symbol */
-EXPORT_SYMBOL_GPL(cpu_gdt_table);
+EXPORT_SYMBOL_GPL(cpu_gdt_descr);
 
 EXPORT_SYMBOL(__down_failed);
 EXPORT_SYMBOL(__down_failed_interruptible);
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 22c8675c79f4..7554f8fd874a 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -1722,8 +1722,8 @@ void disable_IO_APIC(void)
 	entry.dest_mode       = 0; /* Physical */
 	entry.delivery_mode   = dest_ExtINT; /* ExtInt */
 	entry.vector          = 0;
-	entry.dest.physical.physical_dest = 0;
-
+	entry.dest.physical.physical_dest =
+		GET_APIC_ID(apic_read(APIC_ID));
 
 	/*
 	 * Add it to the IO-APIC irq-routing table:
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 32b0c24ab9a6..19edcd526ba4 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -191,7 +191,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 			 */
 			save_previous_kprobe(kcb);
 			set_current_kprobe(p, regs, kcb);
-			p->nmissed++;
+			kprobes_inc_nmissed_count(p);
 			prepare_singlestep(p, regs);
 			kcb->kprobe_status = KPROBE_REENTER;
 			return 1;
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index d7cede83ba2e..0102f3d50e57 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -38,6 +38,12 @@
 int smp_found_config;
 unsigned int __initdata maxcpus = NR_CPUS;
 
+#ifdef CONFIG_HOTPLUG_CPU
+#define CPU_HOTPLUG_ENABLED	(1)
+#else
+#define CPU_HOTPLUG_ENABLED	(0)
+#endif
+
 /*
  * Various Linux-internal data structures created from the
  * MP-table.
@@ -219,14 +225,18 @@ static void __devinit MP_processor_info (struct mpc_config_processor *m)
 	cpu_set(num_processors, cpu_possible_map);
 	num_processors++;
 
-	if ((num_processors > 8) &&
-	    ((APIC_XAPIC(ver) &&
-	     (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)) ||
-	     (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)))
-		def_to_bigsmp = 1;
-	else
-		def_to_bigsmp = 0;
-
+	if (CPU_HOTPLUG_ENABLED || (num_processors > 8)) {
+		switch (boot_cpu_data.x86_vendor) {
+		case X86_VENDOR_INTEL:
+			if (!APIC_XAPIC(ver)) {
+				def_to_bigsmp = 0;
+				break;
+			}
+			/* If P4 and above fall through */
+		case X86_VENDOR_AMD:
+			def_to_bigsmp = 1;
+		}
+	}
 	bios_cpu_apicid[num_processors - 1] = m->mpc_apicid;
 }
 
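
CPU_HOTPLUG_ENABLED above is the usual trick of folding a Kconfig option into a 0/1 constant so it can appear in an ordinary C condition; the compiler discards the dead branch, and the code avoids an #ifdef in the middle of an expression. A minimal sketch of the pattern (the option name matches the patch, the rest is illustrative):

#include <stdio.h>

#ifdef CONFIG_HOTPLUG_CPU
#define CPU_HOTPLUG_ENABLED	(1)
#else
#define CPU_HOTPLUG_ENABLED	(0)
#endif

int main(void)
{
	int num_processors = 4;

	/* with the option off, the first operand is a constant 0 and the
	 * whole test reduces to the processor-count check at compile time */
	if (CPU_HOTPLUG_ENABLED || (num_processors > 8))
		printf("defaulting to the bigsmp APIC model\n");
	else
		printf("keeping the default APIC model\n");
	return 0;
}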
diff --git a/arch/i386/kernel/msr.c b/arch/i386/kernel/msr.c
index 44470fea4309..1d0a55e68760 100644
--- a/arch/i386/kernel/msr.c
+++ b/arch/i386/kernel/msr.c
@@ -172,7 +172,6 @@ static ssize_t msr_read(struct file *file, char __user * buf,
 {
 	u32 __user *tmp = (u32 __user *) buf;
 	u32 data[2];
-	size_t rv;
 	u32 reg = *ppos;
 	int cpu = iminor(file->f_dentry->d_inode);
 	int err;
@@ -180,7 +179,7 @@ static ssize_t msr_read(struct file *file, char __user * buf,
 	if (count % 8)
 		return -EINVAL;	/* Invalid chunk size */
 
-	for (rv = 0; count; count -= 8) {
+	for (; count; count -= 8) {
 		err = do_rdmsr(cpu, reg, &data[0], &data[1]);
 		if (err)
 			return err;
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index df6c2bcde067..45e7f0ac4b04 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -308,9 +308,7 @@ void show_regs(struct pt_regs * regs)
 	cr0 = read_cr0();
 	cr2 = read_cr2();
 	cr3 = read_cr3();
-	if (current_cpu_data.x86 > 4) {
-		cr4 = read_cr4();
-	}
+	cr4 = read_cr4_safe();
 	printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
 	show_trace(NULL, &regs->esp);
 }
@@ -404,17 +402,7 @@ void flush_thread(void)
 
 void release_thread(struct task_struct *dead_task)
 {
-	if (dead_task->mm) {
-		// temporary debugging check
-		if (dead_task->mm->context.size) {
-			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
-					dead_task->comm,
-					dead_task->mm->context.ldt,
-					dead_task->mm->context.size);
-			BUG();
-		}
-	}
-
+	BUG_ON(dead_task->mm);
 	release_vm86_irqs(dead_task);
 }
 
@@ -554,7 +542,9 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
 	struct pt_regs ptregs;
 
 	ptregs = *(struct pt_regs *)
-		((unsigned long)tsk->thread_info+THREAD_SIZE - sizeof(ptregs));
+		((unsigned long)tsk->thread_info +
+		 /* see comments in copy_thread() about -8 */
+		 THREAD_SIZE - sizeof(ptregs) - 8);
 	ptregs.xcs &= 0xffff;
 	ptregs.xds &= 0xffff;
 	ptregs.xes &= 0xffff;
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index 5ffbb4b7ad05..5c1fb6aada5b 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -32,9 +32,12 @@
  * in exit.c or in signal.c.
  */
 
-/* determines which flags the user has access to. */
-/* 1 = access 0 = no access */
-#define FLAG_MASK 0x00044dd5
+/*
+ * Determines which flags the user has access to [1 = access, 0 = no access].
+ * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), IOPL(12-13), IF(9).
+ * Also masks reserved bits (31-22, 15, 5, 3, 1).
+ */
+#define FLAG_MASK 0x00054dd5
 
 /* set's the trap flag. */
 #define TRAP_FLAG 0x100
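
FLAG_MASK is applied when a traced task's EFLAGS is written: user-controlled bits pass through, everything else keeps its current value. The sketch below mirrors that merge (roughly what putreg() does), simplified and standalone:

#include <stdio.h>

#define FLAG_MASK 0x00054dd5UL	/* CF,PF,AF,ZF,SF,TF,DF,OF,NT,RF,AC writable */

unsigned long filtered_eflags(unsigned long current_eflags, unsigned long user_value)
{
	/* keep the user's writable bits, preserve everything the mask protects */
	return (user_value & FLAG_MASK) | (current_eflags & ~FLAG_MASK);
}

int main(void)
{
	/* an attempt to set IOPL=3 (bits 12-13) and IF (bit 9) is dropped */
	unsigned long result = filtered_eflags(0x00000202UL, 0x00003301UL);

	printf("resulting eflags: 0x%08lx\n", result);
	return 0;
}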
diff --git a/arch/i386/kernel/reboot.c b/arch/i386/kernel/reboot.c
index 2afe0f8d555a..2fa5803a759d 100644
--- a/arch/i386/kernel/reboot.c
+++ b/arch/i386/kernel/reboot.c
@@ -111,12 +111,12 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"),
 		},
 	},
-	{	/* Handle problems with rebooting on HP nc6120 */
+	{	/* Handle problems with rebooting on HP laptops */
 		.callback = set_bios_reboot,
-		.ident = "HP Compaq nc6120",
+		.ident = "HP Compaq Laptop",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nc6120"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"),
 		},
 	},
 	{ }
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index fdfcb0cba9b4..27c956db0461 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -954,6 +954,12 @@ efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
 	return 0;
 }
 
+static int __init
+efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
+{
+	memory_present(0, start, end);
+	return 0;
+}
 
 /*
  * Find the highest page frame number we have available
@@ -965,6 +971,7 @@ void __init find_max_pfn(void)
 	max_pfn = 0;
 	if (efi_enabled) {
 		efi_memmap_walk(efi_find_max_pfn, &max_pfn);
+		efi_memmap_walk(efi_memory_present_wrapper, NULL);
 		return;
 	}
 
@@ -979,6 +986,7 @@ void __init find_max_pfn(void)
 			continue;
 		if (end > max_pfn)
 			max_pfn = end;
+		memory_present(0, start, end);
 	}
 }
 
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index d16520da4550..b3c2e2c26743 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -903,6 +903,12 @@ static int __devinit do_boot_cpu(int apicid, int cpu)
 	unsigned long start_eip;
 	unsigned short nmi_high = 0, nmi_low = 0;
 
+	if (!cpu_gdt_descr[cpu].address &&
+		!(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
+		printk("Failed to allocate GDT for CPU %d\n", cpu);
+		return 1;
+	}
+
 	++cpucount;
 
 	/*
@@ -1338,8 +1344,7 @@ int __cpu_disable(void)
 	if (cpu == 0)
 		return -EBUSY;
 
-	/* We enable the timer again on the exit path of the death loop */
-	disable_APIC_timer();
+	clear_local_APIC();
 	/* Allow any queued timer interrupts to get serviced */
 	local_irq_enable();
 	mdelay(1);
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S
index 9b21a31d4f4e..f7ba4acc20ec 100644
--- a/arch/i386/kernel/syscall_table.S
+++ b/arch/i386/kernel/syscall_table.S
@@ -1,4 +1,3 @@
-.data
 ENTRY(sys_call_table)
 	.long sys_restart_syscall	/* 0 - old "setup()" system call, used for restarting */
 	.long sys_exit
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c
index d395e3b42485..47675bbbb316 100644
--- a/arch/i386/kernel/timers/timer_tsc.c
+++ b/arch/i386/kernel/timers/timer_tsc.c
@@ -330,7 +330,9 @@ int recalibrate_cpu_khz(void)
 	unsigned int cpu_khz_old = cpu_khz;
 
 	if (cpu_has_tsc) {
+		local_irq_disable();
 		init_cpu_khz();
+		local_irq_enable();
 		cpu_data[0].loops_per_jiffy =
 		    cpufreq_scale(cpu_data[0].loops_per_jiffy,
 				  cpu_khz_old,
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index c34d1bfc5161..53ad954e3ba4 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -306,14 +306,17 @@ void die(const char * str, struct pt_regs * regs, long err)
306 .lock_owner_depth = 0 306 .lock_owner_depth = 0
307 }; 307 };
308 static int die_counter; 308 static int die_counter;
309 unsigned long flags;
309 310
310 if (die.lock_owner != raw_smp_processor_id()) { 311 if (die.lock_owner != raw_smp_processor_id()) {
311 console_verbose(); 312 console_verbose();
312 spin_lock_irq(&die.lock); 313 spin_lock_irqsave(&die.lock, flags);
313 die.lock_owner = smp_processor_id(); 314 die.lock_owner = smp_processor_id();
314 die.lock_owner_depth = 0; 315 die.lock_owner_depth = 0;
315 bust_spinlocks(1); 316 bust_spinlocks(1);
316 } 317 }
318 else
319 local_save_flags(flags);
317 320
318 if (++die.lock_owner_depth < 3) { 321 if (++die.lock_owner_depth < 3) {
319 int nl = 0; 322 int nl = 0;
@@ -340,7 +343,7 @@ void die(const char * str, struct pt_regs * regs, long err)
340 343
341 bust_spinlocks(0); 344 bust_spinlocks(0);
342 die.lock_owner = -1; 345 die.lock_owner = -1;
343 spin_unlock_irq(&die.lock); 346 spin_unlock_irqrestore(&die.lock, flags);
344 347
345 if (kexec_should_crash(current)) 348 if (kexec_should_crash(current))
346 crash_kexec(regs); 349 crash_kexec(regs);
@@ -452,7 +455,7 @@ DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
452#endif 455#endif
453DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow) 456DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
454DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds) 457DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
455DO_ERROR_INFO( 6, SIGILL, "invalid operand", invalid_op, ILL_ILLOPN, regs->eip) 458DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip)
456DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun) 459DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
457DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) 460DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
458DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) 461DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
@@ -650,13 +653,6 @@ fastcall void do_nmi(struct pt_regs * regs, long error_code)
650 653
651 cpu = smp_processor_id(); 654 cpu = smp_processor_id();
652 655
653#ifdef CONFIG_HOTPLUG_CPU
654 if (!cpu_online(cpu)) {
655 nmi_exit();
656 return;
657 }
658#endif
659
660 ++nmi_count(cpu); 656 ++nmi_count(cpu);
661 657
662 if (!rcu_dereference(nmi_callback)(regs, cpu)) 658 if (!rcu_dereference(nmi_callback)(regs, cpu))
@@ -1082,9 +1078,9 @@ void __init trap_init(void)
1082 set_trap_gate(0,&divide_error); 1078 set_trap_gate(0,&divide_error);
1083 set_intr_gate(1,&debug); 1079 set_intr_gate(1,&debug);
1084 set_intr_gate(2,&nmi); 1080 set_intr_gate(2,&nmi);
1085 set_system_intr_gate(3, &int3); /* int3-5 can be called from all */ 1081 set_system_intr_gate(3, &int3); /* int3/4 can be called from all */
1086 set_system_gate(4,&overflow); 1082 set_system_gate(4,&overflow);
1087 set_system_gate(5,&bounds); 1083 set_trap_gate(5,&bounds);
1088 set_trap_gate(6,&invalid_op); 1084 set_trap_gate(6,&invalid_op);
1089 set_trap_gate(7,&device_not_available); 1085 set_trap_gate(7,&device_not_available);
1090 set_task_gate(8,GDT_ENTRY_DOUBLEFAULT_TSS); 1086 set_task_gate(8,GDT_ENTRY_DOUBLEFAULT_TSS);
@@ -1102,6 +1098,28 @@ void __init trap_init(void)
1102#endif 1098#endif
1103 set_trap_gate(19,&simd_coprocessor_error); 1099 set_trap_gate(19,&simd_coprocessor_error);
1104 1100
1101 if (cpu_has_fxsr) {
1102 /*
1103 * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
1104 * Generates a compile-time "error: zero width for bit-field" if
1105 * the alignment is wrong.
1106 */
1107 struct fxsrAlignAssert {
1108 int _:!(offsetof(struct task_struct,
1109 thread.i387.fxsave) & 15);
1110 };
1111
1112 printk(KERN_INFO "Enabling fast FPU save and restore... ");
1113 set_in_cr4(X86_CR4_OSFXSR);
1114 printk("done.\n");
1115 }
1116 if (cpu_has_xmm) {
1117 printk(KERN_INFO "Enabling unmasked SIMD FPU exception "
1118 "support... ");
1119 set_in_cr4(X86_CR4_OSXMMEXCPT);
1120 printk("done.\n");
1121 }
1122
1105 set_system_gate(SYSCALL_VECTOR,&system_call); 1123 set_system_gate(SYSCALL_VECTOR,&system_call);
1106 1124
1107 /* 1125 /*
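The fxsrAlignAssert struct added to trap_init() is a compile-time assertion: the bit-field width !(offsetof(...) & 15) evaluates to 1 when thread.i387.fxsave is 16-byte aligned and to 0 otherwise, and a zero-width named bit-field is rejected by the compiler. A standalone sketch of the same trick, using a made-up struct buffer instead of task_struct:

#include <stddef.h>

struct buffer {
        char pad[16];
        char data[64];          /* must stay 16-byte aligned within the struct */
};

/*
 * Width is !(offsetof(...) & 15): 1 when 'data' sits on a 16-byte boundary,
 * 0 when it does not.  A zero-width *named* bit-field is a compile error
 * ("zero width for bit-field"), so misalignment breaks the build instead of
 * corrupting FXSAVE/FXRSTOR data at run time.
 */
struct align_assert {
        int _:!(offsetof(struct buffer, data) & 15);
};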
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 06e26f006238..7df494b51a5b 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -735,6 +735,30 @@ void free_initmem(void)
735 printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10); 735 printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10);
736} 736}
737 737
738#ifdef CONFIG_DEBUG_RODATA
739
740extern char __start_rodata, __end_rodata;
741void mark_rodata_ro(void)
742{
743 unsigned long addr = (unsigned long)&__start_rodata;
744
745 for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
746 change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);
747
748 printk ("Write protecting the kernel read-only data: %luk\n",
749 (unsigned long)(&__end_rodata - &__start_rodata) >> 10);
750
751 /*
752 * change_page_attr() requires a global_flush_tlb() call after it.
753 * We do this after the printk so that if something went wrong in the
754 * change, the printk gets out at least to give a better debug hint
755 * of who is the culprit.
756 */
757 global_flush_tlb();
758}
759#endif
760
761
738#ifdef CONFIG_BLK_DEV_INITRD 762#ifdef CONFIG_BLK_DEV_INITRD
739void free_initrd_mem(unsigned long start, unsigned long end) 763void free_initrd_mem(unsigned long start, unsigned long end)
740{ 764{
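The new mark_rodata_ro() follows the change_page_attr() calling convention: adjust the attributes one page at a time, then issue a single global_flush_tlb() to make the new mappings visible everywhere. A minimal sketch of the same pattern for an arbitrary page-aligned range; protect_kernel_range() is a hypothetical helper, not part of the patch, and the headers assumed here are the usual i386 ones for these interfaces.

#include <linux/mm.h>
#include <asm/cacheflush.h>     /* change_page_attr(), global_flush_tlb() */

/* Hypothetical helper: write-protect a page-aligned kernel virtual range. */
static void protect_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long addr;

        for (addr = start; addr < end; addr += PAGE_SIZE)
                change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);

        /*
         * change_page_attr() batches the PTE updates; they only take effect
         * on all CPUs after the mandatory global_flush_tlb().
         */
        global_flush_tlb();
}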
diff --git a/arch/i386/mm/ioremap.c b/arch/i386/mm/ioremap.c
index 5d09de8d1c6b..247fde76aaed 100644
--- a/arch/i386/mm/ioremap.c
+++ b/arch/i386/mm/ioremap.c
@@ -223,9 +223,15 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
223} 223}
224EXPORT_SYMBOL(ioremap_nocache); 224EXPORT_SYMBOL(ioremap_nocache);
225 225
226/**
227 * iounmap - Free an IO remapping
228 * @addr: virtual address from ioremap_*
229 *
230 * Caller must ensure there is only one unmapping for the same pointer.
231 */
226void iounmap(volatile void __iomem *addr) 232void iounmap(volatile void __iomem *addr)
227{ 233{
228 struct vm_struct *p; 234 struct vm_struct *p, *o;
229 235
230 if ((void __force *)addr <= high_memory) 236 if ((void __force *)addr <= high_memory)
231 return; 237 return;
@@ -239,22 +245,37 @@ void iounmap(volatile void __iomem *addr)
239 addr < phys_to_virt(ISA_END_ADDRESS)) 245 addr < phys_to_virt(ISA_END_ADDRESS))
240 return; 246 return;
241 247
242 write_lock(&vmlist_lock); 248 addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
243 p = __remove_vm_area((void *)(PAGE_MASK & (unsigned long __force)addr)); 249
244 if (!p) { 250 /* Use the vm area unlocked, assuming the caller
245 printk(KERN_WARNING "iounmap: bad address %p\n", addr); 251 ensures there isn't another iounmap for the same address
252 in parallel. Reuse of the virtual address is prevented by
253 leaving it in the global lists until we're done with it.
254 cpa takes care of the direct mappings. */
255 read_lock(&vmlist_lock);
256 for (p = vmlist; p; p = p->next) {
257 if (p->addr == addr)
258 break;
259 }
260 read_unlock(&vmlist_lock);
261
262 if (!p) {
263 printk("iounmap: bad address %p\n", addr);
246 dump_stack(); 264 dump_stack();
247 goto out_unlock; 265 return;
248 } 266 }
249 267
268 /* Reset the direct mapping. Can block */
250 if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) { 269 if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
251 change_page_attr(virt_to_page(__va(p->phys_addr)), 270 change_page_attr(virt_to_page(__va(p->phys_addr)),
252 p->size >> PAGE_SHIFT, 271 p->size >> PAGE_SHIFT,
253 PAGE_KERNEL); 272 PAGE_KERNEL);
254 global_flush_tlb(); 273 global_flush_tlb();
255 } 274 }
256out_unlock: 275
257 write_unlock(&vmlist_lock); 276 /* Finally remove it */
277 o = remove_vm_area((void *)addr);
278 BUG_ON(p != o || o == NULL);
258 kfree(p); 279 kfree(p);
259} 280}
260EXPORT_SYMBOL(iounmap); 281EXPORT_SYMBOL(iounmap);
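The new kernel-doc comment states the iounmap() contract: the caller must guarantee a single unmapping per pointer, which is what lets the lookup run under read_lock(&vmlist_lock) with the area only removed at the end. A sketch of a typical usage that satisfies the contract; the device, its register offset, and the function names are invented for illustration.

#include <linux/errno.h>
#include <asm/io.h>

/* Hypothetical device: one mapping set up once, torn down once. */
static void __iomem *regs;

static int example_setup(unsigned long phys, unsigned long len)
{
        regs = ioremap_nocache(phys, len);
        if (!regs)
                return -ENOMEM;
        writel(0x1, regs + 0x10);       /* hypothetical enable register */
        return 0;
}

static void example_teardown(void)
{
        /* Exactly one iounmap() for the pointer returned by ioremap_nocache(). */
        iounmap(regs);
        regs = NULL;
}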
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index f600fc244f02..c30a16df6440 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -13,6 +13,7 @@
13#include <asm/processor.h> 13#include <asm/processor.h>
14#include <asm/tlbflush.h> 14#include <asm/tlbflush.h>
15#include <asm/pgalloc.h> 15#include <asm/pgalloc.h>
16#include <asm/sections.h>
16 17
17static DEFINE_SPINLOCK(cpa_lock); 18static DEFINE_SPINLOCK(cpa_lock);
18static struct list_head df_list = LIST_HEAD_INIT(df_list); 19static struct list_head df_list = LIST_HEAD_INIT(df_list);
@@ -36,7 +37,8 @@ pte_t *lookup_address(unsigned long address)
36 return pte_offset_kernel(pmd, address); 37 return pte_offset_kernel(pmd, address);
37} 38}
38 39
39static struct page *split_large_page(unsigned long address, pgprot_t prot) 40static struct page *split_large_page(unsigned long address, pgprot_t prot,
41 pgprot_t ref_prot)
40{ 42{
41 int i; 43 int i;
42 unsigned long addr; 44 unsigned long addr;
@@ -54,7 +56,7 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot)
54 pbase = (pte_t *)page_address(base); 56 pbase = (pte_t *)page_address(base);
55 for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) { 57 for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
56 set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, 58 set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
57 addr == address ? prot : PAGE_KERNEL)); 59 addr == address ? prot : ref_prot));
58 } 60 }
59 return base; 61 return base;
60} 62}
@@ -98,11 +100,18 @@ static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
98 */ 100 */
99static inline void revert_page(struct page *kpte_page, unsigned long address) 101static inline void revert_page(struct page *kpte_page, unsigned long address)
100{ 102{
101 pte_t *linear = (pte_t *) 103 pgprot_t ref_prot;
104 pte_t *linear;
105
106 ref_prot =
107 ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
108 ? PAGE_KERNEL_LARGE_EXEC : PAGE_KERNEL_LARGE;
109
110 linear = (pte_t *)
102 pmd_offset(pud_offset(pgd_offset_k(address), address), address); 111 pmd_offset(pud_offset(pgd_offset_k(address), address), address);
103 set_pmd_pte(linear, address, 112 set_pmd_pte(linear, address,
104 pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT, 113 pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
105 PAGE_KERNEL_LARGE)); 114 ref_prot));
106} 115}
107 116
108static int 117static int
@@ -123,10 +132,16 @@ __change_page_attr(struct page *page, pgprot_t prot)
123 if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 132 if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
124 set_pte_atomic(kpte, mk_pte(page, prot)); 133 set_pte_atomic(kpte, mk_pte(page, prot));
125 } else { 134 } else {
126 struct page *split = split_large_page(address, prot); 135 pgprot_t ref_prot;
136 struct page *split;
137
138 ref_prot =
139 ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
140 ? PAGE_KERNEL_EXEC : PAGE_KERNEL;
141 split = split_large_page(address, prot, ref_prot);
127 if (!split) 142 if (!split)
128 return -ENOMEM; 143 return -ENOMEM;
129 set_pmd_pte(kpte,address,mk_pte(split, PAGE_KERNEL)); 144 set_pmd_pte(kpte,address,mk_pte(split, ref_prot));
130 kpte_page = split; 145 kpte_page = split;
131 } 146 }
132 get_page(kpte_page); 147 get_page(kpte_page);
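The ref_prot logic above keeps large pages that still cover kernel text (anything below _etext) executable when they are split or reverted, and falls back to the plain PAGE_KERNEL / PAGE_KERNEL_LARGE protections everywhere else. A sketch of that selection as a hypothetical helper; split_ref_prot() does not exist in the patch.

#include <asm/page.h>           /* LARGE_PAGE_MASK */
#include <asm/pgtable.h>        /* pgprot_t, PAGE_KERNEL, PAGE_KERNEL_EXEC */
#include <asm/sections.h>       /* _etext */

/*
 * Reference protection for the 4K PTEs of a split large page: executable
 * only while the 2M/4M region still covers kernel text.
 */
static pgprot_t split_ref_prot(unsigned long address)
{
        if ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
                return PAGE_KERNEL_EXEC;
        return PAGE_KERNEL;
}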
diff --git a/arch/i386/pci/Makefile b/arch/i386/pci/Makefile
index ead6122dd06d..5461d4d5ea1e 100644
--- a/arch/i386/pci/Makefile
+++ b/arch/i386/pci/Makefile
@@ -1,7 +1,7 @@
1obj-y := i386.o 1obj-y := i386.o
2 2
3obj-$(CONFIG_PCI_BIOS) += pcbios.o 3obj-$(CONFIG_PCI_BIOS) += pcbios.o
4obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o 4obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o direct.o
5obj-$(CONFIG_PCI_DIRECT) += direct.o 5obj-$(CONFIG_PCI_DIRECT) += direct.o
6 6
7pci-y := fixup.o 7pci-y := fixup.o
diff --git a/arch/i386/pci/direct.c b/arch/i386/pci/direct.c
index 94331d6be7a3..e3ac502bf2fb 100644
--- a/arch/i386/pci/direct.c
+++ b/arch/i386/pci/direct.c
@@ -13,7 +13,7 @@
13#define PCI_CONF1_ADDRESS(bus, devfn, reg) \ 13#define PCI_CONF1_ADDRESS(bus, devfn, reg) \
14 (0x80000000 | (bus << 16) | (devfn << 8) | (reg & ~3)) 14 (0x80000000 | (bus << 16) | (devfn << 8) | (reg & ~3))
15 15
16static int pci_conf1_read(unsigned int seg, unsigned int bus, 16int pci_conf1_read(unsigned int seg, unsigned int bus,
17 unsigned int devfn, int reg, int len, u32 *value) 17 unsigned int devfn, int reg, int len, u32 *value)
18{ 18{
19 unsigned long flags; 19 unsigned long flags;
@@ -42,7 +42,7 @@ static int pci_conf1_read(unsigned int seg, unsigned int bus,
42 return 0; 42 return 0;
43} 43}
44 44
45static int pci_conf1_write(unsigned int seg, unsigned int bus, 45int pci_conf1_write(unsigned int seg, unsigned int bus,
46 unsigned int devfn, int reg, int len, u32 value) 46 unsigned int devfn, int reg, int len, u32 value)
47{ 47{
48 unsigned long flags; 48 unsigned long flags;
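pci_conf1_read()/pci_conf1_write() lose their static qualifier so the MMCONFIG code can fall back to them; PCI_CONF1_ADDRESS above is the usual type 1 encoding written to port 0xCF8 before the data access at 0xCFC. A sketch of a caller using the now-exported reader, assuming the declarations added to arch/i386/pci/pci.h later in this patch; example_read_id() is hypothetical.

#include <linux/pci.h>
#include "pci.h"                /* pci_conf1_read() declaration added below */

/* Read the 32-bit vendor/device ID of bus 0, device 0, function 0 via type 1. */
static int example_read_id(u32 *id)
{
        /* seg 0, bus 0, devfn 0, register 0 (PCI_VENDOR_ID), 4 bytes */
        return pci_conf1_read(0, 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID, 4, id);
}

A value of 0xffffffff in *id means no device answered, which is exactly the test unreachable_devices() uses in the mmconfig.c hunk below.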
diff --git a/arch/i386/pci/irq.c b/arch/i386/pci/irq.c
index 19e6f4871d1e..ee8e01697d96 100644
--- a/arch/i386/pci/irq.c
+++ b/arch/i386/pci/irq.c
@@ -846,7 +846,7 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
846 * reported by the device if possible. 846 * reported by the device if possible.
847 */ 847 */
848 newirq = dev->irq; 848 newirq = dev->irq;
849 if (!((1 << newirq) & mask)) { 849 if (newirq && !((1 << newirq) & mask)) {
850 if ( pci_probe & PCI_USE_PIRQ_MASK) newirq = 0; 850 if ( pci_probe & PCI_USE_PIRQ_MASK) newirq = 0;
851 else printk(KERN_WARNING "PCI: IRQ %i for device %s doesn't match PIRQ mask - try pci=usepirqmask\n", newirq, pci_name(dev)); 851 else printk(KERN_WARNING "PCI: IRQ %i for device %s doesn't match PIRQ mask - try pci=usepirqmask\n", newirq, pci_name(dev));
852 } 852 }
diff --git a/arch/i386/pci/mmconfig.c b/arch/i386/pci/mmconfig.c
index dfbf80cff834..4bb4d4b0f73a 100644
--- a/arch/i386/pci/mmconfig.c
+++ b/arch/i386/pci/mmconfig.c
@@ -19,21 +19,25 @@
19/* The base address of the last MMCONFIG device accessed */ 19/* The base address of the last MMCONFIG device accessed */
20static u32 mmcfg_last_accessed_device; 20static u32 mmcfg_last_accessed_device;
21 21
22static DECLARE_BITMAP(fallback_slots, 32);
23
22/* 24/*
23 * Functions for accessing PCI configuration space with MMCONFIG accesses 25 * Functions for accessing PCI configuration space with MMCONFIG accesses
24 */ 26 */
25static u32 get_base_addr(unsigned int seg, int bus) 27static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
26{ 28{
27 int cfg_num = -1; 29 int cfg_num = -1;
28 struct acpi_table_mcfg_config *cfg; 30 struct acpi_table_mcfg_config *cfg;
29 31
32 if (seg == 0 && bus == 0 &&
33 test_bit(PCI_SLOT(devfn), fallback_slots))
34 return 0;
35
30 while (1) { 36 while (1) {
31 ++cfg_num; 37 ++cfg_num;
32 if (cfg_num >= pci_mmcfg_config_num) { 38 if (cfg_num >= pci_mmcfg_config_num) {
33 /* something bad is going on, no cfg table is found. */ 39 /* Not found - fallback to type 1 */
34 /* so we fall back to the old way we used to do this */ 40 return 0;
35 /* and just rely on the first entry to be correct. */
36 return pci_mmcfg_config[0].base_address;
37 } 41 }
38 cfg = &pci_mmcfg_config[cfg_num]; 42 cfg = &pci_mmcfg_config[cfg_num];
39 if (cfg->pci_segment_group_number != seg) 43 if (cfg->pci_segment_group_number != seg)
@@ -44,9 +48,9 @@ static u32 get_base_addr(unsigned int seg, int bus)
44 } 48 }
45} 49}
46 50
47static inline void pci_exp_set_dev_base(unsigned int seg, int bus, int devfn) 51static inline void pci_exp_set_dev_base(unsigned int base, int bus, int devfn)
48{ 52{
49 u32 dev_base = get_base_addr(seg, bus) | (bus << 20) | (devfn << 12); 53 u32 dev_base = base | (bus << 20) | (devfn << 12);
50 if (dev_base != mmcfg_last_accessed_device) { 54 if (dev_base != mmcfg_last_accessed_device) {
51 mmcfg_last_accessed_device = dev_base; 55 mmcfg_last_accessed_device = dev_base;
52 set_fixmap_nocache(FIX_PCIE_MCFG, dev_base); 56 set_fixmap_nocache(FIX_PCIE_MCFG, dev_base);
@@ -57,13 +61,18 @@ static int pci_mmcfg_read(unsigned int seg, unsigned int bus,
57 unsigned int devfn, int reg, int len, u32 *value) 61 unsigned int devfn, int reg, int len, u32 *value)
58{ 62{
59 unsigned long flags; 63 unsigned long flags;
64 u32 base;
60 65
61 if (!value || (bus > 255) || (devfn > 255) || (reg > 4095)) 66 if (!value || (bus > 255) || (devfn > 255) || (reg > 4095))
62 return -EINVAL; 67 return -EINVAL;
63 68
69 base = get_base_addr(seg, bus, devfn);
70 if (!base)
71 return pci_conf1_read(seg,bus,devfn,reg,len,value);
72
64 spin_lock_irqsave(&pci_config_lock, flags); 73 spin_lock_irqsave(&pci_config_lock, flags);
65 74
66 pci_exp_set_dev_base(seg, bus, devfn); 75 pci_exp_set_dev_base(base, bus, devfn);
67 76
68 switch (len) { 77 switch (len) {
69 case 1: 78 case 1:
@@ -86,13 +95,18 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
86 unsigned int devfn, int reg, int len, u32 value) 95 unsigned int devfn, int reg, int len, u32 value)
87{ 96{
88 unsigned long flags; 97 unsigned long flags;
98 u32 base;
89 99
90 if ((bus > 255) || (devfn > 255) || (reg > 4095)) 100 if ((bus > 255) || (devfn > 255) || (reg > 4095))
91 return -EINVAL; 101 return -EINVAL;
92 102
103 base = get_base_addr(seg, bus, devfn);
104 if (!base)
105 return pci_conf1_write(seg,bus,devfn,reg,len,value);
106
93 spin_lock_irqsave(&pci_config_lock, flags); 107 spin_lock_irqsave(&pci_config_lock, flags);
94 108
95 pci_exp_set_dev_base(seg, bus, devfn); 109 pci_exp_set_dev_base(base, bus, devfn);
96 110
97 switch (len) { 111 switch (len) {
98 case 1: 112 case 1:
@@ -116,6 +130,37 @@ static struct pci_raw_ops pci_mmcfg = {
116 .write = pci_mmcfg_write, 130 .write = pci_mmcfg_write,
117}; 131};
118 132
133/* K8 systems have some devices (typically in the built-in northbridge)
134 that are only accessible using type 1 config accesses.
135 Normally this can be expressed in the MCFG by not listing them
136 and assigning suitable _SEGs, but this isn't implemented in some BIOSes.
137 Instead, try to discover all devices on bus 0 that are unreachable using
138 MMCONFIG and fall back to type 1 for them.
139 We only do this for bus 0 / segment 0. */
140static __init void unreachable_devices(void)
141{
142 int i;
143 unsigned long flags;
144
145 for (i = 0; i < 32; i++) {
146 u32 val1;
147 u32 addr;
148
149 pci_conf1_read(0, 0, PCI_DEVFN(i, 0), 0, 4, &val1);
150 if (val1 == 0xffffffff)
151 continue;
152
153 /* Locking probably not needed, but safer */
154 spin_lock_irqsave(&pci_config_lock, flags);
155 addr = get_base_addr(0, 0, PCI_DEVFN(i, 0));
156 if (addr != 0)
157 pci_exp_set_dev_base(addr, 0, PCI_DEVFN(i, 0));
158 if (addr == 0 || readl((u32 __iomem *)mmcfg_virt_addr) != val1)
159 set_bit(i, fallback_slots);
160 spin_unlock_irqrestore(&pci_config_lock, flags);
161 }
162}
163
119static int __init pci_mmcfg_init(void) 164static int __init pci_mmcfg_init(void)
120{ 165{
121 if ((pci_probe & PCI_PROBE_MMCONF) == 0) 166 if ((pci_probe & PCI_PROBE_MMCONF) == 0)
@@ -131,6 +176,8 @@ static int __init pci_mmcfg_init(void)
131 raw_pci_ops = &pci_mmcfg; 176 raw_pci_ops = &pci_mmcfg;
132 pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF; 177 pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF;
133 178
179 unreachable_devices();
180
134 out: 181 out:
135 return 0; 182 return 0;
136} 183}
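With the fallback_slots bitmap filled in by unreachable_devices(), get_base_addr() returns 0 for bus 0 slots that only answer to type 1, and pci_mmcfg_read()/pci_mmcfg_write() silently delegate to pci_conf1_read()/pci_conf1_write(). A sketch of how a config read flows through raw_pci_ops after pci_mmcfg_init() has installed &pci_mmcfg; it assumes raw_pci_ops is visible via the local pci.h as in the other arch/i386/pci files, and example_read_class() is hypothetical.

#include <linux/pci.h>
#include "pci.h"                /* raw_pci_ops, pci_config_lock, ... */

/*
 * Uses MMCONFIG for most devices, but transparently drops back to type 1
 * for bus 0 slots recorded in fallback_slots.
 */
static u32 example_read_class(unsigned int bus, unsigned int devfn)
{
        u32 val = 0;

        raw_pci_ops->read(0, bus, devfn, PCI_CLASS_REVISION, 4, &val);
        return val;
}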
diff --git a/arch/i386/pci/pci.h b/arch/i386/pci/pci.h
index 127d53ad16be..f550781ec310 100644
--- a/arch/i386/pci/pci.h
+++ b/arch/i386/pci/pci.h
@@ -74,3 +74,10 @@ extern spinlock_t pci_config_lock;
74 74
75extern int (*pcibios_enable_irq)(struct pci_dev *dev); 75extern int (*pcibios_enable_irq)(struct pci_dev *dev);
76extern void (*pcibios_disable_irq)(struct pci_dev *dev); 76extern void (*pcibios_disable_irq)(struct pci_dev *dev);
77
78extern int pci_conf1_write(unsigned int seg, unsigned int bus,
79 unsigned int devfn, int reg, int len, u32 value);
80extern int pci_conf1_read(unsigned int seg, unsigned int bus,
81 unsigned int devfn, int reg, int len, u32 *value);
82
83