Diffstat (limited to 'arch/i386')
-rw-r--r--  arch/i386/Kconfig                                    310
-rw-r--r--  arch/i386/Kconfig.cpu                                309
-rw-r--r--  arch/i386/Makefile                                    31
-rw-r--r--  arch/i386/Makefile.cpu                                41
-rw-r--r--  arch/i386/kernel/apic.c                               82
-rw-r--r--  arch/i386/kernel/apm.c                                40
-rw-r--r--  arch/i386/kernel/cpu/common.c                         15
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c            1
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/p4-clockmod.c             1
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k8.c             1
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c      1
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c                87
-rw-r--r--  arch/i386/kernel/cpu/mcheck/p6.c                      11
-rw-r--r--  arch/i386/kernel/cpu/mtrr/if.c                       119
-rw-r--r--  arch/i386/kernel/cpu/proc.c                            2
-rw-r--r--  arch/i386/kernel/cpuid.c                               2
-rw-r--r--  arch/i386/kernel/crash.c                               7
-rw-r--r--  arch/i386/kernel/i8259.c                               4
-rw-r--r--  arch/i386/kernel/io_apic.c                           153
-rw-r--r--  arch/i386/kernel/irq.c                                 8
-rw-r--r--  arch/i386/kernel/mpparse.c                            41
-rw-r--r--  arch/i386/kernel/msr.c                                 2
-rw-r--r--  arch/i386/kernel/nmi.c                                39
-rw-r--r--  arch/i386/kernel/ptrace.c                              2
-rw-r--r--  arch/i386/kernel/reboot_fixups.c                       2
-rw-r--r--  arch/i386/kernel/setup.c                              24
-rw-r--r--  arch/i386/kernel/smpboot.c                            72
-rw-r--r--  arch/i386/kernel/srat.c                                7
-rw-r--r--  arch/i386/kernel/time.c                               16
-rw-r--r--  arch/i386/kernel/time_hpet.c                          20
-rw-r--r--  arch/i386/kernel/timers/timer_hpet.c                  17
-rw-r--r--  arch/i386/kernel/timers/timer_tsc.c                   21
-rw-r--r--  arch/i386/kernel/traps.c                               1
-rw-r--r--  arch/i386/kernel/vm86.c                               17
-rw-r--r--  arch/i386/mach-es7000/es7000.h                        11
-rw-r--r--  arch/i386/mach-es7000/es7000plat.c                    11
-rw-r--r--  arch/i386/mm/discontig.c                               4
-rw-r--r--  arch/i386/mm/fault.c                                   2
-rw-r--r--  arch/i386/mm/init.c                                   62
-rw-r--r--  arch/i386/mm/ioremap.c                                 4
-rw-r--r--  arch/i386/mm/pgtable.c                                11
-rw-r--r--  arch/i386/oprofile/backtrace.c                        38
-rw-r--r--  arch/i386/pci/fixup.c                                 59
-rw-r--r--  arch/i386/pci/irq.c                                   55
-rw-r--r--  arch/i386/power/cpu.c                                 12
45 files changed, 1069 insertions, 706 deletions
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index d2703cda61ea..5383e5e2d9b7 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -5,7 +5,7 @@
5 5
6mainmenu "Linux Kernel Configuration" 6mainmenu "Linux Kernel Configuration"
7 7
8config X86 8config X86_32
9 bool 9 bool
10 default y 10 default y
11 help 11 help
@@ -18,6 +18,10 @@ config SEMAPHORE_SLEEPERS
18 bool 18 bool
19 default y 19 default y
20 20
21config X86
22 bool
23 default y
24
21config MMU 25config MMU
22 bool 26 bool
23 default y 27 default y
@@ -151,304 +155,7 @@ config ES7000_CLUSTERED_APIC
151 default y 155 default y
152 depends on SMP && X86_ES7000 && MPENTIUMIII 156 depends on SMP && X86_ES7000 && MPENTIUMIII
153 157
154if !X86_ELAN 158source "arch/i386/Kconfig.cpu"
155
156choice
157 prompt "Processor family"
158 default M686
159
160config M386
161 bool "386"
162 ---help---
163 This is the processor type of your CPU. This information is used for
164 optimizing purposes. In order to compile a kernel that can run on
165 all x86 CPU types (albeit not optimally fast), you can specify
166 "386" here.
167
168 The kernel will not necessarily run on earlier architectures than
169 the one you have chosen, e.g. a Pentium optimized kernel will run on
170 a PPro, but not necessarily on a i486.
171
172 Here are the settings recommended for greatest speed:
173 - "386" for the AMD/Cyrix/Intel 386DX/DXL/SL/SLC/SX, Cyrix/TI
174 486DLC/DLC2, UMC 486SX-S and NexGen Nx586. Only "386" kernels
175 will run on a 386 class machine.
176 - "486" for the AMD/Cyrix/IBM/Intel 486DX/DX2/DX4 or
177 SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or U5S.
178 - "586" for generic Pentium CPUs lacking the TSC
179 (time stamp counter) register.
180 - "Pentium-Classic" for the Intel Pentium.
181 - "Pentium-MMX" for the Intel Pentium MMX.
182 - "Pentium-Pro" for the Intel Pentium Pro.
183 - "Pentium-II" for the Intel Pentium II or pre-Coppermine Celeron.
184 - "Pentium-III" for the Intel Pentium III or Coppermine Celeron.
185 - "Pentium-4" for the Intel Pentium 4 or P4-based Celeron.
186 - "K6" for the AMD K6, K6-II and K6-III (aka K6-3D).
187 - "Athlon" for the AMD K7 family (Athlon/Duron/Thunderbird).
188 - "Crusoe" for the Transmeta Crusoe series.
189 - "Efficeon" for the Transmeta Efficeon series.
190 - "Winchip-C6" for original IDT Winchip.
191 - "Winchip-2" for IDT Winchip 2.
192 - "Winchip-2A" for IDT Winchips with 3dNow! capabilities.
193 - "GeodeGX1" for Geode GX1 (Cyrix MediaGX).
194 - "CyrixIII/VIA C3" for VIA Cyrix III or VIA C3.
195 - "VIA C3-2 for VIA C3-2 "Nehemiah" (model 9 and above).
196
197 If you don't know what to do, choose "386".
198
199config M486
200 bool "486"
201 help
202 Select this for a 486 series processor, either Intel or one of the
203 compatible processors from AMD, Cyrix, IBM, or Intel. Includes DX,
204 DX2, and DX4 variants; also SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or
205 U5S.
206
207config M586
208 bool "586/K5/5x86/6x86/6x86MX"
209 help
210 Select this for an 586 or 686 series processor such as the AMD K5,
211 the Cyrix 5x86, 6x86 and 6x86MX. This choice does not
212 assume the RDTSC (Read Time Stamp Counter) instruction.
213
214config M586TSC
215 bool "Pentium-Classic"
216 help
217 Select this for a Pentium Classic processor with the RDTSC (Read
218 Time Stamp Counter) instruction for benchmarking.
219
220config M586MMX
221 bool "Pentium-MMX"
222 help
223 Select this for a Pentium with the MMX graphics/multimedia
224 extended instructions.
225
226config M686
227 bool "Pentium-Pro"
228 help
229 Select this for Intel Pentium Pro chips. This enables the use of
230 Pentium Pro extended instructions, and disables the init-time guard
231 against the f00f bug found in earlier Pentiums.
232
233config MPENTIUMII
234 bool "Pentium-II/Celeron(pre-Coppermine)"
235 help
236 Select this for Intel chips based on the Pentium-II and
237 pre-Coppermine Celeron core. This option enables an unaligned
238 copy optimization, compiles the kernel with optimization flags
239 tailored for the chip, and applies any applicable Pentium Pro
240 optimizations.
241
242config MPENTIUMIII
243 bool "Pentium-III/Celeron(Coppermine)/Pentium-III Xeon"
244 help
245 Select this for Intel chips based on the Pentium-III and
246 Celeron-Coppermine core. This option enables use of some
247 extended prefetch instructions in addition to the Pentium II
248 extensions.
249
250config MPENTIUMM
251 bool "Pentium M"
252 help
253 Select this for Intel Pentium M (not Pentium-4 M)
254 notebook chips.
255
256config MPENTIUM4
257 bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/Xeon"
258 help
259 Select this for Intel Pentium 4 chips. This includes the
260 Pentium 4, P4-based Celeron and Xeon, and Pentium-4 M
261 (not Pentium M) chips. This option enables compile flags
262 optimized for the chip, uses the correct cache shift, and
263 applies any applicable Pentium III optimizations.
264
265config MK6
266 bool "K6/K6-II/K6-III"
267 help
268 Select this for an AMD K6-family processor. Enables use of
269 some extended instructions, and passes appropriate optimization
270 flags to GCC.
271
272config MK7
273 bool "Athlon/Duron/K7"
274 help
275 Select this for an AMD Athlon K7-family processor. Enables use of
276 some extended instructions, and passes appropriate optimization
277 flags to GCC.
278
279config MK8
280 bool "Opteron/Athlon64/Hammer/K8"
281 help
282 Select this for an AMD Opteron or Athlon64 Hammer-family processor. Enables
283 use of some extended instructions, and passes appropriate optimization
284 flags to GCC.
285
286config MCRUSOE
287 bool "Crusoe"
288 help
289 Select this for a Transmeta Crusoe processor. Treats the processor
290 like a 586 with TSC, and sets some GCC optimization flags (like a
291 Pentium Pro with no alignment requirements).
292
293config MEFFICEON
294 bool "Efficeon"
295 help
296 Select this for a Transmeta Efficeon processor.
297
298config MWINCHIPC6
299 bool "Winchip-C6"
300 help
301 Select this for an IDT Winchip C6 chip. Linux and GCC
302 treat this chip as a 586TSC with some extended instructions
303 and alignment requirements.
304
305config MWINCHIP2
306 bool "Winchip-2"
307 help
308 Select this for an IDT Winchip-2. Linux and GCC
309 treat this chip as a 586TSC with some extended instructions
310 and alignment requirements.
311
312config MWINCHIP3D
313 bool "Winchip-2A/Winchip-3"
314 help
315 Select this for an IDT Winchip-2A or 3. Linux and GCC
316 treat this chip as a 586TSC with some extended instructions
317 and alignment reqirements. Also enable out of order memory
318 stores for this CPU, which can increase performance of some
319 operations.
320
321config MGEODEGX1
322 bool "GeodeGX1"
323 help
324 Select this for a Geode GX1 (Cyrix MediaGX) chip.
325
326config MCYRIXIII
327 bool "CyrixIII/VIA-C3"
328 help
329 Select this for a Cyrix III or C3 chip. Presently Linux and GCC
330 treat this chip as a generic 586. Whilst the CPU is 686 class,
331 it lacks the cmov extension which gcc assumes is present when
332 generating 686 code.
333 Note that Nehemiah (Model 9) and above will not boot with this
334 kernel due to them lacking the 3DNow! instructions used in earlier
335 incarnations of the CPU.
336
337config MVIAC3_2
338 bool "VIA C3-2 (Nehemiah)"
339 help
340 Select this for a VIA C3 "Nehemiah". Selecting this enables usage
341 of SSE and tells gcc to treat the CPU as a 686.
342 Note, this kernel will not boot on older (pre model 9) C3s.
343
344endchoice
345
346config X86_GENERIC
347 bool "Generic x86 support"
348 help
349 Instead of just including optimizations for the selected
350 x86 variant (e.g. PII, Crusoe or Athlon), include some more
351 generic optimizations as well. This will make the kernel
352 perform better on x86 CPUs other than that selected.
353
354 This is really intended for distributors who need more
355 generic optimizations.
356
357endif
358
359#
360# Define implied options from the CPU selection here
361#
362config X86_CMPXCHG
363 bool
364 depends on !M386
365 default y
366
367config X86_XADD
368 bool
369 depends on !M386
370 default y
371
372config X86_L1_CACHE_SHIFT
373 int
374 default "7" if MPENTIUM4 || X86_GENERIC
375 default "4" if X86_ELAN || M486 || M386
376 default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODEGX1
377 default "6" if MK7 || MK8 || MPENTIUMM
378
379config RWSEM_GENERIC_SPINLOCK
380 bool
381 depends on M386
382 default y
383
384config RWSEM_XCHGADD_ALGORITHM
385 bool
386 depends on !M386
387 default y
388
389config GENERIC_CALIBRATE_DELAY
390 bool
391 default y
392
393config X86_PPRO_FENCE
394 bool
395 depends on M686 || M586MMX || M586TSC || M586 || M486 || M386 || MGEODEGX1
396 default y
397
398config X86_F00F_BUG
399 bool
400 depends on M586MMX || M586TSC || M586 || M486 || M386
401 default y
402
403config X86_WP_WORKS_OK
404 bool
405 depends on !M386
406 default y
407
408config X86_INVLPG
409 bool
410 depends on !M386
411 default y
412
413config X86_BSWAP
414 bool
415 depends on !M386
416 default y
417
418config X86_POPAD_OK
419 bool
420 depends on !M386
421 default y
422
423config X86_ALIGNMENT_16
424 bool
425 depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
426 default y
427
428config X86_GOOD_APIC
429 bool
430 depends on MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || MK8 || MEFFICEON
431 default y
432
433config X86_INTEL_USERCOPY
434 bool
435 depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON
436 default y
437
438config X86_USE_PPRO_CHECKSUM
439 bool
440 depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON
441 default y
442
443config X86_USE_3DNOW
444 bool
445 depends on MCYRIXIII || MK7
446 default y
447
448config X86_OOSTORE
449 bool
450 depends on (MWINCHIP3D || MWINCHIP2 || MWINCHIPC6) && MTRR
451 default y
452 159
453config HPET_TIMER 160config HPET_TIMER
454 bool "HPET Timer Support" 161 bool "HPET Timer Support"
@@ -561,11 +268,6 @@ config X86_VISWS_APIC
561 depends on X86_VISWS 268 depends on X86_VISWS
562 default y 269 default y
563 270
564config X86_TSC
565 bool
566 depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MGEODEGX1) && !X86_NUMAQ
567 default y
568
569config X86_MCE 271config X86_MCE
570 bool "Machine Check Exception" 272 bool "Machine Check Exception"
571 depends on !X86_VOYAGER 273 depends on !X86_VOYAGER
diff --git a/arch/i386/Kconfig.cpu b/arch/i386/Kconfig.cpu
new file mode 100644
index 000000000000..53bbb3c008ee
--- /dev/null
+++ b/arch/i386/Kconfig.cpu
@@ -0,0 +1,309 @@
1# Put here option for CPU selection and depending optimization
2if !X86_ELAN
3
4choice
5 prompt "Processor family"
6 default M686
7
8config M386
9 bool "386"
10 ---help---
11 This is the processor type of your CPU. This information is used for
12 optimizing purposes. In order to compile a kernel that can run on
13 all x86 CPU types (albeit not optimally fast), you can specify
14 "386" here.
15
16 The kernel will not necessarily run on earlier architectures than
17 the one you have chosen, e.g. a Pentium optimized kernel will run on
18 a PPro, but not necessarily on a i486.
19
20 Here are the settings recommended for greatest speed:
21 - "386" for the AMD/Cyrix/Intel 386DX/DXL/SL/SLC/SX, Cyrix/TI
22 486DLC/DLC2, UMC 486SX-S and NexGen Nx586. Only "386" kernels
23 will run on a 386 class machine.
24 - "486" for the AMD/Cyrix/IBM/Intel 486DX/DX2/DX4 or
25 SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or U5S.
26 - "586" for generic Pentium CPUs lacking the TSC
27 (time stamp counter) register.
28 - "Pentium-Classic" for the Intel Pentium.
29 - "Pentium-MMX" for the Intel Pentium MMX.
30 - "Pentium-Pro" for the Intel Pentium Pro.
31 - "Pentium-II" for the Intel Pentium II or pre-Coppermine Celeron.
32 - "Pentium-III" for the Intel Pentium III or Coppermine Celeron.
33 - "Pentium-4" for the Intel Pentium 4 or P4-based Celeron.
34 - "K6" for the AMD K6, K6-II and K6-III (aka K6-3D).
35 - "Athlon" for the AMD K7 family (Athlon/Duron/Thunderbird).
36 - "Crusoe" for the Transmeta Crusoe series.
37 - "Efficeon" for the Transmeta Efficeon series.
38 - "Winchip-C6" for original IDT Winchip.
39 - "Winchip-2" for IDT Winchip 2.
40 - "Winchip-2A" for IDT Winchips with 3dNow! capabilities.
41 - "GeodeGX1" for Geode GX1 (Cyrix MediaGX).
42 - "CyrixIII/VIA C3" for VIA Cyrix III or VIA C3.
43 - "VIA C3-2 for VIA C3-2 "Nehemiah" (model 9 and above).
44
45 If you don't know what to do, choose "386".
46
47config M486
48 bool "486"
49 help
50 Select this for a 486 series processor, either Intel or one of the
51 compatible processors from AMD, Cyrix, IBM, or Intel. Includes DX,
52 DX2, and DX4 variants; also SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or
53 U5S.
54
55config M586
56 bool "586/K5/5x86/6x86/6x86MX"
57 help
58 Select this for an 586 or 686 series processor such as the AMD K5,
59 the Cyrix 5x86, 6x86 and 6x86MX. This choice does not
60 assume the RDTSC (Read Time Stamp Counter) instruction.
61
62config M586TSC
63 bool "Pentium-Classic"
64 help
65 Select this for a Pentium Classic processor with the RDTSC (Read
66 Time Stamp Counter) instruction for benchmarking.
67
68config M586MMX
69 bool "Pentium-MMX"
70 help
71 Select this for a Pentium with the MMX graphics/multimedia
72 extended instructions.
73
74config M686
75 bool "Pentium-Pro"
76 help
77 Select this for Intel Pentium Pro chips. This enables the use of
78 Pentium Pro extended instructions, and disables the init-time guard
79 against the f00f bug found in earlier Pentiums.
80
81config MPENTIUMII
82 bool "Pentium-II/Celeron(pre-Coppermine)"
83 help
84 Select this for Intel chips based on the Pentium-II and
85 pre-Coppermine Celeron core. This option enables an unaligned
86 copy optimization, compiles the kernel with optimization flags
87 tailored for the chip, and applies any applicable Pentium Pro
88 optimizations.
89
90config MPENTIUMIII
91 bool "Pentium-III/Celeron(Coppermine)/Pentium-III Xeon"
92 help
93 Select this for Intel chips based on the Pentium-III and
94 Celeron-Coppermine core. This option enables use of some
95 extended prefetch instructions in addition to the Pentium II
96 extensions.
97
98config MPENTIUMM
99 bool "Pentium M"
100 help
101 Select this for Intel Pentium M (not Pentium-4 M)
102 notebook chips.
103
104config MPENTIUM4
105 bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/Xeon"
106 help
107 Select this for Intel Pentium 4 chips. This includes the
108 Pentium 4, P4-based Celeron and Xeon, and Pentium-4 M
109 (not Pentium M) chips. This option enables compile flags
110 optimized for the chip, uses the correct cache shift, and
111 applies any applicable Pentium III optimizations.
112
113config MK6
114 bool "K6/K6-II/K6-III"
115 help
116 Select this for an AMD K6-family processor. Enables use of
117 some extended instructions, and passes appropriate optimization
118 flags to GCC.
119
120config MK7
121 bool "Athlon/Duron/K7"
122 help
123 Select this for an AMD Athlon K7-family processor. Enables use of
124 some extended instructions, and passes appropriate optimization
125 flags to GCC.
126
127config MK8
128 bool "Opteron/Athlon64/Hammer/K8"
129 help
130 Select this for an AMD Opteron or Athlon64 Hammer-family processor. Enables
131 use of some extended instructions, and passes appropriate optimization
132 flags to GCC.
133
134config MCRUSOE
135 bool "Crusoe"
136 help
137 Select this for a Transmeta Crusoe processor. Treats the processor
138 like a 586 with TSC, and sets some GCC optimization flags (like a
139 Pentium Pro with no alignment requirements).
140
141config MEFFICEON
142 bool "Efficeon"
143 help
144 Select this for a Transmeta Efficeon processor.
145
146config MWINCHIPC6
147 bool "Winchip-C6"
148 help
149 Select this for an IDT Winchip C6 chip. Linux and GCC
150 treat this chip as a 586TSC with some extended instructions
151 and alignment requirements.
152
153config MWINCHIP2
154 bool "Winchip-2"
155 help
156 Select this for an IDT Winchip-2. Linux and GCC
157 treat this chip as a 586TSC with some extended instructions
158 and alignment requirements.
159
160config MWINCHIP3D
161 bool "Winchip-2A/Winchip-3"
162 help
163 Select this for an IDT Winchip-2A or 3. Linux and GCC
164 treat this chip as a 586TSC with some extended instructions
165 and alignment reqirements. Also enable out of order memory
166 stores for this CPU, which can increase performance of some
167 operations.
168
169config MGEODEGX1
170 bool "GeodeGX1"
171 help
172 Select this for a Geode GX1 (Cyrix MediaGX) chip.
173
174config MCYRIXIII
175 bool "CyrixIII/VIA-C3"
176 help
177 Select this for a Cyrix III or C3 chip. Presently Linux and GCC
178 treat this chip as a generic 586. Whilst the CPU is 686 class,
179 it lacks the cmov extension which gcc assumes is present when
180 generating 686 code.
181 Note that Nehemiah (Model 9) and above will not boot with this
182 kernel due to them lacking the 3DNow! instructions used in earlier
183 incarnations of the CPU.
184
185config MVIAC3_2
186 bool "VIA C3-2 (Nehemiah)"
187 help
188 Select this for a VIA C3 "Nehemiah". Selecting this enables usage
189 of SSE and tells gcc to treat the CPU as a 686.
190 Note, this kernel will not boot on older (pre model 9) C3s.
191
192endchoice
193
194config X86_GENERIC
195 bool "Generic x86 support"
196 help
197 Instead of just including optimizations for the selected
198 x86 variant (e.g. PII, Crusoe or Athlon), include some more
199 generic optimizations as well. This will make the kernel
200 perform better on x86 CPUs other than that selected.
201
202 This is really intended for distributors who need more
203 generic optimizations.
204
205endif
206
207#
208# Define implied options from the CPU selection here
209#
210config X86_CMPXCHG
211 bool
212 depends on !M386
213 default y
214
215config X86_XADD
216 bool
217 depends on !M386
218 default y
219
220config X86_L1_CACHE_SHIFT
221 int
222 default "7" if MPENTIUM4 || X86_GENERIC
223 default "4" if X86_ELAN || M486 || M386
224 default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODEGX1
225 default "6" if MK7 || MK8 || MPENTIUMM
226
227config RWSEM_GENERIC_SPINLOCK
228 bool
229 depends on M386
230 default y
231
232config RWSEM_XCHGADD_ALGORITHM
233 bool
234 depends on !M386
235 default y
236
237config GENERIC_CALIBRATE_DELAY
238 bool
239 default y
240
241config X86_PPRO_FENCE
242 bool
243 depends on M686 || M586MMX || M586TSC || M586 || M486 || M386 || MGEODEGX1
244 default y
245
246config X86_F00F_BUG
247 bool
248 depends on M586MMX || M586TSC || M586 || M486 || M386
249 default y
250
251config X86_WP_WORKS_OK
252 bool
253 depends on !M386
254 default y
255
256config X86_INVLPG
257 bool
258 depends on !M386
259 default y
260
261config X86_BSWAP
262 bool
263 depends on !M386
264 default y
265
266config X86_POPAD_OK
267 bool
268 depends on !M386
269 default y
270
271config X86_CMPXCHG64
272 bool
273 depends on !M386 && !M486
274 default y
275
276config X86_ALIGNMENT_16
277 bool
278 depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
279 default y
280
281config X86_GOOD_APIC
282 bool
283 depends on MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || MK8 || MEFFICEON
284 default y
285
286config X86_INTEL_USERCOPY
287 bool
288 depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON
289 default y
290
291config X86_USE_PPRO_CHECKSUM
292 bool
293 depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON
294 default y
295
296config X86_USE_3DNOW
297 bool
298 depends on MCYRIXIII || MK7
299 default y
300
301config X86_OOSTORE
302 bool
303 depends on (MWINCHIP3D || MWINCHIP2 || MWINCHIPC6) && MTRR
304 default y
305
306config X86_TSC
307 bool
308 depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MGEODEGX1) && !X86_NUMAQ
309 default y
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index 09951990a622..d121ea18460f 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -34,35 +34,8 @@ CFLAGS += -pipe -msoft-float
34# prevent gcc from keeping the stack 16 byte aligned 34# prevent gcc from keeping the stack 16 byte aligned
35CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2) 35CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2)
36 36
37align := $(cc-option-align) 37# CPU-specific tuning. Anything which can be shared with UML should go here.
38cflags-$(CONFIG_M386) += -march=i386 38include $(srctree)/arch/i386/Makefile.cpu
39cflags-$(CONFIG_M486) += -march=i486
40cflags-$(CONFIG_M586) += -march=i586
41cflags-$(CONFIG_M586TSC) += -march=i586
42cflags-$(CONFIG_M586MMX) += $(call cc-option,-march=pentium-mmx,-march=i586)
43cflags-$(CONFIG_M686) += -march=i686
44cflags-$(CONFIG_MPENTIUMII) += -march=i686 $(call cc-option,-mtune=pentium2)
45cflags-$(CONFIG_MPENTIUMIII) += -march=i686 $(call cc-option,-mtune=pentium3)
46cflags-$(CONFIG_MPENTIUMM) += -march=i686 $(call cc-option,-mtune=pentium3)
47cflags-$(CONFIG_MPENTIUM4) += -march=i686 $(call cc-option,-mtune=pentium4)
48cflags-$(CONFIG_MK6) += -march=k6
49# Please note, that patches that add -march=athlon-xp and friends are pointless.
50# They make zero difference whatsosever to performance at this time.
51cflags-$(CONFIG_MK7) += $(call cc-option,-march=athlon,-march=i686 $(align)-functions=4)
52cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,$(call cc-option,-march=athlon,-march=i686 $(align)-functions=4))
53cflags-$(CONFIG_MCRUSOE) += -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
54cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call cc-option,-mtune=pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
55cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586)
56cflags-$(CONFIG_MWINCHIP2) += $(call cc-option,-march=winchip2,-march=i586)
57cflags-$(CONFIG_MWINCHIP3D) += $(call cc-option,-march=winchip2,-march=i586)
58cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
59cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
60
61# AMD Elan support
62cflags-$(CONFIG_X86_ELAN) += -march=i486
63
64# Geode GX1 support
65cflags-$(CONFIG_MGEODEGX1) += $(call cc-option,-march=pentium-mmx,-march=i486)
66 39
67# -mregparm=3 works ok on gcc-3.0 and later 40# -mregparm=3 works ok on gcc-3.0 and later
68# 41#
diff --git a/arch/i386/Makefile.cpu b/arch/i386/Makefile.cpu
new file mode 100644
index 000000000000..8e51456df23d
--- /dev/null
+++ b/arch/i386/Makefile.cpu
@@ -0,0 +1,41 @@
1# CPU tuning section - shared with UML.
2# Must change only cflags-y (or [yn]), not CFLAGS! That makes a difference for UML.
3
4#-mtune exists since gcc 3.4, and some -mcpu flavors didn't exist in gcc 2.95.
5HAS_MTUNE := $(call cc-option-yn, -mtune=i386)
6ifeq ($(HAS_MTUNE),y)
7tune = $(call cc-option,-mtune=$(1),)
8else
9tune = $(call cc-option,-mcpu=$(1),)
10endif
11
12align := $(cc-option-align)
13cflags-$(CONFIG_M386) += -march=i386
14cflags-$(CONFIG_M486) += -march=i486
15cflags-$(CONFIG_M586) += -march=i586
16cflags-$(CONFIG_M586TSC) += -march=i586
17cflags-$(CONFIG_M586MMX) += $(call cc-option,-march=pentium-mmx,-march=i586)
18cflags-$(CONFIG_M686) += -march=i686
19cflags-$(CONFIG_MPENTIUMII) += -march=i686 $(call tune,pentium2)
20cflags-$(CONFIG_MPENTIUMIII) += -march=i686 $(call tune,pentium3)
21cflags-$(CONFIG_MPENTIUMM) += -march=i686 $(call tune,pentium3)
22cflags-$(CONFIG_MPENTIUM4) += -march=i686 $(call tune,pentium4)
23cflags-$(CONFIG_MK6) += -march=k6
24# Please note, that patches that add -march=athlon-xp and friends are pointless.
25# They make zero difference whatsosever to performance at this time.
26cflags-$(CONFIG_MK7) += $(call cc-option,-march=athlon,-march=i686 $(align)-functions=4)
27cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,$(call cc-option,-march=athlon,-march=i686 $(align)-functions=4))
28cflags-$(CONFIG_MCRUSOE) += -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
29cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
30cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586)
31cflags-$(CONFIG_MWINCHIP2) += $(call cc-option,-march=winchip2,-march=i586)
32cflags-$(CONFIG_MWINCHIP3D) += $(call cc-option,-march=winchip2,-march=i586)
33cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
34cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
35
36# AMD Elan support
37cflags-$(CONFIG_X86_ELAN) += -march=i486
38
39# Geode GX1 support
40cflags-$(CONFIG_MGEODEGX1) += $(call cc-option,-march=pentium-mmx,-march=i486)
41
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 5546ddebec33..9204be6eedb3 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -803,6 +803,7 @@ no_apic:
803 803
804void __init init_apic_mappings(void) 804void __init init_apic_mappings(void)
805{ 805{
806 unsigned int orig_apicid;
806 unsigned long apic_phys; 807 unsigned long apic_phys;
807 808
808 /* 809 /*
@@ -824,8 +825,11 @@ void __init init_apic_mappings(void)
824 * Fetch the APIC ID of the BSP in case we have a 825 * Fetch the APIC ID of the BSP in case we have a
825 * default configuration (or the MP table is broken). 826 * default configuration (or the MP table is broken).
826 */ 827 */
827 if (boot_cpu_physical_apicid == -1U) 828 orig_apicid = boot_cpu_physical_apicid;
828 boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); 829 boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
830 if ((orig_apicid != -1U) && (orig_apicid != boot_cpu_physical_apicid))
831 printk(KERN_WARNING "Boot APIC ID in local APIC unexpected (%d vs %d)",
832 orig_apicid, boot_cpu_physical_apicid);
829 833
830#ifdef CONFIG_X86_IO_APIC 834#ifdef CONFIG_X86_IO_APIC
831 { 835 {
@@ -1046,10 +1050,11 @@ static unsigned int calibration_result;
1046 1050
1047void __init setup_boot_APIC_clock(void) 1051void __init setup_boot_APIC_clock(void)
1048{ 1052{
1053 unsigned long flags;
1049 apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"); 1054 apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n");
1050 using_apic_timer = 1; 1055 using_apic_timer = 1;
1051 1056
1052 local_irq_disable(); 1057 local_irq_save(flags);
1053 1058
1054 calibration_result = calibrate_APIC_clock(); 1059 calibration_result = calibrate_APIC_clock();
1055 /* 1060 /*
@@ -1057,7 +1062,7 @@ void __init setup_boot_APIC_clock(void)
1057 */ 1062 */
1058 setup_APIC_timer(calibration_result); 1063 setup_APIC_timer(calibration_result);
1059 1064
1060 local_irq_enable(); 1065 local_irq_restore(flags);
1061} 1066}
1062 1067
1063void __devinit setup_secondary_APIC_clock(void) 1068void __devinit setup_secondary_APIC_clock(void)
@@ -1254,40 +1259,81 @@ fastcall void smp_error_interrupt(struct pt_regs *regs)
1254} 1259}
1255 1260
1256/* 1261/*
1257 * This initializes the IO-APIC and APIC hardware if this is 1262 * This initializes the IO-APIC and APIC hardware.
1258 * a UP kernel.
1259 */ 1263 */
1260int __init APIC_init_uniprocessor (void) 1264int __init APIC_init(void)
1261{ 1265{
1262 if (enable_local_apic < 0) 1266 if (enable_local_apic < 0) {
1263 clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability); 1267 printk(KERN_INFO "APIC disabled\n");
1268 return -1;
1269 }
1264 1270
1265 if (!smp_found_config && !cpu_has_apic) 1271 /* See if we have a SMP configuration or have forced enabled
1272 * the local apic.
1273 */
1274 if (!smp_found_config && !acpi_lapic && !cpu_has_apic) {
1275 enable_local_apic = -1;
1266 return -1; 1276 return -1;
1277 }
1267 1278
1268 /* 1279 /*
1269 * Complain if the BIOS pretends there is one. 1280 * Complain if the BIOS pretends there is an apic.
1281 * Then get out because we don't have an a local apic.
1270 */ 1282 */
1271 if (!cpu_has_apic && APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) { 1283 if (!cpu_has_apic && APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
1272 printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", 1284 printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
1273 boot_cpu_physical_apicid); 1285 boot_cpu_physical_apicid);
1286 printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
1287 enable_local_apic = -1;
1274 return -1; 1288 return -1;
1275 } 1289 }
1276 1290
1277 verify_local_APIC(); 1291 verify_local_APIC();
1278 1292
1293 /*
1294 * Should not be necessary because the MP table should list the boot
1295 * CPU too, but we do it for the sake of robustness anyway.
1296 * Makes no sense to do this check in clustered apic mode, so skip it
1297 */
1298 if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
1299 printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
1300 boot_cpu_physical_apicid);
1301 physid_set(boot_cpu_physical_apicid, phys_cpu_present_map);
1302 }
1303
1304 /*
1305 * Switch from PIC to APIC mode.
1306 */
1279 connect_bsp_APIC(); 1307 connect_bsp_APIC();
1308 setup_local_APIC();
1280 1309
1281 phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); 1310#ifdef CONFIG_X86_IO_APIC
1311 /*
1312 * Now start the IO-APICs
1313 */
1314 if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
1315 setup_IO_APIC();
1316#endif
1317 return 0;
1318}
1282 1319
1283 setup_local_APIC(); 1320void __init APIC_late_time_init(void)
1321{
1322 /* Improve our loops per jiffy estimate */
1323 loops_per_jiffy = ((1000 + HZ - 1)/HZ)*cpu_khz;
1324 boot_cpu_data.loops_per_jiffy = loops_per_jiffy;
1325 cpu_data[0].loops_per_jiffy = loops_per_jiffy;
1326
1327 /* setup_apic_nmi_watchdog doesn't work properly before cpu_khz is
1328 * initialized. So redo it here to ensure the boot cpu is setup
1329 * properly.
1330 */
1331 if (nmi_watchdog == NMI_LOCAL_APIC)
1332 setup_apic_nmi_watchdog();
1284 1333
1285#ifdef CONFIG_X86_IO_APIC 1334#ifdef CONFIG_X86_IO_APIC
1286 if (smp_found_config) 1335 if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
1287 if (!skip_ioapic_setup && nr_ioapics) 1336 IO_APIC_late_time_init();
1288 setup_IO_APIC();
1289#endif 1337#endif
1290 setup_boot_APIC_clock(); 1338 setup_boot_APIC_clock();
1291
1292 return 0;
1293} 1339}
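Note on the apic.c hunks above: APIC_init_uniprocessor() becomes APIC_init(), with the timing-dependent work (loops_per_jiffy fixup, NMI watchdog re-setup, late IO-APIC init, APIC clock setup) moved into the new APIC_late_time_init(), and setup_boot_APIC_clock() now uses local_irq_save()/local_irq_restore() instead of a bare disable/enable pair so it no longer turns interrupts back on behind its caller. A minimal sketch of that save/restore idiom follows; calibrate() is a hypothetical stand-in for calibrate_APIC_clock(), and the include reflects current trees rather than this 2.6-era patch.

#include <linux/irqflags.h>	/* local_irq_save()/local_irq_restore() */

/*
 * Sketch only: run a timing-sensitive callback with interrupts off and
 * hand back whatever IRQ state the caller had, instead of forcing IRQs
 * on afterwards the way local_irq_disable()/local_irq_enable() would.
 */
static unsigned int run_calibration(unsigned int (*calibrate)(void))
{
	unsigned long flags;
	unsigned int result;

	local_irq_save(flags);		/* remember whether IRQs were enabled */
	result = calibrate();		/* e.g. an APIC clock calibration loop */
	local_irq_restore(flags);	/* restore the caller's IRQ state */
	return result;
}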
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index d7811c4e8b50..d2ef0c2aa93e 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -597,12 +597,14 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
597 cpumask_t cpus; 597 cpumask_t cpus;
598 int cpu; 598 int cpu;
599 struct desc_struct save_desc_40; 599 struct desc_struct save_desc_40;
600 struct desc_struct *gdt;
600 601
601 cpus = apm_save_cpus(); 602 cpus = apm_save_cpus();
602 603
603 cpu = get_cpu(); 604 cpu = get_cpu();
604 save_desc_40 = per_cpu(cpu_gdt_table, cpu)[0x40 / 8]; 605 gdt = get_cpu_gdt_table(cpu);
605 per_cpu(cpu_gdt_table, cpu)[0x40 / 8] = bad_bios_desc; 606 save_desc_40 = gdt[0x40 / 8];
607 gdt[0x40 / 8] = bad_bios_desc;
606 608
607 local_save_flags(flags); 609 local_save_flags(flags);
608 APM_DO_CLI; 610 APM_DO_CLI;
@@ -610,7 +612,7 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
610 apm_bios_call_asm(func, ebx_in, ecx_in, eax, ebx, ecx, edx, esi); 612 apm_bios_call_asm(func, ebx_in, ecx_in, eax, ebx, ecx, edx, esi);
611 APM_DO_RESTORE_SEGS; 613 APM_DO_RESTORE_SEGS;
612 local_irq_restore(flags); 614 local_irq_restore(flags);
613 per_cpu(cpu_gdt_table, cpu)[0x40 / 8] = save_desc_40; 615 gdt[0x40 / 8] = save_desc_40;
614 put_cpu(); 616 put_cpu();
615 apm_restore_cpus(cpus); 617 apm_restore_cpus(cpus);
616 618
@@ -639,13 +641,14 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
639 cpumask_t cpus; 641 cpumask_t cpus;
640 int cpu; 642 int cpu;
641 struct desc_struct save_desc_40; 643 struct desc_struct save_desc_40;
642 644 struct desc_struct *gdt;
643 645
644 cpus = apm_save_cpus(); 646 cpus = apm_save_cpus();
645 647
646 cpu = get_cpu(); 648 cpu = get_cpu();
647 save_desc_40 = per_cpu(cpu_gdt_table, cpu)[0x40 / 8]; 649 gdt = get_cpu_gdt_table(cpu);
648 per_cpu(cpu_gdt_table, cpu)[0x40 / 8] = bad_bios_desc; 650 save_desc_40 = gdt[0x40 / 8];
651 gdt[0x40 / 8] = bad_bios_desc;
649 652
650 local_save_flags(flags); 653 local_save_flags(flags);
651 APM_DO_CLI; 654 APM_DO_CLI;
@@ -653,7 +656,7 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
653 error = apm_bios_call_simple_asm(func, ebx_in, ecx_in, eax); 656 error = apm_bios_call_simple_asm(func, ebx_in, ecx_in, eax);
654 APM_DO_RESTORE_SEGS; 657 APM_DO_RESTORE_SEGS;
655 local_irq_restore(flags); 658 local_irq_restore(flags);
656 __get_cpu_var(cpu_gdt_table)[0x40 / 8] = save_desc_40; 659 gdt[0x40 / 8] = save_desc_40;
657 put_cpu(); 660 put_cpu();
658 apm_restore_cpus(cpus); 661 apm_restore_cpus(cpus);
659 return error; 662 return error;
@@ -2295,35 +2298,36 @@ static int __init apm_init(void)
2295 apm_bios_entry.segment = APM_CS; 2298 apm_bios_entry.segment = APM_CS;
2296 2299
2297 for (i = 0; i < NR_CPUS; i++) { 2300 for (i = 0; i < NR_CPUS; i++) {
2298 set_base(per_cpu(cpu_gdt_table, i)[APM_CS >> 3], 2301 struct desc_struct *gdt = get_cpu_gdt_table(i);
2302 set_base(gdt[APM_CS >> 3],
2299 __va((unsigned long)apm_info.bios.cseg << 4)); 2303 __va((unsigned long)apm_info.bios.cseg << 4));
2300 set_base(per_cpu(cpu_gdt_table, i)[APM_CS_16 >> 3], 2304 set_base(gdt[APM_CS_16 >> 3],
2301 __va((unsigned long)apm_info.bios.cseg_16 << 4)); 2305 __va((unsigned long)apm_info.bios.cseg_16 << 4));
2302 set_base(per_cpu(cpu_gdt_table, i)[APM_DS >> 3], 2306 set_base(gdt[APM_DS >> 3],
2303 __va((unsigned long)apm_info.bios.dseg << 4)); 2307 __va((unsigned long)apm_info.bios.dseg << 4));
2304#ifndef APM_RELAX_SEGMENTS 2308#ifndef APM_RELAX_SEGMENTS
2305 if (apm_info.bios.version == 0x100) { 2309 if (apm_info.bios.version == 0x100) {
2306#endif 2310#endif
2307 /* For ASUS motherboard, Award BIOS rev 110 (and others?) */ 2311 /* For ASUS motherboard, Award BIOS rev 110 (and others?) */
2308 _set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS >> 3], 64 * 1024 - 1); 2312 _set_limit((char *)&gdt[APM_CS >> 3], 64 * 1024 - 1);
2309 /* For some unknown machine. */ 2313 /* For some unknown machine. */
2310 _set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS_16 >> 3], 64 * 1024 - 1); 2314 _set_limit((char *)&gdt[APM_CS_16 >> 3], 64 * 1024 - 1);
2311 /* For the DEC Hinote Ultra CT475 (and others?) */ 2315 /* For the DEC Hinote Ultra CT475 (and others?) */
2312 _set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_DS >> 3], 64 * 1024 - 1); 2316 _set_limit((char *)&gdt[APM_DS >> 3], 64 * 1024 - 1);
2313#ifndef APM_RELAX_SEGMENTS 2317#ifndef APM_RELAX_SEGMENTS
2314 } else { 2318 } else {
2315 _set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS >> 3], 2319 _set_limit((char *)&gdt[APM_CS >> 3],
2316 (apm_info.bios.cseg_len - 1) & 0xffff); 2320 (apm_info.bios.cseg_len - 1) & 0xffff);
2317 _set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS_16 >> 3], 2321 _set_limit((char *)&gdt[APM_CS_16 >> 3],
2318 (apm_info.bios.cseg_16_len - 1) & 0xffff); 2322 (apm_info.bios.cseg_16_len - 1) & 0xffff);
2319 _set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_DS >> 3], 2323 _set_limit((char *)&gdt[APM_DS >> 3],
2320 (apm_info.bios.dseg_len - 1) & 0xffff); 2324 (apm_info.bios.dseg_len - 1) & 0xffff);
2321 /* workaround for broken BIOSes */ 2325 /* workaround for broken BIOSes */
2322 if (apm_info.bios.cseg_len <= apm_info.bios.offset) 2326 if (apm_info.bios.cseg_len <= apm_info.bios.offset)
2323 _set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS >> 3], 64 * 1024 -1); 2327 _set_limit((char *)&gdt[APM_CS >> 3], 64 * 1024 -1);
2324 if (apm_info.bios.dseg_len <= 0x40) { /* 0x40 * 4kB == 64kB */ 2328 if (apm_info.bios.dseg_len <= 0x40) { /* 0x40 * 4kB == 64kB */
2325 /* for the BIOS that assumes granularity = 1 */ 2329 /* for the BIOS that assumes granularity = 1 */
2326 per_cpu(cpu_gdt_table, i)[APM_DS >> 3].b |= 0x800000; 2330 gdt[APM_DS >> 3].b |= 0x800000;
2327 printk(KERN_NOTICE "apm: we set the granularity of dseg.\n"); 2331 printk(KERN_NOTICE "apm: we set the granularity of dseg.\n");
2328 } 2332 }
2329 } 2333 }
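Note on the apm.c hunks above: every direct per_cpu(cpu_gdt_table, cpu)[...] access is replaced by a pointer obtained from get_cpu_gdt_table(cpu), so the BIOS-call paths stop assuming the GDT is a static per-CPU array (this also fixes the asymmetric __get_cpu_var() restore in apm_bios_call_simple()). A minimal sketch of the save/patch/restore pattern those paths use, assuming the get_cpu_gdt_table() helper and bad_bios_desc from this patch; do_bios_call() is a hypothetical placeholder for the actual APM firmware call.

#include <linux/smp.h>		/* get_cpu()/put_cpu() */
#include <asm/desc.h>		/* struct desc_struct, get_cpu_gdt_table() (per this patch) */

static void call_bios_with_patched_gdt(void)
{
	struct desc_struct save_desc_40;
	struct desc_struct *gdt;
	int cpu;

	cpu = get_cpu();			/* pin to this CPU, disable preemption */
	gdt = get_cpu_gdt_table(cpu);		/* this CPU's GDT, wherever it is allocated */
	save_desc_40 = gdt[0x40 / 8];		/* save the entry some BIOSes stomp on */
	gdt[0x40 / 8] = bad_bios_desc;		/* install a known-safe descriptor */

	do_bios_call();				/* hypothetical 16-bit firmware call */

	gdt[0x40 / 8] = save_desc_40;		/* put the original entry back */
	put_cpu();
}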
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 9ad43be9a01f..74145a33cb0f 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -573,6 +573,7 @@ void __devinit cpu_init(void)
573 int cpu = smp_processor_id(); 573 int cpu = smp_processor_id();
574 struct tss_struct * t = &per_cpu(init_tss, cpu); 574 struct tss_struct * t = &per_cpu(init_tss, cpu);
575 struct thread_struct *thread = &current->thread; 575 struct thread_struct *thread = &current->thread;
576 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
576 __u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu); 577 __u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu);
577 578
578 if (cpu_test_and_set(cpu, cpu_initialized)) { 579 if (cpu_test_and_set(cpu, cpu_initialized)) {
@@ -594,24 +595,16 @@ void __devinit cpu_init(void)
594 * Initialize the per-CPU GDT with the boot GDT, 595 * Initialize the per-CPU GDT with the boot GDT,
595 * and set up the GDT descriptor: 596 * and set up the GDT descriptor:
596 */ 597 */
597 memcpy(&per_cpu(cpu_gdt_table, cpu), cpu_gdt_table, 598 memcpy(gdt, cpu_gdt_table, GDT_SIZE);
598 GDT_SIZE);
599 599
600 /* Set up GDT entry for 16bit stack */ 600 /* Set up GDT entry for 16bit stack */
601 *(__u64 *)&(per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_ESPFIX_SS]) |= 601 *(__u64 *)(&gdt[GDT_ENTRY_ESPFIX_SS]) |=
602 ((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) | 602 ((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) |
603 ((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) | 603 ((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
604 (CPU_16BIT_STACK_SIZE - 1); 604 (CPU_16BIT_STACK_SIZE - 1);
605 605
606 cpu_gdt_descr[cpu].size = GDT_SIZE - 1; 606 cpu_gdt_descr[cpu].size = GDT_SIZE - 1;
607 cpu_gdt_descr[cpu].address = 607 cpu_gdt_descr[cpu].address = (unsigned long)gdt;
608 (unsigned long)&per_cpu(cpu_gdt_table, cpu);
609
610 /*
611 * Set up the per-thread TLS descriptor cache:
612 */
613 memcpy(thread->tls_array, &per_cpu(cpu_gdt_table, cpu),
614 GDT_ENTRY_TLS_ENTRIES * 8);
615 608
616 load_gdt(&cpu_gdt_descr[cpu]); 609 load_gdt(&cpu_gdt_descr[cpu]);
617 load_idt(&idt_descr); 610 load_idt(&idt_descr);
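Note on the cpu_init() hunk above: the boot GDT is copied through the same get_cpu_gdt_table() pointer, and the ESPFIX (16-bit stack) entry is built by OR-ing the stack address into the raw 64-bit descriptor. The masks follow the x86 segment-descriptor layout: limit bits 0-15 sit in bits 0-15, base bits 0-23 in bits 16-39, and base bits 24-31 in bits 56-63. A small sketch of that packing, treating the descriptor as a plain u64; pack_base_and_limit() is an illustrative helper, not part of the patch.

#include <linux/types.h>

/* Sketch: place a 32-bit base and a 16-bit limit into an 8-byte descriptor. */
static inline u64 pack_base_and_limit(u64 desc, u32 base, u32 limit)
{
	desc |= ((u64)base << 16) & 0x000000ffffff0000ULL;	/* base bits 0..23  -> bits 16..39 */
	desc |= ((u64)base << 32) & 0xff00000000000000ULL;	/* base bits 24..31 -> bits 56..63 */
	desc |= limit & 0xffff;					/* limit bits 0..15 -> bits 0..15  */
	return desc;
}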
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 822c8ce9d1f1..caa9f7711343 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -32,6 +32,7 @@
32#include <linux/proc_fs.h> 32#include <linux/proc_fs.h>
33#include <linux/seq_file.h> 33#include <linux/seq_file.h>
34#include <linux/compiler.h> 34#include <linux/compiler.h>
35#include <linux/sched.h> /* current */
35#include <asm/io.h> 36#include <asm/io.h>
36#include <asm/delay.h> 37#include <asm/delay.h>
37#include <asm/uaccess.h> 38#include <asm/uaccess.h>
diff --git a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
index aa622d52c6e5..270f2188d68b 100644
--- a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
@@ -28,6 +28,7 @@
28#include <linux/cpufreq.h> 28#include <linux/cpufreq.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/cpumask.h> 30#include <linux/cpumask.h>
31#include <linux/sched.h> /* current / set_cpus_allowed() */
31 32
32#include <asm/processor.h> 33#include <asm/processor.h>
33#include <asm/msr.h> 34#include <asm/msr.h>
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index 58ca98fdc2ca..2d5c9adba0cd 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -32,6 +32,7 @@
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/string.h> 33#include <linux/string.h>
34#include <linux/cpumask.h> 34#include <linux/cpumask.h>
35#include <linux/sched.h> /* for current / set_cpus_allowed() */
35 36
36#include <asm/msr.h> 37#include <asm/msr.h>
37#include <asm/io.h> 38#include <asm/io.h>
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index c397b6220430..1465974256c9 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -22,6 +22,7 @@
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/cpufreq.h> 23#include <linux/cpufreq.h>
24#include <linux/config.h> 24#include <linux/config.h>
25#include <linux/sched.h> /* current */
25#include <linux/delay.h> 26#include <linux/delay.h>
26#include <linux/compiler.h> 27#include <linux/compiler.h>
27 28
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index 9e0d5f83cb9f..4dc42a189ae5 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -3,6 +3,7 @@
3 * 3 *
4 * Changes: 4 * Changes:
5 * Venkatesh Pallipadi : Adding cache identification through cpuid(4) 5 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
6 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
6 */ 7 */
7 8
8#include <linux/init.h> 9#include <linux/init.h>
@@ -10,6 +11,7 @@
10#include <linux/device.h> 11#include <linux/device.h>
11#include <linux/compiler.h> 12#include <linux/compiler.h>
12#include <linux/cpu.h> 13#include <linux/cpu.h>
14#include <linux/sched.h>
13 15
14#include <asm/processor.h> 16#include <asm/processor.h>
15#include <asm/smp.h> 17#include <asm/smp.h>
@@ -28,7 +30,7 @@ struct _cache_table
28}; 30};
29 31
30/* all the cache descriptor types we care about (no TLB or trace cache entries) */ 32/* all the cache descriptor types we care about (no TLB or trace cache entries) */
31static struct _cache_table cache_table[] __devinitdata = 33static struct _cache_table cache_table[] __cpuinitdata =
32{ 34{
33 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ 35 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
34 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ 36 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
@@ -117,10 +119,9 @@ struct _cpuid4_info {
117 cpumask_t shared_cpu_map; 119 cpumask_t shared_cpu_map;
118}; 120};
119 121
120#define MAX_CACHE_LEAVES 4
121static unsigned short num_cache_leaves; 122static unsigned short num_cache_leaves;
122 123
123static int __devinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf) 124static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
124{ 125{
125 unsigned int eax, ebx, ecx, edx; 126 unsigned int eax, ebx, ecx, edx;
126 union _cpuid4_leaf_eax cache_eax; 127 union _cpuid4_leaf_eax cache_eax;
@@ -144,23 +145,18 @@ static int __init find_num_cache_leaves(void)
144{ 145{
145 unsigned int eax, ebx, ecx, edx; 146 unsigned int eax, ebx, ecx, edx;
146 union _cpuid4_leaf_eax cache_eax; 147 union _cpuid4_leaf_eax cache_eax;
147 int i; 148 int i = -1;
148 int retval;
149 149
150 retval = MAX_CACHE_LEAVES; 150 do {
151 /* Do cpuid(4) loop to find out num_cache_leaves */ 151 ++i;
152 for (i = 0; i < MAX_CACHE_LEAVES; i++) { 152 /* Do cpuid(4) loop to find out num_cache_leaves */
153 cpuid_count(4, i, &eax, &ebx, &ecx, &edx); 153 cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
154 cache_eax.full = eax; 154 cache_eax.full = eax;
155 if (cache_eax.split.type == CACHE_TYPE_NULL) { 155 } while (cache_eax.split.type != CACHE_TYPE_NULL);
156 retval = i; 156 return i;
157 break;
158 }
159 }
160 return retval;
161} 157}
162 158
163unsigned int __devinit init_intel_cacheinfo(struct cpuinfo_x86 *c) 159unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
164{ 160{
165 unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */ 161 unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
166 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ 162 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
@@ -284,13 +280,7 @@ unsigned int __devinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
284 if ( l3 ) 280 if ( l3 )
285 printk(KERN_INFO "CPU: L3 cache: %dK\n", l3); 281 printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
286 282
287 /* 283 c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
288 * This assumes the L3 cache is shared; it typically lives in
289 * the northbridge. The L1 caches are included by the L2
290 * cache, and so should not be included for the purpose of
291 * SMP switching weights.
292 */
293 c->x86_cache_size = l2 ? l2 : (l1i+l1d);
294 } 284 }
295 285
296 return l2; 286 return l2;
@@ -301,7 +291,7 @@ static struct _cpuid4_info *cpuid4_info[NR_CPUS];
301#define CPUID4_INFO_IDX(x,y) (&((cpuid4_info[x])[y])) 291#define CPUID4_INFO_IDX(x,y) (&((cpuid4_info[x])[y]))
302 292
303#ifdef CONFIG_SMP 293#ifdef CONFIG_SMP
304static void __devinit cache_shared_cpu_map_setup(unsigned int cpu, int index) 294static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
305{ 295{
306 struct _cpuid4_info *this_leaf; 296 struct _cpuid4_info *this_leaf;
307 unsigned long num_threads_sharing; 297 unsigned long num_threads_sharing;
@@ -334,7 +324,7 @@ static void free_cache_attributes(unsigned int cpu)
334 cpuid4_info[cpu] = NULL; 324 cpuid4_info[cpu] = NULL;
335} 325}
336 326
337static int __devinit detect_cache_attributes(unsigned int cpu) 327static int __cpuinit detect_cache_attributes(unsigned int cpu)
338{ 328{
339 struct _cpuid4_info *this_leaf; 329 struct _cpuid4_info *this_leaf;
340 unsigned long j; 330 unsigned long j;
@@ -511,7 +501,7 @@ static void cpuid4_cache_sysfs_exit(unsigned int cpu)
511 free_cache_attributes(cpu); 501 free_cache_attributes(cpu);
512} 502}
513 503
514static int __devinit cpuid4_cache_sysfs_init(unsigned int cpu) 504static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
515{ 505{
516 506
517 if (num_cache_leaves == 0) 507 if (num_cache_leaves == 0)
@@ -542,7 +532,7 @@ err_out:
542} 532}
543 533
544/* Add/Remove cache interface for CPU device */ 534/* Add/Remove cache interface for CPU device */
545static int __devinit cache_add_dev(struct sys_device * sys_dev) 535static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
546{ 536{
547 unsigned int cpu = sys_dev->id; 537 unsigned int cpu = sys_dev->id;
548 unsigned long i, j; 538 unsigned long i, j;
@@ -579,7 +569,7 @@ static int __devinit cache_add_dev(struct sys_device * sys_dev)
579 return retval; 569 return retval;
580} 570}
581 571
582static int __devexit cache_remove_dev(struct sys_device * sys_dev) 572static void __cpuexit cache_remove_dev(struct sys_device * sys_dev)
583{ 573{
584 unsigned int cpu = sys_dev->id; 574 unsigned int cpu = sys_dev->id;
585 unsigned long i; 575 unsigned long i;
@@ -588,24 +578,49 @@ static int __devexit cache_remove_dev(struct sys_device * sys_dev)
588 kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj)); 578 kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
589 kobject_unregister(cache_kobject[cpu]); 579 kobject_unregister(cache_kobject[cpu]);
590 cpuid4_cache_sysfs_exit(cpu); 580 cpuid4_cache_sysfs_exit(cpu);
591 return 0; 581 return;
582}
583
584static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
585 unsigned long action, void *hcpu)
586{
587 unsigned int cpu = (unsigned long)hcpu;
588 struct sys_device *sys_dev;
589
590 sys_dev = get_cpu_sysdev(cpu);
591 switch (action) {
592 case CPU_ONLINE:
593 cache_add_dev(sys_dev);
594 break;
595 case CPU_DEAD:
596 cache_remove_dev(sys_dev);
597 break;
598 }
599 return NOTIFY_OK;
592} 600}
593 601
594static struct sysdev_driver cache_sysdev_driver = { 602static struct notifier_block cacheinfo_cpu_notifier =
595 .add = cache_add_dev, 603{
596 .remove = __devexit_p(cache_remove_dev), 604 .notifier_call = cacheinfo_cpu_callback,
597}; 605};
598 606
599/* Register/Unregister the cpu_cache driver */ 607static int __cpuinit cache_sysfs_init(void)
600static int __devinit cache_register_driver(void)
601{ 608{
609 int i;
610
602 if (num_cache_leaves == 0) 611 if (num_cache_leaves == 0)
603 return 0; 612 return 0;
604 613
605 return sysdev_driver_register(&cpu_sysdev_class,&cache_sysdev_driver); 614 register_cpu_notifier(&cacheinfo_cpu_notifier);
615
616 for_each_online_cpu(i) {
617 cacheinfo_cpu_callback(&cacheinfo_cpu_notifier, CPU_ONLINE,
618 (void *)(long)i);
619 }
620
621 return 0;
606} 622}
607 623
608device_initcall(cache_register_driver); 624device_initcall(cache_sysfs_init);
609 625
610#endif 626#endif
611
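Note on the intel_cacheinfo.c hunks above: the sysdev driver (.add/.remove) is replaced by a CPU hotplug notifier, so the cache sysfs directories are created on CPU_ONLINE and removed on CPU_DEAD, and the init path replays CPU_ONLINE for CPUs that are already up. A minimal sketch of that notifier pattern, using the 2.6-era register_cpu_notifier() API that the patch itself uses; my_online()/my_offline() are hypothetical per-CPU hooks.

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/notifier.h>

static void my_online(unsigned int cpu)  { /* allocate and register per-CPU state */ }
static void my_offline(unsigned int cpu) { /* unregister and free per-CPU state */ }

static int my_cpu_callback(struct notifier_block *nfb,
			   unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		my_online(cpu);
		break;
	case CPU_DEAD:
		my_offline(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_cpu_notifier = {
	.notifier_call = my_cpu_callback,
};

static int __init my_hotplug_init(void)
{
	int cpu;

	register_cpu_notifier(&my_cpu_notifier);
	/* CPUs brought up before registration never saw CPU_ONLINE; replay it. */
	for_each_online_cpu(cpu)
		my_cpu_callback(&my_cpu_notifier, CPU_ONLINE, (void *)(long)cpu);
	return 0;
}
device_initcall(my_hotplug_init);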
diff --git a/arch/i386/kernel/cpu/mcheck/p6.c b/arch/i386/kernel/cpu/mcheck/p6.c
index 3c035b8fa3d9..979b18bc95c1 100644
--- a/arch/i386/kernel/cpu/mcheck/p6.c
+++ b/arch/i386/kernel/cpu/mcheck/p6.c
@@ -102,11 +102,16 @@ void __devinit intel_p6_mcheck_init(struct cpuinfo_x86 *c)
102 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); 102 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
103 nr_mce_banks = l & 0xff; 103 nr_mce_banks = l & 0xff;
104 104
105 /* Don't enable bank 0 on intel P6 cores, it goes bang quickly. */ 105 /*
106 for (i=1; i<nr_mce_banks; i++) { 106 * Following the example in IA-32 SDM Vol 3:
107 * - MC0_CTL should not be written
108 * - Status registers on all banks should be cleared on reset
109 */
110 for (i=1; i<nr_mce_banks; i++)
107 wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff); 111 wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
112
113 for (i=0; i<nr_mce_banks; i++)
108 wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0); 114 wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);
109 }
110 115
111 set_in_cr4 (X86_CR4_MCE); 116 set_in_cr4 (X86_CR4_MCE);
112 printk (KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n", 117 printk (KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n",
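Note on the p6.c hunk above: bank 0's control MSR is still left untouched (per the IA-32 SDM guidance quoted in the new comment), but the status MSR is now cleared for every bank, bank 0 included. The 4*i stride works because each machine-check bank owns four consecutive MSRs (CTL, STATUS, ADDR, MISC). A small sketch of that addressing, assuming wrmsr() and the MSR_IA32_MC0_* constants from <asm/msr.h>.

#include <asm/msr.h>	/* wrmsr(), MSR_IA32_MC0_CTL, MSR_IA32_MC0_STATUS */

/* Each bank's registers sit 4 MSRs apart: CTL, STATUS, ADDR, MISC. */
#define MC_BANK_CTL(bank)	(MSR_IA32_MC0_CTL + 4 * (bank))
#define MC_BANK_STATUS(bank)	(MSR_IA32_MC0_STATUS + 4 * (bank))

static void p6_init_mce_banks(int nr_banks)
{
	int i;

	for (i = 1; i < nr_banks; i++)		/* skip MC0_CTL, enable the rest */
		wrmsr(MC_BANK_CTL(i), 0xffffffff, 0xffffffff);

	for (i = 0; i < nr_banks; i++)		/* clear status on all banks */
		wrmsr(MC_BANK_STATUS(i), 0x0, 0x0);
}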
diff --git a/arch/i386/kernel/cpu/mtrr/if.c b/arch/i386/kernel/cpu/mtrr/if.c
index 1923e0aed26a..cf39e205d33c 100644
--- a/arch/i386/kernel/cpu/mtrr/if.c
+++ b/arch/i386/kernel/cpu/mtrr/if.c
@@ -149,60 +149,89 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
149 return -EINVAL; 149 return -EINVAL;
150} 150}
151 151
152static int 152static long
153mtrr_ioctl(struct inode *inode, struct file *file, 153mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
154 unsigned int cmd, unsigned long __arg)
155{ 154{
156 int err; 155 int err = 0;
157 mtrr_type type; 156 mtrr_type type;
158 struct mtrr_sentry sentry; 157 struct mtrr_sentry sentry;
159 struct mtrr_gentry gentry; 158 struct mtrr_gentry gentry;
160 void __user *arg = (void __user *) __arg; 159 void __user *arg = (void __user *) __arg;
161 160
162 switch (cmd) { 161 switch (cmd) {
162 case MTRRIOC_ADD_ENTRY:
163 case MTRRIOC_SET_ENTRY:
164 case MTRRIOC_DEL_ENTRY:
165 case MTRRIOC_KILL_ENTRY:
166 case MTRRIOC_ADD_PAGE_ENTRY:
167 case MTRRIOC_SET_PAGE_ENTRY:
168 case MTRRIOC_DEL_PAGE_ENTRY:
169 case MTRRIOC_KILL_PAGE_ENTRY:
170 if (copy_from_user(&sentry, arg, sizeof sentry))
171 return -EFAULT;
172 break;
173 case MTRRIOC_GET_ENTRY:
174 case MTRRIOC_GET_PAGE_ENTRY:
175 if (copy_from_user(&gentry, arg, sizeof gentry))
176 return -EFAULT;
177 break;
178#ifdef CONFIG_COMPAT
179 case MTRRIOC32_ADD_ENTRY:
180 case MTRRIOC32_SET_ENTRY:
181 case MTRRIOC32_DEL_ENTRY:
182 case MTRRIOC32_KILL_ENTRY:
183 case MTRRIOC32_ADD_PAGE_ENTRY:
184 case MTRRIOC32_SET_PAGE_ENTRY:
185 case MTRRIOC32_DEL_PAGE_ENTRY:
186 case MTRRIOC32_KILL_PAGE_ENTRY: {
187 struct mtrr_sentry32 __user *s32 = (struct mtrr_sentry32 __user *)__arg;
188 err = get_user(sentry.base, &s32->base);
189 err |= get_user(sentry.size, &s32->size);
190 err |= get_user(sentry.type, &s32->type);
191 if (err)
192 return err;
193 break;
194 }
195 case MTRRIOC32_GET_ENTRY:
196 case MTRRIOC32_GET_PAGE_ENTRY: {
197 struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)__arg;
198 err = get_user(gentry.regnum, &g32->regnum);
199 err |= get_user(gentry.base, &g32->base);
200 err |= get_user(gentry.size, &g32->size);
201 err |= get_user(gentry.type, &g32->type);
202 if (err)
203 return err;
204 break;
205 }
206#endif
207 }
208
209 switch (cmd) {
163 default: 210 default:
164 return -ENOTTY; 211 return -ENOTTY;
165 case MTRRIOC_ADD_ENTRY: 212 case MTRRIOC_ADD_ENTRY:
166 if (!capable(CAP_SYS_ADMIN)) 213 if (!capable(CAP_SYS_ADMIN))
167 return -EPERM; 214 return -EPERM;
168 if (copy_from_user(&sentry, arg, sizeof sentry))
169 return -EFAULT;
170 err = 215 err =
171 mtrr_file_add(sentry.base, sentry.size, sentry.type, 1, 216 mtrr_file_add(sentry.base, sentry.size, sentry.type, 1,
172 file, 0); 217 file, 0);
173 if (err < 0)
174 return err;
175 break; 218 break;
176 case MTRRIOC_SET_ENTRY: 219 case MTRRIOC_SET_ENTRY:
177 if (!capable(CAP_SYS_ADMIN)) 220 if (!capable(CAP_SYS_ADMIN))
178 return -EPERM; 221 return -EPERM;
179 if (copy_from_user(&sentry, arg, sizeof sentry))
180 return -EFAULT;
181 err = mtrr_add(sentry.base, sentry.size, sentry.type, 0); 222 err = mtrr_add(sentry.base, sentry.size, sentry.type, 0);
182 if (err < 0)
183 return err;
184 break; 223 break;
185 case MTRRIOC_DEL_ENTRY: 224 case MTRRIOC_DEL_ENTRY:
186 if (!capable(CAP_SYS_ADMIN)) 225 if (!capable(CAP_SYS_ADMIN))
187 return -EPERM; 226 return -EPERM;
188 if (copy_from_user(&sentry, arg, sizeof sentry))
189 return -EFAULT;
190 err = mtrr_file_del(sentry.base, sentry.size, file, 0); 227 err = mtrr_file_del(sentry.base, sentry.size, file, 0);
191 if (err < 0)
192 return err;
193 break; 228 break;
194 case MTRRIOC_KILL_ENTRY: 229 case MTRRIOC_KILL_ENTRY:
195 if (!capable(CAP_SYS_ADMIN)) 230 if (!capable(CAP_SYS_ADMIN))
196 return -EPERM; 231 return -EPERM;
197 if (copy_from_user(&sentry, arg, sizeof sentry))
198 return -EFAULT;
199 err = mtrr_del(-1, sentry.base, sentry.size); 232 err = mtrr_del(-1, sentry.base, sentry.size);
200 if (err < 0)
201 return err;
202 break; 233 break;
203 case MTRRIOC_GET_ENTRY: 234 case MTRRIOC_GET_ENTRY:
204 if (copy_from_user(&gentry, arg, sizeof gentry))
205 return -EFAULT;
206 if (gentry.regnum >= num_var_ranges) 235 if (gentry.regnum >= num_var_ranges)
207 return -EINVAL; 236 return -EINVAL;
208 mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type); 237 mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type);
@@ -217,60 +246,59 @@ mtrr_ioctl(struct inode *inode, struct file *file,
217 gentry.type = type; 246 gentry.type = type;
218 } 247 }
219 248
220 if (copy_to_user(arg, &gentry, sizeof gentry))
221 return -EFAULT;
222 break; 249 break;
223 case MTRRIOC_ADD_PAGE_ENTRY: 250 case MTRRIOC_ADD_PAGE_ENTRY:
224 if (!capable(CAP_SYS_ADMIN)) 251 if (!capable(CAP_SYS_ADMIN))
225 return -EPERM; 252 return -EPERM;
226 if (copy_from_user(&sentry, arg, sizeof sentry))
227 return -EFAULT;
228 err = 253 err =
229 mtrr_file_add(sentry.base, sentry.size, sentry.type, 1, 254 mtrr_file_add(sentry.base, sentry.size, sentry.type, 1,
230 file, 1); 255 file, 1);
231 if (err < 0)
232 return err;
233 break; 256 break;
234 case MTRRIOC_SET_PAGE_ENTRY: 257 case MTRRIOC_SET_PAGE_ENTRY:
235 if (!capable(CAP_SYS_ADMIN)) 258 if (!capable(CAP_SYS_ADMIN))
236 return -EPERM; 259 return -EPERM;
237 if (copy_from_user(&sentry, arg, sizeof sentry))
238 return -EFAULT;
239 err = mtrr_add_page(sentry.base, sentry.size, sentry.type, 0); 260 err = mtrr_add_page(sentry.base, sentry.size, sentry.type, 0);
240 if (err < 0)
241 return err;
242 break; 261 break;
243 case MTRRIOC_DEL_PAGE_ENTRY: 262 case MTRRIOC_DEL_PAGE_ENTRY:
244 if (!capable(CAP_SYS_ADMIN)) 263 if (!capable(CAP_SYS_ADMIN))
245 return -EPERM; 264 return -EPERM;
246 if (copy_from_user(&sentry, arg, sizeof sentry))
247 return -EFAULT;
248 err = mtrr_file_del(sentry.base, sentry.size, file, 1); 265 err = mtrr_file_del(sentry.base, sentry.size, file, 1);
249 if (err < 0)
250 return err;
251 break; 266 break;
252 case MTRRIOC_KILL_PAGE_ENTRY: 267 case MTRRIOC_KILL_PAGE_ENTRY:
253 if (!capable(CAP_SYS_ADMIN)) 268 if (!capable(CAP_SYS_ADMIN))
254 return -EPERM; 269 return -EPERM;
255 if (copy_from_user(&sentry, arg, sizeof sentry))
256 return -EFAULT;
257 err = mtrr_del_page(-1, sentry.base, sentry.size); 270 err = mtrr_del_page(-1, sentry.base, sentry.size);
258 if (err < 0)
259 return err;
260 break; 271 break;
261 case MTRRIOC_GET_PAGE_ENTRY: 272 case MTRRIOC_GET_PAGE_ENTRY:
262 if (copy_from_user(&gentry, arg, sizeof gentry))
263 return -EFAULT;
264 if (gentry.regnum >= num_var_ranges) 273 if (gentry.regnum >= num_var_ranges)
265 return -EINVAL; 274 return -EINVAL;
266 mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type); 275 mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type);
267 gentry.type = type; 276 gentry.type = type;
277 break;
278 }
279
280 if (err)
281 return err;
268 282
283 switch(cmd) {
284 case MTRRIOC_GET_ENTRY:
285 case MTRRIOC_GET_PAGE_ENTRY:
269 if (copy_to_user(arg, &gentry, sizeof gentry)) 286 if (copy_to_user(arg, &gentry, sizeof gentry))
270 return -EFAULT; 287 err = -EFAULT;
288 break;
289#ifdef CONFIG_COMPAT
290 case MTRRIOC32_GET_ENTRY:
291 case MTRRIOC32_GET_PAGE_ENTRY: {
292 struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)__arg;
293 err = put_user(gentry.base, &g32->base);
294 err |= put_user(gentry.size, &g32->size);
295 err |= put_user(gentry.regnum, &g32->regnum);
296 err |= put_user(gentry.type, &g32->type);
271 break; 297 break;
272 } 298 }
273 return 0; 299#endif
300 }
301 return err;
274} 302}
275 303
276static int 304static int
@@ -310,7 +338,8 @@ static struct file_operations mtrr_fops = {
310 .read = seq_read, 338 .read = seq_read,
311 .llseek = seq_lseek, 339 .llseek = seq_lseek,
312 .write = mtrr_write, 340 .write = mtrr_write,
313 .ioctl = mtrr_ioctl, 341 .unlocked_ioctl = mtrr_ioctl,
342 .compat_ioctl = mtrr_ioctl,
314 .release = mtrr_close, 343 .release = mtrr_close,
315}; 344};
316 345
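The mtrr_ioctl() rework above splits the handler into three passes: a first switch copies the user argument in (with explicit get_user() unpacking for the MTRRIOC32_* compat variants), a second performs the operation, and a final switch copies any result back out, which lets the same function be wired up as both .unlocked_ioctl and .compat_ioctl. The following is only a userspace sketch of that copy-in / act / copy-out shape; the command names are hypothetical and memcpy() stands in for copy_from_user()/copy_to_user():

/* Illustrative sketch of the three-pass ioctl layout, not the kernel code. */
#include <stdio.h>
#include <string.h>

struct sentry { unsigned long base, size; unsigned int type; };
struct gentry { unsigned int regnum; unsigned long base, size; unsigned int type; };

enum { CMD_ADD_ENTRY, CMD_GET_ENTRY };        /* hypothetical command codes */

static int demo_ioctl(unsigned int cmd, void *arg)
{
	struct sentry sentry;
	struct gentry gentry;
	int err = 0;

	switch (cmd) {                        /* pass 1: copy the argument in */
	case CMD_ADD_ENTRY:
		memcpy(&sentry, arg, sizeof(sentry));
		break;
	case CMD_GET_ENTRY:
		memcpy(&gentry, arg, sizeof(gentry));
		break;
	}

	switch (cmd) {                        /* pass 2: perform the operation */
	default:
		return -1;                    /* -ENOTTY in the kernel */
	case CMD_ADD_ENTRY:
		printf("add base=%lu size=%lu\n", sentry.base, sentry.size);
		break;
	case CMD_GET_ENTRY:
		gentry.base = 0x1000; gentry.size = 0x1000; gentry.type = 0;
		break;
	}
	if (err)                              /* errors skip the copy-out pass */
		return err;

	switch (cmd) {                        /* pass 3: copy results back out */
	case CMD_GET_ENTRY:
		memcpy(arg, &gentry, sizeof(gentry));
		break;
	}
	return err;
}

int main(void)
{
	struct sentry s = { .base = 4096, .size = 4096, .type = 0 };
	struct gentry g = { .regnum = 0 };

	demo_ioctl(CMD_ADD_ENTRY, &s);
	demo_ioctl(CMD_GET_ENTRY, &g);
	printf("got base=%lu size=%lu\n", g.base, g.size);
	return 0;
}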
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index 8bd77d948a84..41b871ecf4b3 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -44,7 +44,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
44 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 44 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
45 45
46 /* Intel-defined (#2) */ 46 /* Intel-defined (#2) */
47 "pni", NULL, NULL, "monitor", "ds_cpl", NULL, NULL, "est", 47 "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", NULL, "est",
48 "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL, 48 "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
49 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 49 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
50 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 50 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
diff --git a/arch/i386/kernel/cpuid.c b/arch/i386/kernel/cpuid.c
index 4647db4ad6de..13bae799e626 100644
--- a/arch/i386/kernel/cpuid.c
+++ b/arch/i386/kernel/cpuid.c
@@ -163,7 +163,7 @@ static int cpuid_class_device_create(int i)
163 int err = 0; 163 int err = 0;
164 struct class_device *class_err; 164 struct class_device *class_err;
165 165
166 class_err = class_device_create(cpuid_class, MKDEV(CPUID_MAJOR, i), NULL, "cpu%d",i); 166 class_err = class_device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, i), NULL, "cpu%d",i);
167 if (IS_ERR(class_err)) 167 if (IS_ERR(class_err))
168 err = PTR_ERR(class_err); 168 err = PTR_ERR(class_err);
169 return err; 169 return err;
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c
index 0248e084017c..af809ccf5fbe 100644
--- a/arch/i386/kernel/crash.c
+++ b/arch/i386/kernel/crash.c
@@ -21,7 +21,6 @@
21#include <asm/hardirq.h> 21#include <asm/hardirq.h>
22#include <asm/nmi.h> 22#include <asm/nmi.h>
23#include <asm/hw_irq.h> 23#include <asm/hw_irq.h>
24#include <asm/apic.h>
25#include <mach_ipi.h> 24#include <mach_ipi.h>
26 25
27 26
@@ -148,7 +147,6 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
148 regs = &fixed_regs; 147 regs = &fixed_regs;
149 } 148 }
150 crash_save_this_cpu(regs, cpu); 149 crash_save_this_cpu(regs, cpu);
151 disable_local_APIC();
152 atomic_dec(&waiting_for_crash_ipi); 150 atomic_dec(&waiting_for_crash_ipi);
153 /* Assume hlt works */ 151 /* Assume hlt works */
154 halt(); 152 halt();
@@ -188,7 +186,6 @@ static void nmi_shootdown_cpus(void)
188 } 186 }
189 187
190 /* Leave the nmi callback set */ 188 /* Leave the nmi callback set */
191 disable_local_APIC();
192} 189}
193#else 190#else
194static void nmi_shootdown_cpus(void) 191static void nmi_shootdown_cpus(void)
@@ -213,9 +210,5 @@ void machine_crash_shutdown(struct pt_regs *regs)
213 /* Make a note of crashing cpu. Will be used in NMI callback.*/ 210 /* Make a note of crashing cpu. Will be used in NMI callback.*/
214 crashing_cpu = smp_processor_id(); 211 crashing_cpu = smp_processor_id();
215 nmi_shootdown_cpus(); 212 nmi_shootdown_cpus();
216 lapic_shutdown();
217#if defined(CONFIG_X86_IO_APIC)
218 disable_IO_APIC();
219#endif
220 crash_save_self(regs); 213 crash_save_self(regs);
221} 214}
diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c
index 323ef8ab3244..d86f24909284 100644
--- a/arch/i386/kernel/i8259.c
+++ b/arch/i386/kernel/i8259.c
@@ -435,4 +435,8 @@ void __init init_IRQ(void)
435 setup_irq(FPU_IRQ, &fpu_irq); 435 setup_irq(FPU_IRQ, &fpu_irq);
436 436
437 irq_ctx_init(smp_processor_id()); 437 irq_ctx_init(smp_processor_id());
438
439#ifdef CONFIG_X86_LOCAL_APIC
440 APIC_init();
441#endif
438} 442}
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index fb3991e8229e..5a77c52b20a9 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -46,6 +46,9 @@
46int (*ioapic_renumber_irq)(int ioapic, int irq); 46int (*ioapic_renumber_irq)(int ioapic, int irq);
47atomic_t irq_mis_count; 47atomic_t irq_mis_count;
48 48
49/* Where if anywhere is the i8259 connect in external int mode */
50static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
51
49static DEFINE_SPINLOCK(ioapic_lock); 52static DEFINE_SPINLOCK(ioapic_lock);
50 53
51/* 54/*
@@ -738,7 +741,7 @@ static int find_irq_entry(int apic, int pin, int type)
738/* 741/*
739 * Find the pin to which IRQ[irq] (ISA) is connected 742 * Find the pin to which IRQ[irq] (ISA) is connected
740 */ 743 */
741static int find_isa_irq_pin(int irq, int type) 744static int __init find_isa_irq_pin(int irq, int type)
742{ 745{
743 int i; 746 int i;
744 747
@@ -758,6 +761,33 @@ static int find_isa_irq_pin(int irq, int type)
758 return -1; 761 return -1;
759} 762}
760 763
764static int __init find_isa_irq_apic(int irq, int type)
765{
766 int i;
767
768 for (i = 0; i < mp_irq_entries; i++) {
769 int lbus = mp_irqs[i].mpc_srcbus;
770
771 if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
772 mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
773 mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
774 mp_bus_id_to_type[lbus] == MP_BUS_NEC98
775 ) &&
776 (mp_irqs[i].mpc_irqtype == type) &&
777 (mp_irqs[i].mpc_srcbusirq == irq))
778 break;
779 }
780 if (i < mp_irq_entries) {
781 int apic;
782 for(apic = 0; apic < nr_ioapics; apic++) {
783 if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
784 return apic;
785 }
786 }
787
788 return -1;
789}
790
761/* 791/*
762 * Find a specific PCI IRQ entry. 792 * Find a specific PCI IRQ entry.
763 * Not an __init, possibly needed by modules 793 * Not an __init, possibly needed by modules
@@ -1253,7 +1283,7 @@ static void __init setup_IO_APIC_irqs(void)
1253/* 1283/*
1254 * Set up the 8259A-master output pin: 1284 * Set up the 8259A-master output pin:
1255 */ 1285 */
1256static void __init setup_ExtINT_IRQ0_pin(unsigned int pin, int vector) 1286static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
1257{ 1287{
1258 struct IO_APIC_route_entry entry; 1288 struct IO_APIC_route_entry entry;
1259 unsigned long flags; 1289 unsigned long flags;
@@ -1287,8 +1317,8 @@ static void __init setup_ExtINT_IRQ0_pin(unsigned int pin, int vector)
1287 * Add it to the IO-APIC irq-routing table: 1317 * Add it to the IO-APIC irq-routing table:
1288 */ 1318 */
1289 spin_lock_irqsave(&ioapic_lock, flags); 1319 spin_lock_irqsave(&ioapic_lock, flags);
1290 io_apic_write(0, 0x11+2*pin, *(((int *)&entry)+1)); 1320 io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
1291 io_apic_write(0, 0x10+2*pin, *(((int *)&entry)+0)); 1321 io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
1292 spin_unlock_irqrestore(&ioapic_lock, flags); 1322 spin_unlock_irqrestore(&ioapic_lock, flags);
1293 1323
1294 enable_8259A_irq(0); 1324 enable_8259A_irq(0);
@@ -1595,7 +1625,8 @@ void /*__init*/ print_PIC(void)
1595static void __init enable_IO_APIC(void) 1625static void __init enable_IO_APIC(void)
1596{ 1626{
1597 union IO_APIC_reg_01 reg_01; 1627 union IO_APIC_reg_01 reg_01;
1598 int i; 1628 int i8259_apic, i8259_pin;
1629 int i, apic;
1599 unsigned long flags; 1630 unsigned long flags;
1600 1631
1601 for (i = 0; i < PIN_MAP_SIZE; i++) { 1632 for (i = 0; i < PIN_MAP_SIZE; i++) {
@@ -1609,11 +1640,52 @@ static void __init enable_IO_APIC(void)
1609 /* 1640 /*
1610 * The number of IO-APIC IRQ registers (== #pins): 1641 * The number of IO-APIC IRQ registers (== #pins):
1611 */ 1642 */
1612 for (i = 0; i < nr_ioapics; i++) { 1643 for (apic = 0; apic < nr_ioapics; apic++) {
1613 spin_lock_irqsave(&ioapic_lock, flags); 1644 spin_lock_irqsave(&ioapic_lock, flags);
1614 reg_01.raw = io_apic_read(i, 1); 1645 reg_01.raw = io_apic_read(apic, 1);
1615 spin_unlock_irqrestore(&ioapic_lock, flags); 1646 spin_unlock_irqrestore(&ioapic_lock, flags);
1616 nr_ioapic_registers[i] = reg_01.bits.entries+1; 1647 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
1648 }
1649 for(apic = 0; apic < nr_ioapics; apic++) {
1650 int pin;
1651 /* See if any of the pins is in ExtINT mode */
 1652 for(pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1653 struct IO_APIC_route_entry entry;
1654 spin_lock_irqsave(&ioapic_lock, flags);
1655 *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
1656 *(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
1657 spin_unlock_irqrestore(&ioapic_lock, flags);
1658
1659
1660 /* If the interrupt line is enabled and in ExtInt mode
1661 * I have found the pin where the i8259 is connected.
1662 */
1663 if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
1664 ioapic_i8259.apic = apic;
1665 ioapic_i8259.pin = pin;
1666 goto found_i8259;
1667 }
1668 }
1669 }
1670 found_i8259:
1671 /* Look to see what if the MP table has reported the ExtINT */
1672 /* If we could not find the appropriate pin by looking at the ioapic
1673 * the i8259 probably is not connected the ioapic but give the
1674 * mptable a chance anyway.
1675 */
1676 i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
1677 i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
1678 /* Trust the MP table if nothing is setup in the hardware */
1679 if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
1680 printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
1681 ioapic_i8259.pin = i8259_pin;
1682 ioapic_i8259.apic = i8259_apic;
1683 }
1684 /* Complain if the MP table and the hardware disagree */
1685 if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
1686 (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
1687 {
1688 printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
1617 } 1689 }
1618 1690
1619 /* 1691 /*
@@ -1627,7 +1699,6 @@ static void __init enable_IO_APIC(void)
1627 */ 1699 */
1628void disable_IO_APIC(void) 1700void disable_IO_APIC(void)
1629{ 1701{
1630 int pin;
1631 /* 1702 /*
1632 * Clear the IO-APIC before rebooting: 1703 * Clear the IO-APIC before rebooting:
1633 */ 1704 */
@@ -1638,8 +1709,7 @@ void disable_IO_APIC(void)
1638 * Put that IOAPIC in virtual wire mode 1709 * Put that IOAPIC in virtual wire mode
1639 * so legacy interrupts can be delivered. 1710 * so legacy interrupts can be delivered.
1640 */ 1711 */
1641 pin = find_isa_irq_pin(0, mp_ExtINT); 1712 if (ioapic_i8259.pin != -1) {
1642 if (pin != -1) {
1643 struct IO_APIC_route_entry entry; 1713 struct IO_APIC_route_entry entry;
1644 unsigned long flags; 1714 unsigned long flags;
1645 1715
@@ -1650,7 +1720,7 @@ void disable_IO_APIC(void)
1650 entry.polarity = 0; /* High */ 1720 entry.polarity = 0; /* High */
1651 entry.delivery_status = 0; 1721 entry.delivery_status = 0;
1652 entry.dest_mode = 0; /* Physical */ 1722 entry.dest_mode = 0; /* Physical */
1653 entry.delivery_mode = 7; /* ExtInt */ 1723 entry.delivery_mode = dest_ExtINT; /* ExtInt */
1654 entry.vector = 0; 1724 entry.vector = 0;
1655 entry.dest.physical.physical_dest = 0; 1725 entry.dest.physical.physical_dest = 0;
1656 1726
@@ -1659,11 +1729,13 @@ void disable_IO_APIC(void)
1659 * Add it to the IO-APIC irq-routing table: 1729 * Add it to the IO-APIC irq-routing table:
1660 */ 1730 */
1661 spin_lock_irqsave(&ioapic_lock, flags); 1731 spin_lock_irqsave(&ioapic_lock, flags);
1662 io_apic_write(0, 0x11+2*pin, *(((int *)&entry)+1)); 1732 io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin,
1663 io_apic_write(0, 0x10+2*pin, *(((int *)&entry)+0)); 1733 *(((int *)&entry)+1));
1734 io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin,
1735 *(((int *)&entry)+0));
1664 spin_unlock_irqrestore(&ioapic_lock, flags); 1736 spin_unlock_irqrestore(&ioapic_lock, flags);
1665 } 1737 }
1666 disconnect_bsp_APIC(pin != -1); 1738 disconnect_bsp_APIC(ioapic_i8259.pin != -1);
1667} 1739}
1668 1740
1669/* 1741/*
@@ -2113,20 +2185,21 @@ static void setup_nmi (void)
2113 */ 2185 */
2114static inline void unlock_ExtINT_logic(void) 2186static inline void unlock_ExtINT_logic(void)
2115{ 2187{
2116 int pin, i; 2188 int apic, pin, i;
2117 struct IO_APIC_route_entry entry0, entry1; 2189 struct IO_APIC_route_entry entry0, entry1;
2118 unsigned char save_control, save_freq_select; 2190 unsigned char save_control, save_freq_select;
2119 unsigned long flags; 2191 unsigned long flags;
2120 2192
2121 pin = find_isa_irq_pin(8, mp_INT); 2193 pin = find_isa_irq_pin(8, mp_INT);
2194 apic = find_isa_irq_apic(8, mp_INT);
2122 if (pin == -1) 2195 if (pin == -1)
2123 return; 2196 return;
2124 2197
2125 spin_lock_irqsave(&ioapic_lock, flags); 2198 spin_lock_irqsave(&ioapic_lock, flags);
2126 *(((int *)&entry0) + 1) = io_apic_read(0, 0x11 + 2 * pin); 2199 *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
2127 *(((int *)&entry0) + 0) = io_apic_read(0, 0x10 + 2 * pin); 2200 *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
2128 spin_unlock_irqrestore(&ioapic_lock, flags); 2201 spin_unlock_irqrestore(&ioapic_lock, flags);
2129 clear_IO_APIC_pin(0, pin); 2202 clear_IO_APIC_pin(apic, pin);
2130 2203
2131 memset(&entry1, 0, sizeof(entry1)); 2204 memset(&entry1, 0, sizeof(entry1));
2132 2205
@@ -2139,8 +2212,8 @@ static inline void unlock_ExtINT_logic(void)
2139 entry1.vector = 0; 2212 entry1.vector = 0;
2140 2213
2141 spin_lock_irqsave(&ioapic_lock, flags); 2214 spin_lock_irqsave(&ioapic_lock, flags);
2142 io_apic_write(0, 0x11 + 2 * pin, *(((int *)&entry1) + 1)); 2215 io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
2143 io_apic_write(0, 0x10 + 2 * pin, *(((int *)&entry1) + 0)); 2216 io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
2144 spin_unlock_irqrestore(&ioapic_lock, flags); 2217 spin_unlock_irqrestore(&ioapic_lock, flags);
2145 2218
2146 save_control = CMOS_READ(RTC_CONTROL); 2219 save_control = CMOS_READ(RTC_CONTROL);
@@ -2158,11 +2231,11 @@ static inline void unlock_ExtINT_logic(void)
2158 2231
2159 CMOS_WRITE(save_control, RTC_CONTROL); 2232 CMOS_WRITE(save_control, RTC_CONTROL);
2160 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); 2233 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
2161 clear_IO_APIC_pin(0, pin); 2234 clear_IO_APIC_pin(apic, pin);
2162 2235
2163 spin_lock_irqsave(&ioapic_lock, flags); 2236 spin_lock_irqsave(&ioapic_lock, flags);
2164 io_apic_write(0, 0x11 + 2 * pin, *(((int *)&entry0) + 1)); 2237 io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
2165 io_apic_write(0, 0x10 + 2 * pin, *(((int *)&entry0) + 0)); 2238 io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
2166 spin_unlock_irqrestore(&ioapic_lock, flags); 2239 spin_unlock_irqrestore(&ioapic_lock, flags);
2167} 2240}
2168 2241
@@ -2174,7 +2247,7 @@ static inline void unlock_ExtINT_logic(void)
2174 */ 2247 */
2175static inline void check_timer(void) 2248static inline void check_timer(void)
2176{ 2249{
2177 int pin1, pin2; 2250 int apic1, pin1, apic2, pin2;
2178 int vector; 2251 int vector;
2179 2252
2180 /* 2253 /*
@@ -2196,10 +2269,13 @@ static inline void check_timer(void)
2196 timer_ack = 1; 2269 timer_ack = 1;
2197 enable_8259A_irq(0); 2270 enable_8259A_irq(0);
2198 2271
2199 pin1 = find_isa_irq_pin(0, mp_INT); 2272 pin1 = find_isa_irq_pin(0, mp_INT);
2200 pin2 = find_isa_irq_pin(0, mp_ExtINT); 2273 apic1 = find_isa_irq_apic(0, mp_INT);
2274 pin2 = ioapic_i8259.pin;
2275 apic2 = ioapic_i8259.apic;
2201 2276
2202 printk(KERN_INFO "..TIMER: vector=0x%02X pin1=%d pin2=%d\n", vector, pin1, pin2); 2277 printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
2278 vector, apic1, pin1, apic2, pin2);
2203 2279
2204 if (pin1 != -1) { 2280 if (pin1 != -1) {
2205 /* 2281 /*
@@ -2216,8 +2292,9 @@ static inline void check_timer(void)
2216 clear_IO_APIC_pin(0, pin1); 2292 clear_IO_APIC_pin(0, pin1);
2217 return; 2293 return;
2218 } 2294 }
2219 clear_IO_APIC_pin(0, pin1); 2295 clear_IO_APIC_pin(apic1, pin1);
2220 printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to IO-APIC\n"); 2296 printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to "
2297 "IO-APIC\n");
2221 } 2298 }
2222 2299
2223 printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... "); 2300 printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... ");
@@ -2226,13 +2303,13 @@ static inline void check_timer(void)
2226 /* 2303 /*
2227 * legacy devices should be connected to IO APIC #0 2304 * legacy devices should be connected to IO APIC #0
2228 */ 2305 */
2229 setup_ExtINT_IRQ0_pin(pin2, vector); 2306 setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
2230 if (timer_irq_works()) { 2307 if (timer_irq_works()) {
2231 printk("works.\n"); 2308 printk("works.\n");
2232 if (pin1 != -1) 2309 if (pin1 != -1)
2233 replace_pin_at_irq(0, 0, pin1, 0, pin2); 2310 replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
2234 else 2311 else
2235 add_pin_to_irq(0, 0, pin2); 2312 add_pin_to_irq(0, apic2, pin2);
2236 if (nmi_watchdog == NMI_IO_APIC) { 2313 if (nmi_watchdog == NMI_IO_APIC) {
2237 setup_nmi(); 2314 setup_nmi();
2238 } 2315 }
@@ -2241,7 +2318,7 @@ static inline void check_timer(void)
2241 /* 2318 /*
2242 * Cleanup, just in case ... 2319 * Cleanup, just in case ...
2243 */ 2320 */
2244 clear_IO_APIC_pin(0, pin2); 2321 clear_IO_APIC_pin(apic2, pin2);
2245 } 2322 }
2246 printk(" failed.\n"); 2323 printk(" failed.\n");
2247 2324
@@ -2310,11 +2387,15 @@ void __init setup_IO_APIC(void)
2310 sync_Arb_IDs(); 2387 sync_Arb_IDs();
2311 setup_IO_APIC_irqs(); 2388 setup_IO_APIC_irqs();
2312 init_IO_APIC_traps(); 2389 init_IO_APIC_traps();
2313 check_timer();
2314 if (!acpi_ioapic) 2390 if (!acpi_ioapic)
2315 print_IO_APIC(); 2391 print_IO_APIC();
2316} 2392}
2317 2393
2394void __init IO_APIC_late_time_init(void)
2395{
2396 check_timer();
2397}
2398
2318/* 2399/*
2319 * Called after all the initialization is done. If we didnt find any 2400 * Called after all the initialization is done. If we didnt find any
2320 * APIC bugs then we can allow the modify fast path 2401 * APIC bugs then we can allow the modify fast path
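The enable_IO_APIC() change above records which IO-APIC pin, if any, carries the 8259A in ExtINT mode: it scans the routing entries for an unmasked ExtINT entry, falls back to whatever the MP table reports when the hardware shows nothing, and warns when the two disagree. A small self-contained sketch of that discovery order over made-up tables (the struct and values are illustrative, not the kernel's data structures):

/* Sketch only: mimics the hardware-first, MP-table-fallback pin discovery. */
#include <stdio.h>

struct route { int mask; int ext_int; };           /* fake routing entries */

int main(void)
{
	struct route entries[3] = { {1, 0}, {0, 1}, {0, 0} };  /* pin 1 is ExtINT */
	int hw_pin = -1, mp_pin = 2;          /* pretend the MP table says pin 2 */
	int pin;

	/* 1) trust the hardware: first enabled pin in ExtINT mode wins */
	for (pin = 0; pin < 3; pin++) {
		if (entries[pin].mask == 0 && entries[pin].ext_int) {
			hw_pin = pin;
			break;
		}
	}
	/* 2) if the hardware shows nothing, give the MP table a chance */
	if (hw_pin == -1 && mp_pin >= 0) {
		printf("ExtINT not setup in hardware but reported by MP table\n");
		hw_pin = mp_pin;
	}
	/* 3) complain when both exist but disagree */
	if (hw_pin >= 0 && mp_pin >= 0 && hw_pin != mp_pin)
		printf("ExtINT in hardware and MP table differ\n");

	printf("i8259 assumed on pin %d\n", hw_pin);
	return 0;
}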
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index ce66dcc26d90..1a201a932865 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -218,7 +218,7 @@ int show_interrupts(struct seq_file *p, void *v)
218 218
219 if (i == 0) { 219 if (i == 0) {
220 seq_printf(p, " "); 220 seq_printf(p, " ");
221 for_each_cpu(j) 221 for_each_online_cpu(j)
222 seq_printf(p, "CPU%d ",j); 222 seq_printf(p, "CPU%d ",j);
223 seq_putc(p, '\n'); 223 seq_putc(p, '\n');
224 } 224 }
@@ -232,7 +232,7 @@ int show_interrupts(struct seq_file *p, void *v)
232#ifndef CONFIG_SMP 232#ifndef CONFIG_SMP
233 seq_printf(p, "%10u ", kstat_irqs(i)); 233 seq_printf(p, "%10u ", kstat_irqs(i));
234#else 234#else
235 for_each_cpu(j) 235 for_each_online_cpu(j)
236 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); 236 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
237#endif 237#endif
238 seq_printf(p, " %14s", irq_desc[i].handler->typename); 238 seq_printf(p, " %14s", irq_desc[i].handler->typename);
@@ -246,12 +246,12 @@ skip:
246 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 246 spin_unlock_irqrestore(&irq_desc[i].lock, flags);
247 } else if (i == NR_IRQS) { 247 } else if (i == NR_IRQS) {
248 seq_printf(p, "NMI: "); 248 seq_printf(p, "NMI: ");
249 for_each_cpu(j) 249 for_each_online_cpu(j)
250 seq_printf(p, "%10u ", nmi_count(j)); 250 seq_printf(p, "%10u ", nmi_count(j));
251 seq_putc(p, '\n'); 251 seq_putc(p, '\n');
252#ifdef CONFIG_X86_LOCAL_APIC 252#ifdef CONFIG_X86_LOCAL_APIC
253 seq_printf(p, "LOC: "); 253 seq_printf(p, "LOC: ");
254 for_each_cpu(j) 254 for_each_online_cpu(j)
255 seq_printf(p, "%10u ", 255 seq_printf(p, "%10u ",
256 per_cpu(irq_stat,j).apic_timer_irqs); 256 per_cpu(irq_stat,j).apic_timer_irqs);
257 seq_putc(p, '\n'); 257 seq_putc(p, '\n');
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index 27aabfceb67e..8f767d9aa45d 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -69,7 +69,7 @@ unsigned int def_to_bigsmp = 0;
69/* Processor that is doing the boot up */ 69/* Processor that is doing the boot up */
70unsigned int boot_cpu_physical_apicid = -1U; 70unsigned int boot_cpu_physical_apicid = -1U;
71/* Internal processor count */ 71/* Internal processor count */
72static unsigned int __initdata num_processors; 72static unsigned int __devinitdata num_processors;
73 73
74/* Bitmask of physically existing CPUs */ 74/* Bitmask of physically existing CPUs */
75physid_mask_t phys_cpu_present_map; 75physid_mask_t phys_cpu_present_map;
@@ -119,7 +119,7 @@ static int MP_valid_apicid(int apicid, int version)
119} 119}
120#endif 120#endif
121 121
122static void __init MP_processor_info (struct mpc_config_processor *m) 122static void __devinit MP_processor_info (struct mpc_config_processor *m)
123{ 123{
124 int ver, apicid; 124 int ver, apicid;
125 physid_mask_t phys_cpu; 125 physid_mask_t phys_cpu;
@@ -182,17 +182,6 @@ static void __init MP_processor_info (struct mpc_config_processor *m)
182 boot_cpu_physical_apicid = m->mpc_apicid; 182 boot_cpu_physical_apicid = m->mpc_apicid;
183 } 183 }
184 184
185 if (num_processors >= NR_CPUS) {
186 printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
187 " Processor ignored.\n", NR_CPUS);
188 return;
189 }
190
191 if (num_processors >= maxcpus) {
192 printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
193 " Processor ignored.\n", maxcpus);
194 return;
195 }
196 ver = m->mpc_apicver; 185 ver = m->mpc_apicver;
197 186
198 if (!MP_valid_apicid(apicid, ver)) { 187 if (!MP_valid_apicid(apicid, ver)) {
@@ -201,11 +190,6 @@ static void __init MP_processor_info (struct mpc_config_processor *m)
201 return; 190 return;
202 } 191 }
203 192
204 cpu_set(num_processors, cpu_possible_map);
205 num_processors++;
206 phys_cpu = apicid_to_cpu_present(apicid);
207 physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu);
208
209 /* 193 /*
210 * Validate version 194 * Validate version
211 */ 195 */
@@ -216,6 +200,25 @@ static void __init MP_processor_info (struct mpc_config_processor *m)
216 ver = 0x10; 200 ver = 0x10;
217 } 201 }
218 apic_version[m->mpc_apicid] = ver; 202 apic_version[m->mpc_apicid] = ver;
203
204 phys_cpu = apicid_to_cpu_present(apicid);
205 physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu);
206
207 if (num_processors >= NR_CPUS) {
208 printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
209 " Processor ignored.\n", NR_CPUS);
210 return;
211 }
212
213 if (num_processors >= maxcpus) {
214 printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
215 " Processor ignored.\n", maxcpus);
216 return;
217 }
218
219 cpu_set(num_processors, cpu_possible_map);
220 num_processors++;
221
219 if ((num_processors > 8) && 222 if ((num_processors > 8) &&
220 APIC_XAPIC(ver) && 223 APIC_XAPIC(ver) &&
221 (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)) 224 (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL))
@@ -834,7 +837,7 @@ void __init mp_register_lapic_address (
834} 837}
835 838
836 839
837void __init mp_register_lapic ( 840void __devinit mp_register_lapic (
838 u8 id, 841 u8 id,
839 u8 enabled) 842 u8 enabled)
840{ 843{
diff --git a/arch/i386/kernel/msr.c b/arch/i386/kernel/msr.c
index 03100d6fc5d6..44470fea4309 100644
--- a/arch/i386/kernel/msr.c
+++ b/arch/i386/kernel/msr.c
@@ -246,7 +246,7 @@ static int msr_class_device_create(int i)
246 int err = 0; 246 int err = 0;
247 struct class_device *class_err; 247 struct class_device *class_err;
248 248
249 class_err = class_device_create(msr_class, MKDEV(MSR_MAJOR, i), NULL, "msr%d",i); 249 class_err = class_device_create(msr_class, NULL, MKDEV(MSR_MAJOR, i), NULL, "msr%d",i);
250 if (IS_ERR(class_err)) 250 if (IS_ERR(class_err))
251 err = PTR_ERR(class_err); 251 err = PTR_ERR(class_err);
252 return err; 252 return err;
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 72515b8a1b12..d661703ac1cb 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -100,16 +100,44 @@ int nmi_active;
100 (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \ 100 (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \
101 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE) 101 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
102 102
103#ifdef CONFIG_SMP
104/* The performance counters used by NMI_LOCAL_APIC don't trigger when
105 * the CPU is idle. To make sure the NMI watchdog really ticks on all
106 * CPUs during the test make them busy.
107 */
108static __init void nmi_cpu_busy(void *data)
109{
110 volatile int *endflag = data;
111 local_irq_enable();
112 /* Intentionally don't use cpu_relax here. This is
113 to make sure that the performance counter really ticks,
114 even if there is a simulator or similar that catches the
115 pause instruction. On a real HT machine this is fine because
116 all other CPUs are busy with "useless" delay loops and don't
117 care if they get somewhat less cycles. */
118 while (*endflag == 0)
119 barrier();
120}
121#endif
122
103static int __init check_nmi_watchdog(void) 123static int __init check_nmi_watchdog(void)
104{ 124{
105 unsigned int prev_nmi_count[NR_CPUS]; 125 volatile int endflag = 0;
126 unsigned int *prev_nmi_count;
106 int cpu; 127 int cpu;
107 128
108 if (nmi_watchdog == NMI_NONE) 129 if (nmi_watchdog == NMI_NONE)
109 return 0; 130 return 0;
110 131
132 prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
133 if (!prev_nmi_count)
134 return -1;
135
111 printk(KERN_INFO "Testing NMI watchdog ... "); 136 printk(KERN_INFO "Testing NMI watchdog ... ");
112 137
138 if (nmi_watchdog == NMI_LOCAL_APIC)
139 smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
140
113 for (cpu = 0; cpu < NR_CPUS; cpu++) 141 for (cpu = 0; cpu < NR_CPUS; cpu++)
114 prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count; 142 prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
115 local_irq_enable(); 143 local_irq_enable();
@@ -123,12 +151,18 @@ static int __init check_nmi_watchdog(void)
123 continue; 151 continue;
124#endif 152#endif
125 if (nmi_count(cpu) - prev_nmi_count[cpu] <= 5) { 153 if (nmi_count(cpu) - prev_nmi_count[cpu] <= 5) {
126 printk("CPU#%d: NMI appears to be stuck!\n", cpu); 154 endflag = 1;
155 printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
156 cpu,
157 prev_nmi_count[cpu],
158 nmi_count(cpu));
127 nmi_active = 0; 159 nmi_active = 0;
128 lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG; 160 lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG;
161 kfree(prev_nmi_count);
129 return -1; 162 return -1;
130 } 163 }
131 } 164 }
165 endflag = 1;
132 printk("OK.\n"); 166 printk("OK.\n");
133 167
134 /* now that we know it works we can reduce NMI frequency to 168 /* now that we know it works we can reduce NMI frequency to
@@ -136,6 +170,7 @@ static int __init check_nmi_watchdog(void)
136 if (nmi_watchdog == NMI_LOCAL_APIC) 170 if (nmi_watchdog == NMI_LOCAL_APIC)
137 nmi_hz = 1; 171 nmi_hz = 1;
138 172
173 kfree(prev_nmi_count);
139 return 0; 174 return 0;
140} 175}
141/* This needs to happen later in boot so counters are working */ 176/* This needs to happen later in boot so counters are working */
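check_nmi_watchdog() above now snapshots each CPU's NMI count into a kmalloc'd array, keeps the other CPUs spinning so the performance counters behind NMI_LOCAL_APIC keep ticking even on otherwise idle processors, waits, and then flags any CPU whose count barely moved, printing the before/after values. The shape of that check, reduced to a single-threaded sketch with fake counters (in the kernel the counts come from per_cpu(irq_stat)):

/* Illustrative sketch of the snapshot-and-compare watchdog test. */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

static unsigned int nmi_count[NR_CPUS] = { 40, 41, 3, 55 };   /* fake counters */

int main(void)
{
	unsigned int *prev = malloc(NR_CPUS * sizeof(*prev));  /* was an on-stack
	                                                          NR_CPUS array */
	int cpu, stuck = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		prev[cpu] = nmi_count[cpu];

	/* ... the real code busies the other CPUs and sleeps ~10 ticks here ... */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		nmi_count[cpu] += (cpu == 2) ? 1 : 20;    /* CPU 2 barely ticks */

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (nmi_count[cpu] - prev[cpu] <= 5) {
			printf("CPU#%d: NMI appears to be stuck (%u->%u)!\n",
			       cpu, prev[cpu], nmi_count[cpu]);
			stuck = 1;
		}
	}
	if (!stuck)
		printf("OK.\n");
	free(prev);
	return 0;
}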
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index 7b6368bf8974..efd11f09c996 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -354,7 +354,7 @@ ptrace_set_thread_area(struct task_struct *child,
354 return 0; 354 return 0;
355} 355}
356 356
357asmlinkage int sys_ptrace(long request, long pid, long addr, long data) 357asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
358{ 358{
359 struct task_struct *child; 359 struct task_struct *child;
360 struct user * dummy = NULL; 360 struct user * dummy = NULL;
diff --git a/arch/i386/kernel/reboot_fixups.c b/arch/i386/kernel/reboot_fixups.c
index 1b183b378c2c..c9b87330aeea 100644
--- a/arch/i386/kernel/reboot_fixups.c
+++ b/arch/i386/kernel/reboot_fixups.c
@@ -44,7 +44,7 @@ void mach_reboot_fixups(void)
44 44
45 for (i=0; i < (sizeof(fixups_table)/sizeof(fixups_table[0])); i++) { 45 for (i=0; i < (sizeof(fixups_table)/sizeof(fixups_table[0])); i++) {
46 cur = &(fixups_table[i]); 46 cur = &(fixups_table[i]);
47 dev = pci_get_device(cur->vendor, cur->device, 0); 47 dev = pci_get_device(cur->vendor, cur->device, NULL);
48 if (!dev) 48 if (!dev)
49 continue; 49 continue;
50 50
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 9b8c8a19824d..b48ac635f3c1 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -389,14 +389,24 @@ static void __init limit_regions(unsigned long long size)
389 } 389 }
390 } 390 }
391 for (i = 0; i < e820.nr_map; i++) { 391 for (i = 0; i < e820.nr_map; i++) {
392 if (e820.map[i].type == E820_RAM) { 392 current_addr = e820.map[i].addr + e820.map[i].size;
393 current_addr = e820.map[i].addr + e820.map[i].size; 393 if (current_addr < size)
394 if (current_addr >= size) { 394 continue;
395 e820.map[i].size -= current_addr-size; 395
396 e820.nr_map = i + 1; 396 if (e820.map[i].type != E820_RAM)
397 return; 397 continue;
398 } 398
399 if (e820.map[i].addr >= size) {
400 /*
401 * This region starts past the end of the
402 * requested size, skip it completely.
403 */
404 e820.nr_map = i;
405 } else {
406 e820.nr_map = i + 1;
407 e820.map[i].size -= current_addr - size;
399 } 408 }
409 return;
400 } 410 }
401} 411}
402 412
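The reworked limit_regions() loop walks the e820 map until it finds the first RAM entry that ends at or beyond the requested limit, then either drops that entry entirely (when it starts past the limit) or clips its size, truncating the map right after it. A standalone sketch of the clipping arithmetic on a toy map; the entry layout and addresses are made up for illustration:

/* Sketch of the e820 truncation logic, not the kernel structures. */
#include <stdio.h>

struct region { unsigned long long addr, size; int ram; };

int main(void)
{
	struct region map[3] = {
		{ 0x00000000ULL, 0x0009f000ULL, 1 },
		{ 0x00100000ULL, 0x1ff00000ULL, 1 },   /* ~511 MB of RAM at 1 MB */
		{ 0x20000000ULL, 0x00010000ULL, 0 },
	};
	int nr_map = 3, i;
	unsigned long long limit = 0x10000000ULL;  /* keep only the first 256 MB */

	for (i = 0; i < nr_map; i++) {
		unsigned long long end = map[i].addr + map[i].size;

		if (end < limit || !map[i].ram)        /* entirely below, or not RAM */
			continue;
		if (map[i].addr >= limit) {
			nr_map = i;                    /* region starts past the limit */
		} else {
			map[i].size -= end - limit;    /* clip this region at the limit */
			nr_map = i + 1;
		}
		break;
	}
	for (i = 0; i < nr_map; i++)
		printf("%#llx - %#llx\n", map[i].addr, map[i].addr + map[i].size);
	return 0;
}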
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 1fb26d0e30b6..5a2bbe0c4fff 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -87,7 +87,11 @@ EXPORT_SYMBOL(cpu_online_map);
87cpumask_t cpu_callin_map; 87cpumask_t cpu_callin_map;
88cpumask_t cpu_callout_map; 88cpumask_t cpu_callout_map;
89EXPORT_SYMBOL(cpu_callout_map); 89EXPORT_SYMBOL(cpu_callout_map);
90#ifdef CONFIG_HOTPLUG_CPU
91cpumask_t cpu_possible_map = CPU_MASK_ALL;
92#else
90cpumask_t cpu_possible_map; 93cpumask_t cpu_possible_map;
94#endif
91EXPORT_SYMBOL(cpu_possible_map); 95EXPORT_SYMBOL(cpu_possible_map);
92static cpumask_t smp_commenced_mask; 96static cpumask_t smp_commenced_mask;
93 97
@@ -1074,6 +1078,16 @@ void *xquad_portio;
1074EXPORT_SYMBOL(xquad_portio); 1078EXPORT_SYMBOL(xquad_portio);
1075#endif 1079#endif
1076 1080
1081/*
1082 * Fall back to non SMP mode after errors.
1083 *
1084 */
1085static __init void disable_smp(void)
1086{
1087 cpu_set(0, cpu_sibling_map[0]);
1088 cpu_set(0, cpu_core_map[0]);
1089}
1090
1077static void __init smp_boot_cpus(unsigned int max_cpus) 1091static void __init smp_boot_cpus(unsigned int max_cpus)
1078{ 1092{
1079 int apicid, cpu, bit, kicked; 1093 int apicid, cpu, bit, kicked;
@@ -1086,7 +1100,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
1086 printk("CPU%d: ", 0); 1100 printk("CPU%d: ", 0);
1087 print_cpu_info(&cpu_data[0]); 1101 print_cpu_info(&cpu_data[0]);
1088 1102
1089 boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
1090 boot_cpu_logical_apicid = logical_smp_processor_id(); 1103 boot_cpu_logical_apicid = logical_smp_processor_id();
1091 x86_cpu_to_apicid[0] = boot_cpu_physical_apicid; 1104 x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;
1092 1105
@@ -1098,68 +1111,27 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
1098 cpus_clear(cpu_core_map[0]); 1111 cpus_clear(cpu_core_map[0]);
1099 cpu_set(0, cpu_core_map[0]); 1112 cpu_set(0, cpu_core_map[0]);
1100 1113
1114 map_cpu_to_logical_apicid();
1115
1101 /* 1116 /*
1102 * If we couldn't find an SMP configuration at boot time, 1117 * If we couldn't find an SMP configuration at boot time,
1103 * get out of here now! 1118 * get out of here now!
1104 */ 1119 */
1105 if (!smp_found_config && !acpi_lapic) { 1120 if (!smp_found_config && !acpi_lapic) {
1106 printk(KERN_NOTICE "SMP motherboard not detected.\n"); 1121 printk(KERN_NOTICE "SMP motherboard not detected.\n");
1107 smpboot_clear_io_apic_irqs(); 1122 disable_smp();
1108 phys_cpu_present_map = physid_mask_of_physid(0);
1109 if (APIC_init_uniprocessor())
1110 printk(KERN_NOTICE "Local APIC not detected."
1111 " Using dummy APIC emulation.\n");
1112 map_cpu_to_logical_apicid();
1113 cpu_set(0, cpu_sibling_map[0]);
1114 cpu_set(0, cpu_core_map[0]);
1115 return; 1123 return;
1116 } 1124 }
1117 1125
1118 /* 1126 /*
1119 * Should not be necessary because the MP table should list the boot
1120 * CPU too, but we do it for the sake of robustness anyway.
1121 * Makes no sense to do this check in clustered apic mode, so skip it
1122 */
1123 if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
1124 printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
1125 boot_cpu_physical_apicid);
1126 physid_set(hard_smp_processor_id(), phys_cpu_present_map);
1127 }
1128
1129 /*
1130 * If we couldn't find a local APIC, then get out of here now!
1131 */
1132 if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) {
1133 printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
1134 boot_cpu_physical_apicid);
1135 printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
1136 smpboot_clear_io_apic_irqs();
1137 phys_cpu_present_map = physid_mask_of_physid(0);
1138 cpu_set(0, cpu_sibling_map[0]);
1139 cpu_set(0, cpu_core_map[0]);
1140 return;
1141 }
1142
1143 verify_local_APIC();
1144
1145 /*
1146 * If SMP should be disabled, then really disable it! 1127 * If SMP should be disabled, then really disable it!
1147 */ 1128 */
1148 if (!max_cpus) { 1129 if (!max_cpus || (enable_local_apic < 0)) {
1149 smp_found_config = 0; 1130 printk(KERN_INFO "SMP mode deactivated.\n");
1150 printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n"); 1131 disable_smp();
1151 smpboot_clear_io_apic_irqs();
1152 phys_cpu_present_map = physid_mask_of_physid(0);
1153 cpu_set(0, cpu_sibling_map[0]);
1154 cpu_set(0, cpu_core_map[0]);
1155 return; 1132 return;
1156 } 1133 }
1157 1134
1158 connect_bsp_APIC();
1159 setup_local_APIC();
1160 map_cpu_to_logical_apicid();
1161
1162
1163 setup_portio_remap(); 1135 setup_portio_remap();
1164 1136
1165 /* 1137 /*
@@ -1240,10 +1212,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
1240 cpu_set(0, cpu_sibling_map[0]); 1212 cpu_set(0, cpu_sibling_map[0]);
1241 cpu_set(0, cpu_core_map[0]); 1213 cpu_set(0, cpu_core_map[0]);
1242 1214
1243 smpboot_setup_io_apic();
1244
1245 setup_boot_APIC_clock();
1246
1247 /* 1215 /*
1248 * Synchronize the TSC with the AP 1216 * Synchronize the TSC with the AP
1249 */ 1217 */
diff --git a/arch/i386/kernel/srat.c b/arch/i386/kernel/srat.c
index 516bf5653b02..8de658db8146 100644
--- a/arch/i386/kernel/srat.c
+++ b/arch/i386/kernel/srat.c
@@ -327,7 +327,12 @@ int __init get_memcfg_from_srat(void)
327 int tables = 0; 327 int tables = 0;
328 int i = 0; 328 int i = 0;
329 329
330 acpi_find_root_pointer(ACPI_PHYSICAL_ADDRESSING, rsdp_address); 330 if (ACPI_FAILURE(acpi_find_root_pointer(ACPI_PHYSICAL_ADDRESSING,
331 rsdp_address))) {
332 printk("%s: System description tables not found\n",
333 __FUNCTION__);
334 goto out_err;
335 }
331 336
332 if (rsdp_address->pointer_type == ACPI_PHYSICAL_POINTER) { 337 if (rsdp_address->pointer_type == ACPI_PHYSICAL_POINTER) {
333 printk("%s: assigning address to rsdp\n", __FUNCTION__); 338 printk("%s: assigning address to rsdp\n", __FUNCTION__);
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index 2883a4d4f01f..07471bba2dc6 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -74,10 +74,6 @@ int pit_latch_buggy; /* extern */
74 74
75#include "do_timer.h" 75#include "do_timer.h"
76 76
77u64 jiffies_64 = INITIAL_JIFFIES;
78
79EXPORT_SYMBOL(jiffies_64);
80
81unsigned int cpu_khz; /* Detected as we calibrate the TSC */ 77unsigned int cpu_khz; /* Detected as we calibrate the TSC */
82EXPORT_SYMBOL(cpu_khz); 78EXPORT_SYMBOL(cpu_khz);
83 79
@@ -444,8 +440,8 @@ static int time_init_device(void)
444 440
445device_initcall(time_init_device); 441device_initcall(time_init_device);
446 442
447#ifdef CONFIG_HPET_TIMER
448extern void (*late_time_init)(void); 443extern void (*late_time_init)(void);
444#ifdef CONFIG_HPET_TIMER
449/* Duplicate of time_init() below, with hpet_enable part added */ 445/* Duplicate of time_init() below, with hpet_enable part added */
450static void __init hpet_time_init(void) 446static void __init hpet_time_init(void)
451{ 447{
@@ -462,6 +458,11 @@ static void __init hpet_time_init(void)
462 printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name); 458 printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
463 459
464 time_init_hook(); 460 time_init_hook();
461
462#ifdef CONFIG_X86_LOCAL_APIC
463 if (enable_local_apic >= 0)
464 APIC_late_time_init();
465#endif
465} 466}
466#endif 467#endif
467 468
@@ -486,4 +487,9 @@ void __init time_init(void)
486 printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name); 487 printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
487 488
488 time_init_hook(); 489 time_init_hook();
490
491#ifdef CONFIG_X86_LOCAL_APIC
492 if (enable_local_apic >= 0)
493 late_time_init = APIC_late_time_init;
494#endif
489} 495}
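With the change above, time_init() and hpet_time_init() no longer reach the IO-APIC timer check directly; they arm the generic late_time_init hook (or call APIC_late_time_init() from the HPET path), and check_timer() runs later from IO_APIC_late_time_init() once interrupt setup and calibration are far enough along. The hook itself is just a function pointer consulted later in boot; a trivial sketch of that deferral pattern, with the boot sequence reduced to main():

/* Sketch of the late_time_init deferral, not the kernel's boot path. */
#include <stdio.h>

static void (*late_time_init)(void);   /* invoked later in boot if non-NULL */

static void io_apic_late_time_init(void)
{
	printf("check_timer() would run here, after calibration\n");
}

static void time_init(void)
{
	printf("early time init: timesource selected, TSC calibrated\n");
	late_time_init = io_apic_late_time_init;    /* defer the IRQ0 check */
}

int main(void)
{
	time_init();
	/* ... rest of early boot ... */
	if (late_time_init)
		late_time_init();
	return 0;
}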
diff --git a/arch/i386/kernel/time_hpet.c b/arch/i386/kernel/time_hpet.c
index 658c0629ba6a..9caeaa315cd7 100644
--- a/arch/i386/kernel/time_hpet.c
+++ b/arch/i386/kernel/time_hpet.c
@@ -275,6 +275,7 @@ static unsigned long PIE_freq = DEFAULT_RTC_INT_FREQ;
275static unsigned long PIE_count; 275static unsigned long PIE_count;
276 276
277static unsigned long hpet_rtc_int_freq; /* RTC interrupt frequency */ 277static unsigned long hpet_rtc_int_freq; /* RTC interrupt frequency */
278static unsigned int hpet_t1_cmp; /* cached comparator register */
278 279
279/* 280/*
280 * Timer 1 for RTC, we do not use periodic interrupt feature, 281 * Timer 1 for RTC, we do not use periodic interrupt feature,
@@ -306,10 +307,12 @@ int hpet_rtc_timer_init(void)
306 cnt = hpet_readl(HPET_COUNTER); 307 cnt = hpet_readl(HPET_COUNTER);
307 cnt += ((hpet_tick*HZ)/hpet_rtc_int_freq); 308 cnt += ((hpet_tick*HZ)/hpet_rtc_int_freq);
308 hpet_writel(cnt, HPET_T1_CMP); 309 hpet_writel(cnt, HPET_T1_CMP);
310 hpet_t1_cmp = cnt;
309 local_irq_restore(flags); 311 local_irq_restore(flags);
310 312
311 cfg = hpet_readl(HPET_T1_CFG); 313 cfg = hpet_readl(HPET_T1_CFG);
312 cfg |= HPET_TN_ENABLE | HPET_TN_SETVAL | HPET_TN_32BIT; 314 cfg &= ~HPET_TN_PERIODIC;
315 cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
313 hpet_writel(cfg, HPET_T1_CFG); 316 hpet_writel(cfg, HPET_T1_CFG);
314 317
315 return 1; 318 return 1;
@@ -319,8 +322,12 @@ static void hpet_rtc_timer_reinit(void)
319{ 322{
320 unsigned int cfg, cnt; 323 unsigned int cfg, cnt;
321 324
322 if (!(PIE_on | AIE_on | UIE_on)) 325 if (unlikely(!(PIE_on | AIE_on | UIE_on))) {
326 cfg = hpet_readl(HPET_T1_CFG);
327 cfg &= ~HPET_TN_ENABLE;
328 hpet_writel(cfg, HPET_T1_CFG);
323 return; 329 return;
330 }
324 331
325 if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ)) 332 if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
326 hpet_rtc_int_freq = PIE_freq; 333 hpet_rtc_int_freq = PIE_freq;
@@ -328,15 +335,10 @@ static void hpet_rtc_timer_reinit(void)
328 hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ; 335 hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;
329 336
330 /* It is more accurate to use the comparator value than current count.*/ 337 /* It is more accurate to use the comparator value than current count.*/
331 cnt = hpet_readl(HPET_T1_CMP); 338 cnt = hpet_t1_cmp;
332 cnt += hpet_tick*HZ/hpet_rtc_int_freq; 339 cnt += hpet_tick*HZ/hpet_rtc_int_freq;
333 hpet_writel(cnt, HPET_T1_CMP); 340 hpet_writel(cnt, HPET_T1_CMP);
334 341 hpet_t1_cmp = cnt;
335 cfg = hpet_readl(HPET_T1_CFG);
336 cfg |= HPET_TN_ENABLE | HPET_TN_SETVAL | HPET_TN_32BIT;
337 hpet_writel(cfg, HPET_T1_CFG);
338
339 return;
340} 342}
341 343
342/* 344/*
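hpet_rtc_timer_reinit() above now advances a cached copy of the timer-1 comparator (hpet_t1_cmp) instead of reading HPET_T1_CMP back from the hardware on every RTC interrupt, and it no longer rewrites the config register each tick. The per-interrupt arithmetic is simply "next comparator = previous comparator + hpet_tick*HZ/int_freq"; spelled out with made-up constants:

/* Sketch of the comparator bookkeeping only; the constants are illustrative. */
#include <stdio.h>

int main(void)
{
	unsigned int hpet_tick = 14318;   /* HPET ticks per kernel tick (example) */
	unsigned int hz = 1000;           /* kernel HZ */
	unsigned int rtc_freq = 64;       /* requested RTC interrupt frequency */
	unsigned int cmp = 1000000;       /* cached comparator, i.e. hpet_t1_cmp */
	int i;

	/* each interrupt: bump the cached value, then write it to HPET_T1_CMP */
	for (i = 0; i < 3; i++) {
		cmp += hpet_tick * hz / rtc_freq;
		printf("next comparator = %u\n", cmp);
	}
	return 0;
}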
diff --git a/arch/i386/kernel/timers/timer_hpet.c b/arch/i386/kernel/timers/timer_hpet.c
index d973a8b681fd..be242723c339 100644
--- a/arch/i386/kernel/timers/timer_hpet.c
+++ b/arch/i386/kernel/timers/timer_hpet.c
@@ -30,23 +30,28 @@ static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
30 * basic equation: 30 * basic equation:
31 * ns = cycles / (freq / ns_per_sec) 31 * ns = cycles / (freq / ns_per_sec)
32 * ns = cycles * (ns_per_sec / freq) 32 * ns = cycles * (ns_per_sec / freq)
33 * ns = cycles * (10^9 / (cpu_mhz * 10^6)) 33 * ns = cycles * (10^9 / (cpu_khz * 10^3))
34 * ns = cycles * (10^3 / cpu_mhz) 34 * ns = cycles * (10^6 / cpu_khz)
35 * 35 *
36 * Then we use scaling math (suggested by george@mvista.com) to get: 36 * Then we use scaling math (suggested by george@mvista.com) to get:
37 * ns = cycles * (10^3 * SC / cpu_mhz) / SC 37 * ns = cycles * (10^6 * SC / cpu_khz) / SC
38 * ns = cycles * cyc2ns_scale / SC 38 * ns = cycles * cyc2ns_scale / SC
39 * 39 *
40 * And since SC is a constant power of two, we can convert the div 40 * And since SC is a constant power of two, we can convert the div
41 * into a shift. 41 * into a shift.
42 *
43 * We can use khz divisor instead of mhz to keep a better percision, since
44 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
45 * (mathieu.desnoyers@polymtl.ca)
46 *
42 * -johnstul@us.ibm.com "math is hard, lets go shopping!" 47 * -johnstul@us.ibm.com "math is hard, lets go shopping!"
43 */ 48 */
44static unsigned long cyc2ns_scale; 49static unsigned long cyc2ns_scale;
45#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ 50#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
46 51
47static inline void set_cyc2ns_scale(unsigned long cpu_mhz) 52static inline void set_cyc2ns_scale(unsigned long cpu_khz)
48{ 53{
49 cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR)/cpu_mhz; 54 cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
50} 55}
51 56
52static inline unsigned long long cycles_2_ns(unsigned long long cyc) 57static inline unsigned long long cycles_2_ns(unsigned long long cyc)
@@ -163,7 +168,7 @@ static int __init init_hpet(char* override)
163 printk("Detected %u.%03u MHz processor.\n", 168 printk("Detected %u.%03u MHz processor.\n",
164 cpu_khz / 1000, cpu_khz % 1000); 169 cpu_khz / 1000, cpu_khz % 1000);
165 } 170 }
166 set_cyc2ns_scale(cpu_khz/1000); 171 set_cyc2ns_scale(cpu_khz);
167 } 172 }
168 /* set this only when cpu_has_tsc */ 173 /* set this only when cpu_has_tsc */
169 timer_hpet.read_timer = read_timer_tsc; 174 timer_hpet.read_timer = read_timer_tsc;
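The set_cyc2ns_scale() change above keeps the full kHz frequency in the fixed-point factor: cyc2ns_scale = (10^6 << 10) / cpu_khz, so ns = (cycles * cyc2ns_scale) >> 10. As the added comment notes, the scale is bounded by 10^6 * 2^10 and still fits in 32 bits, while dividing by a truncated MHz value threw away up to a tenth of a percent of the frequency. A quick check of the arithmetic, with an example frequency:

/* Demonstrates the kHz-based cyc2ns fixed-point math; values are examples. */
#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10                 /* 2^10, as in the kernel */

int main(void)
{
	unsigned long cpu_khz = 2167123;       /* example: a ~2.167 GHz CPU */
	unsigned long cyc2ns_scale = (1000000UL << CYC2NS_SCALE_FACTOR) / cpu_khz;
	unsigned long long cyc = 1000000000ULL;        /* one billion cycles */

	/* worst case is cpu_khz == 1: (10^6 << 10) still fits in 32 bits */
	printf("cyc2ns_scale = %lu\n", cyc2ns_scale);

	/* ns = cycles * (10^6 / cpu_khz), done as one multiply and one shift */
	printf("fixed point: %llu ns\n", (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR);
	printf("exact      : %.0f ns\n", (double)cyc * 1e6 / cpu_khz);
	return 0;
}

The identical comment and conversion appear in the timer_tsc.c hunk below.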
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c
index 6dd470cc9f72..d395e3b42485 100644
--- a/arch/i386/kernel/timers/timer_tsc.c
+++ b/arch/i386/kernel/timers/timer_tsc.c
@@ -49,23 +49,28 @@ static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
49 * basic equation: 49 * basic equation:
50 * ns = cycles / (freq / ns_per_sec) 50 * ns = cycles / (freq / ns_per_sec)
51 * ns = cycles * (ns_per_sec / freq) 51 * ns = cycles * (ns_per_sec / freq)
52 * ns = cycles * (10^9 / (cpu_mhz * 10^6)) 52 * ns = cycles * (10^9 / (cpu_khz * 10^3))
53 * ns = cycles * (10^3 / cpu_mhz) 53 * ns = cycles * (10^6 / cpu_khz)
54 * 54 *
55 * Then we use scaling math (suggested by george@mvista.com) to get: 55 * Then we use scaling math (suggested by george@mvista.com) to get:
56 * ns = cycles * (10^3 * SC / cpu_mhz) / SC 56 * ns = cycles * (10^6 * SC / cpu_khz) / SC
57 * ns = cycles * cyc2ns_scale / SC 57 * ns = cycles * cyc2ns_scale / SC
58 * 58 *
59 * And since SC is a constant power of two, we can convert the div 59 * And since SC is a constant power of two, we can convert the div
60 * into a shift. 60 * into a shift.
61 *
62 * We can use khz divisor instead of mhz to keep a better percision, since
63 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
64 * (mathieu.desnoyers@polymtl.ca)
65 *
61 * -johnstul@us.ibm.com "math is hard, lets go shopping!" 66 * -johnstul@us.ibm.com "math is hard, lets go shopping!"
62 */ 67 */
63static unsigned long cyc2ns_scale; 68static unsigned long cyc2ns_scale;
64#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ 69#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
65 70
66static inline void set_cyc2ns_scale(unsigned long cpu_mhz) 71static inline void set_cyc2ns_scale(unsigned long cpu_khz)
67{ 72{
68 cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR)/cpu_mhz; 73 cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
69} 74}
70 75
71static inline unsigned long long cycles_2_ns(unsigned long long cyc) 76static inline unsigned long long cycles_2_ns(unsigned long long cyc)
@@ -286,7 +291,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
286 if (use_tsc) { 291 if (use_tsc) {
287 if (!(freq->flags & CPUFREQ_CONST_LOOPS)) { 292 if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
288 fast_gettimeoffset_quotient = cpufreq_scale(fast_gettimeoffset_ref, freq->new, ref_freq); 293 fast_gettimeoffset_quotient = cpufreq_scale(fast_gettimeoffset_ref, freq->new, ref_freq);
289 set_cyc2ns_scale(cpu_khz/1000); 294 set_cyc2ns_scale(cpu_khz);
290 } 295 }
291 } 296 }
292#endif 297#endif
@@ -536,7 +541,7 @@ static int __init init_tsc(char* override)
536 printk("Detected %u.%03u MHz processor.\n", 541 printk("Detected %u.%03u MHz processor.\n",
537 cpu_khz / 1000, cpu_khz % 1000); 542 cpu_khz / 1000, cpu_khz % 1000);
538 } 543 }
539 set_cyc2ns_scale(cpu_khz/1000); 544 set_cyc2ns_scale(cpu_khz);
540 return 0; 545 return 0;
541 } 546 }
542 } 547 }
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 19e90bdd84ea..c34d1bfc5161 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -488,6 +488,7 @@ fastcall void __kprobes do_general_protection(struct pt_regs * regs,
488 tss->io_bitmap_max - thread->io_bitmap_max); 488 tss->io_bitmap_max - thread->io_bitmap_max);
489 tss->io_bitmap_max = thread->io_bitmap_max; 489 tss->io_bitmap_max = thread->io_bitmap_max;
490 tss->io_bitmap_base = IO_BITMAP_OFFSET; 490 tss->io_bitmap_base = IO_BITMAP_OFFSET;
491 tss->io_bitmap_owner = thread;
491 put_cpu(); 492 put_cpu();
492 return; 493 return;
493 } 494 }
diff --git a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c
index 16b485009622..fc1993564f98 100644
--- a/arch/i386/kernel/vm86.c
+++ b/arch/i386/kernel/vm86.c
@@ -134,17 +134,16 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
134 return ret; 134 return ret;
135} 135}
136 136
137static void mark_screen_rdonly(struct task_struct * tsk) 137static void mark_screen_rdonly(struct mm_struct *mm)
138{ 138{
139 pgd_t *pgd; 139 pgd_t *pgd;
140 pud_t *pud; 140 pud_t *pud;
141 pmd_t *pmd; 141 pmd_t *pmd;
142 pte_t *pte, *mapped; 142 pte_t *pte;
143 spinlock_t *ptl;
143 int i; 144 int i;
144 145
145 preempt_disable(); 146 pgd = pgd_offset(mm, 0xA0000);
146 spin_lock(&tsk->mm->page_table_lock);
147 pgd = pgd_offset(tsk->mm, 0xA0000);
148 if (pgd_none_or_clear_bad(pgd)) 147 if (pgd_none_or_clear_bad(pgd))
149 goto out; 148 goto out;
150 pud = pud_offset(pgd, 0xA0000); 149 pud = pud_offset(pgd, 0xA0000);
@@ -153,16 +152,14 @@ static void mark_screen_rdonly(struct task_struct * tsk)
153 pmd = pmd_offset(pud, 0xA0000); 152 pmd = pmd_offset(pud, 0xA0000);
154 if (pmd_none_or_clear_bad(pmd)) 153 if (pmd_none_or_clear_bad(pmd))
155 goto out; 154 goto out;
156 pte = mapped = pte_offset_map(pmd, 0xA0000); 155 pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
157 for (i = 0; i < 32; i++) { 156 for (i = 0; i < 32; i++) {
158 if (pte_present(*pte)) 157 if (pte_present(*pte))
159 set_pte(pte, pte_wrprotect(*pte)); 158 set_pte(pte, pte_wrprotect(*pte));
160 pte++; 159 pte++;
161 } 160 }
162 pte_unmap(mapped); 161 pte_unmap_unlock(pte, ptl);
163out: 162out:
164 spin_unlock(&tsk->mm->page_table_lock);
165 preempt_enable();
166 flush_tlb(); 163 flush_tlb();
167} 164}
168 165
@@ -306,7 +303,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
306 303
307 tsk->thread.screen_bitmap = info->screen_bitmap; 304 tsk->thread.screen_bitmap = info->screen_bitmap;
308 if (info->flags & VM86_SCREEN_BITMAP) 305 if (info->flags & VM86_SCREEN_BITMAP)
309 mark_screen_rdonly(tsk); 306 mark_screen_rdonly(tsk->mm);
310 __asm__ __volatile__( 307 __asm__ __volatile__(
311 "xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs\n\t" 308 "xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs\n\t"
312 "movl %0,%%esp\n\t" 309 "movl %0,%%esp\n\t"
diff --git a/arch/i386/mach-es7000/es7000.h b/arch/i386/mach-es7000/es7000.h
index 898ed905e119..f1e3204f5dec 100644
--- a/arch/i386/mach-es7000/es7000.h
+++ b/arch/i386/mach-es7000/es7000.h
@@ -24,6 +24,15 @@
24 * http://www.unisys.com 24 * http://www.unisys.com
25 */ 25 */
26 26
27/*
28 * ES7000 chipsets
29 */
30
31#define NON_UNISYS 0
32#define ES7000_CLASSIC 1
33#define ES7000_ZORRO 2
34
35
27#define MIP_REG 1 36#define MIP_REG 1
28#define MIP_PSAI_REG 4 37#define MIP_PSAI_REG 4
29 38
@@ -106,6 +115,6 @@ struct mip_reg {
106 115
107extern int parse_unisys_oem (char *oemptr); 116extern int parse_unisys_oem (char *oemptr);
108extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); 117extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
109extern void setup_unisys (); 118extern void setup_unisys(void);
110extern int es7000_start_cpu(int cpu, unsigned long eip); 119extern int es7000_start_cpu(int cpu, unsigned long eip);
111extern void es7000_sw_apic(void); 120extern void es7000_sw_apic(void);
diff --git a/arch/i386/mach-es7000/es7000plat.c b/arch/i386/mach-es7000/es7000plat.c
index dc6660511b07..a9ab0644f403 100644
--- a/arch/i386/mach-es7000/es7000plat.c
+++ b/arch/i386/mach-es7000/es7000plat.c
@@ -62,6 +62,9 @@ static unsigned int base;
62static int 62static int
63es7000_rename_gsi(int ioapic, int gsi) 63es7000_rename_gsi(int ioapic, int gsi)
64{ 64{
65 if (es7000_plat == ES7000_ZORRO)
66 return gsi;
67
65 if (!base) { 68 if (!base) {
66 int i; 69 int i;
67 for (i = 0; i < nr_ioapics; i++) 70 for (i = 0; i < nr_ioapics; i++)
@@ -76,7 +79,7 @@ es7000_rename_gsi(int ioapic, int gsi)
76#endif /* (CONFIG_X86_IO_APIC) && (CONFIG_ACPI) */ 79#endif /* (CONFIG_X86_IO_APIC) && (CONFIG_ACPI) */
77 80
78void __init 81void __init
79setup_unisys () 82setup_unisys(void)
80{ 83{
81 /* 84 /*
82 * Determine the generation of the ES7000 currently running. 85 * Determine the generation of the ES7000 currently running.
@@ -86,9 +89,9 @@ setup_unisys ()
86 * 89 *
87 */ 90 */
88 if (!(boot_cpu_data.x86 <= 15 && boot_cpu_data.x86_model <= 2)) 91 if (!(boot_cpu_data.x86 <= 15 && boot_cpu_data.x86_model <= 2))
89 es7000_plat = 2; 92 es7000_plat = ES7000_ZORRO;
90 else 93 else
91 es7000_plat = 1; 94 es7000_plat = ES7000_CLASSIC;
92 ioapic_renumber_irq = es7000_rename_gsi; 95 ioapic_renumber_irq = es7000_rename_gsi;
93} 96}
94 97
@@ -151,7 +154,7 @@ parse_unisys_oem (char *oemptr)
151 } 154 }
152 155
153 if (success < 2) { 156 if (success < 2) {
154 es7000_plat = 0; 157 es7000_plat = NON_UNISYS;
155 } else 158 } else
156 setup_unisys(); 159 setup_unisys();
157 return es7000_plat; 160 return es7000_plat;
diff --git a/arch/i386/mm/discontig.c b/arch/i386/mm/discontig.c
index 244d8ec66be2..c4af9638dbfa 100644
--- a/arch/i386/mm/discontig.c
+++ b/arch/i386/mm/discontig.c
@@ -98,7 +98,7 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
98 98
99extern unsigned long find_max_low_pfn(void); 99extern unsigned long find_max_low_pfn(void);
100extern void find_max_pfn(void); 100extern void find_max_pfn(void);
101extern void one_highpage_init(struct page *, int, int); 101extern void add_one_highpage_init(struct page *, int, int);
102 102
103extern struct e820map e820; 103extern struct e820map e820;
104extern unsigned long init_pg_tables_end; 104extern unsigned long init_pg_tables_end;
@@ -427,7 +427,7 @@ void __init set_highmem_pages_init(int bad_ppro)
427 if (!pfn_valid(node_pfn)) 427 if (!pfn_valid(node_pfn))
428 continue; 428 continue;
429 page = pfn_to_page(node_pfn); 429 page = pfn_to_page(node_pfn);
430 one_highpage_init(page, node_pfn, bad_ppro); 430 add_one_highpage_init(page, node_pfn, bad_ppro);
431 } 431 }
432 } 432 }
433 totalram_pages += totalhigh_pages; 433 totalram_pages += totalhigh_pages;
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index 9edd4485b91e..cf572d9a3b6e 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -108,7 +108,7 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs,
108 desc = (void *)desc + (seg & ~7); 108 desc = (void *)desc + (seg & ~7);
109 } else { 109 } else {
110 /* Must disable preemption while reading the GDT. */ 110 /* Must disable preemption while reading the GDT. */
111 desc = (u32 *)&per_cpu(cpu_gdt_table, get_cpu()); 111 desc = (u32 *)get_cpu_gdt_table(get_cpu());
112 desc = (void *)desc + (seg & ~7); 112 desc = (void *)desc + (seg & ~7);
113 } 113 }
114 114
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 2ebaf75f732e..542d9298da5e 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -27,6 +27,7 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/proc_fs.h> 28#include <linux/proc_fs.h>
29#include <linux/efi.h> 29#include <linux/efi.h>
30#include <linux/memory_hotplug.h>
30 31
31#include <asm/processor.h> 32#include <asm/processor.h>
32#include <asm/system.h> 33#include <asm/system.h>
@@ -266,17 +267,46 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
266 pkmap_page_table = pte; 267 pkmap_page_table = pte;
267} 268}
268 269
269void __init one_highpage_init(struct page *page, int pfn, int bad_ppro) 270void __devinit free_new_highpage(struct page *page)
271{
272 set_page_count(page, 1);
273 __free_page(page);
274 totalhigh_pages++;
275}
276
277void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
270{ 278{
271 if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) { 279 if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
272 ClearPageReserved(page); 280 ClearPageReserved(page);
273 set_page_count(page, 1); 281 free_new_highpage(page);
274 __free_page(page);
275 totalhigh_pages++;
276 } else 282 } else
277 SetPageReserved(page); 283 SetPageReserved(page);
278} 284}
279 285
286static int add_one_highpage_hotplug(struct page *page, unsigned long pfn)
287{
288 free_new_highpage(page);
289 totalram_pages++;
290#ifdef CONFIG_FLATMEM
291 max_mapnr = max(pfn, max_mapnr);
292#endif
293 num_physpages++;
294 return 0;
295}
296
297/*
298 * Not currently handling the NUMA case.
299 * Assuming single node and all memory that
300 * has been added dynamically that would be
301 * onlined here is in HIGHMEM
302 */
303void online_page(struct page *page)
304{
305 ClearPageReserved(page);
306 add_one_highpage_hotplug(page, page_to_pfn(page));
307}
308
309
280#ifdef CONFIG_NUMA 310#ifdef CONFIG_NUMA
281extern void set_highmem_pages_init(int); 311extern void set_highmem_pages_init(int);
282#else 312#else
@@ -284,7 +314,7 @@ static void __init set_highmem_pages_init(int bad_ppro)
284{ 314{
285 int pfn; 315 int pfn;
286 for (pfn = highstart_pfn; pfn < highend_pfn; pfn++) 316 for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
287 one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro); 317 add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
288 totalram_pages += totalhigh_pages; 318 totalram_pages += totalhigh_pages;
289} 319}
290#endif /* CONFIG_FLATMEM */ 320#endif /* CONFIG_FLATMEM */
@@ -615,6 +645,28 @@ void __init mem_init(void)
615#endif 645#endif
616} 646}
617 647
648/*
649 * this is for the non-NUMA, single node SMP system case.
650 * Specifically, in the case of x86, we will always add
651 * memory to the highmem for now.
652 */
653#ifndef CONFIG_NEED_MULTIPLE_NODES
654int add_memory(u64 start, u64 size)
655{
656 struct pglist_data *pgdata = &contig_page_data;
657 struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
658 unsigned long start_pfn = start >> PAGE_SHIFT;
659 unsigned long nr_pages = size >> PAGE_SHIFT;
660
661 return __add_pages(zone, start_pfn, nr_pages);
662}
663
664int remove_memory(u64 start, u64 size)
665{
666 return -EINVAL;
667}
668#endif
669
618kmem_cache_t *pgd_cache; 670kmem_cache_t *pgd_cache;
619kmem_cache_t *pmd_cache; 671kmem_cache_t *pmd_cache;
620 672
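The add_memory()/remove_memory() pair above is the i386 glue for the new memory-hotplug core: it treats the machine as a single contiguous node, picks the highest zone (HIGHMEM), converts the byte range to page frames and hands it to __add_pages(); removal is simply refused. A hedged usage sketch, with the start address and size made up purely for illustration:

	/* Hypothetical caller onlining 128 MB found at the 4 GB mark;
	 * only the unit conversion and return convention matter here. */
	u64 start = 0x100000000ULL;	/* byte address, 4 GB */
	u64 size  = 128ULL << 20;	/* 128 MB */

	if (add_memory(start, size) != 0)	/* -> __add_pages(zone, start>>PAGE_SHIFT, size>>PAGE_SHIFT) */
		printk(KERN_ERR "add_memory failed\n");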
diff --git a/arch/i386/mm/ioremap.c b/arch/i386/mm/ioremap.c
index f379b8d67558..5d09de8d1c6b 100644
--- a/arch/i386/mm/ioremap.c
+++ b/arch/i386/mm/ioremap.c
@@ -28,7 +28,7 @@ static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
28 unsigned long pfn; 28 unsigned long pfn;
29 29
30 pfn = phys_addr >> PAGE_SHIFT; 30 pfn = phys_addr >> PAGE_SHIFT;
31 pte = pte_alloc_kernel(&init_mm, pmd, addr); 31 pte = pte_alloc_kernel(pmd, addr);
32 if (!pte) 32 if (!pte)
33 return -ENOMEM; 33 return -ENOMEM;
34 do { 34 do {
@@ -87,14 +87,12 @@ static int ioremap_page_range(unsigned long addr,
87 flush_cache_all(); 87 flush_cache_all();
88 phys_addr -= addr; 88 phys_addr -= addr;
89 pgd = pgd_offset_k(addr); 89 pgd = pgd_offset_k(addr);
90 spin_lock(&init_mm.page_table_lock);
91 do { 90 do {
92 next = pgd_addr_end(addr, end); 91 next = pgd_addr_end(addr, end);
93 err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, flags); 92 err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, flags);
94 if (err) 93 if (err)
95 break; 94 break;
96 } while (pgd++, addr = next, addr != end); 95 } while (pgd++, addr = next, addr != end);
97 spin_unlock(&init_mm.page_table_lock);
98 flush_tlb_all(); 96 flush_tlb_all();
99 return err; 97 return err;
100} 98}
diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index dcdce2c6c532..9db3242103be 100644
--- a/arch/i386/mm/pgtable.c
+++ b/arch/i386/mm/pgtable.c
@@ -31,11 +31,13 @@ void show_mem(void)
31 pg_data_t *pgdat; 31 pg_data_t *pgdat;
32 unsigned long i; 32 unsigned long i;
33 struct page_state ps; 33 struct page_state ps;
34 unsigned long flags;
34 35
35 printk(KERN_INFO "Mem-info:\n"); 36 printk(KERN_INFO "Mem-info:\n");
36 show_free_areas(); 37 show_free_areas();
37 printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); 38 printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
38 for_each_pgdat(pgdat) { 39 for_each_pgdat(pgdat) {
40 pgdat_resize_lock(pgdat, &flags);
39 for (i = 0; i < pgdat->node_spanned_pages; ++i) { 41 for (i = 0; i < pgdat->node_spanned_pages; ++i) {
40 page = pgdat_page_nr(pgdat, i); 42 page = pgdat_page_nr(pgdat, i);
41 total++; 43 total++;
@@ -48,6 +50,7 @@ void show_mem(void)
48 else if (page_count(page)) 50 else if (page_count(page))
49 shared += page_count(page) - 1; 51 shared += page_count(page) - 1;
50 } 52 }
53 pgdat_resize_unlock(pgdat, &flags);
51 } 54 }
52 printk(KERN_INFO "%d pages of RAM\n", total); 55 printk(KERN_INFO "%d pages of RAM\n", total);
53 printk(KERN_INFO "%d pages of HIGHMEM\n", highmem); 56 printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
@@ -188,19 +191,19 @@ static inline void pgd_list_add(pgd_t *pgd)
188 struct page *page = virt_to_page(pgd); 191 struct page *page = virt_to_page(pgd);
189 page->index = (unsigned long)pgd_list; 192 page->index = (unsigned long)pgd_list;
190 if (pgd_list) 193 if (pgd_list)
191 pgd_list->private = (unsigned long)&page->index; 194 set_page_private(pgd_list, (unsigned long)&page->index);
192 pgd_list = page; 195 pgd_list = page;
193 page->private = (unsigned long)&pgd_list; 196 set_page_private(page, (unsigned long)&pgd_list);
194} 197}
195 198
196static inline void pgd_list_del(pgd_t *pgd) 199static inline void pgd_list_del(pgd_t *pgd)
197{ 200{
198 struct page *next, **pprev, *page = virt_to_page(pgd); 201 struct page *next, **pprev, *page = virt_to_page(pgd);
199 next = (struct page *)page->index; 202 next = (struct page *)page->index;
200 pprev = (struct page **)page->private; 203 pprev = (struct page **)page_private(page);
201 *pprev = next; 204 *pprev = next;
202 if (next) 205 if (next)
203 next->private = (unsigned long)pprev; 206 set_page_private(next, (unsigned long)pprev);
204} 207}
205 208
206void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused) 209void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
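The pgd_list code above now goes through page_private()/set_page_private() instead of poking page->private directly. Judging from how they are used here, the accessors are thin wrappers along these lines (a sketch; the authoritative definitions live in include/linux/mm.h):

	#define page_private(page)		((page)->private)
	#define set_page_private(page, v)	((page)->private = (v))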
diff --git a/arch/i386/oprofile/backtrace.c b/arch/i386/oprofile/backtrace.c
index 65dfd2edb671..21654be3f73f 100644
--- a/arch/i386/oprofile/backtrace.c
+++ b/arch/i386/oprofile/backtrace.c
@@ -12,6 +12,7 @@
12#include <linux/sched.h> 12#include <linux/sched.h>
13#include <linux/mm.h> 13#include <linux/mm.h>
14#include <asm/ptrace.h> 14#include <asm/ptrace.h>
15#include <asm/uaccess.h>
15 16
16struct frame_head { 17struct frame_head {
17 struct frame_head * ebp; 18 struct frame_head * ebp;
@@ -21,26 +22,22 @@ struct frame_head {
21static struct frame_head * 22static struct frame_head *
22dump_backtrace(struct frame_head * head) 23dump_backtrace(struct frame_head * head)
23{ 24{
24 oprofile_add_trace(head->ret); 25 struct frame_head bufhead[2];
25 26
26 /* frame pointers should strictly progress back up the stack 27 /* Also check accessibility of one struct frame_head beyond */
27 * (towards higher addresses) */ 28 if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
28 if (head >= head->ebp) 29 return NULL;
30 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
29 return NULL; 31 return NULL;
30 32
31 return head->ebp; 33 oprofile_add_trace(bufhead[0].ret);
32}
33
34/* check that the page(s) containing the frame head are present */
35static int pages_present(struct frame_head * head)
36{
37 struct mm_struct * mm = current->mm;
38 34
39 /* FIXME: only necessary once per page */ 35 /* frame pointers should strictly progress back up the stack
40 if (!check_user_page_readable(mm, (unsigned long)head)) 36 * (towards higher addresses) */
41 return 0; 37 if (head >= bufhead[0].ebp)
38 return NULL;
42 39
43 return check_user_page_readable(mm, (unsigned long)(head + 1)); 40 return bufhead[0].ebp;
44} 41}
45 42
46/* 43/*
@@ -97,15 +94,6 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
97 return; 94 return;
98 } 95 }
99 96
100#ifdef CONFIG_SMP 97 while (depth-- && head)
101 if (!spin_trylock(&current->mm->page_table_lock))
102 return;
103#endif
104
105 while (depth-- && head && pages_present(head))
106 head = dump_backtrace(head); 98 head = dump_backtrace(head);
107
108#ifdef CONFIG_SMP
109 spin_unlock(&current->mm->page_table_lock);
110#endif
111} 99}
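The rewritten dump_backtrace() no longer dereferences the user-space frame pointer directly (and so no longer needs pages_present() or mm->page_table_lock); it snapshots the current frame plus the one beyond it with access_ok() and __copy_from_user_inatomic(), which simply fails if the page is not resident. The core of that pattern, reduced to a hedged sketch rather than the exact oprofile code:

	/* Fault-safe read of one user frame head. */
	struct frame_head buf;

	if (!access_ok(VERIFY_READ, head, sizeof(buf)))
		return NULL;			/* not a plausible user pointer */
	if (__copy_from_user_inatomic(&buf, head, sizeof(buf)))
		return NULL;			/* page not present; bail out quietly */
	/* buf.ebp and buf.ret can now be used without touching user memory again */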
diff --git a/arch/i386/pci/fixup.c b/arch/i386/pci/fixup.c
index 8e8e895e1b5a..330fd2b68075 100644
--- a/arch/i386/pci/fixup.c
+++ b/arch/i386/pci/fixup.c
@@ -2,6 +2,8 @@
2 * Exceptions for specific devices. Usually work-arounds for fatal design flaws. 2 * Exceptions for specific devices. Usually work-arounds for fatal design flaws.
3 */ 3 */
4 4
5#include <linux/delay.h>
6#include <linux/dmi.h>
5#include <linux/pci.h> 7#include <linux/pci.h>
6#include <linux/init.h> 8#include <linux/init.h>
7#include "pci.h" 9#include "pci.h"
@@ -384,3 +386,60 @@ static void __devinit pci_fixup_video(struct pci_dev *pdev)
384 } 386 }
385} 387}
386DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_video); 388DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_video);
389
390/*
391 * Some Toshiba laptops need extra code to enable their TI TSB43AB22/A.
392 *
393 * We pretend to bring them out of full D3 state, and restore the proper
394 * IRQ, PCI cache line size, and BARs, otherwise the device won't function
395 * properly. In some cases, the device will generate an interrupt on
396 * the wrong IRQ line, causing any devices sharing the line it's
397 * *supposed* to use to be disabled by the kernel's IRQ debug code.
398 */
399static u16 toshiba_line_size;
400
401static struct dmi_system_id __devinit toshiba_ohci1394_dmi_table[] = {
402 {
403 .ident = "Toshiba PS5 based laptop",
404 .matches = {
405 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
406 DMI_MATCH(DMI_PRODUCT_VERSION, "PS5"),
407 },
408 },
409 {
410 .ident = "Toshiba PSM4 based laptop",
411 .matches = {
412 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
413 DMI_MATCH(DMI_PRODUCT_VERSION, "PSM4"),
414 },
415 },
416 { }
417};
418
419static void __devinit pci_pre_fixup_toshiba_ohci1394(struct pci_dev *dev)
420{
421 if (!dmi_check_system(toshiba_ohci1394_dmi_table))
422 return; /* only applies to certain Toshibas (so far) */
423
424 dev->current_state = PCI_D3cold;
425 pci_read_config_word(dev, PCI_CACHE_LINE_SIZE, &toshiba_line_size);
426}
427DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0x8032,
428 pci_pre_fixup_toshiba_ohci1394);
429
430static void __devinit pci_post_fixup_toshiba_ohci1394(struct pci_dev *dev)
431{
432 if (!dmi_check_system(toshiba_ohci1394_dmi_table))
433 return; /* only applies to certain Toshibas (so far) */
434
435 /* Restore config space on Toshiba laptops */
436 mdelay(10);
437 pci_write_config_word(dev, PCI_CACHE_LINE_SIZE, toshiba_line_size);
438 pci_write_config_word(dev, PCI_INTERRUPT_LINE, dev->irq);
439 pci_write_config_dword(dev, PCI_BASE_ADDRESS_0,
440 pci_resource_start(dev, 0));
441 pci_write_config_dword(dev, PCI_BASE_ADDRESS_1,
442 pci_resource_start(dev, 1));
443}
444DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_TI, 0x8032,
445 pci_post_fixup_toshiba_ohci1394);
diff --git a/arch/i386/pci/irq.c b/arch/i386/pci/irq.c
index cddafe33ff7c..19e6f4871d1e 100644
--- a/arch/i386/pci/irq.c
+++ b/arch/i386/pci/irq.c
@@ -547,31 +547,48 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
547 return 0; 547 return 0;
548} 548}
549 549
550static __init int via_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) 550static __init int via_router_probe(struct irq_router *r,
551 struct pci_dev *router, u16 device)
551{ 552{
552 /* FIXME: We should move some of the quirk fixup stuff here */ 553 /* FIXME: We should move some of the quirk fixup stuff here */
553 554
554 if (router->device == PCI_DEVICE_ID_VIA_82C686 && 555 /*
555 device == PCI_DEVICE_ID_VIA_82C586_0) { 556 * work arounds for some buggy BIOSes
556 /* Asus k7m bios wrongly reports 82C686A as 586-compatible */ 557 */
557 device = PCI_DEVICE_ID_VIA_82C686; 558 if (device == PCI_DEVICE_ID_VIA_82C586_0) {
559 switch(router->device) {
560 case PCI_DEVICE_ID_VIA_82C686:
561 /*
562 * Asus k7m bios wrongly reports 82C686A
563 * as 586-compatible
564 */
565 device = PCI_DEVICE_ID_VIA_82C686;
566 break;
567 case PCI_DEVICE_ID_VIA_8235:
568 /*
569 * Asus a7v-x bios wrongly reports 8235
570 * as 586-compatible
571 */
572 device = PCI_DEVICE_ID_VIA_8235;
573 break;
574 }
558 } 575 }
559 576
560 switch(device) 577 switch(device) {
561 { 578 case PCI_DEVICE_ID_VIA_82C586_0:
562 case PCI_DEVICE_ID_VIA_82C586_0: 579 r->name = "VIA";
563 r->name = "VIA"; 580 r->get = pirq_via586_get;
564 r->get = pirq_via586_get; 581 r->set = pirq_via586_set;
565 r->set = pirq_via586_set; 582 return 1;
566 return 1; 583 case PCI_DEVICE_ID_VIA_82C596:
567 case PCI_DEVICE_ID_VIA_82C596: 584 case PCI_DEVICE_ID_VIA_82C686:
568 case PCI_DEVICE_ID_VIA_82C686: 585 case PCI_DEVICE_ID_VIA_8231:
569 case PCI_DEVICE_ID_VIA_8231: 586 case PCI_DEVICE_ID_VIA_8235:
570 /* FIXME: add new ones for 8233/5 */ 587 /* FIXME: add new ones for 8233/5 */
571 r->name = "VIA"; 588 r->name = "VIA";
572 r->get = pirq_via_get; 589 r->get = pirq_via_get;
573 r->set = pirq_via_set; 590 r->set = pirq_via_set;
574 return 1; 591 return 1;
575 } 592 }
576 return 0; 593 return 0;
577} 594}
diff --git a/arch/i386/power/cpu.c b/arch/i386/power/cpu.c
index b27c5acc79d0..1f1572692e0b 100644
--- a/arch/i386/power/cpu.c
+++ b/arch/i386/power/cpu.c
@@ -51,16 +51,14 @@ void save_processor_state(void)
51 __save_processor_state(&saved_context); 51 __save_processor_state(&saved_context);
52} 52}
53 53
54static void 54static void do_fpu_end(void)
55do_fpu_end(void)
56{ 55{
57 /* restore FPU regs if necessary */ 56 /*
58 /* Do it out of line so that gcc does not move cr0 load to some stupid place */ 57 * Restore FPU regs if necessary.
59 kernel_fpu_end(); 58 */
60 mxcsr_feature_mask_init(); 59 kernel_fpu_end();
61} 60}
62 61
63
64static void fix_processor_context(void) 62static void fix_processor_context(void)
65{ 63{
66 int cpu = smp_processor_id(); 64 int cpu = smp_processor_id();