Diffstat (limited to 'arch/i386')
-rw-r--r--  arch/i386/Kconfig | 78
-rw-r--r--  arch/i386/Makefile | 16
-rw-r--r--  arch/i386/Makefile.cpu | 10
-rw-r--r--  arch/i386/boot/Makefile | 4
-rw-r--r--  arch/i386/boot/compressed/misc.c | 2
-rw-r--r--  arch/i386/boot/install.sh | 14
-rw-r--r--  arch/i386/boot/video.S | 5
-rw-r--r--  arch/i386/crypto/aes-i586-asm.S | 40
-rw-r--r--  arch/i386/crypto/aes.c | 56
-rw-r--r--  arch/i386/defconfig | 2
-rw-r--r--  arch/i386/kernel/Makefile | 7
-rw-r--r--  arch/i386/kernel/acpi/Makefile | 2
-rw-r--r--  arch/i386/kernel/acpi/boot.c | 2
-rw-r--r--  arch/i386/kernel/acpi/cstate.c | 58
-rw-r--r--  arch/i386/kernel/acpi/processor.c | 75
-rw-r--r--  arch/i386/kernel/apic.c | 129
-rw-r--r--  arch/i386/kernel/apm.c | 5
-rw-r--r--  arch/i386/kernel/cpu/amd.c | 7
-rw-r--r--  arch/i386/kernel/cpu/changelog | 63
-rw-r--r--  arch/i386/kernel/cpu/common.c | 19
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c | 76
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k8.c | 4
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c | 17
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-est-common.h | 25
-rw-r--r--  arch/i386/kernel/cpu/intel.c | 5
-rw-r--r--  arch/i386/kernel/cpu/mtrr/changelog | 229
-rw-r--r--  arch/i386/kernel/cpu/mtrr/if.c | 1
-rw-r--r--  arch/i386/kernel/cpu/proc.c | 27
-rw-r--r--  arch/i386/kernel/crash.c | 50
-rw-r--r--  arch/i386/kernel/crash_dump.c | 74
-rw-r--r--  arch/i386/kernel/dmi_scan.c | 16
-rw-r--r--  arch/i386/kernel/entry.S | 2
-rw-r--r--  arch/i386/kernel/init_task.c | 2
-rw-r--r--  arch/i386/kernel/io_apic.c | 2
-rw-r--r--  arch/i386/kernel/ioport.c | 1
-rw-r--r--  arch/i386/kernel/irq.c | 2
-rw-r--r--  arch/i386/kernel/kprobes.c | 23
-rw-r--r--  arch/i386/kernel/microcode.c | 5
-rw-r--r--  arch/i386/kernel/process.c | 27
-rw-r--r--  arch/i386/kernel/quirks.c | 9
-rw-r--r--  arch/i386/kernel/reboot.c | 7
-rw-r--r--  arch/i386/kernel/scx200.c | 2
-rw-r--r--  arch/i386/kernel/setup.c | 6
-rw-r--r--  arch/i386/kernel/signal.c | 109
-rw-r--r--  arch/i386/kernel/smpboot.c | 4
-rw-r--r--  arch/i386/kernel/syscall_table.S | 16
-rw-r--r--  arch/i386/kernel/time.c | 6
-rw-r--r--  arch/i386/kernel/time_hpet.c | 2
-rw-r--r--  arch/i386/kernel/traps.c | 108
-rw-r--r--  arch/i386/kernel/vm86.c | 5
-rw-r--r--  arch/i386/mm/init.c | 2
-rw-r--r--  arch/i386/mm/pageattr.c | 4
-rw-r--r--  arch/i386/pci/acpi.c | 2
-rw-r--r--  arch/i386/pci/fixup.c | 23
-rw-r--r--  arch/i386/pci/irq.c | 42
55 files changed, 664 insertions(+), 865 deletions(-)
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 968fabd8723f..cbde675bc95c 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -29,10 +29,6 @@ config MMU
29config SBUS 29config SBUS
30 bool 30 bool
31 31
32config UID16
33 bool
34 default y
35
36config GENERIC_ISA_DMA 32config GENERIC_ISA_DMA
37 bool 33 bool
38 default y 34 default y
@@ -45,8 +41,21 @@ config ARCH_MAY_HAVE_PC_FDC
45 bool 41 bool
46 default y 42 default y
47 43
44config DMI
45 bool
46 default y
47
48source "init/Kconfig" 48source "init/Kconfig"
49 49
50config DOUBLEFAULT
51 default y
52 bool "Enable doublefault exception handler" if EMBEDDED
53 help
54 This option allows trapping of rare doublefault exceptions that
55 would otherwise cause a system to silently reboot. Disabling this
56 option saves about 4k and might cause you much additional grey
57 hair.
58
50menu "Processor type and features" 59menu "Processor type and features"
51 60
52choice 61choice
@@ -630,10 +639,6 @@ config REGPARM
630 and passes the first three arguments of a function call in registers. 639 and passes the first three arguments of a function call in registers.
631 This will probably break binary only modules. 640 This will probably break binary only modules.
632 641
633 This feature is only enabled for gcc-3.0 and later - earlier compilers
634 generate incorrect output with certain kernel constructs when
635 -mregparm=3 is used.
636
637config SECCOMP 642config SECCOMP
638 bool "Enable seccomp to safely compute untrusted bytecode" 643 bool "Enable seccomp to safely compute untrusted bytecode"
639 depends on PROC_FS 644 depends on PROC_FS
@@ -653,17 +658,6 @@ config SECCOMP
653 658
654source kernel/Kconfig.hz 659source kernel/Kconfig.hz
655 660
656config PHYSICAL_START
657 hex "Physical address where the kernel is loaded" if EMBEDDED
658 default "0x100000"
659 help
660 This gives the physical address where the kernel is loaded.
661 Primarily used in the case of kexec on panic where the
662 fail safe kernel needs to run at a different address than
663 the panic-ed kernel.
664
665 Don't change this unless you know what you are doing.
666
667config KEXEC 661config KEXEC
668 bool "kexec system call (EXPERIMENTAL)" 662 bool "kexec system call (EXPERIMENTAL)"
669 depends on EXPERIMENTAL 663 depends on EXPERIMENTAL
@@ -683,11 +677,40 @@ config KEXEC
683 677
684config CRASH_DUMP 678config CRASH_DUMP
685 bool "kernel crash dumps (EXPERIMENTAL)" 679 bool "kernel crash dumps (EXPERIMENTAL)"
686 depends on EMBEDDED
687 depends on EXPERIMENTAL 680 depends on EXPERIMENTAL
688 depends on HIGHMEM 681 depends on HIGHMEM
689 help 682 help
690 Generate crash dump after being started by kexec. 683 Generate crash dump after being started by kexec.
684
685config PHYSICAL_START
686 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
687
688 default "0x1000000" if CRASH_DUMP
689 default "0x100000"
690 help
691 This gives the physical address where the kernel is loaded. Normally
692 for regular kernels this value is 0x100000 (1MB). But in the case
693 of kexec on panic the fail safe kernel needs to run at a different
694 address than the panic-ed kernel. This option is used to set the load
695 address for kernels used to capture crash dump on being kexec'ed
696 after panic. The default value for crash dump kernels is
697 0x1000000 (16MB). This can also be set based on the "X" value as
698 specified in the "crashkernel=YM@XM" command line boot parameter
699 passed to the panic-ed kernel. Typically this parameter is set as
700 crashkernel=64M@16M. Please take a look at
701 Documentation/kdump/kdump.txt for more details about crash dumps.
702
703 Don't change this unless you know what you are doing.
704
705config HOTPLUG_CPU
706 bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
707 depends on SMP && HOTPLUG && EXPERIMENTAL
708 ---help---
709 Say Y here to experiment with turning CPUs off and on. CPUs
710 can be controlled through /sys/devices/system/cpu.
711
712 Say N.
713
691endmenu 714endmenu
692 715
693 716
@@ -703,7 +726,7 @@ depends on PM && !X86_VISWS
703 726
704config APM 727config APM
705 tristate "APM (Advanced Power Management) BIOS support" 728 tristate "APM (Advanced Power Management) BIOS support"
706 depends on PM && PM_LEGACY 729 depends on PM
707 ---help--- 730 ---help---
708 APM is a BIOS specification for saving power using several different 731 APM is a BIOS specification for saving power using several different
709 techniques. This is mostly useful for battery powered laptops with 732 techniques. This is mostly useful for battery powered laptops with
@@ -974,15 +997,6 @@ config SCx200
974 This support is also available as a module. If compiled as a 997 This support is also available as a module. If compiled as a
975 module, it will be called scx200. 998 module, it will be called scx200.
976 999
977config HOTPLUG_CPU
978 bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
979 depends on SMP && HOTPLUG && EXPERIMENTAL
980 ---help---
981 Say Y here to experiment with turning CPUs off and on. CPUs
982 can be controlled through /sys/devices/system/cpu.
983
984 Say N.
985
986source "drivers/pcmcia/Kconfig" 1000source "drivers/pcmcia/Kconfig"
987 1001
988source "drivers/pci/hotplug/Kconfig" 1002source "drivers/pci/hotplug/Kconfig"
@@ -1059,3 +1073,7 @@ config X86_TRAMPOLINE
1059 bool 1073 bool
1060 depends on X86_SMP || (X86_VOYAGER && SMP) 1074 depends on X86_SMP || (X86_VOYAGER && SMP)
1061 default y 1075 default y
1076
1077config KTIME_SCALAR
1078 bool
1079 default y
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index d121ea18460f..36bef6543ac1 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -37,14 +37,11 @@ CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2)
37# CPU-specific tuning. Anything which can be shared with UML should go here. 37# CPU-specific tuning. Anything which can be shared with UML should go here.
38include $(srctree)/arch/i386/Makefile.cpu 38include $(srctree)/arch/i386/Makefile.cpu
39 39
40# -mregparm=3 works ok on gcc-3.0 and later 40cflags-$(CONFIG_REGPARM) += -mregparm=3
41#
42GCC_VERSION := $(call cc-version)
43cflags-$(CONFIG_REGPARM) += $(shell if [ $(GCC_VERSION) -ge 0300 ] ; then echo "-mregparm=3"; fi ;)
44 41
45# Disable unit-at-a-time mode, it makes gcc use a lot more stack 42# Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use
46# due to the lack of sharing of stacklots. 43# a lot more stack due to the lack of sharing of stacklots:
47CFLAGS += $(call cc-option,-fno-unit-at-a-time) 44CFLAGS += $(shell if [ $(call cc-version) -lt 0400 ] ; then echo $(call cc-option,-fno-unit-at-a-time); fi ;)
48 45
49CFLAGS += $(cflags-y) 46CFLAGS += $(cflags-y)
50 47
@@ -103,7 +100,7 @@ AFLAGS += $(mflags-y)
103boot := arch/i386/boot 100boot := arch/i386/boot
104 101
105.PHONY: zImage bzImage compressed zlilo bzlilo \ 102.PHONY: zImage bzImage compressed zlilo bzlilo \
106 zdisk bzdisk fdimage fdimage144 fdimage288 install kernel_install 103 zdisk bzdisk fdimage fdimage144 fdimage288 install
107 104
108all: bzImage 105all: bzImage
109 106
@@ -125,8 +122,7 @@ zdisk bzdisk: vmlinux
125fdimage fdimage144 fdimage288: vmlinux 122fdimage fdimage144 fdimage288: vmlinux
126 $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) $@ 123 $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) $@
127 124
128install: vmlinux 125install:
129install kernel_install:
130 $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install 126 $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install
131 127
132archclean: 128archclean:
diff --git a/arch/i386/Makefile.cpu b/arch/i386/Makefile.cpu
index 8e51456df23d..dcd936ef45db 100644
--- a/arch/i386/Makefile.cpu
+++ b/arch/i386/Makefile.cpu
@@ -1,7 +1,7 @@
1# CPU tuning section - shared with UML. 1# CPU tuning section - shared with UML.
2# Must change only cflags-y (or [yn]), not CFLAGS! That makes a difference for UML. 2# Must change only cflags-y (or [yn]), not CFLAGS! That makes a difference for UML.
3 3
4#-mtune exists since gcc 3.4, and some -mcpu flavors didn't exist in gcc 2.95. 4#-mtune exists since gcc 3.4
5HAS_MTUNE := $(call cc-option-yn, -mtune=i386) 5HAS_MTUNE := $(call cc-option-yn, -mtune=i386)
6ifeq ($(HAS_MTUNE),y) 6ifeq ($(HAS_MTUNE),y)
7tune = $(call cc-option,-mtune=$(1),) 7tune = $(call cc-option,-mtune=$(1),)
@@ -14,7 +14,7 @@ cflags-$(CONFIG_M386) += -march=i386
14cflags-$(CONFIG_M486) += -march=i486 14cflags-$(CONFIG_M486) += -march=i486
15cflags-$(CONFIG_M586) += -march=i586 15cflags-$(CONFIG_M586) += -march=i586
16cflags-$(CONFIG_M586TSC) += -march=i586 16cflags-$(CONFIG_M586TSC) += -march=i586
17cflags-$(CONFIG_M586MMX) += $(call cc-option,-march=pentium-mmx,-march=i586) 17cflags-$(CONFIG_M586MMX) += -march=pentium-mmx
18cflags-$(CONFIG_M686) += -march=i686 18cflags-$(CONFIG_M686) += -march=i686
19cflags-$(CONFIG_MPENTIUMII) += -march=i686 $(call tune,pentium2) 19cflags-$(CONFIG_MPENTIUMII) += -march=i686 $(call tune,pentium2)
20cflags-$(CONFIG_MPENTIUMIII) += -march=i686 $(call tune,pentium3) 20cflags-$(CONFIG_MPENTIUMIII) += -march=i686 $(call tune,pentium3)
@@ -23,8 +23,8 @@ cflags-$(CONFIG_MPENTIUM4) += -march=i686 $(call tune,pentium4)
23cflags-$(CONFIG_MK6) += -march=k6 23cflags-$(CONFIG_MK6) += -march=k6
24# Please note, that patches that add -march=athlon-xp and friends are pointless. 24# Please note, that patches that add -march=athlon-xp and friends are pointless.
25# They make zero difference whatsosever to performance at this time. 25# They make zero difference whatsosever to performance at this time.
26cflags-$(CONFIG_MK7) += $(call cc-option,-march=athlon,-march=i686 $(align)-functions=4) 26cflags-$(CONFIG_MK7) += -march=athlon
27cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,$(call cc-option,-march=athlon,-march=i686 $(align)-functions=4)) 27cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,-march=athlon)
28cflags-$(CONFIG_MCRUSOE) += -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0 28cflags-$(CONFIG_MCRUSOE) += -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
29cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0 29cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
30cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586) 30cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586)
@@ -37,5 +37,5 @@ cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
37cflags-$(CONFIG_X86_ELAN) += -march=i486 37cflags-$(CONFIG_X86_ELAN) += -march=i486
38 38
39# Geode GX1 support 39# Geode GX1 support
40cflags-$(CONFIG_MGEODEGX1) += $(call cc-option,-march=pentium-mmx,-march=i486) 40cflags-$(CONFIG_MGEODEGX1) += -march=pentium-mmx
41 41
diff --git a/arch/i386/boot/Makefile b/arch/i386/boot/Makefile
index 1e71382d413a..f136752563b1 100644
--- a/arch/i386/boot/Makefile
+++ b/arch/i386/boot/Makefile
@@ -100,5 +100,5 @@ zlilo: $(BOOTIMAGE)
100 cp System.map $(INSTALL_PATH)/ 100 cp System.map $(INSTALL_PATH)/
101 if [ -x /sbin/lilo ]; then /sbin/lilo; else /etc/lilo/install; fi 101 if [ -x /sbin/lilo ]; then /sbin/lilo; else /etc/lilo/install; fi
102 102
103install: $(BOOTIMAGE) 103install:
104 sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $< System.map "$(INSTALL_PATH)" 104 sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)"
diff --git a/arch/i386/boot/compressed/misc.c b/arch/i386/boot/compressed/misc.c
index 82a807f9f5e6..f19f3a7492a5 100644
--- a/arch/i386/boot/compressed/misc.c
+++ b/arch/i386/boot/compressed/misc.c
@@ -11,7 +11,7 @@
11 11
12#include <linux/linkage.h> 12#include <linux/linkage.h>
13#include <linux/vmalloc.h> 13#include <linux/vmalloc.h>
14#include <linux/tty.h> 14#include <linux/screen_info.h>
15#include <asm/io.h> 15#include <asm/io.h>
16#include <asm/page.h> 16#include <asm/page.h>
17 17
diff --git a/arch/i386/boot/install.sh b/arch/i386/boot/install.sh
index f17b40dfc0f4..5e44c736eea8 100644
--- a/arch/i386/boot/install.sh
+++ b/arch/i386/boot/install.sh
@@ -19,6 +19,20 @@
19# $4 - default install path (blank if root directory) 19# $4 - default install path (blank if root directory)
20# 20#
21 21
22verify () {
23 if [ ! -f "$1" ]; then
24 echo "" 1>&2
25 echo " *** Missing file: $1" 1>&2
26 echo ' *** You need to run "make" before "make install".' 1>&2
27 echo "" 1>&2
28 exit 1
29 fi
30}
31
32# Make sure the files actually exist
33verify "$2"
34verify "$3"
35
22# User may have a custom install script 36# User may have a custom install script
23 37
24if [ -x ~/bin/${CROSS_COMPILE}installkernel ]; then exec ~/bin/${CROSS_COMPILE}installkernel "$@"; fi 38if [ -x ~/bin/${CROSS_COMPILE}installkernel ]; then exec ~/bin/${CROSS_COMPILE}installkernel "$@"; fi
diff --git a/arch/i386/boot/video.S b/arch/i386/boot/video.S
index 92f669470142..2ac40c8244c4 100644
--- a/arch/i386/boot/video.S
+++ b/arch/i386/boot/video.S
@@ -97,7 +97,6 @@
97#define PARAM_VESAPM_OFF 0x30 97#define PARAM_VESAPM_OFF 0x30
98#define PARAM_LFB_PAGES 0x32 98#define PARAM_LFB_PAGES 0x32
99#define PARAM_VESA_ATTRIB 0x34 99#define PARAM_VESA_ATTRIB 0x34
100#define PARAM_CAPABILITIES 0x36
101 100
102/* Define DO_STORE according to CONFIG_VIDEO_RETAIN */ 101/* Define DO_STORE according to CONFIG_VIDEO_RETAIN */
103#ifdef CONFIG_VIDEO_RETAIN 102#ifdef CONFIG_VIDEO_RETAIN
@@ -234,10 +233,6 @@ mopar_gr:
234 movw 18(%di), %ax 233 movw 18(%di), %ax
235 movl %eax, %fs:(PARAM_LFB_SIZE) 234 movl %eax, %fs:(PARAM_LFB_SIZE)
236 235
237# store mode capabilities
238 movl 10(%di), %eax
239 movl %eax, %fs:(PARAM_CAPABILITIES)
240
241# switching the DAC to 8-bit is for <= 8 bpp only 236# switching the DAC to 8-bit is for <= 8 bpp only
242 movw %fs:(PARAM_LFB_DEPTH), %ax 237 movw %fs:(PARAM_LFB_DEPTH), %ax
243 cmpw $8, %ax 238 cmpw $8, %ax
diff --git a/arch/i386/crypto/aes-i586-asm.S b/arch/i386/crypto/aes-i586-asm.S
index 7b73c67cb4e8..911b15377f2e 100644
--- a/arch/i386/crypto/aes-i586-asm.S
+++ b/arch/i386/crypto/aes-i586-asm.S
@@ -255,18 +255,17 @@ aes_enc_blk:
255 xor 8(%ebp),%r4 255 xor 8(%ebp),%r4
256 xor 12(%ebp),%r5 256 xor 12(%ebp),%r5
257 257
258 sub $8,%esp // space for register saves on stack 258 sub $8,%esp // space for register saves on stack
259 add $16,%ebp // increment to next round key 259 add $16,%ebp // increment to next round key
260 sub $10,%r3 260 cmp $12,%r3
261 je 4f // 10 rounds for 128-bit key 261 jb 4f // 10 rounds for 128-bit key
262 add $32,%ebp 262 lea 32(%ebp),%ebp
263 sub $2,%r3 263 je 3f // 12 rounds for 192-bit key
264 je 3f // 12 rounds for 128-bit key 264 lea 32(%ebp),%ebp
265 add $32,%ebp 265
266 2662: fwd_rnd1( -64(%ebp) ,ft_tab) // 14 rounds for 256-bit key
2672: fwd_rnd1( -64(%ebp) ,ft_tab) // 14 rounds for 128-bit key
268 fwd_rnd2( -48(%ebp) ,ft_tab) 267 fwd_rnd2( -48(%ebp) ,ft_tab)
2693: fwd_rnd1( -32(%ebp) ,ft_tab) // 12 rounds for 128-bit key 2683: fwd_rnd1( -32(%ebp) ,ft_tab) // 12 rounds for 192-bit key
270 fwd_rnd2( -16(%ebp) ,ft_tab) 269 fwd_rnd2( -16(%ebp) ,ft_tab)
2714: fwd_rnd1( (%ebp) ,ft_tab) // 10 rounds for 128-bit key 2704: fwd_rnd1( (%ebp) ,ft_tab) // 10 rounds for 128-bit key
272 fwd_rnd2( +16(%ebp) ,ft_tab) 271 fwd_rnd2( +16(%ebp) ,ft_tab)
@@ -334,18 +333,17 @@ aes_dec_blk:
334 xor 8(%ebp),%r4 333 xor 8(%ebp),%r4
335 xor 12(%ebp),%r5 334 xor 12(%ebp),%r5
336 335
337 sub $8,%esp // space for register saves on stack 336 sub $8,%esp // space for register saves on stack
338 sub $16,%ebp // increment to next round key 337 sub $16,%ebp // increment to next round key
339 sub $10,%r3 338 cmp $12,%r3
340 je 4f // 10 rounds for 128-bit key 339 jb 4f // 10 rounds for 128-bit key
341 sub $32,%ebp 340 lea -32(%ebp),%ebp
342 sub $2,%r3 341 je 3f // 12 rounds for 192-bit key
343 je 3f // 12 rounds for 128-bit key 342 lea -32(%ebp),%ebp
344 sub $32,%ebp
345 343
3462: inv_rnd1( +64(%ebp), it_tab) // 14 rounds for 128-bit key 3442: inv_rnd1( +64(%ebp), it_tab) // 14 rounds for 256-bit key
347 inv_rnd2( +48(%ebp), it_tab) 345 inv_rnd2( +48(%ebp), it_tab)
3483: inv_rnd1( +32(%ebp), it_tab) // 12 rounds for 128-bit key 3463: inv_rnd1( +32(%ebp), it_tab) // 12 rounds for 192-bit key
349 inv_rnd2( +16(%ebp), it_tab) 347 inv_rnd2( +16(%ebp), it_tab)
3504: inv_rnd1( (%ebp), it_tab) // 10 rounds for 128-bit key 3484: inv_rnd1( (%ebp), it_tab) // 10 rounds for 128-bit key
351 inv_rnd2( -16(%ebp), it_tab) 349 inv_rnd2( -16(%ebp), it_tab)
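Background note, not part of the patch: the key-length dispatch rewritten above selects the entry point for 10, 12 or 14 AES rounds by comparing the round count in %r3 against 12 ("cmp $12 / jb 4f / ... je 3f") instead of the old subtract-and-test chain, and the comments are corrected to say 192-bit and 256-bit where they previously all said 128-bit. A minimal user-space C sketch of the same key-size-to-round-count mapping, for illustration only:

#include <stdio.h>

/*
 * Illustrative only: the AES round count is fixed by key length
 * (10 rounds for 128-bit, 12 for 192-bit, 14 for 256-bit keys).
 * The patched assembly uses this count to jump into the unrolled
 * round sequence at the right depth.
 */
static int aes_rounds(unsigned int key_len_bytes)
{
	switch (key_len_bytes) {
	case 16: return 10;	/* 128-bit key */
	case 24: return 12;	/* 192-bit key */
	case 32: return 14;	/* 256-bit key */
	default: return -1;	/* unsupported key size */
	}
}

int main(void)
{
	unsigned int len;

	for (len = 16; len <= 32; len += 8)
		printf("%u-bit key -> %d rounds\n", len * 8, aes_rounds(len));
	return 0;
}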
diff --git a/arch/i386/crypto/aes.c b/arch/i386/crypto/aes.c
index 88ee85c3b43b..a50397b1d5c7 100644
--- a/arch/i386/crypto/aes.c
+++ b/arch/i386/crypto/aes.c
@@ -36,6 +36,8 @@
36 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com> 36 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
37 * 37 *
38 */ 38 */
39
40#include <asm/byteorder.h>
39#include <linux/kernel.h> 41#include <linux/kernel.h>
40#include <linux/module.h> 42#include <linux/module.h>
41#include <linux/init.h> 43#include <linux/init.h>
@@ -59,7 +61,6 @@ struct aes_ctx {
59}; 61};
60 62
61#define WPOLY 0x011b 63#define WPOLY 0x011b
62#define u32_in(x) le32_to_cpup((const __le32 *)(x))
63#define bytes2word(b0, b1, b2, b3) \ 64#define bytes2word(b0, b1, b2, b3) \
64 (((u32)(b3) << 24) | ((u32)(b2) << 16) | ((u32)(b1) << 8) | (b0)) 65 (((u32)(b3) << 24) | ((u32)(b2) << 16) | ((u32)(b1) << 8) | (b0))
65 66
@@ -93,7 +94,6 @@ static u32 rcon_tab[RC_LENGTH];
93 94
94u32 ft_tab[4][256]; 95u32 ft_tab[4][256];
95u32 fl_tab[4][256]; 96u32 fl_tab[4][256];
96static u32 ls_tab[4][256];
97static u32 im_tab[4][256]; 97static u32 im_tab[4][256];
98u32 il_tab[4][256]; 98u32 il_tab[4][256];
99u32 it_tab[4][256]; 99u32 it_tab[4][256];
@@ -144,15 +144,6 @@ static void gen_tabs(void)
144 fl_tab[2][i] = upr(w, 2); 144 fl_tab[2][i] = upr(w, 2);
145 fl_tab[3][i] = upr(w, 3); 145 fl_tab[3][i] = upr(w, 3);
146 146
147 /*
148 * table for key schedule if fl_tab above is
149 * not of the required form
150 */
151 ls_tab[0][i] = w;
152 ls_tab[1][i] = upr(w, 1);
153 ls_tab[2][i] = upr(w, 2);
154 ls_tab[3][i] = upr(w, 3);
155
156 b = fi(inv_affine((u8)i)); 147 b = fi(inv_affine((u8)i));
157 w = bytes2word(fe(b), f9(b), fd(b), fb(b)); 148 w = bytes2word(fe(b), f9(b), fd(b), fb(b));
158 149
@@ -393,13 +384,14 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
393 int i; 384 int i;
394 u32 ss[8]; 385 u32 ss[8];
395 struct aes_ctx *ctx = ctx_arg; 386 struct aes_ctx *ctx = ctx_arg;
387 const __le32 *key = (const __le32 *)in_key;
396 388
397 /* encryption schedule */ 389 /* encryption schedule */
398 390
399 ctx->ekey[0] = ss[0] = u32_in(in_key); 391 ctx->ekey[0] = ss[0] = le32_to_cpu(key[0]);
400 ctx->ekey[1] = ss[1] = u32_in(in_key + 4); 392 ctx->ekey[1] = ss[1] = le32_to_cpu(key[1]);
401 ctx->ekey[2] = ss[2] = u32_in(in_key + 8); 393 ctx->ekey[2] = ss[2] = le32_to_cpu(key[2]);
402 ctx->ekey[3] = ss[3] = u32_in(in_key + 12); 394 ctx->ekey[3] = ss[3] = le32_to_cpu(key[3]);
403 395
404 switch(key_len) { 396 switch(key_len) {
405 case 16: 397 case 16:
@@ -410,8 +402,8 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
410 break; 402 break;
411 403
412 case 24: 404 case 24:
413 ctx->ekey[4] = ss[4] = u32_in(in_key + 16); 405 ctx->ekey[4] = ss[4] = le32_to_cpu(key[4]);
414 ctx->ekey[5] = ss[5] = u32_in(in_key + 20); 406 ctx->ekey[5] = ss[5] = le32_to_cpu(key[5]);
415 for (i = 0; i < 7; i++) 407 for (i = 0; i < 7; i++)
416 ke6(ctx->ekey, i); 408 ke6(ctx->ekey, i);
417 kel6(ctx->ekey, 7); 409 kel6(ctx->ekey, 7);
@@ -419,10 +411,10 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
419 break; 411 break;
420 412
421 case 32: 413 case 32:
422 ctx->ekey[4] = ss[4] = u32_in(in_key + 16); 414 ctx->ekey[4] = ss[4] = le32_to_cpu(key[4]);
423 ctx->ekey[5] = ss[5] = u32_in(in_key + 20); 415 ctx->ekey[5] = ss[5] = le32_to_cpu(key[5]);
424 ctx->ekey[6] = ss[6] = u32_in(in_key + 24); 416 ctx->ekey[6] = ss[6] = le32_to_cpu(key[6]);
425 ctx->ekey[7] = ss[7] = u32_in(in_key + 28); 417 ctx->ekey[7] = ss[7] = le32_to_cpu(key[7]);
426 for (i = 0; i < 6; i++) 418 for (i = 0; i < 6; i++)
427 ke8(ctx->ekey, i); 419 ke8(ctx->ekey, i);
428 kel8(ctx->ekey, 6); 420 kel8(ctx->ekey, 6);
@@ -436,10 +428,10 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
436 428
437 /* decryption schedule */ 429 /* decryption schedule */
438 430
439 ctx->dkey[0] = ss[0] = u32_in(in_key); 431 ctx->dkey[0] = ss[0] = le32_to_cpu(key[0]);
440 ctx->dkey[1] = ss[1] = u32_in(in_key + 4); 432 ctx->dkey[1] = ss[1] = le32_to_cpu(key[1]);
441 ctx->dkey[2] = ss[2] = u32_in(in_key + 8); 433 ctx->dkey[2] = ss[2] = le32_to_cpu(key[2]);
442 ctx->dkey[3] = ss[3] = u32_in(in_key + 12); 434 ctx->dkey[3] = ss[3] = le32_to_cpu(key[3]);
443 435
444 switch (key_len) { 436 switch (key_len) {
445 case 16: 437 case 16:
@@ -450,8 +442,8 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
450 break; 442 break;
451 443
452 case 24: 444 case 24:
453 ctx->dkey[4] = ff(ss[4] = u32_in(in_key + 16)); 445 ctx->dkey[4] = ff(ss[4] = le32_to_cpu(key[4]));
454 ctx->dkey[5] = ff(ss[5] = u32_in(in_key + 20)); 446 ctx->dkey[5] = ff(ss[5] = le32_to_cpu(key[5]));
455 kdf6(ctx->dkey, 0); 447 kdf6(ctx->dkey, 0);
456 for (i = 1; i < 7; i++) 448 for (i = 1; i < 7; i++)
457 kd6(ctx->dkey, i); 449 kd6(ctx->dkey, i);
@@ -459,10 +451,10 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
459 break; 451 break;
460 452
461 case 32: 453 case 32:
462 ctx->dkey[4] = ff(ss[4] = u32_in(in_key + 16)); 454 ctx->dkey[4] = ff(ss[4] = le32_to_cpu(key[4]));
463 ctx->dkey[5] = ff(ss[5] = u32_in(in_key + 20)); 455 ctx->dkey[5] = ff(ss[5] = le32_to_cpu(key[5]));
464 ctx->dkey[6] = ff(ss[6] = u32_in(in_key + 24)); 456 ctx->dkey[6] = ff(ss[6] = le32_to_cpu(key[6]));
465 ctx->dkey[7] = ff(ss[7] = u32_in(in_key + 28)); 457 ctx->dkey[7] = ff(ss[7] = le32_to_cpu(key[7]));
466 kdf8(ctx->dkey, 0); 458 kdf8(ctx->dkey, 0);
467 for (i = 1; i < 6; i++) 459 for (i = 1; i < 6; i++)
468 kd8(ctx->dkey, i); 460 kd8(ctx->dkey, i);
@@ -484,6 +476,8 @@ static inline void aes_decrypt(void *ctx, u8 *dst, const u8 *src)
484 476
485static struct crypto_alg aes_alg = { 477static struct crypto_alg aes_alg = {
486 .cra_name = "aes", 478 .cra_name = "aes",
479 .cra_driver_name = "aes-i586",
480 .cra_priority = 200,
487 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 481 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
488 .cra_blocksize = AES_BLOCK_SIZE, 482 .cra_blocksize = AES_BLOCK_SIZE,
489 .cra_ctxsize = sizeof(struct aes_ctx), 483 .cra_ctxsize = sizeof(struct aes_ctx),
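Illustration, not part of the patch: aes.c now reads the key through a const __le32 pointer with le32_to_cpu() instead of the old u32_in() macro. The sketch below shows the same little-endian word loading in plain user-space C; load_le32() is a stand-in for the kernel's le32_to_cpu() on a byte buffer and gives the same result on any host endianness:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* assemble a 32-bit word from four little-endian bytes */
static uint32_t load_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	uint8_t key[16];
	uint32_t ss[4];
	int i;

	memset(key, 0, sizeof(key));
	key[0] = 0x01; key[4] = 0x02; key[8] = 0x03; key[12] = 0x04;

	/* mirrors: ctx->ekey[i] = ss[i] = le32_to_cpu(key[i]); */
	for (i = 0; i < 4; i++)
		ss[i] = load_le32(key + 4 * i);

	for (i = 0; i < 4; i++)
		printf("ss[%d] = 0x%08x\n", i, ss[i]);
	return 0;
}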
diff --git a/arch/i386/defconfig b/arch/i386/defconfig
index 6a431b926019..3cbe6e9cb9fc 100644
--- a/arch/i386/defconfig
+++ b/arch/i386/defconfig
@@ -644,6 +644,8 @@ CONFIG_8139TOO_PIO=y
644# CONFIG_ACENIC is not set 644# CONFIG_ACENIC is not set
645# CONFIG_DL2K is not set 645# CONFIG_DL2K is not set
646# CONFIG_E1000 is not set 646# CONFIG_E1000 is not set
647# CONFIG_E1000_NAPI is not set
648# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
647# CONFIG_NS83820 is not set 649# CONFIG_NS83820 is not set
648# CONFIG_HAMACHI is not set 650# CONFIG_HAMACHI is not set
649# CONFIG_YELLOWFIN is not set 651# CONFIG_YELLOWFIN is not set
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index f10de0f2c5e6..60c3f76dfca4 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -4,10 +4,10 @@
4 4
5extra-y := head.o init_task.o vmlinux.lds 5extra-y := head.o init_task.o vmlinux.lds
6 6
7obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o vm86.o \ 7obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \
8 ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \ 8 ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \
9 pci-dma.o i386_ksyms.o i387.o dmi_scan.o bootflag.o \ 9 pci-dma.o i386_ksyms.o i387.o dmi_scan.o bootflag.o \
10 doublefault.o quirks.o i8237.o 10 quirks.o i8237.o
11 11
12obj-y += cpu/ 12obj-y += cpu/
13obj-y += timers/ 13obj-y += timers/
@@ -25,6 +25,7 @@ obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
25obj-$(CONFIG_X86_IO_APIC) += io_apic.o 25obj-$(CONFIG_X86_IO_APIC) += io_apic.o
26obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups.o 26obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups.o
27obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o 27obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
28obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
28obj-$(CONFIG_X86_NUMAQ) += numaq.o 29obj-$(CONFIG_X86_NUMAQ) += numaq.o
29obj-$(CONFIG_X86_SUMMIT_NUMA) += summit.o 30obj-$(CONFIG_X86_SUMMIT_NUMA) += summit.o
30obj-$(CONFIG_KPROBES) += kprobes.o 31obj-$(CONFIG_KPROBES) += kprobes.o
@@ -33,6 +34,8 @@ obj-y += sysenter.o vsyscall.o
33obj-$(CONFIG_ACPI_SRAT) += srat.o 34obj-$(CONFIG_ACPI_SRAT) += srat.o
34obj-$(CONFIG_HPET_TIMER) += time_hpet.o 35obj-$(CONFIG_HPET_TIMER) += time_hpet.o
35obj-$(CONFIG_EFI) += efi.o efi_stub.o 36obj-$(CONFIG_EFI) += efi.o efi_stub.o
37obj-$(CONFIG_DOUBLEFAULT) += doublefault.o
38obj-$(CONFIG_VM86) += vm86.o
36obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 39obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
37 40
38EXTRA_AFLAGS := -traditional 41EXTRA_AFLAGS := -traditional
diff --git a/arch/i386/kernel/acpi/Makefile b/arch/i386/kernel/acpi/Makefile
index 267ca48e1b6c..d51c7313cae8 100644
--- a/arch/i386/kernel/acpi/Makefile
+++ b/arch/i386/kernel/acpi/Makefile
@@ -3,6 +3,6 @@ obj-$(CONFIG_X86_IO_APIC) += earlyquirk.o
3obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o 3obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o
4 4
5ifneq ($(CONFIG_ACPI_PROCESSOR),) 5ifneq ($(CONFIG_ACPI_PROCESSOR),)
6obj-y += cstate.o 6obj-y += cstate.o processor.o
7endif 7endif
8 8
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index 1f378df14f34..f21fa0d4482f 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -108,7 +108,7 @@ char *__acpi_map_table(unsigned long phys_addr, unsigned long size)
108 if (!phys_addr || !size) 108 if (!phys_addr || !size)
109 return NULL; 109 return NULL;
110 110
111 if (phys_addr < (end_pfn_map << PAGE_SHIFT)) 111 if (phys_addr+size <= (end_pfn_map << PAGE_SHIFT) + PAGE_SIZE)
112 return __va(phys_addr); 112 return __va(phys_addr);
113 113
114 return NULL; 114 return NULL;
diff --git a/arch/i386/kernel/acpi/cstate.c b/arch/i386/kernel/acpi/cstate.c
index 4c3036ba65df..25db49ef1770 100644
--- a/arch/i386/kernel/acpi/cstate.c
+++ b/arch/i386/kernel/acpi/cstate.c
@@ -14,64 +14,6 @@
14#include <acpi/processor.h> 14#include <acpi/processor.h>
15#include <asm/acpi.h> 15#include <asm/acpi.h>
16 16
17static void acpi_processor_power_init_intel_pdc(struct acpi_processor_power
18 *pow)
19{
20 struct acpi_object_list *obj_list;
21 union acpi_object *obj;
22 u32 *buf;
23
24 /* allocate and initialize pdc. It will be used later. */
25 obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
26 if (!obj_list) {
27 printk(KERN_ERR "Memory allocation error\n");
28 return;
29 }
30
31 obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
32 if (!obj) {
33 printk(KERN_ERR "Memory allocation error\n");
34 kfree(obj_list);
35 return;
36 }
37
38 buf = kmalloc(12, GFP_KERNEL);
39 if (!buf) {
40 printk(KERN_ERR "Memory allocation error\n");
41 kfree(obj);
42 kfree(obj_list);
43 return;
44 }
45
46 buf[0] = ACPI_PDC_REVISION_ID;
47 buf[1] = 1;
48 buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
49
50 obj->type = ACPI_TYPE_BUFFER;
51 obj->buffer.length = 12;
52 obj->buffer.pointer = (u8 *) buf;
53 obj_list->count = 1;
54 obj_list->pointer = obj;
55 pow->pdc = obj_list;
56
57 return;
58}
59
60/* Initialize _PDC data based on the CPU vendor */
61void acpi_processor_power_init_pdc(struct acpi_processor_power *pow,
62 unsigned int cpu)
63{
64 struct cpuinfo_x86 *c = cpu_data + cpu;
65
66 pow->pdc = NULL;
67 if (c->x86_vendor == X86_VENDOR_INTEL)
68 acpi_processor_power_init_intel_pdc(pow);
69
70 return;
71}
72
73EXPORT_SYMBOL(acpi_processor_power_init_pdc);
74
75/* 17/*
76 * Initialize bm_flags based on the CPU cache properties 18 * Initialize bm_flags based on the CPU cache properties
77 * On SMP it depends on cache configuration 19 * On SMP it depends on cache configuration
diff --git a/arch/i386/kernel/acpi/processor.c b/arch/i386/kernel/acpi/processor.c
new file mode 100644
index 000000000000..9f4cc02717ec
--- /dev/null
+++ b/arch/i386/kernel/acpi/processor.c
@@ -0,0 +1,75 @@
1/*
2 * arch/i386/kernel/acpi/processor.c
3 *
4 * Copyright (C) 2005 Intel Corporation
5 * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
6 * - Added _PDC for platforms with Intel CPUs
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/init.h>
12#include <linux/acpi.h>
13
14#include <acpi/processor.h>
15#include <asm/acpi.h>
16
17static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
18{
19 struct acpi_object_list *obj_list;
20 union acpi_object *obj;
21 u32 *buf;
22
23 /* allocate and initialize pdc. It will be used later. */
24 obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
25 if (!obj_list) {
26 printk(KERN_ERR "Memory allocation error\n");
27 return;
28 }
29
30 obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
31 if (!obj) {
32 printk(KERN_ERR "Memory allocation error\n");
33 kfree(obj_list);
34 return;
35 }
36
37 buf = kmalloc(12, GFP_KERNEL);
38 if (!buf) {
39 printk(KERN_ERR "Memory allocation error\n");
40 kfree(obj);
41 kfree(obj_list);
42 return;
43 }
44
45 buf[0] = ACPI_PDC_REVISION_ID;
46 buf[1] = 1;
47 buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
48
49 if (cpu_has(c, X86_FEATURE_EST))
50 buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
51
52 obj->type = ACPI_TYPE_BUFFER;
53 obj->buffer.length = 12;
54 obj->buffer.pointer = (u8 *) buf;
55 obj_list->count = 1;
56 obj_list->pointer = obj;
57 pr->pdc = obj_list;
58
59 return;
60}
61
62/* Initialize _PDC data based on the CPU vendor */
63void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
64{
65 unsigned int cpu = pr->id;
66 struct cpuinfo_x86 *c = cpu_data + cpu;
67
68 pr->pdc = NULL;
69 if (c->x86_vendor == X86_VENDOR_INTEL)
70 init_intel_pdc(pr, c);
71
72 return;
73}
74
75EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index d8f94e78de8a..acd3f1e34ca6 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -26,6 +26,7 @@
26#include <linux/kernel_stat.h> 26#include <linux/kernel_stat.h>
27#include <linux/sysdev.h> 27#include <linux/sysdev.h>
28#include <linux/cpu.h> 28#include <linux/cpu.h>
29#include <linux/module.h>
29 30
30#include <asm/atomic.h> 31#include <asm/atomic.h>
31#include <asm/smp.h> 32#include <asm/smp.h>
@@ -37,10 +38,17 @@
37#include <asm/i8253.h> 38#include <asm/i8253.h>
38 39
39#include <mach_apic.h> 40#include <mach_apic.h>
41#include <mach_ipi.h>
40 42
41#include "io_ports.h" 43#include "io_ports.h"
42 44
43/* 45/*
46 * cpu_mask that denotes the CPUs that needs timer interrupt coming in as
47 * IPIs in place of local APIC timers
48 */
49static cpumask_t timer_bcast_ipi;
50
51/*
44 * Knob to control our willingness to enable the local APIC. 52 * Knob to control our willingness to enable the local APIC.
45 */ 53 */
46int enable_local_apic __initdata = 0; /* -1=force-disable, +1=force-enable */ 54int enable_local_apic __initdata = 0; /* -1=force-disable, +1=force-enable */
@@ -92,10 +100,6 @@ void __init apic_intr_init(void)
92/* Using APIC to generate smp_local_timer_interrupt? */ 100/* Using APIC to generate smp_local_timer_interrupt? */
93int using_apic_timer = 0; 101int using_apic_timer = 0;
94 102
95static DEFINE_PER_CPU(int, prof_multiplier) = 1;
96static DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
97static DEFINE_PER_CPU(int, prof_counter) = 1;
98
99static int enabled_via_apicbase; 103static int enabled_via_apicbase;
100 104
101void enable_NMI_through_LVT0 (void * dummy) 105void enable_NMI_through_LVT0 (void * dummy)
@@ -935,11 +939,16 @@ void (*wait_timer_tick)(void) __devinitdata = wait_8254_wraparound;
935static void __setup_APIC_LVTT(unsigned int clocks) 939static void __setup_APIC_LVTT(unsigned int clocks)
936{ 940{
937 unsigned int lvtt_value, tmp_value, ver; 941 unsigned int lvtt_value, tmp_value, ver;
942 int cpu = smp_processor_id();
938 943
939 ver = GET_APIC_VERSION(apic_read(APIC_LVR)); 944 ver = GET_APIC_VERSION(apic_read(APIC_LVR));
940 lvtt_value = APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR; 945 lvtt_value = APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR;
941 if (!APIC_INTEGRATED(ver)) 946 if (!APIC_INTEGRATED(ver))
942 lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV); 947 lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);
948
949 if (cpu_isset(cpu, timer_bcast_ipi))
950 lvtt_value |= APIC_LVT_MASKED;
951
943 apic_write_around(APIC_LVTT, lvtt_value); 952 apic_write_around(APIC_LVTT, lvtt_value);
944 953
945 /* 954 /*
@@ -1072,7 +1081,7 @@ void __devinit setup_secondary_APIC_clock(void)
1072 setup_APIC_timer(calibration_result); 1081 setup_APIC_timer(calibration_result);
1073} 1082}
1074 1083
1075void __devinit disable_APIC_timer(void) 1084void disable_APIC_timer(void)
1076{ 1085{
1077 if (using_apic_timer) { 1086 if (using_apic_timer) {
1078 unsigned long v; 1087 unsigned long v;
@@ -1084,7 +1093,10 @@ void __devinit disable_APIC_timer(void)
1084 1093
1085void enable_APIC_timer(void) 1094void enable_APIC_timer(void)
1086{ 1095{
1087 if (using_apic_timer) { 1096 int cpu = smp_processor_id();
1097
1098 if (using_apic_timer &&
1099 !cpu_isset(cpu, timer_bcast_ipi)) {
1088 unsigned long v; 1100 unsigned long v;
1089 1101
1090 v = apic_read(APIC_LVTT); 1102 v = apic_read(APIC_LVTT);
@@ -1092,33 +1104,31 @@ void enable_APIC_timer(void)
1092 } 1104 }
1093} 1105}
1094 1106
1095/* 1107void switch_APIC_timer_to_ipi(void *cpumask)
1096 * the frequency of the profiling timer can be changed
1097 * by writing a multiplier value into /proc/profile.
1098 */
1099int setup_profiling_timer(unsigned int multiplier)
1100{ 1108{
1101 int i; 1109 cpumask_t mask = *(cpumask_t *)cpumask;
1110 int cpu = smp_processor_id();
1102 1111
1103 /* 1112 if (cpu_isset(cpu, mask) &&
1104 * Sanity check. [at least 500 APIC cycles should be 1113 !cpu_isset(cpu, timer_bcast_ipi)) {
1105 * between APIC interrupts as a rule of thumb, to avoid 1114 disable_APIC_timer();
1106 * irqs flooding us] 1115 cpu_set(cpu, timer_bcast_ipi);
1107 */ 1116 }
1108 if ( (!multiplier) || (calibration_result/multiplier < 500)) 1117}
1109 return -EINVAL; 1118EXPORT_SYMBOL(switch_APIC_timer_to_ipi);
1110
1111 /*
1112 * Set the new multiplier for each CPU. CPUs don't start using the
1113 * new values until the next timer interrupt in which they do process
1114 * accounting. At that time they also adjust their APIC timers
1115 * accordingly.
1116 */
1117 for (i = 0; i < NR_CPUS; ++i)
1118 per_cpu(prof_multiplier, i) = multiplier;
1119 1119
1120 return 0; 1120void switch_ipi_to_APIC_timer(void *cpumask)
1121{
1122 cpumask_t mask = *(cpumask_t *)cpumask;
1123 int cpu = smp_processor_id();
1124
1125 if (cpu_isset(cpu, mask) &&
1126 cpu_isset(cpu, timer_bcast_ipi)) {
1127 cpu_clear(cpu, timer_bcast_ipi);
1128 enable_APIC_timer();
1129 }
1121} 1130}
1131EXPORT_SYMBOL(switch_ipi_to_APIC_timer);
1122 1132
1123#undef APIC_DIVISOR 1133#undef APIC_DIVISOR
1124 1134
@@ -1134,32 +1144,10 @@ int setup_profiling_timer(unsigned int multiplier)
1134 1144
1135inline void smp_local_timer_interrupt(struct pt_regs * regs) 1145inline void smp_local_timer_interrupt(struct pt_regs * regs)
1136{ 1146{
1137 int cpu = smp_processor_id();
1138
1139 profile_tick(CPU_PROFILING, regs); 1147 profile_tick(CPU_PROFILING, regs);
1140 if (--per_cpu(prof_counter, cpu) <= 0) {
1141 /*
1142 * The multiplier may have changed since the last time we got
1143 * to this point as a result of the user writing to
1144 * /proc/profile. In this case we need to adjust the APIC
1145 * timer accordingly.
1146 *
1147 * Interrupts are already masked off at this point.
1148 */
1149 per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu);
1150 if (per_cpu(prof_counter, cpu) !=
1151 per_cpu(prof_old_multiplier, cpu)) {
1152 __setup_APIC_LVTT(
1153 calibration_result/
1154 per_cpu(prof_counter, cpu));
1155 per_cpu(prof_old_multiplier, cpu) =
1156 per_cpu(prof_counter, cpu);
1157 }
1158
1159#ifdef CONFIG_SMP 1148#ifdef CONFIG_SMP
1160 update_process_times(user_mode_vm(regs)); 1149 update_process_times(user_mode_vm(regs));
1161#endif 1150#endif
1162 }
1163 1151
1164 /* 1152 /*
1165 * We take the 'long' return path, and there every subsystem 1153 * We take the 'long' return path, and there every subsystem
@@ -1206,6 +1194,43 @@ fastcall void smp_apic_timer_interrupt(struct pt_regs *regs)
1206 irq_exit(); 1194 irq_exit();
1207} 1195}
1208 1196
1197#ifndef CONFIG_SMP
1198static void up_apic_timer_interrupt_call(struct pt_regs *regs)
1199{
1200 int cpu = smp_processor_id();
1201
1202 /*
1203 * the NMI deadlock-detector uses this.
1204 */
1205 per_cpu(irq_stat, cpu).apic_timer_irqs++;
1206
1207 smp_local_timer_interrupt(regs);
1208}
1209#endif
1210
1211void smp_send_timer_broadcast_ipi(struct pt_regs *regs)
1212{
1213 cpumask_t mask;
1214
1215 cpus_and(mask, cpu_online_map, timer_bcast_ipi);
1216 if (!cpus_empty(mask)) {
1217#ifdef CONFIG_SMP
1218 send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
1219#else
1220 /*
1221 * We can directly call the apic timer interrupt handler
1222 * in UP case. Minus all irq related functions
1223 */
1224 up_apic_timer_interrupt_call(regs);
1225#endif
1226 }
1227}
1228
1229int setup_profiling_timer(unsigned int multiplier)
1230{
1231 return -EINVAL;
1232}
1233
1209/* 1234/*
1210 * This interrupt should _never_ happen with our APIC/SMP architecture 1235 * This interrupt should _never_ happen with our APIC/SMP architecture
1211 */ 1236 */
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index 2d793d4aef1a..05312a8abb8b 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -219,6 +219,7 @@
219#include <linux/sched.h> 219#include <linux/sched.h>
220#include <linux/pm.h> 220#include <linux/pm.h>
221#include <linux/pm_legacy.h> 221#include <linux/pm_legacy.h>
222#include <linux/capability.h>
222#include <linux/device.h> 223#include <linux/device.h>
223#include <linux/kernel.h> 224#include <linux/kernel.h>
224#include <linux/smp.h> 225#include <linux/smp.h>
@@ -2291,7 +2292,9 @@ static int __init apm_init(void)
2291 apm_info.disabled = 1; 2292 apm_info.disabled = 1;
2292 return -ENODEV; 2293 return -ENODEV;
2293 } 2294 }
2295#ifdef CONFIG_PM_LEGACY
2294 pm_active = 1; 2296 pm_active = 1;
2297#endif
2295 2298
2296 /* 2299 /*
2297 * Set up a segment that references the real mode segment 0x40 2300 * Set up a segment that references the real mode segment 0x40
@@ -2382,7 +2385,9 @@ static void __exit apm_exit(void)
2382 exit_kapmd = 1; 2385 exit_kapmd = 1;
2383 while (kapmd_running) 2386 while (kapmd_running)
2384 schedule(); 2387 schedule();
2388#ifdef CONFIG_PM_LEGACY
2385 pm_active = 0; 2389 pm_active = 0;
2390#endif
2386} 2391}
2387 2392
2388module_init(apm_init); 2393module_init(apm_init);
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index e7697e077f6b..333578a4e91a 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -216,6 +216,12 @@ static void __init init_amd(struct cpuinfo_x86 *c)
216 c->x86_max_cores = 1; 216 c->x86_max_cores = 1;
217 } 217 }
218 218
219 if (cpuid_eax(0x80000000) >= 0x80000007) {
220 c->x86_power = cpuid_edx(0x80000007);
221 if (c->x86_power & (1<<8))
222 set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
223 }
224
219#ifdef CONFIG_X86_HT 225#ifdef CONFIG_X86_HT
220 /* 226 /*
221 * On a AMD dual core setup the lower bits of the APIC id 227 * On a AMD dual core setup the lower bits of the APIC id
@@ -233,6 +239,7 @@ static void __init init_amd(struct cpuinfo_x86 *c)
233 cpu, c->x86_max_cores, cpu_core_id[cpu]); 239 cpu, c->x86_max_cores, cpu_core_id[cpu]);
234 } 240 }
235#endif 241#endif
242
236} 243}
237 244
238static unsigned int amd_size_cache(struct cpuinfo_x86 * c, unsigned int size) 245static unsigned int amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
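Illustration, not part of the patch: the init_amd() hunk above reads CPUID leaf 0x80000007 and treats EDX bit 8 as an invariant ("constant") TSC, setting X86_FEATURE_CONSTANT_TSC. The user-space sketch below performs the equivalent check with GCC's <cpuid.h> helper; __get_cpuid() is a compiler intrinsic wrapper, not a kernel API:

#include <stdio.h>
#include <cpuid.h>	/* GCC/Clang wrapper around the CPUID instruction */

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid() returns 0 if the requested leaf is unsupported */
	if (!__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0x80000007 not available");
		return 0;
	}

	/* EDX bit 8: invariant TSC, the bit the patch tests */
	printf("constant TSC: %s\n", (edx & (1u << 8)) ? "yes" : "no");
	return 0;
}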
diff --git a/arch/i386/kernel/cpu/changelog b/arch/i386/kernel/cpu/changelog
deleted file mode 100644
index cef76b80a710..000000000000
--- a/arch/i386/kernel/cpu/changelog
+++ /dev/null
@@ -1,63 +0,0 @@
1/*
2 * Enhanced CPU type detection by Mike Jagdis, Patrick St. Jean
3 * and Martin Mares, November 1997.
4 *
5 * Force Cyrix 6x86(MX) and M II processors to report MTRR capability
6 * and Cyrix "coma bug" recognition by
7 * Zoltán Böszörményi <zboszor@mail.externet.hu> February 1999.
8 *
9 * Force Centaur C6 processors to report MTRR capability.
10 * Bart Hartgers <bart@etpmod.phys.tue.nl>, May 1999.
11 *
12 * Intel Mobile Pentium II detection fix. Sean Gilley, June 1999.
13 *
14 * IDT Winchip tweaks, misc clean ups.
15 * Dave Jones <davej@suse.de>, August 1999
16 *
17 * Better detection of Centaur/IDT WinChip models.
18 * Bart Hartgers <bart@etpmod.phys.tue.nl>, August 1999.
19 *
20 * Cleaned up cache-detection code
21 * Dave Jones <davej@suse.de>, October 1999
22 *
23 * Added proper L2 cache detection for Coppermine
24 * Dragan Stancevic <visitor@valinux.com>, October 1999
25 *
26 * Added the original array for capability flags but forgot to credit
27 * myself :) (~1998) Fixed/cleaned up some cpu_model_info and other stuff
28 * Jauder Ho <jauderho@carumba.com>, January 2000
29 *
30 * Detection for Celeron coppermine, identify_cpu() overhauled,
31 * and a few other clean ups.
32 * Dave Jones <davej@suse.de>, April 2000
33 *
34 * Pentium III FXSR, SSE support
35 * General FPU state handling cleanups
36 * Gareth Hughes <gareth@valinux.com>, May 2000
37 *
38 * Added proper Cascades CPU and L2 cache detection for Cascades
39 * and 8-way type cache happy bunch from Intel:^)
40 * Dragan Stancevic <visitor@valinux.com>, May 2000
41 *
42 * Forward port AMD Duron errata T13 from 2.2.17pre
43 * Dave Jones <davej@suse.de>, August 2000
44 *
45 * Forward port lots of fixes/improvements from 2.2.18pre
46 * Cyrix III, Pentium IV support.
47 * Dave Jones <davej@suse.de>, October 2000
48 *
49 * Massive cleanup of CPU detection and bug handling;
50 * Transmeta CPU detection,
51 * H. Peter Anvin <hpa@zytor.com>, November 2000
52 *
53 * VIA C3 Support.
54 * Dave Jones <davej@suse.de>, March 2001
55 *
56 * AMD Athlon/Duron/Thunderbird bluesmoke support.
57 * Dave Jones <davej@suse.de>, April 2001.
58 *
59 * CacheSize bug workaround updates for AMD, Intel & VIA Cyrix.
60 * Dave Jones <davej@suse.de>, September, October 2001.
61 *
62 */
63
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index cca655688ffc..15aee26ec2b6 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -204,7 +204,10 @@ static int __devinit have_cpuid_p(void)
204 204
205/* Do minimum CPU detection early. 205/* Do minimum CPU detection early.
206 Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment. 206 Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
207 The others are not touched to avoid unwanted side effects. */ 207 The others are not touched to avoid unwanted side effects.
208
209 WARNING: this function is only called on the BP. Don't add code here
210 that is supposed to run on all CPUs. */
208static void __init early_cpu_detect(void) 211static void __init early_cpu_detect(void)
209{ 212{
210 struct cpuinfo_x86 *c = &boot_cpu_data; 213 struct cpuinfo_x86 *c = &boot_cpu_data;
@@ -236,12 +239,6 @@ static void __init early_cpu_detect(void)
236 if (cap0 & (1<<19)) 239 if (cap0 & (1<<19))
237 c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8; 240 c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
238 } 241 }
239
240 early_intel_workaround(c);
241
242#ifdef CONFIG_X86_HT
243 phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
244#endif
245} 242}
246 243
247void __devinit generic_identify(struct cpuinfo_x86 * c) 244void __devinit generic_identify(struct cpuinfo_x86 * c)
@@ -289,6 +286,12 @@ void __devinit generic_identify(struct cpuinfo_x86 * c)
289 get_model_name(c); /* Default name */ 286 get_model_name(c); /* Default name */
290 } 287 }
291 } 288 }
289
290 early_intel_workaround(c);
291
292#ifdef CONFIG_X86_HT
293 phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
294#endif
292} 295}
293 296
294static void __devinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) 297static void __devinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
@@ -609,8 +612,10 @@ void __devinit cpu_init(void)
609 load_TR_desc(); 612 load_TR_desc();
610 load_LDT(&init_mm.context); 613 load_LDT(&init_mm.context);
611 614
615#ifdef CONFIG_DOUBLEFAULT
612 /* Set up doublefault TSS pointer in the GDT */ 616 /* Set up doublefault TSS pointer in the GDT */
613 __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss); 617 __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
618#endif
614 619
615 /* Clear %fs and %gs. */ 620 /* Clear %fs and %gs. */
616 asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs"); 621 asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 871366b83b3f..3852d0a4c1b5 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -40,8 +40,6 @@
40#include <linux/acpi.h> 40#include <linux/acpi.h>
41#include <acpi/processor.h> 41#include <acpi/processor.h>
42 42
43#include "speedstep-est-common.h"
44
45#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg) 43#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg)
46 44
47MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski"); 45MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
@@ -297,68 +295,6 @@ acpi_cpufreq_guess_freq (
297} 295}
298 296
299 297
300/*
301 * acpi_processor_cpu_init_pdc_est - let BIOS know about the SMP capabilities
302 * of this driver
303 * @perf: processor-specific acpi_io_data struct
304 * @cpu: CPU being initialized
305 *
306 * To avoid issues with legacy OSes, some BIOSes require to be informed of
307 * the SMP capabilities of OS P-state driver. Here we set the bits in _PDC
308 * accordingly, for Enhanced Speedstep. Actual call to _PDC is done in
309 * driver/acpi/processor.c
310 */
311static void
312acpi_processor_cpu_init_pdc_est(
313 struct acpi_processor_performance *perf,
314 unsigned int cpu,
315 struct acpi_object_list *obj_list
316 )
317{
318 union acpi_object *obj;
319 u32 *buf;
320 struct cpuinfo_x86 *c = cpu_data + cpu;
321 dprintk("acpi_processor_cpu_init_pdc_est\n");
322
323 if (!cpu_has(c, X86_FEATURE_EST))
324 return;
325
326 /* Initialize pdc. It will be used later. */
327 if (!obj_list)
328 return;
329
330 if (!(obj_list->count && obj_list->pointer))
331 return;
332
333 obj = obj_list->pointer;
334 if ((obj->buffer.length == 12) && obj->buffer.pointer) {
335 buf = (u32 *)obj->buffer.pointer;
336 buf[0] = ACPI_PDC_REVISION_ID;
337 buf[1] = 1;
338 buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
339 perf->pdc = obj_list;
340 }
341 return;
342}
343
344
345/* CPU specific PDC initialization */
346static void
347acpi_processor_cpu_init_pdc(
348 struct acpi_processor_performance *perf,
349 unsigned int cpu,
350 struct acpi_object_list *obj_list
351 )
352{
353 struct cpuinfo_x86 *c = cpu_data + cpu;
354 dprintk("acpi_processor_cpu_init_pdc\n");
355 perf->pdc = NULL;
356 if (cpu_has(c, X86_FEATURE_EST))
357 acpi_processor_cpu_init_pdc_est(perf, cpu, obj_list);
358 return;
359}
360
361
362static int 298static int
363acpi_cpufreq_cpu_init ( 299acpi_cpufreq_cpu_init (
364 struct cpufreq_policy *policy) 300 struct cpufreq_policy *policy)
@@ -367,15 +303,9 @@ acpi_cpufreq_cpu_init (
367 unsigned int cpu = policy->cpu; 303 unsigned int cpu = policy->cpu;
368 struct cpufreq_acpi_io *data; 304 struct cpufreq_acpi_io *data;
369 unsigned int result = 0; 305 unsigned int result = 0;
370 306 struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
371 union acpi_object arg0 = {ACPI_TYPE_BUFFER};
372 u32 arg0_buf[3];
373 struct acpi_object_list arg_list = {1, &arg0};
374 307
375 dprintk("acpi_cpufreq_cpu_init\n"); 308 dprintk("acpi_cpufreq_cpu_init\n");
376 /* setup arg_list for _PDC settings */
377 arg0.buffer.length = 12;
378 arg0.buffer.pointer = (u8 *) arg0_buf;
379 309
380 data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL); 310 data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
381 if (!data) 311 if (!data)
@@ -383,14 +313,12 @@ acpi_cpufreq_cpu_init (
383 313
384 acpi_io_data[cpu] = data; 314 acpi_io_data[cpu] = data;
385 315
386 acpi_processor_cpu_init_pdc(&data->acpi_data, cpu, &arg_list);
387 result = acpi_processor_register_performance(&data->acpi_data, cpu); 316 result = acpi_processor_register_performance(&data->acpi_data, cpu);
388 data->acpi_data.pdc = NULL;
389 317
390 if (result) 318 if (result)
391 goto err_free; 319 goto err_free;
392 320
393 if (is_const_loops_cpu(cpu)) { 321 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
394 acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS; 322 acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
395 } 323 }
396 324
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index 0fbbd4c1072e..e11a09207ec8 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -980,7 +980,7 @@ static int powernowk8_verify(struct cpufreq_policy *pol)
980} 980}
981 981
982/* per CPU init entry point to the driver */ 982/* per CPU init entry point to the driver */
983static int __init powernowk8_cpu_init(struct cpufreq_policy *pol) 983static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
984{ 984{
985 struct powernow_k8_data *data; 985 struct powernow_k8_data *data;
986 cpumask_t oldmask = CPU_MASK_ALL; 986 cpumask_t oldmask = CPU_MASK_ALL;
@@ -1141,7 +1141,7 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
1141}; 1141};
1142 1142
1143/* driver entry point for init */ 1143/* driver entry point for init */
1144static int __init powernowk8_init(void) 1144static int __cpuinit powernowk8_init(void)
1145{ 1145{
1146 unsigned int i, supported_cpus = 0; 1146 unsigned int i, supported_cpus = 0;
1147 1147
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index edb9873e27e3..c173c0fa117a 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -35,8 +35,6 @@
35#include <asm/processor.h> 35#include <asm/processor.h>
36#include <asm/cpufeature.h> 36#include <asm/cpufeature.h>
37 37
38#include "speedstep-est-common.h"
39
40#define PFX "speedstep-centrino: " 38#define PFX "speedstep-centrino: "
41#define MAINTAINER "Jeremy Fitzhardinge <jeremy@goop.org>" 39#define MAINTAINER "Jeremy Fitzhardinge <jeremy@goop.org>"
42 40
@@ -364,22 +362,10 @@ static struct acpi_processor_performance p;
364 */ 362 */
365static int centrino_cpu_init_acpi(struct cpufreq_policy *policy) 363static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
366{ 364{
367 union acpi_object arg0 = {ACPI_TYPE_BUFFER};
368 u32 arg0_buf[3];
369 struct acpi_object_list arg_list = {1, &arg0};
370 unsigned long cur_freq; 365 unsigned long cur_freq;
371 int result = 0, i; 366 int result = 0, i;
372 unsigned int cpu = policy->cpu; 367 unsigned int cpu = policy->cpu;
373 368
374 /* _PDC settings */
375 arg0.buffer.length = 12;
376 arg0.buffer.pointer = (u8 *) arg0_buf;
377 arg0_buf[0] = ACPI_PDC_REVISION_ID;
378 arg0_buf[1] = 1;
379 arg0_buf[2] = ACPI_PDC_EST_CAPABILITY_SMP_MSR;
380
381 p.pdc = &arg_list;
382
383 /* register with ACPI core */ 369 /* register with ACPI core */
384 if (acpi_processor_register_performance(&p, cpu)) { 370 if (acpi_processor_register_performance(&p, cpu)) {
385 dprintk(KERN_INFO PFX "obtaining ACPI data failed\n"); 371 dprintk(KERN_INFO PFX "obtaining ACPI data failed\n");
@@ -493,12 +479,13 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
493 unsigned l, h; 479 unsigned l, h;
494 int ret; 480 int ret;
495 int i; 481 int i;
482 struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
496 483
497 /* Only Intel makes Enhanced Speedstep-capable CPUs */ 484 /* Only Intel makes Enhanced Speedstep-capable CPUs */
498 if (cpu->x86_vendor != X86_VENDOR_INTEL || !cpu_has(cpu, X86_FEATURE_EST)) 485 if (cpu->x86_vendor != X86_VENDOR_INTEL || !cpu_has(cpu, X86_FEATURE_EST))
499 return -ENODEV; 486 return -ENODEV;
500 487
501 if (is_const_loops_cpu(policy->cpu)) { 488 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
502 centrino_driver.flags |= CPUFREQ_CONST_LOOPS; 489 centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
503 } 490 }
504 491
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-est-common.h b/arch/i386/kernel/cpu/cpufreq/speedstep-est-common.h
deleted file mode 100644
index 5ce995c9d866..000000000000
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-est-common.h
+++ /dev/null
@@ -1,25 +0,0 @@
1/*
2 * Routines common for drivers handling Enhanced Speedstep Technology
3 * Copyright (C) 2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
4 *
5 * Licensed under the terms of the GNU GPL License version 2 -- see
6 * COPYING for details.
7 */
8
9static inline int is_const_loops_cpu(unsigned int cpu)
10{
11 struct cpuinfo_x86 *c = cpu_data + cpu;
12
13 if (c->x86_vendor != X86_VENDOR_INTEL || !cpu_has(c, X86_FEATURE_EST))
14 return 0;
15
16 /*
17 * on P-4s, the TSC runs with constant frequency independent of cpu freq
18 * when we use EST
19 */
20 if (c->x86 == 0xf)
21 return 1;
22
23 return 0;
24}
25
diff --git a/arch/i386/kernel/cpu/intel.c b/arch/i386/kernel/cpu/intel.c
index 5e2da704f0fa..8c0120186b9f 100644
--- a/arch/i386/kernel/cpu/intel.c
+++ b/arch/i386/kernel/cpu/intel.c
@@ -183,10 +183,13 @@ static void __devinit init_intel(struct cpuinfo_x86 *c)
183 } 183 }
184#endif 184#endif
185 185
186 if (c->x86 == 15) 186 if (c->x86 == 15)
187 set_bit(X86_FEATURE_P4, c->x86_capability); 187 set_bit(X86_FEATURE_P4, c->x86_capability);
188 if (c->x86 == 6) 188 if (c->x86 == 6)
189 set_bit(X86_FEATURE_P3, c->x86_capability); 189 set_bit(X86_FEATURE_P3, c->x86_capability);
190 if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
191 (c->x86 == 0x6 && c->x86_model >= 0x0e))
192 set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
190} 193}
191 194
192 195
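Taken together with the speedstep-centrino hunk above, the deletion of speedstep-est-common.h and this init_intel() change keep the "constant TSC" knowledge in one place: the family/model test that is_const_loops_cpu() used to open-code is now recorded once, at CPU identification time, as a synthetic X86_FEATURE_CONSTANT_TSC bit. A minimal consumer sketch (illustrative only; the helper name is made up, while the flag and the cpu_data[] usage follow the hunks above):

static int tsc_is_constant(unsigned int cpu)
{
        struct cpuinfo_x86 *c = &cpu_data[cpu];

        /* set by init_intel() for family 0xf model >= 3 and family 6 model >= 0x0e */
        return cpu_has(c, X86_FEATURE_CONSTANT_TSC);
}

A cpufreq driver that sets CPUFREQ_CONST_LOOPS, as speedstep-centrino now does, is the immediate user of this test.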
diff --git a/arch/i386/kernel/cpu/mtrr/changelog b/arch/i386/kernel/cpu/mtrr/changelog
deleted file mode 100644
index af1368535955..000000000000
--- a/arch/i386/kernel/cpu/mtrr/changelog
+++ /dev/null
@@ -1,229 +0,0 @@
1 ChangeLog
2
3 Prehistory Martin Tischhäuser <martin@ikcbarka.fzk.de>
4 Initial register-setting code (from proform-1.0).
5 19971216 Richard Gooch <rgooch@atnf.csiro.au>
6 Original version for /proc/mtrr interface, SMP-safe.
7 v1.0
8 19971217 Richard Gooch <rgooch@atnf.csiro.au>
9 Bug fix for ioctls()'s.
10 Added sample code in Documentation/mtrr.txt
11 v1.1
12 19971218 Richard Gooch <rgooch@atnf.csiro.au>
13 Disallow overlapping regions.
14 19971219 Jens Maurer <jmaurer@menuett.rhein-main.de>
15 Register-setting fixups.
16 v1.2
17 19971222 Richard Gooch <rgooch@atnf.csiro.au>
18 Fixups for kernel 2.1.75.
19 v1.3
20 19971229 David Wragg <dpw@doc.ic.ac.uk>
21 Register-setting fixups and conformity with Intel conventions.
22 19971229 Richard Gooch <rgooch@atnf.csiro.au>
23 Cosmetic changes and wrote this ChangeLog ;-)
24 19980106 Richard Gooch <rgooch@atnf.csiro.au>
25 Fixups for kernel 2.1.78.
26 v1.4
27 19980119 David Wragg <dpw@doc.ic.ac.uk>
28 Included passive-release enable code (elsewhere in PCI setup).
29 v1.5
30 19980131 Richard Gooch <rgooch@atnf.csiro.au>
31 Replaced global kernel lock with private spinlock.
32 v1.6
33 19980201 Richard Gooch <rgooch@atnf.csiro.au>
34 Added wait for other CPUs to complete changes.
35 v1.7
36 19980202 Richard Gooch <rgooch@atnf.csiro.au>
37 Bug fix in definition of <set_mtrr> for UP.
38 v1.8
39 19980319 Richard Gooch <rgooch@atnf.csiro.au>
40 Fixups for kernel 2.1.90.
41 19980323 Richard Gooch <rgooch@atnf.csiro.au>
42 Move SMP BIOS fixup before secondary CPUs call <calibrate_delay>
43 v1.9
44 19980325 Richard Gooch <rgooch@atnf.csiro.au>
45 Fixed test for overlapping regions: confused by adjacent regions
46 19980326 Richard Gooch <rgooch@atnf.csiro.au>
47 Added wbinvd in <set_mtrr_prepare>.
48 19980401 Richard Gooch <rgooch@atnf.csiro.au>
49 Bug fix for non-SMP compilation.
50 19980418 David Wragg <dpw@doc.ic.ac.uk>
51 Fixed-MTRR synchronisation for SMP and use atomic operations
52 instead of spinlocks.
53 19980418 Richard Gooch <rgooch@atnf.csiro.au>
54 Differentiate different MTRR register classes for BIOS fixup.
55 v1.10
56 19980419 David Wragg <dpw@doc.ic.ac.uk>
57 Bug fix in variable MTRR synchronisation.
58 v1.11
59 19980419 Richard Gooch <rgooch@atnf.csiro.au>
60 Fixups for kernel 2.1.97.
61 v1.12
62 19980421 Richard Gooch <rgooch@atnf.csiro.au>
63 Safer synchronisation across CPUs when changing MTRRs.
64 v1.13
65 19980423 Richard Gooch <rgooch@atnf.csiro.au>
66 Bugfix for SMP systems without MTRR support.
67 v1.14
68 19980427 Richard Gooch <rgooch@atnf.csiro.au>
69 Trap calls to <mtrr_add> and <mtrr_del> on non-MTRR machines.
70 v1.15
71 19980427 Richard Gooch <rgooch@atnf.csiro.au>
72 Use atomic bitops for setting SMP change mask.
73 v1.16
74 19980428 Richard Gooch <rgooch@atnf.csiro.au>
75 Removed spurious diagnostic message.
76 v1.17
77 19980429 Richard Gooch <rgooch@atnf.csiro.au>
78 Moved register-setting macros into this file.
79 Moved setup code from init/main.c to i386-specific areas.
80 v1.18
81 19980502 Richard Gooch <rgooch@atnf.csiro.au>
82 Moved MTRR detection outside conditionals in <mtrr_init>.
83 v1.19
84 19980502 Richard Gooch <rgooch@atnf.csiro.au>
85 Documentation improvement: mention Pentium II and AGP.
86 v1.20
87 19980521 Richard Gooch <rgooch@atnf.csiro.au>
88 Only manipulate interrupt enable flag on local CPU.
89 Allow enclosed uncachable regions.
90 v1.21
91 19980611 Richard Gooch <rgooch@atnf.csiro.au>
92 Always define <main_lock>.
93 v1.22
94 19980901 Richard Gooch <rgooch@atnf.csiro.au>
95 Removed module support in order to tidy up code.
96 Added sanity check for <mtrr_add>/<mtrr_del> before <mtrr_init>.
97 Created addition queue for prior to SMP commence.
98 v1.23
99 19980902 Richard Gooch <rgooch@atnf.csiro.au>
100 Ported patch to kernel 2.1.120-pre3.
101 v1.24
102 19980910 Richard Gooch <rgooch@atnf.csiro.au>
103 Removed sanity checks and addition queue: Linus prefers an OOPS.
104 v1.25
105 19981001 Richard Gooch <rgooch@atnf.csiro.au>
106 Fixed harmless compiler warning in include/asm-i386/mtrr.h
107 Fixed version numbering and history for v1.23 -> v1.24.
108 v1.26
109 19990118 Richard Gooch <rgooch@atnf.csiro.au>
110 Added devfs support.
111 v1.27
112 19990123 Richard Gooch <rgooch@atnf.csiro.au>
113 Changed locking to spin with reschedule.
114 Made use of new <smp_call_function>.
115 v1.28
116 19990201 Zoltán Böszörményi <zboszor@mail.externet.hu>
117 Extended the driver to be able to use Cyrix style ARRs.
118 19990204 Richard Gooch <rgooch@atnf.csiro.au>
119 Restructured Cyrix support.
120 v1.29
121 19990204 Zoltán Böszörményi <zboszor@mail.externet.hu>
122 Refined ARR support: enable MAPEN in set_mtrr_prepare()
123 and disable MAPEN in set_mtrr_done().
124 19990205 Richard Gooch <rgooch@atnf.csiro.au>
125 Minor cleanups.
126 v1.30
127 19990208 Zoltán Böszörményi <zboszor@mail.externet.hu>
128 Protect plain 6x86s (and other processors without the
129 Page Global Enable feature) against accessing CR4 in
130 set_mtrr_prepare() and set_mtrr_done().
131 19990210 Richard Gooch <rgooch@atnf.csiro.au>
132 Turned <set_mtrr_up> and <get_mtrr> into function pointers.
133 v1.31
134 19990212 Zoltán Böszörményi <zboszor@mail.externet.hu>
135 Major rewrite of cyrix_arr_init(): do not touch ARRs,
136 leave them as the BIOS have set them up.
137 Enable usage of all 8 ARRs.
138 Avoid multiplications by 3 everywhere and other
139 code clean ups/speed ups.
140 19990213 Zoltán Böszörményi <zboszor@mail.externet.hu>
141 Set up other Cyrix processors identical to the boot cpu.
142 Since Cyrix don't support Intel APIC, this is l'art pour l'art.
143 Weigh ARRs by size:
144 If size <= 32M is given, set up ARR# we were given.
145 If size > 32M is given, set up ARR7 only if it is free,
146 fail otherwise.
147 19990214 Zoltán Böszörményi <zboszor@mail.externet.hu>
148 Also check for size >= 256K if we are to set up ARR7,
149 mtrr_add() returns the value it gets from set_mtrr()
150 19990218 Zoltán Böszörményi <zboszor@mail.externet.hu>
151 Remove Cyrix "coma bug" workaround from here.
152 Moved to linux/arch/i386/kernel/setup.c and
153 linux/include/asm-i386/bugs.h
154 19990228 Richard Gooch <rgooch@atnf.csiro.au>
155 Added MTRRIOC_KILL_ENTRY ioctl(2)
156 Trap for counter underflow in <mtrr_file_del>.
157 Trap for 4 MiB aligned regions for PPro, stepping <= 7.
158 19990301 Richard Gooch <rgooch@atnf.csiro.au>
159 Created <get_free_region> hook.
160 19990305 Richard Gooch <rgooch@atnf.csiro.au>
161 Temporarily disable AMD support now MTRR capability flag is set.
162 v1.32
163 19990308 Zoltán Böszörményi <zboszor@mail.externet.hu>
164 Adjust my changes (19990212-19990218) to Richard Gooch's
165 latest changes. (19990228-19990305)
166 v1.33
167 19990309 Richard Gooch <rgooch@atnf.csiro.au>
168 Fixed typo in <printk> message.
169 19990310 Richard Gooch <rgooch@atnf.csiro.au>
170 Support K6-II/III based on Alan Cox's <alan@redhat.com> patches.
171 v1.34
172 19990511 Bart Hartgers <bart@etpmod.phys.tue.nl>
173 Support Centaur C6 MCR's.
174 19990512 Richard Gooch <rgooch@atnf.csiro.au>
175 Minor cleanups.
176 v1.35
177 19990707 Zoltán Böszörményi <zboszor@mail.externet.hu>
178 Check whether ARR3 is protected in cyrix_get_free_region()
179 and mtrr_del(). The code won't attempt to delete or change it
180 from now on if the BIOS protected ARR3. It silently skips ARR3
181 in cyrix_get_free_region() or returns with an error code from
182 mtrr_del().
183 19990711 Zoltán Böszörményi <zboszor@mail.externet.hu>
184 Reset some bits in the CCRs in cyrix_arr_init() to disable SMM
185 if ARR3 isn't protected. This is needed because if SMM is active
186 and ARR3 isn't protected then deleting and setting ARR3 again
187 may lock up the processor. With SMM entirely disabled, it does
188 not happen.
189 19990812 Zoltán Böszörményi <zboszor@mail.externet.hu>
190 Rearrange switch() statements so the driver accomodates to
191 the fact that the AMD Athlon handles its MTRRs the same way
192 as Intel does.
193 19990814 Zoltán Böszörményi <zboszor@mail.externet.hu>
194 Double check for Intel in mtrr_add()'s big switch() because
195 that revision check is only valid for Intel CPUs.
196 19990819 Alan Cox <alan@redhat.com>
197 Tested Zoltan's changes on a pre production Athlon - 100%
198 success.
199 19991008 Manfred Spraul <manfreds@colorfullife.com>
200 replaced spin_lock_reschedule() with a normal semaphore.
201 v1.36
202 20000221 Richard Gooch <rgooch@atnf.csiro.au>
203 Compile fix if procfs and devfs not enabled.
204 Formatting changes.
205 v1.37
206 20001109 H. Peter Anvin <hpa@zytor.com>
207 Use the new centralized CPU feature detects.
208
209 v1.38
210 20010309 Dave Jones <davej@suse.de>
211 Add support for Cyrix III.
212
213 v1.39
214 20010312 Dave Jones <davej@suse.de>
215 Ugh, I broke AMD support.
216 Reworked fix by Troels Walsted Hansen <troels@thule.no>
217
218 v1.40
219 20010327 Dave Jones <davej@suse.de>
220 Adapted Cyrix III support to include VIA C3.
221
222 v2.0
223 20020306 Patrick Mochel <mochel@osdl.org>
224 Split mtrr.c -> mtrr/*.c
225 Converted to Linux Kernel Coding Style
226 Fixed several minor nits in form
227 Moved some SMP-only functions out, so they can be used
228 for power management in the future.
229 TODO: Fix user interface cruft.
diff --git a/arch/i386/kernel/cpu/mtrr/if.c b/arch/i386/kernel/cpu/mtrr/if.c
index cf39e205d33c..5ac051bb9d55 100644
--- a/arch/i386/kernel/cpu/mtrr/if.c
+++ b/arch/i386/kernel/cpu/mtrr/if.c
@@ -1,5 +1,6 @@
1#include <linux/init.h> 1#include <linux/init.h>
2#include <linux/proc_fs.h> 2#include <linux/proc_fs.h>
3#include <linux/capability.h>
3#include <linux/ctype.h> 4#include <linux/ctype.h>
4#include <linux/module.h> 5#include <linux/module.h>
5#include <linux/seq_file.h> 6#include <linux/seq_file.h>
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index 6d91b274589c..89a85af33d28 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -29,7 +29,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
29 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 29 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
30 NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL, 30 NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
31 NULL, NULL, NULL, "mp", "nx", NULL, "mmxext", NULL, 31 NULL, NULL, NULL, "mp", "nx", NULL, "mmxext", NULL,
32 NULL, "fxsr_opt", NULL, NULL, NULL, "lm", "3dnowext", "3dnow", 32 NULL, "fxsr_opt", "rdtscp", NULL, NULL, "lm", "3dnowext", "3dnow",
33 33
34 /* Transmeta-defined */ 34 /* Transmeta-defined */
35 "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL, 35 "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
@@ -40,7 +40,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
40 /* Other (Linux-defined) */ 40 /* Other (Linux-defined) */
41 "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", 41 "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
42 NULL, NULL, NULL, NULL, 42 NULL, NULL, NULL, NULL,
43 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 43 "constant_tsc", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
44 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 44 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
45 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 45 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
46 46
@@ -57,11 +57,21 @@ static int show_cpuinfo(struct seq_file *m, void *v)
57 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 57 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
58 58
59 /* AMD-defined (#2) */ 59 /* AMD-defined (#2) */
60 "lahf_lm", "cmp_legacy", NULL, NULL, NULL, NULL, NULL, NULL, 60 "lahf_lm", "cmp_legacy", "svm", NULL, "cr8legacy", NULL, NULL, NULL,
61 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 61 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
62 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 62 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
63 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 63 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
64 }; 64 };
65 static char *x86_power_flags[] = {
66 "ts", /* temperature sensor */
67 "fid", /* frequency id control */
68 "vid", /* voltage id control */
69 "ttp", /* thermal trip */
70 "tm",
71 "stc",
72 NULL,
73 /* nothing */ /* constant_tsc - moved to flags */
74 };
65 struct cpuinfo_x86 *c = v; 75 struct cpuinfo_x86 *c = v;
66 int i, n = c - cpu_data; 76 int i, n = c - cpu_data;
67 int fpu_exception; 77 int fpu_exception;
@@ -131,6 +141,17 @@ static int show_cpuinfo(struct seq_file *m, void *v)
131 x86_cap_flags[i] != NULL ) 141 x86_cap_flags[i] != NULL )
132 seq_printf(m, " %s", x86_cap_flags[i]); 142 seq_printf(m, " %s", x86_cap_flags[i]);
133 143
144 for (i = 0; i < 32; i++)
145 if (c->x86_power & (1 << i)) {
146 if (i < ARRAY_SIZE(x86_power_flags) &&
147 x86_power_flags[i])
148 seq_printf(m, "%s%s",
149 x86_power_flags[i][0]?" ":"",
150 x86_power_flags[i]);
151 else
152 seq_printf(m, " [%d]", i);
153 }
154
134 seq_printf(m, "\nbogomips\t: %lu.%02lu\n\n", 155 seq_printf(m, "\nbogomips\t: %lu.%02lu\n\n",
135 c->loops_per_jiffy/(500000/HZ), 156 c->loops_per_jiffy/(500000/HZ),
136 (c->loops_per_jiffy/(5000/HZ)) % 100); 157 (c->loops_per_jiffy/(5000/HZ)) % 100);
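The new x86_power_flags[] table names bits of c->x86_power; on AMD parts these correspond, bit for bit, to the advanced power management word in EDX of CPUID leaf 0x80000007 (ts, fid, vid, ttp, tm, stc in bits 0 through 5). A hedged sketch of where that word presumably comes from; the fill happens in the CPU setup code, not in this hunk:

static void fill_power_caps(struct cpuinfo_x86 *c)
{
        unsigned int eax, ebx, ecx, edx;

        /* assumption: only parts advertising extended leaf 0x80000007 carry it */
        if (cpuid_eax(0x80000000) >= 0x80000007) {
                cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
                c->x86_power = edx;
        }
}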
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c
index 0248e084017c..d49dbe8dc96b 100644
--- a/arch/i386/kernel/crash.c
+++ b/arch/i386/kernel/crash.c
@@ -25,7 +25,6 @@
25#include <mach_ipi.h> 25#include <mach_ipi.h>
26 26
27 27
28note_buf_t crash_notes[NR_CPUS];
29/* This keeps a track of which one is crashing cpu. */ 28/* This keeps a track of which one is crashing cpu. */
30static int crashing_cpu; 29static int crashing_cpu;
31 30
@@ -72,7 +71,9 @@ static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
72 * squirrelled away. ELF notes happen to provide 71 * squirrelled away. ELF notes happen to provide
73 * all of that that no need to invent something new. 72 * all of that that no need to invent something new.
74 */ 73 */
75 buf = &crash_notes[cpu][0]; 74 buf = (u32*)per_cpu_ptr(crash_notes, cpu);
75 if (!buf)
76 return;
76 memset(&prstatus, 0, sizeof(prstatus)); 77 memset(&prstatus, 0, sizeof(prstatus));
77 prstatus.pr_pid = current->pid; 78 prstatus.pr_pid = current->pid;
78 elf_core_copy_regs(&prstatus.pr_reg, regs); 79 elf_core_copy_regs(&prstatus.pr_reg, regs);
@@ -81,51 +82,12 @@ static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
81 final_note(buf); 82 final_note(buf);
82} 83}
83 84
84static void crash_get_current_regs(struct pt_regs *regs) 85static void crash_save_self(struct pt_regs *regs)
85{
86 __asm__ __volatile__("movl %%ebx,%0" : "=m"(regs->ebx));
87 __asm__ __volatile__("movl %%ecx,%0" : "=m"(regs->ecx));
88 __asm__ __volatile__("movl %%edx,%0" : "=m"(regs->edx));
89 __asm__ __volatile__("movl %%esi,%0" : "=m"(regs->esi));
90 __asm__ __volatile__("movl %%edi,%0" : "=m"(regs->edi));
91 __asm__ __volatile__("movl %%ebp,%0" : "=m"(regs->ebp));
92 __asm__ __volatile__("movl %%eax,%0" : "=m"(regs->eax));
93 __asm__ __volatile__("movl %%esp,%0" : "=m"(regs->esp));
94 __asm__ __volatile__("movw %%ss, %%ax;" :"=a"(regs->xss));
95 __asm__ __volatile__("movw %%cs, %%ax;" :"=a"(regs->xcs));
96 __asm__ __volatile__("movw %%ds, %%ax;" :"=a"(regs->xds));
97 __asm__ __volatile__("movw %%es, %%ax;" :"=a"(regs->xes));
98 __asm__ __volatile__("pushfl; popl %0" :"=m"(regs->eflags));
99
100 regs->eip = (unsigned long)current_text_addr();
101}
102
103/* CPU does not save ss and esp on stack if execution is already
104 * running in kernel mode at the time of NMI occurrence. This code
105 * fixes it.
106 */
107static void crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs)
108{
109 memcpy(newregs, oldregs, sizeof(*newregs));
110 newregs->esp = (unsigned long)&(oldregs->esp);
111 __asm__ __volatile__("xorl %eax, %eax;");
112 __asm__ __volatile__ ("movw %%ss, %%ax;" :"=a"(newregs->xss));
113}
114
115/* We may have saved_regs from where the error came from
116 * or it is NULL if via a direct panic().
117 */
118static void crash_save_self(struct pt_regs *saved_regs)
119{ 86{
120 struct pt_regs regs;
121 int cpu; 87 int cpu;
122 88
123 cpu = smp_processor_id(); 89 cpu = smp_processor_id();
124 if (saved_regs) 90 crash_save_this_cpu(regs, cpu);
125 crash_setup_regs(&regs, saved_regs);
126 else
127 crash_get_current_regs(&regs);
128 crash_save_this_cpu(&regs, cpu);
129} 91}
130 92
131#ifdef CONFIG_SMP 93#ifdef CONFIG_SMP
@@ -144,7 +106,7 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
144 local_irq_disable(); 106 local_irq_disable();
145 107
146 if (!user_mode(regs)) { 108 if (!user_mode(regs)) {
147 crash_setup_regs(&fixed_regs, regs); 109 crash_fixup_ss_esp(&fixed_regs, regs);
148 regs = &fixed_regs; 110 regs = &fixed_regs;
149 } 111 }
150 crash_save_this_cpu(regs, cpu); 112 crash_save_this_cpu(regs, cpu);
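crash_notes stops being a static note_buf_t[NR_CPUS] array here and is reached through per_cpu_ptr() instead, which is why the new NULL check is needed. A hedged sketch of the allocation side this assumes; it lives in the generic kexec code rather than in this diff, so the exact shape is illustrative:

note_buf_t *crash_notes;

static int __init crash_notes_memory_init(void)
{
        /* dynamic per-cpu storage replaces the NR_CPUS-sized static array */
        crash_notes = alloc_percpu(note_buf_t);
        if (!crash_notes) {
                printk("Kexec: failed to allocate percpu crash_notes\n");
                return -ENOMEM;
        }
        return 0;
}
module_init(crash_notes_memory_init);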
diff --git a/arch/i386/kernel/crash_dump.c b/arch/i386/kernel/crash_dump.c
new file mode 100644
index 000000000000..3f532df488bc
--- /dev/null
+++ b/arch/i386/kernel/crash_dump.c
@@ -0,0 +1,74 @@
1/*
2 * kernel/crash_dump.c - Memory preserving reboot related code.
3 *
4 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
5 * Copyright (C) IBM Corporation, 2004. All rights reserved
6 */
7
8#include <linux/errno.h>
9#include <linux/highmem.h>
10#include <linux/crash_dump.h>
11
12#include <asm/uaccess.h>
13
14static void *kdump_buf_page;
15
16/**
17 * copy_oldmem_page - copy one page from "oldmem"
18 * @pfn: page frame number to be copied
19 * @buf: target memory address for the copy; this can be in kernel address
20 * space or user address space (see @userbuf)
21 * @csize: number of bytes to copy
22 * @offset: offset in bytes into the page (based on pfn) to begin the copy
23 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
24 * otherwise @buf is in kernel address space, use memcpy().
25 *
26 * Copy a page from "oldmem". For this page, there is no pte mapped
27 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
28 *
29 * Calling copy_to_user() in atomic context is not desirable. Hence first
30 * copying the data to a pre-allocated kernel page and then copying to user
31 * space in non-atomic context.
32 */
33ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
34 size_t csize, unsigned long offset, int userbuf)
35{
36 void *vaddr;
37
38 if (!csize)
39 return 0;
40
41 vaddr = kmap_atomic_pfn(pfn, KM_PTE0);
42
43 if (!userbuf) {
44 memcpy(buf, (vaddr + offset), csize);
45 kunmap_atomic(vaddr, KM_PTE0);
46 } else {
47 if (!kdump_buf_page) {
48 printk(KERN_WARNING "Kdump: Kdump buffer page not"
49 " allocated\n");
50 return -EFAULT;
51 }
52 copy_page(kdump_buf_page, vaddr);
53 kunmap_atomic(vaddr, KM_PTE0);
54 if (copy_to_user(buf, (kdump_buf_page + offset), csize))
55 return -EFAULT;
56 }
57
58 return csize;
59}
60
61static int __init kdump_buf_page_init(void)
62{
63 int ret = 0;
64
65 kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
66 if (!kdump_buf_page) {
67 printk(KERN_WARNING "Kdump: Failed to allocate kdump buffer"
68 " page\n");
69 ret = -ENOMEM;
70 }
71
72 return ret;
73}
74arch_initcall(kdump_buf_page_init);
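copy_oldmem_page() is meant to be driven one page at a time by whatever exposes the old kernel's memory; the /proc/vmcore reader is the obvious client. A hedged caller sketch along those lines; the function name and surrounding plumbing are illustrative, only the copy_oldmem_page() calling convention comes from the file above:

static ssize_t read_old_range(char *buf, size_t count, u64 *ppos, int userbuf)
{
        unsigned long pfn, offset;
        size_t nr_bytes;
        ssize_t read = 0, tmp;

        offset = (unsigned long)(*ppos % PAGE_SIZE);
        pfn = (unsigned long)(*ppos / PAGE_SIZE);

        while (count) {
                /* clamp to the end of the current page; later pages start at 0 */
                if (count > (PAGE_SIZE - offset))
                        nr_bytes = PAGE_SIZE - offset;
                else
                        nr_bytes = count;

                tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf);
                if (tmp < 0)
                        return tmp;
                *ppos += nr_bytes;
                count -= nr_bytes;
                buf += nr_bytes;
                read += nr_bytes;
                ++pfn;
                offset = 0;
        }
        return read;
}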
diff --git a/arch/i386/kernel/dmi_scan.c b/arch/i386/kernel/dmi_scan.c
index 58516e2ac172..6a93d75db431 100644
--- a/arch/i386/kernel/dmi_scan.c
+++ b/arch/i386/kernel/dmi_scan.c
@@ -4,7 +4,7 @@
4#include <linux/module.h> 4#include <linux/module.h>
5#include <linux/dmi.h> 5#include <linux/dmi.h>
6#include <linux/bootmem.h> 6#include <linux/bootmem.h>
7 7#include <linux/slab.h>
8 8
9static char * __init dmi_string(struct dmi_header *dm, u8 s) 9static char * __init dmi_string(struct dmi_header *dm, u8 s)
10{ 10{
@@ -19,7 +19,7 @@ static char * __init dmi_string(struct dmi_header *dm, u8 s)
19 } 19 }
20 20
21 if (*bp != 0) { 21 if (*bp != 0) {
22 str = alloc_bootmem(strlen(bp) + 1); 22 str = dmi_alloc(strlen(bp) + 1);
23 if (str != NULL) 23 if (str != NULL)
24 strcpy(str, bp); 24 strcpy(str, bp);
25 else 25 else
@@ -40,7 +40,7 @@ static int __init dmi_table(u32 base, int len, int num,
40 u8 *buf, *data; 40 u8 *buf, *data;
41 int i = 0; 41 int i = 0;
42 42
43 buf = bt_ioremap(base, len); 43 buf = dmi_ioremap(base, len);
44 if (buf == NULL) 44 if (buf == NULL)
45 return -1; 45 return -1;
46 46
@@ -65,7 +65,7 @@ static int __init dmi_table(u32 base, int len, int num,
65 data += 2; 65 data += 2;
66 i++; 66 i++;
67 } 67 }
68 bt_iounmap(buf, len); 68 dmi_iounmap(buf, len);
69 return 0; 69 return 0;
70} 70}
71 71
@@ -112,7 +112,7 @@ static void __init dmi_save_devices(struct dmi_header *dm)
112 if ((*d & 0x80) == 0) 112 if ((*d & 0x80) == 0)
113 continue; 113 continue;
114 114
115 dev = alloc_bootmem(sizeof(*dev)); 115 dev = dmi_alloc(sizeof(*dev));
116 if (!dev) { 116 if (!dev) {
117 printk(KERN_ERR "dmi_save_devices: out of memory.\n"); 117 printk(KERN_ERR "dmi_save_devices: out of memory.\n");
118 break; 118 break;
@@ -131,7 +131,7 @@ static void __init dmi_save_ipmi_device(struct dmi_header *dm)
131 struct dmi_device *dev; 131 struct dmi_device *dev;
132 void * data; 132 void * data;
133 133
134 data = alloc_bootmem(dm->length); 134 data = dmi_alloc(dm->length);
135 if (data == NULL) { 135 if (data == NULL) {
136 printk(KERN_ERR "dmi_save_ipmi_device: out of memory.\n"); 136 printk(KERN_ERR "dmi_save_ipmi_device: out of memory.\n");
137 return; 137 return;
@@ -139,7 +139,7 @@ static void __init dmi_save_ipmi_device(struct dmi_header *dm)
139 139
140 memcpy(data, dm, dm->length); 140 memcpy(data, dm, dm->length);
141 141
142 dev = alloc_bootmem(sizeof(*dev)); 142 dev = dmi_alloc(sizeof(*dev));
143 if (!dev) { 143 if (!dev) {
144 printk(KERN_ERR "dmi_save_ipmi_device: out of memory.\n"); 144 printk(KERN_ERR "dmi_save_ipmi_device: out of memory.\n");
145 return; 145 return;
@@ -221,7 +221,7 @@ void __init dmi_scan_machine(void)
221 } 221 }
222 } 222 }
223 223
224out: printk(KERN_INFO "DMI not present.\n"); 224out: printk(KERN_INFO "DMI not present or invalid.\n");
225} 225}
226 226
227 227
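The bootmem and bt_ioremap calls give way to dmi_alloc()/dmi_ioremap()/dmi_iounmap() wrappers, so the shared scanner no longer hard-codes the i386 early allocator. A hedged sketch of what the i386 side of those wrappers presumably looks like (a small <asm/dmi.h>; other architectures are free to map them onto kmalloc or their own early ioremap, which would also explain the new <linux/slab.h> include):

#ifndef _ASM_DMI_H
#define _ASM_DMI_H

#include <asm/io.h>
#include <linux/bootmem.h>

/* DMI runs early, so keep the old bootmem/bt_ioremap behaviour on i386 */
#define dmi_alloc(len)          alloc_bootmem(len)
#define dmi_ioremap(x, len)     bt_ioremap(x, len)
#define dmi_iounmap(x, len)     bt_iounmap(x, len)

#endif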
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index 607c06007508..4d704724b2f5 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -323,6 +323,7 @@ work_notifysig: # deal with pending signals and
323 323
324 ALIGN 324 ALIGN
325work_notifysig_v86: 325work_notifysig_v86:
326#ifdef CONFIG_VM86
326 pushl %ecx # save ti_flags for do_notify_resume 327 pushl %ecx # save ti_flags for do_notify_resume
327 call save_v86_state # %eax contains pt_regs pointer 328 call save_v86_state # %eax contains pt_regs pointer
328 popl %ecx 329 popl %ecx
@@ -330,6 +331,7 @@ work_notifysig_v86:
330 xorl %edx, %edx 331 xorl %edx, %edx
331 call do_notify_resume 332 call do_notify_resume
332 jmp resume_userspace 333 jmp resume_userspace
334#endif
333 335
334 # perform syscall exit tracing 336 # perform syscall exit tracing
335 ALIGN 337 ALIGN
diff --git a/arch/i386/kernel/init_task.c b/arch/i386/kernel/init_task.c
index 9caa8e8db80c..cff95d10a4d8 100644
--- a/arch/i386/kernel/init_task.c
+++ b/arch/i386/kernel/init_task.c
@@ -42,5 +42,5 @@ EXPORT_SYMBOL(init_task);
42 * per-CPU TSS segments. Threads are completely 'soft' on Linux, 42 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
43 * no more per-task TSS's. 43 * no more per-task TSS's.
44 */ 44 */
45DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_maxaligned_in_smp = INIT_TSS; 45DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
46 46
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 7554f8fd874a..f2dd218d88cb 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -1649,7 +1649,7 @@ static void __init enable_IO_APIC(void)
1649 for(apic = 0; apic < nr_ioapics; apic++) { 1649 for(apic = 0; apic < nr_ioapics; apic++) {
1650 int pin; 1650 int pin;
1651 /* See if any of the pins is in ExtINT mode */ 1651 /* See if any of the pins is in ExtINT mode */
1652 for(pin = 0; pin < nr_ioapic_registers[i]; pin++) { 1652 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1653 struct IO_APIC_route_entry entry; 1653 struct IO_APIC_route_entry entry;
1654 spin_lock_irqsave(&ioapic_lock, flags); 1654 spin_lock_irqsave(&ioapic_lock, flags);
1655 *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin); 1655 *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
diff --git a/arch/i386/kernel/ioport.c b/arch/i386/kernel/ioport.c
index b59a34dbe262..79026f026b85 100644
--- a/arch/i386/kernel/ioport.c
+++ b/arch/i386/kernel/ioport.c
@@ -7,6 +7,7 @@
7 7
8#include <linux/sched.h> 8#include <linux/sched.h>
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/capability.h>
10#include <linux/errno.h> 11#include <linux/errno.h>
11#include <linux/types.h> 12#include <linux/types.h>
12#include <linux/ioport.h> 13#include <linux/ioport.h>
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index 1a201a932865..f3a9c78c4a24 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -19,7 +19,7 @@
19#include <linux/cpu.h> 19#include <linux/cpu.h>
20#include <linux/delay.h> 20#include <linux/delay.h>
21 21
22DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_maxaligned_in_smp; 22DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
23EXPORT_PER_CPU_SYMBOL(irq_stat); 23EXPORT_PER_CPU_SYMBOL(irq_stat);
24 24
25#ifndef CONFIG_X86_LOCAL_APIC 25#ifndef CONFIG_X86_LOCAL_APIC
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 19edcd526ba4..6483eeb1a4e8 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -58,13 +58,9 @@ static inline int is_IF_modifier(kprobe_opcode_t opcode)
58 58
59int __kprobes arch_prepare_kprobe(struct kprobe *p) 59int __kprobes arch_prepare_kprobe(struct kprobe *p)
60{ 60{
61 return 0;
62}
63
64void __kprobes arch_copy_kprobe(struct kprobe *p)
65{
66 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); 61 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
67 p->opcode = *p->addr; 62 p->opcode = *p->addr;
63 return 0;
68} 64}
69 65
70void __kprobes arch_arm_kprobe(struct kprobe *p) 66void __kprobes arch_arm_kprobe(struct kprobe *p)
@@ -81,10 +77,6 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
81 (unsigned long) p->addr + sizeof(kprobe_opcode_t)); 77 (unsigned long) p->addr + sizeof(kprobe_opcode_t));
82} 78}
83 79
84void __kprobes arch_remove_kprobe(struct kprobe *p)
85{
86}
87
88static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb) 80static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
89{ 81{
90 kcb->prev_kprobe.kp = kprobe_running(); 82 kcb->prev_kprobe.kp = kprobe_running();
@@ -196,6 +188,19 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
196 kcb->kprobe_status = KPROBE_REENTER; 188 kcb->kprobe_status = KPROBE_REENTER;
197 return 1; 189 return 1;
198 } else { 190 } else {
191 if (regs->eflags & VM_MASK) {
192 /* We are in virtual-8086 mode. Return 0 */
193 goto no_kprobe;
194 }
195 if (*addr != BREAKPOINT_INSTRUCTION) {
196 /* The breakpoint instruction was removed by
197 * another cpu right after we hit, no further
198 * handling of this interrupt is appropriate
199 */
200 regs->eip -= sizeof(kprobe_opcode_t);
201 ret = 1;
202 goto no_kprobe;
203 }
199 p = __get_cpu_var(current_kprobe); 204 p = __get_cpu_var(current_kprobe);
200 if (p->break_handler && p->break_handler(p, regs)) { 205 if (p->break_handler && p->break_handler(p, regs)) {
201 goto ss_probe; 206 goto ss_probe;
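arch_prepare_kprobe() now copies the probed instruction itself, so the separate arch_copy_kprobe() step disappears and register_kprobe() does the whole arch setup in one pass; the two new checks bail out cleanly for vm86 mode and for a breakpoint that another CPU removed under us. A hedged, self-contained usage sketch; the module, probe point and messages are made up for illustration, only the kprobes calls themselves are real API:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

static noinline int traced_helper(int x)        /* made-up probe target */
{
        return x + 1;
}

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
        printk("kprobe hit at eip=%08lx\n", regs->eip);
        return 0;
}

static struct kprobe kp = {
        .pre_handler    = handler_pre,
        .addr           = (kprobe_opcode_t *)traced_helper,
};

static int __init kprobe_example_init(void)
{
        int ret = register_kprobe(&kp);         /* calls arch_prepare_kprobe() above */

        if (ret < 0)
                return ret;
        traced_helper(41);                      /* trigger the probe once */
        return 0;
}

static void __exit kprobe_example_exit(void)
{
        unregister_kprobe(&kp);
}

module_init(kprobe_example_init);
module_exit(kprobe_example_exit);
MODULE_LICENSE("GPL");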
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c
index 165f13158c60..d3fdf0057d82 100644
--- a/arch/i386/kernel/microcode.c
+++ b/arch/i386/kernel/microcode.c
@@ -70,6 +70,7 @@
70 */ 70 */
71 71
72//#define DEBUG /* pr_debug */ 72//#define DEBUG /* pr_debug */
73#include <linux/capability.h>
73#include <linux/kernel.h> 74#include <linux/kernel.h>
74#include <linux/init.h> 75#include <linux/init.h>
75#include <linux/sched.h> 76#include <linux/sched.h>
@@ -165,7 +166,7 @@ static void collect_cpu_info (void *unused)
165 166
166 wrmsr(MSR_IA32_UCODE_REV, 0, 0); 167 wrmsr(MSR_IA32_UCODE_REV, 0, 0);
167 /* see notes above for revision 1.07. Apparent chip bug */ 168 /* see notes above for revision 1.07. Apparent chip bug */
168 serialize_cpu(); 169 sync_core();
169 /* get the current revision from MSR 0x8B */ 170 /* get the current revision from MSR 0x8B */
170 rdmsr(MSR_IA32_UCODE_REV, val[0], uci->rev); 171 rdmsr(MSR_IA32_UCODE_REV, val[0], uci->rev);
171 pr_debug("microcode: collect_cpu_info : sig=0x%x, pf=0x%x, rev=0x%x\n", 172 pr_debug("microcode: collect_cpu_info : sig=0x%x, pf=0x%x, rev=0x%x\n",
@@ -379,7 +380,7 @@ static void do_update_one (void * unused)
379 wrmsr(MSR_IA32_UCODE_REV, 0, 0); 380 wrmsr(MSR_IA32_UCODE_REV, 0, 0);
380 381
381 /* see notes above for revision 1.07. Apparent chip bug */ 382 /* see notes above for revision 1.07. Apparent chip bug */
382 serialize_cpu(); 383 sync_core();
383 384
384 /* get the current revision from MSR 0x8B */ 385 /* get the current revision from MSR 0x8B */
385 rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]); 386 rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
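Both hunks swap serialize_cpu() for sync_core(); the intent is unchanged, namely forcing instruction-stream serialization around the microcode MSR accesses. A hedged sketch of what sync_core() amounts to on i386 (the real definition lives in <asm/processor.h>, not in this patch):

static inline void sync_core(void)
{
        int tmp;

        /* CPUID is architecturally serializing; a throwaway CPUID(1) is enough */
        asm volatile("cpuid"
                     : "=a" (tmp)
                     : "0" (1)
                     : "ebx", "ecx", "edx", "memory");
}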
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 45e7f0ac4b04..2185377fdde1 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -48,6 +48,7 @@
48#include <asm/processor.h> 48#include <asm/processor.h>
49#include <asm/i387.h> 49#include <asm/i387.h>
50#include <asm/desc.h> 50#include <asm/desc.h>
51#include <asm/vm86.h>
51#ifdef CONFIG_MATH_EMULATION 52#ifdef CONFIG_MATH_EMULATION
52#include <asm/math_emu.h> 53#include <asm/math_emu.h>
53#endif 54#endif
@@ -423,18 +424,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
423 struct task_struct *tsk; 424 struct task_struct *tsk;
424 int err; 425 int err;
425 426
426 childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1; 427 childregs = task_pt_regs(p);
427 /*
428 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
429 * This is necessary to guarantee that the entire "struct pt_regs"
430 * is accessable even if the CPU haven't stored the SS/ESP registers
431 * on the stack (interrupt gate does not save these registers
432 * when switching to the same priv ring).
433 * Therefore beware: accessing the xss/esp fields of the
434 * "struct pt_regs" is possible, but they may contain the
435 * completely wrong values.
436 */
437 childregs = (struct pt_regs *) ((unsigned long) childregs - 8);
438 *childregs = *regs; 428 *childregs = *regs;
439 childregs->eax = 0; 429 childregs->eax = 0;
440 childregs->esp = esp; 430 childregs->esp = esp;
@@ -539,12 +529,7 @@ EXPORT_SYMBOL(dump_thread);
539 */ 529 */
540int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs) 530int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
541{ 531{
542 struct pt_regs ptregs; 532 struct pt_regs ptregs = *task_pt_regs(tsk);
543
544 ptregs = *(struct pt_regs *)
545 ((unsigned long)tsk->thread_info +
546 /* see comments in copy_thread() about -8 */
547 THREAD_SIZE - sizeof(ptregs) - 8);
548 ptregs.xcs &= 0xffff; 533 ptregs.xcs &= 0xffff;
549 ptregs.xds &= 0xffff; 534 ptregs.xds &= 0xffff;
550 ptregs.xes &= 0xffff; 535 ptregs.xes &= 0xffff;
@@ -600,8 +585,8 @@ static inline void disable_tsc(struct task_struct *prev_p,
600 * gcc should eliminate the ->thread_info dereference if 585 * gcc should eliminate the ->thread_info dereference if
601 * has_secure_computing returns 0 at compile time (SECCOMP=n). 586 * has_secure_computing returns 0 at compile time (SECCOMP=n).
602 */ 587 */
603 prev = prev_p->thread_info; 588 prev = task_thread_info(prev_p);
604 next = next_p->thread_info; 589 next = task_thread_info(next_p);
605 590
606 if (has_secure_computing(prev) || has_secure_computing(next)) { 591 if (has_secure_computing(prev) || has_secure_computing(next)) {
607 /* slow path here */ 592 /* slow path here */
@@ -786,7 +771,7 @@ unsigned long get_wchan(struct task_struct *p)
786 int count = 0; 771 int count = 0;
787 if (!p || p == current || p->state == TASK_RUNNING) 772 if (!p || p == current || p->state == TASK_RUNNING)
788 return 0; 773 return 0;
789 stack_page = (unsigned long)p->thread_info; 774 stack_page = (unsigned long)task_stack_page(p);
790 esp = p->thread.esp; 775 esp = p->thread.esp;
791 if (!stack_page || esp < stack_page || esp > top_esp+stack_page) 776 if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
792 return 0; 777 return 0;
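All of the hunks above stop poking at ->thread_info by hand and use task_pt_regs(), task_thread_info() and task_stack_page() instead. A hedged sketch of what the i386 definitions presumably expand to; note that the 8-byte reservation survives, which is exactly what the comment deleted from copy_thread() was explaining (SS/ESP are not pushed when no privilege switch happens, so the top of pt_regs may hold junk):

#define task_thread_info(task)  ((task)->thread_info)
#define task_stack_page(task)   ((void *)(task)->thread_info)

#define task_pt_regs(task)                                              \
({                                                                      \
        struct pt_regs *__regs__;                                       \
        __regs__ = (struct pt_regs *)                                   \
                ((unsigned long)task_stack_page(task) + THREAD_SIZE - 8); \
        __regs__ - 1;                                                   \
})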
diff --git a/arch/i386/kernel/quirks.c b/arch/i386/kernel/quirks.c
index aaf89cb2bc51..87ccdac84928 100644
--- a/arch/i386/kernel/quirks.c
+++ b/arch/i386/kernel/quirks.c
@@ -25,8 +25,7 @@ static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
25 25
26 /* enable access to config space*/ 26 /* enable access to config space*/
27 pci_read_config_byte(dev, 0xf4, &config); 27 pci_read_config_byte(dev, 0xf4, &config);
28 config |= 0x2; 28 pci_write_config_byte(dev, 0xf4, config|0x2);
29 pci_write_config_byte(dev, 0xf4, config);
30 29
31 /* read xTPR register */ 30 /* read xTPR register */
32 raw_pci_ops->read(0, 0, 0x40, 0x4c, 2, &word); 31 raw_pci_ops->read(0, 0, 0x40, 0x4c, 2, &word);
@@ -42,9 +41,9 @@ static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
42#endif 41#endif
43 } 42 }
44 43
45 config &= ~0x2; 44 /* put back the original value for config space*/
46 /* disable access to config space*/ 45 if (!(config & 0x2))
47 pci_write_config_byte(dev, 0xf4, config); 46 pci_write_config_byte(dev, 0xf4, config);
48} 47}
49DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_intel_irqbalance); 48DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_intel_irqbalance);
50DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance); 49DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance);
diff --git a/arch/i386/kernel/reboot.c b/arch/i386/kernel/reboot.c
index 2fa5803a759d..d207242976d3 100644
--- a/arch/i386/kernel/reboot.c
+++ b/arch/i386/kernel/reboot.c
@@ -12,6 +12,7 @@
12#include <linux/efi.h> 12#include <linux/efi.h>
13#include <linux/dmi.h> 13#include <linux/dmi.h>
14#include <linux/ctype.h> 14#include <linux/ctype.h>
15#include <linux/pm.h>
15#include <asm/uaccess.h> 16#include <asm/uaccess.h>
16#include <asm/apic.h> 17#include <asm/apic.h>
17#include <asm/desc.h> 18#include <asm/desc.h>
@@ -355,10 +356,10 @@ void machine_halt(void)
355 356
356void machine_power_off(void) 357void machine_power_off(void)
357{ 358{
358 machine_shutdown(); 359 if (pm_power_off) {
359 360 machine_shutdown();
360 if (pm_power_off)
361 pm_power_off(); 361 pm_power_off();
362 }
362} 363}
363 364
364 365
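The reordering makes machine_power_off() a no-op when no power-off method has been registered, rather than tearing the machine half-way down with nothing able to actually cut power. pm_power_off is just a hook that APM, ACPI or a board driver fills in; a hedged, made-up platform example:

#include <linux/init.h>
#include <linux/pm.h>

static void board_power_off(void)       /* hypothetical platform hook */
{
        /* poke the board-specific power latch here */
}

static int __init board_pm_init(void)
{
        pm_power_off = board_power_off;
        return 0;
}
late_initcall(board_pm_init);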
diff --git a/arch/i386/kernel/scx200.c b/arch/i386/kernel/scx200.c
index 9c968ae67c43..321f5fd26e75 100644
--- a/arch/i386/kernel/scx200.c
+++ b/arch/i386/kernel/scx200.c
@@ -143,7 +143,7 @@ static int __init scx200_init(void)
143{ 143{
144 printk(KERN_INFO NAME ": NatSemi SCx200 Driver\n"); 144 printk(KERN_INFO NAME ": NatSemi SCx200 Driver\n");
145 145
146 return pci_module_init(&scx200_pci_driver); 146 return pci_register_driver(&scx200_pci_driver);
147} 147}
148 148
149static void __exit scx200_cleanup(void) 149static void __exit scx200_cleanup(void)
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 27c956db0461..51e513b4f72d 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -45,6 +45,7 @@
45#include <linux/nodemask.h> 45#include <linux/nodemask.h>
46#include <linux/kexec.h> 46#include <linux/kexec.h>
47#include <linux/crash_dump.h> 47#include <linux/crash_dump.h>
48#include <linux/dmi.h>
48 49
49#include <video/edid.h> 50#include <video/edid.h>
50 51
@@ -146,7 +147,6 @@ EXPORT_SYMBOL(ist_info);
146struct e820map e820; 147struct e820map e820;
147 148
148extern void early_cpu_init(void); 149extern void early_cpu_init(void);
149extern void dmi_scan_machine(void);
150extern void generic_apic_probe(char *); 150extern void generic_apic_probe(char *);
151extern int root_mountflags; 151extern int root_mountflags;
152 152
@@ -898,7 +898,7 @@ static void __init parse_cmdline_early (char ** cmdline_p)
898 } 898 }
899 } 899 }
900#endif 900#endif
901#ifdef CONFIG_CRASH_DUMP 901#ifdef CONFIG_PROC_VMCORE
902 /* elfcorehdr= specifies the location of elf core header 902 /* elfcorehdr= specifies the location of elf core header
903 * stored by the crashed kernel. 903 * stored by the crashed kernel.
904 */ 904 */
@@ -1584,7 +1584,7 @@ void __init setup_arch(char **cmdline_p)
1584 if (s) { 1584 if (s) {
1585 extern void setup_early_printk(char *); 1585 extern void setup_early_printk(char *);
1586 1586
1587 setup_early_printk(s); 1587 setup_early_printk(strchr(s, '=') + 1);
1588 printk("early console enabled\n"); 1588 printk("early console enabled\n");
1589 } 1589 }
1590 } 1590 }
diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c
index adcd069db91e..963616d364ec 100644
--- a/arch/i386/kernel/signal.c
+++ b/arch/i386/kernel/signal.c
@@ -37,51 +37,17 @@
37asmlinkage int 37asmlinkage int
38sys_sigsuspend(int history0, int history1, old_sigset_t mask) 38sys_sigsuspend(int history0, int history1, old_sigset_t mask)
39{ 39{
40 struct pt_regs * regs = (struct pt_regs *) &history0;
41 sigset_t saveset;
42
43 mask &= _BLOCKABLE; 40 mask &= _BLOCKABLE;
44 spin_lock_irq(&current->sighand->siglock); 41 spin_lock_irq(&current->sighand->siglock);
45 saveset = current->blocked; 42 current->saved_sigmask = current->blocked;
46 siginitset(&current->blocked, mask); 43 siginitset(&current->blocked, mask);
47 recalc_sigpending(); 44 recalc_sigpending();
48 spin_unlock_irq(&current->sighand->siglock); 45 spin_unlock_irq(&current->sighand->siglock);
49 46
50 regs->eax = -EINTR; 47 current->state = TASK_INTERRUPTIBLE;
51 while (1) { 48 schedule();
52 current->state = TASK_INTERRUPTIBLE; 49 set_thread_flag(TIF_RESTORE_SIGMASK);
53 schedule(); 50 return -ERESTARTNOHAND;
54 if (do_signal(regs, &saveset))
55 return -EINTR;
56 }
57}
58
59asmlinkage int
60sys_rt_sigsuspend(struct pt_regs regs)
61{
62 sigset_t saveset, newset;
63
64 /* XXX: Don't preclude handling different sized sigset_t's. */
65 if (regs.ecx != sizeof(sigset_t))
66 return -EINVAL;
67
68 if (copy_from_user(&newset, (sigset_t __user *)regs.ebx, sizeof(newset)))
69 return -EFAULT;
70 sigdelsetmask(&newset, ~_BLOCKABLE);
71
72 spin_lock_irq(&current->sighand->siglock);
73 saveset = current->blocked;
74 current->blocked = newset;
75 recalc_sigpending();
76 spin_unlock_irq(&current->sighand->siglock);
77
78 regs.eax = -EINTR;
79 while (1) {
80 current->state = TASK_INTERRUPTIBLE;
81 schedule();
82 if (do_signal(&regs, &saveset))
83 return -EINTR;
84 }
85} 51}
86 52
87asmlinkage int 53asmlinkage int
@@ -433,11 +399,11 @@ static int setup_frame(int sig, struct k_sigaction *ka,
433 current->comm, current->pid, frame, regs->eip, frame->pretcode); 399 current->comm, current->pid, frame, regs->eip, frame->pretcode);
434#endif 400#endif
435 401
436 return 1; 402 return 0;
437 403
438give_sigsegv: 404give_sigsegv:
439 force_sigsegv(sig, current); 405 force_sigsegv(sig, current);
440 return 0; 406 return -EFAULT;
441} 407}
442 408
443static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 409static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
@@ -527,11 +493,11 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
527 current->comm, current->pid, frame, regs->eip, frame->pretcode); 493 current->comm, current->pid, frame, regs->eip, frame->pretcode);
528#endif 494#endif
529 495
530 return 1; 496 return 0;
531 497
532give_sigsegv: 498give_sigsegv:
533 force_sigsegv(sig, current); 499 force_sigsegv(sig, current);
534 return 0; 500 return -EFAULT;
535} 501}
536 502
537/* 503/*
@@ -581,7 +547,7 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
581 else 547 else
582 ret = setup_frame(sig, ka, oldset, regs); 548 ret = setup_frame(sig, ka, oldset, regs);
583 549
584 if (ret) { 550 if (ret == 0) {
585 spin_lock_irq(&current->sighand->siglock); 551 spin_lock_irq(&current->sighand->siglock);
586 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 552 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
587 if (!(ka->sa.sa_flags & SA_NODEFER)) 553 if (!(ka->sa.sa_flags & SA_NODEFER))
@@ -598,11 +564,12 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
598 * want to handle. Thus you cannot kill init even with a SIGKILL even by 564 * want to handle. Thus you cannot kill init even with a SIGKILL even by
599 * mistake. 565 * mistake.
600 */ 566 */
601int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset) 567static void fastcall do_signal(struct pt_regs *regs)
602{ 568{
603 siginfo_t info; 569 siginfo_t info;
604 int signr; 570 int signr;
605 struct k_sigaction ka; 571 struct k_sigaction ka;
572 sigset_t *oldset;
606 573
607 /* 574 /*
608 * We want the common case to go fast, which 575 * We want the common case to go fast, which
@@ -613,12 +580,14 @@ int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
613 * CS suffices. 580 * CS suffices.
614 */ 581 */
615 if (!user_mode(regs)) 582 if (!user_mode(regs))
616 return 1; 583 return;
617 584
618 if (try_to_freeze()) 585 if (try_to_freeze())
619 goto no_signal; 586 goto no_signal;
620 587
621 if (!oldset) 588 if (test_thread_flag(TIF_RESTORE_SIGMASK))
589 oldset = &current->saved_sigmask;
590 else
622 oldset = &current->blocked; 591 oldset = &current->blocked;
623 592
624 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 593 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
@@ -628,38 +597,55 @@ int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
628 * have been cleared if the watchpoint triggered 597 * have been cleared if the watchpoint triggered
629 * inside the kernel. 598 * inside the kernel.
630 */ 599 */
631 if (unlikely(current->thread.debugreg[7])) { 600 if (unlikely(current->thread.debugreg[7]))
632 set_debugreg(current->thread.debugreg[7], 7); 601 set_debugreg(current->thread.debugreg[7], 7);
633 }
634 602
635 /* Whee! Actually deliver the signal. */ 603 /* Whee! Actually deliver the signal. */
636 return handle_signal(signr, &info, &ka, oldset, regs); 604 if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
605 /* a signal was successfully delivered; the saved
606 * sigmask will have been stored in the signal frame,
607 * and will be restored by sigreturn, so we can simply
608 * clear the TIF_RESTORE_SIGMASK flag */
609 if (test_thread_flag(TIF_RESTORE_SIGMASK))
610 clear_thread_flag(TIF_RESTORE_SIGMASK);
611 }
612
613 return;
637 } 614 }
638 615
639 no_signal: 616no_signal:
640 /* Did we come from a system call? */ 617 /* Did we come from a system call? */
641 if (regs->orig_eax >= 0) { 618 if (regs->orig_eax >= 0) {
642 /* Restart the system call - no handlers present */ 619 /* Restart the system call - no handlers present */
643 if (regs->eax == -ERESTARTNOHAND || 620 switch (regs->eax) {
644 regs->eax == -ERESTARTSYS || 621 case -ERESTARTNOHAND:
645 regs->eax == -ERESTARTNOINTR) { 622 case -ERESTARTSYS:
623 case -ERESTARTNOINTR:
646 regs->eax = regs->orig_eax; 624 regs->eax = regs->orig_eax;
647 regs->eip -= 2; 625 regs->eip -= 2;
648 } 626 break;
649 if (regs->eax == -ERESTART_RESTARTBLOCK){ 627
628 case -ERESTART_RESTARTBLOCK:
650 regs->eax = __NR_restart_syscall; 629 regs->eax = __NR_restart_syscall;
651 regs->eip -= 2; 630 regs->eip -= 2;
631 break;
652 } 632 }
653 } 633 }
654 return 0; 634
635 /* if there's no signal to deliver, we just put the saved sigmask
636 * back */
637 if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
638 clear_thread_flag(TIF_RESTORE_SIGMASK);
639 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
640 }
655} 641}
656 642
657/* 643/*
658 * notification of userspace execution resumption 644 * notification of userspace execution resumption
659 * - triggered by current->work.notify_resume 645 * - triggered by the TIF_WORK_MASK flags
660 */ 646 */
661__attribute__((regparm(3))) 647__attribute__((regparm(3)))
662void do_notify_resume(struct pt_regs *regs, sigset_t *oldset, 648void do_notify_resume(struct pt_regs *regs, void *_unused,
663 __u32 thread_info_flags) 649 __u32 thread_info_flags)
664{ 650{
665 /* Pending single-step? */ 651 /* Pending single-step? */
@@ -667,9 +653,10 @@ void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
667 regs->eflags |= TF_MASK; 653 regs->eflags |= TF_MASK;
668 clear_thread_flag(TIF_SINGLESTEP); 654 clear_thread_flag(TIF_SINGLESTEP);
669 } 655 }
656
670 /* deal with pending signal delivery */ 657 /* deal with pending signal delivery */
671 if (thread_info_flags & _TIF_SIGPENDING) 658 if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
672 do_signal(regs,oldset); 659 do_signal(regs);
673 660
674 clear_thread_flag(TIF_IRET); 661 clear_thread_flag(TIF_IRET);
675} 662}
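The point of moving sys_sigsuspend() onto current->saved_sigmask plus TIF_RESTORE_SIGMASK is that any syscall can now install a temporary signal mask and let the signal-delivery path put the original back atomically; this is the plumbing that sys_pselect6 and sys_ppoll, wired into the syscall table below, are built on. A hedged sketch of that calling pattern; the function and the wait are stand-ins, only the saved_sigmask/TIF_RESTORE_SIGMASK handling mirrors the code above:

static long wait_with_sigmask(const sigset_t __user *umask)
{
        sigset_t ksigmask, sigsaved;
        long ret;

        if (copy_from_user(&ksigmask, umask, sizeof(ksigmask)))
                return -EFAULT;
        sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
        sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);

        set_current_state(TASK_INTERRUPTIBLE);
        ret = schedule_timeout(HZ);             /* stand-in for the real wait */

        if (signal_pending(current)) {
                /*
                 * Leave the temporary mask installed: do_signal() will see
                 * TIF_RESTORE_SIGMASK, store sigsaved in the signal frame if
                 * a handler runs, or quietly restore it if nothing is pending.
                 */
                current->saved_sigmask = sigsaved;
                set_thread_flag(TIF_RESTORE_SIGMASK);
                return -ERESTARTNOHAND;
        }

        sigprocmask(SIG_SETMASK, &sigsaved, NULL);
        return ret;
}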
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index b3c2e2c26743..255adb498268 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -875,8 +875,7 @@ static inline struct task_struct * alloc_idle_task(int cpu)
875 /* initialize thread_struct. we really want to avoid destroy 875 /* initialize thread_struct. we really want to avoid destroy
876 * idle tread 876 * idle tread
877 */ 877 */
878 idle->thread.esp = (unsigned long)(((struct pt_regs *) 878 idle->thread.esp = (unsigned long)task_pt_regs(idle);
879 (THREAD_SIZE + (unsigned long) idle->thread_info)) - 1);
880 init_idle(idle, cpu); 879 init_idle(idle, cpu);
881 return idle; 880 return idle;
882 } 881 }
@@ -1096,6 +1095,7 @@ static void smp_tune_scheduling (void)
1096 cachesize = 16; /* Pentiums, 2x8kB cache */ 1095 cachesize = 16; /* Pentiums, 2x8kB cache */
1097 bandwidth = 100; 1096 bandwidth = 100;
1098 } 1097 }
1098 max_cache_size = cachesize * 1024;
1099 } 1099 }
1100} 1100}
1101 1101
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S
index f7ba4acc20ec..1b665928336b 100644
--- a/arch/i386/kernel/syscall_table.S
+++ b/arch/i386/kernel/syscall_table.S
@@ -293,3 +293,19 @@ ENTRY(sys_call_table)
293 .long sys_inotify_init 293 .long sys_inotify_init
294 .long sys_inotify_add_watch 294 .long sys_inotify_add_watch
295 .long sys_inotify_rm_watch 295 .long sys_inotify_rm_watch
296 .long sys_migrate_pages
297 .long sys_openat /* 295 */
298 .long sys_mkdirat
299 .long sys_mknodat
300 .long sys_fchownat
301 .long sys_futimesat
302 .long sys_newfstatat /* 300 */
303 .long sys_unlinkat
304 .long sys_renameat
305 .long sys_linkat
306 .long sys_symlinkat
307 .long sys_readlinkat /* 305 */
308 .long sys_fchmodat
309 .long sys_faccessat
310 .long sys_pselect6
311 .long sys_ppoll
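Before the C library grows wrappers, the new entries can be exercised from userspace by number; the numbers follow the comments in the table above (openat is 295 on i386). A hedged example, with illustrative paths:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_openat
#define __NR_openat 295                 /* from the table above */
#endif

int main(void)
{
        int dirfd = open("/etc", O_RDONLY);
        int fd;

        if (dirfd < 0)
                return 1;
        fd = syscall(__NR_openat, dirfd, "hostname", O_RDONLY);
        if (fd < 0)
                perror("openat");
        else
                printf("openat(dirfd, \"hostname\") via syscall %d: fd=%d\n",
                       __NR_openat, fd);
        return 0;
}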
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index 41c5b2dc6200..a14d594bfbeb 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -302,6 +302,12 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
302 do_timer_interrupt(irq, regs); 302 do_timer_interrupt(irq, regs);
303 303
304 write_sequnlock(&xtime_lock); 304 write_sequnlock(&xtime_lock);
305
306#ifdef CONFIG_X86_LOCAL_APIC
307 if (using_apic_timer)
308 smp_send_timer_broadcast_ipi(regs);
309#endif
310
305 return IRQ_HANDLED; 311 return IRQ_HANDLED;
306} 312}
307 313
diff --git a/arch/i386/kernel/time_hpet.c b/arch/i386/kernel/time_hpet.c
index 9caeaa315cd7..a529f0cdce17 100644
--- a/arch/i386/kernel/time_hpet.c
+++ b/arch/i386/kernel/time_hpet.c
@@ -259,8 +259,6 @@ __setup("hpet=", hpet_setup);
259#include <linux/mc146818rtc.h> 259#include <linux/mc146818rtc.h>
260#include <linux/rtc.h> 260#include <linux/rtc.h>
261 261
262extern irqreturn_t rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs);
263
264#define DEFAULT_RTC_INT_FREQ 64 262#define DEFAULT_RTC_INT_FREQ 64
265#define RTC_NUM_INTS 1 263#define RTC_NUM_INTS 1
266 264
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 53ad954e3ba4..0aaebf3e1cfa 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -112,33 +112,38 @@ static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
112 p < (void *)tinfo + THREAD_SIZE - 3; 112 p < (void *)tinfo + THREAD_SIZE - 3;
113} 113}
114 114
115static void print_addr_and_symbol(unsigned long addr, char *log_lvl)
116{
117 printk(log_lvl);
118 printk(" [<%08lx>] ", addr);
119 print_symbol("%s", addr);
120 printk("\n");
121}
122
115static inline unsigned long print_context_stack(struct thread_info *tinfo, 123static inline unsigned long print_context_stack(struct thread_info *tinfo,
116 unsigned long *stack, unsigned long ebp) 124 unsigned long *stack, unsigned long ebp,
125 char *log_lvl)
117{ 126{
118 unsigned long addr; 127 unsigned long addr;
119 128
120#ifdef CONFIG_FRAME_POINTER 129#ifdef CONFIG_FRAME_POINTER
121 while (valid_stack_ptr(tinfo, (void *)ebp)) { 130 while (valid_stack_ptr(tinfo, (void *)ebp)) {
122 addr = *(unsigned long *)(ebp + 4); 131 addr = *(unsigned long *)(ebp + 4);
123 printk(" [<%08lx>] ", addr); 132 print_addr_and_symbol(addr, log_lvl);
124 print_symbol("%s", addr);
125 printk("\n");
126 ebp = *(unsigned long *)ebp; 133 ebp = *(unsigned long *)ebp;
127 } 134 }
128#else 135#else
129 while (valid_stack_ptr(tinfo, stack)) { 136 while (valid_stack_ptr(tinfo, stack)) {
130 addr = *stack++; 137 addr = *stack++;
131 if (__kernel_text_address(addr)) { 138 if (__kernel_text_address(addr))
132 printk(" [<%08lx>]", addr); 139 print_addr_and_symbol(addr, log_lvl);
133 print_symbol(" %s", addr);
134 printk("\n");
135 }
136 } 140 }
137#endif 141#endif
138 return ebp; 142 return ebp;
139} 143}
140 144
141void show_trace(struct task_struct *task, unsigned long * stack) 145static void show_trace_log_lvl(struct task_struct *task,
146 unsigned long *stack, char *log_lvl)
142{ 147{
143 unsigned long ebp; 148 unsigned long ebp;
144 149
@@ -157,15 +162,21 @@ void show_trace(struct task_struct *task, unsigned long * stack)
157 struct thread_info *context; 162 struct thread_info *context;
158 context = (struct thread_info *) 163 context = (struct thread_info *)
159 ((unsigned long)stack & (~(THREAD_SIZE - 1))); 164 ((unsigned long)stack & (~(THREAD_SIZE - 1)));
160 ebp = print_context_stack(context, stack, ebp); 165 ebp = print_context_stack(context, stack, ebp, log_lvl);
161 stack = (unsigned long*)context->previous_esp; 166 stack = (unsigned long*)context->previous_esp;
162 if (!stack) 167 if (!stack)
163 break; 168 break;
164 printk(" =======================\n"); 169 printk(KERN_EMERG " =======================\n");
165 } 170 }
166} 171}
167 172
168void show_stack(struct task_struct *task, unsigned long *esp) 173void show_trace(struct task_struct *task, unsigned long * stack)
174{
175 show_trace_log_lvl(task, stack, "");
176}
177
178static void show_stack_log_lvl(struct task_struct *task, unsigned long *esp,
179 char *log_lvl)
169{ 180{
170 unsigned long *stack; 181 unsigned long *stack;
171 int i; 182 int i;
@@ -178,15 +189,26 @@ void show_stack(struct task_struct *task, unsigned long *esp)
178 } 189 }
179 190
180 stack = esp; 191 stack = esp;
192 printk(log_lvl);
181 for(i = 0; i < kstack_depth_to_print; i++) { 193 for(i = 0; i < kstack_depth_to_print; i++) {
182 if (kstack_end(stack)) 194 if (kstack_end(stack))
183 break; 195 break;
184 if (i && ((i % 8) == 0)) 196 if (i && ((i % 8) == 0)) {
185 printk("\n "); 197 printk("\n");
198 printk(log_lvl);
199 printk(" ");
200 }
186 printk("%08lx ", *stack++); 201 printk("%08lx ", *stack++);
187 } 202 }
188 printk("\nCall Trace:\n"); 203 printk("\n");
189 show_trace(task, esp); 204 printk(log_lvl);
205 printk("Call Trace:\n");
206 show_trace_log_lvl(task, esp, log_lvl);
207}
208
209void show_stack(struct task_struct *task, unsigned long *esp)
210{
211 show_stack_log_lvl(task, esp, "");
190} 212}
191 213
192/* 214/*
@@ -216,18 +238,18 @@ void show_registers(struct pt_regs *regs)
216 ss = regs->xss & 0xffff; 238 ss = regs->xss & 0xffff;
217 } 239 }
218 print_modules(); 240 print_modules();
219 printk("CPU: %d\nEIP: %04x:[<%08lx>] %s VLI\nEFLAGS: %08lx" 241 printk(KERN_EMERG "CPU: %d\nEIP: %04x:[<%08lx>] %s VLI\n"
220 " (%s) \n", 242 "EFLAGS: %08lx (%s) \n",
221 smp_processor_id(), 0xffff & regs->xcs, regs->eip, 243 smp_processor_id(), 0xffff & regs->xcs, regs->eip,
222 print_tainted(), regs->eflags, system_utsname.release); 244 print_tainted(), regs->eflags, system_utsname.release);
223 print_symbol("EIP is at %s\n", regs->eip); 245 print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
224 printk("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n", 246 printk(KERN_EMERG "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
225 regs->eax, regs->ebx, regs->ecx, regs->edx); 247 regs->eax, regs->ebx, regs->ecx, regs->edx);
226 printk("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n", 248 printk(KERN_EMERG "esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
227 regs->esi, regs->edi, regs->ebp, esp); 249 regs->esi, regs->edi, regs->ebp, esp);
228 printk("ds: %04x es: %04x ss: %04x\n", 250 printk(KERN_EMERG "ds: %04x es: %04x ss: %04x\n",
229 regs->xds & 0xffff, regs->xes & 0xffff, ss); 251 regs->xds & 0xffff, regs->xes & 0xffff, ss);
230 printk("Process %s (pid: %d, threadinfo=%p task=%p)", 252 printk(KERN_EMERG "Process %s (pid: %d, threadinfo=%p task=%p)",
231 current->comm, current->pid, current_thread_info(), current); 253 current->comm, current->pid, current_thread_info(), current);
232 /* 254 /*
233 * When in-kernel, we also print out the stack and code at the 255 * When in-kernel, we also print out the stack and code at the
@@ -236,10 +258,10 @@ void show_registers(struct pt_regs *regs)
236 if (in_kernel) { 258 if (in_kernel) {
237 u8 __user *eip; 259 u8 __user *eip;
238 260
239 printk("\nStack: "); 261 printk("\n" KERN_EMERG "Stack: ");
240 show_stack(NULL, (unsigned long*)esp); 262 show_stack_log_lvl(NULL, (unsigned long *)esp, KERN_EMERG);
241 263
242 printk("Code: "); 264 printk(KERN_EMERG "Code: ");
243 265
244 eip = (u8 __user *)regs->eip - 43; 266 eip = (u8 __user *)regs->eip - 43;
245 for (i = 0; i < 64; i++, eip++) { 267 for (i = 0; i < 64; i++, eip++) {
@@ -280,15 +302,15 @@ static void handle_BUG(struct pt_regs *regs)
280 (unsigned long)file < PAGE_OFFSET || __get_user(c, file)) 302 (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
281 file = "<bad filename>"; 303 file = "<bad filename>";
282 304
283 printk("------------[ cut here ]------------\n"); 305 printk(KERN_EMERG "------------[ cut here ]------------\n");
284 printk(KERN_ALERT "kernel BUG at %s:%d!\n", file, line); 306 printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
285 307
286no_bug: 308no_bug:
287 return; 309 return;
288 310
289 /* Here we know it was a BUG but file-n-line is unavailable */ 311 /* Here we know it was a BUG but file-n-line is unavailable */
290bug: 312bug:
291 printk("Kernel BUG\n"); 313 printk(KERN_EMERG "Kernel BUG\n");
292} 314}
293 315
294/* This is gone through when something in the kernel 316/* This is gone through when something in the kernel
@@ -321,16 +343,20 @@ void die(const char * str, struct pt_regs * regs, long err)
321 if (++die.lock_owner_depth < 3) { 343 if (++die.lock_owner_depth < 3) {
322 int nl = 0; 344 int nl = 0;
323 handle_BUG(regs); 345 handle_BUG(regs);
324 printk(KERN_ALERT "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter); 346 printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
325#ifdef CONFIG_PREEMPT 347#ifdef CONFIG_PREEMPT
326 printk("PREEMPT "); 348 printk(KERN_EMERG "PREEMPT ");
327 nl = 1; 349 nl = 1;
328#endif 350#endif
329#ifdef CONFIG_SMP 351#ifdef CONFIG_SMP
352 if (!nl)
353 printk(KERN_EMERG);
330 printk("SMP "); 354 printk("SMP ");
331 nl = 1; 355 nl = 1;
332#endif 356#endif
333#ifdef CONFIG_DEBUG_PAGEALLOC 357#ifdef CONFIG_DEBUG_PAGEALLOC
358 if (!nl)
359 printk(KERN_EMERG);
334 printk("DEBUG_PAGEALLOC"); 360 printk("DEBUG_PAGEALLOC");
335 nl = 1; 361 nl = 1;
336#endif 362#endif
@@ -339,7 +365,7 @@ void die(const char * str, struct pt_regs * regs, long err)
339 notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV); 365 notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
340 show_registers(regs); 366 show_registers(regs);
341 } else 367 } else
342 printk(KERN_ERR "Recursive die() failure, output suppressed\n"); 368 printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
343 369
344 bust_spinlocks(0); 370 bust_spinlocks(0);
345 die.lock_owner = -1; 371 die.lock_owner = -1;
@@ -527,8 +553,10 @@ gp_in_kernel:
527 553
528static void mem_parity_error(unsigned char reason, struct pt_regs * regs) 554static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
529{ 555{
530 printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n"); 556 printk(KERN_EMERG "Uhhuh. NMI received. Dazed and confused, but trying "
531 printk("You probably have a hardware problem with your RAM chips\n"); 557 "to continue\n");
558 printk(KERN_EMERG "You probably have a hardware problem with your RAM "
559 "chips\n");
532 560
533 /* Clear and disable the memory parity error line. */ 561 /* Clear and disable the memory parity error line. */
534 clear_mem_error(reason); 562 clear_mem_error(reason);
@@ -538,7 +566,7 @@ static void io_check_error(unsigned char reason, struct pt_regs * regs)
538{ 566{
539 unsigned long i; 567 unsigned long i;
540 568
541 printk("NMI: IOCK error (debug interrupt?)\n"); 569 printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
542 show_registers(regs); 570 show_registers(regs);
543 571
544 /* Re-enable the IOCK line, wait for a few seconds */ 572 /* Re-enable the IOCK line, wait for a few seconds */
@@ -580,11 +608,11 @@ void die_nmi (struct pt_regs *regs, const char *msg)
580 * to get a message out. 608 * to get a message out.
581 */ 609 */
582 bust_spinlocks(1); 610 bust_spinlocks(1);
583 printk(msg); 611 printk(KERN_EMERG "%s", msg);
584 printk(" on CPU%d, eip %08lx, registers:\n", 612 printk(" on CPU%d, eip %08lx, registers:\n",
585 smp_processor_id(), regs->eip); 613 smp_processor_id(), regs->eip);
586 show_registers(regs); 614 show_registers(regs);
587 printk("console shuts up ...\n"); 615 printk(KERN_EMERG "console shuts up ...\n");
588 console_silent(); 616 console_silent();
589 spin_unlock(&nmi_print_lock); 617 spin_unlock(&nmi_print_lock);
590 bust_spinlocks(0); 618 bust_spinlocks(0);
@@ -990,8 +1018,8 @@ asmlinkage void math_state_restore(struct pt_regs regs)
990 1018
991asmlinkage void math_emulate(long arg) 1019asmlinkage void math_emulate(long arg)
992{ 1020{
993 printk("math-emulation not enabled and no coprocessor found.\n"); 1021 printk(KERN_EMERG "math-emulation not enabled and no coprocessor found.\n");
994 printk("killing %s.\n",current->comm); 1022 printk(KERN_EMERG "killing %s.\n",current->comm);
995 force_sig(SIGFPE,current); 1023 force_sig(SIGFPE,current);
996 schedule(); 1024 schedule();
997} 1025}
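Note on the traps.c hunks above: in this kernel generation a KERN_* prefix applies only to the single printk() it starts, so every line of an oops must state its own level or it falls back to the default; an unterminated printk() can also be continued by a following printk() that carries no level, which is why die() emits a bare printk(KERN_EMERG) before the "SMP " and "DEBUG_PAGEALLOC" fragments. A minimal illustrative sketch, not part of the patch (the message text is made up):

	#include <linux/kernel.h>

	static void example_oops_banner(void)
	{
		/* each full line states its level explicitly */
		printk(KERN_EMERG "example: fatal condition detected\n");
		/* start an emergency-level line, then let an unprefixed
		 * printk() continue it on the same line */
		printk(KERN_EMERG);
		printk("SMP ");
	}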
diff --git a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c
index fc1993564f98..f51c894a7da5 100644
--- a/arch/i386/kernel/vm86.c
+++ b/arch/i386/kernel/vm86.c
@@ -4,7 +4,7 @@
4 * Copyright (C) 1994 Linus Torvalds 4 * Copyright (C) 1994 Linus Torvalds
5 * 5 *
6 * 29 dec 2001 - Fixed oopses caused by unchecked access to the vm86 6 * 29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
7 * stack - Manfred Spraul <manfreds@colorfullife.com> 7 * stack - Manfred Spraul <manfred@colorfullife.com>
8 * 8 *
9 * 22 mar 2002 - Manfred detected the stackfaults, but didn't handle 9 * 22 mar 2002 - Manfred detected the stackfaults, but didn't handle
10 * them correctly. Now the emulation will be in a 10 * them correctly. Now the emulation will be in a
@@ -30,6 +30,7 @@
30 * 30 *
31 */ 31 */
32 32
33#include <linux/capability.h>
33#include <linux/config.h> 34#include <linux/config.h>
34#include <linux/errno.h> 35#include <linux/errno.h>
35#include <linux/interrupt.h> 36#include <linux/interrupt.h>
@@ -310,7 +311,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
310 "movl %1,%%ebp\n\t" 311 "movl %1,%%ebp\n\t"
311 "jmp resume_userspace" 312 "jmp resume_userspace"
312 : /* no outputs */ 313 : /* no outputs */
313 :"r" (&info->regs), "r" (tsk->thread_info) : "ax"); 314 :"r" (&info->regs), "r" (task_thread_info(tsk)) : "ax");
314 /* we never return here */ 315 /* we never return here */
315} 316}
316 317
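Note on the vm86.c hunk above: the inline-asm operand swaps a direct tsk->thread_info dereference for the task_thread_info() accessor, which keeps the behaviour identical while hiding the field behind a helper. A hedged sketch of what that accessor is assumed to look like in this tree (the body below is an assumption, not taken from the patch):

	#include <linux/sched.h>

	/* assumed definition: a thin accessor so callers stop poking the
	 * thread_info field of task_struct directly */
	#define task_thread_info(task)	((task)->thread_info)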
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 7df494b51a5b..2700f01994ba 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -268,7 +268,7 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
268 pkmap_page_table = pte; 268 pkmap_page_table = pte;
269} 269}
270 270
271static void __devinit free_new_highpage(struct page *page) 271static void __meminit free_new_highpage(struct page *page)
272{ 272{
273 set_page_count(page, 1); 273 set_page_count(page, 1);
274 __free_page(page); 274 __free_page(page);
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index c30a16df6440..d0cadb33b54c 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -222,6 +222,10 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
222{ 222{
223 if (PageHighMem(page)) 223 if (PageHighMem(page))
224 return; 224 return;
225 if (!enable)
226 mutex_debug_check_no_locks_freed(page_address(page),
227 numpages * PAGE_SIZE);
228
225 /* the return value is ignored - the calls cannot fail, 229 /* the return value is ignored - the calls cannot fail,
226 * large pages are disabled at boot time. 230 * large pages are disabled at boot time.
227 */ 231 */
diff --git a/arch/i386/pci/acpi.c b/arch/i386/pci/acpi.c
index 4c4522b43be5..b33aea845f58 100644
--- a/arch/i386/pci/acpi.c
+++ b/arch/i386/pci/acpi.c
@@ -53,7 +53,7 @@ static int __init pci_acpi_init(void)
53 * don't use pci_enable_device(). 53 * don't use pci_enable_device().
54 */ 54 */
55 printk(KERN_INFO "PCI: Routing PCI interrupts for all devices because \"pci=routeirq\" specified\n"); 55 printk(KERN_INFO "PCI: Routing PCI interrupts for all devices because \"pci=routeirq\" specified\n");
56 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) 56 for_each_pci_dev(dev)
57 acpi_pci_irq_enable(dev); 57 acpi_pci_irq_enable(dev);
58 } else 58 } else
59 printk(KERN_INFO "PCI: If a device doesn't work, try \"pci=routeirq\". If it helps, post a report\n"); 59 printk(KERN_INFO "PCI: If a device doesn't work, try \"pci=routeirq\". If it helps, post a report\n");
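Note on the acpi.c hunk above: the open-coded pci_get_device() walk is replaced by for_each_pci_dev(), and the before/after lines show the two are meant to be equivalent. A hedged sketch of the helper the new code relies on, assumed to expand roughly as follows:

	#include <linux/pci.h>

	/* assumed expansion: visit every PCI device, carrying the reference
	 * from one iteration to the next exactly like the loop it replaces */
	#define for_each_pci_dev(d) \
		while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)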
diff --git a/arch/i386/pci/fixup.c b/arch/i386/pci/fixup.c
index eeb1b1f2d548..83c3645ccc43 100644
--- a/arch/i386/pci/fixup.c
+++ b/arch/i386/pci/fixup.c
@@ -413,6 +413,13 @@ static struct dmi_system_id __devinitdata toshiba_ohci1394_dmi_table[] = {
413 DMI_MATCH(DMI_PRODUCT_VERSION, "PSM4"), 413 DMI_MATCH(DMI_PRODUCT_VERSION, "PSM4"),
414 }, 414 },
415 }, 415 },
416 {
417 .ident = "Toshiba A40 based laptop",
418 .matches = {
419 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
420 DMI_MATCH(DMI_PRODUCT_VERSION, "PSA40U"),
421 },
422 },
416 { } 423 { }
417}; 424};
418 425
@@ -442,3 +449,19 @@ static void __devinit pci_post_fixup_toshiba_ohci1394(struct pci_dev *dev)
442} 449}
443DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_TI, 0x8032, 450DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_TI, 0x8032,
444 pci_post_fixup_toshiba_ohci1394); 451 pci_post_fixup_toshiba_ohci1394);
452
453
454/*
455 * Prevent the BIOS trapping accesses to the Cyrix CS5530A video device
456 * configuration space.
457 */
458static void __devinit pci_early_fixup_cyrix_5530(struct pci_dev *dev)
459{
460 u8 r;
461 /* clear 'F4 Video Configuration Trap' bit */
462 pci_read_config_byte(dev, 0x42, &r);
463 r &= 0xfd;
464 pci_write_config_byte(dev, 0x42, r);
465}
466DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY,
467 pci_early_fixup_cyrix_5530);
diff --git a/arch/i386/pci/irq.c b/arch/i386/pci/irq.c
index ee8e01697d96..e715aa930036 100644
--- a/arch/i386/pci/irq.c
+++ b/arch/i386/pci/irq.c
@@ -78,7 +78,7 @@ static inline struct irq_routing_table * pirq_check_routing_table(u8 *addr)
78 for (i=0; i < rt->size; i++) 78 for (i=0; i < rt->size; i++)
79 sum += addr[i]; 79 sum += addr[i];
80 if (!sum) { 80 if (!sum) {
81 DBG("PCI: Interrupt Routing Table found at 0x%p\n", rt); 81 DBG(KERN_DEBUG "PCI: Interrupt Routing Table found at 0x%p\n", rt);
82 return rt; 82 return rt;
83 } 83 }
84 return NULL; 84 return NULL;
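Note on the irq.c hunks that follow: KERN_DEBUG is passed straight into DBG(), which only makes sense if DBG() forwards its arguments to printk() when debugging is compiled in and compiles to nothing otherwise. A hedged sketch of the assumed macro (the real definition lives in the local pci.h header, not in this diff):

	#include <linux/kernel.h>

	#ifdef DEBUG
	#define DBG(fmt, args...)	printk(fmt, ## args)
	#else
	#define DBG(fmt, args...)
	#endif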
@@ -128,7 +128,7 @@ static void __init pirq_peer_trick(void)
128#ifdef DEBUG 128#ifdef DEBUG
129 { 129 {
130 int j; 130 int j;
131 DBG("%02x:%02x slot=%02x", e->bus, e->devfn/8, e->slot); 131 DBG(KERN_DEBUG "%02x:%02x slot=%02x", e->bus, e->devfn/8, e->slot);
132 for(j=0; j<4; j++) 132 for(j=0; j<4; j++)
133 DBG(" %d:%02x/%04x", j, e->irq[j].link, e->irq[j].bitmap); 133 DBG(" %d:%02x/%04x", j, e->irq[j].link, e->irq[j].bitmap);
134 DBG("\n"); 134 DBG("\n");
@@ -160,10 +160,10 @@ void eisa_set_level_irq(unsigned int irq)
160 return; 160 return;
161 161
162 eisa_irq_mask |= (1 << irq); 162 eisa_irq_mask |= (1 << irq);
163 printk("PCI: setting IRQ %u as level-triggered\n", irq); 163 printk(KERN_DEBUG "PCI: setting IRQ %u as level-triggered\n", irq);
164 val = inb(port); 164 val = inb(port);
165 if (!(val & mask)) { 165 if (!(val & mask)) {
166 DBG(" -> edge"); 166 DBG(KERN_DEBUG " -> edge");
167 outb(val | mask, port); 167 outb(val | mask, port);
168 } 168 }
169} 169}
@@ -677,11 +677,11 @@ static __init int ali_router_probe(struct irq_router *r, struct pci_dev *router,
677 { 677 {
678 case PCI_DEVICE_ID_AL_M1533: 678 case PCI_DEVICE_ID_AL_M1533:
679 case PCI_DEVICE_ID_AL_M1563: 679 case PCI_DEVICE_ID_AL_M1563:
680 printk("PCI: Using ALI IRQ Router\n"); 680 printk(KERN_DEBUG "PCI: Using ALI IRQ Router\n");
681 r->name = "ALI"; 681 r->name = "ALI";
682 r->get = pirq_ali_get; 682 r->get = pirq_ali_get;
683 r->set = pirq_ali_set; 683 r->set = pirq_ali_set;
684 return 1; 684 return 1;
685 } 685 }
686 return 0; 686 return 0;
687} 687}
@@ -749,12 +749,13 @@ static void __init pirq_find_router(struct irq_router *r)
749 r->get = NULL; 749 r->get = NULL;
750 r->set = NULL; 750 r->set = NULL;
751 751
752 DBG("PCI: Attempting to find IRQ router for %04x:%04x\n", 752 DBG(KERN_DEBUG "PCI: Attempting to find IRQ router for %04x:%04x\n",
753 rt->rtr_vendor, rt->rtr_device); 753 rt->rtr_vendor, rt->rtr_device);
754 754
755 pirq_router_dev = pci_find_slot(rt->rtr_bus, rt->rtr_devfn); 755 pirq_router_dev = pci_find_slot(rt->rtr_bus, rt->rtr_devfn);
756 if (!pirq_router_dev) { 756 if (!pirq_router_dev) {
757 DBG("PCI: Interrupt router not found at %02x:%02x\n", rt->rtr_bus, rt->rtr_devfn); 757 DBG(KERN_DEBUG "PCI: Interrupt router not found at "
758 "%02x:%02x\n", rt->rtr_bus, rt->rtr_devfn);
758 return; 759 return;
759 } 760 }
760 761
@@ -799,7 +800,7 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
799 /* Find IRQ pin */ 800 /* Find IRQ pin */
800 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); 801 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
801 if (!pin) { 802 if (!pin) {
802 DBG(" -> no interrupt pin\n"); 803 DBG(KERN_DEBUG " -> no interrupt pin\n");
803 return 0; 804 return 0;
804 } 805 }
805 pin = pin - 1; 806 pin = pin - 1;
@@ -809,16 +810,16 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
809 if (!pirq_table) 810 if (!pirq_table)
810 return 0; 811 return 0;
811 812
812 DBG("IRQ for %s[%c]", pci_name(dev), 'A' + pin); 813 DBG(KERN_DEBUG "IRQ for %s[%c]", pci_name(dev), 'A' + pin);
813 info = pirq_get_info(dev); 814 info = pirq_get_info(dev);
814 if (!info) { 815 if (!info) {
815 DBG(" -> not found in routing table\n"); 816 DBG(" -> not found in routing table\n" KERN_DEBUG);
816 return 0; 817 return 0;
817 } 818 }
818 pirq = info->irq[pin].link; 819 pirq = info->irq[pin].link;
819 mask = info->irq[pin].bitmap; 820 mask = info->irq[pin].bitmap;
820 if (!pirq) { 821 if (!pirq) {
821 DBG(" -> not routed\n"); 822 DBG(" -> not routed\n" KERN_DEBUG);
822 return 0; 823 return 0;
823 } 824 }
824 DBG(" -> PIRQ %02x, mask %04x, excl %04x", pirq, mask, pirq_table->exclusive_irqs); 825 DBG(" -> PIRQ %02x, mask %04x, excl %04x", pirq, mask, pirq_table->exclusive_irqs);
@@ -848,7 +849,10 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
848 newirq = dev->irq; 849 newirq = dev->irq;
849 if (newirq && !((1 << newirq) & mask)) { 850 if (newirq && !((1 << newirq) & mask)) {
850 if ( pci_probe & PCI_USE_PIRQ_MASK) newirq = 0; 851 if ( pci_probe & PCI_USE_PIRQ_MASK) newirq = 0;
851 else printk(KERN_WARNING "PCI: IRQ %i for device %s doesn't match PIRQ mask - try pci=usepirqmask\n", newirq, pci_name(dev)); 852 else printk("\n" KERN_WARNING
853 "PCI: IRQ %i for device %s doesn't match PIRQ mask "
854 "- try pci=usepirqmask\n" KERN_DEBUG, newirq,
855 pci_name(dev));
852 } 856 }
853 if (!newirq && assign) { 857 if (!newirq && assign) {
854 for (i = 0; i < 16; i++) { 858 for (i = 0; i < 16; i++) {
@@ -923,14 +927,14 @@ static void __init pcibios_fixup_irqs(void)
923 struct pci_dev *dev = NULL; 927 struct pci_dev *dev = NULL;
924 u8 pin; 928 u8 pin;
925 929
926 DBG("PCI: IRQ fixup\n"); 930 DBG(KERN_DEBUG "PCI: IRQ fixup\n");
927 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 931 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
928 /* 932 /*
929 * If the BIOS has set an out of range IRQ number, just ignore it. 933 * If the BIOS has set an out of range IRQ number, just ignore it.
930 * Also keep track of which IRQ's are already in use. 934 * Also keep track of which IRQ's are already in use.
931 */ 935 */
932 if (dev->irq >= 16) { 936 if (dev->irq >= 16) {
933 DBG("%s: ignoring bogus IRQ %d\n", pci_name(dev), dev->irq); 937 DBG(KERN_DEBUG "%s: ignoring bogus IRQ %d\n", pci_name(dev), dev->irq);
934 dev->irq = 0; 938 dev->irq = 0;
935 } 939 }
936 /* If the IRQ is already assigned to a PCI device, ignore its ISA use penalty */ 940 /* If the IRQ is already assigned to a PCI device, ignore its ISA use penalty */
@@ -1039,7 +1043,7 @@ static struct dmi_system_id __initdata pciirq_dmi_table[] = {
1039 1043
1040static int __init pcibios_irq_init(void) 1044static int __init pcibios_irq_init(void)
1041{ 1045{
1042 DBG("PCI: IRQ init\n"); 1046 DBG(KERN_DEBUG "PCI: IRQ init\n");
1043 1047
1044 if (pcibios_enable_irq || raw_pci_ops == NULL) 1048 if (pcibios_enable_irq || raw_pci_ops == NULL)
1045 return 0; 1049 return 0;