Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Makefile | 5
-rw-r--r--  arch/arm/boot/Makefile | 5
-rw-r--r--  arch/arm/boot/bootp/Makefile | 5
-rw-r--r--  arch/arm/mach-pxa/leds-mainstone.c | 6
-rw-r--r--  arch/arm/mach-s3c2410/cpu.c | 2
-rw-r--r--  arch/arm/mach-sa1100/collie.c | 30
-rw-r--r--  arch/arm26/Makefile | 7
-rw-r--r--  arch/arm26/boot/Makefile | 5
-rw-r--r--  arch/cris/arch-v32/drivers/cryptocop.c | 2
-rw-r--r--  arch/cris/kernel/irq.c | 10
-rw-r--r--  arch/cris/kernel/process.c | 3
-rw-r--r--  arch/frv/kernel/gdb-stub.c | 2
-rw-r--r--  arch/frv/kernel/irq.c | 10
-rw-r--r--  arch/h8300/kernel/process.c | 4
-rw-r--r--  arch/i386/Kconfig | 26
-rw-r--r--  arch/i386/Kconfig.debug | 13
-rw-r--r--  arch/i386/Makefile | 7
-rw-r--r--  arch/i386/Makefile.cpu | 4
-rw-r--r--  arch/i386/boot/edd.S | 2
-rw-r--r--  arch/i386/kernel/Makefile | 2
-rw-r--r--  arch/i386/kernel/alternative.c | 321
-rw-r--r--  arch/i386/kernel/apic.c | 1
-rw-r--r--  arch/i386/kernel/apm.c | 2
-rw-r--r--  arch/i386/kernel/cpu/centaur.c | 1
-rw-r--r--  arch/i386/kernel/cpu/common.c | 47
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/Kconfig | 24
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/cpufreq-nforce2.c | 64
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/elanfreq.c | 109
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/gx-suspmod.c | 183
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/longhaul.h | 4
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/p4-clockmod.c | 26
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k6.c | 16
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k7.c | 10
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k8.c | 41
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k8.h | 6
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c | 4
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-lib.c | 42
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-lib.h | 20
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-smi.c | 53
-rw-r--r--  arch/i386/kernel/cpu/intel.c | 12
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c | 4
-rw-r--r--  arch/i386/kernel/cpu/proc.c | 4
-rw-r--r--  arch/i386/kernel/crash.c | 2
-rw-r--r--  arch/i386/kernel/dmi_scan.c | 33
-rw-r--r--  arch/i386/kernel/efi.c | 2
-rw-r--r--  arch/i386/kernel/entry.S | 4
-rw-r--r--  arch/i386/kernel/head.S | 5
-rw-r--r--  arch/i386/kernel/io_apic.c | 25
-rw-r--r--  arch/i386/kernel/kprobes.c | 4
-rw-r--r--  arch/i386/kernel/microcode.c | 4
-rw-r--r--  arch/i386/kernel/module.c | 32
-rw-r--r--  arch/i386/kernel/mpparse.c | 7
-rw-r--r--  arch/i386/kernel/nmi.c | 6
-rw-r--r--  arch/i386/kernel/process.c | 2
-rw-r--r--  arch/i386/kernel/ptrace.c | 4
-rw-r--r--  arch/i386/kernel/semaphore.c | 8
-rw-r--r--  arch/i386/kernel/setup.c | 140
-rw-r--r--  arch/i386/kernel/signal.c | 7
-rw-r--r--  arch/i386/kernel/smpboot.c | 36
-rw-r--r--  arch/i386/kernel/topology.c | 9
-rw-r--r--  arch/i386/kernel/traps.c | 57
-rw-r--r--  arch/i386/kernel/vm86.c | 12
-rw-r--r--  arch/i386/kernel/vmlinux.lds.S | 23
-rw-r--r--  arch/i386/kernel/vsyscall-sysenter.S | 3
-rw-r--r--  arch/i386/mach-es7000/es7000.h | 5
-rw-r--r--  arch/i386/mach-es7000/es7000plat.c | 6
-rw-r--r--  arch/i386/mach-visws/reboot.c | 1
-rw-r--r--  arch/i386/mm/fault.c | 210
-rw-r--r--  arch/i386/mm/init.c | 45
-rw-r--r--  arch/i386/oprofile/nmi_int.c | 7
-rw-r--r--  arch/i386/pci/Makefile | 2
-rw-r--r--  arch/i386/pci/common.c | 32
-rw-r--r--  arch/i386/pci/direct.c | 15
-rw-r--r--  arch/i386/pci/init.c | 25
-rw-r--r--  arch/i386/pci/mmconfig.c | 11
-rw-r--r--  arch/i386/pci/pcbios.c | 4
-rw-r--r--  arch/i386/pci/pci.h | 3
-rw-r--r--  arch/ia64/Kconfig | 9
-rw-r--r--  arch/ia64/Makefile | 5
-rw-r--r--  arch/ia64/configs/gensparse_defconfig | 1
-rw-r--r--  arch/ia64/configs/sn2_defconfig | 1
-rw-r--r--  arch/ia64/defconfig | 1
-rw-r--r--  arch/ia64/dig/setup.c | 5
-rw-r--r--  arch/ia64/hp/sim/simserial.c | 7
-rw-r--r--  arch/ia64/ia32/sys_ia32.c | 14
-rw-r--r--  arch/ia64/kernel/acpi.c | 35
-rw-r--r--  arch/ia64/kernel/ivt.S | 1
-rw-r--r--  arch/ia64/kernel/machvec.c | 19
-rw-r--r--  arch/ia64/kernel/mca.c | 110
-rw-r--r--  arch/ia64/kernel/mca_drv.c | 22
-rw-r--r--  arch/ia64/kernel/mca_drv.h | 7
-rw-r--r--  arch/ia64/kernel/mca_drv_asm.S | 13
-rw-r--r--  arch/ia64/kernel/numa.c | 2
-rw-r--r--  arch/ia64/kernel/patch.c | 8
-rw-r--r--  arch/ia64/kernel/ptrace.c | 10
-rw-r--r--  arch/ia64/kernel/setup.c | 61
-rw-r--r--  arch/ia64/kernel/smpboot.c | 109
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S | 60
-rw-r--r--  arch/ia64/mm/contig.c | 8
-rw-r--r--  arch/ia64/mm/discontig.c | 2
-rw-r--r--  arch/ia64/mm/hugetlbpage.c | 7
-rw-r--r--  arch/ia64/mm/init.c | 16
-rw-r--r--  arch/ia64/sn/kernel/bte.c | 2
-rw-r--r--  arch/ia64/sn/kernel/io_init.c | 29
-rw-r--r--  arch/ia64/sn/kernel/irq.c | 21
-rw-r--r--  arch/ia64/sn/kernel/tiocx.c | 10
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_provider.c | 17
-rw-r--r--  arch/ia64/sn/pci/tioca_provider.c | 2
-rw-r--r--  arch/m32r/Kconfig.debug | 2
-rw-r--r--  arch/m32r/Makefile | 5
-rw-r--r--  arch/m32r/kernel/irq.c | 10
-rw-r--r--  arch/m68k/bvme6000/rtc.c | 4
-rw-r--r--  arch/m68k/kernel/process.c | 2
-rw-r--r--  arch/m68knommu/kernel/process.c | 2
-rw-r--r--  arch/mips/kernel/irq.c | 10
-rw-r--r--  arch/mips/kernel/smp.c | 4
-rw-r--r--  arch/mips/kernel/sysirix.c | 22
-rw-r--r--  arch/mips/mm/dma-ip32.c | 6
-rw-r--r--  arch/mips/sgi-ip27/ip27-irq.c | 5
-rw-r--r--  arch/parisc/kernel/process.c | 5
-rw-r--r--  arch/parisc/kernel/smp.c | 25
-rw-r--r--  arch/powerpc/Makefile | 2
-rw-r--r--  arch/powerpc/kernel/irq.c | 5
-rw-r--r--  arch/powerpc/kernel/kprobes.c | 4
-rw-r--r--  arch/powerpc/kernel/setup-common.c | 5
-rw-r--r--  arch/powerpc/kernel/setup_32.c | 5
-rw-r--r--  arch/powerpc/lib/strcase.c | 4
-rw-r--r--  arch/powerpc/platforms/powermac/smp.c | 4
-rw-r--r--  arch/ppc/8xx_io/cs4218_tdm.c | 10
-rw-r--r--  arch/ppc/Makefile | 2
-rw-r--r--  arch/ppc/boot/Makefile | 8
-rw-r--r--  arch/ppc/boot/openfirmware/Makefile | 7
-rw-r--r--  arch/ppc/kernel/setup.c | 10
-rw-r--r--  arch/ppc/syslib/ppc85xx_setup.c | 2
-rw-r--r--  arch/s390/Kconfig | 2
-rw-r--r--  arch/s390/appldata/appldata_base.c | 3
-rw-r--r--  arch/s390/kernel/debug.c | 11
-rw-r--r--  arch/s390/kernel/process.c | 2
-rw-r--r--  arch/s390/kernel/setup.c | 108
-rw-r--r--  arch/s390/kernel/smp.c | 8
-rw-r--r--  arch/s390/mm/cmm.c | 6
-rw-r--r--  arch/sh/Makefile | 2
-rw-r--r--  arch/sh/kernel/irq.c | 5
-rw-r--r--  arch/sh/kernel/process.c | 1
-rw-r--r--  arch/sh/kernel/setup.c | 5
-rw-r--r--  arch/sh64/kernel/irq.c | 5
-rw-r--r--  arch/sparc/Kconfig | 1
-rw-r--r--  arch/sparc/kernel/irq.c | 71
-rw-r--r--  arch/sparc/kernel/smp.c | 108
-rw-r--r--  arch/sparc/kernel/sparc_ksyms.c | 4
-rw-r--r--  arch/sparc/kernel/sun4d_irq.c | 10
-rw-r--r--  arch/sparc/kernel/sun4d_smp.c | 16
-rw-r--r--  arch/sparc/kernel/sun4m_smp.c | 183
-rw-r--r--  arch/sparc/mm/srmmu.c | 6
-rw-r--r--  arch/sparc64/Kconfig.debug | 2
-rw-r--r--  arch/sparc64/kernel/irq.c | 6
-rw-r--r--  arch/sparc64/kernel/smp.c | 35
-rw-r--r--  arch/sparc64/mm/init.c | 4
-rw-r--r--  arch/um/Makefile | 7
-rw-r--r--  arch/um/kernel/um_arch.c | 12
-rw-r--r--  arch/v850/kernel/process.c | 2
-rw-r--r--  arch/x86_64/Kconfig | 40
-rw-r--r--  arch/x86_64/Makefile | 6
-rw-r--r--  arch/x86_64/defconfig | 34
-rw-r--r--  arch/x86_64/ia32/ia32_binfmt.c | 2
-rw-r--r--  arch/x86_64/ia32/sys_ia32.c | 16
-rw-r--r--  arch/x86_64/kernel/aperture.c | 4
-rw-r--r--  arch/x86_64/kernel/apic.c | 20
-rw-r--r--  arch/x86_64/kernel/early_printk.c | 116
-rw-r--r--  arch/x86_64/kernel/entry.S | 2
-rw-r--r--  arch/x86_64/kernel/functionlist | 1286
-rw-r--r--  arch/x86_64/kernel/head.S | 26
-rw-r--r--  arch/x86_64/kernel/io_apic.c | 10
-rw-r--r--  arch/x86_64/kernel/irq.c | 21
-rw-r--r--  arch/x86_64/kernel/kprobes.c | 4
-rw-r--r--  arch/x86_64/kernel/mce.c | 3
-rw-r--r--  arch/x86_64/kernel/mpparse.c | 19
-rw-r--r--  arch/x86_64/kernel/nmi.c | 5
-rw-r--r--  arch/x86_64/kernel/pci-dma.c | 3
-rw-r--r--  arch/x86_64/kernel/pci-gart.c | 11
-rw-r--r--  arch/x86_64/kernel/pmtimer.c | 3
-rw-r--r--  arch/x86_64/kernel/process.c | 11
-rw-r--r--  arch/x86_64/kernel/ptrace.c | 6
-rw-r--r--  arch/x86_64/kernel/setup.c | 85
-rw-r--r--  arch/x86_64/kernel/setup64.c | 18
-rw-r--r--  arch/x86_64/kernel/signal.c | 4
-rw-r--r--  arch/x86_64/kernel/smp.c | 6
-rw-r--r--  arch/x86_64/kernel/time.c | 129
-rw-r--r--  arch/x86_64/kernel/traps.c | 21
-rw-r--r--  arch/x86_64/kernel/vmlinux.lds.S | 6
-rw-r--r--  arch/x86_64/kernel/x8664_ksyms.c | 4
-rw-r--r--  arch/x86_64/lib/thunk.S | 1
-rw-r--r--  arch/x86_64/mm/fault.c | 79
-rw-r--r--  arch/x86_64/mm/init.c | 36
-rw-r--r--  arch/x86_64/mm/k8topology.c | 2
-rw-r--r--  arch/x86_64/mm/numa.c | 24
-rw-r--r--  arch/x86_64/mm/srat.c | 8
-rw-r--r--  arch/x86_64/pci/Makefile | 3
-rw-r--r--  arch/x86_64/pci/mmconfig.c | 18
-rw-r--r--  arch/xtensa/kernel/irq.c | 15
-rw-r--r--  arch/xtensa/platform-iss/console.c | 4
201 files changed, 3650 insertions, 1805 deletions
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index b5b1e4087516..99c0d323719a 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -1,6 +1,9 @@
1# 1#
2# arch/arm/Makefile 2# arch/arm/Makefile
3# 3#
4# This file is included by the global makefile so that you can add your own
5# architecture-specific flags and dependencies.
6#
4# This file is subject to the terms and conditions of the GNU General Public 7# This file is subject to the terms and conditions of the GNU General Public
5# License. See the file "COPYING" in the main directory of this archive 8# License. See the file "COPYING" in the main directory of this archive
6# for more details. 9# for more details.
@@ -177,7 +180,7 @@ endif
177 180
178archprepare: maketools 181archprepare: maketools
179 182
180.PHONY: maketools FORCE 183PHONY += maketools FORCE
181maketools: include/linux/version.h include/asm-arm/.arch FORCE 184maketools: include/linux/version.h include/asm-arm/.arch FORCE
182 $(Q)$(MAKE) $(build)=arch/arm/tools include/asm-arm/mach-types.h 185 $(Q)$(MAKE) $(build)=arch/arm/tools include/asm-arm/mach-types.h
183 186
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index a174d63395ea..ec9c400c7f82 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -1,6 +1,9 @@
1# 1#
2# arch/arm/boot/Makefile 2# arch/arm/boot/Makefile
3# 3#
4# This file is included by the global makefile so that you can add your own
5# architecture-specific flags and dependencies.
6#
4# This file is subject to the terms and conditions of the GNU General Public 7# This file is subject to the terms and conditions of the GNU General Public
5# License. See the file "COPYING" in the main directory of this archive 8# License. See the file "COPYING" in the main directory of this archive
6# for more details. 9# for more details.
@@ -73,7 +76,7 @@ $(obj)/bootpImage: $(obj)/bootp/bootp FORCE
73 $(call if_changed,objcopy) 76 $(call if_changed,objcopy)
74 @echo ' Kernel: $@ is ready' 77 @echo ' Kernel: $@ is ready'
75 78
76.PHONY: initrd FORCE 79PHONY += initrd FORCE
77initrd: 80initrd:
78 @test "$(INITRD_PHYS)" != "" || \ 81 @test "$(INITRD_PHYS)" != "" || \
79 (echo This machine does not support INITRD; exit -1) 82 (echo This machine does not support INITRD; exit -1)
diff --git a/arch/arm/boot/bootp/Makefile b/arch/arm/boot/bootp/Makefile
index 8e8879b6b3d7..c394e305447c 100644
--- a/arch/arm/boot/bootp/Makefile
+++ b/arch/arm/boot/bootp/Makefile
@@ -1,6 +1,9 @@
1# 1#
2# linux/arch/arm/boot/bootp/Makefile 2# linux/arch/arm/boot/bootp/Makefile
3# 3#
4# This file is included by the global makefile so that you can add your own
5# architecture-specific flags and dependencies.
6#
4 7
5LDFLAGS_bootp :=-p --no-undefined -X \ 8LDFLAGS_bootp :=-p --no-undefined -X \
6 --defsym initrd_phys=$(INITRD_PHYS) \ 9 --defsym initrd_phys=$(INITRD_PHYS) \
@@ -21,4 +24,4 @@ $(obj)/kernel.o: arch/arm/boot/zImage FORCE
21 24
22$(obj)/initrd.o: $(INITRD) FORCE 25$(obj)/initrd.o: $(INITRD) FORCE
23 26
24.PHONY: $(INITRD) FORCE 27PHONY += $(INITRD) FORCE
diff --git a/arch/arm/mach-pxa/leds-mainstone.c b/arch/arm/mach-pxa/leds-mainstone.c
index bbd3f87a9fc2..c06d3d7a8dd4 100644
--- a/arch/arm/mach-pxa/leds-mainstone.c
+++ b/arch/arm/mach-pxa/leds-mainstone.c
@@ -85,7 +85,7 @@ void mainstone_leds_event(led_event_t evt)
85 break; 85 break;
86 86
87 case led_green_on: 87 case led_green_on:
88 hw_led_state |= D21;; 88 hw_led_state |= D21;
89 break; 89 break;
90 90
91 case led_green_off: 91 case led_green_off:
@@ -93,7 +93,7 @@ void mainstone_leds_event(led_event_t evt)
93 break; 93 break;
94 94
95 case led_amber_on: 95 case led_amber_on:
96 hw_led_state |= D22;; 96 hw_led_state |= D22;
97 break; 97 break;
98 98
99 case led_amber_off: 99 case led_amber_off:
@@ -101,7 +101,7 @@ void mainstone_leds_event(led_event_t evt)
101 break; 101 break;
102 102
103 case led_red_on: 103 case led_red_on:
104 hw_led_state |= D23;; 104 hw_led_state |= D23;
105 break; 105 break;
106 106
107 case led_red_off: 107 case led_red_off:
diff --git a/arch/arm/mach-s3c2410/cpu.c b/arch/arm/mach-s3c2410/cpu.c
index 00a379334b60..70c34fcf7858 100644
--- a/arch/arm/mach-s3c2410/cpu.c
+++ b/arch/arm/mach-s3c2410/cpu.c
@@ -146,7 +146,7 @@ void s3c24xx_set_board(struct s3c24xx_board *b)
146 board = b; 146 board = b;
147 147
148 if (b->clocks_count != 0) { 148 if (b->clocks_count != 0) {
149 struct clk **ptr = b->clocks;; 149 struct clk **ptr = b->clocks;
150 150
151 for (i = b->clocks_count; i > 0; i--, ptr++) 151 for (i = b->clocks_count; i > 0; i--, ptr++)
152 s3c24xx_register_clock(*ptr); 152 s3c24xx_register_clock(*ptr);
diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c
index 6888816a1935..102454082474 100644
--- a/arch/arm/mach-sa1100/collie.c
+++ b/arch/arm/mach-sa1100/collie.c
@@ -40,6 +40,7 @@
40#include <asm/hardware/scoop.h> 40#include <asm/hardware/scoop.h>
41#include <asm/mach/sharpsl_param.h> 41#include <asm/mach/sharpsl_param.h>
42#include <asm/hardware/locomo.h> 42#include <asm/hardware/locomo.h>
43#include <asm/arch/mcp.h>
43 44
44#include "generic.h" 45#include "generic.h"
45 46
@@ -66,6 +67,32 @@ struct platform_device colliescoop_device = {
66 .resource = collie_scoop_resources, 67 .resource = collie_scoop_resources,
67}; 68};
68 69
70static struct scoop_pcmcia_dev collie_pcmcia_scoop[] = {
71{
72 .dev = &colliescoop_device.dev,
73 .irq = COLLIE_IRQ_GPIO_CF_IRQ,
74 .cd_irq = COLLIE_IRQ_GPIO_CF_CD,
75 .cd_irq_str = "PCMCIA0 CD",
76},
77};
78
79static struct scoop_pcmcia_config collie_pcmcia_config = {
80 .devs = &collie_pcmcia_scoop[0],
81 .num_devs = 1,
82};
83
84
85static struct mcp_plat_data collie_mcp_data = {
86 .mccr0 = MCCR0_ADM,
87 .sclk_rate = 11981000,
88};
89
90
91static struct sa1100_port_fns collie_port_fns __initdata = {
92 .set_mctrl = collie_uart_set_mctrl,
93 .get_mctrl = collie_uart_get_mctrl,
94};
95
69 96
70static struct resource locomo_resources[] = { 97static struct resource locomo_resources[] = {
71 [0] = { 98 [0] = {
@@ -159,6 +186,8 @@ static void __init collie_init(void)
159 GPDR |= GPIO_32_768kHz; 186 GPDR |= GPIO_32_768kHz;
160 TUCR = TUCR_32_768kHz; 187 TUCR = TUCR_32_768kHz;
161 188
189 platform_scoop_config = &collie_pcmcia_config;
190
162 ret = platform_add_devices(devices, ARRAY_SIZE(devices)); 191 ret = platform_add_devices(devices, ARRAY_SIZE(devices));
163 if (ret) { 192 if (ret) {
164 printk(KERN_WARNING "collie: Unable to register LoCoMo device\n"); 193 printk(KERN_WARNING "collie: Unable to register LoCoMo device\n");
@@ -166,6 +195,7 @@ static void __init collie_init(void)
166 195
167 sa11x0_set_flash_data(&collie_flash_data, collie_flash_resources, 196 sa11x0_set_flash_data(&collie_flash_data, collie_flash_resources,
168 ARRAY_SIZE(collie_flash_resources)); 197 ARRAY_SIZE(collie_flash_resources));
198 sa11x0_set_mcp_data(&collie_mcp_data);
169 199
170 sharpsl_save_param(); 200 sharpsl_save_param();
171} 201}
diff --git a/arch/arm26/Makefile b/arch/arm26/Makefile
index 844a9e46886e..fe91eda98a94 100644
--- a/arch/arm26/Makefile
+++ b/arch/arm26/Makefile
@@ -1,6 +1,9 @@
1# 1#
2# arch/arm26/Makefile 2# arch/arm26/Makefile
3# 3#
4# This file is included by the global makefile so that you can add your own
5# architecture-specific flags and dependencies.
6#
4# This file is subject to the terms and conditions of the GNU General Public 7# This file is subject to the terms and conditions of the GNU General Public
5# License. See the file "COPYING" in the main directory of this archive 8# License. See the file "COPYING" in the main directory of this archive
6# for more details. 9# for more details.
@@ -49,9 +52,9 @@ all: zImage
49 52
50boot := arch/arm26/boot 53boot := arch/arm26/boot
51 54
52.PHONY: maketools FORCE 55PHONY += maketools FORCE
53maketools: FORCE 56maketools: FORCE
54 57
55 58
56# Convert bzImage to zImage 59# Convert bzImage to zImage
57bzImage: vmlinux 60bzImage: vmlinux
diff --git a/arch/arm26/boot/Makefile b/arch/arm26/boot/Makefile
index b5c2277654d4..68acb7b0d47f 100644
--- a/arch/arm26/boot/Makefile
+++ b/arch/arm26/boot/Makefile
@@ -1,6 +1,9 @@
1# 1#
2# arch/arm26/boot/Makefile 2# arch/arm26/boot/Makefile
3# 3#
4# This file is included by the global makefile so that you can add your own
5# architecture-specific flags and dependencies.
6#
4# This file is subject to the terms and conditions of the GNU General Public 7# This file is subject to the terms and conditions of the GNU General Public
5# License. See the file "COPYING" in the main directory of this archive 8# License. See the file "COPYING" in the main directory of this archive
6# for more details. 9# for more details.
@@ -60,7 +63,7 @@ $(obj)/xipImage: vmlinux FORCE
60 @echo ' Kernel: $@ is ready' 63 @echo ' Kernel: $@ is ready'
61endif 64endif
62 65
63.PHONY: initrd 66PHONY += initrd
64initrd: 67initrd:
65 @test "$(INITRD_PHYS)" != "" || \ 68 @test "$(INITRD_PHYS)" != "" || \
66 (echo This machine does not support INITRD; exit -1) 69 (echo This machine does not support INITRD; exit -1)
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
index 501fa52d8d3a..c59ee28a35f4 100644
--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -2944,7 +2944,7 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
2944 int spdl_err; 2944 int spdl_err;
2945 /* Mark output pages dirty. */ 2945 /* Mark output pages dirty. */
2946 spdl_err = set_page_dirty_lock(outpages[i]); 2946 spdl_err = set_page_dirty_lock(outpages[i]);
2947 DEBUG(if (spdl_err)printk("cryptocop_ioctl_process: set_page_dirty_lock returned %d\n", spdl_err)); 2947 DEBUG(if (spdl_err < 0)printk("cryptocop_ioctl_process: set_page_dirty_lock returned %d\n", spdl_err));
2948 } 2948 }
2949 for (i = 0; i < nooutpages; i++){ 2949 for (i = 0; i < nooutpages; i++){
2950 put_page(outpages[i]); 2950 put_page(outpages[i]);
diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c
index 30deaf1b728a..b504def3e346 100644
--- a/arch/cris/kernel/irq.c
+++ b/arch/cris/kernel/irq.c
@@ -52,9 +52,8 @@ int show_interrupts(struct seq_file *p, void *v)
52 52
53 if (i == 0) { 53 if (i == 0) {
54 seq_printf(p, " "); 54 seq_printf(p, " ");
55 for (j=0; j<NR_CPUS; j++) 55 for_each_online_cpu(j)
56 if (cpu_online(j)) 56 seq_printf(p, "CPU%d ",j);
57 seq_printf(p, "CPU%d ",j);
58 seq_putc(p, '\n'); 57 seq_putc(p, '\n');
59 } 58 }
60 59
@@ -67,9 +66,8 @@ int show_interrupts(struct seq_file *p, void *v)
67#ifndef CONFIG_SMP 66#ifndef CONFIG_SMP
68 seq_printf(p, "%10u ", kstat_irqs(i)); 67 seq_printf(p, "%10u ", kstat_irqs(i));
69#else 68#else
70 for (j = 0; j < NR_CPUS; j++) 69 for_each_online_cpu(j)
71 if (cpu_online(j)) 70 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
72 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
73#endif 71#endif
74 seq_printf(p, " %14s", irq_desc[i].handler->typename); 72 seq_printf(p, " %14s", irq_desc[i].handler->typename);
75 seq_printf(p, " %s", action->name); 73 seq_printf(p, " %s", action->name);
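The show_interrupts() hunks above (and the matching ones for frv, m32r, mips and the other architectures in the diffstat) all make the same conversion: an open-coded scan over NR_CPUS with a cpu_online() test becomes for_each_online_cpu(). As a rough illustration only (the real definition lives in include/linux/cpumask.h and may differ in detail), the macro boils down to walking just the bits set in the online map:

    /* illustrative expansion, not quoted from this patch */
    #define for_each_online_cpu(cpu)  for_each_cpu_mask((cpu), cpu_online_map)

    for_each_online_cpu(j)                 /* visits only the online CPUs */
            seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);

The per-CPU columns come out identical; the macro just hides the online-mask test and drops one level of indentation at each call site.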
diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c
index 4ab3e87115b6..123451c44154 100644
--- a/arch/cris/kernel/process.c
+++ b/arch/cris/kernel/process.c
@@ -116,6 +116,7 @@
116#include <asm/pgtable.h> 116#include <asm/pgtable.h>
117#include <asm/uaccess.h> 117#include <asm/uaccess.h>
118#include <asm/irq.h> 118#include <asm/irq.h>
119#include <asm/system.h>
119#include <linux/module.h> 120#include <linux/module.h>
120#include <linux/spinlock.h> 121#include <linux/spinlock.h>
121#include <linux/fs_struct.h> 122#include <linux/fs_struct.h>
@@ -194,8 +195,6 @@ EXPORT_SYMBOL(enable_hlt);
194 */ 195 */
195void (*pm_idle)(void); 196void (*pm_idle)(void);
196 197
197extern void default_idle(void);
198
199/* 198/*
200 * The idle thread. There's no useful work to be 199 * The idle thread. There's no useful work to be
201 * done, so just try to conserve power and have a 200 * done, so just try to conserve power and have a
diff --git a/arch/frv/kernel/gdb-stub.c b/arch/frv/kernel/gdb-stub.c
index 8f860d9c4947..508601fad079 100644
--- a/arch/frv/kernel/gdb-stub.c
+++ b/arch/frv/kernel/gdb-stub.c
@@ -1406,7 +1406,7 @@ void gdbstub(int sigval)
1406 __debug_frame->psr |= PSR_S; 1406 __debug_frame->psr |= PSR_S;
1407 __debug_regs->brr = (__debug_frame->tbr & TBR_TT) << 12; 1407 __debug_regs->brr = (__debug_frame->tbr & TBR_TT) << 12;
1408 __debug_regs->brr |= BRR_EB; 1408 __debug_regs->brr |= BRR_EB;
1409 sigval = SIGXCPU;; 1409 sigval = SIGXCPU;
1410 } 1410 }
1411 1411
1412 LEDS(0x5002); 1412 LEDS(0x5002);
diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c
index 27ab4c30aac6..11fa326a8f62 100644
--- a/arch/frv/kernel/irq.c
+++ b/arch/frv/kernel/irq.c
@@ -75,9 +75,8 @@ int show_interrupts(struct seq_file *p, void *v)
75 switch (i) { 75 switch (i) {
76 case 0: 76 case 0:
77 seq_printf(p, " "); 77 seq_printf(p, " ");
78 for (j = 0; j < NR_CPUS; j++) 78 for_each_online_cpu(j)
79 if (cpu_online(j)) 79 seq_printf(p, "CPU%d ",j);
80 seq_printf(p, "CPU%d ",j);
81 80
82 seq_putc(p, '\n'); 81 seq_putc(p, '\n');
83 break; 82 break;
@@ -100,9 +99,8 @@ int show_interrupts(struct seq_file *p, void *v)
100#ifndef CONFIG_SMP 99#ifndef CONFIG_SMP
101 seq_printf(p, "%10u ", kstat_irqs(i)); 100 seq_printf(p, "%10u ", kstat_irqs(i));
102#else 101#else
103 for (j = 0; j < NR_CPUS; j++) 102 for_each_online_cpu(j)
104 if (cpu_online(j)) 103 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i - 1]);
105 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i - 1]);
106#endif 104#endif
107 105
108 level = group->sources[ix]->level - frv_irq_levels; 106 level = group->sources[ix]->level - frv_irq_levels;
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index dd344f112cfe..16ccddc69c2b 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -54,7 +54,7 @@ asmlinkage void ret_from_fork(void);
54 * The idle loop on an H8/300.. 54 * The idle loop on an H8/300..
55 */ 55 */
56#if !defined(CONFIG_H8300H_SIM) && !defined(CONFIG_H8S_SIM) 56#if !defined(CONFIG_H8300H_SIM) && !defined(CONFIG_H8S_SIM)
57void default_idle(void) 57static void default_idle(void)
58{ 58{
59 local_irq_disable(); 59 local_irq_disable();
60 if (!need_resched()) { 60 if (!need_resched()) {
@@ -65,7 +65,7 @@ void default_idle(void)
65 local_irq_enable(); 65 local_irq_enable();
66} 66}
67#else 67#else
68void default_idle(void) 68static void default_idle(void)
69{ 69{
70 cpu_relax(); 70 cpu_relax();
71} 71}
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 5b1a7d46d1d9..b008fb0cd7b7 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -80,6 +80,7 @@ config X86_VOYAGER
80 80
81config X86_NUMAQ 81config X86_NUMAQ
82 bool "NUMAQ (IBM/Sequent)" 82 bool "NUMAQ (IBM/Sequent)"
83 select SMP
83 select NUMA 84 select NUMA
84 help 85 help
85 This option is used for getting Linux to run on a (IBM/Sequent) NUMA 86 This option is used for getting Linux to run on a (IBM/Sequent) NUMA
@@ -400,6 +401,7 @@ choice
400 401
401config NOHIGHMEM 402config NOHIGHMEM
402 bool "off" 403 bool "off"
404 depends on !X86_NUMAQ
403 ---help--- 405 ---help---
404 Linux can use up to 64 Gigabytes of physical memory on x86 systems. 406 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
405 However, the address space of 32-bit x86 processors is only 4 407 However, the address space of 32-bit x86 processors is only 4
@@ -436,6 +438,7 @@ config NOHIGHMEM
436 438
437config HIGHMEM4G 439config HIGHMEM4G
438 bool "4GB" 440 bool "4GB"
441 depends on !X86_NUMAQ
439 help 442 help
440 Select this if you have a 32-bit processor and between 1 and 4 443 Select this if you have a 32-bit processor and between 1 and 4
441 gigabytes of physical RAM. 444 gigabytes of physical RAM.
@@ -503,10 +506,6 @@ config NUMA
503 default n if X86_PC 506 default n if X86_PC
504 default y if (X86_NUMAQ || X86_SUMMIT) 507 default y if (X86_NUMAQ || X86_SUMMIT)
505 508
506# Need comments to help the hapless user trying to turn on NUMA support
507comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support"
508 depends on X86_NUMAQ && (!HIGHMEM64G || !SMP)
509
510comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI" 509comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
511 depends on X86_SUMMIT && (!HIGHMEM64G || !ACPI) 510 depends on X86_SUMMIT && (!HIGHMEM64G || !ACPI)
512 511
@@ -660,13 +659,18 @@ config BOOT_IOREMAP
660 default y 659 default y
661 660
662config REGPARM 661config REGPARM
663 bool "Use register arguments (EXPERIMENTAL)" 662 bool "Use register arguments"
664 depends on EXPERIMENTAL 663 default y
665 default n
666 help 664 help
667 Compile the kernel with -mregparm=3. This uses a different ABI 665 Compile the kernel with -mregparm=3. This instructs gcc to use
668 and passes the first three arguments of a function call in registers. 666 a more efficient function call ABI which passes the first three
669 This will probably break binary only modules. 667 arguments of a function call via registers, which results in denser
668 and faster code.
669
670 If this option is disabled, then the default ABI of passing
671 arguments via the stack is used.
672
673 If unsure, say Y.
670 674
671config SECCOMP 675config SECCOMP
672 bool "Enable seccomp to safely compute untrusted bytecode" 676 bool "Enable seccomp to safely compute untrusted bytecode"
@@ -733,7 +737,7 @@ config PHYSICAL_START
733 737
734config HOTPLUG_CPU 738config HOTPLUG_CPU
735 bool "Support for hot-pluggable CPUs (EXPERIMENTAL)" 739 bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
736 depends on SMP && HOTPLUG && EXPERIMENTAL && !X86_VOYAGER 740 depends on SMP && HOTPLUG && EXPERIMENTAL && !X86_VOYAGER && !X86_PC
737 ---help--- 741 ---help---
738 Say Y here to experiment with turning CPUs off and on. CPUs 742 Say Y here to experiment with turning CPUs off and on. CPUs
739 can be controlled through /sys/devices/system/cpu. 743 can be controlled through /sys/devices/system/cpu.
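The REGPARM help text reworded above describes the -mregparm=3 ABI. For a concrete picture (an illustration, not part of the patch): gcc then passes the first three integer arguments in %eax, %edx and %ecx rather than on the stack, which is the same thing the per-function attribute spells out:

    /* what -mregparm=3 does kernel-wide, written as a per-function attribute */
    static int __attribute__((regparm(3))) add3(int a, int b, int c)
    {
            return a + b + c;   /* a arrives in %eax, b in %edx, c in %ecx */
    }

Code built against the old stack-based convention (notably binary-only modules) is what this can break, hence the EXPERIMENTAL marking that the hunk drops now that the option defaults to Y.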
diff --git a/arch/i386/Kconfig.debug b/arch/i386/Kconfig.debug
index bf32ecc9ad04..6e97df6979e8 100644
--- a/arch/i386/Kconfig.debug
+++ b/arch/i386/Kconfig.debug
@@ -31,12 +31,21 @@ config DEBUG_STACK_USAGE
31 31
32 This option will slow down process creation somewhat. 32 This option will slow down process creation somewhat.
33 33
34config STACK_BACKTRACE_COLS
35 int "Stack backtraces per line" if DEBUG_KERNEL
36 range 1 3
37 default 2
38 help
39 Selects how many stack backtrace entries per line to display.
40
41 This can save screen space when displaying traces.
42
34comment "Page alloc debug is incompatible with Software Suspend on i386" 43comment "Page alloc debug is incompatible with Software Suspend on i386"
35 depends on DEBUG_KERNEL && SOFTWARE_SUSPEND 44 depends on DEBUG_KERNEL && SOFTWARE_SUSPEND
36 45
37config DEBUG_PAGEALLOC 46config DEBUG_PAGEALLOC
38 bool "Page alloc debugging" 47 bool "Debug page memory allocations"
39 depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND 48 depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND && !HUGETLBFS
40 help 49 help
41 Unmap pages from the kernel linear mapping after free_pages(). 50 Unmap pages from the kernel linear mapping after free_pages().
42 This results in a large slowdown, but helps to find certain types 51 This results in a large slowdown, but helps to find certain types
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index 36bef6543ac1..c848a5b30391 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -39,6 +39,9 @@ include $(srctree)/arch/i386/Makefile.cpu
39 39
40cflags-$(CONFIG_REGPARM) += -mregparm=3 40cflags-$(CONFIG_REGPARM) += -mregparm=3
41 41
42# temporary until string.h is fixed
43cflags-y += -ffreestanding
44
42# Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use 45# Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use
43# a lot more stack due to the lack of sharing of stacklots: 46# a lot more stack due to the lack of sharing of stacklots:
44CFLAGS += $(shell if [ $(call cc-version) -lt 0400 ] ; then echo $(call cc-option,-fno-unit-at-a-time); fi ;) 47CFLAGS += $(shell if [ $(call cc-version) -lt 0400 ] ; then echo $(call cc-option,-fno-unit-at-a-time); fi ;)
@@ -99,8 +102,8 @@ AFLAGS += $(mflags-y)
99 102
100boot := arch/i386/boot 103boot := arch/i386/boot
101 104
102.PHONY: zImage bzImage compressed zlilo bzlilo \ 105PHONY += zImage bzImage compressed zlilo bzlilo \
103 zdisk bzdisk fdimage fdimage144 fdimage288 install 106 zdisk bzdisk fdimage fdimage144 fdimage288 install
104 107
105all: bzImage 108all: bzImage
106 109
diff --git a/arch/i386/Makefile.cpu b/arch/i386/Makefile.cpu
index dcd936ef45db..a11befba26d5 100644
--- a/arch/i386/Makefile.cpu
+++ b/arch/i386/Makefile.cpu
@@ -39,3 +39,7 @@ cflags-$(CONFIG_X86_ELAN) += -march=i486
39# Geode GX1 support 39# Geode GX1 support
40cflags-$(CONFIG_MGEODEGX1) += -march=pentium-mmx 40cflags-$(CONFIG_MGEODEGX1) += -march=pentium-mmx
41 41
42# add at the end to overwrite eventual tuning options from earlier
43# cpu entries
44cflags-$(CONFIG_X86_GENERIC) += $(call tune,generic)
45
diff --git a/arch/i386/boot/edd.S b/arch/i386/boot/edd.S
index d8d69f2b911d..4b84ea216f2b 100644
--- a/arch/i386/boot/edd.S
+++ b/arch/i386/boot/edd.S
@@ -76,6 +76,8 @@ edd_mbr_sig_read:
76 popw %es 76 popw %es
77 popw %bx 77 popw %bx
78 jc edd_mbr_sig_done # on failure, we're done. 78 jc edd_mbr_sig_done # on failure, we're done.
79 cmpb $0, %ah # some BIOSes do not set CF
80 jne edd_mbr_sig_done # on failure, we're done.
79 movl (EDDBUF+EDD_MBR_SIG_OFFSET), %eax # read sig out of the MBR 81 movl (EDDBUF+EDD_MBR_SIG_OFFSET), %eax # read sig out of the MBR
80 movl %eax, (%bx) # store success 82 movl %eax, (%bx) # store success
81 incb (EDD_MBR_SIG_NR_BUF) # note that we stored something 83 incb (EDD_MBR_SIG_NR_BUF) # note that we stored something
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index 65656c033d70..5b9ed21216cf 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -7,7 +7,7 @@ extra-y := head.o init_task.o vmlinux.lds
7obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \ 7obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \
8 ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \ 8 ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \
9 pci-dma.o i386_ksyms.o i387.o dmi_scan.o bootflag.o \ 9 pci-dma.o i386_ksyms.o i387.o dmi_scan.o bootflag.o \
10 quirks.o i8237.o topology.o 10 quirks.o i8237.o topology.o alternative.o
11 11
12obj-y += cpu/ 12obj-y += cpu/
13obj-y += timers/ 13obj-y += timers/
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
new file mode 100644
index 000000000000..5cbd6f99fb2a
--- /dev/null
+++ b/arch/i386/kernel/alternative.c
@@ -0,0 +1,321 @@
1#include <linux/module.h>
2#include <linux/spinlock.h>
3#include <linux/list.h>
4#include <asm/alternative.h>
5#include <asm/sections.h>
6
7#define DEBUG 0
8#if DEBUG
9# define DPRINTK(fmt, args...) printk(fmt, args)
10#else
11# define DPRINTK(fmt, args...)
12#endif
13
14/* Use inline assembly to define this because the nops are defined
15 as inline assembly strings in the include files and we cannot
16 get them easily into strings. */
17asm("\t.data\nintelnops: "
18 GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
19 GENERIC_NOP7 GENERIC_NOP8);
20asm("\t.data\nk8nops: "
21 K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
22 K8_NOP7 K8_NOP8);
23asm("\t.data\nk7nops: "
24 K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
25 K7_NOP7 K7_NOP8);
26
27extern unsigned char intelnops[], k8nops[], k7nops[];
28static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
29 NULL,
30 intelnops,
31 intelnops + 1,
32 intelnops + 1 + 2,
33 intelnops + 1 + 2 + 3,
34 intelnops + 1 + 2 + 3 + 4,
35 intelnops + 1 + 2 + 3 + 4 + 5,
36 intelnops + 1 + 2 + 3 + 4 + 5 + 6,
37 intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
38};
39static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
40 NULL,
41 k8nops,
42 k8nops + 1,
43 k8nops + 1 + 2,
44 k8nops + 1 + 2 + 3,
45 k8nops + 1 + 2 + 3 + 4,
46 k8nops + 1 + 2 + 3 + 4 + 5,
47 k8nops + 1 + 2 + 3 + 4 + 5 + 6,
48 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
49};
50static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
51 NULL,
52 k7nops,
53 k7nops + 1,
54 k7nops + 1 + 2,
55 k7nops + 1 + 2 + 3,
56 k7nops + 1 + 2 + 3 + 4,
57 k7nops + 1 + 2 + 3 + 4 + 5,
58 k7nops + 1 + 2 + 3 + 4 + 5 + 6,
59 k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
60};
61static struct nop {
62 int cpuid;
63 unsigned char **noptable;
64} noptypes[] = {
65 { X86_FEATURE_K8, k8_nops },
66 { X86_FEATURE_K7, k7_nops },
67 { -1, NULL }
68};
69
70
71extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
72extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
73extern u8 *__smp_locks[], *__smp_locks_end[];
74
75extern u8 __smp_alt_begin[], __smp_alt_end[];
76
77
78static unsigned char** find_nop_table(void)
79{
80 unsigned char **noptable = intel_nops;
81 int i;
82
83 for (i = 0; noptypes[i].cpuid >= 0; i++) {
84 if (boot_cpu_has(noptypes[i].cpuid)) {
85 noptable = noptypes[i].noptable;
86 break;
87 }
88 }
89 return noptable;
90}
91
92/* Replace instructions with better alternatives for this CPU type.
93 This runs before SMP is initialized to avoid SMP problems with
94 self modifying code. This implies that assymetric systems where
95 APs have less capabilities than the boot processor are not handled.
96 Tough. Make sure you disable such features by hand. */
97
98void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
99{
100 unsigned char **noptable = find_nop_table();
101 struct alt_instr *a;
102 int diff, i, k;
103
104 DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
105 for (a = start; a < end; a++) {
106 BUG_ON(a->replacementlen > a->instrlen);
107 if (!boot_cpu_has(a->cpuid))
108 continue;
109 memcpy(a->instr, a->replacement, a->replacementlen);
110 diff = a->instrlen - a->replacementlen;
111 /* Pad the rest with nops */
112 for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
113 k = diff;
114 if (k > ASM_NOP_MAX)
115 k = ASM_NOP_MAX;
116 memcpy(a->instr + i, noptable[k], k);
117 }
118 }
119}
120
121static void alternatives_smp_save(struct alt_instr *start, struct alt_instr *end)
122{
123 struct alt_instr *a;
124
125 DPRINTK("%s: alt table %p-%p\n", __FUNCTION__, start, end);
126 for (a = start; a < end; a++) {
127 memcpy(a->replacement + a->replacementlen,
128 a->instr,
129 a->instrlen);
130 }
131}
132
133static void alternatives_smp_apply(struct alt_instr *start, struct alt_instr *end)
134{
135 struct alt_instr *a;
136
137 for (a = start; a < end; a++) {
138 memcpy(a->instr,
139 a->replacement + a->replacementlen,
140 a->instrlen);
141 }
142}
143
144static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
145{
146 u8 **ptr;
147
148 for (ptr = start; ptr < end; ptr++) {
149 if (*ptr < text)
150 continue;
151 if (*ptr > text_end)
152 continue;
153 **ptr = 0xf0; /* lock prefix */
154 };
155}
156
157static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
158{
159 unsigned char **noptable = find_nop_table();
160 u8 **ptr;
161
162 for (ptr = start; ptr < end; ptr++) {
163 if (*ptr < text)
164 continue;
165 if (*ptr > text_end)
166 continue;
167 **ptr = noptable[1][0];
168 };
169}
170
171struct smp_alt_module {
172 /* what is this ??? */
173 struct module *mod;
174 char *name;
175
176 /* ptrs to lock prefixes */
177 u8 **locks;
178 u8 **locks_end;
179
180 /* .text segment, needed to avoid patching init code ;) */
181 u8 *text;
182 u8 *text_end;
183
184 struct list_head next;
185};
186static LIST_HEAD(smp_alt_modules);
187static DEFINE_SPINLOCK(smp_alt);
188
189static int smp_alt_once = 0;
190static int __init bootonly(char *str)
191{
192 smp_alt_once = 1;
193 return 1;
194}
195__setup("smp-alt-boot", bootonly);
196
197void alternatives_smp_module_add(struct module *mod, char *name,
198 void *locks, void *locks_end,
199 void *text, void *text_end)
200{
201 struct smp_alt_module *smp;
202 unsigned long flags;
203
204 if (smp_alt_once) {
205 if (boot_cpu_has(X86_FEATURE_UP))
206 alternatives_smp_unlock(locks, locks_end,
207 text, text_end);
208 return;
209 }
210
211 smp = kzalloc(sizeof(*smp), GFP_KERNEL);
212 if (NULL == smp)
213 return; /* we'll run the (safe but slow) SMP code then ... */
214
215 smp->mod = mod;
216 smp->name = name;
217 smp->locks = locks;
218 smp->locks_end = locks_end;
219 smp->text = text;
220 smp->text_end = text_end;
221 DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
222 __FUNCTION__, smp->locks, smp->locks_end,
223 smp->text, smp->text_end, smp->name);
224
225 spin_lock_irqsave(&smp_alt, flags);
226 list_add_tail(&smp->next, &smp_alt_modules);
227 if (boot_cpu_has(X86_FEATURE_UP))
228 alternatives_smp_unlock(smp->locks, smp->locks_end,
229 smp->text, smp->text_end);
230 spin_unlock_irqrestore(&smp_alt, flags);
231}
232
233void alternatives_smp_module_del(struct module *mod)
234{
235 struct smp_alt_module *item;
236 unsigned long flags;
237
238 if (smp_alt_once)
239 return;
240
241 spin_lock_irqsave(&smp_alt, flags);
242 list_for_each_entry(item, &smp_alt_modules, next) {
243 if (mod != item->mod)
244 continue;
245 list_del(&item->next);
246 spin_unlock_irqrestore(&smp_alt, flags);
247 DPRINTK("%s: %s\n", __FUNCTION__, item->name);
248 kfree(item);
249 return;
250 }
251 spin_unlock_irqrestore(&smp_alt, flags);
252}
253
254void alternatives_smp_switch(int smp)
255{
256 struct smp_alt_module *mod;
257 unsigned long flags;
258
259 if (smp_alt_once)
260 return;
261 BUG_ON(!smp && (num_online_cpus() > 1));
262
263 spin_lock_irqsave(&smp_alt, flags);
264 if (smp) {
265 printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
266 clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
267 clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
268 alternatives_smp_apply(__smp_alt_instructions,
269 __smp_alt_instructions_end);
270 list_for_each_entry(mod, &smp_alt_modules, next)
271 alternatives_smp_lock(mod->locks, mod->locks_end,
272 mod->text, mod->text_end);
273 } else {
274 printk(KERN_INFO "SMP alternatives: switching to UP code\n");
275 set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
276 set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
277 apply_alternatives(__smp_alt_instructions,
278 __smp_alt_instructions_end);
279 list_for_each_entry(mod, &smp_alt_modules, next)
280 alternatives_smp_unlock(mod->locks, mod->locks_end,
281 mod->text, mod->text_end);
282 }
283 spin_unlock_irqrestore(&smp_alt, flags);
284}
285
286void __init alternative_instructions(void)
287{
288 apply_alternatives(__alt_instructions, __alt_instructions_end);
289
290 /* switch to patch-once-at-boottime-only mode and free the
291 * tables in case we know the number of CPUs will never ever
292 * change */
293#ifdef CONFIG_HOTPLUG_CPU
294 if (num_possible_cpus() < 2)
295 smp_alt_once = 1;
296#else
297 smp_alt_once = 1;
298#endif
299
300 if (smp_alt_once) {
301 if (1 == num_possible_cpus()) {
302 printk(KERN_INFO "SMP alternatives: switching to UP code\n");
303 set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
304 set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
305 apply_alternatives(__smp_alt_instructions,
306 __smp_alt_instructions_end);
307 alternatives_smp_unlock(__smp_locks, __smp_locks_end,
308 _text, _etext);
309 }
310 free_init_pages("SMP alternatives",
311 (unsigned long)__smp_alt_begin,
312 (unsigned long)__smp_alt_end);
313 } else {
314 alternatives_smp_save(__smp_alt_instructions,
315 __smp_alt_instructions_end);
316 alternatives_smp_module_add(NULL, "core kernel",
317 __smp_locks, __smp_locks_end,
318 _text, _etext);
319 alternatives_smp_switch(0);
320 }
321}
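The new alternative.c above keeps, per module, the recorded lock-prefix locations (smp->locks .. smp->locks_end) together with the text range they must land in, and rewrites those bytes to real lock prefixes or single-byte nops as the system switches between SMP and UP. A hedged sketch of how a caller is expected to feed it (the pointer setup is illustrative; the actual hookup lives in the arch module loader, which the diffstat shows is also touched by this commit):

    /* illustrative caller, using only the functions declared above */
    void *locks, *locks_end;        /* bounds of the module's .smp_locks list */
    void *text, *text_end;          /* bounds of its .text, so init code is skipped */

    /* ... fill the four pointers from the module's section headers ... */

    alternatives_smp_module_add(mod, mod->name,
                                locks, locks_end, text, text_end);

    /* and when the module goes away: */
    alternatives_smp_module_del(mod);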
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 776c90989e06..eb5279d23b7f 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -38,6 +38,7 @@
38#include <asm/i8253.h> 38#include <asm/i8253.h>
39 39
40#include <mach_apic.h> 40#include <mach_apic.h>
41#include <mach_apicdef.h>
41#include <mach_ipi.h> 42#include <mach_ipi.h>
42 43
43#include "io_ports.h" 44#include "io_ports.h"
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index 05312a8abb8b..da30a374dd4e 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -824,8 +824,6 @@ static void apm_do_busy(void)
824 824
825static void (*original_pm_idle)(void); 825static void (*original_pm_idle)(void);
826 826
827extern void default_idle(void);
828
829/** 827/**
830 * apm_cpu_idle - cpu idling for APM capable Linux 828 * apm_cpu_idle - cpu idling for APM capable Linux
831 * 829 *
diff --git a/arch/i386/kernel/cpu/centaur.c b/arch/i386/kernel/cpu/centaur.c
index f52669ecb93f..bd75629dd262 100644
--- a/arch/i386/kernel/cpu/centaur.c
+++ b/arch/i386/kernel/cpu/centaur.c
@@ -4,6 +4,7 @@
4#include <asm/processor.h> 4#include <asm/processor.h>
5#include <asm/msr.h> 5#include <asm/msr.h>
6#include <asm/e820.h> 6#include <asm/e820.h>
7#include <asm/mtrr.h>
7#include "cpu.h" 8#include "cpu.h"
8 9
9#ifdef CONFIG_X86_OOSTORE 10#ifdef CONFIG_X86_OOSTORE
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index e6bd095ae108..7e3d6b6a4e96 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -25,9 +25,10 @@ EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);
25DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]); 25DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
26EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack); 26EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
27 27
28static int cachesize_override __devinitdata = -1; 28static int cachesize_override __cpuinitdata = -1;
29static int disable_x86_fxsr __devinitdata = 0; 29static int disable_x86_fxsr __cpuinitdata;
30static int disable_x86_serial_nr __devinitdata = 1; 30static int disable_x86_serial_nr __cpuinitdata = 1;
31static int disable_x86_sep __cpuinitdata;
31 32
32struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {}; 33struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
33 34
@@ -59,7 +60,7 @@ static int __init cachesize_setup(char *str)
59} 60}
60__setup("cachesize=", cachesize_setup); 61__setup("cachesize=", cachesize_setup);
61 62
62int __devinit get_model_name(struct cpuinfo_x86 *c) 63int __cpuinit get_model_name(struct cpuinfo_x86 *c)
63{ 64{
64 unsigned int *v; 65 unsigned int *v;
65 char *p, *q; 66 char *p, *q;
@@ -89,7 +90,7 @@ int __devinit get_model_name(struct cpuinfo_x86 *c)
89} 90}
90 91
91 92
92void __devinit display_cacheinfo(struct cpuinfo_x86 *c) 93void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
93{ 94{
94 unsigned int n, dummy, ecx, edx, l2size; 95 unsigned int n, dummy, ecx, edx, l2size;
95 96
@@ -130,7 +131,7 @@ void __devinit display_cacheinfo(struct cpuinfo_x86 *c)
130/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */ 131/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
131 132
132/* Look up CPU names by table lookup. */ 133/* Look up CPU names by table lookup. */
133static char __devinit *table_lookup_model(struct cpuinfo_x86 *c) 134static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
134{ 135{
135 struct cpu_model_info *info; 136 struct cpu_model_info *info;
136 137
@@ -151,7 +152,7 @@ static char __devinit *table_lookup_model(struct cpuinfo_x86 *c)
151} 152}
152 153
153 154
154static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early) 155static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
155{ 156{
156 char *v = c->x86_vendor_id; 157 char *v = c->x86_vendor_id;
157 int i; 158 int i;
@@ -187,6 +188,14 @@ static int __init x86_fxsr_setup(char * s)
187__setup("nofxsr", x86_fxsr_setup); 188__setup("nofxsr", x86_fxsr_setup);
188 189
189 190
191static int __init x86_sep_setup(char * s)
192{
193 disable_x86_sep = 1;
194 return 1;
195}
196__setup("nosep", x86_sep_setup);
197
198
190/* Standard macro to see if a specific flag is changeable */ 199/* Standard macro to see if a specific flag is changeable */
191static inline int flag_is_changeable_p(u32 flag) 200static inline int flag_is_changeable_p(u32 flag)
192{ 201{
@@ -210,7 +219,7 @@ static inline int flag_is_changeable_p(u32 flag)
210 219
211 220
212/* Probe for the CPUID instruction */ 221/* Probe for the CPUID instruction */
213static int __devinit have_cpuid_p(void) 222static int __cpuinit have_cpuid_p(void)
214{ 223{
215 return flag_is_changeable_p(X86_EFLAGS_ID); 224 return flag_is_changeable_p(X86_EFLAGS_ID);
216} 225}
@@ -254,7 +263,7 @@ static void __init early_cpu_detect(void)
254 } 263 }
255} 264}
256 265
257void __devinit generic_identify(struct cpuinfo_x86 * c) 266void __cpuinit generic_identify(struct cpuinfo_x86 * c)
258{ 267{
259 u32 tfms, xlvl; 268 u32 tfms, xlvl;
260 int junk; 269 int junk;
@@ -307,7 +316,7 @@ void __devinit generic_identify(struct cpuinfo_x86 * c)
307#endif 316#endif
308} 317}
309 318
310static void __devinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) 319static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
311{ 320{
312 if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) { 321 if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
313 /* Disable processor serial number */ 322 /* Disable processor serial number */
@@ -335,7 +344,7 @@ __setup("serialnumber", x86_serial_nr_setup);
335/* 344/*
336 * This does the hard work of actually picking apart the CPU stuff... 345 * This does the hard work of actually picking apart the CPU stuff...
337 */ 346 */
338void __devinit identify_cpu(struct cpuinfo_x86 *c) 347void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
339{ 348{
340 int i; 349 int i;
341 350
@@ -405,6 +414,10 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c)
405 clear_bit(X86_FEATURE_XMM, c->x86_capability); 414 clear_bit(X86_FEATURE_XMM, c->x86_capability);
406 } 415 }
407 416
417 /* SEP disabled? */
418 if (disable_x86_sep)
419 clear_bit(X86_FEATURE_SEP, c->x86_capability);
420
408 if (disable_pse) 421 if (disable_pse)
409 clear_bit(X86_FEATURE_PSE, c->x86_capability); 422 clear_bit(X86_FEATURE_PSE, c->x86_capability);
410 423
@@ -417,7 +430,7 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c)
417 else 430 else
418 /* Last resort... */ 431 /* Last resort... */
419 sprintf(c->x86_model_id, "%02x/%02x", 432 sprintf(c->x86_model_id, "%02x/%02x",
420 c->x86_vendor, c->x86_model); 433 c->x86, c->x86_model);
421 } 434 }
422 435
423 /* Now the feature flags better reflect actual CPU features! */ 436 /* Now the feature flags better reflect actual CPU features! */
@@ -453,7 +466,7 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c)
453} 466}
454 467
455#ifdef CONFIG_X86_HT 468#ifdef CONFIG_X86_HT
456void __devinit detect_ht(struct cpuinfo_x86 *c) 469void __cpuinit detect_ht(struct cpuinfo_x86 *c)
457{ 470{
458 u32 eax, ebx, ecx, edx; 471 u32 eax, ebx, ecx, edx;
459 int index_msb, core_bits; 472 int index_msb, core_bits;
@@ -500,7 +513,7 @@ void __devinit detect_ht(struct cpuinfo_x86 *c)
500} 513}
501#endif 514#endif
502 515
503void __devinit print_cpu_info(struct cpuinfo_x86 *c) 516void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
504{ 517{
505 char *vendor = NULL; 518 char *vendor = NULL;
506 519
@@ -523,7 +536,7 @@ void __devinit print_cpu_info(struct cpuinfo_x86 *c)
523 printk("\n"); 536 printk("\n");
524} 537}
525 538
526cpumask_t cpu_initialized __devinitdata = CPU_MASK_NONE; 539cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
527 540
528/* This is hacky. :) 541/* This is hacky. :)
529 * We're emulating future behavior. 542 * We're emulating future behavior.
@@ -570,7 +583,7 @@ void __init early_cpu_init(void)
570 * and IDT. We reload them nevertheless, this function acts as a 583 * and IDT. We reload them nevertheless, this function acts as a
571 * 'CPU state barrier', nothing should get across. 584 * 'CPU state barrier', nothing should get across.
572 */ 585 */
573void __devinit cpu_init(void) 586void __cpuinit cpu_init(void)
574{ 587{
575 int cpu = smp_processor_id(); 588 int cpu = smp_processor_id();
576 struct tss_struct * t = &per_cpu(init_tss, cpu); 589 struct tss_struct * t = &per_cpu(init_tss, cpu);
@@ -670,7 +683,7 @@ void __devinit cpu_init(void)
670} 683}
671 684
672#ifdef CONFIG_HOTPLUG_CPU 685#ifdef CONFIG_HOTPLUG_CPU
673void __devinit cpu_uninit(void) 686void __cpuinit cpu_uninit(void)
674{ 687{
675 int cpu = raw_smp_processor_id(); 688 int cpu = raw_smp_processor_id();
676 cpu_clear(cpu, cpu_initialized); 689 cpu_clear(cpu, cpu_initialized);
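Most of the common.c churn above is the s/__devinit/__cpuinit/ conversion. Roughly what the annotation means, going by the include/linux/init.h convention of this kernel generation (paraphrased from memory, so treat it as a sketch rather than a quote):

    /* if CPUs can be hot-plugged the code must stay resident;
     * otherwise it may be dropped together with the init text */
    #ifdef CONFIG_HOTPLUG_CPU
    #define __cpuinit
    #else
    #define __cpuinit __init
    #endif

So cpu_init(), identify_cpu() and friends stay around on CPU-hotplug kernels but are freed after boot on kernels that will never bring another CPU online.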
diff --git a/arch/i386/kernel/cpu/cpufreq/Kconfig b/arch/i386/kernel/cpu/cpufreq/Kconfig
index 26892d2099b0..e44a4c6a4fe5 100644
--- a/arch/i386/kernel/cpu/cpufreq/Kconfig
+++ b/arch/i386/kernel/cpu/cpufreq/Kconfig
@@ -96,7 +96,6 @@ config X86_POWERNOW_K8_ACPI
96 96
97config X86_GX_SUSPMOD 97config X86_GX_SUSPMOD
98 tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation" 98 tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
99 depends on PCI
100 help 99 help
101 This add the CPUFreq driver for NatSemi Geode processors which 100 This add the CPUFreq driver for NatSemi Geode processors which
102 support suspend modulation. 101 support suspend modulation.
@@ -115,9 +114,9 @@ config X86_SPEEDSTEP_CENTRINO
115 you also need to say Y to "Use ACPI tables to decode..." below 114 you also need to say Y to "Use ACPI tables to decode..." below
116 [which might imply enabling ACPI] if you want to use this driver 115 [which might imply enabling ACPI] if you want to use this driver
117 on non-Banias CPUs. 116 on non-Banias CPUs.
118 117
119 For details, take a look at <file:Documentation/cpu-freq/>. 118 For details, take a look at <file:Documentation/cpu-freq/>.
120 119
121 If in doubt, say N. 120 If in doubt, say N.
122 121
123config X86_SPEEDSTEP_CENTRINO_ACPI 122config X86_SPEEDSTEP_CENTRINO_ACPI
@@ -148,7 +147,7 @@ config X86_SPEEDSTEP_ICH
148 help 147 help
149 This adds the CPUFreq driver for certain mobile Intel Pentium III 148 This adds the CPUFreq driver for certain mobile Intel Pentium III
150 (Coppermine), all mobile Intel Pentium III-M (Tualatin) and all 149 (Coppermine), all mobile Intel Pentium III-M (Tualatin) and all
151 mobile Intel Pentium 4 P4-M on systems which have an Intel ICH2, 150 mobile Intel Pentium 4 P4-M on systems which have an Intel ICH2,
152 ICH3 or ICH4 southbridge. 151 ICH3 or ICH4 southbridge.
153 152
154 For details, take a look at <file:Documentation/cpu-freq/>. 153 For details, take a look at <file:Documentation/cpu-freq/>.
@@ -161,7 +160,7 @@ config X86_SPEEDSTEP_SMI
161 depends on EXPERIMENTAL 160 depends on EXPERIMENTAL
162 help 161 help
163 This adds the CPUFreq driver for certain mobile Intel Pentium III 162 This adds the CPUFreq driver for certain mobile Intel Pentium III
164 (Coppermine), all mobile Intel Pentium III-M (Tualatin) 163 (Coppermine), all mobile Intel Pentium III-M (Tualatin)
165 on systems which have an Intel 440BX/ZX/MX southbridge. 164 on systems which have an Intel 440BX/ZX/MX southbridge.
166 165
167 For details, take a look at <file:Documentation/cpu-freq/>. 166 For details, take a look at <file:Documentation/cpu-freq/>.
@@ -203,9 +202,10 @@ config X86_LONGRUN
203config X86_LONGHAUL 202config X86_LONGHAUL
204 tristate "VIA Cyrix III Longhaul" 203 tristate "VIA Cyrix III Longhaul"
205 select CPU_FREQ_TABLE 204 select CPU_FREQ_TABLE
205 depends on BROKEN
206 help 206 help
207 This adds the CPUFreq driver for VIA Samuel/CyrixIII, 207 This adds the CPUFreq driver for VIA Samuel/CyrixIII,
208 VIA Cyrix Samuel/C3, VIA Cyrix Ezra and VIA Cyrix Ezra-T 208 VIA Cyrix Samuel/C3, VIA Cyrix Ezra and VIA Cyrix Ezra-T
209 processors. 209 processors.
210 210
211 For details, take a look at <file:Documentation/cpu-freq/>. 211 For details, take a look at <file:Documentation/cpu-freq/>.
@@ -215,11 +215,11 @@ config X86_LONGHAUL
215comment "shared options" 215comment "shared options"
216 216
217config X86_ACPI_CPUFREQ_PROC_INTF 217config X86_ACPI_CPUFREQ_PROC_INTF
218 bool "/proc/acpi/processor/../performance interface (deprecated)" 218 bool "/proc/acpi/processor/../performance interface (deprecated)"
219 depends on PROC_FS 219 depends on PROC_FS
220 depends on X86_ACPI_CPUFREQ || X86_SPEEDSTEP_CENTRINO_ACPI || X86_POWERNOW_K7_ACPI || X86_POWERNOW_K8_ACPI 220 depends on X86_ACPI_CPUFREQ || X86_SPEEDSTEP_CENTRINO_ACPI || X86_POWERNOW_K7_ACPI || X86_POWERNOW_K8_ACPI
221 help 221 help
222 This enables the deprecated /proc/acpi/processor/../performance 222 This enables the deprecated /proc/acpi/processor/../performance
223 interface. While it is helpful for debugging, the generic, 223 interface. While it is helpful for debugging, the generic,
224 cross-architecture cpufreq interfaces should be used. 224 cross-architecture cpufreq interfaces should be used.
225 225
@@ -233,9 +233,9 @@ config X86_SPEEDSTEP_RELAXED_CAP_CHECK
233 bool "Relaxed speedstep capability checks" 233 bool "Relaxed speedstep capability checks"
234 depends on (X86_SPEEDSTEP_SMI || X86_SPEEDSTEP_ICH) 234 depends on (X86_SPEEDSTEP_SMI || X86_SPEEDSTEP_ICH)
235 help 235 help
236 Don't perform all checks for a speedstep capable system which would 236 Don't perform all checks for a speedstep capable system which would
237 normally be done. Some ancient or strange systems, though speedstep 237 normally be done. Some ancient or strange systems, though speedstep
238 capable, don't always indicate that they are speedstep capable. This 238 capable, don't always indicate that they are speedstep capable. This
239 option lets the probing code bypass some of those checks if the 239 option lets the probing code bypass some of those checks if the
240 parameter "relaxed_check=1" is passed to the module. 240 parameter "relaxed_check=1" is passed to the module.
241 241
diff --git a/arch/i386/kernel/cpu/cpufreq/cpufreq-nforce2.c b/arch/i386/kernel/cpu/cpufreq/cpufreq-nforce2.c
index 2b62dee35c6c..f275e0d4aee5 100644
--- a/arch/i386/kernel/cpu/cpufreq/cpufreq-nforce2.c
+++ b/arch/i386/kernel/cpu/cpufreq/cpufreq-nforce2.c
@@ -39,7 +39,7 @@ static struct pci_dev *nforce2_chipset_dev;
39static int fid = 0; 39static int fid = 0;
40 40
41/* min_fsb, max_fsb: 41/* min_fsb, max_fsb:
42 * minimum and maximum FSB (= FSB at boot time) 42 * minimum and maximum FSB (= FSB at boot time)
43 */ 43 */
44static int min_fsb = 0; 44static int min_fsb = 0;
45static int max_fsb = 0; 45static int max_fsb = 0;
@@ -57,10 +57,10 @@ MODULE_PARM_DESC(min_fsb,
57 57
58#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "cpufreq-nforce2", msg) 58#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "cpufreq-nforce2", msg)
59 59
60/* 60/**
61 * nforce2_calc_fsb - calculate FSB 61 * nforce2_calc_fsb - calculate FSB
62 * @pll: PLL value 62 * @pll: PLL value
63 * 63 *
64 * Calculates FSB from PLL value 64 * Calculates FSB from PLL value
65 */ 65 */
66static int nforce2_calc_fsb(int pll) 66static int nforce2_calc_fsb(int pll)
@@ -76,10 +76,10 @@ static int nforce2_calc_fsb(int pll)
76 return 0; 76 return 0;
77} 77}
78 78
79/* 79/**
80 * nforce2_calc_pll - calculate PLL value 80 * nforce2_calc_pll - calculate PLL value
81 * @fsb: FSB 81 * @fsb: FSB
82 * 82 *
83 * Calculate PLL value for given FSB 83 * Calculate PLL value for given FSB
84 */ 84 */
85static int nforce2_calc_pll(unsigned int fsb) 85static int nforce2_calc_pll(unsigned int fsb)
@@ -106,10 +106,10 @@ static int nforce2_calc_pll(unsigned int fsb)
106 return NFORCE2_PLL(mul, div); 106 return NFORCE2_PLL(mul, div);
107} 107}
108 108
109/* 109/**
110 * nforce2_write_pll - write PLL value to chipset 110 * nforce2_write_pll - write PLL value to chipset
111 * @pll: PLL value 111 * @pll: PLL value
112 * 112 *
113 * Writes new FSB PLL value to chipset 113 * Writes new FSB PLL value to chipset
114 */ 114 */
115static void nforce2_write_pll(int pll) 115static void nforce2_write_pll(int pll)
@@ -121,15 +121,13 @@ static void nforce2_write_pll(int pll)
121 pci_write_config_dword(nforce2_chipset_dev, NFORCE2_PLLADR, temp); 121 pci_write_config_dword(nforce2_chipset_dev, NFORCE2_PLLADR, temp);
122 122
123 /* Now write the value in all 64 registers */ 123 /* Now write the value in all 64 registers */
124 for (temp = 0; temp <= 0x3f; temp++) { 124 for (temp = 0; temp <= 0x3f; temp++)
125 pci_write_config_dword(nforce2_chipset_dev, 125 pci_write_config_dword(nforce2_chipset_dev, NFORCE2_PLLREG, pll);
126 NFORCE2_PLLREG, pll);
127 }
128 126
129 return; 127 return;
130} 128}
131 129
132/* 130/**
133 * nforce2_fsb_read - Read FSB 131 * nforce2_fsb_read - Read FSB
134 * 132 *
135 * Read FSB from chipset 133 * Read FSB from chipset
@@ -140,39 +138,32 @@ static unsigned int nforce2_fsb_read(int bootfsb)
140 struct pci_dev *nforce2_sub5; 138 struct pci_dev *nforce2_sub5;
141 u32 fsb, temp = 0; 139 u32 fsb, temp = 0;
142 140
143
144 /* Get chipset boot FSB from subdevice 5 (FSB at boot-time) */ 141 /* Get chipset boot FSB from subdevice 5 (FSB at boot-time) */
145 nforce2_sub5 = pci_get_subsys(PCI_VENDOR_ID_NVIDIA, 142 nforce2_sub5 = pci_get_subsys(PCI_VENDOR_ID_NVIDIA,
146 0x01EF, 143 0x01EF,PCI_ANY_ID,PCI_ANY_ID,NULL);
147 PCI_ANY_ID,
148 PCI_ANY_ID,
149 NULL);
150
151 if (!nforce2_sub5) 144 if (!nforce2_sub5)
152 return 0; 145 return 0;
153 146
154 pci_read_config_dword(nforce2_sub5, NFORCE2_BOOTFSB, &fsb); 147 pci_read_config_dword(nforce2_sub5, NFORCE2_BOOTFSB, &fsb);
155 fsb /= 1000000; 148 fsb /= 1000000;
156 149
157 /* Check if PLL register is already set */ 150 /* Check if PLL register is already set */
158 pci_read_config_byte(nforce2_chipset_dev, 151 pci_read_config_byte(nforce2_chipset_dev,NFORCE2_PLLENABLE, (u8 *)&temp);
159 NFORCE2_PLLENABLE, (u8 *)&temp); 152
160
161 if(bootfsb || !temp) 153 if(bootfsb || !temp)
162 return fsb; 154 return fsb;
163 155
164 /* Use PLL register FSB value */ 156 /* Use PLL register FSB value */
165 pci_read_config_dword(nforce2_chipset_dev, 157 pci_read_config_dword(nforce2_chipset_dev,NFORCE2_PLLREG, &temp);
166 NFORCE2_PLLREG, &temp);
167 fsb = nforce2_calc_fsb(temp); 158 fsb = nforce2_calc_fsb(temp);
168 159
169 return fsb; 160 return fsb;
170} 161}
171 162
172/* 163/**
173 * nforce2_set_fsb - set new FSB 164 * nforce2_set_fsb - set new FSB
174 * @fsb: New FSB 165 * @fsb: New FSB
175 * 166 *
176 * Sets new FSB 167 * Sets new FSB
177 */ 168 */
178static int nforce2_set_fsb(unsigned int fsb) 169static int nforce2_set_fsb(unsigned int fsb)
@@ -186,7 +177,7 @@ static int nforce2_set_fsb(unsigned int fsb)
186 printk(KERN_ERR "cpufreq: FSB %d is out of range!\n", fsb); 177 printk(KERN_ERR "cpufreq: FSB %d is out of range!\n", fsb);
187 return -EINVAL; 178 return -EINVAL;
188 } 179 }
189 180
190 tfsb = nforce2_fsb_read(0); 181 tfsb = nforce2_fsb_read(0);
191 if (!tfsb) { 182 if (!tfsb) {
192 printk(KERN_ERR "cpufreq: Error while reading the FSB\n"); 183 printk(KERN_ERR "cpufreq: Error while reading the FSB\n");
@@ -194,8 +185,7 @@ static int nforce2_set_fsb(unsigned int fsb)
194 } 185 }
195 186
196 /* First write? Then set actual value */ 187 /* First write? Then set actual value */
197 pci_read_config_byte(nforce2_chipset_dev, 188 pci_read_config_byte(nforce2_chipset_dev,NFORCE2_PLLENABLE, (u8 *)&temp);
198 NFORCE2_PLLENABLE, (u8 *)&temp);
199 if (!temp) { 189 if (!temp) {
200 pll = nforce2_calc_pll(tfsb); 190 pll = nforce2_calc_pll(tfsb);
201 191
@@ -223,7 +213,7 @@ static int nforce2_set_fsb(unsigned int fsb)
223 /* Calculate the PLL reg. value */ 213 /* Calculate the PLL reg. value */
224 if ((pll = nforce2_calc_pll(tfsb)) == -1) 214 if ((pll = nforce2_calc_pll(tfsb)) == -1)
225 return -EINVAL; 215 return -EINVAL;
226 216
227 nforce2_write_pll(pll); 217 nforce2_write_pll(pll);
228#ifdef NFORCE2_DELAY 218#ifdef NFORCE2_DELAY
229 mdelay(NFORCE2_DELAY); 219 mdelay(NFORCE2_DELAY);
@@ -239,7 +229,7 @@ static int nforce2_set_fsb(unsigned int fsb)
239/** 229/**
240 * nforce2_get - get the CPU frequency 230 * nforce2_get - get the CPU frequency
241 * @cpu: CPU number 231 * @cpu: CPU number
242 * 232 *
243 * Returns the CPU frequency 233 * Returns the CPU frequency
244 */ 234 */
245static unsigned int nforce2_get(unsigned int cpu) 235static unsigned int nforce2_get(unsigned int cpu)
@@ -354,10 +344,10 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy)
354 344
355 printk(KERN_INFO "cpufreq: FSB currently at %i MHz, FID %d.%d\n", fsb, 345 printk(KERN_INFO "cpufreq: FSB currently at %i MHz, FID %d.%d\n", fsb,
356 fid / 10, fid % 10); 346 fid / 10, fid % 10);
357 347
358 /* Set maximum FSB to FSB at boot time */ 348 /* Set maximum FSB to FSB at boot time */
359 max_fsb = nforce2_fsb_read(1); 349 max_fsb = nforce2_fsb_read(1);
360 350
361 if(!max_fsb) 351 if(!max_fsb)
362 return -EIO; 352 return -EIO;
363 353
@@ -398,17 +388,15 @@ static struct cpufreq_driver nforce2_driver = {
398 * nforce2_detect_chipset - detect the Southbridge which contains FSB PLL logic 388 * nforce2_detect_chipset - detect the Southbridge which contains FSB PLL logic
399 * 389 *
400 * Detects nForce2 A2 and C1 stepping 390 * Detects nForce2 A2 and C1 stepping
401 * 391 *
402 */ 392 */
403static unsigned int nforce2_detect_chipset(void) 393static unsigned int nforce2_detect_chipset(void)
404{ 394{
405 u8 revision; 395 u8 revision;
406 396
407 nforce2_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_NVIDIA, 397 nforce2_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_NVIDIA,
408 PCI_DEVICE_ID_NVIDIA_NFORCE2, 398 PCI_DEVICE_ID_NVIDIA_NFORCE2,
409 PCI_ANY_ID, 399 PCI_ANY_ID, PCI_ANY_ID, NULL);
410 PCI_ANY_ID,
411 NULL);
412 400
413 if (nforce2_chipset_dev == NULL) 401 if (nforce2_chipset_dev == NULL)
414 return -ENODEV; 402 return -ENODEV;
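
Editor's note: the helpers above treat the chipset PLL register as a multiplier/divider pair applied to a fixed reference clock; nforce2_calc_fsb() and nforce2_calc_pll() convert between that encoding and an FSB value in MHz. A minimal userspace sketch of that arithmetic follows; the 25 MHz reference clock, the fsb_from_pll() name and the mul/div field layout are illustrative assumptions, not the driver's actual NFORCE2_PLL() encoding.

/* Illustrative only: FSB <-> PLL arithmetic as implied by the helpers above.
 * The reference clock and the bit layout are assumptions, not the driver's
 * real NFORCE2_PLL() encoding. */
#include <stdio.h>

#define REF_CLK_MHZ 25                            /* assumed reference clock */
#define PLL_PACK(mul, div) (((mul) << 8) | (div)) /* hypothetical field layout */

static int fsb_from_pll(int pll)
{
	int mul = (pll >> 8) & 0xff;
	int div = pll & 0xff;

	if (div == 0)
		return 0;
	return REF_CLK_MHZ * mul / div;           /* FSB in MHz */
}

int main(void)
{
	int pll = PLL_PACK(28, 5);                /* hypothetical 28/5 ratio */

	printf("PLL 0x%04x -> FSB %d MHz\n", pll, fsb_from_pll(pll));
	return 0;
}
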
diff --git a/arch/i386/kernel/cpu/cpufreq/elanfreq.c b/arch/i386/kernel/cpu/cpufreq/elanfreq.c
index 3f7caa4ae6d6..f317276afa7a 100644
--- a/arch/i386/kernel/cpu/cpufreq/elanfreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/elanfreq.c
@@ -1,16 +1,16 @@
1/* 1/*
2 * elanfreq: cpufreq driver for the AMD ELAN family 2 * elanfreq: cpufreq driver for the AMD ELAN family
3 * 3 *
4 * (c) Copyright 2002 Robert Schwebel <r.schwebel@pengutronix.de> 4 * (c) Copyright 2002 Robert Schwebel <r.schwebel@pengutronix.de>
5 * 5 *
6 * Parts of this code are (c) Sven Geggus <sven@geggus.net> 6 * Parts of this code are (c) Sven Geggus <sven@geggus.net>
7 * 7 *
8 * All Rights Reserved. 8 * All Rights Reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version. 13 * 2 of the License, or (at your option) any later version.
14 * 14 *
15 * 2002-02-13: - initial revision for 2.4.18-pre9 by Robert Schwebel 15 * 2002-02-13: - initial revision for 2.4.18-pre9 by Robert Schwebel
16 * 16 *
@@ -28,7 +28,7 @@
28#include <asm/timex.h> 28#include <asm/timex.h>
29#include <asm/io.h> 29#include <asm/io.h>
30 30
31#define REG_CSCIR 0x22 /* Chip Setup and Control Index Register */ 31#define REG_CSCIR 0x22 /* Chip Setup and Control Index Register */
32#define REG_CSCDR 0x23 /* Chip Setup and Control Data Register */ 32#define REG_CSCDR 0x23 /* Chip Setup and Control Data Register */
33 33
34/* Module parameter */ 34/* Module parameter */
@@ -41,7 +41,7 @@ struct s_elan_multiplier {
41}; 41};
42 42
43/* 43/*
44 * It is important that the frequencies 44 * It is important that the frequencies
45 * are listed in ascending order here! 45 * are listed in ascending order here!
46 */ 46 */
47struct s_elan_multiplier elan_multiplier[] = { 47struct s_elan_multiplier elan_multiplier[] = {
@@ -72,78 +72,79 @@ static struct cpufreq_frequency_table elanfreq_table[] = {
72 * elanfreq_get_cpu_frequency: determine current cpu speed 72 * elanfreq_get_cpu_frequency: determine current cpu speed
73 * 73 *
74 * Finds out at which frequency the CPU of the Elan SOC runs 74 * Finds out at which frequency the CPU of the Elan SOC runs
75 * at the moment. Frequencies from 1 to 33 MHz are generated 75 * at the moment. Frequencies from 1 to 33 MHz are generated
76 * the normal way, 66 and 99 MHz are called "Hyperspeed Mode" 76 * the normal way, 66 and 99 MHz are called "Hyperspeed Mode"
77 * and have the rest of the chip running with 33 MHz. 77 * and have the rest of the chip running with 33 MHz.
78 */ 78 */
79 79
80static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu) 80static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu)
81{ 81{
82 u8 clockspeed_reg; /* Clock Speed Register */ 82 u8 clockspeed_reg; /* Clock Speed Register */
83 83
84 local_irq_disable(); 84 local_irq_disable();
85 outb_p(0x80,REG_CSCIR); 85 outb_p(0x80,REG_CSCIR);
86 clockspeed_reg = inb_p(REG_CSCDR); 86 clockspeed_reg = inb_p(REG_CSCDR);
87 local_irq_enable(); 87 local_irq_enable();
88 88
89 if ((clockspeed_reg & 0xE0) == 0xE0) { return 0; } 89 if ((clockspeed_reg & 0xE0) == 0xE0)
90 return 0;
90 91
91 /* Are we in CPU clock multiplied mode (66/99 MHz)? */ 92 /* Are we in CPU clock multiplied mode (66/99 MHz)? */
92 if ((clockspeed_reg & 0xE0) == 0xC0) { 93 if ((clockspeed_reg & 0xE0) == 0xC0) {
93 if ((clockspeed_reg & 0x01) == 0) { 94 if ((clockspeed_reg & 0x01) == 0)
94 return 66000; 95 return 66000;
95 } else { 96 else
96 return 99000; 97 return 99000;
97 } 98 }
98 }
99 99
100 /* 33 MHz is not 32 MHz... */ 100 /* 33 MHz is not 32 MHz... */
101 if ((clockspeed_reg & 0xE0)==0xA0) 101 if ((clockspeed_reg & 0xE0)==0xA0)
102 return 33000; 102 return 33000;
103 103
104 return ((1<<((clockspeed_reg & 0xE0) >> 5)) * 1000); 104 return ((1<<((clockspeed_reg & 0xE0) >> 5)) * 1000);
105} 105}
106 106
107 107
108/** 108/**
109 * elanfreq_set_cpu_frequency: Change the CPU core frequency 109 * elanfreq_set_cpu_frequency: Change the CPU core frequency
110 * @cpu: cpu number 110 * @cpu: cpu number
111 * @freq: frequency in kHz 111 * @freq: frequency in kHz
112 * 112 *
113 * This function takes a frequency value and changes the CPU frequency 113 * This function takes a frequency value and changes the CPU frequency
114 * according to this. Note that the frequency has to be checked by 114 * according to this. Note that the frequency has to be checked by
115 * elanfreq_validatespeed() for correctness! 115 * elanfreq_validatespeed() for correctness!
116 * 116 *
117 * There is no return value. 117 * There is no return value.
118 */ 118 */
119 119
120static void elanfreq_set_cpu_state (unsigned int state) { 120static void elanfreq_set_cpu_state (unsigned int state)
121 121{
122 struct cpufreq_freqs freqs; 122 struct cpufreq_freqs freqs;
123 123
124 freqs.old = elanfreq_get_cpu_frequency(0); 124 freqs.old = elanfreq_get_cpu_frequency(0);
125 freqs.new = elan_multiplier[state].clock; 125 freqs.new = elan_multiplier[state].clock;
126 freqs.cpu = 0; /* elanfreq.c is UP only driver */ 126 freqs.cpu = 0; /* elanfreq.c is UP only driver */
127 127
128 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 128 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
129 129
130 printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n",elan_multiplier[state].clock); 130 printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n",
131 elan_multiplier[state].clock);
131 132
132 133
133 /* 134 /*
134 * Access to the Elan's internal registers is indexed via 135 * Access to the Elan's internal registers is indexed via
135 * 0x22: Chip Setup & Control Register Index Register (CSCI) 136 * 0x22: Chip Setup & Control Register Index Register (CSCI)
136 * 0x23: Chip Setup & Control Register Data Register (CSCD) 137 * 0x23: Chip Setup & Control Register Data Register (CSCD)
137 * 138 *
138 */ 139 */
139 140
140 /* 141 /*
141 * 0x40 is the Power Management Unit's Force Mode Register. 142 * 0x40 is the Power Management Unit's Force Mode Register.
142 * Bit 6 enables Hyperspeed Mode (66/100 MHz core frequency) 143 * Bit 6 enables Hyperspeed Mode (66/100 MHz core frequency)
143 */ 144 */
144 145
145 local_irq_disable(); 146 local_irq_disable();
146 outb_p(0x40,REG_CSCIR); /* Disable hyperspeed mode */ 147 outb_p(0x40,REG_CSCIR); /* Disable hyperspeed mode */
147 outb_p(0x00,REG_CSCDR); 148 outb_p(0x00,REG_CSCDR);
148 local_irq_enable(); /* wait till internal pipelines and */ 149 local_irq_enable(); /* wait till internal pipelines and */
149 udelay(1000); /* buffers have cleaned up */ 150 udelay(1000); /* buffers have cleaned up */
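
Editor's note: the decode in elanfreq_get_cpu_frequency() above reduces to a small mapping on bits 7-5 of the CSCDR value: 0xE0 means the speed is unknown, 0xC0 selects one of the two Hyperspeed frequencies, 0xA0 is the 33 MHz special case, and everything else is a power-of-two multiple of 1 MHz. A standalone restatement of that bit arithmetic, with made-up sample register values:

/* Restates the decode logic shown above for a few example register values;
 * not driver code, just the same bit arithmetic in userspace. */
#include <stdio.h>

static unsigned int decode_khz(unsigned char reg)
{
	if ((reg & 0xE0) == 0xE0)
		return 0;                              /* frequency unknown */
	if ((reg & 0xE0) == 0xC0)                      /* Hyperspeed Mode */
		return (reg & 0x01) ? 99000 : 66000;
	if ((reg & 0xE0) == 0xA0)
		return 33000;                          /* 33 MHz is not 32 MHz */
	return (1u << ((reg & 0xE0) >> 5)) * 1000;     /* 1/2/4/8/16 MHz */
}

int main(void)
{
	unsigned char samples[] = { 0x00, 0x40, 0xA0, 0xC1 };  /* example values */

	for (unsigned int i = 0; i < sizeof(samples); i++)
		printf("reg 0x%02x -> %u kHz\n", samples[i], decode_khz(samples[i]));
	return 0;
}
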
@@ -166,10 +167,10 @@ static void elanfreq_set_cpu_state (unsigned int state) {
166 167
167/** 168/**
168 * elanfreq_validatespeed: test if frequency range is valid 169 * elanfreq_validatespeed: test if frequency range is valid
169 * @policy: the policy to validate 170 * @policy: the policy to validate
170 * 171 *
171 * This function checks if a given frequency range in kHz is valid 172 * This function checks if a given frequency range in kHz is valid
172 * for the hardware supported by the driver. 173 * for the hardware supported by the driver.
173 */ 174 */
174 175
175static int elanfreq_verify (struct cpufreq_policy *policy) 176static int elanfreq_verify (struct cpufreq_policy *policy)
@@ -177,11 +178,11 @@ static int elanfreq_verify (struct cpufreq_policy *policy)
177 return cpufreq_frequency_table_verify(policy, &elanfreq_table[0]); 178 return cpufreq_frequency_table_verify(policy, &elanfreq_table[0]);
178} 179}
179 180
180static int elanfreq_target (struct cpufreq_policy *policy, 181static int elanfreq_target (struct cpufreq_policy *policy,
181 unsigned int target_freq, 182 unsigned int target_freq,
182 unsigned int relation) 183 unsigned int relation)
183{ 184{
184 unsigned int newstate = 0; 185 unsigned int newstate = 0;
185 186
186 if (cpufreq_frequency_table_target(policy, &elanfreq_table[0], target_freq, relation, &newstate)) 187 if (cpufreq_frequency_table_target(policy, &elanfreq_table[0], target_freq, relation, &newstate))
187 return -EINVAL; 188 return -EINVAL;
@@ -212,7 +213,7 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
212 max_freq = elanfreq_get_cpu_frequency(0); 213 max_freq = elanfreq_get_cpu_frequency(0);
213 214
214 /* table init */ 215 /* table init */
215 for (i=0; (elanfreq_table[i].frequency != CPUFREQ_TABLE_END); i++) { 216 for (i=0; (elanfreq_table[i].frequency != CPUFREQ_TABLE_END); i++) {
216 if (elanfreq_table[i].frequency > max_freq) 217 if (elanfreq_table[i].frequency > max_freq)
217 elanfreq_table[i].frequency = CPUFREQ_ENTRY_INVALID; 218 elanfreq_table[i].frequency = CPUFREQ_ENTRY_INVALID;
218 } 219 }
@@ -226,8 +227,7 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
226 if (result) 227 if (result)
227 return (result); 228 return (result);
228 229
229 cpufreq_frequency_table_get_attr(elanfreq_table, policy->cpu); 230 cpufreq_frequency_table_get_attr(elanfreq_table, policy->cpu);
230
231 return 0; 231 return 0;
232} 232}
233 233
@@ -268,9 +268,9 @@ static struct freq_attr* elanfreq_attr[] = {
268 268
269 269
270static struct cpufreq_driver elanfreq_driver = { 270static struct cpufreq_driver elanfreq_driver = {
271 .get = elanfreq_get_cpu_frequency, 271 .get = elanfreq_get_cpu_frequency,
272 .verify = elanfreq_verify, 272 .verify = elanfreq_verify,
273 .target = elanfreq_target, 273 .target = elanfreq_target,
274 .init = elanfreq_cpu_init, 274 .init = elanfreq_cpu_init,
275 .exit = elanfreq_cpu_exit, 275 .exit = elanfreq_cpu_exit,
276 .name = "elanfreq", 276 .name = "elanfreq",
@@ -279,23 +279,21 @@ static struct cpufreq_driver elanfreq_driver = {
279}; 279};
280 280
281 281
282static int __init elanfreq_init(void) 282static int __init elanfreq_init(void)
283{ 283{
284 struct cpuinfo_x86 *c = cpu_data; 284 struct cpuinfo_x86 *c = cpu_data;
285 285
286 /* Test if we have the right hardware */ 286 /* Test if we have the right hardware */
287 if ((c->x86_vendor != X86_VENDOR_AMD) || 287 if ((c->x86_vendor != X86_VENDOR_AMD) ||
288 (c->x86 != 4) || (c->x86_model!=10)) 288 (c->x86 != 4) || (c->x86_model!=10)) {
289 {
290 printk(KERN_INFO "elanfreq: error: no Elan processor found!\n"); 289 printk(KERN_INFO "elanfreq: error: no Elan processor found!\n");
291 return -ENODEV; 290 return -ENODEV;
292 } 291 }
293
294 return cpufreq_register_driver(&elanfreq_driver); 292 return cpufreq_register_driver(&elanfreq_driver);
295} 293}
296 294
297 295
298static void __exit elanfreq_exit(void) 296static void __exit elanfreq_exit(void)
299{ 297{
300 cpufreq_unregister_driver(&elanfreq_driver); 298 cpufreq_unregister_driver(&elanfreq_driver);
301} 299}
@@ -309,4 +307,3 @@ MODULE_DESCRIPTION("cpufreq driver for AMD's Elan CPUs");
309 307
310module_init(elanfreq_init); 308module_init(elanfreq_init);
311module_exit(elanfreq_exit); 309module_exit(elanfreq_exit);
312
diff --git a/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c b/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
index e86ea486c311..92afa3bc84f1 100644
--- a/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
@@ -6,12 +6,12 @@
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation 9 * version 2 as published by the Free Software Foundation
10 * 10 *
11 * The author(s) of this software shall not be held liable for damages 11 * The author(s) of this software shall not be held liable for damages
12 * of any nature resulting due to the use of this software. This 12 * of any nature resulting due to the use of this software. This
13 * software is provided AS-IS with no warranties. 13 * software is provided AS-IS with no warranties.
14 * 14 *
15 * Theoritical note: 15 * Theoritical note:
16 * 16 *
17 * (see Geode(tm) CS5530 manual (rev.4.1) page.56) 17 * (see Geode(tm) CS5530 manual (rev.4.1) page.56)
@@ -21,18 +21,18 @@
21 * 21 *
22 * Suspend Modulation works by asserting and de-asserting the SUSP# pin 22 * Suspend Modulation works by asserting and de-asserting the SUSP# pin
23 * to CPU(GX1/GXLV) for configurable durations. When asserting SUSP# 23 * to CPU(GX1/GXLV) for configurable durations. When asserting SUSP#
24 * the CPU enters an idle state. GX1 stops its core clock when SUSP# is 24 * the CPU enters an idle state. GX1 stops its core clock when SUSP# is
25 * asserted then power consumption is reduced. 25 * asserted then power consumption is reduced.
26 * 26 *
27 * Suspend Modulation's OFF/ON duration are configurable 27 * Suspend Modulation's OFF/ON duration are configurable
28 * with 'Suspend Modulation OFF Count Register' 28 * with 'Suspend Modulation OFF Count Register'
29 * and 'Suspend Modulation ON Count Register'. 29 * and 'Suspend Modulation ON Count Register'.
30 * These registers are 8bit counters that represent the number of 30 * These registers are 8bit counters that represent the number of
31 * 32us intervals which the SUSP# pin is asserted(ON)/de-asserted(OFF) 31 * 32us intervals which the SUSP# pin is asserted(ON)/de-asserted(OFF)
32 * to the processor. 32 * to the processor.
33 * 33 *
34 * These counters define a ratio which is the effective frequency 34 * These counters define a ratio which is the effective frequency
35 * of operation of the system. 35 * of operation of the system.
36 * 36 *
37 * OFF Count 37 * OFF Count
38 * F_eff = Fgx * ---------------------- 38 * F_eff = Fgx * ----------------------
@@ -40,24 +40,24 @@
40 * 40 *
41 * 0 <= On Count, Off Count <= 255 41 * 0 <= On Count, Off Count <= 255
42 * 42 *
43 * From these limits, we can get register values 43 * From these limits, we can get register values
44 * 44 *
45 * off_duration + on_duration <= MAX_DURATION 45 * off_duration + on_duration <= MAX_DURATION
46 * on_duration = off_duration * (stock_freq - freq) / freq 46 * on_duration = off_duration * (stock_freq - freq) / freq
47 * 47 *
48 * off_duration = (freq * DURATION) / stock_freq 48 * off_duration = (freq * DURATION) / stock_freq
49 * on_duration = DURATION - off_duration 49 * on_duration = DURATION - off_duration
50 * 50 *
51 * 51 *
52 *--------------------------------------------------------------------------- 52 *---------------------------------------------------------------------------
53 * 53 *
54 * ChangeLog: 54 * ChangeLog:
55 * Dec. 12, 2003 Hiroshi Miura <miura@da-cha.org> 55 * Dec. 12, 2003 Hiroshi Miura <miura@da-cha.org>
56 * - fix on/off register mistake 56 * - fix on/off register mistake
57 * - fix cpu_khz calc when it stops cpu modulation. 57 * - fix cpu_khz calc when it stops cpu modulation.
58 * 58 *
59 * Dec. 11, 2002 Hiroshi Miura <miura@da-cha.org> 59 * Dec. 11, 2002 Hiroshi Miura <miura@da-cha.org>
60 * - rewrite for Cyrix MediaGX Cx5510/5520 and 60 * - rewrite for Cyrix MediaGX Cx5510/5520 and
61 * NatSemi Geode Cs5530(A). 61 * NatSemi Geode Cs5530(A).
62 * 62 *
63 * Jul. ??, 2002 Zwane Mwaikambo <zwane@commfireservices.com> 63 * Jul. ??, 2002 Zwane Mwaikambo <zwane@commfireservices.com>
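
Editor's note: the duration formulas quoted earlier in this header comment map directly onto a couple of integer divisions, and gx_validate_speed() further down searches for the ON/OFF pair that lands closest to the requested frequency. A standalone sketch of the same arithmetic, using the default 255-count maximum described later in the file; the stock and requested frequencies are made-up example values.

/* Illustrative arithmetic only, following the formulas in the comment above:
 *   off = (freq * DURATION) / stock_freq,  on = DURATION - off
 *   F_eff = stock_freq * off / (on + off)
 */
#include <stdio.h>

#define MAX_DURATION 255        /* 8-bit ON/OFF counters, 32 us units */

int main(void)
{
	unsigned int stock_freq = 200000;   /* example: 200 MHz CPU, in kHz */
	unsigned int freq = 50000;          /* example: request 50 MHz */

	unsigned int off = (freq * MAX_DURATION) / stock_freq;
	unsigned int on  = MAX_DURATION - off;
	unsigned int eff = stock_freq * off / (on + off);

	printf("off=%u on=%u -> effective %u kHz\n", off, on, eff);
	return 0;
}
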
@@ -74,40 +74,40 @@
74 ************************************************************************/ 74 ************************************************************************/
75 75
76#include <linux/kernel.h> 76#include <linux/kernel.h>
77#include <linux/module.h> 77#include <linux/module.h>
78#include <linux/init.h> 78#include <linux/init.h>
79#include <linux/smp.h> 79#include <linux/smp.h>
80#include <linux/cpufreq.h> 80#include <linux/cpufreq.h>
81#include <linux/pci.h> 81#include <linux/pci.h>
82#include <asm/processor.h> 82#include <asm/processor.h>
83#include <asm/errno.h> 83#include <asm/errno.h>
84 84
85/* PCI config registers, all at F0 */ 85/* PCI config registers, all at F0 */
86#define PCI_PMER1 0x80 /* power management enable register 1 */ 86#define PCI_PMER1 0x80 /* power management enable register 1 */
87#define PCI_PMER2 0x81 /* power management enable register 2 */ 87#define PCI_PMER2 0x81 /* power management enable register 2 */
88#define PCI_PMER3 0x82 /* power management enable register 3 */ 88#define PCI_PMER3 0x82 /* power management enable register 3 */
89#define PCI_IRQTC 0x8c /* irq speedup timer counter register:typical 2 to 4ms */ 89#define PCI_IRQTC 0x8c /* irq speedup timer counter register:typical 2 to 4ms */
90#define PCI_VIDTC 0x8d /* video speedup timer counter register: typical 50 to 100ms */ 90#define PCI_VIDTC 0x8d /* video speedup timer counter register: typical 50 to 100ms */
91#define PCI_MODOFF 0x94 /* suspend modulation OFF counter register, 1 = 32us */ 91#define PCI_MODOFF 0x94 /* suspend modulation OFF counter register, 1 = 32us */
92#define PCI_MODON 0x95 /* suspend modulation ON counter register */ 92#define PCI_MODON 0x95 /* suspend modulation ON counter register */
93#define PCI_SUSCFG 0x96 /* suspend configuration register */ 93#define PCI_SUSCFG 0x96 /* suspend configuration register */
94 94
95/* PMER1 bits */ 95/* PMER1 bits */
96#define GPM (1<<0) /* global power management */ 96#define GPM (1<<0) /* global power management */
97#define GIT (1<<1) /* globally enable PM device idle timers */ 97#define GIT (1<<1) /* globally enable PM device idle timers */
98#define GTR (1<<2) /* globally enable IO traps */ 98#define GTR (1<<2) /* globally enable IO traps */
99#define IRQ_SPDUP (1<<3) /* disable clock throttle during interrupt handling */ 99#define IRQ_SPDUP (1<<3) /* disable clock throttle during interrupt handling */
100#define VID_SPDUP (1<<4) /* disable clock throttle during vga video handling */ 100#define VID_SPDUP (1<<4) /* disable clock throttle during vga video handling */
101 101
102/* SUSCFG bits */ 102/* SUSCFG bits */
103#define SUSMOD (1<<0) /* enable/disable suspend modulation */ 103#define SUSMOD (1<<0) /* enable/disable suspend modulation */
104/* the belows support only with cs5530 (after rev.1.2)/cs5530A */ 104/* the belows support only with cs5530 (after rev.1.2)/cs5530A */
105#define SMISPDUP (1<<1) /* select how SMI re-enable suspend modulation: */ 105#define SMISPDUP (1<<1) /* select how SMI re-enable suspend modulation: */
106 /* IRQTC timer or read SMI speedup disable reg.(F1BAR[08-09h]) */ 106 /* IRQTC timer or read SMI speedup disable reg.(F1BAR[08-09h]) */
107#define SUSCFG (1<<2) /* enable powering down a GXLV processor. "Special 3Volt Suspend" mode */ 107#define SUSCFG (1<<2) /* enable powering down a GXLV processor. "Special 3Volt Suspend" mode */
108/* the belows support only with cs5530A */ 108/* the belows support only with cs5530A */
109#define PWRSVE_ISA (1<<3) /* stop ISA clock */ 109#define PWRSVE_ISA (1<<3) /* stop ISA clock */
110#define PWRSVE (1<<4) /* active idle */ 110#define PWRSVE (1<<4) /* active idle */
111 111
112struct gxfreq_params { 112struct gxfreq_params {
113 u8 on_duration; 113 u8 on_duration;
@@ -128,7 +128,7 @@ module_param (pci_busclk, int, 0444);
128 128
129/* maximum duration for which the cpu may be suspended 129/* maximum duration for which the cpu may be suspended
130 * (32us * MAX_DURATION). If no parameter is given, this defaults 130 * (32us * MAX_DURATION). If no parameter is given, this defaults
131 * to 255. 131 * to 255.
132 * Note that this leads to a maximum of 8 ms(!) where the CPU clock 132 * Note that this leads to a maximum of 8 ms(!) where the CPU clock
133 * is suspended -- processing power is just 0.39% of what it used to be, 133 * is suspended -- processing power is just 0.39% of what it used to be,
134 * though. 781.25 kHz(!) for a 200 MHz processor -- wow. */ 134 * though. 781.25 kHz(!) for a 200 MHz processor -- wow. */
@@ -144,17 +144,17 @@ module_param (max_duration, int, 0444);
144#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "gx-suspmod", msg) 144#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "gx-suspmod", msg)
145 145
146/** 146/**
147 * we can detect a core multipiler from dir0_lsb 147 * we can detect a core multipiler from dir0_lsb
148 * from GX1 datasheet p.56, 148 * from GX1 datasheet p.56,
149 * MULT[3:0]: 149 * MULT[3:0]:
150 * 0000 = SYSCLK multiplied by 4 (test only) 150 * 0000 = SYSCLK multiplied by 4 (test only)
151 * 0001 = SYSCLK multiplied by 10 151 * 0001 = SYSCLK multiplied by 10
152 * 0010 = SYSCLK multiplied by 4 152 * 0010 = SYSCLK multiplied by 4
153 * 0011 = SYSCLK multiplied by 6 153 * 0011 = SYSCLK multiplied by 6
154 * 0100 = SYSCLK multiplied by 9 154 * 0100 = SYSCLK multiplied by 9
155 * 0101 = SYSCLK multiplied by 5 155 * 0101 = SYSCLK multiplied by 5
156 * 0110 = SYSCLK multiplied by 7 156 * 0110 = SYSCLK multiplied by 7
157 * 0111 = SYSCLK multiplied by 8 157 * 0111 = SYSCLK multiplied by 8
158 * of 33.3MHz 158 * of 33.3MHz
159 **/ 159 **/
160static int gx_freq_mult[16] = { 160static int gx_freq_mult[16] = {
@@ -164,17 +164,17 @@ static int gx_freq_mult[16] = {
164 164
165 165
166/**************************************************************** 166/****************************************************************
167 * Low Level chipset interface * 167 * Low Level chipset interface *
168 ****************************************************************/ 168 ****************************************************************/
169static struct pci_device_id gx_chipset_tbl[] __initdata = { 169static struct pci_device_id gx_chipset_tbl[] __initdata = {
170 { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, PCI_ANY_ID, PCI_ANY_ID }, 170 { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, PCI_ANY_ID, PCI_ANY_ID },
171 { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520, PCI_ANY_ID, PCI_ANY_ID }, 171 { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520, PCI_ANY_ID, PCI_ANY_ID },
172 { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510, PCI_ANY_ID, PCI_ANY_ID }, 172 { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510, PCI_ANY_ID, PCI_ANY_ID },
173 { 0, }, 173 { 0, },
174}; 174};
175 175
176/** 176/**
177 * gx_detect_chipset: 177 * gx_detect_chipset:
178 * 178 *
179 **/ 179 **/
180static __init struct pci_dev *gx_detect_chipset(void) 180static __init struct pci_dev *gx_detect_chipset(void)
@@ -182,17 +182,16 @@ static __init struct pci_dev *gx_detect_chipset(void)
182 struct pci_dev *gx_pci = NULL; 182 struct pci_dev *gx_pci = NULL;
183 183
184 /* check if CPU is a MediaGX or a Geode. */ 184 /* check if CPU is a MediaGX or a Geode. */
185 if ((current_cpu_data.x86_vendor != X86_VENDOR_NSC) && 185 if ((current_cpu_data.x86_vendor != X86_VENDOR_NSC) &&
186 (current_cpu_data.x86_vendor != X86_VENDOR_CYRIX)) { 186 (current_cpu_data.x86_vendor != X86_VENDOR_CYRIX)) {
187 dprintk("error: no MediaGX/Geode processor found!\n"); 187 dprintk("error: no MediaGX/Geode processor found!\n");
188 return NULL; 188 return NULL;
189 } 189 }
190 190
191 /* detect which companion chip is used */ 191 /* detect which companion chip is used */
192 while ((gx_pci = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, gx_pci)) != NULL) { 192 while ((gx_pci = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, gx_pci)) != NULL) {
193 if ((pci_match_id(gx_chipset_tbl, gx_pci)) != NULL) { 193 if ((pci_match_id(gx_chipset_tbl, gx_pci)) != NULL)
194 return gx_pci; 194 return gx_pci;
195 }
196 } 195 }
197 196
198 dprintk("error: no supported chipset found!\n"); 197 dprintk("error: no supported chipset found!\n");
@@ -200,24 +199,24 @@ static __init struct pci_dev *gx_detect_chipset(void)
200} 199}
201 200
202/** 201/**
203 * gx_get_cpuspeed: 202 * gx_get_cpuspeed:
204 * 203 *
205 * Finds out at which efficient frequency the Cyrix MediaGX/NatSemi Geode CPU runs. 204 * Finds out at which efficient frequency the Cyrix MediaGX/NatSemi Geode CPU runs.
206 */ 205 */
207static unsigned int gx_get_cpuspeed(unsigned int cpu) 206static unsigned int gx_get_cpuspeed(unsigned int cpu)
208{ 207{
209 if ((gx_params->pci_suscfg & SUSMOD) == 0) 208 if ((gx_params->pci_suscfg & SUSMOD) == 0)
210 return stock_freq; 209 return stock_freq;
211 210
212 return (stock_freq * gx_params->off_duration) 211 return (stock_freq * gx_params->off_duration)
213 / (gx_params->on_duration + gx_params->off_duration); 212 / (gx_params->on_duration + gx_params->off_duration);
214} 213}
215 214
216/** 215/**
217 * gx_validate_speed: 216 * gx_validate_speed:
218 * determine current cpu speed 217 * determine current cpu speed
219 * 218 *
220**/ 219 **/
221 220
222static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration, u8 *off_duration) 221static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration, u8 *off_duration)
223{ 222{
@@ -230,7 +229,7 @@ static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration, u8 *off
230 *on_duration=0; 229 *on_duration=0;
231 230
232 for (i=max_duration; i>0; i--) { 231 for (i=max_duration; i>0; i--) {
233 tmp_off = ((khz * i) / stock_freq) & 0xff; 232 tmp_off = ((khz * i) / stock_freq) & 0xff;
234 tmp_on = i - tmp_off; 233 tmp_on = i - tmp_off;
235 tmp_freq = (stock_freq * tmp_off) / i; 234 tmp_freq = (stock_freq * tmp_off) / i;
236 /* if this relation is closer to khz, use this. If it's equal, 235 /* if this relation is closer to khz, use this. If it's equal,
@@ -247,18 +246,17 @@ static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration, u8 *off
247 246
248 247
249/** 248/**
250 * gx_set_cpuspeed: 249 * gx_set_cpuspeed:
251 * set cpu speed in khz. 250 * set cpu speed in khz.
252 **/ 251 **/
253 252
254static void gx_set_cpuspeed(unsigned int khz) 253static void gx_set_cpuspeed(unsigned int khz)
255{ 254{
256 u8 suscfg, pmer1; 255 u8 suscfg, pmer1;
257 unsigned int new_khz; 256 unsigned int new_khz;
258 unsigned long flags; 257 unsigned long flags;
259 struct cpufreq_freqs freqs; 258 struct cpufreq_freqs freqs;
260 259
261
262 freqs.cpu = 0; 260 freqs.cpu = 0;
263 freqs.old = gx_get_cpuspeed(0); 261 freqs.old = gx_get_cpuspeed(0);
264 262
@@ -303,18 +301,18 @@ static void gx_set_cpuspeed(unsigned int khz)
303 pci_write_config_byte(gx_params->cs55x0, PCI_MODOFF, gx_params->off_duration); 301 pci_write_config_byte(gx_params->cs55x0, PCI_MODOFF, gx_params->off_duration);
304 pci_write_config_byte(gx_params->cs55x0, PCI_MODON, gx_params->on_duration); 302 pci_write_config_byte(gx_params->cs55x0, PCI_MODON, gx_params->on_duration);
305 303
306 pci_write_config_byte(gx_params->cs55x0, PCI_SUSCFG, suscfg); 304 pci_write_config_byte(gx_params->cs55x0, PCI_SUSCFG, suscfg);
307 pci_read_config_byte(gx_params->cs55x0, PCI_SUSCFG, &suscfg); 305 pci_read_config_byte(gx_params->cs55x0, PCI_SUSCFG, &suscfg);
308 306
309 local_irq_restore(flags); 307 local_irq_restore(flags);
310 308
311 gx_params->pci_suscfg = suscfg; 309 gx_params->pci_suscfg = suscfg;
312 310
313 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 311 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
314 312
315 dprintk("suspend modulation w/ duration of ON:%d us, OFF:%d us\n", 313 dprintk("suspend modulation w/ duration of ON:%d us, OFF:%d us\n",
316 gx_params->on_duration * 32, gx_params->off_duration * 32); 314 gx_params->on_duration * 32, gx_params->off_duration * 32);
317 dprintk("suspend modulation w/ clock speed: %d kHz.\n", freqs.new); 315 dprintk("suspend modulation w/ clock speed: %d kHz.\n", freqs.new);
318} 316}
319 317
320/**************************************************************** 318/****************************************************************
@@ -322,10 +320,10 @@ static void gx_set_cpuspeed(unsigned int khz)
322 ****************************************************************/ 320 ****************************************************************/
323 321
324/* 322/*
325 * cpufreq_gx_verify: test if frequency range is valid 323 * cpufreq_gx_verify: test if frequency range is valid
326 * 324 *
327 * This function checks if a given frequency range in kHz is valid 325 * This function checks if a given frequency range in kHz is valid
328 * for the hardware supported by the driver. 326 * for the hardware supported by the driver.
329 */ 327 */
330 328
331static int cpufreq_gx_verify(struct cpufreq_policy *policy) 329static int cpufreq_gx_verify(struct cpufreq_policy *policy)
@@ -333,8 +331,8 @@ static int cpufreq_gx_verify(struct cpufreq_policy *policy)
333 unsigned int tmp_freq = 0; 331 unsigned int tmp_freq = 0;
334 u8 tmp1, tmp2; 332 u8 tmp1, tmp2;
335 333
336 if (!stock_freq || !policy) 334 if (!stock_freq || !policy)
337 return -EINVAL; 335 return -EINVAL;
338 336
339 policy->cpu = 0; 337 policy->cpu = 0;
340 cpufreq_verify_within_limits(policy, (stock_freq / max_duration), stock_freq); 338 cpufreq_verify_within_limits(policy, (stock_freq / max_duration), stock_freq);
@@ -342,14 +340,14 @@ static int cpufreq_gx_verify(struct cpufreq_policy *policy)
342 /* it needs to be assured that at least one supported frequency is 340 /* it needs to be assured that at least one supported frequency is
343 * within policy->min and policy->max. If it is not, policy->max 341 * within policy->min and policy->max. If it is not, policy->max
344 * needs to be increased until one freuqency is supported. 342 * needs to be increased until one freuqency is supported.
345 * policy->min may not be decreased, though. This way we guarantee a 343 * policy->min may not be decreased, though. This way we guarantee a
346 * specific processing capacity. 344 * specific processing capacity.
347 */ 345 */
348 tmp_freq = gx_validate_speed(policy->min, &tmp1, &tmp2); 346 tmp_freq = gx_validate_speed(policy->min, &tmp1, &tmp2);
349 if (tmp_freq < policy->min) 347 if (tmp_freq < policy->min)
350 tmp_freq += stock_freq / max_duration; 348 tmp_freq += stock_freq / max_duration;
351 policy->min = tmp_freq; 349 policy->min = tmp_freq;
352 if (policy->min > policy->max) 350 if (policy->min > policy->max)
353 policy->max = tmp_freq; 351 policy->max = tmp_freq;
354 tmp_freq = gx_validate_speed(policy->max, &tmp1, &tmp2); 352 tmp_freq = gx_validate_speed(policy->max, &tmp1, &tmp2);
355 if (tmp_freq > policy->max) 353 if (tmp_freq > policy->max)
@@ -358,12 +356,12 @@ static int cpufreq_gx_verify(struct cpufreq_policy *policy)
358 if (policy->max < policy->min) 356 if (policy->max < policy->min)
359 policy->max = policy->min; 357 policy->max = policy->min;
360 cpufreq_verify_within_limits(policy, (stock_freq / max_duration), stock_freq); 358 cpufreq_verify_within_limits(policy, (stock_freq / max_duration), stock_freq);
361 359
362 return 0; 360 return 0;
363} 361}
364 362
365/* 363/*
366 * cpufreq_gx_target: 364 * cpufreq_gx_target:
367 * 365 *
368 */ 366 */
369static int cpufreq_gx_target(struct cpufreq_policy *policy, 367static int cpufreq_gx_target(struct cpufreq_policy *policy,
@@ -373,8 +371,8 @@ static int cpufreq_gx_target(struct cpufreq_policy *policy,
373 u8 tmp1, tmp2; 371 u8 tmp1, tmp2;
374 unsigned int tmp_freq; 372 unsigned int tmp_freq;
375 373
376 if (!stock_freq || !policy) 374 if (!stock_freq || !policy)
377 return -EINVAL; 375 return -EINVAL;
378 376
379 policy->cpu = 0; 377 policy->cpu = 0;
380 378
@@ -431,7 +429,7 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
431 return 0; 429 return 0;
432} 430}
433 431
434/* 432/*
435 * cpufreq_gx_init: 433 * cpufreq_gx_init:
436 * MediaGX/Geode GX initialize cpufreq driver 434 * MediaGX/Geode GX initialize cpufreq driver
437 */ 435 */
@@ -452,7 +450,7 @@ static int __init cpufreq_gx_init(void)
452 u32 class_rev; 450 u32 class_rev;
453 451
454 /* Test if we have the right hardware */ 452 /* Test if we have the right hardware */
455 if ((gx_pci = gx_detect_chipset()) == NULL) 453 if ((gx_pci = gx_detect_chipset()) == NULL)
456 return -ENODEV; 454 return -ENODEV;
457 455
458 /* check whether module parameters are sane */ 456 /* check whether module parameters are sane */
@@ -461,10 +459,9 @@ static int __init cpufreq_gx_init(void)
461 459
462 dprintk("geode suspend modulation available.\n"); 460 dprintk("geode suspend modulation available.\n");
463 461
464 params = kmalloc(sizeof(struct gxfreq_params), GFP_KERNEL); 462 params = kzalloc(sizeof(struct gxfreq_params), GFP_KERNEL);
465 if (params == NULL) 463 if (params == NULL)
466 return -ENOMEM; 464 return -ENOMEM;
467 memset(params, 0, sizeof(struct gxfreq_params));
468 465
469 params->cs55x0 = gx_pci; 466 params->cs55x0 = gx_pci;
470 gx_params = params; 467 gx_params = params;
@@ -478,7 +475,7 @@ static int __init cpufreq_gx_init(void)
478 pci_read_config_dword(params->cs55x0, PCI_CLASS_REVISION, &class_rev); 475 pci_read_config_dword(params->cs55x0, PCI_CLASS_REVISION, &class_rev);
479 params->pci_rev = class_rev && 0xff; 476 params->pci_rev = class_rev && 0xff;
480 477
481 if ((ret = cpufreq_register_driver(&gx_suspmod_driver))) { 478 if ((ret = cpufreq_register_driver(&gx_suspmod_driver))) {
482 kfree(params); 479 kfree(params);
483 return ret; /* register error! */ 480 return ret; /* register error! */
484 } 481 }
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.h b/arch/i386/kernel/cpu/cpufreq/longhaul.h
index 2a495c162ec7..d3a95d77ee85 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.h
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.h
@@ -234,7 +234,7 @@ static int __initdata ezrat_eblcr[32] = {
234 234
235/* 235/*
236 * VIA C3 Nehemiah */ 236 * VIA C3 Nehemiah */
237 237
238static int __initdata nehemiah_a_clock_ratio[32] = { 238static int __initdata nehemiah_a_clock_ratio[32] = {
239 100, /* 0000 -> 10.0x */ 239 100, /* 0000 -> 10.0x */
240 160, /* 0001 -> 16.0x */ 240 160, /* 0001 -> 16.0x */
@@ -446,7 +446,7 @@ static int __initdata nehemiah_c_eblcr[32] = {
446 /* end of table */ 446 /* end of table */
447}; 447};
448 448
449/* 449/*
450 * Voltage scales. Div/Mod by 1000 to get actual voltage. 450 * Voltage scales. Div/Mod by 1000 to get actual voltage.
451 * Which scale to use depends on the VRM type in use. 451 * Which scale to use depends on the VRM type in use.
452 */ 452 */
diff --git a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
index cc73a7ae34bc..ab6504efd801 100644
--- a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
@@ -14,7 +14,7 @@
14 * The author(s) of this software shall not be held liable for damages 14 * The author(s) of this software shall not be held liable for damages
15 * of any nature resulting due to the use of this software. This 15 * of any nature resulting due to the use of this software. This
16 * software is provided AS-IS with no warranties. 16 * software is provided AS-IS with no warranties.
17 * 17 *
18 * Date Errata Description 18 * Date Errata Description
19 * 20020525 N44, O17 12.5% or 25% DC causes lockup 19 * 20020525 N44, O17 12.5% or 25% DC causes lockup
20 * 20 *
@@ -22,7 +22,7 @@
22 22
23#include <linux/config.h> 23#include <linux/config.h>
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/smp.h> 27#include <linux/smp.h>
28#include <linux/cpufreq.h> 28#include <linux/cpufreq.h>
@@ -30,7 +30,7 @@
30#include <linux/cpumask.h> 30#include <linux/cpumask.h>
31#include <linux/sched.h> /* current / set_cpus_allowed() */ 31#include <linux/sched.h> /* current / set_cpus_allowed() */
32 32
33#include <asm/processor.h> 33#include <asm/processor.h>
34#include <asm/msr.h> 34#include <asm/msr.h>
35#include <asm/timex.h> 35#include <asm/timex.h>
36 36
@@ -79,7 +79,7 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
79 } else { 79 } else {
80 dprintk("CPU#%d setting duty cycle to %d%%\n", 80 dprintk("CPU#%d setting duty cycle to %d%%\n",
81 cpu, ((125 * newstate) / 10)); 81 cpu, ((125 * newstate) / 10));
82 /* bits 63 - 5 : reserved 82 /* bits 63 - 5 : reserved
83 * bit 4 : enable/disable 83 * bit 4 : enable/disable
84 * bits 3-1 : duty cycle 84 * bits 3-1 : duty cycle
85 * bit 0 : reserved 85 * bit 0 : reserved
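
Editor's note: per the bit layout in the comment above (bit 4 enable, bits 3-1 duty-cycle step, bit 0 and bits 63-5 reserved), the low word written to the clock-modulation MSR is just the step shifted into bits 3-1 plus the enable bit; the percentage in the dprintk is 12.5 % per step. The sketch below shows only that encoding arithmetic, not the rdmsr/wrmsr path; encode_dc() and the sample step value are illustrative.

/* Encoding sketch only: builds the low bits of the clock-modulation value
 * from a duty-cycle step, following the bit layout described in the comment
 * above (bit 4 enable, bits 3-1 step). Not a wrmsr() call. */
#include <stdio.h>

static unsigned int encode_dc(unsigned int newstate)
{
	return (newstate << 1) | (1u << 4);   /* step in bits 3-1, enable in bit 4 */
}

int main(void)
{
	unsigned int newstate = 4;            /* example step: 4 -> 50% duty cycle */

	printf("state %u -> MSR low bits 0x%02x, duty cycle %u%%\n",
	       newstate, encode_dc(newstate), (125 * newstate) / 10);
	return 0;
}
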
@@ -132,7 +132,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
132 } 132 }
133 133
134 /* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software 134 /* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software
135 * Developer's Manual, Volume 3 135 * Developer's Manual, Volume 3
136 */ 136 */
137 cpus_allowed = current->cpus_allowed; 137 cpus_allowed = current->cpus_allowed;
138 138
@@ -206,7 +206,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
206 return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_P4D); 206 return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_P4D);
207} 207}
208 208
209 209
210 210
211static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) 211static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
212{ 212{
@@ -234,7 +234,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
234 dprintk("has errata -- disabling frequencies lower than 2ghz\n"); 234 dprintk("has errata -- disabling frequencies lower than 2ghz\n");
235 break; 235 break;
236 } 236 }
237 237
238 /* get max frequency */ 238 /* get max frequency */
239 stock_freq = cpufreq_p4_get_frequency(c); 239 stock_freq = cpufreq_p4_get_frequency(c);
240 if (!stock_freq) 240 if (!stock_freq)
@@ -244,13 +244,13 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
244 for (i=1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) { 244 for (i=1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) {
245 if ((i<2) && (has_N44_O17_errata[policy->cpu])) 245 if ((i<2) && (has_N44_O17_errata[policy->cpu]))
246 p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID; 246 p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
247 else if (has_N60_errata[policy->cpu] && p4clockmod_table[i].frequency < 2000000) 247 else if (has_N60_errata[policy->cpu] && ((stock_freq * i)/8) < 2000000)
248 p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID; 248 p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
249 else 249 else
250 p4clockmod_table[i].frequency = (stock_freq * i)/8; 250 p4clockmod_table[i].frequency = (stock_freq * i)/8;
251 } 251 }
252 cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu); 252 cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu);
253 253
254 /* cpuinfo and default policy values */ 254 /* cpuinfo and default policy values */
255 policy->governor = CPUFREQ_DEFAULT_GOVERNOR; 255 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
256 policy->cpuinfo.transition_latency = 1000000; /* assumed */ 256 policy->cpuinfo.transition_latency = 1000000; /* assumed */
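
Editor's note: the table fill above gives each modulation state i a frequency of i/8 of the stock frequency, with low states knocked out on parts carrying the N44/O17 or N60 errata. A toy restatement of the N60 filtering with an example stock frequency; the value 0 stands in for the kernel's CPUFREQ_ENTRY_INVALID marker and is not its real value.

/* Sketch of the resulting table: each state i runs at i/8 of the stock
 * frequency; on parts with the N60 erratum, entries below 2 GHz are marked
 * invalid. Example stock frequency only. */
#include <stdio.h>

#define ENTRY_INVALID 0u   /* stand-in marker, not CPUFREQ_ENTRY_INVALID */

int main(void)
{
	unsigned int stock_freq = 2800000;   /* example: 2.8 GHz in kHz */
	int has_n60 = 1;                     /* example: erratum present */

	for (unsigned int i = 1; i <= 8; i++) {
		unsigned int f = (stock_freq * i) / 8;

		if (has_n60 && f < 2000000)
			f = ENTRY_INVALID;
		printf("state %u: %u kHz\n", i, f);
	}
	return 0;
}
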
@@ -262,7 +262,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
262 262
263static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy) 263static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy)
264{ 264{
265 cpufreq_frequency_table_put_attr(policy->cpu); 265 cpufreq_frequency_table_put_attr(policy->cpu);
266 return 0; 266 return 0;
267} 267}
268 268
@@ -298,7 +298,7 @@ static struct freq_attr* p4clockmod_attr[] = {
298}; 298};
299 299
300static struct cpufreq_driver p4clockmod_driver = { 300static struct cpufreq_driver p4clockmod_driver = {
301 .verify = cpufreq_p4_verify, 301 .verify = cpufreq_p4_verify,
302 .target = cpufreq_p4_target, 302 .target = cpufreq_p4_target,
303 .init = cpufreq_p4_cpu_init, 303 .init = cpufreq_p4_cpu_init,
304 .exit = cpufreq_p4_cpu_exit, 304 .exit = cpufreq_p4_cpu_exit,
@@ -310,12 +310,12 @@ static struct cpufreq_driver p4clockmod_driver = {
310 310
311 311
312static int __init cpufreq_p4_init(void) 312static int __init cpufreq_p4_init(void)
313{ 313{
314 struct cpuinfo_x86 *c = cpu_data; 314 struct cpuinfo_x86 *c = cpu_data;
315 int ret; 315 int ret;
316 316
317 /* 317 /*
318 * THERM_CONTROL is architectural for IA32 now, so 318 * THERM_CONTROL is architectural for IA32 now, so
319 * we can rely on the capability checks 319 * we can rely on the capability checks
320 */ 320 */
321 if (c->x86_vendor != X86_VENDOR_INTEL) 321 if (c->x86_vendor != X86_VENDOR_INTEL)
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k6.c b/arch/i386/kernel/cpu/cpufreq/powernow-k6.c
index 222f8cfe3c57..f89524051e4a 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k6.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k6.c
@@ -8,7 +8,7 @@
8 */ 8 */
9 9
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/cpufreq.h> 13#include <linux/cpufreq.h>
14#include <linux/ioport.h> 14#include <linux/ioport.h>
@@ -50,7 +50,7 @@ static int powernow_k6_get_cpu_multiplier(void)
50{ 50{
51 u64 invalue = 0; 51 u64 invalue = 0;
52 u32 msrval; 52 u32 msrval;
53 53
54 msrval = POWERNOW_IOPORT + 0x1; 54 msrval = POWERNOW_IOPORT + 0x1;
55 wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */ 55 wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
56 invalue=inl(POWERNOW_IOPORT + 0x8); 56 invalue=inl(POWERNOW_IOPORT + 0x8);
@@ -81,7 +81,7 @@ static void powernow_k6_set_state (unsigned int best_i)
81 freqs.old = busfreq * powernow_k6_get_cpu_multiplier(); 81 freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
82 freqs.new = busfreq * clock_ratio[best_i].index; 82 freqs.new = busfreq * clock_ratio[best_i].index;
83 freqs.cpu = 0; /* powernow-k6.c is UP only driver */ 83 freqs.cpu = 0; /* powernow-k6.c is UP only driver */
84 84
85 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 85 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
86 86
87 /* we now need to transform best_i to the BVC format, see AMD#23446 */ 87 /* we now need to transform best_i to the BVC format, see AMD#23446 */
@@ -152,7 +152,7 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
152 busfreq = cpu_khz / max_multiplier; 152 busfreq = cpu_khz / max_multiplier;
153 153
154 /* table init */ 154 /* table init */
155 for (i=0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) { 155 for (i=0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
156 if (clock_ratio[i].index > max_multiplier) 156 if (clock_ratio[i].index > max_multiplier)
157 clock_ratio[i].frequency = CPUFREQ_ENTRY_INVALID; 157 clock_ratio[i].frequency = CPUFREQ_ENTRY_INVALID;
158 else 158 else
@@ -182,7 +182,7 @@ static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
182 powernow_k6_set_state(i); 182 powernow_k6_set_state(i);
183 } 183 }
184 cpufreq_frequency_table_put_attr(policy->cpu); 184 cpufreq_frequency_table_put_attr(policy->cpu);
185 return 0; 185 return 0;
186} 186}
187 187
188static unsigned int powernow_k6_get(unsigned int cpu) 188static unsigned int powernow_k6_get(unsigned int cpu)
@@ -196,8 +196,8 @@ static struct freq_attr* powernow_k6_attr[] = {
196}; 196};
197 197
198static struct cpufreq_driver powernow_k6_driver = { 198static struct cpufreq_driver powernow_k6_driver = {
199 .verify = powernow_k6_verify, 199 .verify = powernow_k6_verify,
200 .target = powernow_k6_target, 200 .target = powernow_k6_target,
201 .init = powernow_k6_cpu_init, 201 .init = powernow_k6_cpu_init,
202 .exit = powernow_k6_cpu_exit, 202 .exit = powernow_k6_cpu_exit,
203 .get = powernow_k6_get, 203 .get = powernow_k6_get,
@@ -215,7 +215,7 @@ static struct cpufreq_driver powernow_k6_driver = {
215 * on success. 215 * on success.
216 */ 216 */
217static int __init powernow_k6_init(void) 217static int __init powernow_k6_init(void)
218{ 218{
219 struct cpuinfo_x86 *c = cpu_data; 219 struct cpuinfo_x86 *c = cpu_data;
220 220
221 if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 != 5) || 221 if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 != 5) ||
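
Editor's note: frequency bookkeeping in this driver (see the set_state and cpu_init hunks above) is the product of a bus clock and a per-state multiplier: busfreq is derived once as cpu_khz / max_multiplier, and each state's frequency is busfreq times that state's multiplier. A toy restatement with example numbers; the values and the times-ten multiplier scaling are illustrative assumptions.

/* Toy restatement of the busfreq * multiplier relation used above;
 * cpu_khz and the multipliers are example values, and storing multipliers
 * scaled by ten is an assumption for illustration. */
#include <stdio.h>

int main(void)
{
	unsigned int cpu_khz = 450000;                   /* example: 450 MHz K6 */
	unsigned int max_multiplier = 45;                /* example: 4.5x, times ten */
	unsigned int multipliers[] = { 20, 30, 35, 40, 45 };  /* 2.0x .. 4.5x */
	unsigned int busfreq = cpu_khz / max_multiplier; /* kHz per 0.1x step */

	for (unsigned int i = 0; i < 5; i++)
		printf("%u.%ux -> %u kHz\n", multipliers[i] / 10,
		       multipliers[i] % 10, busfreq * multipliers[i]);
	return 0;
}
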
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k7.c b/arch/i386/kernel/cpu/cpufreq/powernow-k7.c
index edcd626001da..2bf4237cb94e 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k7.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k7.c
@@ -199,8 +199,8 @@ static int get_ranges (unsigned char *pst)
199 powernow_table[j].index |= (vid << 8); /* upper 8 bits */ 199 powernow_table[j].index |= (vid << 8); /* upper 8 bits */
200 200
201 dprintk (" FID: 0x%x (%d.%dx [%dMHz]) " 201 dprintk (" FID: 0x%x (%d.%dx [%dMHz]) "
202 "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10, 202 "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
203 fid_codes[fid] % 10, speed/1000, vid, 203 fid_codes[fid] % 10, speed/1000, vid,
204 mobile_vid_table[vid]/1000, 204 mobile_vid_table[vid]/1000,
205 mobile_vid_table[vid]%1000); 205 mobile_vid_table[vid]%1000);
206 } 206 }
@@ -368,8 +368,8 @@ static int powernow_acpi_init(void)
368 } 368 }
369 369
370 dprintk (" FID: 0x%x (%d.%dx [%dMHz]) " 370 dprintk (" FID: 0x%x (%d.%dx [%dMHz]) "
371 "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10, 371 "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
372 fid_codes[fid] % 10, speed/1000, vid, 372 fid_codes[fid] % 10, speed/1000, vid,
373 mobile_vid_table[vid]/1000, 373 mobile_vid_table[vid]/1000,
374 mobile_vid_table[vid]%1000); 374 mobile_vid_table[vid]%1000);
375 375
@@ -460,7 +460,7 @@ static int powernow_decode_bios (int maxfid, int startvid)
460 (maxfid==pst->maxfid) && (startvid==pst->startvid)) 460 (maxfid==pst->maxfid) && (startvid==pst->startvid))
461 { 461 {
462 dprintk ("PST:%d (@%p)\n", i, pst); 462 dprintk ("PST:%d (@%p)\n", i, pst);
463 dprintk (" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n", 463 dprintk (" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n",
464 pst->cpuid, pst->fsbspeed, pst->maxfid, pst->startvid); 464 pst->cpuid, pst->fsbspeed, pst->maxfid, pst->startvid);
465 465
466 ret = get_ranges ((char *) pst + sizeof (struct pst_s)); 466 ret = get_ranges ((char *) pst + sizeof (struct pst_s));
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index e11a09207ec8..e5bc06480ff9 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -45,7 +45,7 @@
45 45
46#define PFX "powernow-k8: " 46#define PFX "powernow-k8: "
47#define BFX PFX "BIOS error: " 47#define BFX PFX "BIOS error: "
48#define VERSION "version 1.60.0" 48#define VERSION "version 1.60.1"
49#include "powernow-k8.h" 49#include "powernow-k8.h"
50 50
51/* serialize freq changes */ 51/* serialize freq changes */
@@ -54,7 +54,7 @@ static DECLARE_MUTEX(fidvid_sem);
54static struct powernow_k8_data *powernow_data[NR_CPUS]; 54static struct powernow_k8_data *powernow_data[NR_CPUS];
55 55
56#ifndef CONFIG_SMP 56#ifndef CONFIG_SMP
57static cpumask_t cpu_core_map[1]; 57static cpumask_t cpu_core_map[1] = { CPU_MASK_ALL };
58#endif 58#endif
59 59
60/* Return a frequency in MHz, given an input fid */ 60/* Return a frequency in MHz, given an input fid */
@@ -83,11 +83,10 @@ static u32 find_millivolts_from_vid(struct powernow_k8_data *data, u32 vid)
83 */ 83 */
84static u32 convert_fid_to_vco_fid(u32 fid) 84static u32 convert_fid_to_vco_fid(u32 fid)
85{ 85{
86 if (fid < HI_FID_TABLE_BOTTOM) { 86 if (fid < HI_FID_TABLE_BOTTOM)
87 return 8 + (2 * fid); 87 return 8 + (2 * fid);
88 } else { 88 else
89 return fid; 89 return fid;
90 }
91} 90}
92 91
93/* 92/*
@@ -177,7 +176,7 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid)
177 if (i++ > 100) { 176 if (i++ > 100) {
178 printk(KERN_ERR PFX "internal error - pending bit very stuck - no further pstate changes possible\n"); 177 printk(KERN_ERR PFX "internal error - pending bit very stuck - no further pstate changes possible\n");
179 return 1; 178 return 1;
180 } 179 }
181 } while (query_current_values_with_pending_wait(data)); 180 } while (query_current_values_with_pending_wait(data));
182 181
183 count_off_irt(data); 182 count_off_irt(data);
@@ -474,8 +473,10 @@ static int check_supported_cpu(unsigned int cpu)
474 goto out; 473 goto out;
475 474
476 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE); 475 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
476 if ((eax & CPUID_XFAM) != CPUID_XFAM_K8)
477 goto out;
478
477 if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) || 479 if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) ||
478 ((eax & CPUID_XFAM) != CPUID_XFAM_K8) ||
479 ((eax & CPUID_XMOD) > CPUID_XMOD_REV_G)) { 480 ((eax & CPUID_XMOD) > CPUID_XMOD_REV_G)) {
480 printk(KERN_INFO PFX "Processor cpuid %x not supported\n", eax); 481 printk(KERN_INFO PFX "Processor cpuid %x not supported\n", eax);
481 goto out; 482 goto out;
@@ -780,9 +781,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
780 /* verify only 1 entry from the lo frequency table */ 781 /* verify only 1 entry from the lo frequency table */
781 if (fid < HI_FID_TABLE_BOTTOM) { 782 if (fid < HI_FID_TABLE_BOTTOM) {
782 if (cntlofreq) { 783 if (cntlofreq) {
783 /* if both entries are the same, ignore this 784 /* if both entries are the same, ignore this one ... */
784 * one...
785 */
786 if ((powernow_table[i].frequency != powernow_table[cntlofreq].frequency) || 785 if ((powernow_table[i].frequency != powernow_table[cntlofreq].frequency) ||
787 (powernow_table[i].index != powernow_table[cntlofreq].index)) { 786 (powernow_table[i].index != powernow_table[cntlofreq].index)) {
788 printk(KERN_ERR PFX "Too many lo freq table entries\n"); 787 printk(KERN_ERR PFX "Too many lo freq table entries\n");
@@ -854,7 +853,7 @@ static int transition_frequency(struct powernow_k8_data *data, unsigned int inde
854 dprintk("cpu %d transition to index %u\n", smp_processor_id(), index); 853 dprintk("cpu %d transition to index %u\n", smp_processor_id(), index);
855 854
856 /* fid are the lower 8 bits of the index we stored into 855 /* fid are the lower 8 bits of the index we stored into
857 * the cpufreq frequency table in find_psb_table, vid are 856 * the cpufreq frequency table in find_psb_table, vid are
858 * the upper 8 bits. 857 * the upper 8 bits.
859 */ 858 */
860 859
@@ -909,7 +908,6 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
909 u32 checkvid = data->currvid; 908 u32 checkvid = data->currvid;
910 unsigned int newstate; 909 unsigned int newstate;
911 int ret = -EIO; 910 int ret = -EIO;
912 int i;
913 911
914 /* only run on specific CPU from here on */ 912 /* only run on specific CPU from here on */
915 oldmask = current->cpus_allowed; 913 oldmask = current->cpus_allowed;
@@ -955,12 +953,6 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
955 up(&fidvid_sem); 953 up(&fidvid_sem);
956 goto err_out; 954 goto err_out;
957 } 955 }
958
959 /* Update all the fid/vids of our siblings */
960 for_each_cpu_mask(i, cpu_core_map[pol->cpu]) {
961 powernow_data[i]->currvid = data->currvid;
962 powernow_data[i]->currfid = data->currfid;
963 }
964 up(&fidvid_sem); 956 up(&fidvid_sem);
965 957
966 pol->cur = find_khz_freq_from_fid(data->currfid); 958 pol->cur = find_khz_freq_from_fid(data->currfid);
@@ -1048,7 +1040,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
1048 pol->governor = CPUFREQ_DEFAULT_GOVERNOR; 1040 pol->governor = CPUFREQ_DEFAULT_GOVERNOR;
1049 pol->cpus = cpu_core_map[pol->cpu]; 1041 pol->cpus = cpu_core_map[pol->cpu];
1050 1042
1051 /* Take a crude guess here. 1043 /* Take a crude guess here.
1052 * That guess was in microseconds, so multiply with 1000 */ 1044 * That guess was in microseconds, so multiply with 1000 */
1053 pol->cpuinfo.transition_latency = (((data->rvo + 8) * data->vstable * VST_UNITS_20US) 1045 pol->cpuinfo.transition_latency = (((data->rvo + 8) * data->vstable * VST_UNITS_20US)
1054 + (3 * (1 << data->irt) * 10)) * 1000; 1046 + (3 * (1 << data->irt) * 10)) * 1000;
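
Editor's note: the crude latency guess above is plain integer arithmetic on the rvo, vstable and irt fields, scaled from microseconds to nanoseconds. Worked through once with made-up field values, and assuming VST_UNITS_20US is 20 (20 us per voltage-stabilization unit).

/* Worked example of the transition-latency guess above. rvo, vstable and irt
 * are made-up sample values; VST_UNITS_20US = 20 is an assumption. */
#include <stdio.h>

#define VST_UNITS_20US 20

int main(void)
{
	unsigned int rvo = 2, vstable = 5, irt = 3;   /* example field values */
	unsigned int latency_ns =
		(((rvo + 8) * vstable * VST_UNITS_20US) +
		 (3 * (1u << irt) * 10)) * 1000;

	printf("transition latency guess: %u ns\n", latency_ns);   /* 1240000 ns */
	return 0;
}
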
@@ -1070,9 +1062,8 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
1070 printk("cpu_init done, current fid 0x%x, vid 0x%x\n", 1062 printk("cpu_init done, current fid 0x%x, vid 0x%x\n",
1071 data->currfid, data->currvid); 1063 data->currfid, data->currvid);
1072 1064
1073 for_each_cpu_mask(i, cpu_core_map[pol->cpu]) { 1065 for_each_cpu_mask(i, cpu_core_map[pol->cpu])
1074 powernow_data[i] = data; 1066 powernow_data[i] = data;
1075 }
1076 1067
1077 return 0; 1068 return 0;
1078 1069
@@ -1145,16 +1136,14 @@ static int __cpuinit powernowk8_init(void)
1145{ 1136{
1146 unsigned int i, supported_cpus = 0; 1137 unsigned int i, supported_cpus = 0;
1147 1138
1148 for (i=0; i<NR_CPUS; i++) { 1139 for_each_online_cpu(i) {
1149 if (!cpu_online(i))
1150 continue;
1151 if (check_supported_cpu(i)) 1140 if (check_supported_cpu(i))
1152 supported_cpus++; 1141 supported_cpus++;
1153 } 1142 }
1154 1143
1155 if (supported_cpus == num_online_cpus()) { 1144 if (supported_cpus == num_online_cpus()) {
1156 printk(KERN_INFO PFX "Found %d AMD Athlon 64 / Opteron processors (" VERSION ")\n", 1145 printk(KERN_INFO PFX "Found %d AMD Athlon 64 / Opteron "
1157 supported_cpus); 1146 "processors (" VERSION ")\n", supported_cpus);
1158 return cpufreq_register_driver(&cpufreq_amd64_driver); 1147 return cpufreq_register_driver(&cpufreq_amd64_driver);
1159 } 1148 }
1160 1149
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
index d0de37d58e9a..00ea899c17e1 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
@@ -63,7 +63,7 @@ struct powernow_k8_data {
63#define MSR_C_LO_VID_SHIFT 8 63#define MSR_C_LO_VID_SHIFT 8
64 64
65/* Field definitions within the FID VID High Control MSR : */ 65/* Field definitions within the FID VID High Control MSR : */
66#define MSR_C_HI_STP_GNT_TO 0x000fffff 66#define MSR_C_HI_STP_GNT_TO 0x000fffff
67 67
68/* Field definitions within the FID VID Low Status MSR : */ 68/* Field definitions within the FID VID Low Status MSR : */
69#define MSR_S_LO_CHANGE_PENDING 0x80000000 /* cleared when completed */ 69#define MSR_S_LO_CHANGE_PENDING 0x80000000 /* cleared when completed */
@@ -123,7 +123,7 @@ struct powernow_k8_data {
123 * Most values of interest are enocoded in a single field of the _PSS 123 * Most values of interest are enocoded in a single field of the _PSS
124 * entries: the "control" value. 124 * entries: the "control" value.
125 */ 125 */
126 126
127#define IRT_SHIFT 30 127#define IRT_SHIFT 30
128#define RVO_SHIFT 28 128#define RVO_SHIFT 28
129#define EXT_TYPE_SHIFT 27 129#define EXT_TYPE_SHIFT 27
@@ -185,7 +185,7 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned
185#ifndef for_each_cpu_mask 185#ifndef for_each_cpu_mask
186#define for_each_cpu_mask(i,mask) for (i=0;i<1;i++) 186#define for_each_cpu_mask(i,mask) for (i=0;i<1;i++)
187#endif 187#endif
188 188
189#ifdef CONFIG_SMP 189#ifdef CONFIG_SMP
190static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[]) 190static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[])
191{ 191{
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index c173c0fa117a..b0ff9075708c 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -479,15 +479,13 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
479 unsigned l, h; 479 unsigned l, h;
480 int ret; 480 int ret;
481 int i; 481 int i;
482 struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
483 482
484 /* Only Intel makes Enhanced Speedstep-capable CPUs */ 483 /* Only Intel makes Enhanced Speedstep-capable CPUs */
485 if (cpu->x86_vendor != X86_VENDOR_INTEL || !cpu_has(cpu, X86_FEATURE_EST)) 484 if (cpu->x86_vendor != X86_VENDOR_INTEL || !cpu_has(cpu, X86_FEATURE_EST))
486 return -ENODEV; 485 return -ENODEV;
487 486
488 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { 487 if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
489 centrino_driver.flags |= CPUFREQ_CONST_LOOPS; 488 centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
490 }
491 489
492 if (centrino_cpu_init_acpi(policy)) { 490 if (centrino_cpu_init_acpi(policy)) {
493 if (policy->cpu != 0) 491 if (policy->cpu != 0)
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
index 7c47005a1805..4f46cac155c4 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
@@ -9,7 +9,7 @@
9 */ 9 */
10 10
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/moduleparam.h> 13#include <linux/moduleparam.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/cpufreq.h> 15#include <linux/cpufreq.h>
@@ -36,8 +36,8 @@ static unsigned int pentium3_get_frequency (unsigned int processor)
36 /* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */ 36 /* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */
37 struct { 37 struct {
38 unsigned int ratio; /* Frequency Multiplier (x10) */ 38 unsigned int ratio; /* Frequency Multiplier (x10) */
39 u8 bitmap; /* power on configuration bits 39 u8 bitmap; /* power on configuration bits
40 [27, 25:22] (in MSR 0x2a) */ 40 [27, 25:22] (in MSR 0x2a) */
41 } msr_decode_mult [] = { 41 } msr_decode_mult [] = {
42 { 30, 0x01 }, 42 { 30, 0x01 },
43 { 35, 0x05 }, 43 { 35, 0x05 },
@@ -58,9 +58,9 @@ static unsigned int pentium3_get_frequency (unsigned int processor)
58 58
59 /* PIII(-M) FSB settings: see table b1-b of 24547206.pdf */ 59 /* PIII(-M) FSB settings: see table b1-b of 24547206.pdf */
60 struct { 60 struct {
61 unsigned int value; /* Front Side Bus speed in MHz */ 61 unsigned int value; /* Front Side Bus speed in MHz */
62 u8 bitmap; /* power on configuration bits [18: 19] 62 u8 bitmap; /* power on configuration bits [18: 19]
63 (in MSR 0x2a) */ 63 (in MSR 0x2a) */
64 } msr_decode_fsb [] = { 64 } msr_decode_fsb [] = {
65 { 66, 0x0 }, 65 { 66, 0x0 },
66 { 100, 0x2 }, 66 { 100, 0x2 },
@@ -68,8 +68,8 @@ static unsigned int pentium3_get_frequency (unsigned int processor)
68 { 0, 0xff} 68 { 0, 0xff}
69 }; 69 };
70 70
71 u32 msr_lo, msr_tmp; 71 u32 msr_lo, msr_tmp;
72 int i = 0, j = 0; 72 int i = 0, j = 0;
73 73
74 /* read MSR 0x2a - we only need the low 32 bits */ 74 /* read MSR 0x2a - we only need the low 32 bits */
75 rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); 75 rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
@@ -106,7 +106,7 @@ static unsigned int pentium3_get_frequency (unsigned int processor)
106 106
107static unsigned int pentiumM_get_frequency(void) 107static unsigned int pentiumM_get_frequency(void)
108{ 108{
109 u32 msr_lo, msr_tmp; 109 u32 msr_lo, msr_tmp;
110 110
111 rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); 111 rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
112 dprintk("PM - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); 112 dprintk("PM - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp);
@@ -134,7 +134,7 @@ static unsigned int pentium4_get_frequency(void)
134 134
135 dprintk("P4 - MSR_EBC_FREQUENCY_ID: 0x%x 0x%x\n", msr_lo, msr_hi); 135 dprintk("P4 - MSR_EBC_FREQUENCY_ID: 0x%x 0x%x\n", msr_lo, msr_hi);
136 136
137 /* decode the FSB: see IA-32 Intel (C) Architecture Software 137 /* decode the FSB: see IA-32 Intel (C) Architecture Software
138 * Developer's Manual, Volume 3: System Prgramming Guide, 138 * Developer's Manual, Volume 3: System Prgramming Guide,
139 * revision #12 in Table B-1: MSRs in the Pentium 4 and 139 * revision #12 in Table B-1: MSRs in the Pentium 4 and
140 * Intel Xeon Processors, on page B-4 and B-5. 140 * Intel Xeon Processors, on page B-4 and B-5.
@@ -170,7 +170,7 @@ static unsigned int pentium4_get_frequency(void)
170 return (fsb * mult); 170 return (fsb * mult);
171} 171}
172 172
173 173
174unsigned int speedstep_get_processor_frequency(unsigned int processor) 174unsigned int speedstep_get_processor_frequency(unsigned int processor)
175{ 175{
176 switch (processor) { 176 switch (processor) {
@@ -198,11 +198,11 @@ EXPORT_SYMBOL_GPL(speedstep_get_processor_frequency);
198unsigned int speedstep_detect_processor (void) 198unsigned int speedstep_detect_processor (void)
199{ 199{
200 struct cpuinfo_x86 *c = cpu_data; 200 struct cpuinfo_x86 *c = cpu_data;
201 u32 ebx, msr_lo, msr_hi; 201 u32 ebx, msr_lo, msr_hi;
202 202
203 dprintk("x86: %x, model: %x\n", c->x86, c->x86_model); 203 dprintk("x86: %x, model: %x\n", c->x86, c->x86_model);
204 204
205 if ((c->x86_vendor != X86_VENDOR_INTEL) || 205 if ((c->x86_vendor != X86_VENDOR_INTEL) ||
206 ((c->x86 != 6) && (c->x86 != 0xF))) 206 ((c->x86 != 6) && (c->x86 != 0xF)))
207 return 0; 207 return 0;
208 208
@@ -218,15 +218,15 @@ unsigned int speedstep_detect_processor (void)
218 dprintk("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask); 218 dprintk("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask);
219 219
220 switch (c->x86_mask) { 220 switch (c->x86_mask) {
221 case 4: 221 case 4:
222 /* 222 /*
223 * B-stepping [M-P4-M] 223 * B-stepping [M-P4-M]
224 * sample has ebx = 0x0f, production has 0x0e. 224 * sample has ebx = 0x0f, production has 0x0e.
225 */ 225 */
226 if ((ebx == 0x0e) || (ebx == 0x0f)) 226 if ((ebx == 0x0e) || (ebx == 0x0f))
227 return SPEEDSTEP_PROCESSOR_P4M; 227 return SPEEDSTEP_PROCESSOR_P4M;
228 break; 228 break;
229 case 7: 229 case 7:
230 /* 230 /*
231 * C-stepping [M-P4-M] 231 * C-stepping [M-P4-M]
232 * needs to have ebx=0x0e, else it's a celeron: 232 * needs to have ebx=0x0e, else it's a celeron:
@@ -253,7 +253,7 @@ unsigned int speedstep_detect_processor (void)
253 * also, M-P4M HTs have ebx=0x8, too 253 * also, M-P4M HTs have ebx=0x8, too
254 * For now, they are distinguished by the model_id string 254 * For now, they are distinguished by the model_id string
255 */ 255 */
256 if ((ebx == 0x0e) || (strstr(c->x86_model_id,"Mobile Intel(R) Pentium(R) 4") != NULL)) 256 if ((ebx == 0x0e) || (strstr(c->x86_model_id,"Mobile Intel(R) Pentium(R) 4") != NULL))
257 return SPEEDSTEP_PROCESSOR_P4M; 257 return SPEEDSTEP_PROCESSOR_P4M;
258 break; 258 break;
259 default: 259 default:
@@ -264,8 +264,7 @@ unsigned int speedstep_detect_processor (void)
264 264
265 switch (c->x86_model) { 265 switch (c->x86_model) {
266 case 0x0B: /* Intel PIII [Tualatin] */ 266 case 0x0B: /* Intel PIII [Tualatin] */
267 /* cpuid_ebx(1) is 0x04 for desktop PIII, 267 /* cpuid_ebx(1) is 0x04 for desktop PIII, 0x06 for mobile PIII-M */
268 0x06 for mobile PIII-M */
269 ebx = cpuid_ebx(0x00000001); 268 ebx = cpuid_ebx(0x00000001);
270 dprintk("ebx is %x\n", ebx); 269 dprintk("ebx is %x\n", ebx);
271 270
@@ -275,9 +274,8 @@ unsigned int speedstep_detect_processor (void)
275 return 0; 274 return 0;
276 275
277 /* So far all PIII-M processors support SpeedStep. See 276 /* So far all PIII-M processors support SpeedStep. See
278 * Intel's 24540640.pdf of June 2003 277 * Intel's 24540640.pdf of June 2003
279 */ 278 */
280
281 return SPEEDSTEP_PROCESSOR_PIII_T; 279 return SPEEDSTEP_PROCESSOR_PIII_T;
282 280
283 case 0x08: /* Intel PIII [Coppermine] */ 281 case 0x08: /* Intel PIII [Coppermine] */
@@ -399,7 +397,7 @@ unsigned int speedstep_get_freqs(unsigned int processor,
399 } 397 }
400 } 398 }
401 399
402 out: 400out:
403 local_irq_restore(flags); 401 local_irq_restore(flags);
404 return (ret); 402 return (ret);
405} 403}
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h
index 6a727fd3a77e..b735429c50b4 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h
@@ -14,7 +14,7 @@
14 14
15#define SPEEDSTEP_PROCESSOR_PIII_C_EARLY 0x00000001 /* Coppermine core */ 15#define SPEEDSTEP_PROCESSOR_PIII_C_EARLY 0x00000001 /* Coppermine core */
16#define SPEEDSTEP_PROCESSOR_PIII_C 0x00000002 /* Coppermine core */ 16#define SPEEDSTEP_PROCESSOR_PIII_C 0x00000002 /* Coppermine core */
17#define SPEEDSTEP_PROCESSOR_PIII_T 0x00000003 /* Tualatin core */ 17#define SPEEDSTEP_PROCESSOR_PIII_T 0x00000003 /* Tualatin core */
18#define SPEEDSTEP_PROCESSOR_P4M 0x00000004 /* P4-M */ 18#define SPEEDSTEP_PROCESSOR_P4M 0x00000004 /* P4-M */
19 19
20/* the following processors are not speedstep-capable and are not auto-detected 20/* the following processors are not speedstep-capable and are not auto-detected
@@ -25,8 +25,8 @@
25 25
26/* speedstep states -- only two of them */ 26/* speedstep states -- only two of them */
27 27
28#define SPEEDSTEP_HIGH 0x00000000 28#define SPEEDSTEP_HIGH 0x00000000
29#define SPEEDSTEP_LOW 0x00000001 29#define SPEEDSTEP_LOW 0x00000001
30 30
31 31
32/* detect a speedstep-capable processor */ 32/* detect a speedstep-capable processor */
@@ -36,13 +36,13 @@ extern unsigned int speedstep_detect_processor (void);
36extern unsigned int speedstep_get_processor_frequency(unsigned int processor); 36extern unsigned int speedstep_get_processor_frequency(unsigned int processor);
37 37
38 38
39/* detect the low and high speeds of the processor. The callback 39/* detect the low and high speeds of the processor. The callback
40 * set_state"'s first argument is either SPEEDSTEP_HIGH or 40 * set_state"'s first argument is either SPEEDSTEP_HIGH or
41 * SPEEDSTEP_LOW; the second argument is zero so that no 41 * SPEEDSTEP_LOW; the second argument is zero so that no
42 * cpufreq_notify_transition calls are initiated. 42 * cpufreq_notify_transition calls are initiated.
43 */ 43 */
44extern unsigned int speedstep_get_freqs(unsigned int processor, 44extern unsigned int speedstep_get_freqs(unsigned int processor,
45 unsigned int *low_speed, 45 unsigned int *low_speed,
46 unsigned int *high_speed, 46 unsigned int *high_speed,
47 unsigned int *transition_latency, 47 unsigned int *transition_latency,
48 void (*set_state) (unsigned int state)); 48 void (*set_state) (unsigned int state));
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
index 28cc5d524afc..c28333d53646 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
@@ -13,8 +13,8 @@
13 *********************************************************************/ 13 *********************************************************************/
14 14
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/moduleparam.h> 17#include <linux/moduleparam.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/cpufreq.h> 19#include <linux/cpufreq.h>
20#include <linux/pci.h> 20#include <linux/pci.h>
@@ -28,21 +28,21 @@
28 * 28 *
29 * These parameters are got from IST-SMI BIOS call. 29 * These parameters are got from IST-SMI BIOS call.
30 * If user gives it, these are used. 30 * If user gives it, these are used.
31 * 31 *
32 */ 32 */
33static int smi_port = 0; 33static int smi_port = 0;
34static int smi_cmd = 0; 34static int smi_cmd = 0;
35static unsigned int smi_sig = 0; 35static unsigned int smi_sig = 0;
36 36
37/* info about the processor */ 37/* info about the processor */
38static unsigned int speedstep_processor = 0; 38static unsigned int speedstep_processor = 0;
39 39
40/* 40/*
41 * There are only two frequency states for each processor. Values 41 * There are only two frequency states for each processor. Values
42 * are in kHz for the time being. 42 * are in kHz for the time being.
43 */ 43 */
44static struct cpufreq_frequency_table speedstep_freqs[] = { 44static struct cpufreq_frequency_table speedstep_freqs[] = {
45 {SPEEDSTEP_HIGH, 0}, 45 {SPEEDSTEP_HIGH, 0},
46 {SPEEDSTEP_LOW, 0}, 46 {SPEEDSTEP_LOW, 0},
47 {0, CPUFREQ_TABLE_END}, 47 {0, CPUFREQ_TABLE_END},
48}; 48};
@@ -75,7 +75,9 @@ static int speedstep_smi_ownership (void)
75 __asm__ __volatile__( 75 __asm__ __volatile__(
76 "out %%al, (%%dx)\n" 76 "out %%al, (%%dx)\n"
77 : "=D" (result) 77 : "=D" (result)
78 : "a" (command), "b" (function), "c" (0), "d" (smi_port), "D" (0), "S" (magic) 78 : "a" (command), "b" (function), "c" (0), "d" (smi_port),
79 "D" (0), "S" (magic)
80 : "memory"
79 ); 81 );
80 82
81 dprintk("result is %x\n", result); 83 dprintk("result is %x\n", result);
@@ -123,7 +125,7 @@ static int speedstep_smi_get_freqs (unsigned int *low, unsigned int *high)
123 *low = low_mhz * 1000; 125 *low = low_mhz * 1000;
124 126
125 return result; 127 return result;
126} 128}
127 129
128/** 130/**
129 * speedstep_get_state - set the SpeedStep state 131 * speedstep_get_state - set the SpeedStep state
@@ -204,7 +206,7 @@ static void speedstep_set_state (unsigned int state)
204 * speedstep_target - set a new CPUFreq policy 206 * speedstep_target - set a new CPUFreq policy
205 * @policy: new policy 207 * @policy: new policy
206 * @target_freq: new freq 208 * @target_freq: new freq
207 * @relation: 209 * @relation:
208 * 210 *
209 * Sets a new CPUFreq policy/freq. 211 * Sets a new CPUFreq policy/freq.
210 */ 212 */
@@ -283,7 +285,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
283 state = speedstep_get_state(); 285 state = speedstep_get_state();
284 speed = speedstep_freqs[state].frequency; 286 speed = speedstep_freqs[state].frequency;
285 287
286 dprintk("currently at %s speed setting - %i MHz\n", 288 dprintk("currently at %s speed setting - %i MHz\n",
287 (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) ? "low" : "high", 289 (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) ? "low" : "high",
288 (speed / 1000)); 290 (speed / 1000));
289 291
@@ -296,7 +298,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
296 if (result) 298 if (result)
297 return (result); 299 return (result);
298 300
299 cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu); 301 cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
300 302
301 return 0; 303 return 0;
302} 304}
@@ -332,8 +334,8 @@ static struct freq_attr* speedstep_attr[] = {
332 334
333static struct cpufreq_driver speedstep_driver = { 335static struct cpufreq_driver speedstep_driver = {
334 .name = "speedstep-smi", 336 .name = "speedstep-smi",
335 .verify = speedstep_verify, 337 .verify = speedstep_verify,
336 .target = speedstep_target, 338 .target = speedstep_target,
337 .init = speedstep_cpu_init, 339 .init = speedstep_cpu_init,
338 .exit = speedstep_cpu_exit, 340 .exit = speedstep_cpu_exit,
339 .get = speedstep_get, 341 .get = speedstep_get,
@@ -370,13 +372,12 @@ static int __init speedstep_init(void)
370 return -ENODEV; 372 return -ENODEV;
371 } 373 }
372 374
373 dprintk("signature:0x%.8lx, command:0x%.8lx, event:0x%.8lx, perf_level:0x%.8lx.\n", 375 dprintk("signature:0x%.8lx, command:0x%.8lx, event:0x%.8lx, perf_level:0x%.8lx.\n",
374 ist_info.signature, ist_info.command, ist_info.event, ist_info.perf_level); 376 ist_info.signature, ist_info.command, ist_info.event, ist_info.perf_level);
375 377
376 378 /* Error if no IST-SMI BIOS or no PARM
377 /* Error if no IST-SMI BIOS or no PARM
378 sig= 'ISGE' aka 'Intel Speedstep Gate E' */ 379 sig= 'ISGE' aka 'Intel Speedstep Gate E' */
379 if ((ist_info.signature != 0x47534943) && ( 380 if ((ist_info.signature != 0x47534943) && (
380 (smi_port == 0) || (smi_cmd == 0))) 381 (smi_port == 0) || (smi_cmd == 0)))
381 return -ENODEV; 382 return -ENODEV;
382 383
@@ -386,17 +387,15 @@ static int __init speedstep_init(void)
386 smi_sig = ist_info.signature; 387 smi_sig = ist_info.signature;
387 388
388 /* setup smi_port from MODLULE_PARM or BIOS */ 389 /* setup smi_port from MODLULE_PARM or BIOS */
389 if ((smi_port > 0xff) || (smi_port < 0)) { 390 if ((smi_port > 0xff) || (smi_port < 0))
390 return -EINVAL; 391 return -EINVAL;
391 } else if (smi_port == 0) { 392 else if (smi_port == 0)
392 smi_port = ist_info.command & 0xff; 393 smi_port = ist_info.command & 0xff;
393 }
394 394
395 if ((smi_cmd > 0xff) || (smi_cmd < 0)) { 395 if ((smi_cmd > 0xff) || (smi_cmd < 0))
396 return -EINVAL; 396 return -EINVAL;
397 } else if (smi_cmd == 0) { 397 else if (smi_cmd == 0)
398 smi_cmd = (ist_info.command >> 16) & 0xff; 398 smi_cmd = (ist_info.command >> 16) & 0xff;
399 }
400 399
401 return cpufreq_register_driver(&speedstep_driver); 400 return cpufreq_register_driver(&speedstep_driver);
402} 401}
diff --git a/arch/i386/kernel/cpu/intel.c b/arch/i386/kernel/cpu/intel.c
index 8c0120186b9f..5386b29bb5a5 100644
--- a/arch/i386/kernel/cpu/intel.c
+++ b/arch/i386/kernel/cpu/intel.c
@@ -29,7 +29,7 @@ extern int trap_init_f00f_bug(void);
29struct movsl_mask movsl_mask __read_mostly; 29struct movsl_mask movsl_mask __read_mostly;
30#endif 30#endif
31 31
32void __devinit early_intel_workaround(struct cpuinfo_x86 *c) 32void __cpuinit early_intel_workaround(struct cpuinfo_x86 *c)
33{ 33{
34 if (c->x86_vendor != X86_VENDOR_INTEL) 34 if (c->x86_vendor != X86_VENDOR_INTEL)
35 return; 35 return;
@@ -44,7 +44,7 @@ void __devinit early_intel_workaround(struct cpuinfo_x86 *c)
44 * This is called before we do cpu ident work 44 * This is called before we do cpu ident work
45 */ 45 */
46 46
47int __devinit ppro_with_ram_bug(void) 47int __cpuinit ppro_with_ram_bug(void)
48{ 48{
49 /* Uses data from early_cpu_detect now */ 49 /* Uses data from early_cpu_detect now */
50 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && 50 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
@@ -62,7 +62,7 @@ int __devinit ppro_with_ram_bug(void)
62 * P4 Xeon errata 037 workaround. 62 * P4 Xeon errata 037 workaround.
63 * Hardware prefetcher may cause stale data to be loaded into the cache. 63 * Hardware prefetcher may cause stale data to be loaded into the cache.
64 */ 64 */
65static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c) 65static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
66{ 66{
67 unsigned long lo, hi; 67 unsigned long lo, hi;
68 68
@@ -81,7 +81,7 @@ static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
81/* 81/*
82 * find out the number of processor cores on the die 82 * find out the number of processor cores on the die
83 */ 83 */
84static int __devinit num_cpu_cores(struct cpuinfo_x86 *c) 84static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c)
85{ 85{
86 unsigned int eax, ebx, ecx, edx; 86 unsigned int eax, ebx, ecx, edx;
87 87
@@ -96,7 +96,7 @@ static int __devinit num_cpu_cores(struct cpuinfo_x86 *c)
96 return 1; 96 return 1;
97} 97}
98 98
99static void __devinit init_intel(struct cpuinfo_x86 *c) 99static void __cpuinit init_intel(struct cpuinfo_x86 *c)
100{ 100{
101 unsigned int l2 = 0; 101 unsigned int l2 = 0;
102 char *p = NULL; 102 char *p = NULL;
@@ -205,7 +205,7 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 * c, unsigned int size)
205 return size; 205 return size;
206} 206}
207 207
208static struct cpu_dev intel_cpu_dev __devinitdata = { 208static struct cpu_dev intel_cpu_dev __cpuinitdata = {
209 .c_vendor = "Intel", 209 .c_vendor = "Intel",
210 .c_ident = { "GenuineIntel" }, 210 .c_ident = { "GenuineIntel" },
211 .c_models = { 211 .c_models = {
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index ffe58cee0c48..ce61921369e5 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -174,7 +174,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
174 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ 174 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
175 unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ 175 unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
176 176
177 if (c->cpuid_level > 4) { 177 if (c->cpuid_level > 3) {
178 static int is_initialized; 178 static int is_initialized;
179 179
180 if (is_initialized == 0) { 180 if (is_initialized == 0) {
@@ -330,7 +330,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
330 } 330 }
331 } 331 }
332} 332}
333static void __devinit cache_remove_shared_cpu_map(unsigned int cpu, int index) 333static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
334{ 334{
335 struct _cpuid4_info *this_leaf, *sibling_leaf; 335 struct _cpuid4_info *this_leaf, *sibling_leaf;
336 int sibling; 336 int sibling;
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index 89a85af33d28..f94cdb7aca50 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -40,12 +40,12 @@ static int show_cpuinfo(struct seq_file *m, void *v)
40 /* Other (Linux-defined) */ 40 /* Other (Linux-defined) */
41 "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", 41 "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
42 NULL, NULL, NULL, NULL, 42 NULL, NULL, NULL, NULL,
43 "constant_tsc", NULL, NULL, NULL, NULL, NULL, NULL, NULL, 43 "constant_tsc", "up", NULL, NULL, NULL, NULL, NULL, NULL,
44 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 44 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
45 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 45 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
46 46
47 /* Intel-defined (#2) */ 47 /* Intel-defined (#2) */
48 "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", NULL, "est", 48 "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
49 "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL, 49 "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
50 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 50 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
51 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 51 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c
index d49dbe8dc96b..e3c5fca0aa8a 100644
--- a/arch/i386/kernel/crash.c
+++ b/arch/i386/kernel/crash.c
@@ -105,7 +105,7 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
105 return 1; 105 return 1;
106 local_irq_disable(); 106 local_irq_disable();
107 107
108 if (!user_mode(regs)) { 108 if (!user_mode_vm(regs)) {
109 crash_fixup_ss_esp(&fixed_regs, regs); 109 crash_fixup_ss_esp(&fixed_regs, regs);
110 regs = &fixed_regs; 110 regs = &fixed_regs;
111 } 111 }
diff --git a/arch/i386/kernel/dmi_scan.c b/arch/i386/kernel/dmi_scan.c
index 6a93d75db431..ebc8dc116c43 100644
--- a/arch/i386/kernel/dmi_scan.c
+++ b/arch/i386/kernel/dmi_scan.c
@@ -5,6 +5,7 @@
5#include <linux/dmi.h> 5#include <linux/dmi.h>
6#include <linux/bootmem.h> 6#include <linux/bootmem.h>
7#include <linux/slab.h> 7#include <linux/slab.h>
8#include <asm/dmi.h>
8 9
9static char * __init dmi_string(struct dmi_header *dm, u8 s) 10static char * __init dmi_string(struct dmi_header *dm, u8 s)
10{ 11{
@@ -106,7 +107,7 @@ static void __init dmi_save_devices(struct dmi_header *dm)
106 struct dmi_device *dev; 107 struct dmi_device *dev;
107 108
108 for (i = 0; i < count; i++) { 109 for (i = 0; i < count; i++) {
109 char *d = ((char *) dm) + (i * 2); 110 char *d = (char *)(dm + 1) + (i * 2);
110 111
111 /* Skip disabled device */ 112 /* Skip disabled device */
112 if ((*d & 0x80) == 0) 113 if ((*d & 0x80) == 0)
@@ -299,3 +300,33 @@ struct dmi_device * dmi_find_device(int type, const char *name,
299 return NULL; 300 return NULL;
300} 301}
301EXPORT_SYMBOL(dmi_find_device); 302EXPORT_SYMBOL(dmi_find_device);
303
304/**
305 * dmi_get_year - Return year of a DMI date
306 * @field: data index (like dmi_get_system_info)
307 *
308 * Returns -1 when the field doesn't exist. 0 when it is broken.
309 */
310int dmi_get_year(int field)
311{
312 int year;
313 char *s = dmi_get_system_info(field);
314
315 if (!s)
316 return -1;
317 if (*s == '\0')
318 return 0;
319 s = strrchr(s, '/');
320 if (!s)
321 return 0;
322
323 s += 1;
324 year = simple_strtoul(s, NULL, 0);
325 if (year && year < 100) { /* 2-digit year */
326 year += 1900;
327 if (year < 1996) /* no dates < spec 1.0 */
328 year += 100;
329 }
330
331 return year;
332}
diff --git a/arch/i386/kernel/efi.c b/arch/i386/kernel/efi.c
index aeabb4196861..7ec6cfa01fb3 100644
--- a/arch/i386/kernel/efi.c
+++ b/arch/i386/kernel/efi.c
@@ -543,7 +543,7 @@ efi_initialize_iomem_resources(struct resource *code_resource,
543 if ((md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) > 543 if ((md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >
544 0x100000000ULL) 544 0x100000000ULL)
545 continue; 545 continue;
546 res = alloc_bootmem_low(sizeof(struct resource)); 546 res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
547 switch (md->type) { 547 switch (md->type) {
548 case EFI_RESERVED_TYPE: 548 case EFI_RESERVED_TYPE:
549 res->name = "Reserved Memory"; 549 res->name = "Reserved Memory";
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index 4d704724b2f5..cfc683f153b9 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -226,6 +226,10 @@ ENTRY(system_call)
226 pushl %eax # save orig_eax 226 pushl %eax # save orig_eax
227 SAVE_ALL 227 SAVE_ALL
228 GET_THREAD_INFO(%ebp) 228 GET_THREAD_INFO(%ebp)
229 testl $TF_MASK,EFLAGS(%esp)
230 jz no_singlestep
231 orl $_TIF_SINGLESTEP,TI_flags(%ebp)
232no_singlestep:
229 # system call tracing in operation / emulation 233 # system call tracing in operation / emulation
230 /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */ 234 /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
231 testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp) 235 testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S
index e0b7c632efbc..3debc2e26542 100644
--- a/arch/i386/kernel/head.S
+++ b/arch/i386/kernel/head.S
@@ -450,7 +450,6 @@ int_msg:
450 450
451.globl boot_gdt_descr 451.globl boot_gdt_descr
452.globl idt_descr 452.globl idt_descr
453.globl cpu_gdt_descr
454 453
455 ALIGN 454 ALIGN
456# early boot GDT descriptor (must use 1:1 address mapping) 455# early boot GDT descriptor (must use 1:1 address mapping)
@@ -470,8 +469,6 @@ cpu_gdt_descr:
470 .word GDT_ENTRIES*8-1 469 .word GDT_ENTRIES*8-1
471 .long cpu_gdt_table 470 .long cpu_gdt_table
472 471
473 .fill NR_CPUS-1,8,0 # space for the other GDT descriptors
474
475/* 472/*
476 * The boot_gdt_table must mirror the equivalent in setup.S and is 473 * The boot_gdt_table must mirror the equivalent in setup.S and is
477 * used only for booting. 474 * used only for booting.
@@ -485,7 +482,7 @@ ENTRY(boot_gdt_table)
485/* 482/*
486 * The Global Descriptor Table contains 28 quadwords, per-CPU. 483 * The Global Descriptor Table contains 28 quadwords, per-CPU.
487 */ 484 */
488 .align PAGE_SIZE_asm 485 .align L1_CACHE_BYTES
489ENTRY(cpu_gdt_table) 486ENTRY(cpu_gdt_table)
490 .quad 0x0000000000000000 /* NULL descriptor */ 487 .quad 0x0000000000000000 /* NULL descriptor */
491 .quad 0x0000000000000000 /* 0x0b reserved */ 488 .quad 0x0000000000000000 /* 0x0b reserved */
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 39d9a5fa907e..311b4e7266f1 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -351,8 +351,8 @@ static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
351{ 351{
352 int i, j; 352 int i, j;
353 Dprintk("Rotating IRQs among CPUs.\n"); 353 Dprintk("Rotating IRQs among CPUs.\n");
354 for (i = 0; i < NR_CPUS; i++) { 354 for_each_online_cpu(i) {
355 for (j = 0; cpu_online(i) && (j < NR_IRQS); j++) { 355 for (j = 0; j < NR_IRQS; j++) {
356 if (!irq_desc[j].action) 356 if (!irq_desc[j].action)
357 continue; 357 continue;
358 /* Is it a significant load ? */ 358 /* Is it a significant load ? */
@@ -381,7 +381,7 @@ static void do_irq_balance(void)
381 unsigned long imbalance = 0; 381 unsigned long imbalance = 0;
382 cpumask_t allowed_mask, target_cpu_mask, tmp; 382 cpumask_t allowed_mask, target_cpu_mask, tmp;
383 383
384 for (i = 0; i < NR_CPUS; i++) { 384 for_each_cpu(i) {
385 int package_index; 385 int package_index;
386 CPU_IRQ(i) = 0; 386 CPU_IRQ(i) = 0;
387 if (!cpu_online(i)) 387 if (!cpu_online(i))
@@ -422,9 +422,7 @@ static void do_irq_balance(void)
422 } 422 }
423 } 423 }
424 /* Find the least loaded processor package */ 424 /* Find the least loaded processor package */
425 for (i = 0; i < NR_CPUS; i++) { 425 for_each_online_cpu(i) {
426 if (!cpu_online(i))
427 continue;
428 if (i != CPU_TO_PACKAGEINDEX(i)) 426 if (i != CPU_TO_PACKAGEINDEX(i))
429 continue; 427 continue;
430 if (min_cpu_irq > CPU_IRQ(i)) { 428 if (min_cpu_irq > CPU_IRQ(i)) {
@@ -441,9 +439,7 @@ tryanothercpu:
441 */ 439 */
442 tmp_cpu_irq = 0; 440 tmp_cpu_irq = 0;
443 tmp_loaded = -1; 441 tmp_loaded = -1;
444 for (i = 0; i < NR_CPUS; i++) { 442 for_each_online_cpu(i) {
445 if (!cpu_online(i))
446 continue;
447 if (i != CPU_TO_PACKAGEINDEX(i)) 443 if (i != CPU_TO_PACKAGEINDEX(i))
448 continue; 444 continue;
449 if (max_cpu_irq <= CPU_IRQ(i)) 445 if (max_cpu_irq <= CPU_IRQ(i))
@@ -619,9 +615,7 @@ static int __init balanced_irq_init(void)
619 if (smp_num_siblings > 1 && !cpus_empty(tmp)) 615 if (smp_num_siblings > 1 && !cpus_empty(tmp))
620 physical_balance = 1; 616 physical_balance = 1;
621 617
622 for (i = 0; i < NR_CPUS; i++) { 618 for_each_online_cpu(i) {
623 if (!cpu_online(i))
624 continue;
625 irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL); 619 irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
626 irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL); 620 irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
627 if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) { 621 if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) {
@@ -638,9 +632,11 @@ static int __init balanced_irq_init(void)
638 else 632 else
639 printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq"); 633 printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
640failed: 634failed:
641 for (i = 0; i < NR_CPUS; i++) { 635 for_each_cpu(i) {
642 kfree(irq_cpu_data[i].irq_delta); 636 kfree(irq_cpu_data[i].irq_delta);
637 irq_cpu_data[i].irq_delta = NULL;
643 kfree(irq_cpu_data[i].last_irq); 638 kfree(irq_cpu_data[i].last_irq);
639 irq_cpu_data[i].last_irq = NULL;
644 } 640 }
645 return 0; 641 return 0;
646} 642}
@@ -1761,7 +1757,8 @@ static void __init setup_ioapic_ids_from_mpc(void)
1761 * Don't check I/O APIC IDs for xAPIC systems. They have 1757 * Don't check I/O APIC IDs for xAPIC systems. They have
1762 * no meaning without the serial APIC bus. 1758 * no meaning without the serial APIC bus.
1763 */ 1759 */
1764 if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86 < 15)) 1760 if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
1761 || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
1765 return; 1762 return;
1766 /* 1763 /*
1767 * This is broken; anything with a real cpu count has to 1764 * This is broken; anything with a real cpu count has to
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 694a13997637..7a59050242a7 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -84,9 +84,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
84 84
85void __kprobes arch_remove_kprobe(struct kprobe *p) 85void __kprobes arch_remove_kprobe(struct kprobe *p)
86{ 86{
87 down(&kprobe_mutex); 87 mutex_lock(&kprobe_mutex);
88 free_insn_slot(p->ainsn.insn); 88 free_insn_slot(p->ainsn.insn);
89 up(&kprobe_mutex); 89 mutex_unlock(&kprobe_mutex);
90} 90}
91 91
92static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb) 92static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c
index 5390b521aca0..55bc365b8753 100644
--- a/arch/i386/kernel/microcode.c
+++ b/arch/i386/kernel/microcode.c
@@ -202,8 +202,6 @@ static inline void mark_microcode_update (int cpu_num, microcode_header_t *mc_he
202 } else if (mc_header->rev == uci->rev) { 202 } else if (mc_header->rev == uci->rev) {
203 /* notify the caller of success on this cpu */ 203 /* notify the caller of success on this cpu */
204 uci->err = MC_SUCCESS; 204 uci->err = MC_SUCCESS;
205 printk(KERN_ERR "microcode: CPU%d already at revision"
206 " 0x%x (current=0x%x)\n", cpu_num, mc_header->rev, uci->rev);
207 goto out; 205 goto out;
208 } 206 }
209 207
@@ -369,7 +367,6 @@ static void do_update_one (void * unused)
369 struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num; 367 struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num;
370 368
371 if (uci->mc == NULL) { 369 if (uci->mc == NULL) {
372 printk(KERN_INFO "microcode: No new microcode data for CPU%d\n", cpu_num);
373 return; 370 return;
374 } 371 }
375 372
@@ -511,7 +508,6 @@ static int __init microcode_init (void)
511static void __exit microcode_exit (void) 508static void __exit microcode_exit (void)
512{ 509{
513 misc_deregister(&microcode_dev); 510 misc_deregister(&microcode_dev);
514 printk(KERN_INFO "IA-32 Microcode Update Driver v" MICROCODE_VERSION " unregistered\n");
515} 511}
516 512
517module_init(microcode_init) 513module_init(microcode_init)
diff --git a/arch/i386/kernel/module.c b/arch/i386/kernel/module.c
index 5149c8a621f0..470cf97e7cd3 100644
--- a/arch/i386/kernel/module.c
+++ b/arch/i386/kernel/module.c
@@ -104,26 +104,38 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
104 return -ENOEXEC; 104 return -ENOEXEC;
105} 105}
106 106
107extern void apply_alternatives(void *start, void *end);
108
109int module_finalize(const Elf_Ehdr *hdr, 107int module_finalize(const Elf_Ehdr *hdr,
110 const Elf_Shdr *sechdrs, 108 const Elf_Shdr *sechdrs,
111 struct module *me) 109 struct module *me)
112{ 110{
113 const Elf_Shdr *s; 111 const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL;
114 char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; 112 char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
115 113
116 /* look for .altinstructions to patch */
117 for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { 114 for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
118 void *seg; 115 if (!strcmp(".text", secstrings + s->sh_name))
119 if (strcmp(".altinstructions", secstrings + s->sh_name)) 116 text = s;
120 continue; 117 if (!strcmp(".altinstructions", secstrings + s->sh_name))
121 seg = (void *)s->sh_addr; 118 alt = s;
122 apply_alternatives(seg, seg + s->sh_size); 119 if (!strcmp(".smp_locks", secstrings + s->sh_name))
123 } 120 locks= s;
121 }
122
123 if (alt) {
124 /* patch .altinstructions */
125 void *aseg = (void *)alt->sh_addr;
126 apply_alternatives(aseg, aseg + alt->sh_size);
127 }
128 if (locks && text) {
129 void *lseg = (void *)locks->sh_addr;
130 void *tseg = (void *)text->sh_addr;
131 alternatives_smp_module_add(me, me->name,
132 lseg, lseg + locks->sh_size,
133 tseg, tseg + text->sh_size);
134 }
124 return 0; 135 return 0;
125} 136}
126 137
127void module_arch_cleanup(struct module *mod) 138void module_arch_cleanup(struct module *mod)
128{ 139{
140 alternatives_smp_module_del(mod);
129} 141}
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index e6e2f43db85e..8d8aa9d1796d 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -828,6 +828,8 @@ void __init find_smp_config (void)
828 smp_scan_config(address, 0x400); 828 smp_scan_config(address, 0x400);
829} 829}
830 830
831int es7000_plat;
832
831/* -------------------------------------------------------------------------- 833/* --------------------------------------------------------------------------
832 ACPI-based MP Configuration 834 ACPI-based MP Configuration
833 -------------------------------------------------------------------------- */ 835 -------------------------------------------------------------------------- */
@@ -935,7 +937,8 @@ void __init mp_register_ioapic (
935 mp_ioapics[idx].mpc_apicaddr = address; 937 mp_ioapics[idx].mpc_apicaddr = address;
936 938
937 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); 939 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
938 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 < 15)) 940 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
941 && !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
939 tmpid = io_apic_get_unique_id(idx, id); 942 tmpid = io_apic_get_unique_id(idx, id);
940 else 943 else
941 tmpid = id; 944 tmpid = id;
@@ -1011,8 +1014,6 @@ void __init mp_override_legacy_irq (
1011 return; 1014 return;
1012} 1015}
1013 1016
1014int es7000_plat;
1015
1016void __init mp_config_acpi_legacy_irqs (void) 1017void __init mp_config_acpi_legacy_irqs (void)
1017{ 1018{
1018 struct mpc_config_intsrc intsrc; 1019 struct mpc_config_intsrc intsrc;
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index be87c5e2ee95..9074818b9473 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -143,7 +143,7 @@ static int __init check_nmi_watchdog(void)
143 local_irq_enable(); 143 local_irq_enable();
144 mdelay((10*1000)/nmi_hz); // wait 10 ticks 144 mdelay((10*1000)/nmi_hz); // wait 10 ticks
145 145
146 for (cpu = 0; cpu < NR_CPUS; cpu++) { 146 for_each_cpu(cpu) {
147#ifdef CONFIG_SMP 147#ifdef CONFIG_SMP
148 /* Check cpu_callin_map here because that is set 148 /* Check cpu_callin_map here because that is set
149 after the timer is started. */ 149 after the timer is started. */
@@ -510,7 +510,7 @@ void touch_nmi_watchdog (void)
510 * Just reset the alert counters, (other CPUs might be 510 * Just reset the alert counters, (other CPUs might be
511 * spinning on locks we hold): 511 * spinning on locks we hold):
512 */ 512 */
513 for (i = 0; i < NR_CPUS; i++) 513 for_each_cpu(i)
514 alert_counter[i] = 0; 514 alert_counter[i] = 0;
515 515
516 /* 516 /*
@@ -543,7 +543,7 @@ void nmi_watchdog_tick (struct pt_regs * regs)
543 /* 543 /*
544 * die_nmi will return ONLY if NOTIFY_STOP happens.. 544 * die_nmi will return ONLY if NOTIFY_STOP happens..
545 */ 545 */
546 die_nmi(regs, "NMI Watchdog detected LOCKUP"); 546 die_nmi(regs, "BUG: NMI Watchdog detected LOCKUP");
547 } else { 547 } else {
548 last_irq_sums[cpu] = sum; 548 last_irq_sums[cpu] = sum;
549 alert_counter[cpu] = 0; 549 alert_counter[cpu] = 0;
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 0480454ebffa..299e61674084 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -295,7 +295,7 @@ void show_regs(struct pt_regs * regs)
295 printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id()); 295 printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
296 print_symbol("EIP is at %s\n", regs->eip); 296 print_symbol("EIP is at %s\n", regs->eip);
297 297
298 if (user_mode(regs)) 298 if (user_mode_vm(regs))
299 printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp); 299 printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
300 printk(" EFLAGS: %08lx %s (%s %.*s)\n", 300 printk(" EFLAGS: %08lx %s (%s %.*s)\n",
301 regs->eflags, print_tainted(), system_utsname.release, 301 regs->eflags, print_tainted(), system_utsname.release,
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index 5c1fb6aada5b..506462ef36a0 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -34,10 +34,10 @@
34 34
35/* 35/*
36 * Determines which flags the user has access to [1 = access, 0 = no access]. 36 * Determines which flags the user has access to [1 = access, 0 = no access].
37 * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), IOPL(12-13), IF(9). 37 * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), NT(14), IOPL(12-13), IF(9).
38 * Also masks reserved bits (31-22, 15, 5, 3, 1). 38 * Also masks reserved bits (31-22, 15, 5, 3, 1).
39 */ 39 */
40#define FLAG_MASK 0x00054dd5 40#define FLAG_MASK 0x00050dd5
41 41
42/* set's the trap flag. */ 42/* set's the trap flag. */
43#define TRAP_FLAG 0x100 43#define TRAP_FLAG 0x100
diff --git a/arch/i386/kernel/semaphore.c b/arch/i386/kernel/semaphore.c
index 7455ab643943..967dc74df9ee 100644
--- a/arch/i386/kernel/semaphore.c
+++ b/arch/i386/kernel/semaphore.c
@@ -110,11 +110,11 @@ asm(
110".align 4\n" 110".align 4\n"
111".globl __write_lock_failed\n" 111".globl __write_lock_failed\n"
112"__write_lock_failed:\n\t" 112"__write_lock_failed:\n\t"
113 LOCK "addl $" RW_LOCK_BIAS_STR ",(%eax)\n" 113 LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ",(%eax)\n"
114"1: rep; nop\n\t" 114"1: rep; nop\n\t"
115 "cmpl $" RW_LOCK_BIAS_STR ",(%eax)\n\t" 115 "cmpl $" RW_LOCK_BIAS_STR ",(%eax)\n\t"
116 "jne 1b\n\t" 116 "jne 1b\n\t"
117 LOCK "subl $" RW_LOCK_BIAS_STR ",(%eax)\n\t" 117 LOCK_PREFIX "subl $" RW_LOCK_BIAS_STR ",(%eax)\n\t"
118 "jnz __write_lock_failed\n\t" 118 "jnz __write_lock_failed\n\t"
119 "ret" 119 "ret"
120); 120);
@@ -124,11 +124,11 @@ asm(
124".align 4\n" 124".align 4\n"
125".globl __read_lock_failed\n" 125".globl __read_lock_failed\n"
126"__read_lock_failed:\n\t" 126"__read_lock_failed:\n\t"
127 LOCK "incl (%eax)\n" 127 LOCK_PREFIX "incl (%eax)\n"
128"1: rep; nop\n\t" 128"1: rep; nop\n\t"
129 "cmpl $1,(%eax)\n\t" 129 "cmpl $1,(%eax)\n\t"
130 "js 1b\n\t" 130 "js 1b\n\t"
131 LOCK "decl (%eax)\n\t" 131 LOCK_PREFIX "decl (%eax)\n\t"
132 "js __read_lock_failed\n\t" 132 "js __read_lock_failed\n\t"
133 "ret" 133 "ret"
134); 134);
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index ab62a9f4701e..d313a11acafa 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -1288,7 +1288,7 @@ legacy_init_iomem_resources(struct resource *code_resource, struct resource *dat
1288 struct resource *res; 1288 struct resource *res;
1289 if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL) 1289 if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
1290 continue; 1290 continue;
1291 res = alloc_bootmem_low(sizeof(struct resource)); 1291 res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
1292 switch (e820.map[i].type) { 1292 switch (e820.map[i].type) {
1293 case E820_RAM: res->name = "System RAM"; break; 1293 case E820_RAM: res->name = "System RAM"; break;
1294 case E820_ACPI: res->name = "ACPI Tables"; break; 1294 case E820_ACPI: res->name = "ACPI Tables"; break;
@@ -1316,13 +1316,15 @@ legacy_init_iomem_resources(struct resource *code_resource, struct resource *dat
1316 1316
1317/* 1317/*
1318 * Request address space for all standard resources 1318 * Request address space for all standard resources
1319 *
1320 * This is called just before pcibios_assign_resources(), which is also
1321 * an fs_initcall, but is linked in later (in arch/i386/pci/i386.c).
1319 */ 1322 */
1320static void __init register_memory(void) 1323static int __init request_standard_resources(void)
1321{ 1324{
1322 unsigned long gapstart, gapsize, round; 1325 int i;
1323 unsigned long long last;
1324 int i;
1325 1326
1327 printk("Setting up standard PCI resources\n");
1326 if (efi_enabled) 1328 if (efi_enabled)
1327 efi_initialize_iomem_resources(&code_resource, &data_resource); 1329 efi_initialize_iomem_resources(&code_resource, &data_resource);
1328 else 1330 else
@@ -1334,6 +1336,16 @@ static void __init register_memory(void)
1334 /* request I/O space for devices used on all i[345]86 PCs */ 1336 /* request I/O space for devices used on all i[345]86 PCs */
1335 for (i = 0; i < STANDARD_IO_RESOURCES; i++) 1337 for (i = 0; i < STANDARD_IO_RESOURCES; i++)
1336 request_resource(&ioport_resource, &standard_io_resources[i]); 1338 request_resource(&ioport_resource, &standard_io_resources[i]);
1339 return 0;
1340}
1341
1342fs_initcall(request_standard_resources);
1343
1344static void __init register_memory(void)
1345{
1346 unsigned long gapstart, gapsize, round;
1347 unsigned long long last;
1348 int i;
1337 1349
1338 /* 1350 /*
1339 * Search for the bigest gap in the low 32 bits of the e820 1351 * Search for the bigest gap in the low 32 bits of the e820
@@ -1377,101 +1389,6 @@ static void __init register_memory(void)
1377 pci_mem_start, gapstart, gapsize); 1389 pci_mem_start, gapstart, gapsize);
1378} 1390}
1379 1391
1380/* Use inline assembly to define this because the nops are defined
1381 as inline assembly strings in the include files and we cannot
1382 get them easily into strings. */
1383asm("\t.data\nintelnops: "
1384 GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
1385 GENERIC_NOP7 GENERIC_NOP8);
1386asm("\t.data\nk8nops: "
1387 K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
1388 K8_NOP7 K8_NOP8);
1389asm("\t.data\nk7nops: "
1390 K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
1391 K7_NOP7 K7_NOP8);
1392
1393extern unsigned char intelnops[], k8nops[], k7nops[];
1394static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
1395 NULL,
1396 intelnops,
1397 intelnops + 1,
1398 intelnops + 1 + 2,
1399 intelnops + 1 + 2 + 3,
1400 intelnops + 1 + 2 + 3 + 4,
1401 intelnops + 1 + 2 + 3 + 4 + 5,
1402 intelnops + 1 + 2 + 3 + 4 + 5 + 6,
1403 intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
1404};
1405static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
1406 NULL,
1407 k8nops,
1408 k8nops + 1,
1409 k8nops + 1 + 2,
1410 k8nops + 1 + 2 + 3,
1411 k8nops + 1 + 2 + 3 + 4,
1412 k8nops + 1 + 2 + 3 + 4 + 5,
1413 k8nops + 1 + 2 + 3 + 4 + 5 + 6,
1414 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
1415};
1416static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
1417 NULL,
1418 k7nops,
1419 k7nops + 1,
1420 k7nops + 1 + 2,
1421 k7nops + 1 + 2 + 3,
1422 k7nops + 1 + 2 + 3 + 4,
1423 k7nops + 1 + 2 + 3 + 4 + 5,
1424 k7nops + 1 + 2 + 3 + 4 + 5 + 6,
1425 k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
1426};
1427static struct nop {
1428 int cpuid;
1429 unsigned char **noptable;
1430} noptypes[] = {
1431 { X86_FEATURE_K8, k8_nops },
1432 { X86_FEATURE_K7, k7_nops },
1433 { -1, NULL }
1434};
1435
1436/* Replace instructions with better alternatives for this CPU type.
1437
1438 This runs before SMP is initialized to avoid SMP problems with
1439 self modifying code. This implies that assymetric systems where
1440 APs have less capabilities than the boot processor are not handled.
1441 Tough. Make sure you disable such features by hand. */
1442void apply_alternatives(void *start, void *end)
1443{
1444 struct alt_instr *a;
1445 int diff, i, k;
1446 unsigned char **noptable = intel_nops;
1447 for (i = 0; noptypes[i].cpuid >= 0; i++) {
1448 if (boot_cpu_has(noptypes[i].cpuid)) {
1449 noptable = noptypes[i].noptable;
1450 break;
1451 }
1452 }
1453 for (a = start; (void *)a < end; a++) {
1454 if (!boot_cpu_has(a->cpuid))
1455 continue;
1456 BUG_ON(a->replacementlen > a->instrlen);
1457 memcpy(a->instr, a->replacement, a->replacementlen);
1458 diff = a->instrlen - a->replacementlen;
1459 /* Pad the rest with nops */
1460 for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
1461 k = diff;
1462 if (k > ASM_NOP_MAX)
1463 k = ASM_NOP_MAX;
1464 memcpy(a->instr + i, noptable[k], k);
1465 }
1466 }
1467}
1468
1469void __init alternative_instructions(void)
1470{
1471 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
1472 apply_alternatives(__alt_instructions, __alt_instructions_end);
1473}
1474
1475static char * __init machine_specific_memory_setup(void); 1392static char * __init machine_specific_memory_setup(void);
1476 1393
1477#ifdef CONFIG_MCA 1394#ifdef CONFIG_MCA
@@ -1554,6 +1471,16 @@ void __init setup_arch(char **cmdline_p)
1554 1471
1555 parse_cmdline_early(cmdline_p); 1472 parse_cmdline_early(cmdline_p);
1556 1473
1474#ifdef CONFIG_EARLY_PRINTK
1475 {
1476 char *s = strstr(*cmdline_p, "earlyprintk=");
1477 if (s) {
1478 setup_early_printk(strchr(s, '=') + 1);
1479 printk("early console enabled\n");
1480 }
1481 }
1482#endif
1483
1557 max_low_pfn = setup_memory(); 1484 max_low_pfn = setup_memory();
1558 1485
1559 /* 1486 /*
@@ -1578,19 +1505,6 @@ void __init setup_arch(char **cmdline_p)
1578 * NOTE: at this point the bootmem allocator is fully available. 1505 * NOTE: at this point the bootmem allocator is fully available.
1579 */ 1506 */
1580 1507
1581#ifdef CONFIG_EARLY_PRINTK
1582 {
1583 char *s = strstr(*cmdline_p, "earlyprintk=");
1584 if (s) {
1585 extern void setup_early_printk(char *);
1586
1587 setup_early_printk(strchr(s, '=') + 1);
1588 printk("early console enabled\n");
1589 }
1590 }
1591#endif
1592
1593
1594 dmi_scan_machine(); 1508 dmi_scan_machine();
1595 1509
1596#ifdef CONFIG_X86_GENERICARCH 1510#ifdef CONFIG_X86_GENERICARCH
diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c
index 963616d364ec..5c352c3a9e7f 100644
--- a/arch/i386/kernel/signal.c
+++ b/arch/i386/kernel/signal.c
@@ -123,7 +123,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax
123 err |= __get_user(tmp, &sc->seg); \ 123 err |= __get_user(tmp, &sc->seg); \
124 loadsegment(seg,tmp); } 124 loadsegment(seg,tmp); }
125 125
126#define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | X86_EFLAGS_DF | \ 126#define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_RF | \
127 X86_EFLAGS_OF | X86_EFLAGS_DF | \
127 X86_EFLAGS_TF | X86_EFLAGS_SF | X86_EFLAGS_ZF | \ 128 X86_EFLAGS_TF | X86_EFLAGS_SF | X86_EFLAGS_ZF | \
128 X86_EFLAGS_AF | X86_EFLAGS_PF | X86_EFLAGS_CF) 129 X86_EFLAGS_AF | X86_EFLAGS_PF | X86_EFLAGS_CF)
129 130
@@ -582,9 +583,6 @@ static void fastcall do_signal(struct pt_regs *regs)
582 if (!user_mode(regs)) 583 if (!user_mode(regs))
583 return; 584 return;
584 585
585 if (try_to_freeze())
586 goto no_signal;
587
588 if (test_thread_flag(TIF_RESTORE_SIGMASK)) 586 if (test_thread_flag(TIF_RESTORE_SIGMASK))
589 oldset = &current->saved_sigmask; 587 oldset = &current->saved_sigmask;
590 else 588 else
@@ -613,7 +611,6 @@ static void fastcall do_signal(struct pt_regs *regs)
613 return; 611 return;
614 } 612 }
615 613
616no_signal:
617 /* Did we come from a system call? */ 614 /* Did we come from a system call? */
618 if (regs->orig_eax >= 0) { 615 if (regs->orig_eax >= 0) {
619 /* Restart the system call - no handlers present */ 616 /* Restart the system call - no handlers present */
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 7007e1783797..82371d83bfa9 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -899,6 +899,7 @@ static int __devinit do_boot_cpu(int apicid, int cpu)
899 unsigned short nmi_high = 0, nmi_low = 0; 899 unsigned short nmi_high = 0, nmi_low = 0;
900 900
901 ++cpucount; 901 ++cpucount;
902 alternatives_smp_switch(1);
902 903
903 /* 904 /*
904 * We can't use kernel_thread since we must avoid to 905 * We can't use kernel_thread since we must avoid to
@@ -1002,7 +1003,6 @@ void cpu_exit_clear(void)
1002 1003
1003 cpu_clear(cpu, cpu_callout_map); 1004 cpu_clear(cpu, cpu_callout_map);
1004 cpu_clear(cpu, cpu_callin_map); 1005 cpu_clear(cpu, cpu_callin_map);
1005 cpu_clear(cpu, cpu_present_map);
1006 1006
1007 cpu_clear(cpu, smp_commenced_mask); 1007 cpu_clear(cpu, smp_commenced_mask);
1008 unmap_cpu_to_logical_apicid(cpu); 1008 unmap_cpu_to_logical_apicid(cpu);
@@ -1014,31 +1014,20 @@ struct warm_boot_cpu_info {
1014 int cpu; 1014 int cpu;
1015}; 1015};
1016 1016
1017static void __devinit do_warm_boot_cpu(void *p) 1017static void __cpuinit do_warm_boot_cpu(void *p)
1018{ 1018{
1019 struct warm_boot_cpu_info *info = p; 1019 struct warm_boot_cpu_info *info = p;
1020 do_boot_cpu(info->apicid, info->cpu); 1020 do_boot_cpu(info->apicid, info->cpu);
1021 complete(info->complete); 1021 complete(info->complete);
1022} 1022}
1023 1023
1024int __devinit smp_prepare_cpu(int cpu) 1024static int __cpuinit __smp_prepare_cpu(int cpu)
1025{ 1025{
1026 DECLARE_COMPLETION(done); 1026 DECLARE_COMPLETION(done);
1027 struct warm_boot_cpu_info info; 1027 struct warm_boot_cpu_info info;
1028 struct work_struct task; 1028 struct work_struct task;
1029 int apicid, ret; 1029 int apicid, ret;
1030 1030
1031 lock_cpu_hotplug();
1032
1033 /*
1034 * On x86, CPU0 is never offlined. Trying to bring up an
1035 * already-booted CPU will hang. So check for that case.
1036 */
1037 if (cpu_online(cpu)) {
1038 ret = -EINVAL;
1039 goto exit;
1040 }
1041
1042 apicid = x86_cpu_to_apicid[cpu]; 1031 apicid = x86_cpu_to_apicid[cpu];
1043 if (apicid == BAD_APICID) { 1032 if (apicid == BAD_APICID) {
1044 ret = -ENODEV; 1033 ret = -ENODEV;
@@ -1063,7 +1052,6 @@ int __devinit smp_prepare_cpu(int cpu)
1063 zap_low_mappings(); 1052 zap_low_mappings();
1064 ret = 0; 1053 ret = 0;
1065exit: 1054exit:
1066 unlock_cpu_hotplug();
1067 return ret; 1055 return ret;
1068} 1056}
1069#endif 1057#endif
@@ -1368,6 +1356,8 @@ void __cpu_die(unsigned int cpu)
1368 /* They ack this in play_dead by setting CPU_DEAD */ 1356 /* They ack this in play_dead by setting CPU_DEAD */
1369 if (per_cpu(cpu_state, cpu) == CPU_DEAD) { 1357 if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
1370 printk ("CPU %d is now offline\n", cpu); 1358 printk ("CPU %d is now offline\n", cpu);
1359 if (1 == num_online_cpus())
1360 alternatives_smp_switch(0);
1371 return; 1361 return;
1372 } 1362 }
1373 msleep(100); 1363 msleep(100);
@@ -1389,6 +1379,22 @@ void __cpu_die(unsigned int cpu)
1389 1379
1390int __devinit __cpu_up(unsigned int cpu) 1380int __devinit __cpu_up(unsigned int cpu)
1391{ 1381{
1382#ifdef CONFIG_HOTPLUG_CPU
1383 int ret=0;
1384
1385 /*
1386	 * We warm boot only those cpus that had booted earlier.
1387	 * Otherwise cold boot is all handled from smp_boot_cpus().
1388	 * cpu_callin_map is set during the AP kickstart process. It's reset
1389	 * when a cpu is taken offline from cpu_exit_clear().
1390 */
1391 if (!cpu_isset(cpu, cpu_callin_map))
1392 ret = __smp_prepare_cpu(cpu);
1393
1394 if (ret)
1395 return -EIO;
1396#endif
1397
1392 /* In case one didn't come up */ 1398 /* In case one didn't come up */
1393 if (!cpu_isset(cpu, cpu_callin_map)) { 1399 if (!cpu_isset(cpu, cpu_callin_map)) {
1394 printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu); 1400 printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
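Editorial note: the CONFIG_HOTPLUG_CPU branch added to __cpu_up() only re-runs __smp_prepare_cpu() for CPUs that are absent from cpu_callin_map, i.e. warm boot is attempted only for processors that went through the kickstart once and were later offlined. A minimal userspace sketch of that "prepare only if not already called in" pattern follows; callin_mask, prepare_cpu() and cpu_up() are illustrative stand-ins, not kernel APIs.

#include <stdio.h>
#include <stdbool.h>

static unsigned long callin_mask;          /* stands in for cpu_callin_map */

static bool cpu_called_in(int cpu)
{
	return callin_mask & (1UL << cpu);
}

static int prepare_cpu(int cpu)
{
	/* pretend to warm-boot the CPU and mark it as called in */
	printf("warm booting cpu%d\n", cpu);
	callin_mask |= 1UL << cpu;
	return 0;
}

static int cpu_up(int cpu)
{
	int ret = 0;

	/* Only warm boot CPUs not already in the callin mask, mirroring
	 * the guard added to __cpu_up(). */
	if (!cpu_called_in(cpu))
		ret = prepare_cpu(cpu);
	if (ret)
		return -1;

	if (!cpu_called_in(cpu)) {
		printf("skipping cpu%d, didn't come online\n", cpu);
		return -1;
	}
	printf("cpu%d online\n", cpu);
	return 0;
}

int main(void)
{
	cpu_up(1);	/* first bring-up: warm boot runs */
	cpu_up(1);	/* second bring-up: already called in, no re-prepare */
	return 0;
}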
diff --git a/arch/i386/kernel/topology.c b/arch/i386/kernel/topology.c
index 67a0e1baa28b..296355292c7c 100644
--- a/arch/i386/kernel/topology.c
+++ b/arch/i386/kernel/topology.c
@@ -41,6 +41,15 @@ int arch_register_cpu(int num){
41 parent = &node_devices[node].node; 41 parent = &node_devices[node].node;
42#endif /* CONFIG_NUMA */ 42#endif /* CONFIG_NUMA */
43 43
44 /*
45	 * CPU0 cannot be offlined due to several
46	 * restrictions and assumptions in the kernel. Not adding
47	 * a control file here means nobody can attempt to offline
48	 * the BSP.
49 */
50 if (!num)
51 cpu_devices[num].cpu.no_control = 1;
52
44 return register_cpu(&cpu_devices[num].cpu, num, parent); 53 return register_cpu(&cpu_devices[num].cpu, num, parent);
45} 54}
46 55
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index b814dbdcc91e..de5386b01d38 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -99,6 +99,8 @@ int register_die_notifier(struct notifier_block *nb)
99{ 99{
100 int err = 0; 100 int err = 0;
101 unsigned long flags; 101 unsigned long flags;
102
103 vmalloc_sync_all();
102 spin_lock_irqsave(&die_notifier_lock, flags); 104 spin_lock_irqsave(&die_notifier_lock, flags);
103 err = notifier_chain_register(&i386die_chain, nb); 105 err = notifier_chain_register(&i386die_chain, nb);
104 spin_unlock_irqrestore(&die_notifier_lock, flags); 106 spin_unlock_irqrestore(&die_notifier_lock, flags);
@@ -112,12 +114,30 @@ static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
112 p < (void *)tinfo + THREAD_SIZE - 3; 114 p < (void *)tinfo + THREAD_SIZE - 3;
113} 115}
114 116
115static void print_addr_and_symbol(unsigned long addr, char *log_lvl) 117/*
118 * Print CONFIG_STACK_BACKTRACE_COLS address/symbol entries per line.
119 */
120static inline int print_addr_and_symbol(unsigned long addr, char *log_lvl,
121 int printed)
116{ 122{
117 printk(log_lvl); 123 if (!printed)
124 printk(log_lvl);
125
126#if CONFIG_STACK_BACKTRACE_COLS == 1
118 printk(" [<%08lx>] ", addr); 127 printk(" [<%08lx>] ", addr);
128#else
129 printk(" <%08lx> ", addr);
130#endif
119 print_symbol("%s", addr); 131 print_symbol("%s", addr);
120 printk("\n"); 132
133 printed = (printed + 1) % CONFIG_STACK_BACKTRACE_COLS;
134
135 if (printed)
136 printk(" ");
137 else
138 printk("\n");
139
140 return printed;
121} 141}
122 142
123static inline unsigned long print_context_stack(struct thread_info *tinfo, 143static inline unsigned long print_context_stack(struct thread_info *tinfo,
@@ -125,20 +145,24 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo,
125 char *log_lvl) 145 char *log_lvl)
126{ 146{
127 unsigned long addr; 147 unsigned long addr;
148 int printed = 0; /* nr of entries already printed on current line */
128 149
129#ifdef CONFIG_FRAME_POINTER 150#ifdef CONFIG_FRAME_POINTER
130 while (valid_stack_ptr(tinfo, (void *)ebp)) { 151 while (valid_stack_ptr(tinfo, (void *)ebp)) {
131 addr = *(unsigned long *)(ebp + 4); 152 addr = *(unsigned long *)(ebp + 4);
132 print_addr_and_symbol(addr, log_lvl); 153 printed = print_addr_and_symbol(addr, log_lvl, printed);
133 ebp = *(unsigned long *)ebp; 154 ebp = *(unsigned long *)ebp;
134 } 155 }
135#else 156#else
136 while (valid_stack_ptr(tinfo, stack)) { 157 while (valid_stack_ptr(tinfo, stack)) {
137 addr = *stack++; 158 addr = *stack++;
138 if (__kernel_text_address(addr)) 159 if (__kernel_text_address(addr))
139 print_addr_and_symbol(addr, log_lvl); 160 printed = print_addr_and_symbol(addr, log_lvl, printed);
140 } 161 }
141#endif 162#endif
163 if (printed)
164 printk("\n");
165
142 return ebp; 166 return ebp;
143} 167}
144 168
@@ -166,8 +190,7 @@ static void show_trace_log_lvl(struct task_struct *task,
166 stack = (unsigned long*)context->previous_esp; 190 stack = (unsigned long*)context->previous_esp;
167 if (!stack) 191 if (!stack)
168 break; 192 break;
169 printk(log_lvl); 193 printk("%s =======================\n", log_lvl);
170 printk(" =======================\n");
171 } 194 }
172} 195}
173 196
@@ -194,21 +217,17 @@ static void show_stack_log_lvl(struct task_struct *task, unsigned long *esp,
194 for(i = 0; i < kstack_depth_to_print; i++) { 217 for(i = 0; i < kstack_depth_to_print; i++) {
195 if (kstack_end(stack)) 218 if (kstack_end(stack))
196 break; 219 break;
197 if (i && ((i % 8) == 0)) { 220 if (i && ((i % 8) == 0))
198 printk("\n"); 221 printk("\n%s ", log_lvl);
199 printk(log_lvl);
200 printk(" ");
201 }
202 printk("%08lx ", *stack++); 222 printk("%08lx ", *stack++);
203 } 223 }
204 printk("\n"); 224 printk("\n%sCall Trace:\n", log_lvl);
205 printk(log_lvl);
206 printk("Call Trace:\n");
207 show_trace_log_lvl(task, esp, log_lvl); 225 show_trace_log_lvl(task, esp, log_lvl);
208} 226}
209 227
210void show_stack(struct task_struct *task, unsigned long *esp) 228void show_stack(struct task_struct *task, unsigned long *esp)
211{ 229{
230 printk(" ");
212 show_stack_log_lvl(task, esp, ""); 231 show_stack_log_lvl(task, esp, "");
213} 232}
214 233
@@ -233,7 +252,7 @@ void show_registers(struct pt_regs *regs)
233 252
234 esp = (unsigned long) (&regs->esp); 253 esp = (unsigned long) (&regs->esp);
235 savesegment(ss, ss); 254 savesegment(ss, ss);
236 if (user_mode(regs)) { 255 if (user_mode_vm(regs)) {
237 in_kernel = 0; 256 in_kernel = 0;
238 esp = regs->esp; 257 esp = regs->esp;
239 ss = regs->xss & 0xffff; 258 ss = regs->xss & 0xffff;
@@ -333,6 +352,8 @@ void die(const char * str, struct pt_regs * regs, long err)
333 static int die_counter; 352 static int die_counter;
334 unsigned long flags; 353 unsigned long flags;
335 354
355 oops_enter();
356
336 if (die.lock_owner != raw_smp_processor_id()) { 357 if (die.lock_owner != raw_smp_processor_id()) {
337 console_verbose(); 358 console_verbose();
338 spin_lock_irqsave(&die.lock, flags); 359 spin_lock_irqsave(&die.lock, flags);
@@ -385,6 +406,7 @@ void die(const char * str, struct pt_regs * regs, long err)
385 ssleep(5); 406 ssleep(5);
386 panic("Fatal exception"); 407 panic("Fatal exception");
387 } 408 }
409 oops_exit();
388 do_exit(SIGSEGV); 410 do_exit(SIGSEGV);
389} 411}
390 412
@@ -623,7 +645,7 @@ void die_nmi (struct pt_regs *regs, const char *msg)
623 /* If we are in kernel we are probably nested up pretty bad 645 /* If we are in kernel we are probably nested up pretty bad
624	 * and might as well get out now while we still can.	 646	 * and might as well get out now while we still can.
625 */ 647 */
626 if (!user_mode(regs)) { 648 if (!user_mode_vm(regs)) {
627 current->thread.trap_no = 2; 649 current->thread.trap_no = 2;
628 crash_kexec(regs); 650 crash_kexec(regs);
629 } 651 }
@@ -694,6 +716,7 @@ fastcall void do_nmi(struct pt_regs * regs, long error_code)
694 716
695void set_nmi_callback(nmi_callback_t callback) 717void set_nmi_callback(nmi_callback_t callback)
696{ 718{
719 vmalloc_sync_all();
697 rcu_assign_pointer(nmi_callback, callback); 720 rcu_assign_pointer(nmi_callback, callback);
698} 721}
699EXPORT_SYMBOL_GPL(set_nmi_callback); 722EXPORT_SYMBOL_GPL(set_nmi_callback);
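Editorial note: the print_addr_and_symbol() rework threads a "printed" counter through the stack walker so that CONFIG_STACK_BACKTRACE_COLS entries share one output line, with a final flush for a partially filled last line. A standalone sketch of that counter logic follows; BACKTRACE_COLS, print_addr() and the fake address list are made up for illustration.

#include <stdio.h>

#define BACKTRACE_COLS 2	/* stands in for CONFIG_STACK_BACKTRACE_COLS */

/* Print one entry; return how many entries now sit on the current line. */
static int print_addr(unsigned long addr, int printed)
{
	if (!printed)
		printf("  ");			/* start of a new line */
	printf("<%08lx> fn_%lx", addr, addr & 0xf);
	printed = (printed + 1) % BACKTRACE_COLS;
	if (printed)
		printf("   ");			/* more columns to come on this line */
	else
		printf("\n");			/* line is full */
	return printed;
}

int main(void)
{
	unsigned long stack[] = { 0xc0101234, 0xc0105678, 0xc010abcd,
				  0xc0112233, 0xc0114455 };
	int printed = 0;

	for (unsigned int i = 0; i < sizeof(stack) / sizeof(stack[0]); i++)
		printed = print_addr(stack[i], printed);
	if (printed)				/* flush a partially filled last line */
		printf("\n");
	return 0;
}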
diff --git a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c
index f51c894a7da5..aee14fafd13d 100644
--- a/arch/i386/kernel/vm86.c
+++ b/arch/i386/kernel/vm86.c
@@ -43,6 +43,7 @@
43#include <linux/smp_lock.h> 43#include <linux/smp_lock.h>
44#include <linux/highmem.h> 44#include <linux/highmem.h>
45#include <linux/ptrace.h> 45#include <linux/ptrace.h>
46#include <linux/audit.h>
46 47
47#include <asm/uaccess.h> 48#include <asm/uaccess.h>
48#include <asm/io.h> 49#include <asm/io.h>
@@ -252,6 +253,7 @@ out:
252static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk) 253static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
253{ 254{
254 struct tss_struct *tss; 255 struct tss_struct *tss;
256 long eax;
255/* 257/*
256 * make sure the vm86() system call doesn't try to do anything silly 258 * make sure the vm86() system call doesn't try to do anything silly
257 */ 259 */
@@ -305,13 +307,19 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
305 tsk->thread.screen_bitmap = info->screen_bitmap; 307 tsk->thread.screen_bitmap = info->screen_bitmap;
306 if (info->flags & VM86_SCREEN_BITMAP) 308 if (info->flags & VM86_SCREEN_BITMAP)
307 mark_screen_rdonly(tsk->mm); 309 mark_screen_rdonly(tsk->mm);
310 __asm__ __volatile__("xorl %eax,%eax; movl %eax,%fs; movl %eax,%gs\n\t");
311 __asm__ __volatile__("movl %%eax, %0\n" :"=r"(eax));
312
313	 /* call audit_syscall_exit since we do not exit via the normal paths */
314 if (unlikely(current->audit_context))
315 audit_syscall_exit(current, AUDITSC_RESULT(eax), eax);
316
308 __asm__ __volatile__( 317 __asm__ __volatile__(
309 "xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs\n\t"
310 "movl %0,%%esp\n\t" 318 "movl %0,%%esp\n\t"
311 "movl %1,%%ebp\n\t" 319 "movl %1,%%ebp\n\t"
312 "jmp resume_userspace" 320 "jmp resume_userspace"
313 : /* no outputs */ 321 : /* no outputs */
314 :"r" (&info->regs), "r" (task_thread_info(tsk)) : "ax"); 322 :"r" (&info->regs), "r" (task_thread_info(tsk)));
315 /* we never return here */ 323 /* we never return here */
316} 324}
317 325
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index 4710195b6b74..8831303a473f 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -7,6 +7,7 @@
7#include <asm-generic/vmlinux.lds.h> 7#include <asm-generic/vmlinux.lds.h>
8#include <asm/thread_info.h> 8#include <asm/thread_info.h>
9#include <asm/page.h> 9#include <asm/page.h>
10#include <asm/cache.h>
10 11
11OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386") 12OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
12OUTPUT_ARCH(i386) 13OUTPUT_ARCH(i386)
@@ -68,6 +69,26 @@ SECTIONS
68 *(.data.init_task) 69 *(.data.init_task)
69 } 70 }
70 71
72 /* might get freed after init */
73 . = ALIGN(4096);
74 __smp_alt_begin = .;
75 __smp_alt_instructions = .;
76 .smp_altinstructions : AT(ADDR(.smp_altinstructions) - LOAD_OFFSET) {
77 *(.smp_altinstructions)
78 }
79 __smp_alt_instructions_end = .;
80 . = ALIGN(4);
81 __smp_locks = .;
82 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
83 *(.smp_locks)
84 }
85 __smp_locks_end = .;
86 .smp_altinstr_replacement : AT(ADDR(.smp_altinstr_replacement) - LOAD_OFFSET) {
87 *(.smp_altinstr_replacement)
88 }
89 . = ALIGN(4096);
90 __smp_alt_end = .;
91
71 /* will be freed after init */ 92 /* will be freed after init */
72 . = ALIGN(4096); /* Init code and data */ 93 . = ALIGN(4096); /* Init code and data */
73 __init_begin = .; 94 __init_begin = .;
@@ -115,7 +136,7 @@ SECTIONS
115 __initramfs_start = .; 136 __initramfs_start = .;
116 .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) } 137 .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) }
117 __initramfs_end = .; 138 __initramfs_end = .;
118 . = ALIGN(32); 139 . = ALIGN(L1_CACHE_BYTES);
119 __per_cpu_start = .; 140 __per_cpu_start = .;
120 .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { *(.data.percpu) } 141 .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { *(.data.percpu) }
121 __per_cpu_end = .; 142 __per_cpu_end = .;
diff --git a/arch/i386/kernel/vsyscall-sysenter.S b/arch/i386/kernel/vsyscall-sysenter.S
index 76b728159403..3b62baa6a371 100644
--- a/arch/i386/kernel/vsyscall-sysenter.S
+++ b/arch/i386/kernel/vsyscall-sysenter.S
@@ -21,6 +21,9 @@
21 * instruction clobbers %esp, the user's %esp won't even survive entry 21 * instruction clobbers %esp, the user's %esp won't even survive entry
22 * into the kernel. We store %esp in %ebp. Code in entry.S must fetch 22 * into the kernel. We store %esp in %ebp. Code in entry.S must fetch
23 * arg6 from the stack. 23 * arg6 from the stack.
24 *
25	 * You cannot use this vsyscall for the clone() syscall because the
26 * three dwords on the parent stack do not get copied to the child.
24 */ 27 */
25 .text 28 .text
26 .globl __kernel_vsyscall 29 .globl __kernel_vsyscall
diff --git a/arch/i386/mach-es7000/es7000.h b/arch/i386/mach-es7000/es7000.h
index f1e3204f5dec..80566ca4a80a 100644
--- a/arch/i386/mach-es7000/es7000.h
+++ b/arch/i386/mach-es7000/es7000.h
@@ -83,6 +83,7 @@ struct es7000_oem_table {
83 struct psai psai; 83 struct psai psai;
84}; 84};
85 85
86#ifdef CONFIG_ACPI
86struct acpi_table_sdt { 87struct acpi_table_sdt {
87 unsigned long pa; 88 unsigned long pa;
88 unsigned long count; 89 unsigned long count;
@@ -99,6 +100,9 @@ struct oem_table {
99 u32 OEMTableSize; 100 u32 OEMTableSize;
100}; 101};
101 102
103extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
104#endif
105
102struct mip_reg { 106struct mip_reg {
103 unsigned long long off_0; 107 unsigned long long off_0;
104 unsigned long long off_8; 108 unsigned long long off_8;
@@ -114,7 +118,6 @@ struct mip_reg {
114#define MIP_FUNC(VALUE) (VALUE & 0xff) 118#define MIP_FUNC(VALUE) (VALUE & 0xff)
115 119
116extern int parse_unisys_oem (char *oemptr); 120extern int parse_unisys_oem (char *oemptr);
117extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
118extern void setup_unisys(void); 121extern void setup_unisys(void);
119extern int es7000_start_cpu(int cpu, unsigned long eip); 122extern int es7000_start_cpu(int cpu, unsigned long eip);
120extern void es7000_sw_apic(void); 123extern void es7000_sw_apic(void);
diff --git a/arch/i386/mach-es7000/es7000plat.c b/arch/i386/mach-es7000/es7000plat.c
index a9ab0644f403..3d0fc853516d 100644
--- a/arch/i386/mach-es7000/es7000plat.c
+++ b/arch/i386/mach-es7000/es7000plat.c
@@ -51,8 +51,6 @@ struct mip_reg *host_reg;
51int mip_port; 51int mip_port;
52unsigned long mip_addr, host_addr; 52unsigned long mip_addr, host_addr;
53 53
54#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI)
55
56/* 54/*
57 * GSI override for ES7000 platforms. 55 * GSI override for ES7000 platforms.
58 */ 56 */
@@ -76,8 +74,6 @@ es7000_rename_gsi(int ioapic, int gsi)
76 return gsi; 74 return gsi;
77} 75}
78 76
79#endif /* (CONFIG_X86_IO_APIC) && (CONFIG_ACPI) */
80
81void __init 77void __init
82setup_unisys(void) 78setup_unisys(void)
83{ 79{
@@ -160,6 +156,7 @@ parse_unisys_oem (char *oemptr)
160 return es7000_plat; 156 return es7000_plat;
161} 157}
162 158
159#ifdef CONFIG_ACPI
163int __init 160int __init
164find_unisys_acpi_oem_table(unsigned long *oem_addr) 161find_unisys_acpi_oem_table(unsigned long *oem_addr)
165{ 162{
@@ -212,6 +209,7 @@ find_unisys_acpi_oem_table(unsigned long *oem_addr)
212 } 209 }
213 return -1; 210 return -1;
214} 211}
212#endif
215 213
216static void 214static void
217es7000_spin(int n) 215es7000_spin(int n)
diff --git a/arch/i386/mach-visws/reboot.c b/arch/i386/mach-visws/reboot.c
index 5d73e042ed0a..99332abfad42 100644
--- a/arch/i386/mach-visws/reboot.c
+++ b/arch/i386/mach-visws/reboot.c
@@ -1,7 +1,6 @@
1#include <linux/module.h> 1#include <linux/module.h>
2#include <linux/smp.h> 2#include <linux/smp.h>
3#include <linux/delay.h> 3#include <linux/delay.h>
4#include <linux/platform.h>
5 4
6#include <asm/io.h> 5#include <asm/io.h>
7#include "piix4.h" 6#include "piix4.h"
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index cf572d9a3b6e..7f0fcf219a26 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -214,6 +214,68 @@ static noinline void force_sig_info_fault(int si_signo, int si_code,
214 214
215fastcall void do_invalid_op(struct pt_regs *, unsigned long); 215fastcall void do_invalid_op(struct pt_regs *, unsigned long);
216 216
217static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
218{
219 unsigned index = pgd_index(address);
220 pgd_t *pgd_k;
221 pud_t *pud, *pud_k;
222 pmd_t *pmd, *pmd_k;
223
224 pgd += index;
225 pgd_k = init_mm.pgd + index;
226
227 if (!pgd_present(*pgd_k))
228 return NULL;
229
230 /*
231 * set_pgd(pgd, *pgd_k); here would be useless on PAE
232 * and redundant with the set_pmd() on non-PAE. As would
233 * set_pud.
234 */
235
236 pud = pud_offset(pgd, address);
237 pud_k = pud_offset(pgd_k, address);
238 if (!pud_present(*pud_k))
239 return NULL;
240
241 pmd = pmd_offset(pud, address);
242 pmd_k = pmd_offset(pud_k, address);
243 if (!pmd_present(*pmd_k))
244 return NULL;
245 if (!pmd_present(*pmd))
246 set_pmd(pmd, *pmd_k);
247 else
248 BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
249 return pmd_k;
250}
251
252/*
253 * Handle a fault on the vmalloc or module mapping area
254 *
255 * This assumes no large pages in there.
256 */
257static inline int vmalloc_fault(unsigned long address)
258{
259 unsigned long pgd_paddr;
260 pmd_t *pmd_k;
261 pte_t *pte_k;
262 /*
263 * Synchronize this task's top level page-table
264 * with the 'reference' page table.
265 *
266 * Do _not_ use "current" here. We might be inside
267 * an interrupt in the middle of a task switch..
268 */
269 pgd_paddr = read_cr3();
270 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
271 if (!pmd_k)
272 return -1;
273 pte_k = pte_offset_kernel(pmd_k, address);
274 if (!pte_present(*pte_k))
275 return -1;
276 return 0;
277}
278
217/* 279/*
218 * This routine handles page faults. It determines the address, 280 * This routine handles page faults. It determines the address,
219 * and the problem, and then passes it off to one of the appropriate 281 * and the problem, and then passes it off to one of the appropriate
@@ -223,6 +285,8 @@ fastcall void do_invalid_op(struct pt_regs *, unsigned long);
223 * bit 0 == 0 means no page found, 1 means protection fault 285 * bit 0 == 0 means no page found, 1 means protection fault
224 * bit 1 == 0 means read, 1 means write 286 * bit 1 == 0 means read, 1 means write
225 * bit 2 == 0 means kernel, 1 means user-mode 287 * bit 2 == 0 means kernel, 1 means user-mode
288 * bit 3 == 1 means use of reserved bit detected
289 * bit 4 == 1 means fault was an instruction fetch
226 */ 290 */
227fastcall void __kprobes do_page_fault(struct pt_regs *regs, 291fastcall void __kprobes do_page_fault(struct pt_regs *regs,
228 unsigned long error_code) 292 unsigned long error_code)
@@ -237,13 +301,6 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
237 /* get the address */ 301 /* get the address */
238 address = read_cr2(); 302 address = read_cr2();
239 303
240 if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
241 SIGSEGV) == NOTIFY_STOP)
242 return;
243 /* It's safe to allow irq's after cr2 has been saved */
244 if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
245 local_irq_enable();
246
247 tsk = current; 304 tsk = current;
248 305
249 si_code = SEGV_MAPERR; 306 si_code = SEGV_MAPERR;
@@ -259,17 +316,29 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
259 * 316 *
260 * This verifies that the fault happens in kernel space 317 * This verifies that the fault happens in kernel space
261 * (error_code & 4) == 0, and that the fault was not a 318 * (error_code & 4) == 0, and that the fault was not a
262 * protection error (error_code & 1) == 0. 319 * protection error (error_code & 9) == 0.
263 */ 320 */
264 if (unlikely(address >= TASK_SIZE)) { 321 if (unlikely(address >= TASK_SIZE)) {
265 if (!(error_code & 5)) 322 if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
266 goto vmalloc_fault; 323 return;
267 /* 324 if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
325 SIGSEGV) == NOTIFY_STOP)
326 return;
327 /*
268 * Don't take the mm semaphore here. If we fixup a prefetch 328 * Don't take the mm semaphore here. If we fixup a prefetch
269 * fault we could otherwise deadlock. 329 * fault we could otherwise deadlock.
270 */ 330 */
271 goto bad_area_nosemaphore; 331 goto bad_area_nosemaphore;
272 } 332 }
333
334 if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
335 SIGSEGV) == NOTIFY_STOP)
336 return;
337
338	 /* It's safe to allow IRQs after cr2 has been saved and the vmalloc
339 fault has been handled. */
340 if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
341 local_irq_enable();
273 342
274 mm = tsk->mm; 343 mm = tsk->mm;
275 344
@@ -440,24 +509,31 @@ no_context:
440 509
441 bust_spinlocks(1); 510 bust_spinlocks(1);
442 511
443#ifdef CONFIG_X86_PAE 512 if (oops_may_print()) {
444 if (error_code & 16) { 513 #ifdef CONFIG_X86_PAE
445 pte_t *pte = lookup_address(address); 514 if (error_code & 16) {
515 pte_t *pte = lookup_address(address);
446 516
447 if (pte && pte_present(*pte) && !pte_exec_kernel(*pte)) 517 if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
448 printk(KERN_CRIT "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n", current->uid); 518 printk(KERN_CRIT "kernel tried to execute "
519 "NX-protected page - exploit attempt? "
520 "(uid: %d)\n", current->uid);
521 }
522 #endif
523 if (address < PAGE_SIZE)
524 printk(KERN_ALERT "BUG: unable to handle kernel NULL "
525 "pointer dereference");
526 else
527 printk(KERN_ALERT "BUG: unable to handle kernel paging"
528 " request");
529 printk(" at virtual address %08lx\n",address);
530 printk(KERN_ALERT " printing eip:\n");
531 printk("%08lx\n", regs->eip);
449 } 532 }
450#endif
451 if (address < PAGE_SIZE)
452 printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
453 else
454 printk(KERN_ALERT "Unable to handle kernel paging request");
455 printk(" at virtual address %08lx\n",address);
456 printk(KERN_ALERT " printing eip:\n");
457 printk("%08lx\n", regs->eip);
458 page = read_cr3(); 533 page = read_cr3();
459 page = ((unsigned long *) __va(page))[address >> 22]; 534 page = ((unsigned long *) __va(page))[address >> 22];
460 printk(KERN_ALERT "*pde = %08lx\n", page); 535 if (oops_may_print())
536 printk(KERN_ALERT "*pde = %08lx\n", page);
461 /* 537 /*
462 * We must not directly access the pte in the highpte 538 * We must not directly access the pte in the highpte
463 * case, the page table might be allocated in highmem. 539 * case, the page table might be allocated in highmem.
@@ -465,7 +541,7 @@ no_context:
465 * it's allocated already. 541 * it's allocated already.
466 */ 542 */
467#ifndef CONFIG_HIGHPTE 543#ifndef CONFIG_HIGHPTE
468 if (page & 1) { 544 if ((page & 1) && oops_may_print()) {
469 page &= PAGE_MASK; 545 page &= PAGE_MASK;
470 address &= 0x003ff000; 546 address &= 0x003ff000;
471 page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT]; 547 page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
@@ -510,51 +586,41 @@ do_sigbus:
510 tsk->thread.error_code = error_code; 586 tsk->thread.error_code = error_code;
511 tsk->thread.trap_no = 14; 587 tsk->thread.trap_no = 14;
512 force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk); 588 force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
513 return; 589}
514
515vmalloc_fault:
516 {
517 /*
518 * Synchronize this task's top level page-table
519 * with the 'reference' page table.
520 *
521 * Do _not_ use "tsk" here. We might be inside
522 * an interrupt in the middle of a task switch..
523 */
524 int index = pgd_index(address);
525 unsigned long pgd_paddr;
526 pgd_t *pgd, *pgd_k;
527 pud_t *pud, *pud_k;
528 pmd_t *pmd, *pmd_k;
529 pte_t *pte_k;
530
531 pgd_paddr = read_cr3();
532 pgd = index + (pgd_t *)__va(pgd_paddr);
533 pgd_k = init_mm.pgd + index;
534
535 if (!pgd_present(*pgd_k))
536 goto no_context;
537
538 /*
539 * set_pgd(pgd, *pgd_k); here would be useless on PAE
540 * and redundant with the set_pmd() on non-PAE. As would
541 * set_pud.
542 */
543 590
544 pud = pud_offset(pgd, address); 591#ifndef CONFIG_X86_PAE
545 pud_k = pud_offset(pgd_k, address); 592void vmalloc_sync_all(void)
546 if (!pud_present(*pud_k)) 593{
547 goto no_context; 594 /*
548 595 * Note that races in the updates of insync and start aren't
549 pmd = pmd_offset(pud, address); 596 * problematic: insync can only get set bits added, and updates to
550 pmd_k = pmd_offset(pud_k, address); 597 * start are only improving performance (without affecting correctness
551 if (!pmd_present(*pmd_k)) 598 * if undone).
552 goto no_context; 599 */
553 set_pmd(pmd, *pmd_k); 600 static DECLARE_BITMAP(insync, PTRS_PER_PGD);
601 static unsigned long start = TASK_SIZE;
602 unsigned long address;
554 603
555 pte_k = pte_offset_kernel(pmd_k, address); 604 BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
556 if (!pte_present(*pte_k)) 605 for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
557 goto no_context; 606 if (!test_bit(pgd_index(address), insync)) {
558 return; 607 unsigned long flags;
608 struct page *page;
609
610 spin_lock_irqsave(&pgd_lock, flags);
611 for (page = pgd_list; page; page =
612 (struct page *)page->index)
613 if (!vmalloc_sync_one(page_address(page),
614 address)) {
615 BUG_ON(page != pgd_list);
616 break;
617 }
618 spin_unlock_irqrestore(&pgd_lock, flags);
619 if (!page)
620 set_bit(pgd_index(address), insync);
621 }
622 if (address == start && test_bit(pgd_index(address), insync))
623 start = address + PGDIR_SIZE;
559 } 624 }
560} 625}
626#endif
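Editorial note: vmalloc_sync_all() walks the kernel address range one PGD slot at a time and records in a static insync bitmap which slots have already been propagated to every page directory, so repeated calls (from register_die_notifier() and set_nmi_callback() above) do no redundant work. Below is a userspace sketch of that "sync each slot at most once, and remember where the fully synced prefix ends" bookkeeping; the slot granularity, NSLOTS and sync_one() are illustrative stand-ins, not the kernel's PGDIR arithmetic.

#include <stdio.h>
#include <stdbool.h>

#define NSLOTS 8				/* stands in for PTRS_PER_PGD */

static bool insync[NSLOTS];			/* slots already synced everywhere */
static int start_slot;				/* first slot that may still need work */

static bool sync_one(int slot)
{
	/* pretend to copy the reference entry into every page directory */
	printf("syncing slot %d\n", slot);
	return true;				/* report success */
}

static void sync_all(void)
{
	for (int slot = start_slot; slot < NSLOTS; slot++) {
		if (!insync[slot] && sync_one(slot))
			insync[slot] = true;
		/* advance the start hint only while the prefix stays fully synced */
		if (slot == start_slot && insync[slot])
			start_slot = slot + 1;
	}
}

int main(void)
{
	sync_all();	/* does the work once */
	sync_all();	/* second call: everything already in sync, no output */
	return 0;
}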
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 7ba55a6e2dbc..9f66ac582a8b 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -720,21 +720,6 @@ static int noinline do_test_wp_bit(void)
720 return flag; 720 return flag;
721} 721}
722 722
723void free_initmem(void)
724{
725 unsigned long addr;
726
727 addr = (unsigned long)(&__init_begin);
728 for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
729 ClearPageReserved(virt_to_page(addr));
730 init_page_count(virt_to_page(addr));
731 memset((void *)addr, 0xcc, PAGE_SIZE);
732 free_page(addr);
733 totalram_pages++;
734 }
735 printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10);
736}
737
738#ifdef CONFIG_DEBUG_RODATA 723#ifdef CONFIG_DEBUG_RODATA
739 724
740extern char __start_rodata, __end_rodata; 725extern char __start_rodata, __end_rodata;
@@ -758,17 +743,31 @@ void mark_rodata_ro(void)
758} 743}
759#endif 744#endif
760 745
746void free_init_pages(char *what, unsigned long begin, unsigned long end)
747{
748 unsigned long addr;
749
750 for (addr = begin; addr < end; addr += PAGE_SIZE) {
751 ClearPageReserved(virt_to_page(addr));
752 init_page_count(virt_to_page(addr));
753 memset((void *)addr, 0xcc, PAGE_SIZE);
754 free_page(addr);
755 totalram_pages++;
756 }
757 printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
758}
759
760void free_initmem(void)
761{
762 free_init_pages("unused kernel memory",
763 (unsigned long)(&__init_begin),
764 (unsigned long)(&__init_end));
765}
761 766
762#ifdef CONFIG_BLK_DEV_INITRD 767#ifdef CONFIG_BLK_DEV_INITRD
763void free_initrd_mem(unsigned long start, unsigned long end) 768void free_initrd_mem(unsigned long start, unsigned long end)
764{ 769{
765 if (start < end) 770 free_init_pages("initrd memory", start, end);
766 printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
767 for (; start < end; start += PAGE_SIZE) {
768 ClearPageReserved(virt_to_page(start));
769 init_page_count(virt_to_page(start));
770 free_page(start);
771 totalram_pages++;
772 }
773} 771}
774#endif 772#endif
773
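Editorial note: the init.c change folds the two nearly identical page-freeing loops (free_initmem() and free_initrd_mem()) into one free_init_pages() helper that takes a label and a range. The same refactoring in miniature, with the page loop replaced by a byte-range poison so it runs in userspace; the names mirror the patch but the bodies are illustrative only.

#include <stdio.h>
#include <string.h>

static unsigned long freed_total;

/* One helper for both callers: poison and "free" a [begin, end) range. */
static void free_init_range(const char *what, char *begin, char *end)
{
	memset(begin, 0xcc, (size_t)(end - begin));	/* poison, as the patch does */
	freed_total += (unsigned long)(end - begin);
	printf("Freeing %s: %luk freed\n", what,
	       (unsigned long)(end - begin) >> 10);
}

int main(void)
{
	static char initmem[8192], initrd[4096];

	free_init_range("unused kernel memory", initmem, initmem + sizeof(initmem));
	free_init_range("initrd memory", initrd, initrd + sizeof(initrd));
	printf("total: %luk\n", freed_total >> 10);
	return 0;
}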
diff --git a/arch/i386/oprofile/nmi_int.c b/arch/i386/oprofile/nmi_int.c
index 0493e8b8ec49..1accce50c2c7 100644
--- a/arch/i386/oprofile/nmi_int.c
+++ b/arch/i386/oprofile/nmi_int.c
@@ -122,7 +122,7 @@ static void nmi_save_registers(void * dummy)
122static void free_msrs(void) 122static void free_msrs(void)
123{ 123{
124 int i; 124 int i;
125 for (i = 0; i < NR_CPUS; ++i) { 125 for_each_cpu(i) {
126 kfree(cpu_msrs[i].counters); 126 kfree(cpu_msrs[i].counters);
127 cpu_msrs[i].counters = NULL; 127 cpu_msrs[i].counters = NULL;
128 kfree(cpu_msrs[i].controls); 128 kfree(cpu_msrs[i].controls);
@@ -138,10 +138,7 @@ static int allocate_msrs(void)
138 size_t counters_size = sizeof(struct op_msr) * model->num_counters; 138 size_t counters_size = sizeof(struct op_msr) * model->num_counters;
139 139
140 int i; 140 int i;
141 for (i = 0; i < NR_CPUS; ++i) { 141 for_each_online_cpu(i) {
142 if (!cpu_online(i))
143 continue;
144
145 cpu_msrs[i].counters = kmalloc(counters_size, GFP_KERNEL); 142 cpu_msrs[i].counters = kmalloc(counters_size, GFP_KERNEL);
146 if (!cpu_msrs[i].counters) { 143 if (!cpu_msrs[i].counters) {
147 success = 0; 144 success = 0;
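Editorial note: the oprofile allocation and free paths stop walking all NR_CPUS slots and instead visit only CPUs that are actually present/online. A small sketch of the same idea with an explicit online mask follows; the for_each_online() macro below is a stand-in for the kernel's for_each_online_cpu(), not its real definition.

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 8

static unsigned long online_mask = 0x5;		/* e.g. cpus 0 and 2 online */
static int *counters[NR_CPUS];

/* visit only the CPUs whose bit is set in online_mask */
#define for_each_online(cpu) \
	for ((cpu) = 0; (cpu) < NR_CPUS; (cpu)++) \
		if (online_mask & (1UL << (cpu)))

int main(void)
{
	int cpu;

	for_each_online(cpu) {
		counters[cpu] = calloc(4, sizeof(int));	/* allocate per online CPU */
		printf("allocated counters for cpu%d\n", cpu);
	}
	for_each_online(cpu)
		free(counters[cpu]);
	return 0;
}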
diff --git a/arch/i386/pci/Makefile b/arch/i386/pci/Makefile
index 5461d4d5ea1e..62ad75c57e6a 100644
--- a/arch/i386/pci/Makefile
+++ b/arch/i386/pci/Makefile
@@ -1,4 +1,4 @@
1obj-y := i386.o 1obj-y := i386.o init.o
2 2
3obj-$(CONFIG_PCI_BIOS) += pcbios.o 3obj-$(CONFIG_PCI_BIOS) += pcbios.o
4obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o direct.o 4obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o direct.o
diff --git a/arch/i386/pci/common.c b/arch/i386/pci/common.c
index f6bc48da4d2a..dbece776c5b2 100644
--- a/arch/i386/pci/common.c
+++ b/arch/i386/pci/common.c
@@ -8,6 +8,7 @@
8#include <linux/pci.h> 8#include <linux/pci.h>
9#include <linux/ioport.h> 9#include <linux/ioport.h>
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/dmi.h>
11 12
12#include <asm/acpi.h> 13#include <asm/acpi.h>
13#include <asm/segment.h> 14#include <asm/segment.h>
@@ -120,11 +121,42 @@ void __devinit pcibios_fixup_bus(struct pci_bus *b)
120 pci_read_bridge_bases(b); 121 pci_read_bridge_bases(b);
121} 122}
122 123
124/*
125 * Enable renumbering of PCI bus# ranges to reach all PCI busses (Cardbus)
126 */
127#ifdef __i386__
128static int __devinit assign_all_busses(struct dmi_system_id *d)
129{
130 pci_probe |= PCI_ASSIGN_ALL_BUSSES;
131 printk(KERN_INFO "%s detected: enabling PCI bus# renumbering"
132 " (pci=assign-busses)\n", d->ident);
133 return 0;
134}
135#endif
136
137/*
138 * Laptops which need pci=assign-busses to see Cardbus cards
139 */
140static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = {
141#ifdef __i386__
142 {
143 .callback = assign_all_busses,
144 .ident = "Samsung X20 Laptop",
145 .matches = {
146 DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
147 DMI_MATCH(DMI_PRODUCT_NAME, "SX20S"),
148 },
149 },
150#endif /* __i386__ */
151 {}
152};
123 153
124struct pci_bus * __devinit pcibios_scan_root(int busnum) 154struct pci_bus * __devinit pcibios_scan_root(int busnum)
125{ 155{
126 struct pci_bus *bus = NULL; 156 struct pci_bus *bus = NULL;
127 157
158 dmi_check_system(pciprobe_dmi_table);
159
128 while ((bus = pci_find_next_bus(bus)) != NULL) { 160 while ((bus = pci_find_next_bus(bus)) != NULL) {
129 if (bus->number == busnum) { 161 if (bus->number == busnum) {
130 /* Already scanned */ 162 /* Already scanned */
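Editorial note: pcibios_scan_root() now runs a DMI match table whose single entry flips on PCI_ASSIGN_ALL_BUSSES for the Samsung X20. The underlying pattern is a table of (matcher, callback) pairs scanned once; here is a small userspace form of it, where struct quirk, check_system() and the vendor strings are illustrative, not the kernel's dmi_system_id API.

#include <stdio.h>
#include <string.h>

static unsigned int pci_probe_flags;
#define ASSIGN_ALL_BUSSES 0x1

struct quirk {
	const char *ident;
	const char *vendor;			/* what to match against */
	int (*callback)(const struct quirk *q);
};

static int assign_all_busses(const struct quirk *q)
{
	pci_probe_flags |= ASSIGN_ALL_BUSSES;
	printf("%s detected: enabling bus renumbering\n", q->ident);
	return 0;
}

static const struct quirk quirk_table[] = {
	{ "Samsung X20 Laptop", "Samsung Electronics", assign_all_busses },
	{ NULL, NULL, NULL },			/* terminator, like the {} entry */
};

static void check_system(const char *this_vendor)
{
	for (const struct quirk *q = quirk_table; q->ident; q++)
		if (strcmp(q->vendor, this_vendor) == 0)
			q->callback(q);
}

int main(void)
{
	check_system("Samsung Electronics");
	printf("pci_probe_flags = %#x\n", pci_probe_flags);
	return 0;
}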
diff --git a/arch/i386/pci/direct.c b/arch/i386/pci/direct.c
index e3ac502bf2fb..99012b93bd12 100644
--- a/arch/i386/pci/direct.c
+++ b/arch/i386/pci/direct.c
@@ -245,7 +245,7 @@ static int __init pci_check_type2(void)
245 return works; 245 return works;
246} 246}
247 247
248static int __init pci_direct_init(void) 248void __init pci_direct_init(void)
249{ 249{
250 struct resource *region, *region2; 250 struct resource *region, *region2;
251 251
@@ -258,16 +258,16 @@ static int __init pci_direct_init(void)
258 if (pci_check_type1()) { 258 if (pci_check_type1()) {
259 printk(KERN_INFO "PCI: Using configuration type 1\n"); 259 printk(KERN_INFO "PCI: Using configuration type 1\n");
260 raw_pci_ops = &pci_direct_conf1; 260 raw_pci_ops = &pci_direct_conf1;
261 return 0; 261 return;
262 } 262 }
263 release_resource(region); 263 release_resource(region);
264 264
265 type2: 265 type2:
266 if ((pci_probe & PCI_PROBE_CONF2) == 0) 266 if ((pci_probe & PCI_PROBE_CONF2) == 0)
267 goto out; 267 return;
268 region = request_region(0xCF8, 4, "PCI conf2"); 268 region = request_region(0xCF8, 4, "PCI conf2");
269 if (!region) 269 if (!region)
270 goto out; 270 return;
271 region2 = request_region(0xC000, 0x1000, "PCI conf2"); 271 region2 = request_region(0xC000, 0x1000, "PCI conf2");
272 if (!region2) 272 if (!region2)
273 goto fail2; 273 goto fail2;
@@ -275,15 +275,10 @@ static int __init pci_direct_init(void)
275 if (pci_check_type2()) { 275 if (pci_check_type2()) {
276 printk(KERN_INFO "PCI: Using configuration type 2\n"); 276 printk(KERN_INFO "PCI: Using configuration type 2\n");
277 raw_pci_ops = &pci_direct_conf2; 277 raw_pci_ops = &pci_direct_conf2;
278 return 0; 278 return;
279 } 279 }
280 280
281 release_resource(region2); 281 release_resource(region2);
282 fail2: 282 fail2:
283 release_resource(region); 283 release_resource(region);
284
285 out:
286 return 0;
287} 284}
288
289arch_initcall(pci_direct_init);
diff --git a/arch/i386/pci/init.c b/arch/i386/pci/init.c
new file mode 100644
index 000000000000..f9156d3ac723
--- /dev/null
+++ b/arch/i386/pci/init.c
@@ -0,0 +1,25 @@
1#include <linux/config.h>
2#include <linux/pci.h>
3#include <linux/init.h>
4#include "pci.h"
5
6/* arch_initcall ordering is too unpredictable, so call the initializers
7 in the right sequence from here. */
8static __init int pci_access_init(void)
9{
10#ifdef CONFIG_PCI_MMCONFIG
11 pci_mmcfg_init();
12#endif
13 if (raw_pci_ops)
14 return 0;
15#ifdef CONFIG_PCI_BIOS
16 pci_pcbios_init();
17#endif
18 if (raw_pci_ops)
19 return 0;
20#ifdef CONFIG_PCI_DIRECT
21 pci_direct_init();
22#endif
23 return 0;
24}
25arch_initcall(pci_access_init);
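Editorial note: the new arch/i386/pci/init.c replaces three independent arch_initcalls with one pci_access_init() that tries MMCONFIG, then the BIOS, then direct port access, stopping as soon as raw_pci_ops is set. The same "try access methods in a fixed order until one sticks" flow as a tiny sketch; the probe functions and ops pointer below are illustrative, not the kernel's.

#include <stdio.h>
#include <stddef.h>

struct pci_ops { const char *name; };

static const struct pci_ops *raw_ops;	/* set by the first probe that works */

static void probe_mmconfig(void)
{
	/* pretend MMCONFIG is unavailable: leave raw_ops NULL */
}

static void probe_bios(void)
{
	static const struct pci_ops bios_ops = { "bios" };
	raw_ops = &bios_ops;			/* this one succeeds in the sketch */
}

static void probe_direct(void)
{
	static const struct pci_ops direct_ops = { "direct" };
	raw_ops = &direct_ops;
}

static void pci_access_init(void)
{
	probe_mmconfig();
	if (raw_ops)
		return;
	probe_bios();
	if (raw_ops)
		return;
	probe_direct();				/* last resort */
}

int main(void)
{
	pci_access_init();
	printf("using %s config access\n", raw_ops ? raw_ops->name : "none");
	return 0;
}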
diff --git a/arch/i386/pci/mmconfig.c b/arch/i386/pci/mmconfig.c
index 0ee8a983708c..613789071f30 100644
--- a/arch/i386/pci/mmconfig.c
+++ b/arch/i386/pci/mmconfig.c
@@ -172,25 +172,20 @@ static __init void unreachable_devices(void)
172 } 172 }
173} 173}
174 174
175static int __init pci_mmcfg_init(void) 175void __init pci_mmcfg_init(void)
176{ 176{
177 if ((pci_probe & PCI_PROBE_MMCONF) == 0) 177 if ((pci_probe & PCI_PROBE_MMCONF) == 0)
178 goto out; 178 return;
179 179
180 acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg); 180 acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg);
181 if ((pci_mmcfg_config_num == 0) || 181 if ((pci_mmcfg_config_num == 0) ||
182 (pci_mmcfg_config == NULL) || 182 (pci_mmcfg_config == NULL) ||
183 (pci_mmcfg_config[0].base_address == 0)) 183 (pci_mmcfg_config[0].base_address == 0))
184 goto out; 184 return;
185 185
186 printk(KERN_INFO "PCI: Using MMCONFIG\n"); 186 printk(KERN_INFO "PCI: Using MMCONFIG\n");
187 raw_pci_ops = &pci_mmcfg; 187 raw_pci_ops = &pci_mmcfg;
188 pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF; 188 pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF;
189 189
190 unreachable_devices(); 190 unreachable_devices();
191
192 out:
193 return 0;
194} 191}
195
196arch_initcall(pci_mmcfg_init);
diff --git a/arch/i386/pci/pcbios.c b/arch/i386/pci/pcbios.c
index b9d65f0bc2d1..1eec0868f4b3 100644
--- a/arch/i386/pci/pcbios.c
+++ b/arch/i386/pci/pcbios.c
@@ -476,14 +476,12 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
476} 476}
477EXPORT_SYMBOL(pcibios_set_irq_routing); 477EXPORT_SYMBOL(pcibios_set_irq_routing);
478 478
479static int __init pci_pcbios_init(void) 479void __init pci_pcbios_init(void)
480{ 480{
481 if ((pci_probe & PCI_PROBE_BIOS) 481 if ((pci_probe & PCI_PROBE_BIOS)
482 && ((raw_pci_ops = pci_find_bios()))) { 482 && ((raw_pci_ops = pci_find_bios()))) {
483 pci_probe |= PCI_BIOS_SORT; 483 pci_probe |= PCI_BIOS_SORT;
484 pci_bios_present = 1; 484 pci_bios_present = 1;
485 } 485 }
486 return 0;
487} 486}
488 487
489arch_initcall(pci_pcbios_init);
diff --git a/arch/i386/pci/pci.h b/arch/i386/pci/pci.h
index f550781ec310..12035e29108b 100644
--- a/arch/i386/pci/pci.h
+++ b/arch/i386/pci/pci.h
@@ -80,4 +80,7 @@ extern int pci_conf1_write(unsigned int seg, unsigned int bus,
80extern int pci_conf1_read(unsigned int seg, unsigned int bus, 80extern int pci_conf1_read(unsigned int seg, unsigned int bus,
81 unsigned int devfn, int reg, int len, u32 *value); 81 unsigned int devfn, int reg, int len, u32 *value);
82 82
83extern void pci_direct_init(void);
84extern void pci_pcbios_init(void);
85extern void pci_mmcfg_init(void);
83 86
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index ff7ae6b664e8..10b6b9e7716b 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -252,6 +252,15 @@ config NR_CPUS
252 than 64 will cause the use of a CPU mask array, causing a small 252 than 64 will cause the use of a CPU mask array, causing a small
253 performance hit. 253 performance hit.
254 254
255config IA64_NR_NODES
256 int "Maximum number of NODEs (256-1024)" if (IA64_SGI_SN2 || IA64_GENERIC)
257 range 256 1024
258 depends on IA64_SGI_SN2 || IA64_GENERIC
259 default "256"
260 help
261 This option specifies the maximum number of nodes in your SSI system.
262 If in doubt, use the default.
263
255config HOTPLUG_CPU 264config HOTPLUG_CPU
256 bool "Support for hot-pluggable CPUs (EXPERIMENTAL)" 265 bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
257 depends on SMP && EXPERIMENTAL 266 depends on SMP && EXPERIMENTAL
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index f722e1a25948..80ea7506fa1a 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -1,6 +1,9 @@
1# 1#
2# ia64/Makefile 2# ia64/Makefile
3# 3#
4# This file is included by the global makefile so that you can add your own
5# architecture-specific flags and dependencies.
6#
4# This file is subject to the terms and conditions of the GNU General Public 7# This file is subject to the terms and conditions of the GNU General Public
5# License. See the file "COPYING" in the main directory of this archive 8# License. See the file "COPYING" in the main directory of this archive
6# for more details. 9# for more details.
@@ -62,7 +65,7 @@ drivers-$(CONFIG_OPROFILE) += arch/ia64/oprofile/
62 65
63boot := arch/ia64/hp/sim/boot 66boot := arch/ia64/hp/sim/boot
64 67
65.PHONY: boot compressed check 68PHONY += boot compressed check
66 69
67all: compressed unwcheck 70all: compressed unwcheck
68 71
diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig
index 744fd2f79f61..0d29aa2066b3 100644
--- a/arch/ia64/configs/gensparse_defconfig
+++ b/arch/ia64/configs/gensparse_defconfig
@@ -116,6 +116,7 @@ CONFIG_IOSAPIC=y
116CONFIG_FORCE_MAX_ZONEORDER=17 116CONFIG_FORCE_MAX_ZONEORDER=17
117CONFIG_SMP=y 117CONFIG_SMP=y
118CONFIG_NR_CPUS=512 118CONFIG_NR_CPUS=512
119CONFIG_IA64_NR_NODES=256
119CONFIG_HOTPLUG_CPU=y 120CONFIG_HOTPLUG_CPU=y
120# CONFIG_SCHED_SMT is not set 121# CONFIG_SCHED_SMT is not set
121# CONFIG_PREEMPT is not set 122# CONFIG_PREEMPT is not set
diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig
index 8206752161bb..a718034d68d0 100644
--- a/arch/ia64/configs/sn2_defconfig
+++ b/arch/ia64/configs/sn2_defconfig
@@ -116,6 +116,7 @@ CONFIG_IA64_SGI_SN_XP=m
116CONFIG_FORCE_MAX_ZONEORDER=17 116CONFIG_FORCE_MAX_ZONEORDER=17
117CONFIG_SMP=y 117CONFIG_SMP=y
118CONFIG_NR_CPUS=1024 118CONFIG_NR_CPUS=1024
119CONFIG_IA64_NR_NODES=256
119# CONFIG_HOTPLUG_CPU is not set 120# CONFIG_HOTPLUG_CPU is not set
120CONFIG_SCHED_SMT=y 121CONFIG_SCHED_SMT=y
121CONFIG_PREEMPT=y 122CONFIG_PREEMPT=y
diff --git a/arch/ia64/defconfig b/arch/ia64/defconfig
index 3e767288a745..6cba55da572a 100644
--- a/arch/ia64/defconfig
+++ b/arch/ia64/defconfig
@@ -116,6 +116,7 @@ CONFIG_IOSAPIC=y
116CONFIG_FORCE_MAX_ZONEORDER=17 116CONFIG_FORCE_MAX_ZONEORDER=17
117CONFIG_SMP=y 117CONFIG_SMP=y
118CONFIG_NR_CPUS=512 118CONFIG_NR_CPUS=512
119CONFIG_IA64_NR_NODES=256
119CONFIG_HOTPLUG_CPU=y 120CONFIG_HOTPLUG_CPU=y
120# CONFIG_SCHED_SMT is not set 121# CONFIG_SCHED_SMT is not set
121# CONFIG_PREEMPT is not set 122# CONFIG_PREEMPT is not set
diff --git a/arch/ia64/dig/setup.c b/arch/ia64/dig/setup.c
index c9104bfff667..38aa9c108857 100644
--- a/arch/ia64/dig/setup.c
+++ b/arch/ia64/dig/setup.c
@@ -69,8 +69,3 @@ dig_setup (char **cmdline_p)
69 screen_info.orig_video_isVGA = 1; /* XXX fake */ 69 screen_info.orig_video_isVGA = 1; /* XXX fake */
70 screen_info.orig_video_ega_bx = 3; /* XXX fake */ 70 screen_info.orig_video_ega_bx = 3; /* XXX fake */
71} 71}
72
73void __init
74dig_irq_init (void)
75{
76}
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index 626cdc83668b..0e5c6ae50228 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -46,11 +46,6 @@
46#define KEYBOARD_INTR 3 /* must match with simulator! */ 46#define KEYBOARD_INTR 3 /* must match with simulator! */
47 47
48#define NR_PORTS 1 /* only one port for now */ 48#define NR_PORTS 1 /* only one port for now */
49#define SERIAL_INLINE 1
50
51#ifdef SERIAL_INLINE
52#define _INLINE_ inline
53#endif
54 49
55#define IRQ_T(info) ((info->flags & ASYNC_SHARE_IRQ) ? SA_SHIRQ : SA_INTERRUPT) 50#define IRQ_T(info) ((info->flags & ASYNC_SHARE_IRQ) ? SA_SHIRQ : SA_INTERRUPT)
56 51
@@ -237,7 +232,7 @@ static void rs_put_char(struct tty_struct *tty, unsigned char ch)
237 local_irq_restore(flags); 232 local_irq_restore(flags);
238} 233}
239 234
240static _INLINE_ void transmit_chars(struct async_struct *info, int *intr_done) 235static void transmit_chars(struct async_struct *info, int *intr_done)
241{ 236{
242 int count; 237 int count;
243 unsigned long flags; 238 unsigned long flags;
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 70dba1f0e2ee..13e739e4c84d 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -1166,19 +1166,7 @@ put_tv32 (struct compat_timeval __user *o, struct timeval *i)
1166asmlinkage unsigned long 1166asmlinkage unsigned long
1167sys32_alarm (unsigned int seconds) 1167sys32_alarm (unsigned int seconds)
1168{ 1168{
1169 struct itimerval it_new, it_old; 1169 return alarm_setitimer(seconds);
1170 unsigned int oldalarm;
1171
1172 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
1173 it_new.it_value.tv_sec = seconds;
1174 it_new.it_value.tv_usec = 0;
1175 do_setitimer(ITIMER_REAL, &it_new, &it_old);
1176 oldalarm = it_old.it_value.tv_sec;
1177 /* ehhh.. We can't return 0 if we have an alarm pending.. */
1178 /* And we'd better return too much than too little anyway */
1179 if (it_old.it_value.tv_usec)
1180 oldalarm++;
1181 return oldalarm;
1182} 1170}
1183 1171
1184/* Translations due to time_t size differences. Which affects all 1172/* Translations due to time_t size differences. Which affects all
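Editorial note: sys32_alarm() now just calls alarm_setitimer(), but the removed open-coded version documents the semantics worth keeping in mind: the previous alarm's remaining time is returned in whole seconds, rounded up so a still-pending alarm never reports zero. A tiny standalone version of that rounding rule; round_up_alarm() is an illustrative helper, not a kernel function.

#include <stdio.h>

/* Convert a remaining (sec, usec) interval to the value alarm() reports:
 * whole seconds, rounded up so a pending alarm is never reported as 0. */
static unsigned int round_up_alarm(long sec, long usec)
{
	unsigned int old = (unsigned int)sec;

	if (usec)
		old++;
	return old;
}

int main(void)
{
	printf("%u\n", round_up_alarm(4, 0));		/* 4 */
	printf("%u\n", round_up_alarm(4, 300000));	/* 5: better too much than too little */
	printf("%u\n", round_up_alarm(0, 1));		/* 1: pending alarm never returns 0 */
	return 0;
}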
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 4722ec51c70c..a4e218ce2edb 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -420,6 +420,26 @@ int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
420int __initdata nid_to_pxm_map[MAX_NUMNODES]; 420int __initdata nid_to_pxm_map[MAX_NUMNODES];
421static struct acpi_table_slit __initdata *slit_table; 421static struct acpi_table_slit __initdata *slit_table;
422 422
423static int get_processor_proximity_domain(struct acpi_table_processor_affinity *pa)
424{
425 int pxm;
426
427 pxm = pa->proximity_domain;
428 if (ia64_platform_is("sn2"))
429 pxm += pa->reserved[0] << 8;
430 return pxm;
431}
432
433static int get_memory_proximity_domain(struct acpi_table_memory_affinity *ma)
434{
435 int pxm;
436
437 pxm = ma->proximity_domain;
438 if (ia64_platform_is("sn2"))
439 pxm += ma->reserved1[0] << 8;
440 return pxm;
441}
442
423/* 443/*
424 * ACPI 2.0 SLIT (System Locality Information Table) 444 * ACPI 2.0 SLIT (System Locality Information Table)
425 * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf 445 * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
@@ -443,13 +463,20 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
443void __init 463void __init
444acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa) 464acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
445{ 465{
466 int pxm;
467
468 if (!pa->flags.enabled)
469 return;
470
471 pxm = get_processor_proximity_domain(pa);
472
446 /* record this node in proximity bitmap */ 473 /* record this node in proximity bitmap */
447 pxm_bit_set(pa->proximity_domain); 474 pxm_bit_set(pxm);
448 475
449 node_cpuid[srat_num_cpus].phys_id = 476 node_cpuid[srat_num_cpus].phys_id =
450 (pa->apic_id << 8) | (pa->lsapic_eid); 477 (pa->apic_id << 8) | (pa->lsapic_eid);
451 /* nid should be overridden as logical node id later */ 478 /* nid should be overridden as logical node id later */
452 node_cpuid[srat_num_cpus].nid = pa->proximity_domain; 479 node_cpuid[srat_num_cpus].nid = pxm;
453 srat_num_cpus++; 480 srat_num_cpus++;
454} 481}
455 482
@@ -457,10 +484,10 @@ void __init
457acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma) 484acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
458{ 485{
459 unsigned long paddr, size; 486 unsigned long paddr, size;
460 u8 pxm; 487 int pxm;
461 struct node_memblk_s *p, *q, *pend; 488 struct node_memblk_s *p, *q, *pend;
462 489
463 pxm = ma->proximity_domain; 490 pxm = get_memory_proximity_domain(ma);
464 491
465 /* fill node memory chunk structure */ 492 /* fill node memory chunk structure */
466 paddr = ma->base_addr_hi; 493 paddr = ma->base_addr_hi;
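Editorial note: on sn2 the SRAT proximity domain is widened from 8 to 16 bits by folding the first reserved byte in as the high byte, which is all the two new helpers above do. The arithmetic is just pxm = low | (high << 8); a short check of that composition follows, with field names that are illustrative rather than the ACPI structure members.

#include <stdio.h>

/* Compose a 16-bit proximity domain from the SRAT low byte plus the
 * platform-specific high byte carried in a reserved field. */
static int proximity_domain(unsigned char low, unsigned char high, int wide)
{
	int pxm = low;

	if (wide)			/* sn2-style platforms use the extra byte */
		pxm += high << 8;
	return pxm;
}

int main(void)
{
	printf("%d\n", proximity_domain(0x12, 0x00, 0));	/* 18, 8-bit platforms */
	printf("%d\n", proximity_domain(0x12, 0x03, 1));	/* 786, with the high byte */
	return 0;
}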
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index dcd906fe5749..829a43cab797 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -865,6 +865,7 @@ ENTRY(interrupt)
865 ;; 865 ;;
866 SAVE_REST 866 SAVE_REST
867 ;; 867 ;;
868 MCA_RECOVER_RANGE(interrupt)
868 alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group 869 alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
869 mov out0=cr.ivr // pass cr.ivr as first arg 870 mov out0=cr.ivr // pass cr.ivr as first arg
870 add out1=16,sp // pass pointer to pt_regs as second arg 871 add out1=16,sp // pass pointer to pt_regs as second arg
diff --git a/arch/ia64/kernel/machvec.c b/arch/ia64/kernel/machvec.c
index c3a04ee7f4f6..4b0b71d5aef4 100644
--- a/arch/ia64/kernel/machvec.c
+++ b/arch/ia64/kernel/machvec.c
@@ -14,7 +14,15 @@
14struct ia64_machine_vector ia64_mv; 14struct ia64_machine_vector ia64_mv;
15EXPORT_SYMBOL(ia64_mv); 15EXPORT_SYMBOL(ia64_mv);
16 16
17static struct ia64_machine_vector * 17static __initdata const char *mvec_name;
18static __init int setup_mvec(char *s)
19{
20 mvec_name = s;
21 return 0;
22}
23early_param("machvec", setup_mvec);
24
25static struct ia64_machine_vector * __init
18lookup_machvec (const char *name) 26lookup_machvec (const char *name)
19{ 27{
20 extern struct ia64_machine_vector machvec_start[]; 28 extern struct ia64_machine_vector machvec_start[];
@@ -33,10 +41,13 @@ machvec_init (const char *name)
33{ 41{
34 struct ia64_machine_vector *mv; 42 struct ia64_machine_vector *mv;
35 43
44 if (!name)
45 name = mvec_name ? mvec_name : acpi_get_sysname();
36 mv = lookup_machvec(name); 46 mv = lookup_machvec(name);
37 if (!mv) { 47 if (!mv)
38 panic("generic kernel failed to find machine vector for platform %s!", name); 48 panic("generic kernel failed to find machine vector for"
39 } 49 " platform %s!", name);
50
40 ia64_mv = *mv; 51 ia64_mv = *mv;
41 printk(KERN_INFO "booting generic kernel on platform %s\n", name); 52 printk(KERN_INFO "booting generic kernel on platform %s\n", name);
42} 53}
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index b57e723f194c..87ff7fe33cfb 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -83,6 +83,7 @@
83#include <asm/irq.h> 83#include <asm/irq.h>
84#include <asm/hw_irq.h> 84#include <asm/hw_irq.h>
85 85
86#include "mca_drv.h"
86#include "entry.h" 87#include "entry.h"
87 88
88#if defined(IA64_MCA_DEBUG_INFO) 89#if defined(IA64_MCA_DEBUG_INFO)
@@ -133,7 +134,7 @@ static int cpe_poll_enabled = 1;
133 134
134extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe); 135extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
135 136
136static int mca_init; 137static int mca_init __initdata;
137 138
138 139
139static void inline 140static void inline
@@ -184,7 +185,7 @@ static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
184 * Inputs : info_type (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE}) 185 * Inputs : info_type (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
185 * Outputs : None 186 * Outputs : None
186 */ 187 */
187static void 188static void __init
188ia64_log_init(int sal_info_type) 189ia64_log_init(int sal_info_type)
189{ 190{
190 u64 max_size = 0; 191 u64 max_size = 0;
@@ -281,6 +282,50 @@ ia64_mca_log_sal_error_record(int sal_info_type)
281 ia64_sal_clear_state_info(sal_info_type); 282 ia64_sal_clear_state_info(sal_info_type);
282} 283}
283 284
285/*
286 * search_mca_table
287 * See if the MCA surfaced in an instruction range
288 * that has been tagged as recoverable.
289 *
290 * Inputs
291 * first First address range to check
292 * last Last address range to check
293 * ip Instruction pointer, address we are looking for
294 *
295 * Return value:
296 * 1 on Success (in the table)/ 0 on Failure (not in the table)
297 */
298int
299search_mca_table (const struct mca_table_entry *first,
300 const struct mca_table_entry *last,
301 unsigned long ip)
302{
303 const struct mca_table_entry *curr;
304 u64 curr_start, curr_end;
305
306 curr = first;
307 while (curr <= last) {
308 curr_start = (u64) &curr->start_addr + curr->start_addr;
309 curr_end = (u64) &curr->end_addr + curr->end_addr;
310
311 if ((ip >= curr_start) && (ip <= curr_end)) {
312 return 1;
313 }
314 curr++;
315 }
316 return 0;
317}
318
319/* Given an address, look for it in the mca tables. */
320int mca_recover_range(unsigned long addr)
321{
322 extern struct mca_table_entry __start___mca_table[];
323 extern struct mca_table_entry __stop___mca_table[];
324
325 return search_mca_table(__start___mca_table, __stop___mca_table-1, addr);
326}
327EXPORT_SYMBOL_GPL(mca_recover_range);
328
284#ifdef CONFIG_ACPI 329#ifdef CONFIG_ACPI
285 330
286int cpe_vector = -1; 331int cpe_vector = -1;
@@ -355,7 +400,7 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
355 * Outputs 400 * Outputs
356 * None 401 * None
357 */ 402 */
358static void 403static void __init
359ia64_mca_register_cpev (int cpev) 404ia64_mca_register_cpev (int cpev)
360{ 405{
361 /* Register the CPE interrupt vector with SAL */ 406 /* Register the CPE interrupt vector with SAL */
@@ -386,7 +431,7 @@ ia64_mca_register_cpev (int cpev)
386 * Outputs 431 * Outputs
387 * None 432 * None
388 */ 433 */
389void 434void __cpuinit
390ia64_mca_cmc_vector_setup (void) 435ia64_mca_cmc_vector_setup (void)
391{ 436{
392 cmcv_reg_t cmcv; 437 cmcv_reg_t cmcv;
@@ -747,31 +792,34 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
747 ia64_mca_modify_comm(previous_current); 792 ia64_mca_modify_comm(previous_current);
748 goto no_mod; 793 goto no_mod;
749 } 794 }
750 if (r13 != sos->prev_IA64_KR_CURRENT) { 795
751 msg = "inconsistent previous current and r13"; 796 if (!mca_recover_range(ms->pmsa_iip)) {
752 goto no_mod; 797 if (r13 != sos->prev_IA64_KR_CURRENT) {
753 } 798 msg = "inconsistent previous current and r13";
754 if ((r12 - r13) >= KERNEL_STACK_SIZE) { 799 goto no_mod;
755 msg = "inconsistent r12 and r13"; 800 }
756 goto no_mod; 801 if ((r12 - r13) >= KERNEL_STACK_SIZE) {
757 } 802 msg = "inconsistent r12 and r13";
758 if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) { 803 goto no_mod;
759 msg = "inconsistent ar.bspstore and r13"; 804 }
760 goto no_mod; 805 if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) {
761 } 806 msg = "inconsistent ar.bspstore and r13";
762 va.p = old_bspstore; 807 goto no_mod;
763 if (va.f.reg < 5) { 808 }
764 msg = "old_bspstore is in the wrong region"; 809 va.p = old_bspstore;
765 goto no_mod; 810 if (va.f.reg < 5) {
766 } 811 msg = "old_bspstore is in the wrong region";
767 if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) { 812 goto no_mod;
768 msg = "inconsistent ar.bsp and r13"; 813 }
769 goto no_mod; 814 if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) {
770 } 815 msg = "inconsistent ar.bsp and r13";
771 size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8; 816 goto no_mod;
772 if (ar_bspstore + size > r12) { 817 }
773 msg = "no room for blocked state"; 818 size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8;
774 goto no_mod; 819 if (ar_bspstore + size > r12) {
820 msg = "no room for blocked state";
821 goto no_mod;
822 }
775 } 823 }
776 824
777 ia64_mca_modify_comm(previous_current); 825 ia64_mca_modify_comm(previous_current);
@@ -1443,7 +1491,7 @@ static struct irqaction mca_cpep_irqaction = {
1443 * format most of the fields. 1491 * format most of the fields.
1444 */ 1492 */
1445 1493
1446static void 1494static void __cpuinit
1447format_mca_init_stack(void *mca_data, unsigned long offset, 1495format_mca_init_stack(void *mca_data, unsigned long offset,
1448 const char *type, int cpu) 1496 const char *type, int cpu)
1449{ 1497{
@@ -1467,7 +1515,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
1467 1515
1468/* Do per-CPU MCA-related initialization. */ 1516/* Do per-CPU MCA-related initialization. */
1469 1517
1470void __devinit 1518void __cpuinit
1471ia64_mca_cpu_init(void *cpu_data) 1519ia64_mca_cpu_init(void *cpu_data)
1472{ 1520{
1473 void *pal_vaddr; 1521 void *pal_vaddr;
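Editorial note: search_mca_table() stores each recoverable range as offsets relative to the table entry itself ((u64)&curr->start_addr + curr->start_addr), the same self-relative trick used for exception tables so the section needs no relocation. A userspace sketch of building and searching such a table follows; the entries are filled in by hand here rather than by the MCA_RECOVER_RANGE assembler macro, and the struct and helper names are illustrative.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct range_entry {
	intptr_t start;		/* offset of range start, relative to &entry->start */
	intptr_t end;		/* offset of range end, relative to &entry->end */
};

/* Turn the self-relative offsets back into absolute addresses and test ip.
 * Offsets may be negative; unsigned wraparound recovers the address. */
static bool in_table(const struct range_entry *first,
		     const struct range_entry *last, uintptr_t ip)
{
	for (const struct range_entry *cur = first; cur <= last; cur++) {
		uintptr_t start = (uintptr_t)&cur->start + cur->start;
		uintptr_t end = (uintptr_t)&cur->end + cur->end;

		if (ip >= start && ip <= end)
			return true;
	}
	return false;
}

static char region[64];			/* a fake "recoverable" code range */
static struct range_entry table[1];

int main(void)
{
	/* encode [region, region+63] relative to the entry's own fields */
	table[0].start = (intptr_t)region - (intptr_t)&table[0].start;
	table[0].end = (intptr_t)(region + 63) - (intptr_t)&table[0].end;

	printf("%d\n", in_table(table, table, (uintptr_t)region + 10));	/* 1 */
	printf("%d\n", in_table(table, table, (uintptr_t)region - 1));	/* 0 */
	return 0;
}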
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index e883d85906db..37c88eb55873 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -6,6 +6,7 @@
6 * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com) 6 * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com)
7 * Copyright (C) 2005 Silicon Graphics, Inc 7 * Copyright (C) 2005 Silicon Graphics, Inc
8 * Copyright (C) 2005 Keith Owens <kaos@sgi.com> 8 * Copyright (C) 2005 Keith Owens <kaos@sgi.com>
9 * Copyright (C) 2006 Russ Anderson <rja@sgi.com>
9 */ 10 */
10#include <linux/config.h> 11#include <linux/config.h>
11#include <linux/types.h> 12#include <linux/types.h>
@@ -121,11 +122,12 @@ mca_page_isolate(unsigned long paddr)
121 */ 122 */
122 123
123void 124void
124mca_handler_bh(unsigned long paddr) 125mca_handler_bh(unsigned long paddr, void *iip, unsigned long ipsr)
125{ 126{
126 printk(KERN_ERR 127 printk(KERN_ERR "OS_MCA: process [cpu %d, pid: %d, uid: %d, "
127 "OS_MCA: process [pid: %d](%s) encounters MCA (paddr=%lx)\n", 128 "iip: %p, psr: 0x%lx,paddr: 0x%lx](%s) encounters MCA.\n",
128 current->pid, current->comm, paddr); 129 raw_smp_processor_id(), current->pid, current->uid,
130 iip, ipsr, paddr, current->comm);
129 131
130 spin_lock(&mca_bh_lock); 132 spin_lock(&mca_bh_lock);
131 switch (mca_page_isolate(paddr)) { 133 switch (mca_page_isolate(paddr)) {
@@ -442,21 +444,26 @@ recover_from_read_error(slidx_table_t *slidx,
442 if (!peidx_bottom(peidx) || !(peidx_bottom(peidx)->valid.minstate)) 444 if (!peidx_bottom(peidx) || !(peidx_bottom(peidx)->valid.minstate))
443 return 0; 445 return 0;
444 psr1 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_ipsr); 446 psr1 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_ipsr);
447 psr2 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_xpsr);
445 448
446 /* 449 /*
447 * Check the privilege level of interrupted context. 450 * Check the privilege level of interrupted context.
448 * If it is user-mode, then terminate affected process. 451 * If it is user-mode, then terminate affected process.
449 */ 452 */
450 if (psr1->cpl != 0) { 453
454 pmsa = sos->pal_min_state;
455 if (psr1->cpl != 0 ||
456 ((psr2->cpl != 0) && mca_recover_range(pmsa->pmsa_iip))) {
451 smei = peidx_bus_check(peidx, 0); 457 smei = peidx_bus_check(peidx, 0);
452 if (smei->valid.target_identifier) { 458 if (smei->valid.target_identifier) {
453 /* 459 /*
454 * setup for resume to bottom half of MCA, 460 * setup for resume to bottom half of MCA,
455 * "mca_handler_bhhook" 461 * "mca_handler_bhhook"
456 */ 462 */
457 pmsa = sos->pal_min_state; 463 /* pass to bhhook as argument (gr8, ...) */
458 /* pass to bhhook as 1st argument (gr8) */
459 pmsa->pmsa_gr[8-1] = smei->target_identifier; 464 pmsa->pmsa_gr[8-1] = smei->target_identifier;
465 pmsa->pmsa_gr[9-1] = pmsa->pmsa_iip;
466 pmsa->pmsa_gr[10-1] = pmsa->pmsa_ipsr;
460 /* set interrupted return address (but no use) */ 467 /* set interrupted return address (but no use) */
461 pmsa->pmsa_br0 = pmsa->pmsa_iip; 468 pmsa->pmsa_br0 = pmsa->pmsa_iip;
462 /* change resume address to bottom half */ 469 /* change resume address to bottom half */
@@ -466,6 +473,7 @@ recover_from_read_error(slidx_table_t *slidx,
466 psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr; 473 psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr;
467 psr2->cpl = 0; 474 psr2->cpl = 0;
468 psr2->ri = 0; 475 psr2->ri = 0;
476 psr2->bn = 1;
469 psr2->i = 0; 477 psr2->i = 0;
470 478
471 return 1; 479 return 1;
diff --git a/arch/ia64/kernel/mca_drv.h b/arch/ia64/kernel/mca_drv.h
index e2f6fa1e0ef6..31a2e52bb16f 100644
--- a/arch/ia64/kernel/mca_drv.h
+++ b/arch/ia64/kernel/mca_drv.h
@@ -111,3 +111,10 @@ typedef struct slidx_table {
111 slidx_foreach_entry(__pos, &((slidx)->sec)) { __count++; }\ 111 slidx_foreach_entry(__pos, &((slidx)->sec)) { __count++; }\
112 __count; }) 112 __count; })
113 113
114struct mca_table_entry {
115 int start_addr; /* location-relative starting address of MCA recoverable range */
116 int end_addr; /* location-relative ending address of MCA recoverable range */
117};
118
119extern const struct mca_table_entry *search_mca_tables (unsigned long addr);
120extern int mca_recover_range(unsigned long);
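search_mca_tables() and mca_recover_range() are defined elsewhere in this patch series; the vmlinux.lds.S hunk further down gathers the table entries between __start___mca_table and __stop___mca_table. A minimal sketch of how such a lookup could work, assuming (per the field comments) that start_addr and end_addr hold offsets relative to the entry itself:

extern const struct mca_table_entry __start___mca_table[];
extern const struct mca_table_entry __stop___mca_table[];

const struct mca_table_entry *search_mca_tables(unsigned long addr)
{
	const struct mca_table_entry *e;

	for (e = __start___mca_table; e < __stop___mca_table; e++) {
		unsigned long start = (unsigned long)&e->start_addr + e->start_addr;
		unsigned long end   = (unsigned long)&e->end_addr   + e->end_addr;

		if (addr >= start && addr <= end)
			return e;
	}
	return NULL;
}

int mca_recover_range(unsigned long addr)
{
	return search_mca_tables(addr) != NULL;
}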
diff --git a/arch/ia64/kernel/mca_drv_asm.S b/arch/ia64/kernel/mca_drv_asm.S
index 3f298ee4d00c..e6a580d354b9 100644
--- a/arch/ia64/kernel/mca_drv_asm.S
+++ b/arch/ia64/kernel/mca_drv_asm.S
@@ -14,15 +14,12 @@
14 14
15GLOBAL_ENTRY(mca_handler_bhhook) 15GLOBAL_ENTRY(mca_handler_bhhook)
16 invala // clear RSE ? 16 invala // clear RSE ?
17 ;;
18 cover 17 cover
19 ;; 18 ;;
20 clrrrb 19 clrrrb
21 ;; 20 ;;
22 alloc r16=ar.pfs,0,2,1,0 // make a new frame 21 alloc r16=ar.pfs,0,2,3,0 // make a new frame
23 ;;
24 mov ar.rsc=0 22 mov ar.rsc=0
25 ;;
26 mov r13=IA64_KR(CURRENT) // current task pointer 23 mov r13=IA64_KR(CURRENT) // current task pointer
27 ;; 24 ;;
28 mov r2=r13 25 mov r2=r13
@@ -30,7 +27,6 @@ GLOBAL_ENTRY(mca_handler_bhhook)
30 addl r22=IA64_RBS_OFFSET,r2 27 addl r22=IA64_RBS_OFFSET,r2
31 ;; 28 ;;
32 mov ar.bspstore=r22 29 mov ar.bspstore=r22
33 ;;
34 addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2 30 addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2
35 ;; 31 ;;
36 adds r2=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13 32 adds r2=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
@@ -40,12 +36,12 @@ GLOBAL_ENTRY(mca_handler_bhhook)
40 movl loc1=mca_handler_bh // recovery C function 36 movl loc1=mca_handler_bh // recovery C function
41 ;; 37 ;;
42 mov out0=r8 // poisoned address 38 mov out0=r8 // poisoned address
39 mov out1=r9 // iip
40 mov out2=r10 // psr
43 mov b6=loc1 41 mov b6=loc1
44 ;; 42 ;;
45 mov loc1=rp 43 mov loc1=rp
46 ;; 44 ssm psr.i | psr.ic
47 ssm psr.i
48 ;;
49 br.call.sptk.many rp=b6 // does not return ... 45 br.call.sptk.many rp=b6 // does not return ...
50 ;; 46 ;;
51 mov ar.pfs=loc0 47 mov ar.pfs=loc0
@@ -53,5 +49,4 @@ GLOBAL_ENTRY(mca_handler_bhhook)
53 ;; 49 ;;
54 mov r8=r0 50 mov r8=r0
55 br.ret.sptk.many rp 51 br.ret.sptk.many rp
56 ;;
57END(mca_handler_bhhook) 52END(mca_handler_bhhook)
diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c
index a68ce6678092..0766493d4d00 100644
--- a/arch/ia64/kernel/numa.c
+++ b/arch/ia64/kernel/numa.c
@@ -25,7 +25,7 @@
25#include <asm/processor.h> 25#include <asm/processor.h>
26#include <asm/smp.h> 26#include <asm/smp.h>
27 27
28u8 cpu_to_node_map[NR_CPUS] __cacheline_aligned; 28u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
29EXPORT_SYMBOL(cpu_to_node_map); 29EXPORT_SYMBOL(cpu_to_node_map);
30 30
31cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned; 31cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
diff --git a/arch/ia64/kernel/patch.c b/arch/ia64/kernel/patch.c
index 6a4ac7d70b35..bc11bb096f58 100644
--- a/arch/ia64/kernel/patch.c
+++ b/arch/ia64/kernel/patch.c
@@ -115,7 +115,7 @@ ia64_patch_vtop (unsigned long start, unsigned long end)
115 ia64_srlz_i(); 115 ia64_srlz_i();
116} 116}
117 117
118void 118void __init
119ia64_patch_mckinley_e9 (unsigned long start, unsigned long end) 119ia64_patch_mckinley_e9 (unsigned long start, unsigned long end)
120{ 120{
121 static int first_time = 1; 121 static int first_time = 1;
@@ -149,7 +149,7 @@ ia64_patch_mckinley_e9 (unsigned long start, unsigned long end)
149 ia64_srlz_i(); 149 ia64_srlz_i();
150} 150}
151 151
152static void 152static void __init
153patch_fsyscall_table (unsigned long start, unsigned long end) 153patch_fsyscall_table (unsigned long start, unsigned long end)
154{ 154{
155 extern unsigned long fsyscall_table[NR_syscalls]; 155 extern unsigned long fsyscall_table[NR_syscalls];
@@ -166,7 +166,7 @@ patch_fsyscall_table (unsigned long start, unsigned long end)
166 ia64_srlz_i(); 166 ia64_srlz_i();
167} 167}
168 168
169static void 169static void __init
170patch_brl_fsys_bubble_down (unsigned long start, unsigned long end) 170patch_brl_fsys_bubble_down (unsigned long start, unsigned long end)
171{ 171{
172 extern char fsys_bubble_down[]; 172 extern char fsys_bubble_down[];
@@ -184,7 +184,7 @@ patch_brl_fsys_bubble_down (unsigned long start, unsigned long end)
184 ia64_srlz_i(); 184 ia64_srlz_i();
185} 185}
186 186
187void 187void __init
188ia64_patch_gate (void) 188ia64_patch_gate (void)
189{ 189{
190# define START(name) ((unsigned long) __start_gate_##name##_patchlist) 190# define START(name) ((unsigned long) __start_gate_##name##_patchlist)
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index eaed14aac6aa..9887c8787e7a 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -1656,8 +1656,14 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
1656 long arg4, long arg5, long arg6, long arg7, 1656 long arg4, long arg5, long arg6, long arg7,
1657 struct pt_regs regs) 1657 struct pt_regs regs)
1658{ 1658{
1659 if (unlikely(current->audit_context)) 1659 if (unlikely(current->audit_context)) {
1660 audit_syscall_exit(current, AUDITSC_RESULT(regs.r10), regs.r8); 1660 int success = AUDITSC_RESULT(regs.r10);
1661 long result = regs.r8;
1662
1663 if (success != AUDITSC_SUCCESS)
1664 result = -result;
1665 audit_syscall_exit(current, success, result);
1666 }
1661 1667
1662 if (test_thread_flag(TIF_SYSCALL_TRACE) 1668 if (test_thread_flag(TIF_SYSCALL_TRACE)
1663 && (current->ptrace & PT_PTRACED)) 1669 && (current->ptrace & PT_PTRACED))
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 3258e09278d0..eb388e271b2b 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -41,7 +41,6 @@
41#include <linux/serial_core.h> 41#include <linux/serial_core.h>
42#include <linux/efi.h> 42#include <linux/efi.h>
43#include <linux/initrd.h> 43#include <linux/initrd.h>
44#include <linux/platform.h>
45#include <linux/pm.h> 44#include <linux/pm.h>
46#include <linux/cpufreq.h> 45#include <linux/cpufreq.h>
47 46
@@ -131,8 +130,8 @@ EXPORT_SYMBOL(ia64_max_iommu_merge_mask);
131/* 130/*
132 * We use a special marker for the end of memory and it uses the extra (+1) slot 131 * We use a special marker for the end of memory and it uses the extra (+1) slot
133 */ 132 */
134struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1]; 133struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata;
135int num_rsvd_regions; 134int num_rsvd_regions __initdata;
136 135
137 136
138/* 137/*
@@ -141,7 +140,7 @@ int num_rsvd_regions;
141 * caller-specified function is called with the memory ranges that remain after filtering. 140 * caller-specified function is called with the memory ranges that remain after filtering.
142 * This routine does not assume the incoming segments are sorted. 141 * This routine does not assume the incoming segments are sorted.
143 */ 142 */
144int 143int __init
145filter_rsvd_memory (unsigned long start, unsigned long end, void *arg) 144filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
146{ 145{
147 unsigned long range_start, range_end, prev_start; 146 unsigned long range_start, range_end, prev_start;
@@ -177,7 +176,7 @@ filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
177 return 0; 176 return 0;
178} 177}
179 178
180static void 179static void __init
181sort_regions (struct rsvd_region *rsvd_region, int max) 180sort_regions (struct rsvd_region *rsvd_region, int max)
182{ 181{
183 int j; 182 int j;
@@ -218,7 +217,7 @@ __initcall(register_memory);
218 * initrd, etc. There are currently %IA64_MAX_RSVD_REGIONS defined, 217 * initrd, etc. There are currently %IA64_MAX_RSVD_REGIONS defined,
219 * see include/asm-ia64/meminit.h if you need to define more. 218 * see include/asm-ia64/meminit.h if you need to define more.
220 */ 219 */
221void 220void __init
222reserve_memory (void) 221reserve_memory (void)
223{ 222{
224 int n = 0; 223 int n = 0;
@@ -270,7 +269,7 @@ reserve_memory (void)
270 * Grab the initrd start and end from the boot parameter struct given us by 269 * Grab the initrd start and end from the boot parameter struct given us by
271 * the boot loader. 270 * the boot loader.
272 */ 271 */
273void 272void __init
274find_initrd (void) 273find_initrd (void)
275{ 274{
276#ifdef CONFIG_BLK_DEV_INITRD 275#ifdef CONFIG_BLK_DEV_INITRD
@@ -362,7 +361,7 @@ mark_bsp_online (void)
362} 361}
363 362
364#ifdef CONFIG_SMP 363#ifdef CONFIG_SMP
365static void 364static void __init
366check_for_logical_procs (void) 365check_for_logical_procs (void)
367{ 366{
368 pal_logical_to_physical_t info; 367 pal_logical_to_physical_t info;
@@ -389,6 +388,14 @@ check_for_logical_procs (void)
389} 388}
390#endif 389#endif
391 390
391static __initdata int nomca;
392static __init int setup_nomca(char *s)
393{
394 nomca = 1;
395 return 0;
396}
397early_param("nomca", setup_nomca);
398
392void __init 399void __init
393setup_arch (char **cmdline_p) 400setup_arch (char **cmdline_p)
394{ 401{
@@ -402,35 +409,15 @@ setup_arch (char **cmdline_p)
402 efi_init(); 409 efi_init();
403 io_port_init(); 410 io_port_init();
404 411
412 parse_early_param();
413
405#ifdef CONFIG_IA64_GENERIC 414#ifdef CONFIG_IA64_GENERIC
406 { 415 machvec_init(NULL);
407 const char *mvec_name = strstr (*cmdline_p, "machvec=");
408 char str[64];
409
410 if (mvec_name) {
411 const char *end;
412 size_t len;
413
414 mvec_name += 8;
415 end = strchr (mvec_name, ' ');
416 if (end)
417 len = end - mvec_name;
418 else
419 len = strlen (mvec_name);
420 len = min(len, sizeof (str) - 1);
421 strncpy (str, mvec_name, len);
422 str[len] = '\0';
423 mvec_name = str;
424 } else
425 mvec_name = acpi_get_sysname();
426 machvec_init(mvec_name);
427 }
428#endif 416#endif
429 417
430 if (early_console_setup(*cmdline_p) == 0) 418 if (early_console_setup(*cmdline_p) == 0)
431 mark_bsp_online(); 419 mark_bsp_online();
432 420
433 parse_early_param();
434#ifdef CONFIG_ACPI 421#ifdef CONFIG_ACPI
435 /* Initialize the ACPI boot-time table parser */ 422 /* Initialize the ACPI boot-time table parser */
436 acpi_table_init(); 423 acpi_table_init();
@@ -493,7 +480,7 @@ setup_arch (char **cmdline_p)
493#endif 480#endif
494 481
495 /* enable IA-64 Machine Check Abort Handling unless disabled */ 482 /* enable IA-64 Machine Check Abort Handling unless disabled */
496 if (!strstr(saved_command_line, "nomca")) 483 if (!nomca)
497 ia64_mca_init(); 484 ia64_mca_init();
498 485
499 platform_setup(cmdline_p); 486 platform_setup(cmdline_p);
@@ -623,7 +610,7 @@ struct seq_operations cpuinfo_op = {
623 .show = show_cpuinfo 610 .show = show_cpuinfo
624}; 611};
625 612
626void 613static void __cpuinit
627identify_cpu (struct cpuinfo_ia64 *c) 614identify_cpu (struct cpuinfo_ia64 *c)
628{ 615{
629 union { 616 union {
@@ -700,7 +687,7 @@ setup_per_cpu_areas (void)
700 * In addition, the minimum of the i-cache stride sizes is calculated for 687 * In addition, the minimum of the i-cache stride sizes is calculated for
701 * "flush_icache_range()". 688 * "flush_icache_range()".
702 */ 689 */
703static void 690static void __cpuinit
704get_max_cacheline_size (void) 691get_max_cacheline_size (void)
705{ 692{
706 unsigned long line_size, max = 1; 693 unsigned long line_size, max = 1;
@@ -763,10 +750,10 @@ get_max_cacheline_size (void)
763 * cpu_init() initializes state that is per-CPU. This function acts 750 * cpu_init() initializes state that is per-CPU. This function acts
764 * as a 'CPU state barrier', nothing should get across. 751 * as a 'CPU state barrier', nothing should get across.
765 */ 752 */
766void 753void __cpuinit
767cpu_init (void) 754cpu_init (void)
768{ 755{
769 extern void __devinit ia64_mmu_init (void *); 756 extern void __cpuinit ia64_mmu_init (void *);
770 unsigned long num_phys_stacked; 757 unsigned long num_phys_stacked;
771 pal_vm_info_2_u_t vmi; 758 pal_vm_info_2_u_t vmi;
772 unsigned int max_ctx; 759 unsigned int max_ctx;
@@ -894,7 +881,7 @@ void sched_cacheflush(void)
894 ia64_sal_cache_flush(3); 881 ia64_sal_cache_flush(3);
895} 882}
896 883
897void 884void __init
898check_bugs (void) 885check_bugs (void)
899{ 886{
900 ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles, 887 ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
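Two related cleanups run through this setup.c diff: parse_early_param() now runs before the machine vector is chosen, so machvec= no longer needs hand-rolled strstr() parsing, and the "nomca" option moves from scanning saved_command_line to the early_param() mechanism. The generic shape of that mechanism, with a made-up flag name for illustration:

static int noexample __initdata;

static int __init setup_noexample(char *s)
{
	noexample = 1;
	return 0;
}
early_param("noexample", setup_noexample);

	/* later, in setup_arch(), after parse_early_param(): */
	if (!noexample)
		example_init();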
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index c4b633b36dab..44e9547878ac 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -624,32 +624,8 @@ void __devinit smp_prepare_boot_cpu(void)
624 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; 624 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
625} 625}
626 626
627/*
628 * mt_info[] is a temporary store for all info returned by
629 * PAL_LOGICAL_TO_PHYSICAL, to be copied into cpuinfo_ia64 when the
630 * specific cpu comes.
631 */
632static struct {
633 __u32 socket_id;
634 __u16 core_id;
635 __u16 thread_id;
636 __u16 proc_fixed_addr;
637 __u8 valid;
638} mt_info[NR_CPUS] __devinitdata;
639
640#ifdef CONFIG_HOTPLUG_CPU 627#ifdef CONFIG_HOTPLUG_CPU
641static inline void 628static inline void
642remove_from_mtinfo(int cpu)
643{
644 int i;
645
646 for_each_cpu(i)
647 if (mt_info[i].valid && mt_info[i].socket_id ==
648 cpu_data(cpu)->socket_id)
649 mt_info[i].valid = 0;
650}
651
652static inline void
653clear_cpu_sibling_map(int cpu) 629clear_cpu_sibling_map(int cpu)
654{ 630{
655 int i; 631 int i;
@@ -678,12 +654,6 @@ remove_siblinginfo(int cpu)
678 654
679 /* remove it from all sibling map's */ 655 /* remove it from all sibling map's */
680 clear_cpu_sibling_map(cpu); 656 clear_cpu_sibling_map(cpu);
681
682 /* if this cpu is the last in the core group, remove all its info
683 * from mt_info structure
684 */
685 if (last)
686 remove_from_mtinfo(cpu);
687} 657}
688 658
689extern void fixup_irqs(void); 659extern void fixup_irqs(void);
@@ -878,40 +848,6 @@ init_smp_config(void)
878 ia64_sal_strerror(sal_ret)); 848 ia64_sal_strerror(sal_ret));
879} 849}
880 850
881static inline int __devinit
882check_for_mtinfo_index(void)
883{
884 int i;
885
886 for_each_cpu(i)
887 if (!mt_info[i].valid)
888 return i;
889
890 return -1;
891}
892
893/*
894 * Search the mt_info to find out if this socket's cid/tid information is
895 * cached or not. If the socket exists, fill in the core_id and thread_id
896 * in cpuinfo
897 */
898static int __devinit
899check_for_new_socket(__u16 logical_address, struct cpuinfo_ia64 *c)
900{
901 int i;
902 __u32 sid = c->socket_id;
903
904 for_each_cpu(i) {
905 if (mt_info[i].valid && mt_info[i].proc_fixed_addr == logical_address
906 && mt_info[i].socket_id == sid) {
907 c->core_id = mt_info[i].core_id;
908 c->thread_id = mt_info[i].thread_id;
909 return 1; /* not a new socket */
910 }
911 }
912 return 0;
913}
914
915/* 851/*
916 * identify_siblings(cpu) gets called from identify_cpu. This populates the 852 * identify_siblings(cpu) gets called from identify_cpu. This populates the
917 * information related to logical execution units in per_cpu_data structure. 853 * information related to logical execution units in per_cpu_data structure.
@@ -921,14 +857,12 @@ identify_siblings(struct cpuinfo_ia64 *c)
921{ 857{
922 s64 status; 858 s64 status;
923 u16 pltid; 859 u16 pltid;
924 u64 proc_fixed_addr;
925 int count, i;
926 pal_logical_to_physical_t info; 860 pal_logical_to_physical_t info;
927 861
928 if (smp_num_cpucores == 1 && smp_num_siblings == 1) 862 if (smp_num_cpucores == 1 && smp_num_siblings == 1)
929 return; 863 return;
930 864
931 if ((status = ia64_pal_logical_to_phys(0, &info)) != PAL_STATUS_SUCCESS) { 865 if ((status = ia64_pal_logical_to_phys(-1, &info)) != PAL_STATUS_SUCCESS) {
932 printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n", 866 printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
933 status); 867 status);
934 return; 868 return;
@@ -937,47 +871,12 @@ identify_siblings(struct cpuinfo_ia64 *c)
937 printk(KERN_ERR "ia64_sal_pltid failed with %ld\n", status); 871 printk(KERN_ERR "ia64_sal_pltid failed with %ld\n", status);
938 return; 872 return;
939 } 873 }
940 if ((status = ia64_pal_fixed_addr(&proc_fixed_addr)) != PAL_STATUS_SUCCESS) {
941 printk(KERN_ERR "ia64_pal_fixed_addr failed with %ld\n", status);
942 return;
943 }
944 874
945 c->socket_id = (pltid << 8) | info.overview_ppid; 875 c->socket_id = (pltid << 8) | info.overview_ppid;
946 c->cores_per_socket = info.overview_cpp; 876 c->cores_per_socket = info.overview_cpp;
947 c->threads_per_core = info.overview_tpc; 877 c->threads_per_core = info.overview_tpc;
948 count = c->num_log = info.overview_num_log; 878 c->num_log = info.overview_num_log;
949 879
950 /* If the thread and core id information is already cached, then 880 c->core_id = info.log1_cid;
951 * we will simply update cpu_info and return. Otherwise, we will 881 c->thread_id = info.log1_tid;
952 * do the PAL calls and cache core and thread id's of all the siblings.
953 */
954 if (check_for_new_socket(proc_fixed_addr, c))
955 return;
956
957 for (i = 0; i < count; i++) {
958 int index;
959
960 if (i && (status = ia64_pal_logical_to_phys(i, &info))
961 != PAL_STATUS_SUCCESS) {
962 printk(KERN_ERR "ia64_pal_logical_to_phys failed"
963 " with %ld\n", status);
964 return;
965 }
966 if (info.log2_la == proc_fixed_addr) {
967 c->core_id = info.log1_cid;
968 c->thread_id = info.log1_tid;
969 }
970
971 index = check_for_mtinfo_index();
972 /* We will not do the mt_info caching optimization in this case.
973 */
974 if (index < 0)
975 continue;
976
977 mt_info[index].valid = 1;
978 mt_info[index].socket_id = c->socket_id;
979 mt_info[index].core_id = info.log1_cid;
980 mt_info[index].thread_id = info.log1_tid;
981 mt_info[index].proc_fixed_addr = info.log2_la;
982 }
983} 882}
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 73af6267d2ef..0b9e56dd7f05 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -70,34 +70,9 @@ SECTIONS
70 __stop___ex_table = .; 70 __stop___ex_table = .;
71 } 71 }
72 72
73 .data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET)
74 {
75 __start___vtop_patchlist = .;
76 *(.data.patch.vtop)
77 __end___vtop_patchlist = .;
78 }
79
80 .data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET)
81 {
82 __start___mckinley_e9_bundles = .;
83 *(.data.patch.mckinley_e9)
84 __end___mckinley_e9_bundles = .;
85 }
86
87 /* Global data */ 73 /* Global data */
88 _data = .; 74 _data = .;
89 75
90#if defined(CONFIG_IA64_GENERIC)
91 /* Machine Vector */
92 . = ALIGN(16);
93 .machvec : AT(ADDR(.machvec) - LOAD_OFFSET)
94 {
95 machvec_start = .;
96 *(.machvec)
97 machvec_end = .;
98 }
99#endif
100
101 /* Unwind info & table: */ 76 /* Unwind info & table: */
102 . = ALIGN(8); 77 . = ALIGN(8);
103 .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET) 78 .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET)
@@ -154,6 +129,41 @@ SECTIONS
154 *(.initcall7.init) 129 *(.initcall7.init)
155 __initcall_end = .; 130 __initcall_end = .;
156 } 131 }
132
133 /* MCA table */
134 . = ALIGN(16);
135 __mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET)
136 {
137 __start___mca_table = .;
138 *(__mca_table)
139 __stop___mca_table = .;
140 }
141
142 .data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET)
143 {
144 __start___vtop_patchlist = .;
145 *(.data.patch.vtop)
146 __end___vtop_patchlist = .;
147 }
148
149 .data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET)
150 {
151 __start___mckinley_e9_bundles = .;
152 *(.data.patch.mckinley_e9)
153 __end___mckinley_e9_bundles = .;
154 }
155
156#if defined(CONFIG_IA64_GENERIC)
157 /* Machine Vector */
158 . = ALIGN(16);
159 .machvec : AT(ADDR(.machvec) - LOAD_OFFSET)
160 {
161 machvec_start = .;
162 *(.machvec)
163 machvec_end = .;
164 }
165#endif
166
157 __con_initcall_start = .; 167 __con_initcall_start = .;
158 .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) 168 .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET)
159 { *(.con_initcall.init) } 169 { *(.con_initcall.init) }
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 9855ba318094..84fd1c14c8a9 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -97,7 +97,7 @@ find_max_pfn (unsigned long start, unsigned long end, void *arg)
97 * Find a place to put the bootmap and return its starting address in 97 * Find a place to put the bootmap and return its starting address in
98 * bootmap_start. This address must be page-aligned. 98 * bootmap_start. This address must be page-aligned.
99 */ 99 */
100int 100static int __init
101find_bootmap_location (unsigned long start, unsigned long end, void *arg) 101find_bootmap_location (unsigned long start, unsigned long end, void *arg)
102{ 102{
103 unsigned long needed = *(unsigned long *)arg; 103 unsigned long needed = *(unsigned long *)arg;
@@ -141,7 +141,7 @@ find_bootmap_location (unsigned long start, unsigned long end, void *arg)
141 * Walk the EFI memory map and find usable memory for the system, taking 141 * Walk the EFI memory map and find usable memory for the system, taking
142 * into account reserved areas. 142 * into account reserved areas.
143 */ 143 */
144void 144void __init
145find_memory (void) 145find_memory (void)
146{ 146{
147 unsigned long bootmap_size; 147 unsigned long bootmap_size;
@@ -176,7 +176,7 @@ find_memory (void)
176 * 176 *
177 * Allocate and setup per-cpu data areas. 177 * Allocate and setup per-cpu data areas.
178 */ 178 */
179void * 179void * __cpuinit
180per_cpu_init (void) 180per_cpu_init (void)
181{ 181{
182 void *cpu_data; 182 void *cpu_data;
@@ -228,7 +228,7 @@ count_dma_pages (u64 start, u64 end, void *arg)
228 * Set up the page tables. 228 * Set up the page tables.
229 */ 229 */
230 230
231void 231void __init
232paging_init (void) 232paging_init (void)
233{ 233{
234 unsigned long max_dma; 234 unsigned long max_dma;
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 573d5cc63e2b..2f5e44862e91 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -525,7 +525,7 @@ void __init find_memory(void)
525 * find_pernode_space() does most of this already, we just need to set 525 * find_pernode_space() does most of this already, we just need to set
526 * local_per_cpu_offset 526 * local_per_cpu_offset
527 */ 527 */
528void *per_cpu_init(void) 528void __cpuinit *per_cpu_init(void)
529{ 529{
530 int cpu; 530 int cpu;
531 static int first_time = 1; 531 static int first_time = 1;
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 9dbc7dadd165..8d506710fdbd 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -113,8 +113,7 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb,
113 unsigned long floor, unsigned long ceiling) 113 unsigned long floor, unsigned long ceiling)
114{ 114{
115 /* 115 /*
116 * This is called only when is_hugepage_only_range(addr,), 116 * This is called to free hugetlb page tables.
117 * and it follows that is_hugepage_only_range(end,) also.
118 * 117 *
119 * The offset of these addresses from the base of the hugetlb 118 * The offset of these addresses from the base of the hugetlb
120 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that 119 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
@@ -126,9 +125,9 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb,
126 125
127 addr = htlbpage_to_page(addr); 126 addr = htlbpage_to_page(addr);
128 end = htlbpage_to_page(end); 127 end = htlbpage_to_page(end);
129 if (is_hugepage_only_range(tlb->mm, floor, HPAGE_SIZE)) 128 if (REGION_NUMBER(floor) == RGN_HPAGE)
130 floor = htlbpage_to_page(floor); 129 floor = htlbpage_to_page(floor);
131 if (is_hugepage_only_range(tlb->mm, ceiling, HPAGE_SIZE)) 130 if (REGION_NUMBER(ceiling) == RGN_HPAGE)
132 ceiling = htlbpage_to_page(ceiling); 131 ceiling = htlbpage_to_page(ceiling);
133 132
134 free_pgd_range(tlb, addr, end, floor, ceiling); 133 free_pgd_range(tlb, addr, end, floor, ceiling);
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 08d94e6bfa18..ff4f31fcd330 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -206,7 +206,7 @@ free_initmem (void)
206 (__init_end - __init_begin) >> 10); 206 (__init_end - __init_begin) >> 10);
207} 207}
208 208
209void 209void __init
210free_initrd_mem (unsigned long start, unsigned long end) 210free_initrd_mem (unsigned long start, unsigned long end)
211{ 211{
212 struct page *page; 212 struct page *page;
@@ -261,7 +261,7 @@ free_initrd_mem (unsigned long start, unsigned long end)
261/* 261/*
262 * This installs a clean page in the kernel's page table. 262 * This installs a clean page in the kernel's page table.
263 */ 263 */
264struct page * 264static struct page * __init
265put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot) 265put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
266{ 266{
267 pgd_t *pgd; 267 pgd_t *pgd;
@@ -294,7 +294,7 @@ put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
294 return page; 294 return page;
295} 295}
296 296
297static void 297static void __init
298setup_gate (void) 298setup_gate (void)
299{ 299{
300 struct page *page; 300 struct page *page;
@@ -411,7 +411,7 @@ ia64_mmu_init (void *my_cpu_data)
411 411
412#ifdef CONFIG_VIRTUAL_MEM_MAP 412#ifdef CONFIG_VIRTUAL_MEM_MAP
413 413
414int 414int __init
415create_mem_map_page_table (u64 start, u64 end, void *arg) 415create_mem_map_page_table (u64 start, u64 end, void *arg)
416{ 416{
417 unsigned long address, start_page, end_page; 417 unsigned long address, start_page, end_page;
@@ -519,7 +519,7 @@ ia64_pfn_valid (unsigned long pfn)
519} 519}
520EXPORT_SYMBOL(ia64_pfn_valid); 520EXPORT_SYMBOL(ia64_pfn_valid);
521 521
522int 522int __init
523find_largest_hole (u64 start, u64 end, void *arg) 523find_largest_hole (u64 start, u64 end, void *arg)
524{ 524{
525 u64 *max_gap = arg; 525 u64 *max_gap = arg;
@@ -535,7 +535,7 @@ find_largest_hole (u64 start, u64 end, void *arg)
535} 535}
536#endif /* CONFIG_VIRTUAL_MEM_MAP */ 536#endif /* CONFIG_VIRTUAL_MEM_MAP */
537 537
538static int 538static int __init
539count_reserved_pages (u64 start, u64 end, void *arg) 539count_reserved_pages (u64 start, u64 end, void *arg)
540{ 540{
541 unsigned long num_reserved = 0; 541 unsigned long num_reserved = 0;
@@ -556,7 +556,7 @@ count_reserved_pages (u64 start, u64 end, void *arg)
556 * purposes. 556 * purposes.
557 */ 557 */
558 558
559static int nolwsys; 559static int nolwsys __initdata;
560 560
561static int __init 561static int __init
562nolwsys_setup (char *s) 562nolwsys_setup (char *s)
@@ -567,7 +567,7 @@ nolwsys_setup (char *s)
567 567
568__setup("nolwsys", nolwsys_setup); 568__setup("nolwsys", nolwsys_setup);
569 569
570void 570void __init
571mem_init (void) 571mem_init (void)
572{ 572{
573 long reserved_pages, codesize, datasize, initsize; 573 long reserved_pages, codesize, datasize, initsize;
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c
index 1f11db470d90..e952ef4f6d91 100644
--- a/arch/ia64/sn/kernel/bte.c
+++ b/arch/ia64/sn/kernel/bte.c
@@ -36,7 +36,7 @@ static struct bteinfo_s *bte_if_on_node(nasid_t nasid, int interface)
36 nodepda_t *tmp_nodepda; 36 nodepda_t *tmp_nodepda;
37 37
38 if (nasid_to_cnodeid(nasid) == -1) 38 if (nasid_to_cnodeid(nasid) == -1)
39 return (struct bteinfo_s *)NULL;; 39 return (struct bteinfo_s *)NULL;
40 40
41 tmp_nodepda = NODEPDA(nasid_to_cnodeid(nasid)); 41 tmp_nodepda = NODEPDA(nasid_to_cnodeid(nasid));
42 return &tmp_nodepda->bte_if[interface]; 42 return &tmp_nodepda->bte_if[interface];
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index dfb3f2902379..5101ac462643 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -13,6 +13,8 @@
13#include <asm/sn/sn_feature_sets.h> 13#include <asm/sn/sn_feature_sets.h>
14#include <asm/sn/geo.h> 14#include <asm/sn/geo.h>
15#include <asm/sn/io.h> 15#include <asm/sn/io.h>
16#include <asm/sn/l1.h>
17#include <asm/sn/module.h>
16#include <asm/sn/pcibr_provider.h> 18#include <asm/sn/pcibr_provider.h>
17#include <asm/sn/pcibus_provider_defs.h> 19#include <asm/sn/pcibus_provider_defs.h>
18#include <asm/sn/pcidev.h> 20#include <asm/sn/pcidev.h>
@@ -710,9 +712,36 @@ cnodeid_get_geoid(cnodeid_t cnode)
710 return hubdev->hdi_geoid; 712 return hubdev->hdi_geoid;
711} 713}
712 714
715void sn_generate_path(struct pci_bus *pci_bus, char *address)
716{
717 nasid_t nasid;
718 cnodeid_t cnode;
719 geoid_t geoid;
720 moduleid_t moduleid;
721 u16 bricktype;
722
723 nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base);
724 cnode = nasid_to_cnodeid(nasid);
725 geoid = cnodeid_get_geoid(cnode);
726 moduleid = geo_module(geoid);
727
728 sprintf(address, "module_%c%c%c%c%.2d",
729 '0'+RACK_GET_CLASS(MODULE_GET_RACK(moduleid)),
730 '0'+RACK_GET_GROUP(MODULE_GET_RACK(moduleid)),
731 '0'+RACK_GET_NUM(MODULE_GET_RACK(moduleid)),
732 MODULE_GET_BTCHAR(moduleid), MODULE_GET_BPOS(moduleid));
733
734 /* Tollhouse requires slot id to be displayed */
735 bricktype = MODULE_GET_BTYPE(moduleid);
736 if ((bricktype == L1_BRICKTYPE_191010) ||
737 (bricktype == L1_BRICKTYPE_1932))
738 sprintf(address, "%s^%d", address, geo_slot(geoid));
739}
740
713subsys_initcall(sn_pci_init); 741subsys_initcall(sn_pci_init);
714EXPORT_SYMBOL(sn_pci_fixup_slot); 742EXPORT_SYMBOL(sn_pci_fixup_slot);
715EXPORT_SYMBOL(sn_pci_unfixup_slot); 743EXPORT_SYMBOL(sn_pci_unfixup_slot);
716EXPORT_SYMBOL(sn_pci_controller_fixup); 744EXPORT_SYMBOL(sn_pci_controller_fixup);
717EXPORT_SYMBOL(sn_bus_store_sysdata); 745EXPORT_SYMBOL(sn_bus_store_sysdata);
718EXPORT_SYMBOL(sn_bus_free_sysdata); 746EXPORT_SYMBOL(sn_bus_free_sysdata);
747EXPORT_SYMBOL(sn_generate_path);
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index c373113d073a..c265e02f5036 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -350,9 +350,6 @@ static void force_interrupt(int irq)
350static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info) 350static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
351{ 351{
352 u64 regval; 352 u64 regval;
353 int irr_reg_num;
354 int irr_bit;
355 u64 irr_reg;
356 struct pcidev_info *pcidev_info; 353 struct pcidev_info *pcidev_info;
357 struct pcibus_info *pcibus_info; 354 struct pcibus_info *pcibus_info;
358 355
@@ -373,23 +370,7 @@ static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
373 pdi_pcibus_info; 370 pdi_pcibus_info;
374 regval = pcireg_intr_status_get(pcibus_info); 371 regval = pcireg_intr_status_get(pcibus_info);
375 372
376 irr_reg_num = irq_to_vector(irq) / 64; 373 if (!ia64_get_irr(irq_to_vector(irq))) {
377 irr_bit = irq_to_vector(irq) % 64;
378 switch (irr_reg_num) {
379 case 0:
380 irr_reg = ia64_getreg(_IA64_REG_CR_IRR0);
381 break;
382 case 1:
383 irr_reg = ia64_getreg(_IA64_REG_CR_IRR1);
384 break;
385 case 2:
386 irr_reg = ia64_getreg(_IA64_REG_CR_IRR2);
387 break;
388 case 3:
389 irr_reg = ia64_getreg(_IA64_REG_CR_IRR3);
390 break;
391 }
392 if (!test_bit(irr_bit, &irr_reg)) {
393 if (!test_bit(irq, pda->sn_in_service_ivecs)) { 374 if (!test_bit(irq, pda->sn_in_service_ivecs)) {
394 regval &= 0xff; 375 regval &= 0xff;
395 if (sn_irq_info->irq_int_bit & regval & 376 if (sn_irq_info->irq_int_bit & regval &
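The open-coded switch over _IA64_REG_CR_IRR0..3 is replaced by a call to ia64_get_irr(), a helper introduced alongside this patch. A sketch of what such a helper amounts to, mirroring the logic deleted above:

static inline int ia64_get_irr(unsigned int vector)
{
	unsigned int reg = vector / 64;	/* which of cr.irr0..3 */
	unsigned int bit = vector % 64;	/* which bit inside it */
	u64 irr;

	switch (reg) {
	case 0:  irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
	case 1:  irr = ia64_getreg(_IA64_REG_CR_IRR1); break;
	case 2:  irr = ia64_getreg(_IA64_REG_CR_IRR2); break;
	default: irr = ia64_getreg(_IA64_REG_CR_IRR3); break;
	}
	return !!(irr & (1UL << bit));
}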
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c
index 99cb28e74295..feaf1a6e8101 100644
--- a/arch/ia64/sn/kernel/tiocx.c
+++ b/arch/ia64/sn/kernel/tiocx.c
@@ -369,9 +369,15 @@ static void tio_corelet_reset(nasid_t nasid, int corelet)
369 369
370static int is_fpga_tio(int nasid, int *bt) 370static int is_fpga_tio(int nasid, int *bt)
371{ 371{
372 int ioboard_type; 372 u16 ioboard_type;
373 s64 rc;
373 374
374 ioboard_type = ia64_sn_sysctl_ioboard_get(nasid); 375 rc = ia64_sn_sysctl_ioboard_get(nasid, &ioboard_type);
376 if (rc) {
377 printk(KERN_WARNING "ia64_sn_sysctl_ioboard_get failed: %ld\n",
378 rc);
379 return 0;
380 }
375 381
376 switch (ioboard_type) { 382 switch (ioboard_type) {
377 case L1_BRICKTYPE_SA: 383 case L1_BRICKTYPE_SA:
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
index 98f716bd92f0..ab1211ef0176 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -74,6 +74,22 @@ static int sal_pcibr_error_interrupt(struct pcibus_info *soft)
74 return (int)ret_stuff.v0; 74 return (int)ret_stuff.v0;
75} 75}
76 76
77u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus)
78{
79 s64 rc;
80 u16 ioboard;
81 nasid_t nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base);
82
83 rc = ia64_sn_sysctl_ioboard_get(nasid, &ioboard);
84 if (rc) {
85 printk(KERN_WARNING "ia64_sn_sysctl_ioboard_get failed: %ld\n",
86 rc);
87 return 0;
88 }
89
90 return ioboard;
91}
92
77/* 93/*
78 * PCI Bridge Error interrupt handler. Gets invoked whenever a PCI 94 * PCI Bridge Error interrupt handler. Gets invoked whenever a PCI
79 * bridge sends an error interrupt. 95 * bridge sends an error interrupt.
@@ -255,3 +271,4 @@ pcibr_init_provider(void)
255 271
256EXPORT_SYMBOL_GPL(sal_pcibr_slot_enable); 272EXPORT_SYMBOL_GPL(sal_pcibr_slot_enable);
257EXPORT_SYMBOL_GPL(sal_pcibr_slot_disable); 273EXPORT_SYMBOL_GPL(sal_pcibr_slot_disable);
274EXPORT_SYMBOL_GPL(sn_ioboard_to_pci_bus);
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index 7571a4025529..be0176912968 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -377,7 +377,7 @@ tioca_dma_mapped(struct pci_dev *pdev, u64 paddr, size_t req_size)
377 struct tioca_dmamap *ca_dmamap; 377 struct tioca_dmamap *ca_dmamap;
378 void *map; 378 void *map;
379 unsigned long flags; 379 unsigned long flags;
380 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);; 380 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
381 381
382 tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info; 382 tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
383 tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private; 383 tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private;
diff --git a/arch/m32r/Kconfig.debug b/arch/m32r/Kconfig.debug
index bbf711bab69e..2e1019ddbb22 100644
--- a/arch/m32r/Kconfig.debug
+++ b/arch/m32r/Kconfig.debug
@@ -19,7 +19,7 @@ config DEBUG_STACK_USAGE
19 This option will slow down process creation somewhat. 19 This option will slow down process creation somewhat.
20 20
21config DEBUG_PAGEALLOC 21config DEBUG_PAGEALLOC
22 bool "Page alloc debugging" 22 bool "Debug page memory allocations"
23 depends on DEBUG_KERNEL && BROKEN 23 depends on DEBUG_KERNEL && BROKEN
24 help 24 help
25 Unmap pages from the kernel linear mapping after free_pages(). 25 Unmap pages from the kernel linear mapping after free_pages().
diff --git a/arch/m32r/Makefile b/arch/m32r/Makefile
index 4b3c90ba926c..f219c47d334f 100644
--- a/arch/m32r/Makefile
+++ b/arch/m32r/Makefile
@@ -1,6 +1,9 @@
1# 1#
2# m32r/Makefile 2# m32r/Makefile
3# 3#
4# This file is included by the global makefile so that you can add your own
5# architecture-specific flags and dependencies.
6#
4 7
5LDFLAGS := 8LDFLAGS :=
6OBJCOPYFLAGS := -O binary -R .note -R .comment -S 9OBJCOPYFLAGS := -O binary -R .note -R .comment -S
@@ -39,7 +42,7 @@ drivers-$(CONFIG_OPROFILE) += arch/m32r/oprofile/
39 42
40boot := arch/m32r/boot 43boot := arch/m32r/boot
41 44
42.PHONY: zImage 45PHONY += zImage
43 46
44all: zImage 47all: zImage
45 48
diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c
index 1ce63926a3c0..a4634b06f675 100644
--- a/arch/m32r/kernel/irq.c
+++ b/arch/m32r/kernel/irq.c
@@ -37,9 +37,8 @@ int show_interrupts(struct seq_file *p, void *v)
37 37
38 if (i == 0) { 38 if (i == 0) {
39 seq_printf(p, " "); 39 seq_printf(p, " ");
40 for (j=0; j<NR_CPUS; j++) 40 for_each_online_cpu(j)
41 if (cpu_online(j)) 41 seq_printf(p, "CPU%d ",j);
42 seq_printf(p, "CPU%d ",j);
43 seq_putc(p, '\n'); 42 seq_putc(p, '\n');
44 } 43 }
45 44
@@ -52,9 +51,8 @@ int show_interrupts(struct seq_file *p, void *v)
52#ifndef CONFIG_SMP 51#ifndef CONFIG_SMP
53 seq_printf(p, "%10u ", kstat_irqs(i)); 52 seq_printf(p, "%10u ", kstat_irqs(i));
54#else 53#else
55 for (j = 0; j < NR_CPUS; j++) 54 for_each_online_cpu(j)
56 if (cpu_online(j)) 55 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
57 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
58#endif 56#endif
59 seq_printf(p, " %14s", irq_desc[i].handler->typename); 57 seq_printf(p, " %14s", irq_desc[i].handler->typename);
60 seq_printf(p, " %s", action->name); 58 seq_printf(p, " %s", action->name);
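This is the same mechanical conversion applied to irq.c, smp.c, setup.c and friends throughout the rest of this patch: an explicit 0..NR_CPUS loop guarded by cpu_online() collapses into for_each_online_cpu(). In general form (do_something() stands in for the per-CPU work):

	/* before */
	for (j = 0; j < NR_CPUS; j++)
		if (cpu_online(j))
			do_something(j);

	/* after */
	for_each_online_cpu(j)
		do_something(j);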
diff --git a/arch/m68k/bvme6000/rtc.c b/arch/m68k/bvme6000/rtc.c
index 703cbc6dc9cc..15c16b62dff5 100644
--- a/arch/m68k/bvme6000/rtc.c
+++ b/arch/m68k/bvme6000/rtc.c
@@ -18,6 +18,7 @@
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/mc146818rtc.h> /* For struct rtc_time and ioctls, etc */ 19#include <linux/mc146818rtc.h> /* For struct rtc_time and ioctls, etc */
20#include <linux/smp_lock.h> 20#include <linux/smp_lock.h>
21#include <linux/bcd.h>
21#include <asm/bvme6000hw.h> 22#include <asm/bvme6000hw.h>
22 23
23#include <asm/io.h> 24#include <asm/io.h>
@@ -32,9 +33,6 @@
32 * ioctls. 33 * ioctls.
33 */ 34 */
34 35
35#define BCD2BIN(val) (((val)&15) + ((val)>>4)*10)
36#define BIN2BCD(val) ((((val)/10)<<4) + (val)%10)
37
38static unsigned char days_in_mo[] = 36static unsigned char days_in_mo[] =
39{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; 37{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
40 38
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index 2d8ad0727b6b..33648efb772e 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -77,7 +77,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
77/* 77/*
78 * The idle loop on an m68k.. 78 * The idle loop on an m68k..
79 */ 79 */
80void default_idle(void) 80static void default_idle(void)
81{ 81{
82 if (!need_resched()) 82 if (!need_resched())
83#if defined(MACH_ATARI_ONLY) && !defined(CONFIG_HADES) 83#if defined(MACH_ATARI_ONLY) && !defined(CONFIG_HADES)
diff --git a/arch/m68knommu/kernel/process.c b/arch/m68knommu/kernel/process.c
index 63c117dae0c3..f861755ec88b 100644
--- a/arch/m68knommu/kernel/process.c
+++ b/arch/m68knommu/kernel/process.c
@@ -51,7 +51,7 @@ EXPORT_SYMBOL(pm_power_off);
51/* 51/*
52 * The idle loop on an m68knommu.. 52 * The idle loop on an m68knommu..
53 */ 53 */
54void default_idle(void) 54static void default_idle(void)
55{ 55{
56 local_irq_disable(); 56 local_irq_disable();
57 while (!need_resched()) { 57 while (!need_resched()) {
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 7d93992e462c..3dd76b3d2967 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -68,9 +68,8 @@ int show_interrupts(struct seq_file *p, void *v)
68 68
69 if (i == 0) { 69 if (i == 0) {
70 seq_printf(p, " "); 70 seq_printf(p, " ");
71 for (j=0; j<NR_CPUS; j++) 71 for_each_online_cpu(j)
72 if (cpu_online(j)) 72 seq_printf(p, "CPU%d ",j);
73 seq_printf(p, "CPU%d ",j);
74 seq_putc(p, '\n'); 73 seq_putc(p, '\n');
75 } 74 }
76 75
@@ -83,9 +82,8 @@ int show_interrupts(struct seq_file *p, void *v)
83#ifndef CONFIG_SMP 82#ifndef CONFIG_SMP
84 seq_printf(p, "%10u ", kstat_irqs(i)); 83 seq_printf(p, "%10u ", kstat_irqs(i));
85#else 84#else
86 for (j = 0; j < NR_CPUS; j++) 85 for_each_online_cpu(j)
87 if (cpu_online(j)) 86 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
88 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
89#endif 87#endif
90 seq_printf(p, " %14s", irq_desc[i].handler->typename); 88 seq_printf(p, " %14s", irq_desc[i].handler->typename);
91 seq_printf(p, " %s", action->name); 89 seq_printf(p, " %s", action->name);
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 06ed90752424..78d171bfa331 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -167,8 +167,8 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
167 mb(); 167 mb();
168 168
169 /* Send a message to all other CPUs and wait for them to respond */ 169 /* Send a message to all other CPUs and wait for them to respond */
170 for (i = 0; i < NR_CPUS; i++) 170 for_each_online_cpu(i)
171 if (cpu_online(i) && i != cpu) 171 if (i != cpu)
172 core_send_ipi(i, SMP_CALL_FUNCTION); 172 core_send_ipi(i, SMP_CALL_FUNCTION);
173 173
174 /* Wait for response */ 174 /* Wait for response */
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c
index 0fc3730a294f..5407b784cd01 100644
--- a/arch/mips/kernel/sysirix.c
+++ b/arch/mips/kernel/sysirix.c
@@ -645,27 +645,7 @@ static inline void getitimer_real(struct itimerval *value)
645 645
646asmlinkage unsigned int irix_alarm(unsigned int seconds) 646asmlinkage unsigned int irix_alarm(unsigned int seconds)
647{ 647{
648 struct itimerval it_new, it_old; 648 return alarm_setitimer(seconds);
649 unsigned int oldalarm;
650
651 if (!seconds) {
652 getitimer_real(&it_old);
653 del_timer(&current->real_timer);
654 } else {
655 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
656 it_new.it_value.tv_sec = seconds;
657 it_new.it_value.tv_usec = 0;
658 do_setitimer(ITIMER_REAL, &it_new, &it_old);
659 }
660 oldalarm = it_old.it_value.tv_sec;
661 /*
662 * ehhh.. We can't return 0 if we have an alarm pending ...
663 * And we'd better return too much than too little anyway
664 */
665 if (it_old.it_value.tv_usec)
666 oldalarm++;
667
668 return oldalarm;
669} 649}
670 650
671asmlinkage int irix_pause(void) 651asmlinkage int irix_pause(void)
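alarm_setitimer() is a common helper added in this kernel cycle that factors out the open-coded alarm logic removed above. A sketch of its behaviour, modelled on the deleted code:

unsigned int alarm_setitimer(unsigned int seconds)
{
	struct itimerval it_new, it_old;

	it_new.it_value.tv_sec = seconds;	/* 0 simply disarms the timer */
	it_new.it_value.tv_usec = 0;
	it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;

	do_setitimer(ITIMER_REAL, &it_new, &it_old);

	/* report the old alarm, rounded up so a pending alarm never reads as 0 */
	if (it_old.it_value.tv_usec)
		it_old.it_value.tv_sec++;

	return it_old.it_value.tv_sec;
}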
diff --git a/arch/mips/mm/dma-ip32.c b/arch/mips/mm/dma-ip32.c
index a7e3072ff78d..ec54ed0d26ff 100644
--- a/arch/mips/mm/dma-ip32.c
+++ b/arch/mips/mm/dma-ip32.c
@@ -138,7 +138,7 @@ dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
138 BUG(); 138 BUG();
139 } 139 }
140 140
141 addr = virt_to_phys(ptr)&RAM_OFFSET_MASK;; 141 addr = virt_to_phys(ptr)&RAM_OFFSET_MASK;
142 if(dev == NULL) 142 if(dev == NULL)
143 addr+=CRIME_HI_MEM_BASE; 143 addr+=CRIME_HI_MEM_BASE;
144 return (dma_addr_t)addr; 144 return (dma_addr_t)addr;
@@ -179,7 +179,7 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
179 addr = (unsigned long) page_address(sg->page)+sg->offset; 179 addr = (unsigned long) page_address(sg->page)+sg->offset;
180 if (addr) 180 if (addr)
181 __dma_sync(addr, sg->length, direction); 181 __dma_sync(addr, sg->length, direction);
182 addr = __pa(addr)&RAM_OFFSET_MASK;; 182 addr = __pa(addr)&RAM_OFFSET_MASK;
183 if(dev == NULL) 183 if(dev == NULL)
184 addr += CRIME_HI_MEM_BASE; 184 addr += CRIME_HI_MEM_BASE;
185 sg->dma_address = (dma_addr_t)addr; 185 sg->dma_address = (dma_addr_t)addr;
@@ -199,7 +199,7 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,
199 199
200 addr = (unsigned long) page_address(page) + offset; 200 addr = (unsigned long) page_address(page) + offset;
201 dma_cache_wback_inv(addr, size); 201 dma_cache_wback_inv(addr, size);
202 addr = __pa(addr)&RAM_OFFSET_MASK;; 202 addr = __pa(addr)&RAM_OFFSET_MASK;
203 if(dev == NULL) 203 if(dev == NULL)
204 addr += CRIME_HI_MEM_BASE; 204 addr += CRIME_HI_MEM_BASE;
205 205
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 73e5e52781d8..2854ac4c9be1 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -88,12 +88,9 @@ static inline int find_level(cpuid_t *cpunum, int irq)
88{ 88{
89 int cpu, i; 89 int cpu, i;
90 90
91 for (cpu = 0; cpu <= NR_CPUS; cpu++) { 91 for_each_online_cpu(cpu) {
92 struct slice_data *si = cpu_data[cpu].data; 92 struct slice_data *si = cpu_data[cpu].data;
93 93
94 if (!cpu_online(cpu))
95 continue;
96
97 for (i = BASE_PCI_IRQ; i < LEVELS_PER_SLICE; i++) 94 for (i = BASE_PCI_IRQ; i < LEVELS_PER_SLICE; i++)
98 if (si->level_to_irq[i] == irq) { 95 if (si->level_to_irq[i] == irq) {
99 *cpunum = cpu; 96 *cpunum = cpu;
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index e8dea4177113..0b485ef4be89 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -54,11 +54,6 @@
54#include <asm/uaccess.h> 54#include <asm/uaccess.h>
55#include <asm/unwind.h> 55#include <asm/unwind.h>
56 56
57void default_idle(void)
58{
59 barrier();
60}
61
62/* 57/*
63 * The idle thread. There's no useful work to be 58 * The idle thread. There's no useful work to be
64 * done, so just try to conserve power and have a 59 * done, so just try to conserve power and have a
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 25564b7ca6bb..d6ac1c60a471 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -298,8 +298,8 @@ send_IPI_allbutself(enum ipi_message_type op)
298{ 298{
299 int i; 299 int i;
300 300
301 for (i = 0; i < NR_CPUS; i++) { 301 for_each_online_cpu(i) {
302 if (cpu_online(i) && i != smp_processor_id()) 302 if (i != smp_processor_id())
303 send_IPI_single(i, op); 303 send_IPI_single(i, op);
304 } 304 }
305} 305}
@@ -643,14 +643,13 @@ int sys_cpus(int argc, char **argv)
643 if ( argc == 1 ){ 643 if ( argc == 1 ){
644 644
645#ifdef DUMP_MORE_STATE 645#ifdef DUMP_MORE_STATE
646 for(i=0; i<NR_CPUS; i++) { 646 for_each_online_cpu(i) {
647 int cpus_per_line = 4; 647 int cpus_per_line = 4;
648 if(cpu_online(i)) { 648
649 if (j++ % cpus_per_line) 649 if (j++ % cpus_per_line)
650 printk(" %3d",i); 650 printk(" %3d",i);
651 else 651 else
652 printk("\n %3d",i); 652 printk("\n %3d",i);
653 }
654 } 653 }
655 printk("\n"); 654 printk("\n");
656#else 655#else
@@ -659,9 +658,7 @@ int sys_cpus(int argc, char **argv)
659 } else if((argc==2) && !(strcmp(argv[1],"-l"))) { 658 } else if((argc==2) && !(strcmp(argv[1],"-l"))) {
660 printk("\nCPUSTATE TASK CPUNUM CPUID HARDCPU(HPA)\n"); 659 printk("\nCPUSTATE TASK CPUNUM CPUID HARDCPU(HPA)\n");
661#ifdef DUMP_MORE_STATE 660#ifdef DUMP_MORE_STATE
662 for(i=0;i<NR_CPUS;i++) { 661 for_each_online_cpu(i) {
663 if (!cpu_online(i))
664 continue;
665 if (cpu_data[i].cpuid != NO_PROC_ID) { 662 if (cpu_data[i].cpuid != NO_PROC_ID) {
666 switch(cpu_data[i].state) { 663 switch(cpu_data[i].state) {
667 case STATE_RENDEZVOUS: 664 case STATE_RENDEZVOUS:
@@ -695,9 +692,7 @@ int sys_cpus(int argc, char **argv)
695 } else if ((argc==2) && !(strcmp(argv[1],"-s"))) { 692 } else if ((argc==2) && !(strcmp(argv[1],"-s"))) {
696#ifdef DUMP_MORE_STATE 693#ifdef DUMP_MORE_STATE
697 printk("\nCPUSTATE CPUID\n"); 694 printk("\nCPUSTATE CPUID\n");
698 for (i=0;i<NR_CPUS;i++) { 695 for_each_online_cpu(i) {
699 if (!cpu_online(i))
700 continue;
701 if (cpu_data[i].cpuid != NO_PROC_ID) { 696 if (cpu_data[i].cpuid != NO_PROC_ID) {
702 switch(cpu_data[i].state) { 697 switch(cpu_data[i].state) {
703 case STATE_RENDEZVOUS: 698 case STATE_RENDEZVOUS:
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index a3fc7a23158f..829e017b8a54 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -150,7 +150,7 @@ CPPFLAGS_vmlinux.lds := -Upowerpc
150 150
151BOOT_TARGETS = zImage zImage.initrd znetboot znetboot.initrd vmlinux.sm uImage vmlinux.bin 151BOOT_TARGETS = zImage zImage.initrd znetboot znetboot.initrd vmlinux.sm uImage vmlinux.bin
152 152
153.PHONY: $(BOOT_TARGETS) 153PHONY += $(BOOT_TARGETS)
154 154
155boot := arch/$(ARCH)/boot 155boot := arch/$(ARCH)/boot
156 156
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 24dc8117b822..771a59cbd213 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -135,9 +135,8 @@ skip:
135#ifdef CONFIG_TAU_INT 135#ifdef CONFIG_TAU_INT
136 if (tau_initialized){ 136 if (tau_initialized){
137 seq_puts(p, "TAU: "); 137 seq_puts(p, "TAU: ");
138 for (j = 0; j < NR_CPUS; j++) 138 for_each_online_cpu(j)
139 if (cpu_online(j)) 139 seq_printf(p, "%10u ", tau_interrupts(j));
140 seq_printf(p, "%10u ", tau_interrupts(j));
141 seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n"); 140 seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
142 } 141 }
143#endif 142#endif
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 258039fb3016..cb1fe5878e8b 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -81,9 +81,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
81 81
82void __kprobes arch_remove_kprobe(struct kprobe *p) 82void __kprobes arch_remove_kprobe(struct kprobe *p)
83{ 83{
84 down(&kprobe_mutex); 84 mutex_lock(&kprobe_mutex);
85 free_insn_slot(p->ainsn.insn); 85 free_insn_slot(p->ainsn.insn);
86 up(&kprobe_mutex); 86 mutex_unlock(&kprobe_mutex);
87} 87}
88 88
89static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) 89static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
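kprobe_mutex switches from a semaphore used as a mutex to the real mutex API that entered the kernel in this release. The general shape of the conversion, with generic names rather than the ones in this file:

#include <linux/mutex.h>

static DEFINE_MUTEX(my_lock);		/* was: static DECLARE_MUTEX(my_lock); */

static void touch_shared_state(void)
{
	mutex_lock(&my_lock);		/* was: down(&my_lock); */
	/* ... critical section ... */
	mutex_unlock(&my_lock);		/* was: up(&my_lock);   */
}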
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index be12041c0fc5..c1d62bf11f29 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -162,9 +162,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
162#if defined(CONFIG_SMP) && defined(CONFIG_PPC32) 162#if defined(CONFIG_SMP) && defined(CONFIG_PPC32)
163 unsigned long bogosum = 0; 163 unsigned long bogosum = 0;
164 int i; 164 int i;
165 for (i = 0; i < NR_CPUS; ++i) 165 for_each_online_cpu(i)
166 if (cpu_online(i)) 166 bogosum += loops_per_jiffy;
167 bogosum += loops_per_jiffy;
168 seq_printf(m, "total bogomips\t: %lu.%02lu\n", 167 seq_printf(m, "total bogomips\t: %lu.%02lu\n",
169 bogosum/(500000/HZ), bogosum/(5000/HZ) % 100); 168 bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
170#endif /* CONFIG_SMP && CONFIG_PPC32 */ 169#endif /* CONFIG_SMP && CONFIG_PPC32 */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index db72a92943bf..dc2770df25b3 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -272,9 +272,8 @@ int __init ppc_init(void)
272 if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff); 272 if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff);
273 273
274 /* register CPU devices */ 274 /* register CPU devices */
275 for (i = 0; i < NR_CPUS; i++) 275 for_each_cpu(i)
276 if (cpu_possible(i)) 276 register_cpu(&cpu_devices[i], i, NULL);
277 register_cpu(&cpu_devices[i], i, NULL);
278 277
279 /* call platform init */ 278 /* call platform init */
280 if (ppc_md.init != NULL) { 279 if (ppc_md.init != NULL) {
diff --git a/arch/powerpc/lib/strcase.c b/arch/powerpc/lib/strcase.c
index 36b521091bbc..f8ec1eba3fdd 100644
--- a/arch/powerpc/lib/strcase.c
+++ b/arch/powerpc/lib/strcase.c
@@ -1,4 +1,6 @@
1#include <linux/types.h>
1#include <linux/ctype.h> 2#include <linux/ctype.h>
3#include <linux/string.h>
2 4
3int strcasecmp(const char *s1, const char *s2) 5int strcasecmp(const char *s1, const char *s2)
4{ 6{
@@ -11,7 +13,7 @@ int strcasecmp(const char *s1, const char *s2)
11 return c1 - c2; 13 return c1 - c2;
12} 14}
13 15
14int strncasecmp(const char *s1, const char *s2, int n) 16int strncasecmp(const char *s1, const char *s2, size_t n)
15{ 17{
16 int c1, c2; 18 int c1, c2;
17 19
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 6d64a9bf3474..1065d87fc279 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -191,9 +191,7 @@ static void smp_psurge_message_pass(int target, int msg)
191 if (num_online_cpus() < 2) 191 if (num_online_cpus() < 2)
192 return; 192 return;
193 193
194 for (i = 0; i < NR_CPUS; i++) { 194 for_each_online_cpu(i) {
195 if (!cpu_online(i))
196 continue;
197 if (target == MSG_ALL 195 if (target == MSG_ALL
198 || (target == MSG_ALL_BUT_SELF && i != smp_processor_id()) 196 || (target == MSG_ALL_BUT_SELF && i != smp_processor_id())
199 || target == i) { 197 || target == i) {
diff --git a/arch/ppc/8xx_io/cs4218_tdm.c b/arch/ppc/8xx_io/cs4218_tdm.c
index 49eb2a7e65c0..a892356d5c3b 100644
--- a/arch/ppc/8xx_io/cs4218_tdm.c
+++ b/arch/ppc/8xx_io/cs4218_tdm.c
@@ -126,11 +126,11 @@ static int numReadBufs = 4, readbufSize = 32;
126*/ 126*/
127static volatile cbd_t *rx_base, *rx_cur, *tx_base, *tx_cur; 127static volatile cbd_t *rx_base, *rx_cur, *tx_base, *tx_cur;
128 128
129MODULE_PARM(catchRadius, "i"); 129module_param(catchRadius, int, 0);
130MODULE_PARM(numBufs, "i"); 130module_param(numBufs, int, 0);
131MODULE_PARM(bufSize, "i"); 131module_param(bufSize, int, 0);
132MODULE_PARM(numreadBufs, "i"); 132module_param(numreadBufs, int, 0);
133MODULE_PARM(readbufSize, "i"); 133module_param(readbufSize, int, 0);
134 134
135#define arraysize(x) (sizeof(x)/sizeof(*(x))) 135#define arraysize(x) (sizeof(x)/sizeof(*(x)))
136#define le2be16(x) (((x)<<8 & 0xff00) | ((x)>>8 & 0x00ff)) 136#define le2be16(x) (((x)<<8 & 0xff00) | ((x)>>8 & 0x00ff))
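MODULE_PARM(var, "i") is the obsolete 2.4-era declaration; module_param(name, type, perm) replaces it, the third argument being the sysfs permission bits (0 keeps the parameter out of /sys/module). Generic form, with an illustrative variable rather than one of this driver's:

static int catch_radius = 12;
module_param(catch_radius, int, 0);
MODULE_PARM_DESC(catch_radius, "description of what the parameter tunes");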
diff --git a/arch/ppc/Makefile b/arch/ppc/Makefile
index 98e940beeb3b..9fbdf54ba2be 100644
--- a/arch/ppc/Makefile
+++ b/arch/ppc/Makefile
@@ -82,7 +82,7 @@ drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
82 82
83BOOT_TARGETS = zImage zImage.initrd znetboot znetboot.initrd vmlinux.sm 83BOOT_TARGETS = zImage zImage.initrd znetboot znetboot.initrd vmlinux.sm
84 84
85.PHONY: $(BOOT_TARGETS) 85PHONY += $(BOOT_TARGETS)
86 86
87all: uImage zImage 87all: uImage zImage
88 88
diff --git a/arch/ppc/boot/Makefile b/arch/ppc/boot/Makefile
index f565699a9fe0..84eec0bef93c 100644
--- a/arch/ppc/boot/Makefile
+++ b/arch/ppc/boot/Makefile
@@ -1,3 +1,9 @@
1#
2# arch/ppc/boot/Makefile
3#
4# This file is included by the global makefile so that you can add your own
5# architecture-specific flags and dependencies.
6#
1# This file is subject to the terms and conditions of the GNU General Public 7# This file is subject to the terms and conditions of the GNU General Public
2# License. See the file "COPYING" in the main directory of this archive 8# License. See the file "COPYING" in the main directory of this archive
3# for more details. 9# for more details.
@@ -22,7 +28,7 @@ subdir- += simple openfirmware
22 28
23hostprogs-y := $(addprefix utils/, addnote mknote hack-coff mkprep mkbugboot mktree) 29hostprogs-y := $(addprefix utils/, addnote mknote hack-coff mkprep mkbugboot mktree)
24 30
25.PHONY: $(BOOT_TARGETS) $(bootdir-y) 31PHONY += $(BOOT_TARGETS) $(bootdir-y)
26 32
27$(BOOT_TARGETS): $(bootdir-y) 33$(BOOT_TARGETS): $(bootdir-y)
28 34
diff --git a/arch/ppc/boot/openfirmware/Makefile b/arch/ppc/boot/openfirmware/Makefile
index 2a411ec2e650..66b739743759 100644
--- a/arch/ppc/boot/openfirmware/Makefile
+++ b/arch/ppc/boot/openfirmware/Makefile
@@ -1,5 +1,8 @@
1# Makefile for making bootable images on various OpenFirmware machines. 1# Makefile for making bootable images on various OpenFirmware machines.
2# 2#
3# This file is included by the global makefile so that you can add your own
4# architecture-specific flags and dependencies.
5#
3# Paul Mackerras January 1997 6# Paul Mackerras January 1997
4# XCOFF bootable images for PowerMacs 7# XCOFF bootable images for PowerMacs
5# Geert Uytterhoeven September 1997 8# Geert Uytterhoeven September 1997
@@ -86,7 +89,7 @@ $(images)/zImage.chrp-rs6k $(images)/zImage.initrd.chrp-rs6k: \
86 89
87# The targets used on the make command-line 90# The targets used on the make command-line
88 91
89.PHONY: zImage zImage.initrd 92PHONY += zImage zImage.initrd
90zImage: $(images)/zImage.chrp \ 93zImage: $(images)/zImage.chrp \
91 $(images)/zImage.chrp-rs6k 94 $(images)/zImage.chrp-rs6k
92 @echo ' kernel: $@ is ready ($<)' 95 @echo ' kernel: $@ is ready ($<)'
@@ -96,7 +99,7 @@ zImage.initrd: $(images)/zImage.initrd.chrp \
96 99
97TFTPIMAGE := /tftpboot/zImage 100TFTPIMAGE := /tftpboot/zImage
98 101
99.PHONY: znetboot znetboot.initrd 102PHONY += znetboot znetboot.initrd
100znetboot: $(images)/zImage.chrp 103znetboot: $(images)/zImage.chrp
101 cp $(images)/zImage.chrp $(TFTPIMAGE).chrp$(END) 104 cp $(images)/zImage.chrp $(TFTPIMAGE).chrp$(END)
102 @echo ' kernel: $@ is ready ($<)' 105 @echo ' kernel: $@ is ready ($<)'
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index c08ab432e958..53e9deacee82 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -168,9 +168,8 @@ int show_cpuinfo(struct seq_file *m, void *v)
168 /* Show summary information */ 168 /* Show summary information */
169#ifdef CONFIG_SMP 169#ifdef CONFIG_SMP
170 unsigned long bogosum = 0; 170 unsigned long bogosum = 0;
171 for (i = 0; i < NR_CPUS; ++i) 171 for_each_online_cpu(i)
172 if (cpu_online(i)) 172 bogosum += cpu_data[i].loops_per_jiffy;
173 bogosum += cpu_data[i].loops_per_jiffy;
174 seq_printf(m, "total bogomips\t: %lu.%02lu\n", 173 seq_printf(m, "total bogomips\t: %lu.%02lu\n",
175 bogosum/(500000/HZ), bogosum/(5000/HZ) % 100); 174 bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
176#endif /* CONFIG_SMP */ 175#endif /* CONFIG_SMP */
@@ -712,9 +711,8 @@ int __init ppc_init(void)
712 if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff); 711 if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff);
713 712
714 /* register CPU devices */ 713 /* register CPU devices */
715 for (i = 0; i < NR_CPUS; i++) 714 for_each_cpu(i)
716 if (cpu_possible(i)) 715 register_cpu(&cpu_devices[i], i, NULL);
717 register_cpu(&cpu_devices[i], i, NULL);
718 716
719 /* call platform init */ 717 /* call platform init */
720 if (ppc_md.init != NULL) { 718 if (ppc_md.init != NULL) {
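
The loops rewritten above (and the similar sh, sparc and sparc64 hunks later in this diff) all follow one pattern: an open-coded scan of 0..NR_CPUS-1 guarded by cpu_online()/cpu_possible() becomes for_each_online_cpu()/for_each_cpu(). Below is a self-contained userspace sketch of that idiom; NR_CPUS, online_mask, cpu_online() and the for_each_online_cpu() macro here are simplified stand-ins, not the kernel's actual definitions.

#include <stdio.h>

#define NR_CPUS 8

static unsigned long online_mask = 0x05;        /* pretend CPUs 0 and 2 are online */

static int cpu_online(int cpu)
{
        return (online_mask >> cpu) & 1;
}

/* simplified stand-in for the kernel's for_each_online_cpu() */
#define for_each_online_cpu(cpu) \
        for ((cpu) = 0; (cpu) < NR_CPUS; (cpu)++) \
                if (cpu_online(cpu))

int main(void)
{
        unsigned long loops_per_jiffy[NR_CPUS] = { 100000, 0, 250000 };
        unsigned long bogosum = 0;
        int i;

        /* same shape as the show_cpuinfo() hunk above, minus the NR_CPUS scan */
        for_each_online_cpu(i)
                bogosum += loops_per_jiffy[i];

        printf("summed loops_per_jiffy: %lu\n", bogosum);
        return 0;
}
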
diff --git a/arch/ppc/syslib/ppc85xx_setup.c b/arch/ppc/syslib/ppc85xx_setup.c
index e70b34ee6275..79b7089d7500 100644
--- a/arch/ppc/syslib/ppc85xx_setup.c
+++ b/arch/ppc/syslib/ppc85xx_setup.c
@@ -235,7 +235,7 @@ mpc85xx_setup_pci2(struct pci_controller *hose)
235 (__ilog2(MPC85XX_PCI2_UPPER_MEM - MPC85XX_PCI2_LOWER_MEM + 1) - 1); 235 (__ilog2(MPC85XX_PCI2_UPPER_MEM - MPC85XX_PCI2_LOWER_MEM + 1) - 1);
236 236
237 /* Setup outbound IO windows @ MPC85XX_PCI2_IO_BASE */ 237 /* Setup outbound IO windows @ MPC85XX_PCI2_IO_BASE */
238 pci->potar2 = (MPC85XX_PCI2_LOWER_IO >> 12) & 0x000fffff;; 238 pci->potar2 = (MPC85XX_PCI2_LOWER_IO >> 12) & 0x000fffff;
239 pci->potear2 = 0x00000000; 239 pci->potear2 = 0x00000000;
240 pci->powbar2 = (MPC85XX_PCI2_IO_BASE >> 12) & 0x000fffff; 240 pci->powbar2 = (MPC85XX_PCI2_IO_BASE >> 12) & 0x000fffff;
241 /* Enable, IO R/W */ 241 /* Enable, IO R/W */
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index b7ca5bf9acfc..2b7364ed23bc 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -460,6 +460,8 @@ config PCMCIA
460 460
461source "drivers/base/Kconfig" 461source "drivers/base/Kconfig"
462 462
463source "drivers/connector/Kconfig"
464
463source "drivers/scsi/Kconfig" 465source "drivers/scsi/Kconfig"
464 466
465source "drivers/s390/Kconfig" 467source "drivers/s390/Kconfig"
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index d06a8d71c71d..54d35c130907 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -531,12 +531,11 @@ int appldata_register_ops(struct appldata_ops *ops)
531 P_ERROR("ctl_nr %i already in use!\n", ops->ctl_nr); 531 P_ERROR("ctl_nr %i already in use!\n", ops->ctl_nr);
532 return -EBUSY; 532 return -EBUSY;
533 } 533 }
534 ops->ctl_table = kmalloc(4*sizeof(struct ctl_table), GFP_KERNEL); 534 ops->ctl_table = kzalloc(4*sizeof(struct ctl_table), GFP_KERNEL);
535 if (ops->ctl_table == NULL) { 535 if (ops->ctl_table == NULL) {
536 P_ERROR("Not enough memory for %s ctl_table!\n", ops->name); 536 P_ERROR("Not enough memory for %s ctl_table!\n", ops->name);
537 return -ENOMEM; 537 return -ENOMEM;
538 } 538 }
539 memset(ops->ctl_table, 0, 4*sizeof(struct ctl_table));
540 539
541 spin_lock(&appldata_ops_lock); 540 spin_lock(&appldata_ops_lock);
542 list_for_each(lh, &appldata_ops_list) { 541 list_for_each(lh, &appldata_ops_list) {
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 896d39d0e4ce..06a3fbc12536 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -204,16 +204,13 @@ debug_areas_alloc(int pages_per_area, int nr_areas)
204 goto fail_malloc_areas2; 204 goto fail_malloc_areas2;
205 } 205 }
206 for(j = 0; j < pages_per_area; j++) { 206 for(j = 0; j < pages_per_area; j++) {
207 areas[i][j] = (debug_entry_t*)kmalloc(PAGE_SIZE, 207 areas[i][j] = kzalloc(PAGE_SIZE, GFP_KERNEL);
208 GFP_KERNEL);
209 if(!areas[i][j]) { 208 if(!areas[i][j]) {
210 for(j--; j >=0 ; j--) { 209 for(j--; j >=0 ; j--) {
211 kfree(areas[i][j]); 210 kfree(areas[i][j]);
212 } 211 }
213 kfree(areas[i]); 212 kfree(areas[i]);
214 goto fail_malloc_areas2; 213 goto fail_malloc_areas2;
215 } else {
216 memset(areas[i][j],0,PAGE_SIZE);
217 } 214 }
218 } 215 }
219 } 216 }
@@ -249,14 +246,12 @@ debug_info_alloc(char *name, int pages_per_area, int nr_areas, int buf_size,
249 rc = (debug_info_t*) kmalloc(sizeof(debug_info_t), GFP_KERNEL); 246 rc = (debug_info_t*) kmalloc(sizeof(debug_info_t), GFP_KERNEL);
250 if(!rc) 247 if(!rc)
251 goto fail_malloc_rc; 248 goto fail_malloc_rc;
252 rc->active_entries = (int*)kmalloc(nr_areas * sizeof(int), GFP_KERNEL); 249 rc->active_entries = kcalloc(nr_areas, sizeof(int), GFP_KERNEL);
253 if(!rc->active_entries) 250 if(!rc->active_entries)
254 goto fail_malloc_active_entries; 251 goto fail_malloc_active_entries;
255 memset(rc->active_entries, 0, nr_areas * sizeof(int)); 252 rc->active_pages = kcalloc(nr_areas, sizeof(int), GFP_KERNEL);
256 rc->active_pages = (int*)kmalloc(nr_areas * sizeof(int), GFP_KERNEL);
257 if(!rc->active_pages) 253 if(!rc->active_pages)
258 goto fail_malloc_active_pages; 254 goto fail_malloc_active_pages;
259 memset(rc->active_pages, 0, nr_areas * sizeof(int));
260 if((mode == ALL_AREAS) && (pages_per_area != 0)){ 255 if((mode == ALL_AREAS) && (pages_per_area != 0)){
261 rc->areas = debug_areas_alloc(pages_per_area, nr_areas); 256 rc->areas = debug_areas_alloc(pages_per_area, nr_areas);
262 if(!rc->areas) 257 if(!rc->areas)
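
The two s390 hunks above fold kmalloc()+memset(0) pairs into kzalloc()/kcalloc(), which return memory that is already zeroed (kcalloc additionally guards the count*size multiplication). A minimal userspace sketch of the same simplification, with malloc()/calloc() and a made-up struct entry standing in for the kernel allocators and ctl_table:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry { int id; char name[16]; };

int main(void)
{
        size_t n = 4;

        /* old pattern: allocate, then clear by hand (kmalloc + memset) */
        struct entry *a = malloc(n * sizeof(*a));
        if (!a)
                return 1;
        memset(a, 0, n * sizeof(*a));

        /* new pattern: one zeroing allocation (kzalloc/kcalloc in the kernel) */
        struct entry *b = calloc(n, sizeof(*b));
        if (!b)
                return 1;

        printf("a[0].id=%d b[0].id=%d\n", a[0].id, b[0].id);
        free(a);
        free(b);
        return 0;
}
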
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index da6fbae8df91..99182a415fe7 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -103,7 +103,7 @@ extern void s390_handle_mcck(void);
103/* 103/*
104 * The idle loop on a S390... 104 * The idle loop on a S390...
105 */ 105 */
106void default_idle(void) 106static void default_idle(void)
107{ 107{
108 int cpu, rc; 108 int cpu, rc;
109 109
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 24f62f16c0e5..0a04e4a564b2 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -78,8 +78,6 @@ extern int _text,_etext, _edata, _end;
78 78
79#include <asm/setup.h> 79#include <asm/setup.h>
80 80
81static char command_line[COMMAND_LINE_SIZE] = { 0, };
82
83static struct resource code_resource = { 81static struct resource code_resource = {
84 .name = "Kernel code", 82 .name = "Kernel code",
85 .start = (unsigned long) &_text, 83 .start = (unsigned long) &_text,
@@ -335,63 +333,38 @@ add_memory_hole(unsigned long start, unsigned long end)
335 } 333 }
336} 334}
337 335
338static void __init 336static int __init early_parse_mem(char *p)
339parse_cmdline_early(char **cmdline_p) 337{
338 memory_end = memparse(p, &p);
339 return 0;
340}
341early_param("mem", early_parse_mem);
342
343/*
344 * "ipldelay=XXX[sm]" sets ipl delay in seconds or minutes
345 */
346static int __init early_parse_ipldelay(char *p)
340{ 347{
341 char c = ' ', cn, *to = command_line, *from = COMMAND_LINE;
342 unsigned long delay = 0; 348 unsigned long delay = 0;
343 349
344 /* Save unparsed command line copy for /proc/cmdline */ 350 delay = simple_strtoul(p, &p, 0);
345 memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
346 saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
347 351
348 for (;;) { 352 switch (*p) {
349 /* 353 case 's':
350 * "mem=XXX[kKmM]" sets memsize 354 case 'S':
351 */ 355 delay *= 1000000;
352 if (c == ' ' && strncmp(from, "mem=", 4) == 0) { 356 break;
353 memory_end = simple_strtoul(from+4, &from, 0); 357 case 'm':
354 if ( *from == 'K' || *from == 'k' ) { 358 case 'M':
355 memory_end = memory_end << 10; 359 delay *= 60 * 1000000;
356 from++;
357 } else if ( *from == 'M' || *from == 'm' ) {
358 memory_end = memory_end << 20;
359 from++;
360 }
361 }
362 /*
363 * "ipldelay=XXX[sm]" sets ipl delay in seconds or minutes
364 */
365 if (c == ' ' && strncmp(from, "ipldelay=", 9) == 0) {
366 delay = simple_strtoul(from+9, &from, 0);
367 if (*from == 's' || *from == 'S') {
368 delay = delay*1000000;
369 from++;
370 } else if (*from == 'm' || *from == 'M') {
371 delay = delay*60*1000000;
372 from++;
373 }
374 /* now wait for the requested amount of time */
375 udelay(delay);
376 }
377 cn = *(from++);
378 if (!cn)
379 break;
380 if (cn == '\n')
381 cn = ' '; /* replace newlines with space */
382 if (cn == 0x0d)
383 cn = ' '; /* replace 0x0d with space */
384 if (cn == ' ' && c == ' ')
385 continue; /* remove additional spaces */
386 c = cn;
387 if (to - command_line >= COMMAND_LINE_SIZE)
388 break;
389 *(to++) = c;
390 } 360 }
391 if (c == ' ' && to > command_line) to--; 361
392 *to = '\0'; 362 /* now wait for the requested amount of time */
393 *cmdline_p = command_line; 363 udelay(delay);
364
365 return 0;
394} 366}
367early_param("ipldelay", early_parse_ipldelay);
395 368
396static void __init 369static void __init
397setup_lowcore(void) 370setup_lowcore(void)
@@ -580,9 +553,26 @@ setup_arch(char **cmdline_p)
580 "We are running native (64 bit mode)\n"); 553 "We are running native (64 bit mode)\n");
581#endif /* CONFIG_64BIT */ 554#endif /* CONFIG_64BIT */
582 555
556 /* Save unparsed command line copy for /proc/cmdline */
557 strlcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
558
559 *cmdline_p = COMMAND_LINE;
560 *(*cmdline_p + COMMAND_LINE_SIZE - 1) = '\0';
561
583 ROOT_DEV = Root_RAM0; 562 ROOT_DEV = Root_RAM0;
563
564 init_mm.start_code = PAGE_OFFSET;
565 init_mm.end_code = (unsigned long) &_etext;
566 init_mm.end_data = (unsigned long) &_edata;
567 init_mm.brk = (unsigned long) &_end;
568
569 memory_end = memory_size;
570
571 parse_early_param();
572
584#ifndef CONFIG_64BIT 573#ifndef CONFIG_64BIT
585 memory_end = memory_size & ~0x400000UL; /* align memory end to 4MB */ 574 memory_end &= ~0x400000UL;
575
586 /* 576 /*
587 * We need some free virtual space to be able to do vmalloc. 577 * We need some free virtual space to be able to do vmalloc.
588 * On a machine with 2GB memory we make sure that we have at 578 * On a machine with 2GB memory we make sure that we have at
@@ -591,17 +581,9 @@ setup_arch(char **cmdline_p)
591 if (memory_end > 1920*1024*1024) 581 if (memory_end > 1920*1024*1024)
592 memory_end = 1920*1024*1024; 582 memory_end = 1920*1024*1024;
593#else /* CONFIG_64BIT */ 583#else /* CONFIG_64BIT */
594 memory_end = memory_size & ~0x200000UL; /* detected in head.s */ 584 memory_end &= ~0x200000UL;
595#endif /* CONFIG_64BIT */ 585#endif /* CONFIG_64BIT */
596 586
597 init_mm.start_code = PAGE_OFFSET;
598 init_mm.end_code = (unsigned long) &_etext;
599 init_mm.end_data = (unsigned long) &_edata;
600 init_mm.brk = (unsigned long) &_end;
601
602 parse_cmdline_early(cmdline_p);
603 parse_early_param();
604
605 setup_memory(); 587 setup_memory();
606 setup_resources(); 588 setup_resources();
607 setup_lowcore(); 589 setup_lowcore();
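
The setup.c rewrite above retires the hand-rolled parse_cmdline_early() scanner: "mem=" and "ipldelay=" become early_param() hooks and the generic parse_early_param() does the walking. Below is a rough userspace sketch of that table-driven approach; the early_param_sketch table, parse_early_options() and parse_mem() are invented stand-ins for the kernel's linker-section early_param machinery and memparse().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* stand-in for the kernel's early_param() table (really a linker section) */
struct early_param_sketch {
        const char *name;
        int (*setup)(const char *val);
};

static unsigned long memory_end;

static int parse_mem(const char *val)
{
        char *end;

        memory_end = strtoul(val, &end, 0);
        if (*end == 'K' || *end == 'k')
                memory_end <<= 10;
        else if (*end == 'M' || *end == 'm')
                memory_end <<= 20;
        return 0;
}

static const struct early_param_sketch early_params[] = {
        { "mem", parse_mem },
};

/* stand-in for parse_early_param(): split "name=value" words and dispatch */
static void parse_early_options(const char *cmdline)
{
        char buf[256];
        char *tok;
        size_t i;

        strncpy(buf, cmdline, sizeof(buf) - 1);
        buf[sizeof(buf) - 1] = '\0';

        for (tok = strtok(buf, " "); tok; tok = strtok(NULL, " ")) {
                char *val = strchr(tok, '=');

                if (!val)
                        continue;
                *val++ = '\0';
                for (i = 0; i < sizeof(early_params) / sizeof(early_params[0]); i++)
                        if (!strcmp(tok, early_params[i].name))
                                early_params[i].setup(val);
        }
}

int main(void)
{
        parse_early_options("root=/dev/dasda1 mem=64M");
        printf("memory_end = %lu\n", memory_end);
        return 0;
}
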
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 7dbe00c76c6b..2b8841f85534 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -665,7 +665,9 @@ __cpu_up(unsigned int cpu)
665 cpu_lowcore->current_task = (unsigned long) idle; 665 cpu_lowcore->current_task = (unsigned long) idle;
666 cpu_lowcore->cpu_data.cpu_nr = cpu; 666 cpu_lowcore->cpu_data.cpu_nr = cpu;
667 eieio(); 667 eieio();
668 signal_processor(cpu,sigp_restart); 668
669 while (signal_processor(cpu,sigp_restart) == sigp_busy)
670 udelay(10);
669 671
670 while (!cpu_online(cpu)) 672 while (!cpu_online(cpu))
671 cpu_relax(); 673 cpu_relax();
@@ -799,9 +801,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
799 */ 801 */
800 print_cpu_info(&S390_lowcore.cpu_data); 802 print_cpu_info(&S390_lowcore.cpu_data);
801 803
802 for(i = 0; i < NR_CPUS; i++) { 804 for_each_cpu(i) {
803 if (!cpu_possible(i))
804 continue;
805 lowcore_ptr[i] = (struct _lowcore *) 805 lowcore_ptr[i] = (struct _lowcore *)
806 __get_free_pages(GFP_KERNEL|GFP_DMA, 806 __get_free_pages(GFP_KERNEL|GFP_DMA,
807 sizeof(void*) == 8 ? 1 : 0); 807 sizeof(void*) == 8 ? 1 : 0);
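
The __cpu_up() change above keeps reissuing signal_processor(cpu, sigp_restart) while the condition code is sigp_busy, instead of sending the restart order once. A small userspace sketch of that retry-until-accepted loop; fake_signal_processor() and usleep() are stand-ins for the SIGP instruction and udelay(10).

#include <stdio.h>
#include <unistd.h>

enum sigp_cc { sigp_order_code_accepted = 0, sigp_busy = 2 };

/* stand-in for signal_processor(): pretend the target is busy a few times */
static enum sigp_cc fake_signal_processor(int cpu)
{
        static int busy_left = 3;

        if (busy_left-- > 0)
                return sigp_busy;
        printf("restart order accepted by cpu %d\n", cpu);
        return sigp_order_code_accepted;
}

int main(void)
{
        int cpu = 1;

        /* keep poking the CPU until the order is accepted */
        while (fake_signal_processor(cpu) == sigp_busy)
                usleep(10);

        return 0;
}
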
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index b075ab499d05..51596f429235 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -339,19 +339,19 @@ static struct ctl_table cmm_table[] = {
339 { 339 {
340 .ctl_name = VM_CMM_PAGES, 340 .ctl_name = VM_CMM_PAGES,
341 .procname = "cmm_pages", 341 .procname = "cmm_pages",
342 .mode = 0600, 342 .mode = 0644,
343 .proc_handler = &cmm_pages_handler, 343 .proc_handler = &cmm_pages_handler,
344 }, 344 },
345 { 345 {
346 .ctl_name = VM_CMM_TIMED_PAGES, 346 .ctl_name = VM_CMM_TIMED_PAGES,
347 .procname = "cmm_timed_pages", 347 .procname = "cmm_timed_pages",
348 .mode = 0600, 348 .mode = 0644,
349 .proc_handler = &cmm_pages_handler, 349 .proc_handler = &cmm_pages_handler,
350 }, 350 },
351 { 351 {
352 .ctl_name = VM_CMM_TIMEOUT, 352 .ctl_name = VM_CMM_TIMEOUT,
353 .procname = "cmm_timeout", 353 .procname = "cmm_timeout",
354 .mode = 0600, 354 .mode = 0644,
355 .proc_handler = &cmm_timeout_handler, 355 .proc_handler = &cmm_timeout_handler,
356 }, 356 },
357 { .ctl_name = 0 } 357 { .ctl_name = 0 }
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 08c9515c4806..c72e17a96eed 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -172,7 +172,7 @@ include/asm-sh/.mach: $(wildcard include/config/sh/*.h) include/config/MARKER
172 172
173archprepare: maketools include/asm-sh/.cpu include/asm-sh/.mach 173archprepare: maketools include/asm-sh/.cpu include/asm-sh/.mach
174 174
175.PHONY: maketools FORCE 175PHONY += maketools FORCE
176maketools: include/linux/version.h FORCE 176maketools: include/linux/version.h FORCE
177 $(Q)$(MAKE) $(build)=arch/sh/tools include/asm-sh/machtypes.h 177 $(Q)$(MAKE) $(build)=arch/sh/tools include/asm-sh/machtypes.h
178 178
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 6883c00728cb..b56e79632f24 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -35,9 +35,8 @@ int show_interrupts(struct seq_file *p, void *v)
35 35
36 if (i == 0) { 36 if (i == 0) {
37 seq_puts(p, " "); 37 seq_puts(p, " ");
38 for (j=0; j<NR_CPUS; j++) 38 for_each_online_cpu(j)
39 if (cpu_online(j)) 39 seq_printf(p, "CPU%d ",j);
40 seq_printf(p, "CPU%d ",j);
41 seq_putc(p, '\n'); 40 seq_putc(p, '\n');
42 } 41 }
43 42
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index 9fd1723e6219..22dc9c21201d 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -19,7 +19,6 @@
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/pm.h> 20#include <linux/pm.h>
21#include <linux/ptrace.h> 21#include <linux/ptrace.h>
22#include <linux/platform.h>
23#include <linux/kallsyms.h> 22#include <linux/kallsyms.h>
24#include <linux/kexec.h> 23#include <linux/kexec.h>
25 24
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index a067a34e0b64..c0e79843f580 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -404,9 +404,8 @@ static int __init topology_init(void)
404{ 404{
405 int cpu_id; 405 int cpu_id;
406 406
407 for (cpu_id = 0; cpu_id < NR_CPUS; cpu_id++) 407 for_each_cpu(cpu_id)
408 if (cpu_possible(cpu_id)) 408 register_cpu(&cpu[cpu_id], cpu_id, NULL);
409 register_cpu(&cpu[cpu_id], cpu_id, NULL);
410 409
411 return 0; 410 return 0;
412} 411}
diff --git a/arch/sh64/kernel/irq.c b/arch/sh64/kernel/irq.c
index 9fc2b71dbd84..d69879c0e063 100644
--- a/arch/sh64/kernel/irq.c
+++ b/arch/sh64/kernel/irq.c
@@ -53,9 +53,8 @@ int show_interrupts(struct seq_file *p, void *v)
53 53
54 if (i == 0) { 54 if (i == 0) {
55 seq_puts(p, " "); 55 seq_puts(p, " ");
56 for (j=0; j<NR_CPUS; j++) 56 for_each_online_cpu(j)
57 if (cpu_online(j)) 57 seq_printf(p, "CPU%d ",j);
58 seq_printf(p, "CPU%d ",j);
59 seq_putc(p, '\n'); 58 seq_putc(p, '\n');
60 } 59 }
61 60
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index f944b58cdfe7..7c58fc1a39c4 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -23,7 +23,6 @@ menu "General machine setup"
23 23
24config SMP 24config SMP
25 bool "Symmetric multi-processing support (does not work on sun4/sun4c)" 25 bool "Symmetric multi-processing support (does not work on sun4/sun4c)"
26 depends on BROKEN
27 ---help--- 26 ---help---
28 This enables support for systems with more than one CPU. If you have 27 This enables support for systems with more than one CPU. If you have
29 a system with only one CPU, say N. If you have a system with more 28 a system with only one CPU, say N. If you have a system with more
diff --git a/arch/sparc/kernel/irq.c b/arch/sparc/kernel/irq.c
index 410b9a72aba9..aac8af5aae51 100644
--- a/arch/sparc/kernel/irq.c
+++ b/arch/sparc/kernel/irq.c
@@ -154,9 +154,11 @@ void (*sparc_init_timers)(irqreturn_t (*)(int, void *,struct pt_regs *)) =
154struct irqaction static_irqaction[MAX_STATIC_ALLOC]; 154struct irqaction static_irqaction[MAX_STATIC_ALLOC];
155int static_irq_count; 155int static_irq_count;
156 156
157struct irqaction *irq_action[NR_IRQS] = { 157struct {
158 [0 ... (NR_IRQS-1)] = NULL 158 struct irqaction *action;
159}; 159 int flags;
160} sparc_irq[NR_IRQS];
161#define SPARC_IRQ_INPROGRESS 1
160 162
161/* Used to protect the IRQ action lists */ 163/* Used to protect the IRQ action lists */
162DEFINE_SPINLOCK(irq_action_lock); 164DEFINE_SPINLOCK(irq_action_lock);
@@ -177,17 +179,16 @@ int show_interrupts(struct seq_file *p, void *v)
177 } 179 }
178 spin_lock_irqsave(&irq_action_lock, flags); 180 spin_lock_irqsave(&irq_action_lock, flags);
179 if (i < NR_IRQS) { 181 if (i < NR_IRQS) {
180 action = *(i + irq_action); 182 action = sparc_irq[i].action;
181 if (!action) 183 if (!action)
182 goto out_unlock; 184 goto out_unlock;
183 seq_printf(p, "%3d: ", i); 185 seq_printf(p, "%3d: ", i);
184#ifndef CONFIG_SMP 186#ifndef CONFIG_SMP
185 seq_printf(p, "%10u ", kstat_irqs(i)); 187 seq_printf(p, "%10u ", kstat_irqs(i));
186#else 188#else
187 for (j = 0; j < NR_CPUS; j++) { 189 for_each_online_cpu(j) {
188 if (cpu_online(j)) 190 seq_printf(p, "%10u ",
189 seq_printf(p, "%10u ", 191 kstat_cpu(j).irqs[i]);
190 kstat_cpu(cpu_logical_map(j)).irqs[i]);
191 } 192 }
192#endif 193#endif
193 seq_printf(p, " %c %s", 194 seq_printf(p, " %c %s",
@@ -208,7 +209,7 @@ out_unlock:
208void free_irq(unsigned int irq, void *dev_id) 209void free_irq(unsigned int irq, void *dev_id)
209{ 210{
210 struct irqaction * action; 211 struct irqaction * action;
211 struct irqaction * tmp = NULL; 212 struct irqaction **actionp;
212 unsigned long flags; 213 unsigned long flags;
213 unsigned int cpu_irq; 214 unsigned int cpu_irq;
214 215
@@ -226,7 +227,8 @@ void free_irq(unsigned int irq, void *dev_id)
226 227
227 spin_lock_irqsave(&irq_action_lock, flags); 228 spin_lock_irqsave(&irq_action_lock, flags);
228 229
229 action = *(cpu_irq + irq_action); 230 actionp = &sparc_irq[cpu_irq].action;
231 action = *actionp;
230 232
231 if (!action->handler) { 233 if (!action->handler) {
232 printk("Trying to free free IRQ%d\n",irq); 234 printk("Trying to free free IRQ%d\n",irq);
@@ -236,7 +238,7 @@ void free_irq(unsigned int irq, void *dev_id)
236 for (; action; action = action->next) { 238 for (; action; action = action->next) {
237 if (action->dev_id == dev_id) 239 if (action->dev_id == dev_id)
238 break; 240 break;
239 tmp = action; 241 actionp = &action->next;
240 } 242 }
241 if (!action) { 243 if (!action) {
242 printk("Trying to free free shared IRQ%d\n",irq); 244 printk("Trying to free free shared IRQ%d\n",irq);
@@ -255,11 +257,8 @@ void free_irq(unsigned int irq, void *dev_id)
255 irq, action->name); 257 irq, action->name);
256 goto out_unlock; 258 goto out_unlock;
257 } 259 }
258 260
259 if (action && tmp) 261 *actionp = action->next;
260 tmp->next = action->next;
261 else
262 *(cpu_irq + irq_action) = action->next;
263 262
264 spin_unlock_irqrestore(&irq_action_lock, flags); 263 spin_unlock_irqrestore(&irq_action_lock, flags);
265 264
@@ -269,7 +268,7 @@ void free_irq(unsigned int irq, void *dev_id)
269 268
270 kfree(action); 269 kfree(action);
271 270
272 if (!(*(cpu_irq + irq_action))) 271 if (!sparc_irq[cpu_irq].action)
273 disable_irq(irq); 272 disable_irq(irq);
274 273
275out_unlock: 274out_unlock:
@@ -288,8 +287,11 @@ EXPORT_SYMBOL(free_irq);
288#ifdef CONFIG_SMP 287#ifdef CONFIG_SMP
289void synchronize_irq(unsigned int irq) 288void synchronize_irq(unsigned int irq)
290{ 289{
291 printk("synchronize_irq says: implement me!\n"); 290 unsigned int cpu_irq;
292 BUG(); 291
292 cpu_irq = irq & (NR_IRQS - 1);
293 while (sparc_irq[cpu_irq].flags & SPARC_IRQ_INPROGRESS)
294 cpu_relax();
293} 295}
294#endif /* SMP */ 296#endif /* SMP */
295 297
@@ -300,7 +302,7 @@ void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs)
300 unsigned int cpu_irq; 302 unsigned int cpu_irq;
301 303
302 cpu_irq = irq & (NR_IRQS - 1); 304 cpu_irq = irq & (NR_IRQS - 1);
303 action = *(cpu_irq + irq_action); 305 action = sparc_irq[cpu_irq].action;
304 306
305 printk("IO device interrupt, irq = %d\n", irq); 307 printk("IO device interrupt, irq = %d\n", irq);
306 printk("PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc, 308 printk("PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc,
@@ -331,7 +333,8 @@ void handler_irq(int irq, struct pt_regs * regs)
331 if(irq < 10) 333 if(irq < 10)
332 smp4m_irq_rotate(cpu); 334 smp4m_irq_rotate(cpu);
333#endif 335#endif
334 action = *(irq + irq_action); 336 action = sparc_irq[irq].action;
337 sparc_irq[irq].flags |= SPARC_IRQ_INPROGRESS;
335 kstat_cpu(cpu).irqs[irq]++; 338 kstat_cpu(cpu).irqs[irq]++;
336 do { 339 do {
337 if (!action || !action->handler) 340 if (!action || !action->handler)
@@ -339,6 +342,7 @@ void handler_irq(int irq, struct pt_regs * regs)
339 action->handler(irq, action->dev_id, regs); 342 action->handler(irq, action->dev_id, regs);
340 action = action->next; 343 action = action->next;
341 } while (action); 344 } while (action);
345 sparc_irq[irq].flags &= ~SPARC_IRQ_INPROGRESS;
342 enable_pil_irq(irq); 346 enable_pil_irq(irq);
343 irq_exit(); 347 irq_exit();
344} 348}
@@ -390,7 +394,7 @@ int request_fast_irq(unsigned int irq,
390 394
391 spin_lock_irqsave(&irq_action_lock, flags); 395 spin_lock_irqsave(&irq_action_lock, flags);
392 396
393 action = *(cpu_irq + irq_action); 397 action = sparc_irq[cpu_irq].action;
394 if(action) { 398 if(action) {
395 if(action->flags & SA_SHIRQ) 399 if(action->flags & SA_SHIRQ)
396 panic("Trying to register fast irq when already shared.\n"); 400 panic("Trying to register fast irq when already shared.\n");
@@ -453,7 +457,7 @@ int request_fast_irq(unsigned int irq,
453 action->dev_id = NULL; 457 action->dev_id = NULL;
454 action->next = NULL; 458 action->next = NULL;
455 459
456 *(cpu_irq + irq_action) = action; 460 sparc_irq[cpu_irq].action = action;
457 461
458 enable_irq(irq); 462 enable_irq(irq);
459 463
@@ -468,7 +472,7 @@ int request_irq(unsigned int irq,
468 irqreturn_t (*handler)(int, void *, struct pt_regs *), 472 irqreturn_t (*handler)(int, void *, struct pt_regs *),
469 unsigned long irqflags, const char * devname, void *dev_id) 473 unsigned long irqflags, const char * devname, void *dev_id)
470{ 474{
471 struct irqaction * action, *tmp = NULL; 475 struct irqaction * action, **actionp;
472 unsigned long flags; 476 unsigned long flags;
473 unsigned int cpu_irq; 477 unsigned int cpu_irq;
474 int ret; 478 int ret;
@@ -491,20 +495,20 @@ int request_irq(unsigned int irq,
491 495
492 spin_lock_irqsave(&irq_action_lock, flags); 496 spin_lock_irqsave(&irq_action_lock, flags);
493 497
494 action = *(cpu_irq + irq_action); 498 actionp = &sparc_irq[cpu_irq].action;
499 action = *actionp;
495 if (action) { 500 if (action) {
496 if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) { 501 if (!(action->flags & SA_SHIRQ) || !(irqflags & SA_SHIRQ)) {
497 for (tmp = action; tmp->next; tmp = tmp->next);
498 } else {
499 ret = -EBUSY; 502 ret = -EBUSY;
500 goto out_unlock; 503 goto out_unlock;
501 } 504 }
502 if ((action->flags & SA_INTERRUPT) ^ (irqflags & SA_INTERRUPT)) { 505 if ((action->flags & SA_INTERRUPT) != (irqflags & SA_INTERRUPT)) {
503 printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq); 506 printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
504 ret = -EBUSY; 507 ret = -EBUSY;
505 goto out_unlock; 508 goto out_unlock;
506 } 509 }
507 action = NULL; /* Or else! */ 510 for ( ; action; action = *actionp)
511 actionp = &action->next;
508 } 512 }
509 513
510 /* If this is flagged as statically allocated then we use our 514 /* If this is flagged as statically allocated then we use our
@@ -533,10 +537,7 @@ int request_irq(unsigned int irq,
533 action->next = NULL; 537 action->next = NULL;
534 action->dev_id = dev_id; 538 action->dev_id = dev_id;
535 539
536 if (tmp) 540 *actionp = action;
537 tmp->next = action;
538 else
539 *(cpu_irq + irq_action) = action;
540 541
541 enable_irq(irq); 542 enable_irq(irq);
542 543
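
The sparc free_irq()/request_irq() changes above drop the action/tmp bookkeeping in favour of a struct irqaction **actionp cursor, so deleting or appending needs no special case for the list head. A self-contained sketch of that pointer-to-pointer idiom on an ordinary singly linked list; struct action, unlink_action() and push() are illustrative names, not kernel code.

#include <stdio.h>
#include <stdlib.h>

struct action {
        int dev_id;
        struct action *next;
};

/* unlink the entry matching dev_id; *pp is the link to patch, whether it
 * is the list head itself or some entry's ->next field */
static void unlink_action(struct action **pp, int dev_id)
{
        struct action *a;

        for (a = *pp; a; pp = &a->next, a = *pp) {
                if (a->dev_id == dev_id) {
                        *pp = a->next;
                        free(a);
                        return;
                }
        }
}

static struct action *push(struct action *head, int dev_id)
{
        struct action *a = malloc(sizeof(*a));

        if (!a)
                exit(1);
        a->dev_id = dev_id;
        a->next = head;
        return a;
}

int main(void)
{
        struct action *head = NULL, *a;

        head = push(head, 3);
        head = push(head, 2);
        head = push(head, 1);           /* list is now 1 -> 2 -> 3 */

        unlink_action(&head, 1);        /* removing the head needs no special case */
        unlink_action(&head, 3);        /* neither does a tail entry */

        for (a = head; a; a = a->next)
                printf("dev_id %d\n", a->dev_id);

        while (head) {
                a = head->next;
                free(head);
                head = a;
        }
        return 0;
}
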
diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c
index c6e721d8f477..2be812115197 100644
--- a/arch/sparc/kernel/smp.c
+++ b/arch/sparc/kernel/smp.c
@@ -45,6 +45,7 @@ volatile int __cpu_logical_map[NR_CPUS];
45 45
46cpumask_t cpu_online_map = CPU_MASK_NONE; 46cpumask_t cpu_online_map = CPU_MASK_NONE;
47cpumask_t phys_cpu_present_map = CPU_MASK_NONE; 47cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
48cpumask_t smp_commenced_mask = CPU_MASK_NONE;
48 49
49/* The only guaranteed locking primitive available on all Sparc 50/* The only guaranteed locking primitive available on all Sparc
50 * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically 51 * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
@@ -57,11 +58,6 @@ cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
57/* Used to make bitops atomic */ 58/* Used to make bitops atomic */
58unsigned char bitops_spinlock = 0; 59unsigned char bitops_spinlock = 0;
59 60
60volatile unsigned long ipi_count;
61
62volatile int smp_process_available=0;
63volatile int smp_commenced = 0;
64
65void __init smp_store_cpu_info(int id) 61void __init smp_store_cpu_info(int id)
66{ 62{
67 int cpu_node; 63 int cpu_node;
@@ -79,6 +75,22 @@ void __init smp_store_cpu_info(int id)
79 75
80void __init smp_cpus_done(unsigned int max_cpus) 76void __init smp_cpus_done(unsigned int max_cpus)
81{ 77{
78 extern void smp4m_smp_done(void);
79 unsigned long bogosum = 0;
80 int cpu, num;
81
82 for (cpu = 0, num = 0; cpu < NR_CPUS; cpu++)
83 if (cpu_online(cpu)) {
84 num++;
85 bogosum += cpu_data(cpu).udelay_val;
86 }
87
88 printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
89 num, bogosum/(500000/HZ),
90 (bogosum/(5000/HZ))%100);
91
92 BUG_ON(sparc_cpu_model != sun4m);
93 smp4m_smp_done();
82} 94}
83 95
84void cpu_panic(void) 96void cpu_panic(void)
@@ -89,17 +101,6 @@ void cpu_panic(void)
89 101
90struct linux_prom_registers smp_penguin_ctable __initdata = { 0 }; 102struct linux_prom_registers smp_penguin_ctable __initdata = { 0 };
91 103
92void __init smp_boot_cpus(void)
93{
94 extern void smp4m_boot_cpus(void);
95 extern void smp4d_boot_cpus(void);
96
97 if (sparc_cpu_model == sun4m)
98 smp4m_boot_cpus();
99 else
100 smp4d_boot_cpus();
101}
102
103void smp_send_reschedule(int cpu) 104void smp_send_reschedule(int cpu)
104{ 105{
105 /* See sparc64 */ 106 /* See sparc64 */
@@ -243,9 +244,8 @@ int setup_profiling_timer(unsigned int multiplier)
243 return -EINVAL; 244 return -EINVAL;
244 245
245 spin_lock_irqsave(&prof_setup_lock, flags); 246 spin_lock_irqsave(&prof_setup_lock, flags);
246 for(i = 0; i < NR_CPUS; i++) { 247 for_each_cpu(i) {
247 if (cpu_possible(i)) 248 load_profile_irq(i, lvl14_resolution / multiplier);
248 load_profile_irq(i, lvl14_resolution / multiplier);
249 prof_multiplier(i) = multiplier; 249 prof_multiplier(i) = multiplier;
250 } 250 }
251 spin_unlock_irqrestore(&prof_setup_lock, flags); 251 spin_unlock_irqrestore(&prof_setup_lock, flags);
@@ -253,33 +253,73 @@ int setup_profiling_timer(unsigned int multiplier)
253 return 0; 253 return 0;
254} 254}
255 255
256void __init smp_prepare_cpus(unsigned int maxcpus) 256void __init smp_prepare_cpus(unsigned int max_cpus)
257{ 257{
258 extern void smp4m_boot_cpus(void);
259 int i, cpuid, ncpus, extra;
260
261 BUG_ON(sparc_cpu_model != sun4m);
262 printk("Entering SMP Mode...\n");
263
264 ncpus = 1;
265 extra = 0;
266 for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) {
267 if (cpuid == boot_cpu_id)
268 continue;
269 if (cpuid < NR_CPUS && ncpus++ < max_cpus)
270 cpu_set(cpuid, phys_cpu_present_map);
271 else
272 extra++;
273 }
274 if (max_cpus >= NR_CPUS && extra)
275 printk("Warning: NR_CPUS is too low to start all cpus\n");
276
277 smp_store_cpu_info(boot_cpu_id);
278
279 smp4m_boot_cpus();
258} 280}
259 281
260void __devinit smp_prepare_boot_cpu(void) 282void __devinit smp_prepare_boot_cpu(void)
261{ 283{
262 current_thread_info()->cpu = hard_smp_processor_id(); 284 int cpuid = hard_smp_processor_id();
263 cpu_set(smp_processor_id(), cpu_online_map); 285
264 cpu_set(smp_processor_id(), phys_cpu_present_map); 286 if (cpuid >= NR_CPUS) {
287 prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
288 prom_halt();
289 }
290 if (cpuid != 0)
291 printk("boot cpu id != 0, this could work but is untested\n");
292
293 current_thread_info()->cpu = cpuid;
294 cpu_set(cpuid, cpu_online_map);
295 cpu_set(cpuid, phys_cpu_present_map);
265} 296}
266 297
267int __devinit __cpu_up(unsigned int cpu) 298int __devinit __cpu_up(unsigned int cpu)
268{ 299{
269 panic("smp doesn't work\n"); 300 extern int smp4m_boot_one_cpu(int);
301 int ret;
302
303 ret = smp4m_boot_one_cpu(cpu);
304
305 if (!ret) {
306 cpu_set(cpu, smp_commenced_mask);
307 while (!cpu_online(cpu))
308 mb();
309 }
310 return ret;
270} 311}
271 312
272void smp_bogo(struct seq_file *m) 313void smp_bogo(struct seq_file *m)
273{ 314{
274 int i; 315 int i;
275 316
276 for (i = 0; i < NR_CPUS; i++) { 317 for_each_online_cpu(i) {
277 if (cpu_online(i)) 318 seq_printf(m,
278 seq_printf(m, 319 "Cpu%dBogo\t: %lu.%02lu\n",
279 "Cpu%dBogo\t: %lu.%02lu\n", 320 i,
280 i, 321 cpu_data(i).udelay_val/(500000/HZ),
281 cpu_data(i).udelay_val/(500000/HZ), 322 (cpu_data(i).udelay_val/(5000/HZ))%100);
282 (cpu_data(i).udelay_val/(5000/HZ))%100);
283 } 323 }
284} 324}
285 325
@@ -288,8 +328,6 @@ void smp_info(struct seq_file *m)
288 int i; 328 int i;
289 329
290 seq_printf(m, "State:\n"); 330 seq_printf(m, "State:\n");
291 for (i = 0; i < NR_CPUS; i++) { 331 for_each_online_cpu(i)
292 if (cpu_online(i)) 332 seq_printf(m, "CPU%d\t\t: online\n", i);
293 seq_printf(m, "CPU%d\t\t: online\n", i);
294 }
295} 333}
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
index 19b25399d7e4..2c21d7907635 100644
--- a/arch/sparc/kernel/sparc_ksyms.c
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -136,10 +136,6 @@ EXPORT_PER_CPU_SYMBOL(__cpu_data);
136/* IRQ implementation. */ 136/* IRQ implementation. */
137EXPORT_SYMBOL(synchronize_irq); 137EXPORT_SYMBOL(synchronize_irq);
138 138
139/* Misc SMP information */
140EXPORT_SYMBOL(__cpu_number_map);
141EXPORT_SYMBOL(__cpu_logical_map);
142
143/* CPU online map and active count. */ 139/* CPU online map and active count. */
144EXPORT_SYMBOL(cpu_online_map); 140EXPORT_SYMBOL(cpu_online_map);
145EXPORT_SYMBOL(phys_cpu_present_map); 141EXPORT_SYMBOL(phys_cpu_present_map);
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c
index 52621348a56c..ca656d9bd6fd 100644
--- a/arch/sparc/kernel/sun4d_irq.c
+++ b/arch/sparc/kernel/sun4d_irq.c
@@ -54,7 +54,7 @@ unsigned char cpu_leds[32];
54unsigned char sbus_tid[32]; 54unsigned char sbus_tid[32];
55#endif 55#endif
56 56
57extern struct irqaction *irq_action[]; 57static struct irqaction *irq_action[NR_IRQS];
58extern spinlock_t irq_action_lock; 58extern spinlock_t irq_action_lock;
59 59
60struct sbus_action { 60struct sbus_action {
@@ -103,11 +103,9 @@ found_it: seq_printf(p, "%3d: ", i);
103#ifndef CONFIG_SMP 103#ifndef CONFIG_SMP
104 seq_printf(p, "%10u ", kstat_irqs(i)); 104 seq_printf(p, "%10u ", kstat_irqs(i));
105#else 105#else
106 for (x = 0; x < NR_CPUS; x++) { 106 for_each_online_cpu(x)
107 if (cpu_online(x)) 107 seq_printf(p, "%10u ",
108 seq_printf(p, "%10u ", 108 kstat_cpu(cpu_logical_map(x)).irqs[i]);
109 kstat_cpu(cpu_logical_map(x)).irqs[i]);
110 }
111#endif 109#endif
112 seq_printf(p, "%c %s", 110 seq_printf(p, "%c %s",
113 (action->flags & SA_INTERRUPT) ? '+' : ' ', 111 (action->flags & SA_INTERRUPT) ? '+' : ' ',
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index 4219dd2ce3a2..b141b7ee6717 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -46,14 +46,16 @@ extern volatile int smp_processors_ready;
46extern int smp_num_cpus; 46extern int smp_num_cpus;
47static int smp_highest_cpu; 47static int smp_highest_cpu;
48extern volatile unsigned long cpu_callin_map[NR_CPUS]; 48extern volatile unsigned long cpu_callin_map[NR_CPUS];
49extern struct cpuinfo_sparc cpu_data[NR_CPUS]; 49extern cpuinfo_sparc cpu_data[NR_CPUS];
50extern unsigned char boot_cpu_id; 50extern unsigned char boot_cpu_id;
51extern int smp_activated; 51extern int smp_activated;
52extern volatile int __cpu_number_map[NR_CPUS]; 52extern volatile int __cpu_number_map[NR_CPUS];
53extern volatile int __cpu_logical_map[NR_CPUS]; 53extern volatile int __cpu_logical_map[NR_CPUS];
54extern volatile unsigned long ipi_count; 54extern volatile unsigned long ipi_count;
55extern volatile int smp_process_available; 55extern volatile int smp_process_available;
56extern volatile int smp_commenced; 56
57extern cpumask_t smp_commenced_mask;
58
57extern int __smp4d_processor_id(void); 59extern int __smp4d_processor_id(void);
58 60
59/* #define SMP_DEBUG */ 61/* #define SMP_DEBUG */
@@ -136,7 +138,7 @@ void __init smp4d_callin(void)
136 138
137 local_irq_enable(); /* We don't allow PIL 14 yet */ 139 local_irq_enable(); /* We don't allow PIL 14 yet */
138 140
139 while(!smp_commenced) 141 while (!cpu_isset(cpuid, smp_commenced_mask))
140 barrier(); 142 barrier();
141 143
142 spin_lock_irqsave(&sun4d_imsk_lock, flags); 144 spin_lock_irqsave(&sun4d_imsk_lock, flags);
@@ -249,11 +251,9 @@ void __init smp4d_boot_cpus(void)
249 } else { 251 } else {
250 unsigned long bogosum = 0; 252 unsigned long bogosum = 0;
251 253
252 for(i = 0; i < NR_CPUS; i++) { 254 for_each_present_cpu(i) {
253 if (cpu_isset(i, cpu_present_map)) { 255 bogosum += cpu_data(i).udelay_val;
254 bogosum += cpu_data(i).udelay_val; 256 smp_highest_cpu = i;
255 smp_highest_cpu = i;
256 }
257 } 257 }
258 SMP_PRINTK(("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", cpucount + 1, bogosum/(500000/HZ), (bogosum/(5000/HZ))%100)); 258 SMP_PRINTK(("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", cpucount + 1, bogosum/(500000/HZ), (bogosum/(5000/HZ))%100));
259 printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", 259 printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index fbbd8a474c4c..70b375a4c2c2 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -40,15 +40,11 @@ extern ctxd_t *srmmu_ctx_table_phys;
40extern void calibrate_delay(void); 40extern void calibrate_delay(void);
41 41
42extern volatile int smp_processors_ready; 42extern volatile int smp_processors_ready;
43extern int smp_num_cpus;
44extern volatile unsigned long cpu_callin_map[NR_CPUS]; 43extern volatile unsigned long cpu_callin_map[NR_CPUS];
45extern unsigned char boot_cpu_id; 44extern unsigned char boot_cpu_id;
46extern int smp_activated; 45
47extern volatile int __cpu_number_map[NR_CPUS]; 46extern cpumask_t smp_commenced_mask;
48extern volatile int __cpu_logical_map[NR_CPUS]; 47
49extern volatile unsigned long ipi_count;
50extern volatile int smp_process_available;
51extern volatile int smp_commenced;
52extern int __smp4m_processor_id(void); 48extern int __smp4m_processor_id(void);
53 49
54/*#define SMP_DEBUG*/ 50/*#define SMP_DEBUG*/
@@ -77,8 +73,6 @@ void __init smp4m_callin(void)
77 local_flush_cache_all(); 73 local_flush_cache_all();
78 local_flush_tlb_all(); 74 local_flush_tlb_all();
79 75
80 set_irq_udt(boot_cpu_id);
81
82 /* Get our local ticker going. */ 76 /* Get our local ticker going. */
83 smp_setup_percpu_timer(); 77 smp_setup_percpu_timer();
84 78
@@ -95,8 +89,9 @@ void __init smp4m_callin(void)
95 * to call the scheduler code. 89 * to call the scheduler code.
96 */ 90 */
97 /* Allow master to continue. */ 91 /* Allow master to continue. */
98 swap((unsigned long *)&cpu_callin_map[cpuid], 1); 92 swap(&cpu_callin_map[cpuid], 1);
99 93
94 /* XXX: What's up with all the flushes? */
100 local_flush_cache_all(); 95 local_flush_cache_all();
101 local_flush_tlb_all(); 96 local_flush_tlb_all();
102 97
@@ -111,13 +106,14 @@ void __init smp4m_callin(void)
111 atomic_inc(&init_mm.mm_count); 106 atomic_inc(&init_mm.mm_count);
112 current->active_mm = &init_mm; 107 current->active_mm = &init_mm;
113 108
114 while(!smp_commenced) 109 while (!cpu_isset(cpuid, smp_commenced_mask))
115 barrier(); 110 mb();
116
117 local_flush_cache_all();
118 local_flush_tlb_all();
119 111
120 local_irq_enable(); 112 local_irq_enable();
113
114 cpu_set(cpuid, cpu_online_map);
115 /* last one in gets all the interrupts (for testing) */
116 set_irq_udt(boot_cpu_id);
121} 117}
122 118
123extern void init_IRQ(void); 119extern void init_IRQ(void);
@@ -134,104 +130,76 @@ extern unsigned long trapbase_cpu3[];
134 130
135void __init smp4m_boot_cpus(void) 131void __init smp4m_boot_cpus(void)
136{ 132{
137 int cpucount = 0; 133 smp_setup_percpu_timer();
138 int i, mid; 134 local_flush_cache_all();
135}
139 136
140 printk("Entering SMP Mode...\n"); 137int smp4m_boot_one_cpu(int i)
138{
139 extern unsigned long sun4m_cpu_startup;
140 unsigned long *entry = &sun4m_cpu_startup;
141 struct task_struct *p;
142 int timeout;
143 int cpu_node;
141 144
142 local_irq_enable(); 145 cpu_find_by_mid(i, &cpu_node);
143 cpus_clear(cpu_present_map);
144 146
145 for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++) 147 /* Cook up an idler for this guy. */
146 cpu_set(mid, cpu_present_map); 148 p = fork_idle(i);
149 current_set[i] = task_thread_info(p);
150 /* See trampoline.S for details... */
151 entry += ((i-1) * 3);
147 152
148 for(i=0; i < NR_CPUS; i++) { 153 /*
149 __cpu_number_map[i] = -1; 154 * Initialize the contexts table
150 __cpu_logical_map[i] = -1; 155 * Since the call to prom_startcpu() trashes the structure,
156 * we need to re-initialize it for each cpu
157 */
158 smp_penguin_ctable.which_io = 0;
159 smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
160 smp_penguin_ctable.reg_size = 0;
161
162 /* whirrr, whirrr, whirrrrrrrrr... */
163 printk("Starting CPU %d at %p\n", i, entry);
164 local_flush_cache_all();
165 prom_startcpu(cpu_node,
166 &smp_penguin_ctable, 0, (char *)entry);
167
168 /* wheee... it's going... */
169 for(timeout = 0; timeout < 10000; timeout++) {
170 if(cpu_callin_map[i])
171 break;
172 udelay(200);
151 } 173 }
152 174
153 __cpu_number_map[boot_cpu_id] = 0; 175 if (!(cpu_callin_map[i])) {
154 __cpu_logical_map[0] = boot_cpu_id; 176 printk("Processor %d is stuck.\n", i);
155 current_thread_info()->cpu = boot_cpu_id; 177 return -ENODEV;
178 }
156 179
157 smp_store_cpu_info(boot_cpu_id);
158 set_irq_udt(boot_cpu_id);
159 smp_setup_percpu_timer();
160 local_flush_cache_all(); 180 local_flush_cache_all();
161 if(cpu_find_by_instance(1, NULL, NULL)) 181 return 0;
162 return; /* Not an MP box. */ 182}
163 for(i = 0; i < NR_CPUS; i++) { 183
164 if(i == boot_cpu_id) 184void __init smp4m_smp_done(void)
165 continue; 185{
166 186 int i, first;
167 if (cpu_isset(i, cpu_present_map)) { 187 int *prev;
168 extern unsigned long sun4m_cpu_startup; 188
169 unsigned long *entry = &sun4m_cpu_startup; 189 /* setup cpu list for irq rotation */
170 struct task_struct *p; 190 first = 0;
171 int timeout; 191 prev = &first;
172 192 for (i = 0; i < NR_CPUS; i++) {
173 /* Cook up an idler for this guy. */ 193 if (cpu_online(i)) {
174 p = fork_idle(i); 194 *prev = i;
175 cpucount++; 195 prev = &cpu_data(i).next;
176 current_set[i] = task_thread_info(p);
177 /* See trampoline.S for details... */
178 entry += ((i-1) * 3);
179
180 /*
181 * Initialize the contexts table
182 * Since the call to prom_startcpu() trashes the structure,
183 * we need to re-initialize it for each cpu
184 */
185 smp_penguin_ctable.which_io = 0;
186 smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
187 smp_penguin_ctable.reg_size = 0;
188
189 /* whirrr, whirrr, whirrrrrrrrr... */
190 printk("Starting CPU %d at %p\n", i, entry);
191 local_flush_cache_all();
192 prom_startcpu(cpu_data(i).prom_node,
193 &smp_penguin_ctable, 0, (char *)entry);
194
195 /* wheee... it's going... */
196 for(timeout = 0; timeout < 10000; timeout++) {
197 if(cpu_callin_map[i])
198 break;
199 udelay(200);
200 }
201 if(cpu_callin_map[i]) {
202 /* Another "Red Snapper". */
203 __cpu_number_map[i] = i;
204 __cpu_logical_map[i] = i;
205 } else {
206 cpucount--;
207 printk("Processor %d is stuck.\n", i);
208 }
209 }
210 if(!(cpu_callin_map[i])) {
211 cpu_clear(i, cpu_present_map);
212 __cpu_number_map[i] = -1;
213 } 196 }
214 } 197 }
198 *prev = first;
215 local_flush_cache_all(); 199 local_flush_cache_all();
216 if(cpucount == 0) {
217 printk("Error: only one Processor found.\n");
218 cpu_present_map = cpumask_of_cpu(smp_processor_id());
219 } else {
220 unsigned long bogosum = 0;
221 for(i = 0; i < NR_CPUS; i++) {
222 if (cpu_isset(i, cpu_present_map))
223 bogosum += cpu_data(i).udelay_val;
224 }
225 printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
226 cpucount + 1,
227 bogosum/(500000/HZ),
228 (bogosum/(5000/HZ))%100);
229 smp_activated = 1;
230 smp_num_cpus = cpucount + 1;
231 }
232 200
233 /* Free unneeded trap tables */ 201 /* Free unneeded trap tables */
234 if (!cpu_isset(i, cpu_present_map)) { 202 if (!cpu_isset(1, cpu_present_map)) {
235 ClearPageReserved(virt_to_page(trapbase_cpu1)); 203 ClearPageReserved(virt_to_page(trapbase_cpu1));
236 init_page_count(virt_to_page(trapbase_cpu1)); 204 init_page_count(virt_to_page(trapbase_cpu1));
237 free_page((unsigned long)trapbase_cpu1); 205 free_page((unsigned long)trapbase_cpu1);
@@ -265,6 +233,9 @@ void __init smp4m_boot_cpus(void)
265 */ 233 */
266void smp4m_irq_rotate(int cpu) 234void smp4m_irq_rotate(int cpu)
267{ 235{
236 int next = cpu_data(cpu).next;
237 if (next != cpu)
238 set_irq_udt(next);
268} 239}
269 240
270/* Cross calls, in order to work efficiently and atomically do all 241/* Cross calls, in order to work efficiently and atomically do all
@@ -291,7 +262,7 @@ void smp4m_message_pass(int target, int msg, unsigned long data, int wait)
291 262
292 smp_cpu_in_msg[me]++; 263 smp_cpu_in_msg[me]++;
293 if(target == MSG_ALL_BUT_SELF || target == MSG_ALL) { 264 if(target == MSG_ALL_BUT_SELF || target == MSG_ALL) {
294 mask = cpu_present_map; 265 mask = cpu_online_map;
295 if(target == MSG_ALL_BUT_SELF) 266 if(target == MSG_ALL_BUT_SELF)
296 cpu_clear(me, mask); 267 cpu_clear(me, mask);
297 for(i = 0; i < 4; i++) { 268 for(i = 0; i < 4; i++) {
@@ -316,8 +287,8 @@ static struct smp_funcall {
316 unsigned long arg3; 287 unsigned long arg3;
317 unsigned long arg4; 288 unsigned long arg4;
318 unsigned long arg5; 289 unsigned long arg5;
319 unsigned long processors_in[NR_CPUS]; /* Set when ipi entered. */ 290 unsigned long processors_in[SUN4M_NCPUS]; /* Set when ipi entered. */
320 unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */ 291 unsigned long processors_out[SUN4M_NCPUS]; /* Set when ipi exited. */
321} ccall_info; 292} ccall_info;
322 293
323static DEFINE_SPINLOCK(cross_call_lock); 294static DEFINE_SPINLOCK(cross_call_lock);
@@ -326,8 +297,7 @@ static DEFINE_SPINLOCK(cross_call_lock);
326void smp4m_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2, 297void smp4m_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
327 unsigned long arg3, unsigned long arg4, unsigned long arg5) 298 unsigned long arg3, unsigned long arg4, unsigned long arg5)
328{ 299{
329 if(smp_processors_ready) { 300 register int ncpus = SUN4M_NCPUS;
330 register int ncpus = smp_num_cpus;
331 unsigned long flags; 301 unsigned long flags;
332 302
333 spin_lock_irqsave(&cross_call_lock, flags); 303 spin_lock_irqsave(&cross_call_lock, flags);
@@ -342,7 +312,7 @@ void smp4m_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
342 312
343 /* Init receive/complete mapping, plus fire the IPI's off. */ 313 /* Init receive/complete mapping, plus fire the IPI's off. */
344 { 314 {
345 cpumask_t mask = cpu_present_map; 315 cpumask_t mask = cpu_online_map;
346 register int i; 316 register int i;
347 317
348 cpu_clear(smp_processor_id(), mask); 318 cpu_clear(smp_processor_id(), mask);
@@ -375,7 +345,6 @@ void smp4m_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
375 } 345 }
376 346
377 spin_unlock_irqrestore(&cross_call_lock, flags); 347 spin_unlock_irqrestore(&cross_call_lock, flags);
378 }
379} 348}
380 349
381/* Running cross calls. */ 350/* Running cross calls. */
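
smp4m_smp_done() above threads the online CPUs into a ring via cpu_data(i).next, and the new smp4m_irq_rotate() simply points the interrupt UDT at the next CPU in that ring. A userspace sketch of building and walking such a ring; the next_cpu[] array and cpu_online_mask here stand in for cpu_data() and the online map.

#include <stdio.h>

#define NR_CPUS 8

static int cpu_online_mask = 0x0d;      /* pretend CPUs 0, 2 and 3 are online */
static int next_cpu[NR_CPUS];

static int cpu_online(int cpu)
{
        return (cpu_online_mask >> cpu) & 1;
}

/* build the rotation ring the way smp4m_smp_done() does */
static void build_ring(void)
{
        int i, first = 0;
        int *prev = &first;

        for (i = 0; i < NR_CPUS; i++) {
                if (cpu_online(i)) {
                        *prev = i;
                        prev = &next_cpu[i];
                }
        }
        *prev = first;          /* close the ring back to the first online CPU */
}

int main(void)
{
        int cpu = 0, i;

        build_ring();

        /* rotate a few times: each "interrupt" moves on to the next online CPU */
        for (i = 0; i < 6; i++) {
                printf("irq target -> cpu %d\n", cpu);
                cpu = next_cpu[cpu];
        }
        return 0;
}
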
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 27b0e0ba8581..58c65cc8d0d3 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -1302,7 +1302,12 @@ void __init srmmu_paging_init(void)
1302 1302
1303 flush_cache_all(); 1303 flush_cache_all();
1304 srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys); 1304 srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
1305#ifdef CONFIG_SMP
1306 /* Stop from hanging here... */
1307 local_flush_tlb_all();
1308#else
1305 flush_tlb_all(); 1309 flush_tlb_all();
1310#endif
1306 poke_srmmu(); 1311 poke_srmmu();
1307 1312
1308#ifdef CONFIG_SUN_IO 1313#ifdef CONFIG_SUN_IO
@@ -1419,6 +1424,7 @@ static void __init init_vac_layout(void)
1419 max_size = vac_cache_size; 1424 max_size = vac_cache_size;
1420 if(vac_line_size < min_line_size) 1425 if(vac_line_size < min_line_size)
1421 min_line_size = vac_line_size; 1426 min_line_size = vac_line_size;
1427 //FIXME: cpus not contiguous!!
1422 cpu++; 1428 cpu++;
1423 if (cpu >= NR_CPUS || !cpu_online(cpu)) 1429 if (cpu >= NR_CPUS || !cpu_online(cpu))
1424 break; 1430 break;
diff --git a/arch/sparc64/Kconfig.debug b/arch/sparc64/Kconfig.debug
index 3e31be494e54..afe0a7720a26 100644
--- a/arch/sparc64/Kconfig.debug
+++ b/arch/sparc64/Kconfig.debug
@@ -24,7 +24,7 @@ config DEBUG_BOOTMEM
24 bool "Debug BOOTMEM initialization" 24 bool "Debug BOOTMEM initialization"
25 25
26config DEBUG_PAGEALLOC 26config DEBUG_PAGEALLOC
27 bool "Page alloc debugging" 27 bool "Debug page memory allocations"
28 depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND 28 depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND
29 help 29 help
30 Unmap pages from the kernel linear mapping after free_pages(). 30 Unmap pages from the kernel linear mapping after free_pages().
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 8c93ba655b33..11e645c9ec50 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -117,9 +117,7 @@ int show_interrupts(struct seq_file *p, void *v)
117#ifndef CONFIG_SMP 117#ifndef CONFIG_SMP
118 seq_printf(p, "%10u ", kstat_irqs(i)); 118 seq_printf(p, "%10u ", kstat_irqs(i));
119#else 119#else
120 for (j = 0; j < NR_CPUS; j++) { 120 for_each_online_cpu(j) {
121 if (!cpu_online(j))
122 continue;
123 seq_printf(p, "%10u ", 121 seq_printf(p, "%10u ",
124 kstat_cpu(j).irqs[i]); 122 kstat_cpu(j).irqs[i]);
125 } 123 }
@@ -729,7 +727,7 @@ void handler_irq(int irq, struct pt_regs *regs)
729} 727}
730 728
731#ifdef CONFIG_BLK_DEV_FD 729#ifdef CONFIG_BLK_DEV_FD
732extern irqreturn_t floppy_interrupt(int, void *, struct pt_regs *);; 730extern irqreturn_t floppy_interrupt(int, void *, struct pt_regs *);
733 731
734/* XXX No easy way to include asm/floppy.h XXX */ 732/* XXX No easy way to include asm/floppy.h XXX */
735extern unsigned char *pdma_vaddr; 733extern unsigned char *pdma_vaddr;
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 373a701c90a5..7dc28a484268 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -57,25 +57,21 @@ void smp_info(struct seq_file *m)
57 int i; 57 int i;
58 58
59 seq_printf(m, "State:\n"); 59 seq_printf(m, "State:\n");
60 for (i = 0; i < NR_CPUS; i++) { 60 for_each_online_cpu(i)
61 if (cpu_online(i)) 61 seq_printf(m, "CPU%d:\t\tonline\n", i);
62 seq_printf(m,
63 "CPU%d:\t\tonline\n", i);
64 }
65} 62}
66 63
67void smp_bogo(struct seq_file *m) 64void smp_bogo(struct seq_file *m)
68{ 65{
69 int i; 66 int i;
70 67
71 for (i = 0; i < NR_CPUS; i++) 68 for_each_online_cpu(i)
72 if (cpu_online(i)) 69 seq_printf(m,
73 seq_printf(m, 70 "Cpu%dBogo\t: %lu.%02lu\n"
74 "Cpu%dBogo\t: %lu.%02lu\n" 71 "Cpu%dClkTck\t: %016lx\n",
75 "Cpu%dClkTck\t: %016lx\n", 72 i, cpu_data(i).udelay_val / (500000/HZ),
76 i, cpu_data(i).udelay_val / (500000/HZ), 73 (cpu_data(i).udelay_val / (5000/HZ)) % 100,
77 (cpu_data(i).udelay_val / (5000/HZ)) % 100, 74 i, cpu_data(i).clock_tick);
78 i, cpu_data(i).clock_tick);
79} 75}
80 76
81void __init smp_store_cpu_info(int id) 77void __init smp_store_cpu_info(int id)
@@ -1282,7 +1278,7 @@ int setup_profiling_timer(unsigned int multiplier)
1282 return -EINVAL; 1278 return -EINVAL;
1283 1279
1284 spin_lock_irqsave(&prof_setup_lock, flags); 1280 spin_lock_irqsave(&prof_setup_lock, flags);
1285 for (i = 0; i < NR_CPUS; i++) 1281 for_each_cpu(i)
1286 prof_multiplier(i) = multiplier; 1282 prof_multiplier(i) = multiplier;
1287 current_tick_offset = (timer_tick_offset / multiplier); 1283 current_tick_offset = (timer_tick_offset / multiplier);
1288 spin_unlock_irqrestore(&prof_setup_lock, flags); 1284 spin_unlock_irqrestore(&prof_setup_lock, flags);
@@ -1302,6 +1298,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
1302 while (!cpu_find_by_instance(instance, NULL, &mid)) { 1298 while (!cpu_find_by_instance(instance, NULL, &mid)) {
1303 if (mid != boot_cpu_id) { 1299 if (mid != boot_cpu_id) {
1304 cpu_clear(mid, phys_cpu_present_map); 1300 cpu_clear(mid, phys_cpu_present_map);
1301 cpu_clear(mid, cpu_present_map);
1305 if (num_possible_cpus() <= max_cpus) 1302 if (num_possible_cpus() <= max_cpus)
1306 break; 1303 break;
1307 } 1304 }
@@ -1336,8 +1333,10 @@ void __init smp_setup_cpu_possible_map(void)
1336 1333
1337 instance = 0; 1334 instance = 0;
1338 while (!cpu_find_by_instance(instance, NULL, &mid)) { 1335 while (!cpu_find_by_instance(instance, NULL, &mid)) {
1339 if (mid < NR_CPUS) 1336 if (mid < NR_CPUS) {
1340 cpu_set(mid, phys_cpu_present_map); 1337 cpu_set(mid, phys_cpu_present_map);
1338 cpu_set(mid, cpu_present_map);
1339 }
1341 instance++; 1340 instance++;
1342 } 1341 }
1343} 1342}
@@ -1384,10 +1383,8 @@ void __init smp_cpus_done(unsigned int max_cpus)
1384 unsigned long bogosum = 0; 1383 unsigned long bogosum = 0;
1385 int i; 1384 int i;
1386 1385
1387 for (i = 0; i < NR_CPUS; i++) { 1386 for_each_online_cpu(i)
1388 if (cpu_online(i)) 1387 bogosum += cpu_data(i).udelay_val;
1389 bogosum += cpu_data(i).udelay_val;
1390 }
1391 printk("Total of %ld processors activated " 1388 printk("Total of %ld processors activated "
1392 "(%lu.%02lu BogoMIPS).\n", 1389 "(%lu.%02lu BogoMIPS).\n",
1393 (long) num_online_cpus(), 1390 (long) num_online_cpus(),
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index ded63ee9c4fd..1539a8362b6f 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -1828,8 +1828,8 @@ void __flush_tlb_all(void)
1828void online_page(struct page *page) 1828void online_page(struct page *page)
1829{ 1829{
1830 ClearPageReserved(page); 1830 ClearPageReserved(page);
1831 set_page_count(page, 0); 1831 init_page_count(page);
1832 free_cold_page(page); 1832 __free_page(page);
1833 totalram_pages++; 1833 totalram_pages++;
1834 num_physpages++; 1834 num_physpages++;
1835} 1835}
diff --git a/arch/um/Makefile b/arch/um/Makefile
index c58b657f0097..8d14c7a831be 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -1,4 +1,7 @@
1# 1#
2# This file is included by the global makefile so that you can add your own
3# architecture-specific flags and dependencies.
4#
2# Copyright (C) 2002 Jeff Dike (jdike@karaya.com) 5# Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3# Licensed under the GPL 6# Licensed under the GPL
4# 7#
@@ -88,7 +91,7 @@ CONFIG_KERNEL_HALF_GIGS ?= 0
88 91
89SIZE = (($(CONFIG_NEST_LEVEL) + $(CONFIG_KERNEL_HALF_GIGS)) * 0x20000000) 92SIZE = (($(CONFIG_NEST_LEVEL) + $(CONFIG_KERNEL_HALF_GIGS)) * 0x20000000)
90 93
91.PHONY: linux 94PHONY += linux
92 95
93all: linux 96all: linux
94 97
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 27cdf9164422..80c9c18aae94 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -491,6 +491,16 @@ void __init check_bugs(void)
491 check_devanon(); 491 check_devanon();
492} 492}
493 493
494void apply_alternatives(void *start, void *end) 494void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
495{
496}
497
498void alternatives_smp_module_add(struct module *mod, char *name,
499 void *locks, void *locks_end,
500 void *text, void *text_end)
501{
502}
503
504void alternatives_smp_module_del(struct module *mod)
495{ 505{
496} 506}
diff --git a/arch/v850/kernel/process.c b/arch/v850/kernel/process.c
index 621111ddf907..57218c76925c 100644
--- a/arch/v850/kernel/process.c
+++ b/arch/v850/kernel/process.c
@@ -37,7 +37,7 @@ extern void ret_from_fork (void);
37 37
38 38
39/* The idle loop. */ 39/* The idle loop. */
40void default_idle (void) 40static void default_idle (void)
41{ 41{
42 while (! need_resched ()) 42 while (! need_resched ())
43 asm ("halt; nop; nop; nop; nop; nop" ::: "cc"); 43 asm ("halt; nop; nop; nop; nop; nop" ::: "cc");
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index e18eb79bf855..6420baeb8c1f 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -323,7 +323,7 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
323 323
324config NR_CPUS 324config NR_CPUS
325 int "Maximum number of CPUs (2-256)" 325 int "Maximum number of CPUs (2-256)"
326 range 2 256 326 range 2 255
327 depends on SMP 327 depends on SMP
328 default "8" 328 default "8"
329 help 329 help
@@ -364,13 +364,15 @@ config GART_IOMMU
364 select SWIOTLB 364 select SWIOTLB
365 depends on PCI 365 depends on PCI
366 help 366 help
367 Support the IOMMU. Needed to run systems with more than 3GB of memory 367 Support for hardware IOMMU in AMD's Opteron/Athlon64 Processors
368 properly with 32-bit PCI devices that do not support DAC (Double Address 368 and for the bounce buffering software IOMMU.
369 Cycle). The IOMMU can be turned off at runtime with the iommu=off parameter. 369 Needed to run systems with more than 3GB of memory properly with
370 Normally the kernel will take the right choice by itself. 370 32-bit PCI devices that do not support DAC (Double Address Cycle).
371 This option includes a driver for the AMD Opteron/Athlon64 northbridge IOMMU 371 The IOMMU can be turned off at runtime with the iommu=off parameter.
372 and a software emulation used on other systems. 372 Normally the kernel will take the right choice by itself.
373 If unsure, say Y. 373 This option includes a driver for the AMD Opteron/Athlon64 IOMMU
374 northbridge and a software emulation used on other systems without
375 hardware IOMMU. If unsure, say Y.
374 376
375# need this always enabled with GART_IOMMU for the VIA workaround 377# need this always enabled with GART_IOMMU for the VIA workaround
376config SWIOTLB 378config SWIOTLB
@@ -429,10 +431,10 @@ config CRASH_DUMP
429config PHYSICAL_START 431config PHYSICAL_START
430 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP) 432 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
431 default "0x1000000" if CRASH_DUMP 433 default "0x1000000" if CRASH_DUMP
432 default "0x100000" 434 default "0x200000"
433 help 435 help
434 This gives the physical address where the kernel is loaded. Normally 436 This gives the physical address where the kernel is loaded. Normally
435 for regular kernels this value is 0x100000 (1MB). But in the case 437 for regular kernels this value is 0x200000 (2MB). But in the case
436 of kexec on panic the fail safe kernel needs to run at a different 438 of kexec on panic the fail safe kernel needs to run at a different
437 address than the panic-ed kernel. This option is used to set the load 439 address than the panic-ed kernel. This option is used to set the load
438 address for kernels used to capture crash dump on being kexec'ed 440 address for kernels used to capture crash dump on being kexec'ed
@@ -464,6 +466,14 @@ config SECCOMP
464 466
465source kernel/Kconfig.hz 467source kernel/Kconfig.hz
466 468
469config REORDER
470 bool "Function reordering"
471 default n
472 help
473 This option enables the toolchain to reorder functions for a more
474 optimal TLB usage. If you have pretty much any version of binutils,
475 this can increase your kernel build time by roughly one minute.
476
467endmenu 477endmenu
468 478
469# 479#
@@ -512,16 +522,6 @@ config PCI_MMCONFIG
512 bool "Support mmconfig PCI config space access" 522 bool "Support mmconfig PCI config space access"
513 depends on PCI && ACPI 523 depends on PCI && ACPI
514 524
515config UNORDERED_IO
516 bool "Unordered IO mapping access"
517 depends on EXPERIMENTAL
518 help
519 Use unordered stores to access IO memory mappings in device drivers.
520 Still very experimental. When a driver works on IA64/ppc64/pa-risc it should
521 work with this option, but it makes the drivers behave differently
522 from i386. Requires that the driver writer used memory barriers
523 properly.
524
525source "drivers/pci/pcie/Kconfig" 525source "drivers/pci/pcie/Kconfig"
526 526
527source "drivers/pci/Kconfig" 527source "drivers/pci/Kconfig"
diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile
index d7fd46479c55..0fbc0283609c 100644
--- a/arch/x86_64/Makefile
+++ b/arch/x86_64/Makefile
@@ -29,12 +29,14 @@ CHECKFLAGS += -D__x86_64__ -m64
29 29
30cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8) 30cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
31cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona) 31cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
32cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
32CFLAGS += $(cflags-y) 33CFLAGS += $(cflags-y)
33 34
34CFLAGS += -m64 35CFLAGS += -m64
35CFLAGS += -mno-red-zone 36CFLAGS += -mno-red-zone
36CFLAGS += -mcmodel=kernel 37CFLAGS += -mcmodel=kernel
37CFLAGS += -pipe 38CFLAGS += -pipe
39cflags-$(CONFIG_REORDER) += -ffunction-sections
38# this makes reading assembly source easier, but produces worse code 40# this makes reading assembly source easier, but produces worse code
39# actually it makes the kernel smaller too. 41# actually it makes the kernel smaller too.
40CFLAGS += -fno-reorder-blocks 42CFLAGS += -fno-reorder-blocks
@@ -67,8 +69,8 @@ drivers-$(CONFIG_OPROFILE) += arch/x86_64/oprofile/
67 69
68boot := arch/x86_64/boot 70boot := arch/x86_64/boot
69 71
70.PHONY: bzImage bzlilo install archmrproper \ 72PHONY += bzImage bzlilo install archmrproper \
71 fdimage fdimage144 fdimage288 archclean 73 fdimage fdimage144 fdimage288 archclean
72 74
73#Default target when executing "make" 75#Default target when executing "make"
74all: bzImage 76all: bzImage
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig
index ce4de61ed85d..566ecc97ee5a 100644
--- a/arch/x86_64/defconfig
+++ b/arch/x86_64/defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.16-rc3-git9 3# Linux kernel version: 2.6.16-git9
4# Sat Feb 18 00:27:03 2006 4# Sat Mar 25 15:18:40 2006
5# 5#
6CONFIG_X86_64=y 6CONFIG_X86_64=y
7CONFIG_64BIT=y 7CONFIG_64BIT=y
@@ -38,6 +38,7 @@ CONFIG_SYSCTL=y
38CONFIG_IKCONFIG=y 38CONFIG_IKCONFIG=y
39CONFIG_IKCONFIG_PROC=y 39CONFIG_IKCONFIG_PROC=y
40# CONFIG_CPUSETS is not set 40# CONFIG_CPUSETS is not set
41# CONFIG_RELAY is not set
41CONFIG_INITRAMFS_SOURCE="" 42CONFIG_INITRAMFS_SOURCE=""
42CONFIG_UID16=y 43CONFIG_UID16=y
43CONFIG_VM86=y 44CONFIG_VM86=y
@@ -79,6 +80,7 @@ CONFIG_STOP_MACHINE=y
79# Block layer 80# Block layer
80# 81#
81CONFIG_LBD=y 82CONFIG_LBD=y
83# CONFIG_BLK_DEV_IO_TRACE is not set
82 84
83# 85#
84# IO Schedulers 86# IO Schedulers
@@ -139,7 +141,6 @@ CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
139CONFIG_NR_CPUS=32 141CONFIG_NR_CPUS=32
140CONFIG_HOTPLUG_CPU=y 142CONFIG_HOTPLUG_CPU=y
141CONFIG_HPET_TIMER=y 143CONFIG_HPET_TIMER=y
142CONFIG_X86_PM_TIMER=y
143CONFIG_HPET_EMULATE_RTC=y 144CONFIG_HPET_EMULATE_RTC=y
144CONFIG_GART_IOMMU=y 145CONFIG_GART_IOMMU=y
145CONFIG_SWIOTLB=y 146CONFIG_SWIOTLB=y
@@ -148,12 +149,13 @@ CONFIG_X86_MCE_INTEL=y
148CONFIG_X86_MCE_AMD=y 149CONFIG_X86_MCE_AMD=y
149# CONFIG_KEXEC is not set 150# CONFIG_KEXEC is not set
150# CONFIG_CRASH_DUMP is not set 151# CONFIG_CRASH_DUMP is not set
151CONFIG_PHYSICAL_START=0x100000 152CONFIG_PHYSICAL_START=0x200000
152CONFIG_SECCOMP=y 153CONFIG_SECCOMP=y
153# CONFIG_HZ_100 is not set 154# CONFIG_HZ_100 is not set
154CONFIG_HZ_250=y 155CONFIG_HZ_250=y
155# CONFIG_HZ_1000 is not set 156# CONFIG_HZ_1000 is not set
156CONFIG_HZ=250 157CONFIG_HZ=250
158# CONFIG_REORDER is not set
157CONFIG_GENERIC_HARDIRQS=y 159CONFIG_GENERIC_HARDIRQS=y
158CONFIG_GENERIC_IRQ_PROBE=y 160CONFIG_GENERIC_IRQ_PROBE=y
159CONFIG_ISA_DMA_API=y 161CONFIG_ISA_DMA_API=y
@@ -189,12 +191,14 @@ CONFIG_ACPI_NUMA=y
189# CONFIG_ACPI_ASUS is not set 191# CONFIG_ACPI_ASUS is not set
190# CONFIG_ACPI_IBM is not set 192# CONFIG_ACPI_IBM is not set
191CONFIG_ACPI_TOSHIBA=y 193CONFIG_ACPI_TOSHIBA=y
192CONFIG_ACPI_BLACKLIST_YEAR=2001 194CONFIG_ACPI_BLACKLIST_YEAR=0
193# CONFIG_ACPI_DEBUG is not set 195# CONFIG_ACPI_DEBUG is not set
194CONFIG_ACPI_EC=y 196CONFIG_ACPI_EC=y
195CONFIG_ACPI_POWER=y 197CONFIG_ACPI_POWER=y
196CONFIG_ACPI_SYSTEM=y 198CONFIG_ACPI_SYSTEM=y
199CONFIG_X86_PM_TIMER=y
197CONFIG_ACPI_CONTAINER=y 200CONFIG_ACPI_CONTAINER=y
201CONFIG_ACPI_HOTPLUG_MEMORY=y
198 202
199# 203#
200# CPU Frequency scaling 204# CPU Frequency scaling
@@ -232,10 +236,8 @@ CONFIG_X86_ACPI_CPUFREQ_PROC_INTF=y
232CONFIG_PCI=y 236CONFIG_PCI=y
233CONFIG_PCI_DIRECT=y 237CONFIG_PCI_DIRECT=y
234CONFIG_PCI_MMCONFIG=y 238CONFIG_PCI_MMCONFIG=y
235CONFIG_UNORDERED_IO=y
236CONFIG_PCIEPORTBUS=y 239CONFIG_PCIEPORTBUS=y
237CONFIG_PCI_MSI=y 240CONFIG_PCI_MSI=y
238# CONFIG_PCI_LEGACY_PROC is not set
239# CONFIG_PCI_DEBUG is not set 241# CONFIG_PCI_DEBUG is not set
240 242
241# 243#
@@ -294,6 +296,7 @@ CONFIG_INET_TCP_DIAG=y
294CONFIG_TCP_CONG_BIC=y 296CONFIG_TCP_CONG_BIC=y
295CONFIG_IPV6=y 297CONFIG_IPV6=y
296# CONFIG_IPV6_PRIVACY is not set 298# CONFIG_IPV6_PRIVACY is not set
299# CONFIG_IPV6_ROUTER_PREF is not set
297# CONFIG_INET6_AH is not set 300# CONFIG_INET6_AH is not set
298# CONFIG_INET6_ESP is not set 301# CONFIG_INET6_ESP is not set
299# CONFIG_INET6_IPCOMP is not set 302# CONFIG_INET6_IPCOMP is not set
@@ -701,6 +704,7 @@ CONFIG_S2IO=m
701# Wireless LAN (non-hamradio) 704# Wireless LAN (non-hamradio)
702# 705#
703# CONFIG_NET_RADIO is not set 706# CONFIG_NET_RADIO is not set
707# CONFIG_NET_WIRELESS_RTNETLINK is not set
704 708
705# 709#
706# Wan interfaces 710# Wan interfaces
@@ -861,6 +865,8 @@ CONFIG_RTC=y
861CONFIG_AGP=y 865CONFIG_AGP=y
862CONFIG_AGP_AMD64=y 866CONFIG_AGP_AMD64=y
863CONFIG_AGP_INTEL=y 867CONFIG_AGP_INTEL=y
868# CONFIG_AGP_SIS is not set
869# CONFIG_AGP_VIA is not set
864# CONFIG_DRM is not set 870# CONFIG_DRM is not set
865# CONFIG_MWAVE is not set 871# CONFIG_MWAVE is not set
866CONFIG_RAW_DRIVER=y 872CONFIG_RAW_DRIVER=y
@@ -907,10 +913,6 @@ CONFIG_HWMON=y
907# CONFIG_IBM_ASM is not set 913# CONFIG_IBM_ASM is not set
908 914
909# 915#
910# Multimedia Capabilities Port drivers
911#
912
913#
914# Multimedia devices 916# Multimedia devices
915# 917#
916# CONFIG_VIDEO_DEV is not set 918# CONFIG_VIDEO_DEV is not set
@@ -974,6 +976,7 @@ CONFIG_SOUND_ICH=y
974# 976#
975CONFIG_USB_ARCH_HAS_HCD=y 977CONFIG_USB_ARCH_HAS_HCD=y
976CONFIG_USB_ARCH_HAS_OHCI=y 978CONFIG_USB_ARCH_HAS_OHCI=y
979CONFIG_USB_ARCH_HAS_EHCI=y
977CONFIG_USB=y 980CONFIG_USB=y
978# CONFIG_USB_DEBUG is not set 981# CONFIG_USB_DEBUG is not set
979 982
@@ -1002,7 +1005,6 @@ CONFIG_USB_UHCI_HCD=y
1002# 1005#
1003# USB Device Class drivers 1006# USB Device Class drivers
1004# 1007#
1005# CONFIG_OBSOLETE_OSS_USB_DRIVER is not set
1006# CONFIG_USB_ACM is not set 1008# CONFIG_USB_ACM is not set
1007CONFIG_USB_PRINTER=y 1009CONFIG_USB_PRINTER=y
1008 1010
@@ -1121,11 +1123,7 @@ CONFIG_USB_MON=y
1121# CONFIG_INFINIBAND is not set 1123# CONFIG_INFINIBAND is not set
1122 1124
1123# 1125#
1124# SN Devices 1126# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
1125#
1126
1127#
1128# EDAC - error detection and reporting (RAS)
1129# 1127#
1130# CONFIG_EDAC is not set 1128# CONFIG_EDAC is not set
1131 1129
@@ -1198,7 +1196,6 @@ CONFIG_TMPFS=y
1198CONFIG_HUGETLBFS=y 1196CONFIG_HUGETLBFS=y
1199CONFIG_HUGETLB_PAGE=y 1197CONFIG_HUGETLB_PAGE=y
1200CONFIG_RAMFS=y 1198CONFIG_RAMFS=y
1201CONFIG_RELAYFS_FS=y
1202# CONFIG_CONFIGFS_FS is not set 1199# CONFIG_CONFIGFS_FS is not set
1203 1200
1204# 1201#
@@ -1321,6 +1318,7 @@ CONFIG_DETECT_SOFTLOCKUP=y
1321CONFIG_DEBUG_FS=y 1318CONFIG_DEBUG_FS=y
1322# CONFIG_DEBUG_VM is not set 1319# CONFIG_DEBUG_VM is not set
1323# CONFIG_FRAME_POINTER is not set 1320# CONFIG_FRAME_POINTER is not set
1321# CONFIG_UNWIND_INFO is not set
1324# CONFIG_FORCED_INLINING is not set 1322# CONFIG_FORCED_INLINING is not set
1325# CONFIG_RCU_TORTURE_TEST is not set 1323# CONFIG_RCU_TORTURE_TEST is not set
1326# CONFIG_DEBUG_RODATA is not set 1324# CONFIG_DEBUG_RODATA is not set
diff --git a/arch/x86_64/ia32/ia32_binfmt.c b/arch/x86_64/ia32/ia32_binfmt.c
index 572b3b28772d..e776139afb20 100644
--- a/arch/x86_64/ia32/ia32_binfmt.c
+++ b/arch/x86_64/ia32/ia32_binfmt.c
@@ -58,7 +58,7 @@ struct elf_phdr;
58 58
59#define USE_ELF_CORE_DUMP 1 59#define USE_ELF_CORE_DUMP 1
60 60
61/* Overwrite elfcore.h */ 61/* Override elfcore.h */
62#define _LINUX_ELFCORE_H 1 62#define _LINUX_ELFCORE_H 1
63typedef unsigned int elf_greg_t; 63typedef unsigned int elf_greg_t;
64 64
diff --git a/arch/x86_64/ia32/sys_ia32.c b/arch/x86_64/ia32/sys_ia32.c
index 2bc55af95419..2b2d029f477c 100644
--- a/arch/x86_64/ia32/sys_ia32.c
+++ b/arch/x86_64/ia32/sys_ia32.c
@@ -430,24 +430,12 @@ put_tv32(struct compat_timeval __user *o, struct timeval *i)
430 return err; 430 return err;
431} 431}
432 432
433extern int do_setitimer(int which, struct itimerval *, struct itimerval *); 433extern unsigned int alarm_setitimer(unsigned int seconds);
434 434
435asmlinkage long 435asmlinkage long
436sys32_alarm(unsigned int seconds) 436sys32_alarm(unsigned int seconds)
437{ 437{
438 struct itimerval it_new, it_old; 438 return alarm_setitimer(seconds);
439 unsigned int oldalarm;
440
441 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
442 it_new.it_value.tv_sec = seconds;
443 it_new.it_value.tv_usec = 0;
444 do_setitimer(ITIMER_REAL, &it_new, &it_old);
445 oldalarm = it_old.it_value.tv_sec;
446 /* ehhh.. We can't return 0 if we have an alarm pending.. */
447 /* And we'd better return too much than too little anyway */
448 if (it_old.it_value.tv_usec)
449 oldalarm++;
450 return oldalarm;
451} 439}
452 440
453/* Translations due to time_t size differences. Which affects all 441/* Translations due to time_t size differences. Which affects all
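The deleted block above is the open-coded version of the new alarm_setitimer() helper. A sketch of what the shared helper presumably does, reconstructed from the removed lines (the real implementation lives in generic kernel code and may differ in detail; struct itimerval and do_setitimer() are assumed from the deleted extern above):

    /* Sketch reconstructed from the deleted sys32_alarm() body. */
    unsigned int alarm_setitimer(unsigned int seconds)
    {
            struct itimerval it_new, it_old;

            it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
            it_new.it_value.tv_sec = seconds;
            it_new.it_value.tv_usec = 0;
            do_setitimer(ITIMER_REAL, &it_new, &it_old);

            /* Never report 0 or too little while an alarm is still pending. */
            if (it_old.it_value.tv_usec)
                    it_old.it_value.tv_sec++;
            return it_old.it_value.tv_sec;
    }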
diff --git a/arch/x86_64/kernel/aperture.c b/arch/x86_64/kernel/aperture.c
index a0f955b9995f..fffd6b0a2fab 100644
--- a/arch/x86_64/kernel/aperture.c
+++ b/arch/x86_64/kernel/aperture.c
@@ -60,7 +60,7 @@ static u32 __init allocate_aperture(void)
60 printk("Cannot allocate aperture memory hole (%p,%uK)\n", 60 printk("Cannot allocate aperture memory hole (%p,%uK)\n",
61 p, aper_size>>10); 61 p, aper_size>>10);
62 if (p) 62 if (p)
63 free_bootmem_node(nd0, (unsigned long)p, aper_size); 63 free_bootmem_node(nd0, __pa(p), aper_size);
64 return 0; 64 return 0;
65 } 65 }
66 printk("Mapping aperture over %d KB of RAM @ %lx\n", 66 printk("Mapping aperture over %d KB of RAM @ %lx\n",
@@ -161,7 +161,7 @@ static __u32 __init search_agp_bridge(u32 *order, int *valid_agp)
161 int num, slot, func; 161 int num, slot, func;
162 162
163 /* Poor man's PCI discovery */ 163 /* Poor man's PCI discovery */
164 for (num = 0; num < 32; num++) { 164 for (num = 0; num < 256; num++) {
165 for (slot = 0; slot < 32; slot++) { 165 for (slot = 0; slot < 32; slot++) {
166 for (func = 0; func < 8; func++) { 166 for (func = 0; func < 8; func++) {
167 u32 class, cap; 167 u32 class, cap;
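Two separate fixes in this file: free_bootmem_node() expects a physical address, while the pointer handed back by the bootmem allocator is a kernel virtual address, hence the __pa(p) conversion; and because a PCI bus number is 8 bits wide, the brute-force scan for an AGP bridge has to cover buses 0-255 rather than only 0-31.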
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index e5b14c57eaa0..d54620147e8e 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -342,6 +342,7 @@ void __init init_bsp_APIC(void)
342void __cpuinit setup_local_APIC (void) 342void __cpuinit setup_local_APIC (void)
343{ 343{
344 unsigned int value, maxlvt; 344 unsigned int value, maxlvt;
345 int i, j;
345 346
346 value = apic_read(APIC_LVR); 347 value = apic_read(APIC_LVR);
347 348
@@ -371,6 +372,25 @@ void __cpuinit setup_local_APIC (void)
371 apic_write(APIC_TASKPRI, value); 372 apic_write(APIC_TASKPRI, value);
372 373
373 /* 374 /*
375 * After a crash, we no longer service interrupts, and a pending
376 * interrupt from the previous kernel might still have its ISR bit set.
377 *
378 * Most probably by now the CPU has serviced that pending interrupt and
379 * it might not have done the ack_APIC_irq() because it thought the
380 * interrupt came from the i8259 as ExtInt. The LAPIC did not get an EOI,
381 * so it does not clear the ISR bit and the CPU thinks it has already serviced
382 * the interrupt. Hence a vector might get locked. It was noticed
383 * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
384 */
385 for (i = APIC_ISR_NR - 1; i >= 0; i--) {
386 value = apic_read(APIC_ISR + i*0x10);
387 for (j = 31; j >= 0; j--) {
388 if (value & (1<<j))
389 ack_APIC_irq();
390 }
391 }
392
393 /*
374 * Now that we are all set up, enable the APIC 394 * Now that we are all set up, enable the APIC
375 */ 395 */
376 value = apic_read(APIC_SPIV); 396 value = apic_read(APIC_SPIV);
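Background for the new loop in setup_local_APIC(): the local APIC exposes its 256-bit in-service register as APIC_ISR_NR (8) 32-bit registers spaced 0x10 apart, so the bit for vector v sits in word v/32 at bit position v%32. Reading each word and issuing one ack_APIC_irq() per set bit sends the EOIs that the crashed kernel never delivered, unsticking any locked vector such as the timer irq mentioned in the comment.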
diff --git a/arch/x86_64/kernel/early_printk.c b/arch/x86_64/kernel/early_printk.c
index 6dffb498ccd7..13af920b6594 100644
--- a/arch/x86_64/kernel/early_printk.c
+++ b/arch/x86_64/kernel/early_printk.c
@@ -17,11 +17,8 @@
17#define VGABASE ((void __iomem *)0xffffffff800b8000UL) 17#define VGABASE ((void __iomem *)0xffffffff800b8000UL)
18#endif 18#endif
19 19
20#define MAX_YPOS max_ypos
21#define MAX_XPOS max_xpos
22
23static int max_ypos = 25, max_xpos = 80; 20static int max_ypos = 25, max_xpos = 80;
24static int current_ypos = 1, current_xpos = 0; 21static int current_ypos = 25, current_xpos = 0;
25 22
26static void early_vga_write(struct console *con, const char *str, unsigned n) 23static void early_vga_write(struct console *con, const char *str, unsigned n)
27{ 24{
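Dropping the MAX_YPOS/MAX_XPOS aliases leaves only the plain variables, and starting current_ypos at 25 (equal to max_ypos) makes the very first early_vga_write() take the scroll path, so early output continues at the bottom of whatever the firmware or boot loader already put on screen instead of overwriting line 1; the "vga" branch of setup_early_printk() below now also seeds current_ypos from SCREEN_INFO.orig_y, the BIOS cursor row.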
@@ -29,26 +26,26 @@ static void early_vga_write(struct console *con, const char *str, unsigned n)
29 int i, k, j; 26 int i, k, j;
30 27
31 while ((c = *str++) != '\0' && n-- > 0) { 28 while ((c = *str++) != '\0' && n-- > 0) {
32 if (current_ypos >= MAX_YPOS) { 29 if (current_ypos >= max_ypos) {
33 /* scroll 1 line up */ 30 /* scroll 1 line up */
34 for (k = 1, j = 0; k < MAX_YPOS; k++, j++) { 31 for (k = 1, j = 0; k < max_ypos; k++, j++) {
35 for (i = 0; i < MAX_XPOS; i++) { 32 for (i = 0; i < max_xpos; i++) {
36 writew(readw(VGABASE + 2*(MAX_XPOS*k + i)), 33 writew(readw(VGABASE+2*(max_xpos*k+i)),
37 VGABASE + 2*(MAX_XPOS*j + i)); 34 VGABASE + 2*(max_xpos*j + i));
38 } 35 }
39 } 36 }
40 for (i = 0; i < MAX_XPOS; i++) 37 for (i = 0; i < max_xpos; i++)
41 writew(0x720, VGABASE + 2*(MAX_XPOS*j + i)); 38 writew(0x720, VGABASE + 2*(max_xpos*j + i));
42 current_ypos = MAX_YPOS-1; 39 current_ypos = max_ypos-1;
43 } 40 }
44 if (c == '\n') { 41 if (c == '\n') {
45 current_xpos = 0; 42 current_xpos = 0;
46 current_ypos++; 43 current_ypos++;
47 } else if (c != '\r') { 44 } else if (c != '\r') {
48 writew(((0x7 << 8) | (unsigned short) c), 45 writew(((0x7 << 8) | (unsigned short) c),
49 VGABASE + 2*(MAX_XPOS*current_ypos + 46 VGABASE + 2*(max_xpos*current_ypos +
50 current_xpos++)); 47 current_xpos++));
51 if (current_xpos >= MAX_XPOS) { 48 if (current_xpos >= max_xpos) {
52 current_xpos = 0; 49 current_xpos = 0;
53 current_ypos++; 50 current_ypos++;
54 } 51 }
@@ -63,7 +60,7 @@ static struct console early_vga_console = {
63 .index = -1, 60 .index = -1,
64}; 61};
65 62
66/* Serial functions loosely based on a similar package from Klaus P. Gerlicher */ 63/* Serial functions loosely based on a similar package from Klaus P. Gerlicher */
67 64
68static int early_serial_base = 0x3f8; /* ttyS0 */ 65static int early_serial_base = 0x3f8; /* ttyS0 */
69 66
@@ -83,30 +80,30 @@ static int early_serial_base = 0x3f8; /* ttyS0 */
83#define DLL 0 /* Divisor Latch Low */ 80#define DLL 0 /* Divisor Latch Low */
84#define DLH 1 /* Divisor latch High */ 81#define DLH 1 /* Divisor latch High */
85 82
86static int early_serial_putc(unsigned char ch) 83static int early_serial_putc(unsigned char ch)
87{ 84{
88 unsigned timeout = 0xffff; 85 unsigned timeout = 0xffff;
89 while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout) 86 while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout)
90 cpu_relax(); 87 cpu_relax();
91 outb(ch, early_serial_base + TXR); 88 outb(ch, early_serial_base + TXR);
92 return timeout ? 0 : -1; 89 return timeout ? 0 : -1;
93} 90}
94 91
95static void early_serial_write(struct console *con, const char *s, unsigned n) 92static void early_serial_write(struct console *con, const char *s, unsigned n)
96{ 93{
97 while (*s && n-- > 0) { 94 while (*s && n-- > 0) {
98 early_serial_putc(*s); 95 early_serial_putc(*s);
99 if (*s == '\n') 96 if (*s == '\n')
100 early_serial_putc('\r'); 97 early_serial_putc('\r');
101 s++; 98 s++;
102 } 99 }
103} 100}
104 101
105#define DEFAULT_BAUD 9600 102#define DEFAULT_BAUD 9600
106 103
107static __init void early_serial_init(char *s) 104static __init void early_serial_init(char *s)
108{ 105{
109 unsigned char c; 106 unsigned char c;
110 unsigned divisor; 107 unsigned divisor;
111 unsigned baud = DEFAULT_BAUD; 108 unsigned baud = DEFAULT_BAUD;
112 char *e; 109 char *e;
@@ -115,7 +112,7 @@ static __init void early_serial_init(char *s)
115 ++s; 112 ++s;
116 113
117 if (*s) { 114 if (*s) {
118 unsigned port; 115 unsigned port;
119 if (!strncmp(s,"0x",2)) { 116 if (!strncmp(s,"0x",2)) {
120 early_serial_base = simple_strtoul(s, &e, 16); 117 early_serial_base = simple_strtoul(s, &e, 16);
121 } else { 118 } else {
@@ -139,16 +136,16 @@ static __init void early_serial_init(char *s)
139 outb(0x3, early_serial_base + MCR); /* DTR + RTS */ 136 outb(0x3, early_serial_base + MCR); /* DTR + RTS */
140 137
141 if (*s) { 138 if (*s) {
142 baud = simple_strtoul(s, &e, 0); 139 baud = simple_strtoul(s, &e, 0);
143 if (baud == 0 || s == e) 140 if (baud == 0 || s == e)
144 baud = DEFAULT_BAUD; 141 baud = DEFAULT_BAUD;
145 } 142 }
146 143
147 divisor = 115200 / baud; 144 divisor = 115200 / baud;
148 c = inb(early_serial_base + LCR); 145 c = inb(early_serial_base + LCR);
149 outb(c | DLAB, early_serial_base + LCR); 146 outb(c | DLAB, early_serial_base + LCR);
150 outb(divisor & 0xff, early_serial_base + DLL); 147 outb(divisor & 0xff, early_serial_base + DLL);
151 outb((divisor >> 8) & 0xff, early_serial_base + DLH); 148 outb((divisor >> 8) & 0xff, early_serial_base + DLH);
152 outb(c & ~DLAB, early_serial_base + LCR); 149 outb(c & ~DLAB, early_serial_base + LCR);
153} 150}
154 151
@@ -205,67 +202,68 @@ struct console *early_console = &early_vga_console;
205static int early_console_initialized = 0; 202static int early_console_initialized = 0;
206 203
207void early_printk(const char *fmt, ...) 204void early_printk(const char *fmt, ...)
208{ 205{
209 char buf[512]; 206 char buf[512];
210 int n; 207 int n;
211 va_list ap; 208 va_list ap;
212 209
213 va_start(ap,fmt); 210 va_start(ap,fmt);
214 n = vscnprintf(buf,512,fmt,ap); 211 n = vscnprintf(buf,512,fmt,ap);
215 early_console->write(early_console,buf,n); 212 early_console->write(early_console,buf,n);
216 va_end(ap); 213 va_end(ap);
217} 214}
218 215
219static int __initdata keep_early; 216static int __initdata keep_early;
220 217
221int __init setup_early_printk(char *opt) 218int __init setup_early_printk(char *opt)
222{ 219{
223 char *space; 220 char *space;
224 char buf[256]; 221 char buf[256];
225 222
226 if (early_console_initialized) 223 if (early_console_initialized)
227 return -1; 224 return -1;
228 225
229 strlcpy(buf,opt,sizeof(buf)); 226 strlcpy(buf,opt,sizeof(buf));
230 space = strchr(buf, ' '); 227 space = strchr(buf, ' ');
231 if (space) 228 if (space)
232 *space = 0; 229 *space = 0;
233 230
234 if (strstr(buf,"keep")) 231 if (strstr(buf,"keep"))
235 keep_early = 1; 232 keep_early = 1;
236 233
237 if (!strncmp(buf, "serial", 6)) { 234 if (!strncmp(buf, "serial", 6)) {
238 early_serial_init(buf + 6); 235 early_serial_init(buf + 6);
239 early_console = &early_serial_console; 236 early_console = &early_serial_console;
240 } else if (!strncmp(buf, "ttyS", 4)) { 237 } else if (!strncmp(buf, "ttyS", 4)) {
241 early_serial_init(buf); 238 early_serial_init(buf);
242 early_console = &early_serial_console; 239 early_console = &early_serial_console;
243 } else if (!strncmp(buf, "vga", 3) 240 } else if (!strncmp(buf, "vga", 3)
244 && SCREEN_INFO.orig_video_isVGA == 1) { 241 && SCREEN_INFO.orig_video_isVGA == 1) {
245 max_xpos = SCREEN_INFO.orig_video_cols; 242 max_xpos = SCREEN_INFO.orig_video_cols;
246 max_ypos = SCREEN_INFO.orig_video_lines; 243 max_ypos = SCREEN_INFO.orig_video_lines;
247 early_console = &early_vga_console; 244 current_ypos = SCREEN_INFO.orig_y;
245 early_console = &early_vga_console;
248 } else if (!strncmp(buf, "simnow", 6)) { 246 } else if (!strncmp(buf, "simnow", 6)) {
249 simnow_init(buf + 6); 247 simnow_init(buf + 6);
250 early_console = &simnow_console; 248 early_console = &simnow_console;
251 keep_early = 1; 249 keep_early = 1;
252 } 250 }
253 early_console_initialized = 1; 251 early_console_initialized = 1;
254 register_console(early_console); 252 register_console(early_console);
255 return 0; 253 return 0;
256} 254}
257 255
258void __init disable_early_printk(void) 256void __init disable_early_printk(void)
259{ 257{
260 if (!early_console_initialized || !early_console) 258 if (!early_console_initialized || !early_console)
261 return; 259 return;
262 if (!keep_early) { 260 if (!keep_early) {
263 printk("disabling early console\n"); 261 printk("disabling early console\n");
264 unregister_console(early_console); 262 unregister_console(early_console);
265 early_console_initialized = 0; 263 early_console_initialized = 0;
266 } else { 264 } else {
267 printk("keeping early console\n"); 265 printk("keeping early console\n");
268 } 266 }
269} 267}
270 268
271__setup("earlyprintk=", setup_early_printk); 269__setup("earlyprintk=", setup_early_printk);
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index 7c10e9009d61..8538bfea30e6 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -553,7 +553,7 @@ iret_label:
553 /* force a signal here? this matches i386 behaviour */ 553 /* force a signal here? this matches i386 behaviour */
554 /* running with kernel gs */ 554 /* running with kernel gs */
555bad_iret: 555bad_iret:
556 movq $-9999,%rdi /* better code? */ 556 movq $11,%rdi /* SIGSEGV */
557 sti 557 sti
558 jmp do_exit 558 jmp do_exit
559 .previous 559 .previous
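Instead of the old -9999 placeholder, the task that hit the unrecoverable iret fault is now terminated via do_exit() with exit code 11, i.e. SIGSEGV, so the failure is reported as a segmentation-fault style death rather than a magic number.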
diff --git a/arch/x86_64/kernel/functionlist b/arch/x86_64/kernel/functionlist
new file mode 100644
index 000000000000..2bcebdc3eedb
--- /dev/null
+++ b/arch/x86_64/kernel/functionlist
@@ -0,0 +1,1286 @@
1*(.text.flush_thread)
2*(.text.check_poison_obj)
3*(.text.copy_page)
4*(.text.__set_personality)
5*(.text.gart_map_sg)
6*(.text.kmem_cache_free)
7*(.text.find_get_page)
8*(.text._raw_spin_lock)
9*(.text.ide_outb)
10*(.text.unmap_vmas)
11*(.text.copy_page_range)
12*(.text.kprobe_handler)
13*(.text.__handle_mm_fault)
14*(.text.__d_lookup)
15*(.text.copy_user_generic)
16*(.text.__link_path_walk)
17*(.text.get_page_from_freelist)
18*(.text.kmem_cache_alloc)
19*(.text.drive_cmd_intr)
20*(.text.ia32_setup_sigcontext)
21*(.text.huge_pte_offset)
22*(.text.do_page_fault)
23*(.text.page_remove_rmap)
24*(.text.release_pages)
25*(.text.ide_end_request)
26*(.text.__mutex_lock_slowpath)
27*(.text.__find_get_block)
28*(.text.kfree)
29*(.text.vfs_read)
30*(.text._raw_spin_unlock)
31*(.text.free_hot_cold_page)
32*(.text.fget_light)
33*(.text.schedule)
34*(.text.memcmp)
35*(.text.touch_atime)
36*(.text.__might_sleep)
37*(.text.__down_read_trylock)
38*(.text.arch_pick_mmap_layout)
39*(.text.find_vma)
40*(.text.__make_request)
41*(.text.do_generic_mapping_read)
42*(.text.mutex_lock_interruptible)
43*(.text.__generic_file_aio_read)
44*(.text._atomic_dec_and_lock)
45*(.text.__wake_up_bit)
46*(.text.add_to_page_cache)
47*(.text.cache_alloc_debugcheck_after)
48*(.text.vm_normal_page)
49*(.text.mutex_debug_check_no_locks_freed)
50*(.text.net_rx_action)
51*(.text.__find_first_zero_bit)
52*(.text.put_page)
53*(.text._raw_read_lock)
54*(.text.__delay)
55*(.text.dnotify_parent)
56*(.text.do_path_lookup)
57*(.text.do_sync_read)
58*(.text.do_lookup)
59*(.text.bit_waitqueue)
60*(.text.file_read_actor)
61*(.text.strncpy_from_user)
62*(.text.__pagevec_lru_add_active)
63*(.text.fget)
64*(.text.dput)
65*(.text.__strnlen_user)
66*(.text.inotify_inode_queue_event)
67*(.text.rw_verify_area)
68*(.text.ide_intr)
69*(.text.inotify_dentry_parent_queue_event)
70*(.text.permission)
71*(.text.memscan)
72*(.text.hpet_rtc_interrupt)
73*(.text.do_mmap_pgoff)
74*(.text.current_fs_time)
75*(.text.vfs_getattr)
76*(.text.kmem_flagcheck)
77*(.text.mark_page_accessed)
78*(.text.free_pages_and_swap_cache)
79*(.text.generic_fillattr)
80*(.text.__block_prepare_write)
81*(.text.__set_page_dirty_nobuffers)
82*(.text.link_path_walk)
83*(.text.find_get_pages_tag)
84*(.text.ide_do_request)
85*(.text.__alloc_pages)
86*(.text.generic_permission)
87*(.text.mod_page_state_offset)
88*(.text.free_pgd_range)
89*(.text.generic_file_buffered_write)
90*(.text.number)
91*(.text.ide_do_rw_disk)
92*(.text.__brelse)
93*(.text.__mod_page_state_offset)
94*(.text.rotate_reclaimable_page)
95*(.text.find_vma_prepare)
96*(.text.find_vma_prev)
97*(.text.lru_cache_add_active)
98*(.text.__kmalloc_track_caller)
99*(.text.smp_invalidate_interrupt)
100*(.text.handle_IRQ_event)
101*(.text.__find_get_block_slow)
102*(.text.do_wp_page)
103*(.text.do_select)
104*(.text.set_user_nice)
105*(.text.sys_read)
106*(.text.do_munmap)
107*(.text.csum_partial)
108*(.text.__do_softirq)
109*(.text.may_open)
110*(.text.getname)
111*(.text.get_empty_filp)
112*(.text.__fput)
113*(.text.remove_mapping)
114*(.text.filp_ctor)
115*(.text.poison_obj)
116*(.text.unmap_region)
117*(.text.test_set_page_writeback)
118*(.text.__do_page_cache_readahead)
119*(.text.sock_def_readable)
120*(.text.ide_outl)
121*(.text.shrink_zone)
122*(.text.rb_insert_color)
123*(.text.get_request)
124*(.text.sys_pread64)
125*(.text.spin_bug)
126*(.text.ide_outsl)
127*(.text.mask_and_ack_8259A)
128*(.text.filemap_nopage)
129*(.text.page_add_file_rmap)
130*(.text.find_lock_page)
131*(.text.tcp_poll)
132*(.text.__mark_inode_dirty)
133*(.text.file_ra_state_init)
134*(.text.generic_file_llseek)
135*(.text.__pagevec_lru_add)
136*(.text.page_cache_readahead)
137*(.text.n_tty_receive_buf)
138*(.text.zonelist_policy)
139*(.text.vma_adjust)
140*(.text.test_clear_page_dirty)
141*(.text.sync_buffer)
142*(.text.do_exit)
143*(.text.__bitmap_weight)
144*(.text.alloc_pages_current)
145*(.text.get_unused_fd)
146*(.text.zone_watermark_ok)
147*(.text.cpuset_update_task_memory_state)
148*(.text.__bitmap_empty)
149*(.text.sys_munmap)
150*(.text.__inode_dir_notify)
151*(.text.__generic_file_aio_write_nolock)
152*(.text.__pte_alloc)
153*(.text.sys_select)
154*(.text.vm_acct_memory)
155*(.text.vfs_write)
156*(.text.__lru_add_drain)
157*(.text.prio_tree_insert)
158*(.text.generic_file_aio_read)
159*(.text.vma_merge)
160*(.text.block_write_full_page)
161*(.text.__page_set_anon_rmap)
162*(.text.apic_timer_interrupt)
163*(.text.release_console_sem)
164*(.text.sys_write)
165*(.text.sys_brk)
166*(.text.dup_mm)
167*(.text.read_current_timer)
168*(.text.ll_rw_block)
169*(.text.blk_rq_map_sg)
170*(.text.dbg_userword)
171*(.text.__block_commit_write)
172*(.text.cache_grow)
173*(.text.copy_strings)
174*(.text.release_task)
175*(.text.do_sync_write)
176*(.text.unlock_page)
177*(.text.load_elf_binary)
178*(.text.__follow_mount)
179*(.text.__getblk)
180*(.text.do_sys_open)
181*(.text.current_kernel_time)
182*(.text.call_rcu)
183*(.text.write_chan)
184*(.text.vsnprintf)
185*(.text.dummy_inode_setsecurity)
186*(.text.submit_bh)
187*(.text.poll_freewait)
188*(.text.bio_alloc_bioset)
189*(.text.skb_clone)
190*(.text.page_waitqueue)
191*(.text.__mutex_lock_interruptible_slowpath)
192*(.text.get_index)
193*(.text.csum_partial_copy_generic)
194*(.text.bad_range)
195*(.text.remove_vma)
196*(.text.cp_new_stat)
197*(.text.alloc_arraycache)
198*(.text.test_clear_page_writeback)
199*(.text.strsep)
200*(.text.open_namei)
201*(.text._raw_read_unlock)
202*(.text.get_vma_policy)
203*(.text.__down_write_trylock)
204*(.text.find_get_pages)
205*(.text.tcp_rcv_established)
206*(.text.generic_make_request)
207*(.text.__block_write_full_page)
208*(.text.cfq_set_request)
209*(.text.sys_inotify_init)
210*(.text.split_vma)
211*(.text.__mod_timer)
212*(.text.get_options)
213*(.text.vma_link)
214*(.text.mpage_writepages)
215*(.text.truncate_complete_page)
216*(.text.tcp_recvmsg)
217*(.text.sigprocmask)
218*(.text.filemap_populate)
219*(.text.sys_close)
220*(.text.inotify_dev_queue_event)
221*(.text.do_task_stat)
222*(.text.__dentry_open)
223*(.text.unlink_file_vma)
224*(.text.__pollwait)
225*(.text.packet_rcv_spkt)
226*(.text.drop_buffers)
227*(.text.free_pgtables)
228*(.text.generic_file_direct_write)
229*(.text.copy_process)
230*(.text.netif_receive_skb)
231*(.text.dnotify_flush)
232*(.text.print_bad_pte)
233*(.text.anon_vma_unlink)
234*(.text.sys_mprotect)
235*(.text.sync_sb_inodes)
236*(.text.find_inode_fast)
237*(.text.dummy_inode_readlink)
238*(.text.putname)
239*(.text.init_smp_flush)
240*(.text.dbg_redzone2)
241*(.text.sk_run_filter)
242*(.text.may_expand_vm)
243*(.text.generic_file_aio_write)
244*(.text.find_next_zero_bit)
245*(.text.file_kill)
246*(.text.audit_getname)
247*(.text.arch_unmap_area_topdown)
248*(.text.alloc_page_vma)
249*(.text.tcp_transmit_skb)
250*(.text.rb_next)
251*(.text.dbg_redzone1)
252*(.text.generic_file_mmap)
253*(.text.vfs_fstat)
254*(.text.sys_time)
255*(.text.page_lock_anon_vma)
256*(.text.get_unmapped_area)
257*(.text.remote_llseek)
258*(.text.__up_read)
259*(.text.fd_install)
260*(.text.eventpoll_init_file)
261*(.text.dma_alloc_coherent)
262*(.text.create_empty_buffers)
263*(.text.__mutex_unlock_slowpath)
264*(.text.dup_fd)
265*(.text.d_alloc)
266*(.text.tty_ldisc_try)
267*(.text.sys_stime)
268*(.text.__rb_rotate_right)
269*(.text.d_validate)
270*(.text.rb_erase)
271*(.text.path_release)
272*(.text.memmove)
273*(.text.invalidate_complete_page)
274*(.text.clear_inode)
275*(.text.cache_estimate)
276*(.text.alloc_buffer_head)
277*(.text.smp_call_function_interrupt)
278*(.text.flush_tlb_others)
279*(.text.file_move)
280*(.text.balance_dirty_pages_ratelimited)
281*(.text.vma_prio_tree_add)
282*(.text.timespec_trunc)
283*(.text.mempool_alloc)
284*(.text.iget_locked)
285*(.text.d_alloc_root)
286*(.text.cpuset_populate_dir)
287*(.text.anon_vma_prepare)
288*(.text.sys_newstat)
289*(.text.alloc_page_interleave)
290*(.text.__path_lookup_intent_open)
291*(.text.__pagevec_free)
292*(.text.inode_init_once)
293*(.text.free_vfsmnt)
294*(.text.__user_walk_fd)
295*(.text.cfq_idle_slice_timer)
296*(.text.sys_mmap)
297*(.text.sys_llseek)
298*(.text.prio_tree_remove)
299*(.text.filp_close)
300*(.text.file_permission)
301*(.text.vma_prio_tree_remove)
302*(.text.tcp_ack)
303*(.text.nameidata_to_filp)
304*(.text.sys_lseek)
305*(.text.percpu_counter_mod)
306*(.text.igrab)
307*(.text.__bread)
308*(.text.alloc_inode)
309*(.text.filldir)
310*(.text.__rb_rotate_left)
311*(.text.irq_affinity_write_proc)
312*(.text.init_request_from_bio)
313*(.text.find_or_create_page)
314*(.text.tty_poll)
315*(.text.tcp_sendmsg)
316*(.text.ide_wait_stat)
317*(.text.free_buffer_head)
318*(.text.flush_signal_handlers)
319*(.text.tcp_v4_rcv)
320*(.text.nr_blockdev_pages)
321*(.text.locks_remove_flock)
322*(.text.__iowrite32_copy)
323*(.text.do_filp_open)
324*(.text.try_to_release_page)
325*(.text.page_add_new_anon_rmap)
326*(.text.kmem_cache_size)
327*(.text.eth_type_trans)
328*(.text.try_to_free_buffers)
329*(.text.schedule_tail)
330*(.text.proc_lookup)
331*(.text.no_llseek)
332*(.text.kfree_skbmem)
333*(.text.do_wait)
334*(.text.do_mpage_readpage)
335*(.text.vfs_stat_fd)
336*(.text.tty_write)
337*(.text.705)
338*(.text.sync_page)
339*(.text.__remove_shared_vm_struct)
340*(.text.__kfree_skb)
341*(.text.sock_poll)
342*(.text.get_request_wait)
343*(.text.do_sigaction)
344*(.text.do_brk)
345*(.text.tcp_event_data_recv)
346*(.text.read_chan)
347*(.text.pipe_writev)
348*(.text.__emul_lookup_dentry)
349*(.text.rtc_get_rtc_time)
350*(.text.print_objinfo)
351*(.text.file_update_time)
352*(.text.do_signal)
353*(.text.disable_8259A_irq)
354*(.text.blk_queue_bounce)
355*(.text.__anon_vma_link)
356*(.text.__vma_link)
357*(.text.vfs_rename)
358*(.text.sys_newlstat)
359*(.text.sys_newfstat)
360*(.text.sys_mknod)
361*(.text.__show_regs)
362*(.text.iput)
363*(.text.get_signal_to_deliver)
364*(.text.flush_tlb_page)
365*(.text.debug_mutex_wake_waiter)
366*(.text.copy_thread)
367*(.text.clear_page_dirty_for_io)
368*(.text.buffer_io_error)
369*(.text.vfs_permission)
370*(.text.truncate_inode_pages_range)
371*(.text.sys_recvfrom)
372*(.text.remove_suid)
373*(.text.mark_buffer_dirty)
374*(.text.local_bh_enable)
375*(.text.get_zeroed_page)
376*(.text.get_vmalloc_info)
377*(.text.flush_old_exec)
378*(.text.dummy_inode_permission)
379*(.text.__bio_add_page)
380*(.text.prio_tree_replace)
381*(.text.notify_change)
382*(.text.mntput_no_expire)
383*(.text.fput)
384*(.text.__end_that_request_first)
385*(.text.wake_up_bit)
386*(.text.unuse_mm)
387*(.text.skb_release_data)
388*(.text.shrink_icache_memory)
389*(.text.sched_balance_self)
390*(.text.__pmd_alloc)
391*(.text.pipe_poll)
392*(.text.normal_poll)
393*(.text.__free_pages)
394*(.text.follow_mount)
395*(.text.cdrom_start_packet_command)
396*(.text.blk_recount_segments)
397*(.text.bio_put)
398*(.text.__alloc_skb)
399*(.text.__wake_up)
400*(.text.vm_stat_account)
401*(.text.sys_fcntl)
402*(.text.sys_fadvise64)
403*(.text._raw_write_unlock)
404*(.text.__pud_alloc)
405*(.text.alloc_page_buffers)
406*(.text.vfs_llseek)
407*(.text.sockfd_lookup)
408*(.text._raw_write_lock)
409*(.text.put_compound_page)
410*(.text.prune_dcache)
411*(.text.pipe_readv)
412*(.text.mempool_free)
413*(.text.make_ahead_window)
414*(.text.lru_add_drain)
415*(.text.constant_test_bit)
416*(.text.__clear_user)
417*(.text.arch_unmap_area)
418*(.text.anon_vma_link)
419*(.text.sys_chroot)
420*(.text.setup_arg_pages)
421*(.text.radix_tree_preload)
422*(.text.init_rwsem)
423*(.text.generic_osync_inode)
424*(.text.generic_delete_inode)
425*(.text.do_sys_poll)
426*(.text.dev_queue_xmit)
427*(.text.default_llseek)
428*(.text.__writeback_single_inode)
429*(.text.vfs_ioctl)
430*(.text.__up_write)
431*(.text.unix_poll)
432*(.text.sys_rt_sigprocmask)
433*(.text.sock_recvmsg)
434*(.text.recalc_bh_state)
435*(.text.__put_unused_fd)
436*(.text.process_backlog)
437*(.text.locks_remove_posix)
438*(.text.lease_modify)
439*(.text.expand_files)
440*(.text.end_buffer_read_nobh)
441*(.text.d_splice_alias)
442*(.text.debug_mutex_init_waiter)
443*(.text.copy_from_user)
444*(.text.cap_vm_enough_memory)
445*(.text.show_vfsmnt)
446*(.text.release_sock)
447*(.text.pfifo_fast_enqueue)
448*(.text.half_md4_transform)
449*(.text.fs_may_remount_ro)
450*(.text.do_fork)
451*(.text.copy_hugetlb_page_range)
452*(.text.cache_free_debugcheck)
453*(.text.__tcp_select_window)
454*(.text.task_handoff_register)
455*(.text.sys_open)
456*(.text.strlcpy)
457*(.text.skb_copy_datagram_iovec)
458*(.text.set_up_list3s)
459*(.text.release_open_intent)
460*(.text.qdisc_restart)
461*(.text.n_tty_chars_in_buffer)
462*(.text.inode_change_ok)
463*(.text.__downgrade_write)
464*(.text.debug_mutex_unlock)
465*(.text.add_timer_randomness)
466*(.text.sock_common_recvmsg)
467*(.text.set_bh_page)
468*(.text.printk_lock)
469*(.text.path_release_on_umount)
470*(.text.ip_output)
471*(.text.ide_build_dmatable)
472*(.text.__get_user_8)
473*(.text.end_buffer_read_sync)
474*(.text.__d_path)
475*(.text.d_move)
476*(.text.del_timer)
477*(.text.constant_test_bit)
478*(.text.blockable_page_cache_readahead)
479*(.text.tty_read)
480*(.text.sys_readlink)
481*(.text.sys_faccessat)
482*(.text.read_swap_cache_async)
483*(.text.pty_write_room)
484*(.text.page_address_in_vma)
485*(.text.kthread)
486*(.text.cfq_exit_io_context)
487*(.text.__tcp_push_pending_frames)
488*(.text.sys_pipe)
489*(.text.submit_bio)
490*(.text.pid_revalidate)
491*(.text.page_referenced_file)
492*(.text.lock_sock)
493*(.text.get_page_state_node)
494*(.text.generic_block_bmap)
495*(.text.do_setitimer)
496*(.text.dev_queue_xmit_nit)
497*(.text.copy_from_read_buf)
498*(.text.__const_udelay)
499*(.text.console_conditional_schedule)
500*(.text.wake_up_new_task)
501*(.text.wait_for_completion_interruptible)
502*(.text.tcp_rcv_rtt_update)
503*(.text.sys_mlockall)
504*(.text.set_fs_altroot)
505*(.text.schedule_timeout)
506*(.text.nr_free_pagecache_pages)
507*(.text.nf_iterate)
508*(.text.mapping_tagged)
509*(.text.ip_queue_xmit)
510*(.text.ip_local_deliver)
511*(.text.follow_page)
512*(.text.elf_map)
513*(.text.dummy_file_permission)
514*(.text.dispose_list)
515*(.text.dentry_open)
516*(.text.dentry_iput)
517*(.text.bio_alloc)
518*(.text.alloc_skb_from_cache)
519*(.text.wait_on_page_bit)
520*(.text.vfs_readdir)
521*(.text.vfs_lstat)
522*(.text.seq_escape)
523*(.text.__posix_lock_file)
524*(.text.mm_release)
525*(.text.kref_put)
526*(.text.ip_rcv)
527*(.text.__iget)
528*(.text.free_pages)
529*(.text.find_mergeable_anon_vma)
530*(.text.find_extend_vma)
531*(.text.dummy_inode_listsecurity)
532*(.text.bio_add_page)
533*(.text.__vm_enough_memory)
534*(.text.vfs_stat)
535*(.text.tty_paranoia_check)
536*(.text.tcp_read_sock)
537*(.text.tcp_data_queue)
538*(.text.sys_uname)
539*(.text.sys_renameat)
540*(.text.__strncpy_from_user)
541*(.text.__mutex_init)
542*(.text.__lookup_hash)
543*(.text.kref_get)
544*(.text.ip_route_input)
545*(.text.__insert_inode_hash)
546*(.text.do_sock_write)
547*(.text.blk_done_softirq)
548*(.text.__wake_up_sync)
549*(.text.__vma_link_rb)
550*(.text.tty_ioctl)
551*(.text.tracesys)
552*(.text.sys_getdents)
553*(.text.sys_dup)
554*(.text.stub_execve)
555*(.text.sha_transform)
556*(.text.radix_tree_tag_clear)
557*(.text.put_unused_fd)
558*(.text.put_files_struct)
559*(.text.mpage_readpages)
560*(.text.may_delete)
561*(.text.kmem_cache_create)
562*(.text.ip_mc_output)
563*(.text.interleave_nodes)
564*(.text.groups_search)
565*(.text.generic_drop_inode)
566*(.text.generic_commit_write)
567*(.text.fcntl_setlk)
568*(.text.exit_mmap)
569*(.text.end_page_writeback)
570*(.text.__d_rehash)
571*(.text.debug_mutex_free_waiter)
572*(.text.csum_ipv6_magic)
573*(.text.count)
574*(.text.cleanup_rbuf)
575*(.text.check_spinlock_acquired_node)
576*(.text.can_vma_merge_after)
577*(.text.bio_endio)
578*(.text.alloc_pidmap)
579*(.text.write_ldt)
580*(.text.vmtruncate_range)
581*(.text.vfs_create)
582*(.text.__user_walk)
583*(.text.update_send_head)
584*(.text.unmap_underlying_metadata)
585*(.text.tty_ldisc_deref)
586*(.text.tcp_setsockopt)
587*(.text.tcp_send_ack)
588*(.text.sys_pause)
589*(.text.sys_gettimeofday)
590*(.text.sync_dirty_buffer)
591*(.text.strncmp)
592*(.text.release_posix_timer)
593*(.text.proc_file_read)
594*(.text.prepare_to_wait)
595*(.text.locks_mandatory_locked)
596*(.text.interruptible_sleep_on_timeout)
597*(.text.inode_sub_bytes)
598*(.text.in_group_p)
599*(.text.hrtimer_try_to_cancel)
600*(.text.filldir64)
601*(.text.fasync_helper)
602*(.text.dummy_sb_pivotroot)
603*(.text.d_lookup)
604*(.text.d_instantiate)
605*(.text.__d_find_alias)
606*(.text.cpu_idle_wait)
607*(.text.cond_resched_lock)
608*(.text.chown_common)
609*(.text.blk_congestion_wait)
610*(.text.activate_page)
611*(.text.unlock_buffer)
612*(.text.tty_wakeup)
613*(.text.tcp_v4_do_rcv)
614*(.text.tcp_current_mss)
615*(.text.sys_openat)
616*(.text.sys_fchdir)
617*(.text.strnlen_user)
618*(.text.strnlen)
619*(.text.strchr)
620*(.text.sock_common_getsockopt)
621*(.text.skb_checksum)
622*(.text.remove_wait_queue)
623*(.text.rb_replace_node)
624*(.text.radix_tree_node_ctor)
625*(.text.pty_chars_in_buffer)
626*(.text.profile_hit)
627*(.text.prio_tree_left)
628*(.text.pgd_clear_bad)
629*(.text.pfifo_fast_dequeue)
630*(.text.page_referenced)
631*(.text.open_exec)
632*(.text.mmput)
633*(.text.mm_init)
634*(.text.__ide_dma_off_quietly)
635*(.text.ide_dma_intr)
636*(.text.hrtimer_start)
637*(.text.get_io_context)
638*(.text.__get_free_pages)
639*(.text.find_first_zero_bit)
640*(.text.file_free_rcu)
641*(.text.dummy_socket_sendmsg)
642*(.text.do_unlinkat)
643*(.text.do_arch_prctl)
644*(.text.destroy_inode)
645*(.text.can_vma_merge_before)
646*(.text.block_sync_page)
647*(.text.block_prepare_write)
648*(.text.bio_init)
649*(.text.arch_ptrace)
650*(.text.wake_up_inode)
651*(.text.wait_on_retry_sync_kiocb)
652*(.text.vma_prio_tree_next)
653*(.text.tcp_rcv_space_adjust)
654*(.text.__tcp_ack_snd_check)
655*(.text.sys_utime)
656*(.text.sys_recvmsg)
657*(.text.sys_mremap)
658*(.text.sys_bdflush)
659*(.text.sleep_on)
660*(.text.set_page_dirty_lock)
661*(.text.seq_path)
662*(.text.schedule_timeout_interruptible)
663*(.text.sched_fork)
664*(.text.rt_run_flush)
665*(.text.profile_munmap)
666*(.text.prepare_binprm)
667*(.text.__pagevec_release_nonlru)
668*(.text.m_show)
669*(.text.lookup_mnt)
670*(.text.__lookup_mnt)
671*(.text.lock_timer_base)
672*(.text.is_subdir)
673*(.text.invalidate_bh_lru)
674*(.text.init_buffer_head)
675*(.text.ifind_fast)
676*(.text.ide_dma_start)
677*(.text.__get_page_state)
678*(.text.flock_to_posix_lock)
679*(.text.__find_symbol)
680*(.text.do_futex)
681*(.text.do_execve)
682*(.text.dirty_writeback_centisecs_handler)
683*(.text.dev_watchdog)
684*(.text.can_share_swap_page)
685*(.text.blkdev_put)
686*(.text.bio_get_nr_vecs)
687*(.text.xfrm_compile_policy)
688*(.text.vma_prio_tree_insert)
689*(.text.vfs_lstat_fd)
690*(.text.__user_path_lookup_open)
691*(.text.thread_return)
692*(.text.tcp_send_delayed_ack)
693*(.text.sock_def_error_report)
694*(.text.shrink_slab)
695*(.text.serial_out)
696*(.text.seq_read)
697*(.text.secure_ip_id)
698*(.text.search_binary_handler)
699*(.text.proc_pid_unhash)
700*(.text.pagevec_lookup)
701*(.text.new_inode)
702*(.text.memcpy_toiovec)
703*(.text.locks_free_lock)
704*(.text.__lock_page)
705*(.text.__lock_buffer)
706*(.text.load_module)
707*(.text.is_bad_inode)
708*(.text.invalidate_inode_buffers)
709*(.text.insert_vm_struct)
710*(.text.inode_setattr)
711*(.text.inode_add_bytes)
712*(.text.ide_read_24)
713*(.text.ide_get_error_location)
714*(.text.ide_do_drive_cmd)
715*(.text.get_locked_pte)
716*(.text.get_filesystem_list)
717*(.text.generic_file_open)
718*(.text.follow_down)
719*(.text.find_next_bit)
720*(.text.__find_first_bit)
721*(.text.exit_mm)
722*(.text.exec_keys)
723*(.text.end_buffer_write_sync)
724*(.text.end_bio_bh_io_sync)
725*(.text.dummy_socket_shutdown)
726*(.text.d_rehash)
727*(.text.d_path)
728*(.text.do_ioctl)
729*(.text.dget_locked)
730*(.text.copy_thread_group_keys)
731*(.text.cdrom_end_request)
732*(.text.cap_bprm_apply_creds)
733*(.text.blk_rq_bio_prep)
734*(.text.__bitmap_intersects)
735*(.text.bio_phys_segments)
736*(.text.bio_free)
737*(.text.arch_get_unmapped_area_topdown)
738*(.text.writeback_in_progress)
739*(.text.vfs_follow_link)
740*(.text.tcp_rcv_state_process)
741*(.text.tcp_check_space)
742*(.text.sys_stat)
743*(.text.sys_rt_sigreturn)
744*(.text.sys_rt_sigaction)
745*(.text.sys_remap_file_pages)
746*(.text.sys_pwrite64)
747*(.text.sys_fchownat)
748*(.text.sys_fchmodat)
749*(.text.strncat)
750*(.text.strlcat)
751*(.text.strcmp)
752*(.text.steal_locks)
753*(.text.sock_create)
754*(.text.sk_stream_rfree)
755*(.text.sk_stream_mem_schedule)
756*(.text.skip_atoi)
757*(.text.sk_alloc)
758*(.text.show_stat)
759*(.text.set_fs_pwd)
760*(.text.set_binfmt)
761*(.text.pty_unthrottle)
762*(.text.proc_symlink)
763*(.text.pipe_release)
764*(.text.pageout)
765*(.text.n_tty_write_wakeup)
766*(.text.n_tty_ioctl)
767*(.text.nr_free_zone_pages)
768*(.text.migration_thread)
769*(.text.mempool_free_slab)
770*(.text.meminfo_read_proc)
771*(.text.max_sane_readahead)
772*(.text.lru_cache_add)
773*(.text.kill_fasync)
774*(.text.kernel_read)
775*(.text.invalidate_mapping_pages)
776*(.text.inode_has_buffers)
777*(.text.init_once)
778*(.text.inet_sendmsg)
779*(.text.idedisk_issue_flush)
780*(.text.generic_file_write)
781*(.text.free_more_memory)
782*(.text.__free_fdtable)
783*(.text.filp_dtor)
784*(.text.exit_sem)
785*(.text.exit_itimers)
786*(.text.error_interrupt)
787*(.text.end_buffer_async_write)
788*(.text.eligible_child)
789*(.text.elf_map)
790*(.text.dump_task_regs)
791*(.text.dummy_task_setscheduler)
792*(.text.dummy_socket_accept)
793*(.text.dummy_file_free_security)
794*(.text.__down_read)
795*(.text.do_sock_read)
796*(.text.do_sigaltstack)
797*(.text.do_mremap)
798*(.text.current_io_context)
799*(.text.cpu_swap_callback)
800*(.text.copy_vma)
801*(.text.cap_bprm_set_security)
802*(.text.blk_insert_request)
803*(.text.bio_map_kern_endio)
804*(.text.bio_hw_segments)
805*(.text.bictcp_cong_avoid)
806*(.text.add_interrupt_randomness)
807*(.text.wait_for_completion)
808*(.text.version_read_proc)
809*(.text.unix_write_space)
810*(.text.tty_ldisc_ref_wait)
811*(.text.tty_ldisc_put)
812*(.text.try_to_wake_up)
813*(.text.tcp_v4_tw_remember_stamp)
814*(.text.tcp_try_undo_dsack)
815*(.text.tcp_may_send_now)
816*(.text.sys_waitid)
817*(.text.sys_sched_getparam)
818*(.text.sys_getppid)
819*(.text.sys_getcwd)
820*(.text.sys_dup2)
821*(.text.sys_chmod)
822*(.text.sys_chdir)
823*(.text.sprintf)
824*(.text.sock_wfree)
825*(.text.sock_aio_write)
826*(.text.skb_drop_fraglist)
827*(.text.skb_dequeue)
828*(.text.set_close_on_exec)
829*(.text.set_brk)
830*(.text.seq_puts)
831*(.text.SELECT_DRIVE)
832*(.text.sched_exec)
833*(.text.return_EIO)
834*(.text.remove_from_page_cache)
835*(.text.rcu_start_batch)
836*(.text.__put_task_struct)
837*(.text.proc_pid_readdir)
838*(.text.proc_get_inode)
839*(.text.prepare_to_wait_exclusive)
840*(.text.pipe_wait)
841*(.text.pipe_new)
842*(.text.pdflush_operation)
843*(.text.__pagevec_release)
844*(.text.pagevec_lookup_tag)
845*(.text.packet_rcv)
846*(.text.n_tty_set_room)
847*(.text.nr_free_pages)
848*(.text.__net_timestamp)
849*(.text.mpage_end_io_read)
850*(.text.mod_timer)
851*(.text.__memcpy)
852*(.text.mb_cache_shrink_fn)
853*(.text.lock_rename)
854*(.text.kstrdup)
855*(.text.is_ignored)
856*(.text.int_very_careful)
857*(.text.inotify_inode_is_dead)
858*(.text.inotify_get_cookie)
859*(.text.inode_get_bytes)
860*(.text.init_timer)
861*(.text.init_dev)
862*(.text.inet_getname)
863*(.text.ide_map_sg)
864*(.text.__ide_dma_end)
865*(.text.hrtimer_get_remaining)
866*(.text.get_task_mm)
867*(.text.get_random_int)
868*(.text.free_pipe_info)
869*(.text.filemap_write_and_wait_range)
870*(.text.exit_thread)
871*(.text.enter_idle)
872*(.text.end_that_request_first)
873*(.text.end_8259A_irq)
874*(.text.dummy_file_alloc_security)
875*(.text.do_group_exit)
876*(.text.debug_mutex_init)
877*(.text.cpuset_exit)
878*(.text.cpu_idle)
879*(.text.copy_semundo)
880*(.text.copy_files)
881*(.text.chrdev_open)
882*(.text.cdrom_transfer_packet_command)
883*(.text.cdrom_mode_sense)
884*(.text.blk_phys_contig_segment)
885*(.text.blk_get_queue)
886*(.text.bio_split)
887*(.text.audit_alloc)
888*(.text.anon_pipe_buf_release)
889*(.text.add_wait_queue_exclusive)
890*(.text.add_wait_queue)
891*(.text.acct_process)
892*(.text.account)
893*(.text.zeromap_page_range)
894*(.text.yield)
895*(.text.writeback_acquire)
896*(.text.worker_thread)
897*(.text.wait_on_page_writeback_range)
898*(.text.__wait_on_buffer)
899*(.text.vscnprintf)
900*(.text.vmalloc_to_pfn)
901*(.text.vgacon_save_screen)
902*(.text.vfs_unlink)
903*(.text.vfs_rmdir)
904*(.text.unregister_md_personality)
905*(.text.unlock_new_inode)
906*(.text.unix_stream_sendmsg)
907*(.text.unix_stream_recvmsg)
908*(.text.unhash_process)
909*(.text.udp_v4_lookup_longway)
910*(.text.tty_ldisc_flush)
911*(.text.tty_ldisc_enable)
912*(.text.tty_hung_up_p)
913*(.text.tty_buffer_free_all)
914*(.text.tso_fragment)
915*(.text.try_to_del_timer_sync)
916*(.text.tcp_v4_err)
917*(.text.tcp_unhash)
918*(.text.tcp_seq_next)
919*(.text.tcp_select_initial_window)
920*(.text.tcp_sacktag_write_queue)
921*(.text.tcp_cwnd_validate)
922*(.text.sys_vhangup)
923*(.text.sys_uselib)
924*(.text.sys_symlink)
925*(.text.sys_signal)
926*(.text.sys_poll)
927*(.text.sys_mount)
928*(.text.sys_kill)
929*(.text.sys_ioctl)
930*(.text.sys_inotify_add_watch)
931*(.text.sys_getuid)
932*(.text.sys_getrlimit)
933*(.text.sys_getitimer)
934*(.text.sys_getgroups)
935*(.text.sys_ftruncate)
936*(.text.sysfs_lookup)
937*(.text.sys_exit_group)
938*(.text.stub_fork)
939*(.text.sscanf)
940*(.text.sock_map_fd)
941*(.text.sock_get_timestamp)
942*(.text.__sock_create)
943*(.text.smp_call_function_single)
944*(.text.sk_stop_timer)
945*(.text.skb_copy_and_csum_datagram)
946*(.text.__skb_checksum_complete)
947*(.text.single_next)
948*(.text.sigqueue_alloc)
949*(.text.shrink_dcache_parent)
950*(.text.select_idle_routine)
951*(.text.run_workqueue)
952*(.text.run_local_timers)
953*(.text.remove_inode_hash)
954*(.text.remove_dquot_ref)
955*(.text.register_binfmt)
956*(.text.read_cache_pages)
957*(.text.rb_last)
958*(.text.pty_open)
959*(.text.proc_root_readdir)
960*(.text.proc_pid_flush)
961*(.text.proc_pident_lookup)
962*(.text.proc_fill_super)
963*(.text.proc_exe_link)
964*(.text.posix_locks_deadlock)
965*(.text.pipe_iov_copy_from_user)
966*(.text.opost)
967*(.text.nf_register_hook)
968*(.text.netif_rx_ni)
969*(.text.m_start)
970*(.text.mpage_writepage)
971*(.text.mm_alloc)
972*(.text.memory_open)
973*(.text.mark_buffer_async_write)
974*(.text.lru_add_drain_all)
975*(.text.locks_init_lock)
976*(.text.locks_delete_lock)
977*(.text.lock_hrtimer_base)
978*(.text.load_script)
979*(.text.__kill_fasync)
980*(.text.ip_mc_sf_allow)
981*(.text.__ioremap)
982*(.text.int_with_check)
983*(.text.int_sqrt)
984*(.text.install_thread_keyring)
985*(.text.init_page_buffers)
986*(.text.inet_sock_destruct)
987*(.text.idle_notifier_register)
988*(.text.ide_execute_command)
989*(.text.ide_end_drive_cmd)
990*(.text.__ide_dma_host_on)
991*(.text.hrtimer_run_queues)
992*(.text.hpet_mask_rtc_irq_bit)
993*(.text.__get_zone_counts)
994*(.text.get_zone_counts)
995*(.text.get_write_access)
996*(.text.get_fs_struct)
997*(.text.get_dirty_limits)
998*(.text.generic_readlink)
999*(.text.free_hot_page)
1000*(.text.finish_wait)
1001*(.text.find_inode)
1002*(.text.find_first_bit)
1003*(.text.__filemap_fdatawrite_range)
1004*(.text.__filemap_copy_from_user_iovec)
1005*(.text.exit_aio)
1006*(.text.elv_set_request)
1007*(.text.elv_former_request)
1008*(.text.dup_namespace)
1009*(.text.dupfd)
1010*(.text.dummy_socket_getsockopt)
1011*(.text.dummy_sb_post_mountroot)
1012*(.text.dummy_quotactl)
1013*(.text.dummy_inode_rename)
1014*(.text.__do_SAK)
1015*(.text.do_pipe)
1016*(.text.do_fsync)
1017*(.text.d_instantiate_unique)
1018*(.text.d_find_alias)
1019*(.text.deny_write_access)
1020*(.text.dentry_unhash)
1021*(.text.d_delete)
1022*(.text.datagram_poll)
1023*(.text.cpuset_fork)
1024*(.text.cpuid_read)
1025*(.text.copy_namespace)
1026*(.text.cond_resched)
1027*(.text.check_version)
1028*(.text.__change_page_attr)
1029*(.text.cfq_slab_kill)
1030*(.text.cfq_completed_request)
1031*(.text.cdrom_pc_intr)
1032*(.text.cdrom_decode_status)
1033*(.text.cap_capset_check)
1034*(.text.blk_put_request)
1035*(.text.bio_fs_destructor)
1036*(.text.bictcp_min_cwnd)
1037*(.text.alloc_chrdev_region)
1038*(.text.add_element)
1039*(.text.acct_update_integrals)
1040*(.text.write_boundary_block)
1041*(.text.writeback_release)
1042*(.text.writeback_inodes)
1043*(.text.wake_up_state)
1044*(.text.__wake_up_locked)
1045*(.text.wake_futex)
1046*(.text.wait_task_inactive)
1047*(.text.__wait_on_freeing_inode)
1048*(.text.wait_noreap_copyout)
1049*(.text.vmstat_start)
1050*(.text.vgacon_do_font_op)
1051*(.text.vfs_readv)
1052*(.text.vfs_quota_sync)
1053*(.text.update_queue)
1054*(.text.unshare_files)
1055*(.text.unmap_vm_area)
1056*(.text.unix_socketpair)
1057*(.text.unix_release_sock)
1058*(.text.unix_detach_fds)
1059*(.text.unix_create1)
1060*(.text.unix_bind)
1061*(.text.udp_sendmsg)
1062*(.text.udp_rcv)
1063*(.text.udp_queue_rcv_skb)
1064*(.text.uart_write)
1065*(.text.uart_startup)
1066*(.text.uart_open)
1067*(.text.tty_vhangup)
1068*(.text.tty_termios_baud_rate)
1069*(.text.tty_release)
1070*(.text.tty_ldisc_ref)
1071*(.text.throttle_vm_writeout)
1072*(.text.058)
1073*(.text.tcp_xmit_probe_skb)
1074*(.text.tcp_v4_send_check)
1075*(.text.tcp_v4_destroy_sock)
1076*(.text.tcp_sync_mss)
1077*(.text.tcp_snd_test)
1078*(.text.tcp_slow_start)
1079*(.text.tcp_send_fin)
1080*(.text.tcp_rtt_estimator)
1081*(.text.tcp_parse_options)
1082*(.text.tcp_ioctl)
1083*(.text.tcp_init_tso_segs)
1084*(.text.tcp_init_cwnd)
1085*(.text.tcp_getsockopt)
1086*(.text.tcp_fin)
1087*(.text.tcp_connect)
1088*(.text.tcp_cong_avoid)
1089*(.text.__tcp_checksum_complete_user)
1090*(.text.task_dumpable)
1091*(.text.sys_wait4)
1092*(.text.sys_utimes)
1093*(.text.sys_symlinkat)
1094*(.text.sys_socketpair)
1095*(.text.sys_rmdir)
1096*(.text.sys_readahead)
1097*(.text.sys_nanosleep)
1098*(.text.sys_linkat)
1099*(.text.sys_fstat)
1100*(.text.sysfs_readdir)
1101*(.text.sys_execve)
1102*(.text.sysenter_tracesys)
1103*(.text.sys_chown)
1104*(.text.stub_clone)
1105*(.text.strrchr)
1106*(.text.strncpy)
1107*(.text.stopmachine_set_state)
1108*(.text.sock_sendmsg)
1109*(.text.sock_release)
1110*(.text.sock_fasync)
1111*(.text.sock_close)
1112*(.text.sk_stream_write_space)
1113*(.text.sk_reset_timer)
1114*(.text.skb_split)
1115*(.text.skb_recv_datagram)
1116*(.text.skb_queue_tail)
1117*(.text.sk_attach_filter)
1118*(.text.si_swapinfo)
1119*(.text.simple_strtoll)
1120*(.text.set_termios)
1121*(.text.set_task_comm)
1122*(.text.set_shrinker)
1123*(.text.set_normalized_timespec)
1124*(.text.set_brk)
1125*(.text.serial_in)
1126*(.text.seq_printf)
1127*(.text.secure_dccp_sequence_number)
1128*(.text.rwlock_bug)
1129*(.text.rt_hash_code)
1130*(.text.__rta_fill)
1131*(.text.__request_resource)
1132*(.text.relocate_new_kernel)
1133*(.text.release_thread)
1134*(.text.release_mem)
1135*(.text.rb_prev)
1136*(.text.rb_first)
1137*(.text.random_poll)
1138*(.text.__put_super_and_need_restart)
1139*(.text.pty_write)
1140*(.text.ptrace_stop)
1141*(.text.proc_self_readlink)
1142*(.text.proc_root_lookup)
1143*(.text.proc_root_link)
1144*(.text.proc_pid_make_inode)
1145*(.text.proc_pid_attr_write)
1146*(.text.proc_lookupfd)
1147*(.text.proc_delete_inode)
1148*(.text.posix_same_owner)
1149*(.text.posix_block_lock)
1150*(.text.poll_initwait)
1151*(.text.pipe_write)
1152*(.text.pipe_read_fasync)
1153*(.text.pipe_ioctl)
1154*(.text.pdflush)
1155*(.text.pci_user_read_config_dword)
1156*(.text.page_readlink)
1157*(.text.null_lseek)
1158*(.text.nf_hook_slow)
1159*(.text.netlink_sock_destruct)
1160*(.text.netlink_broadcast)
1161*(.text.neigh_resolve_output)
1162*(.text.name_to_int)
1163*(.text.mwait_idle)
1164*(.text.mutex_trylock)
1165*(.text.mutex_debug_check_no_locks_held)
1166*(.text.m_stop)
1167*(.text.mpage_end_io_write)
1168*(.text.mpage_alloc)
1169*(.text.move_page_tables)
1170*(.text.mounts_open)
1171*(.text.__memset)
1172*(.text.memcpy_fromiovec)
1173*(.text.make_8259A_irq)
1174*(.text.lookup_user_key_possessed)
1175*(.text.lookup_create)
1176*(.text.locks_insert_lock)
1177*(.text.locks_alloc_lock)
1178*(.text.kthread_should_stop)
1179*(.text.kswapd)
1180*(.text.kobject_uevent)
1181*(.text.kobject_get_path)
1182*(.text.kobject_get)
1183*(.text.klist_children_put)
1184*(.text.__ip_route_output_key)
1185*(.text.ip_flush_pending_frames)
1186*(.text.ip_compute_csum)
1187*(.text.ip_append_data)
1188*(.text.ioc_set_batching)
1189*(.text.invalidate_inode_pages)
1190*(.text.__invalidate_device)
1191*(.text.install_arg_page)
1192*(.text.in_sched_functions)
1193*(.text.inotify_unmount_inodes)
1194*(.text.init_once)
1195*(.text.init_cdrom_command)
1196*(.text.inet_stream_connect)
1197*(.text.inet_sk_rebuild_header)
1198*(.text.inet_csk_addr2sockaddr)
1199*(.text.inet_create)
1200*(.text.ifind)
1201*(.text.ide_setup_dma)
1202*(.text.ide_outsw)
1203*(.text.ide_fixstring)
1204*(.text.ide_dma_setup)
1205*(.text.ide_cdrom_packet)
1206*(.text.ide_cd_put)
1207*(.text.ide_build_sglist)
1208*(.text.i8259A_shutdown)
1209*(.text.hung_up_tty_ioctl)
1210*(.text.hrtimer_nanosleep)
1211*(.text.hrtimer_init)
1212*(.text.hrtimer_cancel)
1213*(.text.hash_futex)
1214*(.text.group_send_sig_info)
1215*(.text.grab_cache_page_nowait)
1216*(.text.get_wchan)
1217*(.text.get_stack)
1218*(.text.get_page_state)
1219*(.text.getnstimeofday)
1220*(.text.get_node)
1221*(.text.get_kprobe)
1222*(.text.generic_unplug_device)
1223*(.text.free_task)
1224*(.text.frag_show)
1225*(.text.find_next_zero_string)
1226*(.text.filp_open)
1227*(.text.fillonedir)
1228*(.text.exit_io_context)
1229*(.text.exit_idle)
1230*(.text.exact_lock)
1231*(.text.eth_header)
1232*(.text.dummy_unregister_security)
1233*(.text.dummy_socket_post_create)
1234*(.text.dummy_socket_listen)
1235*(.text.dummy_quota_on)
1236*(.text.dummy_inode_follow_link)
1237*(.text.dummy_file_receive)
1238*(.text.dummy_file_mprotect)
1239*(.text.dummy_file_lock)
1240*(.text.dummy_file_ioctl)
1241*(.text.dummy_bprm_post_apply_creds)
1242*(.text.do_writepages)
1243*(.text.__down_interruptible)
1244*(.text.do_notify_resume)
1245*(.text.do_acct_process)
1246*(.text.del_timer_sync)
1247*(.text.default_rebuild_header)
1248*(.text.d_callback)
1249*(.text.dcache_readdir)
1250*(.text.ctrl_dumpfamily)
1251*(.text.cpuset_rmdir)
1252*(.text.copy_strings_kernel)
1253*(.text.con_write_room)
1254*(.text.complete_all)
1255*(.text.collect_sigign_sigcatch)
1256*(.text.clear_user)
1257*(.text.check_unthrottle)
1258*(.text.cdrom_release)
1259*(.text.cdrom_newpc_intr)
1260*(.text.cdrom_ioctl)
1261*(.text.cdrom_check_status)
1262*(.text.cdev_put)
1263*(.text.cdev_add)
1264*(.text.cap_ptrace)
1265*(.text.cap_bprm_secureexec)
1266*(.text.cache_alloc_refill)
1267*(.text.bmap)
1268*(.text.blk_run_queue)
1269*(.text.blk_queue_dma_alignment)
1270*(.text.blk_ordered_req_seq)
1271*(.text.blk_backing_dev_unplug)
1272*(.text.__bitmap_subset)
1273*(.text.__bitmap_and)
1274*(.text.bio_unmap_user)
1275*(.text.__bforget)
1276*(.text.bd_forget)
1277*(.text.bad_pipe_w)
1278*(.text.bad_get_user)
1279*(.text.audit_free)
1280*(.text.anon_vma_ctor)
1281*(.text.anon_pipe_buf_map)
1282*(.text.alloc_sock_iocb)
1283*(.text.alloc_fdset)
1284*(.text.aio_kick_handler)
1285*(.text.__add_entropy_words)
1286*(.text.add_disk_randomness)
diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S
index 02fc7fa0ea28..6df05e6034fa 100644
--- a/arch/x86_64/kernel/head.S
+++ b/arch/x86_64/kernel/head.S
@@ -26,6 +26,7 @@
26 */ 26 */
27 27
28 .text 28 .text
29 .section .bootstrap.text
29 .code32 30 .code32
30 .globl startup_32 31 .globl startup_32
31/* %bx: 1 if coming from smp trampoline on secondary cpu */ 32/* %bx: 1 if coming from smp trampoline on secondary cpu */
@@ -192,7 +193,8 @@ startup_64:
192 movq initial_code(%rip),%rax 193 movq initial_code(%rip),%rax
193 jmp *%rax 194 jmp *%rax
194 195
195 /* SMP bootup changes these two */ 196 /* SMP bootup changes these two */
197 .align 8
196 .globl initial_code 198 .globl initial_code
197initial_code: 199initial_code:
198 .quad x86_64_start_kernel 200 .quad x86_64_start_kernel
@@ -237,7 +239,7 @@ ENTRY(no_long_mode)
237.org 0xf00 239.org 0xf00
238 .globl pGDT32 240 .globl pGDT32
239pGDT32: 241pGDT32:
240 .word gdt_end-cpu_gdt_table 242 .word gdt_end-cpu_gdt_table-1
241 .long cpu_gdt_table-__START_KERNEL_map 243 .long cpu_gdt_table-__START_KERNEL_map
242 244
243.org 0xf10 245.org 0xf10
@@ -293,8 +295,6 @@ NEXT_PAGE(level2_kernel_pgt)
293 /* Module mapping starts here */ 295 /* Module mapping starts here */
294 .fill 492,8,0 296 .fill 492,8,0
295 297
296NEXT_PAGE(empty_zero_page)
297
298NEXT_PAGE(level3_physmem_pgt) 298NEXT_PAGE(level3_physmem_pgt)
299 .quad phys_level2_kernel_pgt | 0x007 /* so that __va works even before pagetable_init */ 299 .quad phys_level2_kernel_pgt | 0x007 /* so that __va works even before pagetable_init */
300 .fill 511,8,0 300 .fill 511,8,0
@@ -337,7 +337,7 @@ ENTRY(boot_level4_pgt)
337 .align 16 337 .align 16
338 .globl cpu_gdt_descr 338 .globl cpu_gdt_descr
339cpu_gdt_descr: 339cpu_gdt_descr:
340 .word gdt_end-cpu_gdt_table 340 .word gdt_end-cpu_gdt_table-1
341gdt: 341gdt:
342 .quad cpu_gdt_table 342 .quad cpu_gdt_table
343#ifdef CONFIG_SMP 343#ifdef CONFIG_SMP
@@ -352,7 +352,8 @@ gdt:
352 * Also sysret mandates a special GDT layout 352 * Also sysret mandates a special GDT layout
353 */ 353 */
354 354
355.align PAGE_SIZE 355 .section .data.page_aligned, "aw"
356 .align PAGE_SIZE
356 357
357/* The TLS descriptors are currently at a different place compared to i386. 358/* The TLS descriptors are currently at a different place compared to i386.
358 Hopefully nobody expects them at a fixed place (Wine?) */ 359 Hopefully nobody expects them at a fixed place (Wine?) */
@@ -378,9 +379,12 @@ gdt_end:
378 /* zero the remaining page */ 379 /* zero the remaining page */
379 .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0 380 .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
380 381
381ENTRY(idt_table) 382 .section .bss, "aw", @nobits
382 .rept 256 383 .align L1_CACHE_BYTES
383 .quad 0 384ENTRY(idt_table)
384 .quad 0 385 .skip 256 * 16
385 .endr
386 386
387 .section .bss.page_aligned, "aw", @nobits
388 .align PAGE_SIZE
389ENTRY(empty_zero_page)
390 .skip PAGE_SIZE
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index ffed464e6b12..77b4c608cca0 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -50,7 +50,7 @@ static int no_timer_check;
50 50
51int disable_timer_pin_1 __initdata; 51int disable_timer_pin_1 __initdata;
52 52
53int timer_over_8254 __initdata = 1; 53int timer_over_8254 __initdata = 0;
54 54
55/* Where if anywhere is the i8259 connect in external int mode */ 55/* Where if anywhere is the i8259 connect in external int mode */
56static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; 56static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
@@ -310,7 +310,7 @@ void __init check_ioapic(void)
310 force_iommu) && 310 force_iommu) &&
311 !iommu_aperture_allowed) { 311 !iommu_aperture_allowed) {
312 printk(KERN_INFO 312 printk(KERN_INFO
313 "Looks like a VIA chipset. Disabling IOMMU. Overwrite with \"iommu=allowed\"\n"); 313 "Looks like a VIA chipset. Disabling IOMMU. Override with \"iommu=allowed\"\n");
314 iommu_aperture_disabled = 1; 314 iommu_aperture_disabled = 1;
315 } 315 }
316#endif 316#endif
@@ -1848,7 +1848,7 @@ static inline void check_timer(void)
1848 */ 1848 */
1849 setup_ExtINT_IRQ0_pin(apic2, pin2, vector); 1849 setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
1850 if (timer_irq_works()) { 1850 if (timer_irq_works()) {
1851 printk("works.\n"); 1851 apic_printk(APIC_VERBOSE," works.\n");
1852 nmi_watchdog_default(); 1852 nmi_watchdog_default();
1853 if (nmi_watchdog == NMI_IO_APIC) { 1853 if (nmi_watchdog == NMI_IO_APIC) {
1854 setup_nmi(); 1854 setup_nmi();
@@ -1860,7 +1860,7 @@ static inline void check_timer(void)
1860 */ 1860 */
1861 clear_IO_APIC_pin(apic2, pin2); 1861 clear_IO_APIC_pin(apic2, pin2);
1862 } 1862 }
1863 printk(" failed.\n"); 1863 apic_printk(APIC_VERBOSE," failed.\n");
1864 1864
1865 if (nmi_watchdog == NMI_IO_APIC) { 1865 if (nmi_watchdog == NMI_IO_APIC) {
1866 printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n"); 1866 printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
@@ -1875,7 +1875,7 @@ static inline void check_timer(void)
1875 enable_8259A_irq(0); 1875 enable_8259A_irq(0);
1876 1876
1877 if (timer_irq_works()) { 1877 if (timer_irq_works()) {
1878 apic_printk(APIC_QUIET, " works.\n"); 1878 apic_printk(APIC_VERBOSE," works.\n");
1879 return; 1879 return;
1880 } 1880 }
1881 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector); 1881 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c
index 30d2a1e545fe..d8bd0b345b1e 100644
--- a/arch/x86_64/kernel/irq.c
+++ b/arch/x86_64/kernel/irq.c
@@ -38,9 +38,8 @@ int show_interrupts(struct seq_file *p, void *v)
38 38
39 if (i == 0) { 39 if (i == 0) {
40 seq_printf(p, " "); 40 seq_printf(p, " ");
41 for (j=0; j<NR_CPUS; j++) 41 for_each_online_cpu(j)
42 if (cpu_online(j)) 42 seq_printf(p, "CPU%d ",j);
43 seq_printf(p, "CPU%d ",j);
44 seq_putc(p, '\n'); 43 seq_putc(p, '\n');
45 } 44 }
46 45
@@ -53,10 +52,8 @@ int show_interrupts(struct seq_file *p, void *v)
53#ifndef CONFIG_SMP 52#ifndef CONFIG_SMP
54 seq_printf(p, "%10u ", kstat_irqs(i)); 53 seq_printf(p, "%10u ", kstat_irqs(i));
55#else 54#else
56 for (j=0; j<NR_CPUS; j++) 55 for_each_online_cpu(j)
57 if (cpu_online(j)) 56 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
58 seq_printf(p, "%10u ",
59 kstat_cpu(j).irqs[i]);
60#endif 57#endif
61 seq_printf(p, " %14s", irq_desc[i].handler->typename); 58 seq_printf(p, " %14s", irq_desc[i].handler->typename);
62 59
@@ -68,15 +65,13 @@ skip:
68 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 65 spin_unlock_irqrestore(&irq_desc[i].lock, flags);
69 } else if (i == NR_IRQS) { 66 } else if (i == NR_IRQS) {
70 seq_printf(p, "NMI: "); 67 seq_printf(p, "NMI: ");
71 for (j = 0; j < NR_CPUS; j++) 68 for_each_online_cpu(j)
72 if (cpu_online(j)) 69 seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
73 seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
74 seq_putc(p, '\n'); 70 seq_putc(p, '\n');
75#ifdef CONFIG_X86_LOCAL_APIC 71#ifdef CONFIG_X86_LOCAL_APIC
76 seq_printf(p, "LOC: "); 72 seq_printf(p, "LOC: ");
77 for (j = 0; j < NR_CPUS; j++) 73 for_each_online_cpu(j)
78 if (cpu_online(j)) 74 seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
79 seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
80 seq_putc(p, '\n'); 75 seq_putc(p, '\n');
81#endif 76#endif
82 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); 77 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
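
The irq.c hunks above collapse open-coded "for (j = 0; j < NR_CPUS; j++) if (cpu_online(j))" loops into the for_each_online_cpu() iterator. A minimal sketch of the resulting pattern, assuming the 2.6-era linux/cpumask.h; the function below is illustrative and not part of the patch:

    #include <linux/kernel.h>
    #include <linux/cpumask.h>

    static void print_online_cpus(void)
    {
            int cpu;

            /* The iterator already skips offline CPUs, so the body needs
             * no explicit cpu_online() check. */
            for_each_online_cpu(cpu)
                    printk("CPU%d\n", cpu);
    }
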
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index 8b866a8572cf..14f0ced613b6 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -222,9 +222,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
222 222
223void __kprobes arch_remove_kprobe(struct kprobe *p) 223void __kprobes arch_remove_kprobe(struct kprobe *p)
224{ 224{
225 down(&kprobe_mutex); 225 mutex_lock(&kprobe_mutex);
226 free_insn_slot(p->ainsn.insn); 226 free_insn_slot(p->ainsn.insn);
227 up(&kprobe_mutex); 227 mutex_unlock(&kprobe_mutex);
228} 228}
229 229
230static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb) 230static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
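
arch_remove_kprobe() now takes kprobe_mutex through the mutex API rather than semaphore-style down()/up(), matching the struct mutex primitives introduced in 2.6.16. A minimal sketch of the locking pattern under an assumed placeholder lock, not the real kprobe_mutex:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(example_lock);      /* placeholder, not kprobe_mutex */

    static void guarded_operation(void)
    {
            mutex_lock(&example_lock);
            /* critical section: single owner, sleeping allowed */
            mutex_unlock(&example_lock);
    }
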
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index b8b9529fa89e..04282ef9fbd4 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -139,8 +139,7 @@ static void mce_panic(char *msg, struct mce *backup, unsigned long start)
139 139
140static int mce_available(struct cpuinfo_x86 *c) 140static int mce_available(struct cpuinfo_x86 *c)
141{ 141{
142 return test_bit(X86_FEATURE_MCE, &c->x86_capability) && 142 return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
143 test_bit(X86_FEATURE_MCA, &c->x86_capability);
144} 143}
145 144
146static inline void mce_get_rip(struct mce *m, struct pt_regs *regs) 145static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index 9013a90b5c2e..b17cf3eba359 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -106,11 +106,11 @@ static int __init mpf_checksum(unsigned char *mp, int len)
106 return sum & 0xFF; 106 return sum & 0xFF;
107} 107}
108 108
109static void __init MP_processor_info (struct mpc_config_processor *m) 109static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
110{ 110{
111 int cpu; 111 int cpu;
112 unsigned char ver; 112 unsigned char ver;
113 static int found_bsp=0; 113 cpumask_t tmp_map;
114 114
115 if (!(m->mpc_cpuflag & CPU_ENABLED)) { 115 if (!(m->mpc_cpuflag & CPU_ENABLED)) {
116 disabled_cpus++; 116 disabled_cpus++;
@@ -133,8 +133,10 @@ static void __init MP_processor_info (struct mpc_config_processor *m)
133 return; 133 return;
134 } 134 }
135 135
136 cpu = num_processors++; 136 num_processors++;
137 137 cpus_complement(tmp_map, cpu_present_map);
138 cpu = first_cpu(tmp_map);
139
138#if MAX_APICS < 255 140#if MAX_APICS < 255
139 if ((int)m->mpc_apicid > MAX_APICS) { 141 if ((int)m->mpc_apicid > MAX_APICS) {
140 printk(KERN_ERR "Processor #%d INVALID. (Max ID: %d).\n", 142 printk(KERN_ERR "Processor #%d INVALID. (Max ID: %d).\n",
@@ -160,12 +162,7 @@ static void __init MP_processor_info (struct mpc_config_processor *m)
160 * entry is BSP, and so on. 162 * entry is BSP, and so on.
161 */ 163 */
162 cpu = 0; 164 cpu = 0;
163 165 }
164 bios_cpu_apicid[0] = m->mpc_apicid;
165 x86_cpu_to_apicid[0] = m->mpc_apicid;
166 found_bsp = 1;
167 } else
168 cpu = num_processors - found_bsp;
169 bios_cpu_apicid[cpu] = m->mpc_apicid; 166 bios_cpu_apicid[cpu] = m->mpc_apicid;
170 x86_cpu_to_apicid[cpu] = m->mpc_apicid; 167 x86_cpu_to_apicid[cpu] = m->mpc_apicid;
171 168
@@ -691,7 +688,7 @@ void __init mp_register_lapic_address (
691} 688}
692 689
693 690
694void __init mp_register_lapic ( 691void __cpuinit mp_register_lapic (
695 u8 id, 692 u8 id,
696 u8 enabled) 693 u8 enabled)
697{ 694{
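
MP_processor_info() now picks the lowest CPU id not yet set in cpu_present_map instead of counting with num_processors and a found_bsp offset; the setup.c hunk further down reserves id 0 for the BSP before parsing. A sketch of that selection step using the same cpumask helpers; the wrapper name is made up for illustration:

    #include <linux/cpumask.h>

    /* Lowest logical CPU id not already marked present. */
    static int next_free_cpu_id(void)
    {
            cpumask_t tmp_map;

            cpus_complement(tmp_map, cpu_present_map);
            return first_cpu(tmp_map);
    }
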
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 5bf17e41cd2d..d9e4067faf05 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -162,9 +162,7 @@ int __init check_nmi_watchdog (void)
162 local_irq_enable(); 162 local_irq_enable();
163 mdelay((10*1000)/nmi_hz); // wait 10 ticks 163 mdelay((10*1000)/nmi_hz); // wait 10 ticks
164 164
165 for (cpu = 0; cpu < NR_CPUS; cpu++) { 165 for_each_online_cpu(cpu) {
166 if (!cpu_online(cpu))
167 continue;
168 if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) { 166 if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
169 endflag = 1; 167 endflag = 1;
170 printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n", 168 printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
@@ -536,6 +534,7 @@ asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code)
536 534
537void set_nmi_callback(nmi_callback_t callback) 535void set_nmi_callback(nmi_callback_t callback)
538{ 536{
537 vmalloc_sync_all();
539 rcu_assign_pointer(nmi_callback, callback); 538 rcu_assign_pointer(nmi_callback, callback);
540} 539}
541 540
diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c
index 4ed391edd47a..03c9eeedb0f3 100644
--- a/arch/x86_64/kernel/pci-dma.c
+++ b/arch/x86_64/kernel/pci-dma.c
@@ -73,6 +73,9 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
73 if (dma_mask == 0) 73 if (dma_mask == 0)
74 dma_mask = 0xffffffff; 74 dma_mask = 0xffffffff;
75 75
76 /* Don't invoke OOM killer */
77 gfp |= __GFP_NORETRY;
78
76 /* Kludge to make it bug-to-bug compatible with i386. i386 79 /* Kludge to make it bug-to-bug compatible with i386. i386
77 uses the normal dma_mask for alloc_coherent. */ 80 uses the normal dma_mask for alloc_coherent. */
78 dma_mask &= *dev->dma_mask; 81 dma_mask &= *dev->dma_mask;
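
Adding __GFP_NORETRY makes the allocator return NULL rather than retry hard and possibly invoke the OOM killer, so dma_alloc_coherent() callers can fall back gracefully. A tiny hedged sketch of the flag in an ordinary allocation; the wrapper is hypothetical:

    #include <linux/slab.h>

    /* Fail the allocation instead of retrying into the OOM killer. */
    static void *alloc_no_oom(size_t size, gfp_t gfp)
    {
            return kmalloc(size, gfp | __GFP_NORETRY);
    }
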
diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c
index 0c3f052ba6ce..a6c01e121266 100644
--- a/arch/x86_64/kernel/pci-gart.c
+++ b/arch/x86_64/kernel/pci-gart.c
@@ -65,9 +65,7 @@ static u32 gart_unmapped_entry;
65 65
66#define for_all_nb(dev) \ 66#define for_all_nb(dev) \
67 dev = NULL; \ 67 dev = NULL; \
68 while ((dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1103, dev))!=NULL)\ 68 while ((dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1103, dev))!=NULL)
69 if (dev->bus->number == 0 && \
70 (PCI_SLOT(dev->devfn) >= 24) && (PCI_SLOT(dev->devfn) <= 31))
71 69
72static struct pci_dev *northbridges[MAX_NB]; 70static struct pci_dev *northbridges[MAX_NB];
73static u32 northbridge_flush_word[MAX_NB]; 71static u32 northbridge_flush_word[MAX_NB];
@@ -148,9 +146,12 @@ static void flush_gart(struct device *dev)
148 if (!northbridges[i]) 146 if (!northbridges[i])
149 continue; 147 continue;
150 /* Make sure the hardware actually executed the flush. */ 148 /* Make sure the hardware actually executed the flush. */
151 do { 149 for (;;) {
152 pci_read_config_dword(northbridges[i], 0x9c, &w); 150 pci_read_config_dword(northbridges[i], 0x9c, &w);
153 } while (w & 1); 151 if (!(w & 1))
152 break;
153 cpu_relax();
154 }
154 } 155 }
155 if (!flushed) 156 if (!flushed)
156 printk("nothing to flush?\n"); 157 printk("nothing to flush?\n");
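
flush_gart() now polls the northbridge flush-status bit in an explicit loop with cpu_relax() instead of a tight do/while. A minimal sketch of that polling idiom, assuming a valid northbridge pci_dev and the same 0x9c status register used above:

    #include <linux/pci.h>
    #include <asm/processor.h>

    static void wait_for_gart_flush(struct pci_dev *nb)
    {
            u32 w;

            for (;;) {
                    pci_read_config_dword(nb, 0x9c, &w);
                    if (!(w & 1))           /* flush bit cleared: done */
                            break;
                    cpu_relax();            /* spin-wait hint to the CPU */
            }
    }
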
diff --git a/arch/x86_64/kernel/pmtimer.c b/arch/x86_64/kernel/pmtimer.c
index 5c51d10408a6..ee5ee4891f3d 100644
--- a/arch/x86_64/kernel/pmtimer.c
+++ b/arch/x86_64/kernel/pmtimer.c
@@ -86,7 +86,7 @@ static unsigned pmtimer_wait_tick(void)
86 for (a = b = inl(pmtmr_ioport) & ACPI_PM_MASK; 86 for (a = b = inl(pmtmr_ioport) & ACPI_PM_MASK;
87 a == b; 87 a == b;
88 b = inl(pmtmr_ioport) & ACPI_PM_MASK) 88 b = inl(pmtmr_ioport) & ACPI_PM_MASK)
89 ; 89 cpu_relax();
90 return b; 90 return b;
91} 91}
92 92
@@ -97,6 +97,7 @@ void pmtimer_wait(unsigned us)
97 a = pmtimer_wait_tick(); 97 a = pmtimer_wait_tick();
98 do { 98 do {
99 b = inl(pmtmr_ioport); 99 b = inl(pmtmr_ioport);
100 cpu_relax();
100 } while (cyc2us(b - a) < us); 101 } while (cyc2us(b - a) < us);
101} 102}
102 103
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 22a05dec81a2..81111835722d 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -114,7 +114,7 @@ void exit_idle(void)
114 * We use this if we don't have any better 114 * We use this if we don't have any better
115 * idle routine.. 115 * idle routine..
116 */ 116 */
117void default_idle(void) 117static void default_idle(void)
118{ 118{
119 local_irq_enable(); 119 local_irq_enable();
120 120
@@ -508,7 +508,7 @@ out:
508/* 508/*
509 * This special macro can be used to load a debugging register 509 * This special macro can be used to load a debugging register
510 */ 510 */
511#define loaddebug(thread,r) set_debug(thread->debugreg ## r, r) 511#define loaddebug(thread,r) set_debugreg(thread->debugreg ## r, r)
512 512
513/* 513/*
514 * switch_to(x,y) should switch tasks from x to y. 514 * switch_to(x,y) should switch tasks from x to y.
@@ -527,8 +527,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
527 int cpu = smp_processor_id(); 527 int cpu = smp_processor_id();
528 struct tss_struct *tss = &per_cpu(init_tss, cpu); 528 struct tss_struct *tss = &per_cpu(init_tss, cpu);
529 529
530 unlazy_fpu(prev_p);
531
532 /* 530 /*
533 * Reload esp0, LDT and the page table pointer: 531 * Reload esp0, LDT and the page table pointer:
534 */ 532 */
@@ -586,11 +584,14 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
586 } 584 }
587 585
588 /* 586 /*
589 * Switch the PDA context. 587 * Switch the PDA and FPU contexts.
590 */ 588 */
591 prev->userrsp = read_pda(oldrsp); 589 prev->userrsp = read_pda(oldrsp);
592 write_pda(oldrsp, next->userrsp); 590 write_pda(oldrsp, next->userrsp);
593 write_pda(pcurrent, next_p); 591 write_pda(pcurrent, next_p);
592 /* This must be here to ensure both math_state_restore() and
593 kernel_fpu_begin() work consistently. */
594 unlazy_fpu(prev_p);
594 write_pda(kernelstack, 595 write_pda(kernelstack,
595 task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET); 596 task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
596 597
diff --git a/arch/x86_64/kernel/ptrace.c b/arch/x86_64/kernel/ptrace.c
index 53205622351c..d44b2c1e63a6 100644
--- a/arch/x86_64/kernel/ptrace.c
+++ b/arch/x86_64/kernel/ptrace.c
@@ -420,9 +420,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
420 case offsetof(struct user, u_debugreg[7]): 420 case offsetof(struct user, u_debugreg[7]):
421 /* See arch/i386/kernel/ptrace.c for an explanation of 421 /* See arch/i386/kernel/ptrace.c for an explanation of
422 * this awkward check.*/ 422 * this awkward check.*/
423 data &= ~DR_CONTROL_RESERVED; 423 data &= ~DR_CONTROL_RESERVED;
424 for(i=0; i<4; i++) 424 for(i=0; i<4; i++)
425 if ((0x5454 >> ((data >> (16 + 4*i)) & 0xf)) & 1) 425 if ((0x5554 >> ((data >> (16 + 4*i)) & 0xf)) & 1)
426 break; 426 break;
427 if (i == 4) { 427 if (i == 4) {
428 child->thread.debugreg7 = data; 428 child->thread.debugreg7 = data;
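
As I read the DR7 check above, each of the four hardware breakpoints contributes a 4-bit length/type field at bits 16+4*i, and the constant is a bitmap of disallowed field values; going from 0x5454 to 0x5554 additionally rejects value 8, an 8-byte length paired with an instruction-execution breakpoint. A small userspace program that just prints what the new mask accepts and rejects:

    #include <stdio.h>

    int main(void)
    {
            unsigned int mask = 0x5554; /* bit n set => len/type value n rejected */
            unsigned int v;

            for (v = 0; v < 16; v++)
                    printf("len/type %2u: %s\n", v,
                           (mask >> v) & 1 ? "rejected" : "allowed");
            return 0;
    }
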
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index aa55e3cec665..a57eec8311a7 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -46,6 +46,7 @@
46#include <linux/cpufreq.h> 46#include <linux/cpufreq.h>
47#include <linux/dmi.h> 47#include <linux/dmi.h>
48#include <linux/dma-mapping.h> 48#include <linux/dma-mapping.h>
49#include <linux/ctype.h>
49 50
50#include <asm/mtrr.h> 51#include <asm/mtrr.h>
51#include <asm/uaccess.h> 52#include <asm/uaccess.h>
@@ -67,6 +68,7 @@
67#include <asm/swiotlb.h> 68#include <asm/swiotlb.h>
68#include <asm/sections.h> 69#include <asm/sections.h>
69#include <asm/gart-mapping.h> 70#include <asm/gart-mapping.h>
71#include <asm/dmi.h>
70 72
71/* 73/*
72 * Machine setup.. 74 * Machine setup..
@@ -91,6 +93,12 @@ int bootloader_type;
91 93
92unsigned long saved_video_mode; 94unsigned long saved_video_mode;
93 95
96/*
97 * Early DMI memory
98 */
99int dmi_alloc_index;
100char dmi_alloc_data[DMI_MAX_DATA];
101
94/* 102/*
95 * Setup options 103 * Setup options
96 */ 104 */
@@ -270,6 +278,13 @@ static void __init probe_roms(void)
270 } 278 }
271} 279}
272 280
281/* Check for full argument with no trailing characters */
282static int fullarg(char *p, char *arg)
283{
284 int l = strlen(arg);
285 return !memcmp(p, arg, l) && (p[l] == 0 || isspace(p[l]));
286}
287
273static __init void parse_cmdline_early (char ** cmdline_p) 288static __init void parse_cmdline_early (char ** cmdline_p)
274{ 289{
275 char c = ' ', *to = command_line, *from = COMMAND_LINE; 290 char c = ' ', *to = command_line, *from = COMMAND_LINE;
@@ -293,10 +308,10 @@ static __init void parse_cmdline_early (char ** cmdline_p)
293#endif 308#endif
294#ifdef CONFIG_ACPI 309#ifdef CONFIG_ACPI
295 /* "acpi=off" disables both ACPI table parsing and interpreter init */ 310 /* "acpi=off" disables both ACPI table parsing and interpreter init */
296 if (!memcmp(from, "acpi=off", 8)) 311 if (fullarg(from,"acpi=off"))
297 disable_acpi(); 312 disable_acpi();
298 313
299 if (!memcmp(from, "acpi=force", 10)) { 314 if (fullarg(from, "acpi=force")) {
300 /* add later when we do DMI horrors: */ 315 /* add later when we do DMI horrors: */
301 acpi_force = 1; 316 acpi_force = 1;
302 acpi_disabled = 0; 317 acpi_disabled = 0;
@@ -304,52 +319,47 @@ static __init void parse_cmdline_early (char ** cmdline_p)
304 319
305 /* acpi=ht just means: do ACPI MADT parsing 320 /* acpi=ht just means: do ACPI MADT parsing
306 at bootup, but don't enable the full ACPI interpreter */ 321 at bootup, but don't enable the full ACPI interpreter */
307 if (!memcmp(from, "acpi=ht", 7)) { 322 if (fullarg(from, "acpi=ht")) {
308 if (!acpi_force) 323 if (!acpi_force)
309 disable_acpi(); 324 disable_acpi();
310 acpi_ht = 1; 325 acpi_ht = 1;
311 } 326 }
312 else if (!memcmp(from, "pci=noacpi", 10)) 327 else if (fullarg(from, "pci=noacpi"))
313 acpi_disable_pci(); 328 acpi_disable_pci();
314 else if (!memcmp(from, "acpi=noirq", 10)) 329 else if (fullarg(from, "acpi=noirq"))
315 acpi_noirq_set(); 330 acpi_noirq_set();
316 331
317 else if (!memcmp(from, "acpi_sci=edge", 13)) 332 else if (fullarg(from, "acpi_sci=edge"))
318 acpi_sci_flags.trigger = 1; 333 acpi_sci_flags.trigger = 1;
319 else if (!memcmp(from, "acpi_sci=level", 14)) 334 else if (fullarg(from, "acpi_sci=level"))
320 acpi_sci_flags.trigger = 3; 335 acpi_sci_flags.trigger = 3;
321 else if (!memcmp(from, "acpi_sci=high", 13)) 336 else if (fullarg(from, "acpi_sci=high"))
322 acpi_sci_flags.polarity = 1; 337 acpi_sci_flags.polarity = 1;
323 else if (!memcmp(from, "acpi_sci=low", 12)) 338 else if (fullarg(from, "acpi_sci=low"))
324 acpi_sci_flags.polarity = 3; 339 acpi_sci_flags.polarity = 3;
325 340
326 /* acpi=strict disables out-of-spec workarounds */ 341 /* acpi=strict disables out-of-spec workarounds */
327 else if (!memcmp(from, "acpi=strict", 11)) { 342 else if (fullarg(from, "acpi=strict")) {
328 acpi_strict = 1; 343 acpi_strict = 1;
329 } 344 }
330#ifdef CONFIG_X86_IO_APIC 345#ifdef CONFIG_X86_IO_APIC
331 else if (!memcmp(from, "acpi_skip_timer_override", 24)) 346 else if (fullarg(from, "acpi_skip_timer_override"))
332 acpi_skip_timer_override = 1; 347 acpi_skip_timer_override = 1;
333#endif 348#endif
334#endif 349#endif
335 350
336 if (!memcmp(from, "disable_timer_pin_1", 19)) 351 if (fullarg(from, "disable_timer_pin_1"))
337 disable_timer_pin_1 = 1; 352 disable_timer_pin_1 = 1;
338 if (!memcmp(from, "enable_timer_pin_1", 18)) 353 if (fullarg(from, "enable_timer_pin_1"))
339 disable_timer_pin_1 = -1; 354 disable_timer_pin_1 = -1;
340 355
341 if (!memcmp(from, "nolapic", 7) || 356 if (fullarg(from, "nolapic") || fullarg(from, "disableapic"))
342 !memcmp(from, "disableapic", 11))
343 disable_apic = 1; 357 disable_apic = 1;
344 358
345 /* Don't confuse with noapictimer */ 359 if (fullarg(from, "noapic"))
346 if (!memcmp(from, "noapic", 6) &&
347 (from[6] == ' ' || from[6] == 0))
348 skip_ioapic_setup = 1; 360 skip_ioapic_setup = 1;
349 361
350 /* Make sure to not confuse with apic= */ 362 if (fullarg(from,"apic")) {
351 if (!memcmp(from, "apic", 4) &&
352 (from[4] == ' ' || from[4] == 0)) {
353 skip_ioapic_setup = 0; 363 skip_ioapic_setup = 0;
354 ioapic_force = 1; 364 ioapic_force = 1;
355 } 365 }
@@ -388,7 +398,7 @@ static __init void parse_cmdline_early (char ** cmdline_p)
388 iommu_setup(from+6); 398 iommu_setup(from+6);
389 } 399 }
390 400
391 if (!memcmp(from,"oops=panic", 10)) 401 if (fullarg(from,"oops=panic"))
392 panic_on_oops = 1; 402 panic_on_oops = 1;
393 403
394 if (!memcmp(from, "noexec=", 7)) 404 if (!memcmp(from, "noexec=", 7))
@@ -611,11 +621,14 @@ void __init setup_arch(char **cmdline_p)
611 * we are rounding upwards: 621 * we are rounding upwards:
612 */ 622 */
613 end_pfn = e820_end_of_ram(); 623 end_pfn = e820_end_of_ram();
624 num_physpages = end_pfn; /* for pfn_valid */
614 625
615 check_efer(); 626 check_efer();
616 627
617 init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT)); 628 init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
618 629
630 dmi_scan_machine();
631
619 zap_low_mappings(0); 632 zap_low_mappings(0);
620 633
621#ifdef CONFIG_ACPI 634#ifdef CONFIG_ACPI
@@ -708,6 +721,12 @@ void __init setup_arch(char **cmdline_p)
708 721
709 check_ioapic(); 722 check_ioapic();
710 723
724 /*
725 * set this early, so we dont allocate cpu0
726 * if MADT list doesnt list BSP first
727 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
728 */
729 cpu_set(0, cpu_present_map);
711#ifdef CONFIG_ACPI 730#ifdef CONFIG_ACPI
712 /* 731 /*
713 * Read APIC and some other early information from ACPI tables. 732 * Read APIC and some other early information from ACPI tables.
@@ -836,7 +855,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
836 unsigned bits; 855 unsigned bits;
837#ifdef CONFIG_NUMA 856#ifdef CONFIG_NUMA
838 int node = 0; 857 int node = 0;
839 unsigned apicid = phys_proc_id[cpu]; 858 unsigned apicid = hard_smp_processor_id();
840#endif 859#endif
841 860
842 bits = 0; 861 bits = 0;
@@ -846,7 +865,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
846 /* Low order bits define the core id (index of core in socket) */ 865 /* Low order bits define the core id (index of core in socket) */
847 cpu_core_id[cpu] = phys_proc_id[cpu] & ((1 << bits)-1); 866 cpu_core_id[cpu] = phys_proc_id[cpu] & ((1 << bits)-1);
848 /* Convert the APIC ID into the socket ID */ 867 /* Convert the APIC ID into the socket ID */
849 phys_proc_id[cpu] >>= bits; 868 phys_proc_id[cpu] = phys_pkg_id(bits);
850 869
851#ifdef CONFIG_NUMA 870#ifdef CONFIG_NUMA
852 node = phys_proc_id[cpu]; 871 node = phys_proc_id[cpu];
@@ -872,8 +891,8 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
872 } 891 }
873 numa_set_node(cpu, node); 892 numa_set_node(cpu, node);
874 893
875 printk(KERN_INFO "CPU %d(%d) -> Node %d -> Core %d\n", 894 printk(KERN_INFO "CPU %d/%x(%d) -> Node %d -> Core %d\n",
876 cpu, c->x86_max_cores, node, cpu_core_id[cpu]); 895 cpu, apicid, c->x86_max_cores, node, cpu_core_id[cpu]);
877#endif 896#endif
878#endif 897#endif
879} 898}
@@ -927,8 +946,6 @@ static int __init init_amd(struct cpuinfo_x86 *c)
927 946
928 if (c->extended_cpuid_level >= 0x80000008) { 947 if (c->extended_cpuid_level >= 0x80000008) {
929 c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1; 948 c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
930 if (c->x86_max_cores & (c->x86_max_cores - 1))
931 c->x86_max_cores = 1;
932 949
933 amd_detect_cmp(c); 950 amd_detect_cmp(c);
934 } 951 }
@@ -1261,7 +1278,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1261 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1278 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1262 1279
1263 /* Intel-defined (#2) */ 1280 /* Intel-defined (#2) */
1264 "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", NULL, "est", 1281 "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
1265 "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL, 1282 "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
1266 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1283 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1267 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1284 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
@@ -1344,8 +1361,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1344 { 1361 {
1345 int i; 1362 int i;
1346 for ( i = 0 ; i < 32*NCAPINTS ; i++ ) 1363 for ( i = 0 ; i < 32*NCAPINTS ; i++ )
1347 if ( test_bit(i, &c->x86_capability) && 1364 if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
1348 x86_cap_flags[i] != NULL )
1349 seq_printf(m, " %s", x86_cap_flags[i]); 1365 seq_printf(m, " %s", x86_cap_flags[i]);
1350 } 1366 }
1351 1367
@@ -1403,10 +1419,3 @@ struct seq_operations cpuinfo_op = {
1403 .show = show_cpuinfo, 1419 .show = show_cpuinfo,
1404}; 1420};
1405 1421
1406static int __init run_dmi_scan(void)
1407{
1408 dmi_scan_machine();
1409 return 0;
1410}
1411core_initcall(run_dmi_scan);
1412
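
The new fullarg() helper matches a boot option only when it is followed by end-of-string or whitespace, which is why bare "apic" and "noapic" no longer need hand-rolled trailing-character checks. A standalone userspace model of the rule:

    #include <stdio.h>
    #include <string.h>
    #include <ctype.h>

    static int fullarg(const char *p, const char *arg)
    {
            size_t l = strlen(arg);

            return !memcmp(p, arg, l) &&
                   (p[l] == '\0' || isspace((unsigned char)p[l]));
    }

    int main(void)
    {
            printf("%d\n", fullarg("apic nolapic", "apic"));   /* 1: exact token */
            printf("%d\n", fullarg("apic=verbose", "apic"));   /* 0: prefix only */
            return 0;
    }
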
diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c
index 70f1bb808a20..eabdb63fec31 100644
--- a/arch/x86_64/kernel/setup64.c
+++ b/arch/x86_64/kernel/setup64.c
@@ -33,7 +33,7 @@ cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
33struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly; 33struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly;
34struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned; 34struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned;
35 35
36struct desc_ptr idt_descr = { 256 * 16, (unsigned long) idt_table }; 36struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
37 37
38char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned"))); 38char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
39 39
@@ -59,7 +59,7 @@ int __init nonx_setup(char *str)
59} 59}
60__setup("noexec=", nonx_setup); /* parsed early actually */ 60__setup("noexec=", nonx_setup); /* parsed early actually */
61 61
62int force_personality32 = READ_IMPLIES_EXEC; 62int force_personality32 = 0;
63 63
64/* noexec32=on|off 64/* noexec32=on|off
65Control non executable heap for 32bit processes. 65Control non executable heap for 32bit processes.
@@ -248,7 +248,7 @@ void __cpuinit cpu_init (void)
248 switch (v + 1) { 248 switch (v + 1) {
249#if DEBUG_STKSZ > EXCEPTION_STKSZ 249#if DEBUG_STKSZ > EXCEPTION_STKSZ
250 case DEBUG_STACK: 250 case DEBUG_STACK:
251 cpu_pda[cpu].debugstack = (unsigned long)estacks; 251 cpu_pda(cpu)->debugstack = (unsigned long)estacks;
252 estacks += DEBUG_STKSZ; 252 estacks += DEBUG_STKSZ;
253 break; 253 break;
254#endif 254#endif
@@ -281,12 +281,12 @@ void __cpuinit cpu_init (void)
281 * Clear all 6 debug registers: 281 * Clear all 6 debug registers:
282 */ 282 */
283 283
284 set_debug(0UL, 0); 284 set_debugreg(0UL, 0);
285 set_debug(0UL, 1); 285 set_debugreg(0UL, 1);
286 set_debug(0UL, 2); 286 set_debugreg(0UL, 2);
287 set_debug(0UL, 3); 287 set_debugreg(0UL, 3);
288 set_debug(0UL, 6); 288 set_debugreg(0UL, 6);
289 set_debug(0UL, 7); 289 set_debugreg(0UL, 7);
290 290
291 fpu_init(); 291 fpu_init();
292} 292}
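
The idt_descr change here and the gdt_end-cpu_gdt_table-1 edits in head.S follow the same rule: an x86 GDTR/IDTR limit field holds the offset of the last valid byte, that is, the table size minus one. A trivial userspace illustration of the arithmetic for 256 sixteen-byte IDT gates:

    #include <stdio.h>

    int main(void)
    {
            unsigned int entries = 256, entry_size = 16;   /* 16-byte 64-bit gates */

            printf("IDT size  = %u bytes\n", entries * entry_size);   /* 4096 */
            printf("IDT limit = %u\n", entries * entry_size - 1);     /* 4095 */
            return 0;
    }
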
diff --git a/arch/x86_64/kernel/signal.c b/arch/x86_64/kernel/signal.c
index 5876df116c92..e5f5ce7909a3 100644
--- a/arch/x86_64/kernel/signal.c
+++ b/arch/x86_64/kernel/signal.c
@@ -443,9 +443,6 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
443 if (!user_mode(regs)) 443 if (!user_mode(regs))
444 return 1; 444 return 1;
445 445
446 if (try_to_freeze())
447 goto no_signal;
448
449 if (!oldset) 446 if (!oldset)
450 oldset = &current->blocked; 447 oldset = &current->blocked;
451 448
@@ -463,7 +460,6 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
463 return handle_signal(signr, &info, &ka, oldset, regs); 460 return handle_signal(signr, &info, &ka, oldset, regs);
464 } 461 }
465 462
466 no_signal:
467 /* Did we come from a system call? */ 463 /* Did we come from a system call? */
468 if ((long)regs->orig_rax >= 0) { 464 if ((long)regs->orig_rax >= 0) {
469 /* Restart the system call - no handlers present */ 465 /* Restart the system call - no handlers present */
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index 19ef012b1f17..4a6628b14d99 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -75,7 +75,7 @@ static inline void leave_mm(int cpu)
75{ 75{
76 if (read_pda(mmu_state) == TLBSTATE_OK) 76 if (read_pda(mmu_state) == TLBSTATE_OK)
77 BUG(); 77 BUG();
78 clear_bit(cpu, &read_pda(active_mm)->cpu_vm_mask); 78 cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
79 load_cr3(swapper_pg_dir); 79 load_cr3(swapper_pg_dir);
80} 80}
81 81
@@ -85,7 +85,7 @@ static inline void leave_mm(int cpu)
85 * [cpu0: the cpu that switches] 85 * [cpu0: the cpu that switches]
86 * 1) switch_mm() either 1a) or 1b) 86 * 1) switch_mm() either 1a) or 1b)
87 * 1a) thread switch to a different mm 87 * 1a) thread switch to a different mm
88 * 1a1) clear_bit(cpu, &old_mm->cpu_vm_mask); 88 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
89 * Stop ipi delivery for the old mm. This is not synchronized with 89 * Stop ipi delivery for the old mm. This is not synchronized with
90 * the other cpus, but smp_invalidate_interrupt ignore flush ipis 90 * the other cpus, but smp_invalidate_interrupt ignore flush ipis
91 * for the wrong mm, and in the worst case we perform a superfluous 91 * for the wrong mm, and in the worst case we perform a superfluous
@@ -95,7 +95,7 @@ static inline void leave_mm(int cpu)
95 * was in lazy tlb mode. 95 * was in lazy tlb mode.
96 * 1a3) update cpu active_mm 96 * 1a3) update cpu active_mm
97 * Now cpu0 accepts tlb flushes for the new mm. 97 * Now cpu0 accepts tlb flushes for the new mm.
98 * 1a4) set_bit(cpu, &new_mm->cpu_vm_mask); 98 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
99 * Now the other cpus will send tlb flush ipis. 99 * Now the other cpus will send tlb flush ipis.
100 * 1a4) change cr3. 100 * 1a4) change cr3.
101 * 1b) thread switch without mm change 101 * 1b) thread switch without mm change
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index ee5ce3d3cbc3..7f58fa682491 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -88,7 +88,8 @@ static inline unsigned int do_gettimeoffset_tsc(void)
88 unsigned long t; 88 unsigned long t;
89 unsigned long x; 89 unsigned long x;
90 t = get_cycles_sync(); 90 t = get_cycles_sync();
91 if (t < vxtime.last_tsc) t = vxtime.last_tsc; /* hack */ 91 if (t < vxtime.last_tsc)
92 t = vxtime.last_tsc; /* hack */
92 x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> 32; 93 x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> 32;
93 return x; 94 return x;
94} 95}
@@ -178,8 +179,9 @@ unsigned long profile_pc(struct pt_regs *regs)
178{ 179{
179 unsigned long pc = instruction_pointer(regs); 180 unsigned long pc = instruction_pointer(regs);
180 181
181 /* Assume the lock function has either no stack frame or only a single word. 182 /* Assume the lock function has either no stack frame or only a single
182 This checks if the address on the stack looks like a kernel text address. 183 word. This checks if the address on the stack looks like a kernel
184 text address.
183 There is a small window for false hits, but in that case the tick 185 There is a small window for false hits, but in that case the tick
184 is just accounted to the spinlock function. 186 is just accounted to the spinlock function.
185 Better would be to write these functions in assembler again 187 Better would be to write these functions in assembler again
@@ -242,17 +244,10 @@ static void set_rtc_mmss(unsigned long nowtime)
242 real_minutes += 30; /* correct for half hour time zone */ 244 real_minutes += 30; /* correct for half hour time zone */
243 real_minutes %= 60; 245 real_minutes %= 60;
244 246
245#if 0
246 /* AMD 8111 is a really bad time keeper and hits this regularly.
247 It probably was an attempt to avoid screwing up DST, but ignore
248 that for now. */
249 if (abs(real_minutes - cmos_minutes) >= 30) { 247 if (abs(real_minutes - cmos_minutes) >= 30) {
250 printk(KERN_WARNING "time.c: can't update CMOS clock " 248 printk(KERN_WARNING "time.c: can't update CMOS clock "
251 "from %d to %d\n", cmos_minutes, real_minutes); 249 "from %d to %d\n", cmos_minutes, real_minutes);
252 } else 250 } else {
253#endif
254
255 {
256 BIN_TO_BCD(real_seconds); 251 BIN_TO_BCD(real_seconds);
257 BIN_TO_BCD(real_minutes); 252 BIN_TO_BCD(real_minutes);
258 CMOS_WRITE(real_seconds, RTC_SECONDS); 253 CMOS_WRITE(real_seconds, RTC_SECONDS);
@@ -293,8 +288,7 @@ unsigned long long monotonic_clock(void)
293 this_offset = hpet_readl(HPET_COUNTER); 288 this_offset = hpet_readl(HPET_COUNTER);
294 } while (read_seqretry(&xtime_lock, seq)); 289 } while (read_seqretry(&xtime_lock, seq));
295 offset = (this_offset - last_offset); 290 offset = (this_offset - last_offset);
296 offset *=(NSEC_PER_SEC/HZ)/hpet_tick; 291 offset *= (NSEC_PER_SEC/HZ) / hpet_tick;
297 return base + offset;
298 } else { 292 } else {
299 do { 293 do {
300 seq = read_seqbegin(&xtime_lock); 294 seq = read_seqbegin(&xtime_lock);
@@ -303,50 +297,46 @@ unsigned long long monotonic_clock(void)
303 base = monotonic_base; 297 base = monotonic_base;
304 } while (read_seqretry(&xtime_lock, seq)); 298 } while (read_seqretry(&xtime_lock, seq));
305 this_offset = get_cycles_sync(); 299 this_offset = get_cycles_sync();
306 offset = (this_offset - last_offset)*1000/cpu_khz; 300 offset = (this_offset - last_offset)*1000 / cpu_khz;
307 return base + offset;
308 } 301 }
302 return base + offset;
309} 303}
310EXPORT_SYMBOL(monotonic_clock); 304EXPORT_SYMBOL(monotonic_clock);
311 305
312static noinline void handle_lost_ticks(int lost, struct pt_regs *regs) 306static noinline void handle_lost_ticks(int lost, struct pt_regs *regs)
313{ 307{
314 static long lost_count; 308 static long lost_count;
315 static int warned; 309 static int warned;
316 310 if (report_lost_ticks) {
317 if (report_lost_ticks) { 311 printk(KERN_WARNING "time.c: Lost %d timer tick(s)! ", lost);
318 printk(KERN_WARNING "time.c: Lost %d timer " 312 print_symbol("rip %s)\n", regs->rip);
319 "tick(s)! ", lost); 313 }
320 print_symbol("rip %s)\n", regs->rip); 314
321 } 315 if (lost_count == 1000 && !warned) {
322 316 printk(KERN_WARNING "warning: many lost ticks.\n"
323 if (lost_count == 1000 && !warned) { 317 KERN_WARNING "Your time source seems to be instable or "
324 printk(KERN_WARNING
325 "warning: many lost ticks.\n"
326 KERN_WARNING "Your time source seems to be instable or "
327 "some driver is hogging interupts\n"); 318 "some driver is hogging interupts\n");
328 print_symbol("rip %s\n", regs->rip); 319 print_symbol("rip %s\n", regs->rip);
329 if (vxtime.mode == VXTIME_TSC && vxtime.hpet_address) { 320 if (vxtime.mode == VXTIME_TSC && vxtime.hpet_address) {
330 printk(KERN_WARNING "Falling back to HPET\n"); 321 printk(KERN_WARNING "Falling back to HPET\n");
331 if (hpet_use_timer) 322 if (hpet_use_timer)
332 vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick; 323 vxtime.last = hpet_readl(HPET_T0_CMP) -
333 else 324 hpet_tick;
334 vxtime.last = hpet_readl(HPET_COUNTER); 325 else
335 vxtime.mode = VXTIME_HPET; 326 vxtime.last = hpet_readl(HPET_COUNTER);
336 do_gettimeoffset = do_gettimeoffset_hpet; 327 vxtime.mode = VXTIME_HPET;
337 } 328 do_gettimeoffset = do_gettimeoffset_hpet;
338 /* else should fall back to PIT, but code missing. */ 329 }
339 warned = 1; 330 /* else should fall back to PIT, but code missing. */
340 } else 331 warned = 1;
341 lost_count++; 332 } else
333 lost_count++;
342 334
343#ifdef CONFIG_CPU_FREQ 335#ifdef CONFIG_CPU_FREQ
344 /* In some cases the CPU can change frequency without us noticing 336 /* In some cases the CPU can change frequency without us noticing
345 (like going into thermal throttle) 337 Give cpufreq a change to catch up. */
346 Give cpufreq a change to catch up. */ 338 if ((lost_count+1) % 25 == 0)
347 if ((lost_count+1) % 25 == 0) { 339 cpufreq_delayed_get();
348 cpufreq_delayed_get();
349 }
350#endif 340#endif
351} 341}
352 342
@@ -354,7 +344,7 @@ void main_timer_handler(struct pt_regs *regs)
354{ 344{
355 static unsigned long rtc_update = 0; 345 static unsigned long rtc_update = 0;
356 unsigned long tsc; 346 unsigned long tsc;
357 int delay, offset = 0, lost = 0; 347 int delay = 0, offset = 0, lost = 0;
358 348
359/* 349/*
360 * Here we are in the timer irq handler. We have irqs locally disabled (so we 350 * Here we are in the timer irq handler. We have irqs locally disabled (so we
@@ -375,7 +365,7 @@ void main_timer_handler(struct pt_regs *regs)
375 */ 365 */
376 offset = hpet_readl(HPET_T0_CMP) - hpet_tick; 366 offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
377 delay = hpet_readl(HPET_COUNTER) - offset; 367 delay = hpet_readl(HPET_COUNTER) - offset;
378 } else { 368 } else if (!pmtmr_ioport) {
379 spin_lock(&i8253_lock); 369 spin_lock(&i8253_lock);
380 outb_p(0x00, 0x43); 370 outb_p(0x00, 0x43);
381 delay = inb_p(0x40); 371 delay = inb_p(0x40);
@@ -517,6 +507,7 @@ static unsigned long get_cmos_time(void)
517 unsigned int timeout = 1000000, year, mon, day, hour, min, sec; 507 unsigned int timeout = 1000000, year, mon, day, hour, min, sec;
518 unsigned char uip = 0, this = 0; 508 unsigned char uip = 0, this = 0;
519 unsigned long flags; 509 unsigned long flags;
510 unsigned extyear = 0;
520 511
521/* 512/*
522 * The Linux interpretation of the CMOS clock register contents: When the 513 * The Linux interpretation of the CMOS clock register contents: When the
@@ -545,12 +536,17 @@ static unsigned long get_cmos_time(void)
545 mon = CMOS_READ(RTC_MONTH); 536 mon = CMOS_READ(RTC_MONTH);
546 year = CMOS_READ(RTC_YEAR); 537 year = CMOS_READ(RTC_YEAR);
547 538
539#ifdef CONFIG_ACPI
540 if (acpi_fadt.revision >= FADT2_REVISION_ID && acpi_fadt.century)
541 extyear = CMOS_READ(acpi_fadt.century);
542#endif
543
548 spin_unlock_irqrestore(&rtc_lock, flags); 544 spin_unlock_irqrestore(&rtc_lock, flags);
549 545
550 /* 546 /*
551 * We know that x86-64 always uses BCD format, no need to check the 547 * We know that x86-64 always uses BCD format, no need to check the
552 * config register. 548 * config register.
553 */ 549 */
554 550
555 BCD_TO_BIN(sec); 551 BCD_TO_BIN(sec);
556 BCD_TO_BIN(min); 552 BCD_TO_BIN(min);
@@ -559,11 +555,17 @@ static unsigned long get_cmos_time(void)
559 BCD_TO_BIN(mon); 555 BCD_TO_BIN(mon);
560 BCD_TO_BIN(year); 556 BCD_TO_BIN(year);
561 557
562 /* 558 if (extyear) {
563 * x86-64 systems only exists since 2002. 559 BCD_TO_BIN(extyear);
564 * This will work up to Dec 31, 2100 560 year += extyear;
565 */ 561 printk(KERN_INFO "Extended CMOS year: %d\n", extyear);
566 year += 2000; 562 } else {
563 /*
564 * x86-64 systems only exists since 2002.
565 * This will work up to Dec 31, 2100
566 */
567 year += 2000;
568 }
567 569
568 return mktime(year, mon, day, hour, min, sec); 570 return mktime(year, mon, day, hour, min, sec);
569} 571}
@@ -606,7 +608,8 @@ static void cpufreq_delayed_get(void)
606 cpufreq_delayed_issched = 1; 608 cpufreq_delayed_issched = 1;
607 if (!warned) { 609 if (!warned) {
608 warned = 1; 610 warned = 1;
609 printk(KERN_DEBUG "Losing some ticks... checking if CPU frequency changed.\n"); 611 printk(KERN_DEBUG
612 "Losing some ticks... checking if CPU frequency changed.\n");
610 } 613 }
611 schedule_work(&cpufreq_delayed_get_work); 614 schedule_work(&cpufreq_delayed_get_work);
612 } 615 }
@@ -629,9 +632,9 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
629 lpj = &dummy; 632 lpj = &dummy;
630 if (!(freq->flags & CPUFREQ_CONST_LOOPS)) 633 if (!(freq->flags & CPUFREQ_CONST_LOOPS))
631#ifdef CONFIG_SMP 634#ifdef CONFIG_SMP
632 lpj = &cpu_data[freq->cpu].loops_per_jiffy; 635 lpj = &cpu_data[freq->cpu].loops_per_jiffy;
633#else 636#else
634 lpj = &boot_cpu_data.loops_per_jiffy; 637 lpj = &boot_cpu_data.loops_per_jiffy;
635#endif 638#endif
636 639
637 if (!ref_freq) { 640 if (!ref_freq) {
@@ -768,9 +771,8 @@ static __init int late_hpet_init(void)
768 int i; 771 int i;
769 772
770 hpet = (struct hpet *) fix_to_virt(FIX_HPET_BASE); 773 hpet = (struct hpet *) fix_to_virt(FIX_HPET_BASE);
771 774 timer = &hpet->hpet_timers[2];
772 for (i = 2, timer = &hpet->hpet_timers[2]; i < ntimer; 775 for (i = 2; i < ntimer; timer++, i++)
773 timer++, i++)
774 hd.hd_irq[i] = (timer->hpet_config & 776 hd.hd_irq[i] = (timer->hpet_config &
775 Tn_INT_ROUTE_CNF_MASK) >> 777 Tn_INT_ROUTE_CNF_MASK) >>
776 Tn_INT_ROUTE_CNF_SHIFT; 778 Tn_INT_ROUTE_CNF_SHIFT;
@@ -927,8 +929,7 @@ void __init time_init(void)
927 -xtime.tv_sec, -xtime.tv_nsec); 929 -xtime.tv_sec, -xtime.tv_nsec);
928 930
929 if (!hpet_init()) 931 if (!hpet_init())
930 vxtime_hz = (1000000000000000L + hpet_period / 2) / 932 vxtime_hz = (1000000000000000L + hpet_period / 2) / hpet_period;
931 hpet_period;
932 else 933 else
933 vxtime.hpet_address = 0; 934 vxtime.hpet_address = 0;
934 935
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 28d50dc540e8..7b148309c529 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -47,8 +47,6 @@
47#include <asm/proto.h> 47#include <asm/proto.h>
48#include <asm/nmi.h> 48#include <asm/nmi.h>
49 49
50extern struct gate_struct idt_table[256];
51
52asmlinkage void divide_error(void); 50asmlinkage void divide_error(void);
53asmlinkage void debug(void); 51asmlinkage void debug(void);
54asmlinkage void nmi(void); 52asmlinkage void nmi(void);
@@ -78,6 +76,8 @@ int register_die_notifier(struct notifier_block *nb)
78{ 76{
79 int err = 0; 77 int err = 0;
80 unsigned long flags; 78 unsigned long flags;
79
80 vmalloc_sync_all();
81 spin_lock_irqsave(&die_notifier_lock, flags); 81 spin_lock_irqsave(&die_notifier_lock, flags);
82 err = notifier_chain_register(&die_chain, nb); 82 err = notifier_chain_register(&die_chain, nb);
83 spin_unlock_irqrestore(&die_notifier_lock, flags); 83 spin_unlock_irqrestore(&die_notifier_lock, flags);
@@ -122,7 +122,7 @@ int printk_address(unsigned long address)
122 if (!modname) 122 if (!modname)
123 modname = delim = ""; 123 modname = delim = "";
124 return printk("<%016lx>{%s%s%s%s%+ld}", 124 return printk("<%016lx>{%s%s%s%s%+ld}",
125 address,delim,modname,delim,symname,offset); 125 address, delim, modname, delim, symname, offset);
126} 126}
127#else 127#else
128int printk_address(unsigned long address) 128int printk_address(unsigned long address)
@@ -334,13 +334,12 @@ void show_registers(struct pt_regs *regs)
334 show_stack(NULL, (unsigned long*)rsp); 334 show_stack(NULL, (unsigned long*)rsp);
335 335
336 printk("\nCode: "); 336 printk("\nCode: ");
337 if(regs->rip < PAGE_OFFSET) 337 if (regs->rip < PAGE_OFFSET)
338 goto bad; 338 goto bad;
339 339
340 for(i=0;i<20;i++) 340 for (i=0; i<20; i++) {
341 {
342 unsigned char c; 341 unsigned char c;
343 if(__get_user(c, &((unsigned char*)regs->rip)[i])) { 342 if (__get_user(c, &((unsigned char*)regs->rip)[i])) {
344bad: 343bad:
345 printk(" Bad RIP value."); 344 printk(" Bad RIP value.");
346 break; 345 break;
@@ -479,7 +478,7 @@ static void __kprobes do_trap(int trapnr, int signr, char *str,
479 printk(KERN_INFO 478 printk(KERN_INFO
480 "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n", 479 "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
481 tsk->comm, tsk->pid, str, 480 tsk->comm, tsk->pid, str,
482 regs->rip,regs->rsp,error_code); 481 regs->rip, regs->rsp, error_code);
483 482
484 if (info) 483 if (info)
485 force_sig_info(signr, info, tsk); 484 force_sig_info(signr, info, tsk);
@@ -493,9 +492,9 @@ static void __kprobes do_trap(int trapnr, int signr, char *str,
493 { 492 {
494 const struct exception_table_entry *fixup; 493 const struct exception_table_entry *fixup;
495 fixup = search_exception_tables(regs->rip); 494 fixup = search_exception_tables(regs->rip);
496 if (fixup) { 495 if (fixup)
497 regs->rip = fixup->fixup; 496 regs->rip = fixup->fixup;
498 } else 497 else
499 die(str, regs, error_code); 498 die(str, regs, error_code);
500 return; 499 return;
501 } 500 }
@@ -568,7 +567,7 @@ asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
568 printk(KERN_INFO 567 printk(KERN_INFO
569 "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n", 568 "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
570 tsk->comm, tsk->pid, 569 tsk->comm, tsk->pid,
571 regs->rip,regs->rsp,error_code); 570 regs->rip, regs->rsp, error_code);
572 571
573 force_sig(SIGSEGV, tsk); 572 force_sig(SIGSEGV, tsk);
574 return; 573 return;
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index 74db0062d4a2..39ff0708f803 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -20,6 +20,12 @@ SECTIONS
20 phys_startup_64 = startup_64 - LOAD_OFFSET; 20 phys_startup_64 = startup_64 - LOAD_OFFSET;
21 _text = .; /* Text and read-only data */ 21 _text = .; /* Text and read-only data */
22 .text : AT(ADDR(.text) - LOAD_OFFSET) { 22 .text : AT(ADDR(.text) - LOAD_OFFSET) {
23 /* First the code that has to be first for bootstrapping */
24 *(.bootstrap.text)
25 /* Then all the functions that are "hot" in profiles, to group them
26 onto the same hugetlb entry */
27 #include "functionlist"
28 /* Then the rest */
23 *(.text) 29 *(.text)
24 SCHED_TEXT 30 SCHED_TEXT
25 LOCK_TEXT 31 LOCK_TEXT
diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c
index c9dc7e46731e..d96a9348e5a2 100644
--- a/arch/x86_64/kernel/x8664_ksyms.c
+++ b/arch/x86_64/kernel/x8664_ksyms.c
@@ -144,16 +144,12 @@ EXPORT_SYMBOL(rwsem_down_write_failed_thunk);
144EXPORT_SYMBOL(empty_zero_page); 144EXPORT_SYMBOL(empty_zero_page);
145 145
146EXPORT_SYMBOL(die_chain); 146EXPORT_SYMBOL(die_chain);
147EXPORT_SYMBOL(register_die_notifier);
148 147
149#ifdef CONFIG_SMP 148#ifdef CONFIG_SMP
150EXPORT_SYMBOL(cpu_sibling_map); 149EXPORT_SYMBOL(cpu_sibling_map);
151EXPORT_SYMBOL(smp_num_siblings); 150EXPORT_SYMBOL(smp_num_siblings);
152#endif 151#endif
153 152
154extern void do_softirq_thunk(void);
155EXPORT_SYMBOL(do_softirq_thunk);
156
157#ifdef CONFIG_BUG 153#ifdef CONFIG_BUG
158EXPORT_SYMBOL(out_of_line_bug); 154EXPORT_SYMBOL(out_of_line_bug);
159#endif 155#endif
diff --git a/arch/x86_64/lib/thunk.S b/arch/x86_64/lib/thunk.S
index acc1e2ca7ed7..e49af0032e94 100644
--- a/arch/x86_64/lib/thunk.S
+++ b/arch/x86_64/lib/thunk.S
@@ -42,7 +42,6 @@
42 thunk rwsem_wake_thunk,rwsem_wake 42 thunk rwsem_wake_thunk,rwsem_wake
43 thunk rwsem_downgrade_thunk,rwsem_downgrade_wake 43 thunk rwsem_downgrade_thunk,rwsem_downgrade_wake
44#endif 44#endif
45 thunk do_softirq_thunk,do_softirq
46 45
47 thunk __down_failed,__down 46 thunk __down_failed,__down
48 thunk_retrax __down_failed_interruptible,__down_interruptible 47 thunk_retrax __down_failed_interruptible,__down_interruptible
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 2e7c3c8ffe03..316c53de47bd 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -264,6 +264,8 @@ static int vmalloc_fault(unsigned long address)
264 return -1; 264 return -1;
265 if (pgd_none(*pgd)) 265 if (pgd_none(*pgd))
266 set_pgd(pgd, *pgd_ref); 266 set_pgd(pgd, *pgd_ref);
267 else
268 BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref));
267 269
268 /* Below here mismatches are bugs because these lower tables 270 /* Below here mismatches are bugs because these lower tables
269 are shared */ 271 are shared */
@@ -312,21 +314,13 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
312 unsigned long flags; 314 unsigned long flags;
313 siginfo_t info; 315 siginfo_t info;
314 316
317 tsk = current;
318 mm = tsk->mm;
319 prefetchw(&mm->mmap_sem);
320
315 /* get the address */ 321 /* get the address */
316 __asm__("movq %%cr2,%0":"=r" (address)); 322 __asm__("movq %%cr2,%0":"=r" (address));
317 if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
318 SIGSEGV) == NOTIFY_STOP)
319 return;
320
321 if (likely(regs->eflags & X86_EFLAGS_IF))
322 local_irq_enable();
323 323
324 if (unlikely(page_fault_trace))
325 printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
326 regs->rip,regs->rsp,regs->cs,regs->ss,address,error_code);
327
328 tsk = current;
329 mm = tsk->mm;
330 info.si_code = SEGV_MAPERR; 324 info.si_code = SEGV_MAPERR;
331 325
332 326
@@ -351,10 +345,12 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
351 */ 345 */
352 if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) && 346 if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
353 ((address >= VMALLOC_START && address < VMALLOC_END))) { 347 ((address >= VMALLOC_START && address < VMALLOC_END))) {
354 if (vmalloc_fault(address) < 0) 348 if (vmalloc_fault(address) >= 0)
355 goto bad_area_nosemaphore; 349 return;
356 return;
357 } 350 }
351 if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
352 SIGSEGV) == NOTIFY_STOP)
353 return;
358 /* 354 /*
359 * Don't take the mm semaphore here. If we fixup a prefetch 355 * Don't take the mm semaphore here. If we fixup a prefetch
360 * fault we could otherwise deadlock. 356 * fault we could otherwise deadlock.
@@ -362,6 +358,17 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
362 goto bad_area_nosemaphore; 358 goto bad_area_nosemaphore;
363 } 359 }
364 360
361 if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
362 SIGSEGV) == NOTIFY_STOP)
363 return;
364
365 if (likely(regs->eflags & X86_EFLAGS_IF))
366 local_irq_enable();
367
368 if (unlikely(page_fault_trace))
369 printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
370 regs->rip,regs->rsp,regs->cs,regs->ss,address,error_code);
371
365 if (unlikely(error_code & PF_RSVD)) 372 if (unlikely(error_code & PF_RSVD))
366 pgtable_bad(address, regs, error_code); 373 pgtable_bad(address, regs, error_code);
367 374
@@ -571,6 +578,48 @@ do_sigbus:
571 return; 578 return;
572} 579}
573 580
581DEFINE_SPINLOCK(pgd_lock);
582struct page *pgd_list;
583
584void vmalloc_sync_all(void)
585{
586 /* Note that races in the updates of insync and start aren't
587 problematic:
588 insync can only get set bits added, and updates to start are only
589 improving performance (without affecting correctness if undone). */
590 static DECLARE_BITMAP(insync, PTRS_PER_PGD);
591 static unsigned long start = VMALLOC_START & PGDIR_MASK;
592 unsigned long address;
593
594 for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
595 if (!test_bit(pgd_index(address), insync)) {
596 const pgd_t *pgd_ref = pgd_offset_k(address);
597 struct page *page;
598
599 if (pgd_none(*pgd_ref))
600 continue;
601 spin_lock(&pgd_lock);
602 for (page = pgd_list; page;
603 page = (struct page *)page->index) {
604 pgd_t *pgd;
605 pgd = (pgd_t *)page_address(page) + pgd_index(address);
606 if (pgd_none(*pgd))
607 set_pgd(pgd, *pgd_ref);
608 else
609 BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref));
610 }
611 spin_unlock(&pgd_lock);
612 set_bit(pgd_index(address), insync);
613 }
614 if (address == start)
615 start = address + PGDIR_SIZE;
616 }
617 /* Check that there is no need to do the same for the modules area. */
618 BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
619 BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
620 (__START_KERNEL & PGDIR_MASK)));
621}
622
574static int __init enable_pagefaulttrace(char *str) 623static int __init enable_pagefaulttrace(char *str)
575{ 624{
576 page_fault_trace = 1; 625 page_fault_trace = 1;
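
vmalloc_sync_all() walks every process page directory on pgd_list and copies in any vmalloc-area PGD entries that the reference kernel page table already has, so code entered from NMI or die-notifier context never depends on the lazy vmalloc_fault() path. A hedged sketch of the caller-side idiom the nmi.c and traps.c hunks adopt; the names below are illustrative only:

    #include <linux/rcupdate.h>

    extern void vmalloc_sync_all(void);     /* provided by the hunk above */

    /* Before publishing a pointer that NMI context will dereference (the
     * callback may live in a module's vmalloc mapping), make sure every
     * process PGD already carries the kernel vmalloc entries, so the NMI
     * never needs the lazy vmalloc fault path. */
    static void publish_nmi_pointer(void **slot, void *ptr)
    {
            vmalloc_sync_all();
            rcu_assign_pointer(*slot, ptr);
    }
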
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 40ed13d263cd..b04415625442 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -94,7 +94,7 @@ void show_mem(void)
94 94
95int after_bootmem; 95int after_bootmem;
96 96
97static void *spp_getpage(void) 97static __init void *spp_getpage(void)
98{ 98{
99 void *ptr; 99 void *ptr;
100 if (after_bootmem) 100 if (after_bootmem)
@@ -108,7 +108,7 @@ static void *spp_getpage(void)
108 return ptr; 108 return ptr;
109} 109}
110 110
111static void set_pte_phys(unsigned long vaddr, 111static __init void set_pte_phys(unsigned long vaddr,
112 unsigned long phys, pgprot_t prot) 112 unsigned long phys, pgprot_t prot)
113{ 113{
114 pgd_t *pgd; 114 pgd_t *pgd;
@@ -157,7 +157,8 @@ static void set_pte_phys(unsigned long vaddr,
157} 157}
158 158
159/* NOTE: this is meant to be run only at boot */ 159/* NOTE: this is meant to be run only at boot */
160void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot) 160void __init
161__set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
161{ 162{
162 unsigned long address = __fix_to_virt(idx); 163 unsigned long address = __fix_to_virt(idx);
163 164
@@ -225,6 +226,33 @@ static __meminit void unmap_low_page(int i)
 	ti->allocated = 0;
 }
 
+/* Must run before zap_low_mappings */
+__init void *early_ioremap(unsigned long addr, unsigned long size)
+{
+	unsigned long map = round_down(addr, LARGE_PAGE_SIZE);
+
+	/* actually usually some more */
+	if (size >= LARGE_PAGE_SIZE) {
+		printk("SMBIOS area too long %lu\n", size);
+		return NULL;
+	}
+	set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
+	map += LARGE_PAGE_SIZE;
+	set_pmd(temp_mappings[1].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
+	__flush_tlb();
+	return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
+}
+
+/* To avoid virtual aliases later */
+__init void early_iounmap(void *addr, unsigned long size)
+{
+	if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) != temp_mappings[0].address)
+		printk("early_iounmap: bad address %p\n", addr);
+	set_pmd(temp_mappings[0].pmd, __pmd(0));
+	set_pmd(temp_mappings[1].pmd, __pmd(0));
+	__flush_tlb();
+}
+
 static void __meminit
 phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
 {
@@ -344,7 +372,7 @@ void __meminit init_memory_mapping(unsigned long start, unsigned long end)
 		pud_t *pud;
 
 		if (after_bootmem)
-			pud = pud_offset_k(pgd, __PAGE_OFFSET);
+			pud = pud_offset_k(pgd, start & PGDIR_MASK);
 		else
 			pud = alloc_low_page(&map, &pud_phys);
 
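
The early_ioremap()/early_iounmap() pair introduced in this file reuses the two temp_mappings large-page slots to give very early boot code a temporary virtual view of an arbitrary physical range, before the regular ioremap() machinery exists (the SMBIOS/DMI scan is the intended caller, per the error message). A sketch of how such a caller could look; the function and table names here are illustrative, not the kernel's actual DMI code:

/* Hypothetical early caller; must run before zap_low_mappings(). */
static int __init scan_firmware_table(unsigned long phys, unsigned long len)
{
	char *p = early_ioremap(phys, len);	/* temporary large-page mapping */
	int found;

	if (!p)
		return 0;
	found = (p[0] == '_');			/* placeholder signature check */
	early_iounmap(p, len);			/* tear down to avoid virtual aliases */
	return found;
}
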
diff --git a/arch/x86_64/mm/k8topology.c b/arch/x86_64/mm/k8topology.c
index dd60e71fdba6..7c45c2d2b8b2 100644
--- a/arch/x86_64/mm/k8topology.c
+++ b/arch/x86_64/mm/k8topology.c
@@ -43,7 +43,7 @@ static __init int find_northbridge(void)
 int __init k8_scan_nodes(unsigned long start, unsigned long end)
 {
 	unsigned long prevbase;
-	struct node nodes[8];
+	struct bootnode nodes[8];
 	int nodeid, i, nb;
 	unsigned char nodeids[8];
 	int found = 0;
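
The struct node to struct bootnode rename seen here (and repeated in numa.c and srat.c below) is mechanical: the old name was too generic and clashed with other uses of "node", while the type itself remains a plain physical address range used only during boot-time NUMA discovery. A sketch of the shape it presumably has, not a quote of the header:

/* One physical memory range per NUMA node, as filled in by
 * k8_scan_nodes(), numa_emulation() and the SRAT parser. */
struct bootnode {
	u64 start;	/* physical start address */
	u64 end;	/* physical end address */
};
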
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index 22e51beee8d3..63c72641b737 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -25,8 +25,7 @@
 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 bootmem_data_t plat_node_bdata[MAX_NUMNODES];
 
-int memnode_shift;
-u8 memnodemap[NODEMAPSIZE];
+struct memnode memnode;
 
 unsigned char cpu_to_node[NR_CPUS] __read_mostly = {
 	[0 ... NR_CPUS-1] = NUMA_NO_NODE
@@ -47,7 +46,7 @@ int numa_off __initdata;
  * -1 if node overlap or lost ram (shift too big)
  */
 static int __init
-populate_memnodemap(const struct node *nodes, int numnodes, int shift)
+populate_memnodemap(const struct bootnode *nodes, int numnodes, int shift)
 {
 	int i;
 	int res = -1;
@@ -74,7 +73,7 @@ populate_memnodemap(const struct node *nodes, int numnodes, int shift)
 	return res;
 }
 
-int __init compute_hash_shift(struct node *nodes, int numnodes)
+int __init compute_hash_shift(struct bootnode *nodes, int numnodes)
 {
 	int shift = 20;
 
80 79
@@ -149,7 +148,7 @@ void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long en
 /* Initialize final allocator for a zone */
 void __init setup_node_zones(int nodeid)
 {
-	unsigned long start_pfn, end_pfn;
+	unsigned long start_pfn, end_pfn, memmapsize, limit;
 	unsigned long zones[MAX_NR_ZONES];
 	unsigned long holes[MAX_NR_ZONES];
 
@@ -159,6 +158,16 @@ void __init setup_node_zones(int nodeid)
 	Dprintk(KERN_INFO "Setting up node %d %lx-%lx\n",
 		nodeid, start_pfn, end_pfn);
 
+	/* Try to allocate mem_map at end to not fill up precious <4GB
+	   memory. */
+	memmapsize = sizeof(struct page) * (end_pfn-start_pfn);
+	limit = end_pfn << PAGE_SHIFT;
+	NODE_DATA(nodeid)->node_mem_map =
+		__alloc_bootmem_core(NODE_DATA(nodeid)->bdata,
+				memmapsize, SMP_CACHE_BYTES,
+				round_down(limit - memmapsize, PAGE_SIZE),
+				limit);
+
 	size_zones(zones, holes, start_pfn, end_pfn);
 	free_area_init_node(nodeid, NODE_DATA(nodeid), zones,
 			start_pfn, holes);
@@ -191,7 +200,7 @@ int numa_fake __initdata = 0;
 static int numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
 {
 	int i;
-	struct node nodes[MAX_NUMNODES];
+	struct bootnode nodes[MAX_NUMNODES];
 	unsigned long sz = ((end_pfn - start_pfn)<<PAGE_SHIFT) / numa_fake;
 
 	/* Kludge needed for the hash function */
@@ -357,8 +366,7 @@ void __init init_cpu_to_node(void)
 
 EXPORT_SYMBOL(cpu_to_node);
 EXPORT_SYMBOL(node_to_cpumask);
-EXPORT_SYMBOL(memnode_shift);
-EXPORT_SYMBOL(memnodemap);
+EXPORT_SYMBOL(memnode);
 EXPORT_SYMBOL(node_data);
 
 #ifdef CONFIG_DISCONTIGMEM
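
Merging memnode_shift and memnodemap[] into one exported struct memnode keeps the shift and the first bytes of the map on the same cache line, which is what the hot physical-address-to-node lookup wants: one shift, one table load. A sketch of that lookup under the assumed layout (the real definition and the phys_to_nid() wrapper live in the x86_64 headers):

/* Assumed layout: the shift immediately followed by the lookup table. */
struct memnode_sketch {
	int shift;		/* log2 of bytes covered by one map entry */
	u8 map[4096];		/* NODEMAPSIZE entries in the real header */
};

static inline int phys_to_nid_sketch(const struct memnode_sketch *m,
				     unsigned long paddr)
{
	return m->map[paddr >> m->shift];	/* node id for this address */
}
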
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 482c25767369..2eb879590dc4 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -23,7 +23,7 @@ static struct acpi_table_slit *acpi_slit;
 
 static nodemask_t nodes_parsed __initdata;
 static nodemask_t nodes_found __initdata;
-static struct node nodes[MAX_NUMNODES] __initdata;
+static struct bootnode nodes[MAX_NUMNODES] __initdata;
 static u8 pxm2node[256] = { [0 ... 255] = 0xff };
 
 /* Too small nodes confuse the VM badly. Usually they result
@@ -57,7 +57,7 @@ static __init int conflicting_nodes(unsigned long start, unsigned long end)
 {
 	int i;
 	for_each_node_mask(i, nodes_parsed) {
-		struct node *nd = &nodes[i];
+		struct bootnode *nd = &nodes[i];
 		if (nd->start == nd->end)
 			continue;
 		if (nd->end > start && nd->start < end)
@@ -70,7 +70,7 @@ static __init int conflicting_nodes(unsigned long start, unsigned long end)
 
 static __init void cutoff_node(int i, unsigned long start, unsigned long end)
 {
-	struct node *nd = &nodes[i];
+	struct bootnode *nd = &nodes[i];
 	if (nd->start < start) {
 		nd->start = start;
 		if (nd->end < nd->start)
@@ -159,7 +159,7 @@ acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
 void __init
 acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
 {
-	struct node *nd;
+	struct bootnode *nd;
 	unsigned long start, end;
 	int node, pxm;
 	int i;
diff --git a/arch/x86_64/pci/Makefile b/arch/x86_64/pci/Makefile
index a8f75a2a0f6f..a3f6ad570179 100644
--- a/arch/x86_64/pci/Makefile
+++ b/arch/x86_64/pci/Makefile
@@ -7,7 +7,7 @@ CFLAGS += -Iarch/i386/pci
 
 obj-y := i386.o
 obj-$(CONFIG_PCI_DIRECT)+= direct.o
-obj-y += fixup.o
+obj-y += fixup.o init.o
 obj-$(CONFIG_ACPI) += acpi.o
 obj-y += legacy.o irq.o common.o
 # mmconfig has a 64bit special
@@ -22,3 +22,4 @@ irq-y += ../../i386/pci/irq.o
 common-y += ../../i386/pci/common.o
 fixup-y += ../../i386/pci/fixup.o
 i386-y += ../../i386/pci/i386.o
+init-y += ../../i386/pci/init.o
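
The new init.o is the i386 file listed in the diffstat (arch/i386/pci/init.c), shared by both architectures. Instead of every access method registering its own initcall, one initializer probes them in a fixed order, which is why pci_mmcfg_init() in the next file loses its arch_initcall() and becomes a plain void function. A sketch of the expected shape of that shared initializer; treat the body as an assumption from the surrounding changes rather than the file's exact contents:

static __init int pci_access_init(void)
{
#ifdef CONFIG_PCI_MMCONFIG
	pci_mmcfg_init();		/* may install raw_pci_ops */
#endif
	if (raw_pci_ops)
		return 0;		/* mmconfig won, nothing else to do */
#ifdef CONFIG_PCI_DIRECT
	pci_direct_init();		/* fall back to type 1/2 accesses */
#endif
	return 0;
}
arch_initcall(pci_access_init);
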
diff --git a/arch/x86_64/pci/mmconfig.c b/arch/x86_64/pci/mmconfig.c
index 18f371fe37f8..e616500207e4 100644
--- a/arch/x86_64/pci/mmconfig.c
+++ b/arch/x86_64/pci/mmconfig.c
@@ -55,7 +55,7 @@ static char __iomem *get_virt(unsigned int seg, unsigned bus)
 static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn)
 {
 	char __iomem *addr;
-	if (seg == 0 && bus == 0 && test_bit(PCI_SLOT(devfn), &fallback_slots))
+	if (seg == 0 && bus == 0 && test_bit(PCI_SLOT(devfn), fallback_slots))
 		return NULL;
 	addr = get_virt(seg, bus);
 	if (!addr)
@@ -143,29 +143,29 @@ static __init void unreachable_devices(void)
 			continue;
 		addr = pci_dev_base(0, 0, PCI_DEVFN(i, 0));
 		if (addr == NULL|| readl(addr) != val1) {
-			set_bit(i, &fallback_slots);
+			set_bit(i, fallback_slots);
 		}
 	}
 }
 
-static int __init pci_mmcfg_init(void)
+void __init pci_mmcfg_init(void)
 {
 	int i;
 
 	if ((pci_probe & PCI_PROBE_MMCONF) == 0)
-		return 0;
+		return;
 
 	acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg);
 	if ((pci_mmcfg_config_num == 0) ||
 	    (pci_mmcfg_config == NULL) ||
 	    (pci_mmcfg_config[0].base_address == 0))
-		return 0;
+		return;
 
 	/* RED-PEN i386 doesn't do _nocache right now */
 	pci_mmcfg_virt = kmalloc(sizeof(*pci_mmcfg_virt) * pci_mmcfg_config_num, GFP_KERNEL);
 	if (pci_mmcfg_virt == NULL) {
 		printk("PCI: Can not allocate memory for mmconfig structures\n");
-		return 0;
+		return;
 	}
 	for (i = 0; i < pci_mmcfg_config_num; ++i) {
 		pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i];
@@ -173,7 +173,7 @@ static int __init pci_mmcfg_init(void)
 		if (!pci_mmcfg_virt[i].virt) {
 			printk("PCI: Cannot map mmconfig aperture for segment %d\n",
 			       pci_mmcfg_config[i].pci_segment_group_number);
-			return 0;
+			return;
 		}
 		printk(KERN_INFO "PCI: Using MMCONFIG at %x\n", pci_mmcfg_config[i].base_address);
 	}
@@ -182,8 +182,4 @@ static int __init pci_mmcfg_init(void)
 
 	raw_pci_ops = &pci_mmcfg;
 	pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF;
-
-	return 0;
 }
-
-arch_initcall(pci_mmcfg_init);
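
Dropping the & in the test_bit()/set_bit() calls above implies that fallback_slots is no longer a bare unsigned long but a proper bitmap: the bitops take an unsigned long *, so an array name already has the right type, while &scalar only happens to work as long as every slot fits in one word. A sketch of the matching declaration and a helper, assuming 32 legacy slots on segment 0, bus 0:

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/pci.h>

/* One bit per device on bus 0 whose mmconfig space reads back as junk
 * and must therefore be accessed through the conf1 mechanism instead. */
static DECLARE_BITMAP(fallback_slots, 32);

static inline int slot_needs_fallback(unsigned int devfn)
{
	return test_bit(PCI_SLOT(devfn), fallback_slots);
}
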
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 4cbf6d91571f..51f9bed455fa 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -83,9 +83,8 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	if (i == 0) {
 		seq_printf(p, "           ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d       ",j);
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d       ",j);
 		seq_putc(p, '\n');
 	}
 
@@ -98,9 +97,8 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
 		seq_printf(p, " %14s", irq_desc[i].handler->typename);
 		seq_printf(p, " %s", action->name);
@@ -113,9 +111,8 @@ skip:
 		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
 		seq_printf(p, "NMI: ");
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", nmi_count(j));
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", nmi_count(j));
 		seq_putc(p, '\n');
 		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
 	}
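
All three hunks in this file are the same conversion: an open-coded 0..NR_CPUS loop guarded by cpu_online() becomes for_each_online_cpu(), which skips offline CPUs without reading their per-CPU state and reads better. The helper roughly iterates the bits set in cpu_online_map; the snippet below is an illustrative standalone use, not code from this patch:

#include <linux/cpumask.h>
#include <linux/kernel.h>

static void list_online_cpus(void)
{
	int cpu;

	/* Runs the body once per CPU whose bit is set in cpu_online_map. */
	for_each_online_cpu(cpu)
		printk(KERN_DEBUG "CPU%d is online\n", cpu);
}
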
diff --git a/arch/xtensa/platform-iss/console.c b/arch/xtensa/platform-iss/console.c
index 94fdfe474ac1..2a580efb58ec 100644
--- a/arch/xtensa/platform-iss/console.c
+++ b/arch/xtensa/platform-iss/console.c
@@ -31,10 +31,6 @@
 #include <linux/tty.h>
 #include <linux/tty_flip.h>
 
-#ifdef SERIAL_INLINE
-#define _INLINE_ inline
-#endif
-
 #define SERIAL_MAX_NUM_LINES 1
 #define SERIAL_TIMER_VALUE (20 * HZ)
 