Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/kernel/setup.c | 2
-rw-r--r--  arch/alpha/oprofile/common.c | 2
-rw-r--r--  arch/arm/Kconfig | 4
-rw-r--r--  arch/arm/Makefile | 2
-rw-r--r--  arch/arm/boot/compressed/head-at91rm9200.S | 6
-rw-r--r--  arch/arm/boot/compressed/ll_char_wr.S | 6
-rw-r--r--  arch/arm/common/locomo.c | 45
-rw-r--r--  arch/arm/configs/onearm_defconfig | 1053
-rw-r--r--  arch/arm/configs/s3c2410_defconfig | 50
-rw-r--r--  arch/arm/kernel/entry-common.S | 2
-rw-r--r--  arch/arm/kernel/head-nommu.S | 2
-rw-r--r--  arch/arm/kernel/head.S | 4
-rw-r--r--  arch/arm/kernel/setup.c | 2
-rw-r--r--  arch/arm/lib/backtrace.S | 8
-rw-r--r--  arch/arm/lib/clear_user.S | 4
-rw-r--r--  arch/arm/lib/copy_page.S | 2
-rw-r--r--  arch/arm/lib/csumipv6.S | 2
-rw-r--r--  arch/arm/lib/delay.S | 18
-rw-r--r--  arch/arm/lib/ecard.S | 4
-rw-r--r--  arch/arm/lib/findbit.S | 10
-rw-r--r--  arch/arm/lib/io-readsb.S | 6
-rw-r--r--  arch/arm/lib/io-readsw-armv3.S | 6
-rw-r--r--  arch/arm/lib/io-writesb.S | 6
-rw-r--r--  arch/arm/lib/io-writesw-armv3.S | 6
-rw-r--r--  arch/arm/lib/memchr.S | 2
-rw-r--r--  arch/arm/lib/memset.S | 4
-rw-r--r--  arch/arm/lib/memzero.S | 4
-rw-r--r--  arch/arm/lib/strchr.S | 2
-rw-r--r--  arch/arm/lib/strncpy_from_user.S | 5
-rw-r--r--  arch/arm/lib/strnlen_user.S | 5
-rw-r--r--  arch/arm/lib/strrchr.S | 2
-rw-r--r--  arch/arm/lib/uaccess.S | 8
-rw-r--r--  arch/arm/mach-at91rm9200/Kconfig | 6
-rw-r--r--  arch/arm/mach-at91rm9200/Makefile | 1
-rw-r--r--  arch/arm/mach-at91rm9200/board-1arm.c | 109
-rw-r--r--  arch/arm/mach-ixp4xx/Kconfig | 3
-rw-r--r--  arch/arm/mach-ixp4xx/Makefile | 24
-rw-r--r--  arch/arm/mach-pxa/sleep.S | 2
-rw-r--r--  arch/arm/mach-s3c2410/Kconfig | 4
-rw-r--r--  arch/arm/mach-s3c2410/sleep.S | 2
-rw-r--r--  arch/arm/mach-sa1100/sleep.S | 2
-rw-r--r--  arch/arm/mm/copypage-v3.S | 2
-rw-r--r--  arch/arm/mm/proc-v6.S | 32
-rw-r--r--  arch/arm/nwfpe/entry26.S | 2
-rw-r--r--  arch/arm/tools/mach-types | 71
-rw-r--r--  arch/i386/Kconfig | 54
-rw-r--r--  arch/i386/Kconfig.cpu | 2
-rw-r--r--  arch/i386/boot/Makefile | 9
-rw-r--r--  arch/i386/boot/compressed/misc.c | 32
-rw-r--r--  arch/i386/boot/video.S | 19
-rw-r--r--  arch/i386/crypto/aes-i586-asm.S | 29
-rw-r--r--  arch/i386/crypto/aes.c | 20
-rw-r--r--  arch/i386/kernel/Makefile | 8
-rw-r--r--  arch/i386/kernel/alternative.c | 118
-rw-r--r--  arch/i386/kernel/apic.c | 16
-rw-r--r--  arch/i386/kernel/apm.c | 6
-rw-r--r--  arch/i386/kernel/asm-offsets.c | 7
-rw-r--r--  arch/i386/kernel/cpu/amd.c | 22
-rw-r--r--  arch/i386/kernel/cpu/common.c | 25
-rw-r--r--  arch/i386/kernel/cpu/cyrix.c | 2
-rw-r--r--  arch/i386/kernel/cpu/intel.c | 6
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c | 123
-rw-r--r--  arch/i386/kernel/cpu/proc.c | 8
-rw-r--r--  arch/i386/kernel/cpuid.c | 2
-rw-r--r--  arch/i386/kernel/crash.c | 9
-rw-r--r--  arch/i386/kernel/entry.S | 285
-rw-r--r--  arch/i386/kernel/hpet.c | 67
-rw-r--r--  arch/i386/kernel/i8253.c | 118
-rw-r--r--  arch/i386/kernel/i8259.c | 2
-rw-r--r--  arch/i386/kernel/io_apic.c | 49
-rw-r--r--  arch/i386/kernel/irq.c | 10
-rw-r--r--  arch/i386/kernel/kprobes.c | 95
-rw-r--r--  arch/i386/kernel/machine_kexec.c | 4
-rw-r--r--  arch/i386/kernel/msr.c | 2
-rw-r--r--  arch/i386/kernel/nmi.c | 72
-rw-r--r--  arch/i386/kernel/numaq.c | 10
-rw-r--r--  arch/i386/kernel/process.c | 8
-rw-r--r--  arch/i386/kernel/scx200.c | 66
-rw-r--r--  arch/i386/kernel/setup.c | 1
-rw-r--r--  arch/i386/kernel/signal.c | 4
-rw-r--r--  arch/i386/kernel/smp.c | 12
-rw-r--r--  arch/i386/kernel/smpboot.c | 38
-rw-r--r--  arch/i386/kernel/sysenter.c | 128
-rw-r--r--  arch/i386/kernel/time.c | 157
-rw-r--r--  arch/i386/kernel/timers/Makefile | 9
-rw-r--r--  arch/i386/kernel/timers/common.c | 172
-rw-r--r--  arch/i386/kernel/timers/timer.c | 75
-rw-r--r--  arch/i386/kernel/timers/timer_cyclone.c | 259
-rw-r--r--  arch/i386/kernel/timers/timer_hpet.c | 217
-rw-r--r--  arch/i386/kernel/timers/timer_none.c | 39
-rw-r--r--  arch/i386/kernel/timers/timer_pit.c | 177
-rw-r--r--  arch/i386/kernel/timers/timer_pm.c | 342
-rw-r--r--  arch/i386/kernel/timers/timer_tsc.c | 617
-rw-r--r--  arch/i386/kernel/topology.c | 28
-rw-r--r--  arch/i386/kernel/traps.c | 70
-rw-r--r--  arch/i386/kernel/tsc.c | 478
-rw-r--r--  arch/i386/kernel/vmlinux.lds.S | 9
-rw-r--r--  arch/i386/kernel/vsyscall-sysenter.S | 4
-rw-r--r--  arch/i386/kernel/vsyscall.lds.S | 4
-rw-r--r--  arch/i386/lib/delay.c | 65
-rw-r--r--  arch/i386/mach-voyager/setup.c | 5
-rw-r--r--  arch/i386/mach-voyager/voyager_smp.c | 2
-rw-r--r--  arch/i386/mm/fault.c | 38
-rw-r--r--  arch/i386/mm/init.c | 5
-rw-r--r--  arch/i386/mm/pageattr.c | 4
-rw-r--r--  arch/i386/oprofile/nmi_int.c | 4
-rw-r--r--  arch/i386/oprofile/op_model_athlon.c | 1
-rw-r--r--  arch/i386/oprofile/op_model_p4.c | 1
-rw-r--r--  arch/i386/oprofile/op_model_ppro.c | 1
-rw-r--r--  arch/i386/pci/pcbios.c | 6
-rw-r--r--  arch/ia64/Kconfig | 4
-rw-r--r--  arch/ia64/kernel/palinfo.c | 4
-rw-r--r--  arch/ia64/kernel/process.c | 4
-rw-r--r--  arch/ia64/kernel/salinfo.c | 6
-rw-r--r--  arch/ia64/kernel/topology.c | 32
-rw-r--r--  arch/ia64/mm/discontig.c | 57
-rw-r--r--  arch/ia64/mm/fault.c | 36
-rw-r--r--  arch/ia64/mm/init.c | 5
-rw-r--r--  arch/ia64/sn/kernel/irq.c | 2
-rw-r--r--  arch/m32r/kernel/setup.c | 2
-rw-r--r--  arch/m68k/mm/memory.c | 6
-rw-r--r--  arch/m68k/sun3/sun3dvma.c | 6
-rw-r--r--  arch/m68knommu/Kconfig | 78
-rw-r--r--  arch/m68knommu/Makefile | 24
-rw-r--r--  arch/m68knommu/defconfig | 207
-rw-r--r--  arch/m68knommu/kernel/vmlinux.lds.S | 160
-rw-r--r--  arch/m68knommu/platform/5307/head.S | 82
-rw-r--r--  arch/m68knommu/platform/68328/head-pilot.S | 3
-rw-r--r--  arch/m68knommu/platform/68328/head-ram.S | 6
-rw-r--r--  arch/m68knommu/platform/68328/head-rom.S | 18
-rw-r--r--  arch/m68knommu/platform/68360/head-ram.S | 19
-rw-r--r--  arch/m68knommu/platform/68360/head-rom.S | 17
-rw-r--r--  arch/mips/kernel/smp.c | 2
-rw-r--r--  arch/mips/kernel/smtc.c | 4
-rw-r--r--  arch/mips/momentum/ocelot_g/gt-irq.c | 4
-rw-r--r--  arch/mips/oprofile/common.c | 2
-rw-r--r--  arch/mips/sgi-ip22/ip22-reset.c | 2
-rw-r--r--  arch/mips/sgi-ip32/ip32-reset.c | 12
-rw-r--r--  arch/parisc/kernel/topology.c | 3
-rw-r--r--  arch/powerpc/kernel/crash.c | 2
-rw-r--r--  arch/powerpc/kernel/machine_kexec_32.c | 4
-rw-r--r--  arch/powerpc/kernel/setup_32.c | 2
-rw-r--r--  arch/powerpc/kernel/sysfs.c | 31
-rw-r--r--  arch/powerpc/kernel/time.c | 2
-rw-r--r--  arch/powerpc/mm/fault.c | 36
-rw-r--r--  arch/powerpc/mm/init_64.c | 3
-rw-r--r--  arch/powerpc/mm/mem.c | 11
-rw-r--r--  arch/powerpc/mm/numa.c | 11
-rw-r--r--  arch/powerpc/oprofile/common.c | 2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/switch.c | 4
-rw-r--r--  arch/powerpc/platforms/powermac/pfunc_core.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_cache.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_event.c | 2
-rw-r--r--  arch/powerpc/sysdev/mmio_nvram.c | 2
-rw-r--r--  arch/ppc/kernel/machine_kexec.c | 4
-rw-r--r--  arch/ppc/kernel/setup.c | 2
-rw-r--r--  arch/s390/appldata/appldata_base.c | 2
-rw-r--r--  arch/s390/crypto/aes_s390.c | 14
-rw-r--r--  arch/s390/crypto/des_s390.c | 42
-rw-r--r--  arch/s390/crypto/sha1_s390.c | 34
-rw-r--r--  arch/s390/crypto/sha256_s390.c | 14
-rw-r--r--  arch/s390/kernel/machine_kexec.c | 4
-rw-r--r--  arch/s390/kernel/smp.c | 2
-rw-r--r--  arch/s390/kernel/vtime.c | 2
-rw-r--r--  arch/sh/Makefile | 4
-rw-r--r--  arch/sh/kernel/machine_kexec.c | 4
-rw-r--r--  arch/sh/kernel/setup.c | 2
-rw-r--r--  arch/sh/oprofile/op_model_sh7750.c | 2
-rw-r--r--  arch/sh64/kernel/setup.c | 2
-rw-r--r--  arch/sparc/kernel/of_device.c | 2
-rw-r--r--  arch/sparc/kernel/prom.c | 106
-rw-r--r--  arch/sparc/lib/Makefile | 2
-rw-r--r--  arch/sparc/lib/iomap.c | 48
-rw-r--r--  arch/sparc64/kernel/auxio.c | 3
-rw-r--r--  arch/sparc64/kernel/irq.c | 61
-rw-r--r--  arch/sparc64/kernel/of_device.c | 3
-rw-r--r--  arch/sparc64/kernel/prom.c | 107
-rw-r--r--  arch/sparc64/kernel/setup.c | 2
-rw-r--r--  arch/sparc64/mm/fault.c | 36
-rw-r--r--  arch/sparc64/mm/init.c | 4
-rw-r--r--  arch/um/drivers/ubd_kern.c | 2
-rw-r--r--  arch/x86_64/Kconfig | 51
-rw-r--r--  arch/x86_64/Kconfig.debug | 18
-rw-r--r--  arch/x86_64/Makefile | 4
-rw-r--r--  arch/x86_64/boot/Makefile | 9
-rw-r--r--  arch/x86_64/boot/compressed/misc.c | 46
-rw-r--r--  arch/x86_64/boot/tools/build.c | 6
-rw-r--r--  arch/x86_64/boot/video.S | 19
-rw-r--r--  arch/x86_64/crypto/aes-x86_64-asm.S | 22
-rw-r--r--  arch/x86_64/crypto/aes.c | 20
-rw-r--r--  arch/x86_64/defconfig | 159
-rw-r--r--  arch/x86_64/ia32/fpu32.c | 1
-rw-r--r--  arch/x86_64/ia32/ia32_signal.c | 2
-rw-r--r--  arch/x86_64/ia32/ia32entry.S | 11
-rw-r--r--  arch/x86_64/ia32/ptrace32.c | 43
-rw-r--r--  arch/x86_64/ia32/sys_ia32.c | 25
-rw-r--r--  arch/x86_64/kernel/Makefile | 8
-rw-r--r--  arch/x86_64/kernel/aperture.c | 26
-rw-r--r--  arch/x86_64/kernel/apic.c | 32
-rw-r--r--  arch/x86_64/kernel/asm-offsets.c | 3
-rw-r--r--  arch/x86_64/kernel/crash.c | 6
-rw-r--r--  arch/x86_64/kernel/e820.c | 2
-rw-r--r--  arch/x86_64/kernel/entry.S | 115
-rw-r--r--  arch/x86_64/kernel/genapic_flat.c | 30
-rw-r--r--  arch/x86_64/kernel/head64.c | 2
-rw-r--r--  arch/x86_64/kernel/i8259.c | 16
-rw-r--r--  arch/x86_64/kernel/io_apic.c | 45
-rw-r--r--  arch/x86_64/kernel/irq.c | 34
-rw-r--r--  arch/x86_64/kernel/k8.c | 118
-rw-r--r--  arch/x86_64/kernel/machine_kexec.c | 4
-rw-r--r--  arch/x86_64/kernel/mce.c | 6
-rw-r--r--  arch/x86_64/kernel/mce_amd.c | 506
-rw-r--r--  arch/x86_64/kernel/module.c | 38
-rw-r--r--  arch/x86_64/kernel/nmi.c | 89
-rw-r--r--  arch/x86_64/kernel/pci-calgary.c | 1018
-rw-r--r--  arch/x86_64/kernel/pci-dma.c | 55
-rw-r--r--  arch/x86_64/kernel/pci-gart.c | 155
-rw-r--r--  arch/x86_64/kernel/pci-nommu.c | 9
-rw-r--r--  arch/x86_64/kernel/pci-swiotlb.c | 2
-rw-r--r--  arch/x86_64/kernel/pmtimer.c | 2
-rw-r--r--  arch/x86_64/kernel/process.c | 16
-rw-r--r--  arch/x86_64/kernel/reboot.c | 1
-rw-r--r--  arch/x86_64/kernel/setup.c | 181
-rw-r--r--  arch/x86_64/kernel/setup64.c | 3
-rw-r--r--  arch/x86_64/kernel/signal.c | 3
-rw-r--r--  arch/x86_64/kernel/smp.c | 16
-rw-r--r--  arch/x86_64/kernel/smpboot.c | 31
-rw-r--r--  arch/x86_64/kernel/tce.c | 202
-rw-r--r--  arch/x86_64/kernel/time.c | 87
-rw-r--r--  arch/x86_64/kernel/traps.c | 83
-rw-r--r--  arch/x86_64/kernel/vmlinux.lds.S | 29
-rw-r--r--  arch/x86_64/kernel/vsyscall.c | 4
-rw-r--r--  arch/x86_64/kernel/x8664_ksyms.c | 114
-rw-r--r--  arch/x86_64/lib/csum-partial.c | 1
-rw-r--r--  arch/x86_64/lib/csum-wrappers.c | 1
-rw-r--r--  arch/x86_64/lib/delay.c | 5
-rw-r--r--  arch/x86_64/lib/memmove.c | 4
-rw-r--r--  arch/x86_64/lib/usercopy.c | 13
-rw-r--r--  arch/x86_64/mm/fault.c | 47
-rw-r--r--  arch/x86_64/mm/init.c | 121
-rw-r--r--  arch/x86_64/mm/ioremap.c | 5
-rw-r--r--  arch/x86_64/pci/k8-bus.c | 10
-rw-r--r--  arch/xtensa/Makefile | 2
-rw-r--r--  arch/xtensa/kernel/time.c | 2
-rw-r--r--  arch/xtensa/kernel/traps.c | 2
245 files changed, 6791 insertions(+), 4203 deletions(-)
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 558b83368559..254c507a608c 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -481,7 +481,7 @@ register_cpus(void)
481 struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL); 481 struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
482 if (!p) 482 if (!p)
483 return -ENOMEM; 483 return -ENOMEM;
484 register_cpu(p, i, NULL); 484 register_cpu(p, i);
485 } 485 }
486 return 0; 486 return 0;
487} 487}
diff --git a/arch/alpha/oprofile/common.c b/arch/alpha/oprofile/common.c
index ba788cfdc3c6..9fc0eeb4f0ab 100644
--- a/arch/alpha/oprofile/common.c
+++ b/arch/alpha/oprofile/common.c
@@ -112,7 +112,7 @@ op_axp_create_files(struct super_block * sb, struct dentry * root)
112 112
113 for (i = 0; i < model->num_counters; ++i) { 113 for (i = 0; i < model->num_counters; ++i) {
114 struct dentry *dir; 114 struct dentry *dir;
115 char buf[3]; 115 char buf[4];
116 116
117 snprintf(buf, sizeof buf, "%d", i); 117 snprintf(buf, sizeof buf, "%d", i);
118 dir = oprofilefs_mkdir(sb, root, buf); 118 dir = oprofilefs_mkdir(sb, root, buf);
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 1b7e5c2e90ef..3d1a3fb7d5fc 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -253,7 +253,7 @@ config ARCH_SA1100
253 Support for StrongARM 11x0 based boards. 253 Support for StrongARM 11x0 based boards.
254 254
255config ARCH_S3C2410 255config ARCH_S3C2410
256 bool "Samsung S3C2410" 256 bool "Samsung S3C2410, S3C2412, S3C2413, S3C2440, S3C2442"
257 help 257 help
258 Samsung S3C2410X CPU based systems, such as the Simtec Electronics 258 Samsung S3C2410X CPU based systems, such as the Simtec Electronics
259 BAST (<http://www.simtec.co.uk/products/EB110ITX/>), the IPAQ 1940 or 259 BAST (<http://www.simtec.co.uk/products/EB110ITX/>), the IPAQ 1940 or
@@ -372,7 +372,7 @@ config ISA_DMA_API
372 bool 372 bool
373 373
374config PCI 374config PCI
375 bool "PCI support" if ARCH_INTEGRATOR_AP || ARCH_VERSATILE_PB 375 bool "PCI support" if ARCH_INTEGRATOR_AP || ARCH_VERSATILE_PB || ARCH_IXP4XX
376 help 376 help
377 Find out whether you have a PCI motherboard. PCI is the name of a 377 Find out whether you have a PCI motherboard. PCI is the name of a
378 bus system, i.e. the way the CPU talks to the other stuff inside 378 bus system, i.e. the way the CPU talks to the other stuff inside
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 282b14e2f464..a3bbaaf480b9 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -177,7 +177,7 @@ boot := arch/arm/boot
177# them changed. We use .arch to indicate when they were updated 177# them changed. We use .arch to indicate when they were updated
178# last, otherwise make uses the target directory mtime. 178# last, otherwise make uses the target directory mtime.
179 179
180include/asm-arm/.arch: $(wildcard include/config/arch/*.h) include/config/MARKER 180include/asm-arm/.arch: $(wildcard include/config/arch/*.h) include/config/auto.conf
181 @echo ' SYMLINK include/asm-arm/arch -> include/asm-arm/$(INCDIR)' 181 @echo ' SYMLINK include/asm-arm/arch -> include/asm-arm/$(INCDIR)'
182ifneq ($(KBUILD_SRC),) 182ifneq ($(KBUILD_SRC),)
183 $(Q)mkdir -p include/asm-arm 183 $(Q)mkdir -p include/asm-arm
diff --git a/arch/arm/boot/compressed/head-at91rm9200.S b/arch/arm/boot/compressed/head-at91rm9200.S
index 57a3b163b2cb..d68b9acd826e 100644
--- a/arch/arm/boot/compressed/head-at91rm9200.S
+++ b/arch/arm/boot/compressed/head-at91rm9200.S
@@ -61,6 +61,12 @@
61 cmp r7, r3 61 cmp r7, r3
62 beq 99f 62 beq 99f
63 63
64 @ Ajeco 1ARM : 1075
65 mov r3, #(MACH_TYPE_ONEARM & 0xff)
66 orr r3, r3, #(MACH_TYPE_ONEARM & 0xff00)
67 cmp r7, r3
68 beq 99f
69
64 @ Unknown board, use the AT91RM9200DK board 70 @ Unknown board, use the AT91RM9200DK board
65 @ mov r7, #MACH_TYPE_AT91RM9200 71 @ mov r7, #MACH_TYPE_AT91RM9200
66 mov r7, #(MACH_TYPE_AT91RM9200DK & 0xff) 72 mov r7, #(MACH_TYPE_AT91RM9200DK & 0xff)
diff --git a/arch/arm/boot/compressed/ll_char_wr.S b/arch/arm/boot/compressed/ll_char_wr.S
index d7bbd9da2fca..8517c8606b4a 100644
--- a/arch/arm/boot/compressed/ll_char_wr.S
+++ b/arch/arm/boot/compressed/ll_char_wr.S
@@ -77,7 +77,7 @@ Lrow4bpplp:
77 subne r1, r1, #1 77 subne r1, r1, #1
78 ldrneb r7, [r6, r1] 78 ldrneb r7, [r6, r1]
79 bne Lrow4bpplp 79 bne Lrow4bpplp
80 LOADREGS(fd, sp!, {r4 - r7, pc}) 80 ldmfd sp!, {r4 - r7, pc}
81 81
82@ 82@
83@ Smashable regs: {r0 - r3}, [r4], {r5 - r7}, (r8 - fp), [ip], (sp), {lr}, (pc) 83@ Smashable regs: {r0 - r3}, [r4], {r5 - r7}, (r8 - fp), [ip], (sp), {lr}, (pc)
@@ -105,7 +105,7 @@ Lrow8bpplp:
105 subne r1, r1, #1 105 subne r1, r1, #1
106 ldrneb r7, [r6, r1] 106 ldrneb r7, [r6, r1]
107 bne Lrow8bpplp 107 bne Lrow8bpplp
108 LOADREGS(fd, sp!, {r4 - r7, pc}) 108 ldmfd sp!, {r4 - r7, pc}
109 109
110@ 110@
111@ Smashable regs: {r0 - r3}, [r4], {r5, r6}, [r7], (r8 - fp), [ip], (sp), [lr], (pc) 111@ Smashable regs: {r0 - r3}, [r4], {r5, r6}, [r7], (r8 - fp), [ip], (sp), [lr], (pc)
@@ -127,7 +127,7 @@ Lrow1bpp:
127 strb r7, [r0], r5 127 strb r7, [r0], r5
128 mov r7, r7, lsr #8 128 mov r7, r7, lsr #8
129 strb r7, [r0], r5 129 strb r7, [r0], r5
130 LOADREGS(fd, sp!, {r4 - r7, pc}) 130 ldmfd sp!, {r4 - r7, pc}
131 131
132 .bss 132 .bss
133ENTRY(con_charconvtable) 133ENTRY(con_charconvtable)
diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c
index a7dc1370695b..0dafba3a701d 100644
--- a/arch/arm/common/locomo.c
+++ b/arch/arm/common/locomo.c
@@ -629,21 +629,6 @@ static int locomo_resume(struct platform_device *dev)
629#endif 629#endif
630 630
631 631
632#define LCM_ALC_EN 0x8000
633
634void frontlight_set(struct locomo *lchip, int duty, int vr, int bpwf)
635{
636 unsigned long flags;
637
638 spin_lock_irqsave(&lchip->lock, flags);
639 locomo_writel(bpwf, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS);
640 udelay(100);
641 locomo_writel(duty, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD);
642 locomo_writel(bpwf | LCM_ALC_EN, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS);
643 spin_unlock_irqrestore(&lchip->lock, flags);
644}
645
646
647/** 632/**
648 * locomo_probe - probe for a single LoCoMo chip. 633 * locomo_probe - probe for a single LoCoMo chip.
649 * @phys_addr: physical address of device. 634 * @phys_addr: physical address of device.
@@ -698,14 +683,10 @@ __locomo_probe(struct device *me, struct resource *mem, int irq)
698 , lchip->base + LOCOMO_GPD); 683 , lchip->base + LOCOMO_GPD);
699 locomo_writel(0, lchip->base + LOCOMO_GIE); 684 locomo_writel(0, lchip->base + LOCOMO_GIE);
700 685
701 /* FrontLight */ 686 /* Frontlight */
702 locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); 687 locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS);
703 locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD); 688 locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD);
704 689
705 /* Same constants can be used for collie and poodle
706 (depending on CONFIG options in original sharp code)? */
707 frontlight_set(lchip, 163, 0, 148);
708
709 /* Longtime timer */ 690 /* Longtime timer */
710 locomo_writel(0, lchip->base + LOCOMO_LTINT); 691 locomo_writel(0, lchip->base + LOCOMO_LTINT);
711 /* SPI */ 692 /* SPI */
@@ -1063,6 +1044,30 @@ void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int
1063} 1044}
1064 1045
1065/* 1046/*
1047 * Frontlight control
1048 */
1049
1050static struct locomo *locomo_chip_driver(struct locomo_dev *ldev);
1051
1052void locomo_frontlight_set(struct locomo_dev *dev, int duty, int vr, int bpwf)
1053{
1054 unsigned long flags;
1055 struct locomo *lchip = locomo_chip_driver(dev);
1056
1057 if (vr)
1058 locomo_gpio_write(dev, LOCOMO_GPIO_FL_VR, 1);
1059 else
1060 locomo_gpio_write(dev, LOCOMO_GPIO_FL_VR, 0);
1061
1062 spin_lock_irqsave(&lchip->lock, flags);
1063 locomo_writel(bpwf, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS);
1064 udelay(100);
1065 locomo_writel(duty, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD);
1066 locomo_writel(bpwf | LOCOMO_ALC_EN, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS);
1067 spin_unlock_irqrestore(&lchip->lock, flags);
1068}
1069
1070/*
1066 * LoCoMo "Register Access Bus." 1071 * LoCoMo "Register Access Bus."
1067 * 1072 *
1068 * We model this as a regular bus type, and hang devices directly 1073 * We model this as a regular bus type, and hang devices directly
diff --git a/arch/arm/configs/onearm_defconfig b/arch/arm/configs/onearm_defconfig
new file mode 100644
index 000000000000..5401c01caefe
--- /dev/null
+++ b/arch/arm/configs/onearm_defconfig
@@ -0,0 +1,1053 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.17-git10
4# Mon Jun 26 13:45:44 2006
5#
6CONFIG_ARM=y
7CONFIG_MMU=y
8CONFIG_RWSEM_GENERIC_SPINLOCK=y
9CONFIG_GENERIC_HWEIGHT=y
10CONFIG_GENERIC_CALIBRATE_DELAY=y
11CONFIG_VECTORS_BASE=0xffff0000
12
13#
14# Code maturity level options
15#
16CONFIG_EXPERIMENTAL=y
17CONFIG_BROKEN_ON_SMP=y
18CONFIG_INIT_ENV_ARG_LIMIT=32
19
20#
21# General setup
22#
23CONFIG_LOCALVERSION=""
24CONFIG_LOCALVERSION_AUTO=y
25# CONFIG_SWAP is not set
26CONFIG_SYSVIPC=y
27# CONFIG_POSIX_MQUEUE is not set
28# CONFIG_BSD_PROCESS_ACCT is not set
29CONFIG_SYSCTL=y
30# CONFIG_AUDIT is not set
31# CONFIG_IKCONFIG is not set
32# CONFIG_RELAY is not set
33CONFIG_INITRAMFS_SOURCE=""
34CONFIG_UID16=y
35CONFIG_CC_OPTIMIZE_FOR_SIZE=y
36CONFIG_EMBEDDED=y
37CONFIG_KALLSYMS=y
38# CONFIG_KALLSYMS_ALL is not set
39# CONFIG_KALLSYMS_EXTRA_PASS is not set
40CONFIG_HOTPLUG=y
41CONFIG_PRINTK=y
42CONFIG_BUG=y
43CONFIG_ELF_CORE=y
44CONFIG_BASE_FULL=y
45CONFIG_FUTEX=y
46CONFIG_EPOLL=y
47CONFIG_SHMEM=y
48CONFIG_SLAB=y
49# CONFIG_TINY_SHMEM is not set
50CONFIG_BASE_SMALL=0
51# CONFIG_SLOB is not set
52
53#
54# Loadable module support
55#
56CONFIG_MODULES=y
57CONFIG_MODULE_UNLOAD=y
58# CONFIG_MODULE_FORCE_UNLOAD is not set
59# CONFIG_MODVERSIONS is not set
60# CONFIG_MODULE_SRCVERSION_ALL is not set
61CONFIG_KMOD=y
62
63#
64# Block layer
65#
66# CONFIG_BLK_DEV_IO_TRACE is not set
67
68#
69# IO Schedulers
70#
71CONFIG_IOSCHED_NOOP=y
72CONFIG_IOSCHED_AS=y
73# CONFIG_IOSCHED_DEADLINE is not set
74# CONFIG_IOSCHED_CFQ is not set
75CONFIG_DEFAULT_AS=y
76# CONFIG_DEFAULT_DEADLINE is not set
77# CONFIG_DEFAULT_CFQ is not set
78# CONFIG_DEFAULT_NOOP is not set
79CONFIG_DEFAULT_IOSCHED="anticipatory"
80
81#
82# System Type
83#
84# CONFIG_ARCH_AAEC2000 is not set
85# CONFIG_ARCH_INTEGRATOR is not set
86# CONFIG_ARCH_REALVIEW is not set
87# CONFIG_ARCH_VERSATILE is not set
88CONFIG_ARCH_AT91RM9200=y
89# CONFIG_ARCH_CLPS7500 is not set
90# CONFIG_ARCH_CLPS711X is not set
91# CONFIG_ARCH_CO285 is not set
92# CONFIG_ARCH_EBSA110 is not set
93# CONFIG_ARCH_EP93XX is not set
94# CONFIG_ARCH_FOOTBRIDGE is not set
95# CONFIG_ARCH_NETX is not set
96# CONFIG_ARCH_H720X is not set
97# CONFIG_ARCH_IMX is not set
98# CONFIG_ARCH_IOP3XX is not set
99# CONFIG_ARCH_IXP4XX is not set
100# CONFIG_ARCH_IXP2000 is not set
101# CONFIG_ARCH_IXP23XX is not set
102# CONFIG_ARCH_L7200 is not set
103# CONFIG_ARCH_PNX4008 is not set
104# CONFIG_ARCH_PXA is not set
105# CONFIG_ARCH_RPC is not set
106# CONFIG_ARCH_SA1100 is not set
107# CONFIG_ARCH_S3C2410 is not set
108# CONFIG_ARCH_SHARK is not set
109# CONFIG_ARCH_LH7A40X is not set
110# CONFIG_ARCH_OMAP is not set
111
112#
113# AT91RM9200 Implementations
114#
115
116#
117# AT91RM9200 Board Type
118#
119CONFIG_MACH_ONEARM=y
120# CONFIG_ARCH_AT91RM9200DK is not set
121# CONFIG_MACH_AT91RM9200EK is not set
122# CONFIG_MACH_CSB337 is not set
123# CONFIG_MACH_CSB637 is not set
124# CONFIG_MACH_CARMEVA is not set
125# CONFIG_MACH_KB9200 is not set
126# CONFIG_MACH_ATEB9200 is not set
127# CONFIG_MACH_KAFA is not set
128
129#
130# AT91RM9200 Feature Selections
131#
132CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
133
134#
135# Processor Type
136#
137CONFIG_CPU_32=y
138CONFIG_CPU_ARM920T=y
139CONFIG_CPU_32v4=y
140CONFIG_CPU_ABRT_EV4T=y
141CONFIG_CPU_CACHE_V4WT=y
142CONFIG_CPU_CACHE_VIVT=y
143CONFIG_CPU_COPY_V4WB=y
144CONFIG_CPU_TLB_V4WBI=y
145
146#
147# Processor Features
148#
149# CONFIG_ARM_THUMB is not set
150# CONFIG_CPU_ICACHE_DISABLE is not set
151# CONFIG_CPU_DCACHE_DISABLE is not set
152# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
153
154#
155# Bus support
156#
157
158#
159# PCCARD (PCMCIA/CardBus) support
160#
161CONFIG_PCCARD=y
162# CONFIG_PCMCIA_DEBUG is not set
163CONFIG_PCMCIA=y
164CONFIG_PCMCIA_LOAD_CIS=y
165CONFIG_PCMCIA_IOCTL=y
166
167#
168# PC-card bridges
169#
170CONFIG_AT91_CF=y
171
172#
173# Kernel Features
174#
175# CONFIG_PREEMPT is not set
176# CONFIG_NO_IDLE_HZ is not set
177CONFIG_HZ=100
178# CONFIG_AEABI is not set
179# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
180CONFIG_SELECT_MEMORY_MODEL=y
181CONFIG_FLATMEM_MANUAL=y
182# CONFIG_DISCONTIGMEM_MANUAL is not set
183# CONFIG_SPARSEMEM_MANUAL is not set
184CONFIG_FLATMEM=y
185CONFIG_FLAT_NODE_MEM_MAP=y
186# CONFIG_SPARSEMEM_STATIC is not set
187CONFIG_SPLIT_PTLOCK_CPUS=4096
188CONFIG_LEDS=y
189CONFIG_LEDS_TIMER=y
190# CONFIG_LEDS_CPU is not set
191CONFIG_ALIGNMENT_TRAP=y
192
193#
194# Boot options
195#
196CONFIG_ZBOOT_ROM_TEXT=0x0
197CONFIG_ZBOOT_ROM_BSS=0x0
198CONFIG_CMDLINE="console=ttyS0,115200 root=/dev/nfs ip=bootp mem=64M"
199# CONFIG_XIP_KERNEL is not set
200
201#
202# Floating point emulation
203#
204
205#
206# At least one emulation must be selected
207#
208CONFIG_FPE_NWFPE=y
209# CONFIG_FPE_NWFPE_XP is not set
210# CONFIG_FPE_FASTFPE is not set
211
212#
213# Userspace binary formats
214#
215CONFIG_BINFMT_ELF=y
216# CONFIG_BINFMT_AOUT is not set
217# CONFIG_BINFMT_MISC is not set
218# CONFIG_ARTHUR is not set
219
220#
221# Power management options
222#
223# CONFIG_PM is not set
224# CONFIG_APM is not set
225
226#
227# Networking
228#
229CONFIG_NET=y
230
231#
232# Networking options
233#
234# CONFIG_NETDEBUG is not set
235CONFIG_PACKET=y
236# CONFIG_PACKET_MMAP is not set
237CONFIG_UNIX=y
238CONFIG_XFRM=y
239# CONFIG_XFRM_USER is not set
240# CONFIG_NET_KEY is not set
241CONFIG_INET=y
242# CONFIG_IP_MULTICAST is not set
243# CONFIG_IP_ADVANCED_ROUTER is not set
244CONFIG_IP_FIB_HASH=y
245CONFIG_IP_PNP=y
246# CONFIG_IP_PNP_DHCP is not set
247CONFIG_IP_PNP_BOOTP=y
248# CONFIG_IP_PNP_RARP is not set
249# CONFIG_NET_IPIP is not set
250# CONFIG_NET_IPGRE is not set
251# CONFIG_ARPD is not set
252# CONFIG_SYN_COOKIES is not set
253# CONFIG_INET_AH is not set
254# CONFIG_INET_ESP is not set
255# CONFIG_INET_IPCOMP is not set
256# CONFIG_INET_XFRM_TUNNEL is not set
257# CONFIG_INET_TUNNEL is not set
258CONFIG_INET_XFRM_MODE_TRANSPORT=y
259CONFIG_INET_XFRM_MODE_TUNNEL=y
260CONFIG_INET_DIAG=y
261CONFIG_INET_TCP_DIAG=y
262# CONFIG_TCP_CONG_ADVANCED is not set
263CONFIG_TCP_CONG_BIC=y
264# CONFIG_IPV6 is not set
265# CONFIG_INET6_XFRM_TUNNEL is not set
266# CONFIG_INET6_TUNNEL is not set
267# CONFIG_NETWORK_SECMARK is not set
268# CONFIG_NETFILTER is not set
269
270#
271# DCCP Configuration (EXPERIMENTAL)
272#
273# CONFIG_IP_DCCP is not set
274
275#
276# SCTP Configuration (EXPERIMENTAL)
277#
278# CONFIG_IP_SCTP is not set
279
280#
281# TIPC Configuration (EXPERIMENTAL)
282#
283# CONFIG_TIPC is not set
284# CONFIG_ATM is not set
285# CONFIG_BRIDGE is not set
286# CONFIG_VLAN_8021Q is not set
287# CONFIG_DECNET is not set
288# CONFIG_LLC2 is not set
289# CONFIG_IPX is not set
290# CONFIG_ATALK is not set
291# CONFIG_X25 is not set
292# CONFIG_LAPB is not set
293# CONFIG_NET_DIVERT is not set
294# CONFIG_ECONET is not set
295# CONFIG_WAN_ROUTER is not set
296
297#
298# QoS and/or fair queueing
299#
300# CONFIG_NET_SCHED is not set
301
302#
303# Network testing
304#
305# CONFIG_NET_PKTGEN is not set
306# CONFIG_HAMRADIO is not set
307# CONFIG_IRDA is not set
308# CONFIG_BT is not set
309# CONFIG_IEEE80211 is not set
310
311#
312# Device Drivers
313#
314
315#
316# Generic Driver Options
317#
318CONFIG_STANDALONE=y
319CONFIG_PREVENT_FIRMWARE_BUILD=y
320CONFIG_FW_LOADER=y
321# CONFIG_DEBUG_DRIVER is not set
322# CONFIG_SYS_HYPERVISOR is not set
323
324#
325# Connector - unified userspace <-> kernelspace linker
326#
327# CONFIG_CONNECTOR is not set
328
329#
330# Memory Technology Devices (MTD)
331#
332CONFIG_MTD=y
333# CONFIG_MTD_DEBUG is not set
334# CONFIG_MTD_CONCAT is not set
335CONFIG_MTD_PARTITIONS=y
336# CONFIG_MTD_REDBOOT_PARTS is not set
337CONFIG_MTD_CMDLINE_PARTS=y
338# CONFIG_MTD_AFS_PARTS is not set
339
340#
341# User Modules And Translation Layers
342#
343CONFIG_MTD_CHAR=y
344CONFIG_MTD_BLOCK=y
345# CONFIG_FTL is not set
346# CONFIG_NFTL is not set
347# CONFIG_INFTL is not set
348# CONFIG_RFD_FTL is not set
349
350#
351# RAM/ROM/Flash chip drivers
352#
353CONFIG_MTD_CFI=y
354CONFIG_MTD_JEDECPROBE=y
355CONFIG_MTD_GEN_PROBE=y
356# CONFIG_MTD_CFI_ADV_OPTIONS is not set
357CONFIG_MTD_MAP_BANK_WIDTH_1=y
358CONFIG_MTD_MAP_BANK_WIDTH_2=y
359CONFIG_MTD_MAP_BANK_WIDTH_4=y
360# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
361# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
362# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
363CONFIG_MTD_CFI_I1=y
364CONFIG_MTD_CFI_I2=y
365# CONFIG_MTD_CFI_I4 is not set
366# CONFIG_MTD_CFI_I8 is not set
367# CONFIG_MTD_CFI_INTELEXT is not set
368CONFIG_MTD_CFI_AMDSTD=y
369# CONFIG_MTD_CFI_STAA is not set
370CONFIG_MTD_CFI_UTIL=y
371# CONFIG_MTD_RAM is not set
372# CONFIG_MTD_ROM is not set
373# CONFIG_MTD_ABSENT is not set
374# CONFIG_MTD_OBSOLETE_CHIPS is not set
375
376#
377# Mapping drivers for chip access
378#
379# CONFIG_MTD_COMPLEX_MAPPINGS is not set
380CONFIG_MTD_PHYSMAP=y
381CONFIG_MTD_PHYSMAP_START=0x0
382CONFIG_MTD_PHYSMAP_LEN=0x0
383CONFIG_MTD_PHYSMAP_BANKWIDTH=0
384# CONFIG_MTD_ARM_INTEGRATOR is not set
385# CONFIG_MTD_IMPA7 is not set
386# CONFIG_MTD_PLATRAM is not set
387
388#
389# Self-contained MTD device drivers
390#
391# CONFIG_MTD_SLRAM is not set
392# CONFIG_MTD_PHRAM is not set
393# CONFIG_MTD_MTDRAM is not set
394# CONFIG_MTD_BLOCK2MTD is not set
395
396#
397# Disk-On-Chip Device Drivers
398#
399# CONFIG_MTD_DOC2000 is not set
400# CONFIG_MTD_DOC2001 is not set
401# CONFIG_MTD_DOC2001PLUS is not set
402
403#
404# NAND Flash Device Drivers
405#
406# CONFIG_MTD_NAND is not set
407
408#
409# OneNAND Flash Device Drivers
410#
411# CONFIG_MTD_ONENAND is not set
412
413#
414# Parallel port support
415#
416# CONFIG_PARPORT is not set
417
418#
419# Plug and Play support
420#
421
422#
423# Block devices
424#
425# CONFIG_BLK_DEV_COW_COMMON is not set
426# CONFIG_BLK_DEV_LOOP is not set
427# CONFIG_BLK_DEV_NBD is not set
428# CONFIG_BLK_DEV_UB is not set
429CONFIG_BLK_DEV_RAM=y
430CONFIG_BLK_DEV_RAM_COUNT=16
431CONFIG_BLK_DEV_RAM_SIZE=8192
432CONFIG_BLK_DEV_INITRD=y
433# CONFIG_CDROM_PKTCDVD is not set
434# CONFIG_ATA_OVER_ETH is not set
435
436#
437# ATA/ATAPI/MFM/RLL support
438#
439# CONFIG_IDE is not set
440
441#
442# SCSI device support
443#
444# CONFIG_RAID_ATTRS is not set
445# CONFIG_SCSI is not set
446
447#
448# Multi-device support (RAID and LVM)
449#
450# CONFIG_MD is not set
451
452#
453# Fusion MPT device support
454#
455# CONFIG_FUSION is not set
456
457#
458# IEEE 1394 (FireWire) support
459#
460
461#
462# I2O device support
463#
464
465#
466# Network device support
467#
468CONFIG_NETDEVICES=y
469# CONFIG_DUMMY is not set
470# CONFIG_BONDING is not set
471# CONFIG_EQUALIZER is not set
472# CONFIG_TUN is not set
473
474#
475# PHY device support
476#
477# CONFIG_PHYLIB is not set
478
479#
480# Ethernet (10 or 100Mbit)
481#
482CONFIG_NET_ETHERNET=y
483CONFIG_MII=y
484CONFIG_ARM_AT91_ETHER=y
485# CONFIG_SMC91X is not set
486# CONFIG_DM9000 is not set
487
488#
489# Ethernet (1000 Mbit)
490#
491
492#
493# Ethernet (10000 Mbit)
494#
495
496#
497# Token Ring devices
498#
499
500#
501# Wireless LAN (non-hamradio)
502#
503# CONFIG_NET_RADIO is not set
504
505#
506# PCMCIA network device support
507#
508# CONFIG_NET_PCMCIA is not set
509
510#
511# Wan interfaces
512#
513# CONFIG_WAN is not set
514# CONFIG_PPP is not set
515# CONFIG_SLIP is not set
516# CONFIG_SHAPER is not set
517# CONFIG_NETCONSOLE is not set
518# CONFIG_NETPOLL is not set
519# CONFIG_NET_POLL_CONTROLLER is not set
520
521#
522# ISDN subsystem
523#
524# CONFIG_ISDN is not set
525
526#
527# Input device support
528#
529CONFIG_INPUT=y
530
531#
532# Userland interfaces
533#
534CONFIG_INPUT_MOUSEDEV=y
535# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
536CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
537CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
538# CONFIG_INPUT_JOYDEV is not set
539# CONFIG_INPUT_TSDEV is not set
540# CONFIG_INPUT_EVDEV is not set
541# CONFIG_INPUT_EVBUG is not set
542
543#
544# Input Device Drivers
545#
546# CONFIG_INPUT_KEYBOARD is not set
547# CONFIG_INPUT_MOUSE is not set
548# CONFIG_INPUT_JOYSTICK is not set
549# CONFIG_INPUT_TOUCHSCREEN is not set
550# CONFIG_INPUT_MISC is not set
551
552#
553# Hardware I/O ports
554#
555# CONFIG_SERIO is not set
556# CONFIG_GAMEPORT is not set
557
558#
559# Character devices
560#
561# CONFIG_VT is not set
562# CONFIG_SERIAL_NONSTANDARD is not set
563
564#
565# Serial drivers
566#
567# CONFIG_SERIAL_8250 is not set
568
569#
570# Non-8250 serial port support
571#
572CONFIG_SERIAL_AT91=y
573CONFIG_SERIAL_AT91_CONSOLE=y
574# CONFIG_SERIAL_AT91_TTYAT is not set
575CONFIG_SERIAL_CORE=y
576CONFIG_SERIAL_CORE_CONSOLE=y
577CONFIG_UNIX98_PTYS=y
578CONFIG_LEGACY_PTYS=y
579CONFIG_LEGACY_PTY_COUNT=256
580
581#
582# IPMI
583#
584# CONFIG_IPMI_HANDLER is not set
585
586#
587# Watchdog Cards
588#
589CONFIG_WATCHDOG=y
590CONFIG_WATCHDOG_NOWAYOUT=y
591
592#
593# Watchdog Device Drivers
594#
595# CONFIG_SOFT_WATCHDOG is not set
596CONFIG_AT91_WATCHDOG=y
597
598#
599# USB-based Watchdog Cards
600#
601# CONFIG_USBPCWATCHDOG is not set
602# CONFIG_NVRAM is not set
603# CONFIG_DTLK is not set
604# CONFIG_R3964 is not set
605
606#
607# Ftape, the floppy tape device driver
608#
609
610#
611# PCMCIA character devices
612#
613# CONFIG_SYNCLINK_CS is not set
614# CONFIG_CARDMAN_4000 is not set
615# CONFIG_CARDMAN_4040 is not set
616# CONFIG_RAW_DRIVER is not set
617
618#
619# TPM devices
620#
621# CONFIG_TCG_TPM is not set
622# CONFIG_TELCLOCK is not set
623
624#
625# I2C support
626#
627CONFIG_I2C=y
628CONFIG_I2C_CHARDEV=y
629
630#
631# I2C Algorithms
632#
633# CONFIG_I2C_ALGOBIT is not set
634# CONFIG_I2C_ALGOPCF is not set
635# CONFIG_I2C_ALGOPCA is not set
636
637#
638# I2C Hardware Bus support
639#
640# CONFIG_I2C_OCORES is not set
641# CONFIG_I2C_PARPORT_LIGHT is not set
642# CONFIG_I2C_STUB is not set
643# CONFIG_I2C_PCA_ISA is not set
644
645#
646# Miscellaneous I2C Chip support
647#
648# CONFIG_SENSORS_DS1337 is not set
649# CONFIG_SENSORS_DS1374 is not set
650# CONFIG_SENSORS_EEPROM is not set
651# CONFIG_SENSORS_PCF8574 is not set
652# CONFIG_SENSORS_PCA9539 is not set
653# CONFIG_SENSORS_PCF8591 is not set
654# CONFIG_SENSORS_MAX6875 is not set
655# CONFIG_I2C_DEBUG_CORE is not set
656# CONFIG_I2C_DEBUG_ALGO is not set
657# CONFIG_I2C_DEBUG_BUS is not set
658# CONFIG_I2C_DEBUG_CHIP is not set
659
660#
661# SPI support
662#
663# CONFIG_SPI is not set
664# CONFIG_SPI_MASTER is not set
665
666#
667# Dallas's 1-wire bus
668#
669
670#
671# Hardware Monitoring support
672#
673CONFIG_HWMON=y
674# CONFIG_HWMON_VID is not set
675# CONFIG_SENSORS_ABITUGURU is not set
676# CONFIG_SENSORS_ADM1021 is not set
677# CONFIG_SENSORS_ADM1025 is not set
678# CONFIG_SENSORS_ADM1026 is not set
679# CONFIG_SENSORS_ADM1031 is not set
680# CONFIG_SENSORS_ADM9240 is not set
681# CONFIG_SENSORS_ASB100 is not set
682# CONFIG_SENSORS_ATXP1 is not set
683# CONFIG_SENSORS_DS1621 is not set
684# CONFIG_SENSORS_F71805F is not set
685# CONFIG_SENSORS_FSCHER is not set
686# CONFIG_SENSORS_FSCPOS is not set
687# CONFIG_SENSORS_GL518SM is not set
688# CONFIG_SENSORS_GL520SM is not set
689# CONFIG_SENSORS_IT87 is not set
690# CONFIG_SENSORS_LM63 is not set
691# CONFIG_SENSORS_LM75 is not set
692# CONFIG_SENSORS_LM77 is not set
693# CONFIG_SENSORS_LM78 is not set
694# CONFIG_SENSORS_LM80 is not set
695# CONFIG_SENSORS_LM83 is not set
696# CONFIG_SENSORS_LM85 is not set
697# CONFIG_SENSORS_LM87 is not set
698# CONFIG_SENSORS_LM90 is not set
699# CONFIG_SENSORS_LM92 is not set
700# CONFIG_SENSORS_MAX1619 is not set
701# CONFIG_SENSORS_PC87360 is not set
702# CONFIG_SENSORS_SMSC47M1 is not set
703# CONFIG_SENSORS_SMSC47M192 is not set
704# CONFIG_SENSORS_SMSC47B397 is not set
705# CONFIG_SENSORS_W83781D is not set
706# CONFIG_SENSORS_W83791D is not set
707# CONFIG_SENSORS_W83792D is not set
708# CONFIG_SENSORS_W83L785TS is not set
709# CONFIG_SENSORS_W83627HF is not set
710# CONFIG_SENSORS_W83627EHF is not set
711# CONFIG_HWMON_DEBUG_CHIP is not set
712
713#
714# Misc devices
715#
716
717#
718# LED devices
719#
720# CONFIG_NEW_LEDS is not set
721
722#
723# LED drivers
724#
725
726#
727# LED Triggers
728#
729
730#
731# Multimedia devices
732#
733# CONFIG_VIDEO_DEV is not set
734CONFIG_VIDEO_V4L2=y
735
736#
737# Digital Video Broadcasting Devices
738#
739# CONFIG_DVB is not set
740# CONFIG_USB_DABUSB is not set
741
742#
743# Graphics support
744#
745# CONFIG_FB is not set
746
747#
748# Sound
749#
750# CONFIG_SOUND is not set
751
752#
753# USB support
754#
755CONFIG_USB_ARCH_HAS_HCD=y
756CONFIG_USB_ARCH_HAS_OHCI=y
757# CONFIG_USB_ARCH_HAS_EHCI is not set
758CONFIG_USB=y
759CONFIG_USB_DEBUG=y
760
761#
762# Miscellaneous USB options
763#
764CONFIG_USB_DEVICEFS=y
765# CONFIG_USB_BANDWIDTH is not set
766# CONFIG_USB_DYNAMIC_MINORS is not set
767# CONFIG_USB_OTG is not set
768
769#
770# USB Host Controller Drivers
771#
772# CONFIG_USB_ISP116X_HCD is not set
773CONFIG_USB_OHCI_HCD=y
774# CONFIG_USB_OHCI_BIG_ENDIAN is not set
775CONFIG_USB_OHCI_LITTLE_ENDIAN=y
776# CONFIG_USB_SL811_HCD is not set
777
778#
779# USB Device Class drivers
780#
781# CONFIG_USB_ACM is not set
782# CONFIG_USB_PRINTER is not set
783
784#
785# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
786#
787
788#
789# may also be needed; see USB_STORAGE Help for more information
790#
791# CONFIG_USB_STORAGE is not set
792# CONFIG_USB_LIBUSUAL is not set
793
794#
795# USB Input Devices
796#
797# CONFIG_USB_HID is not set
798
799#
800# USB HID Boot Protocol drivers
801#
802# CONFIG_USB_KBD is not set
803# CONFIG_USB_MOUSE is not set
804# CONFIG_USB_AIPTEK is not set
805# CONFIG_USB_WACOM is not set
806# CONFIG_USB_ACECAD is not set
807# CONFIG_USB_KBTAB is not set
808# CONFIG_USB_POWERMATE is not set
809# CONFIG_USB_TOUCHSCREEN is not set
810# CONFIG_USB_YEALINK is not set
811# CONFIG_USB_XPAD is not set
812# CONFIG_USB_ATI_REMOTE is not set
813# CONFIG_USB_ATI_REMOTE2 is not set
814# CONFIG_USB_KEYSPAN_REMOTE is not set
815# CONFIG_USB_APPLETOUCH is not set
816
817#
818# USB Imaging devices
819#
820# CONFIG_USB_MDC800 is not set
821
822#
823# USB Network Adapters
824#
825# CONFIG_USB_CATC is not set
826# CONFIG_USB_KAWETH is not set
827# CONFIG_USB_PEGASUS is not set
828# CONFIG_USB_RTL8150 is not set
829# CONFIG_USB_USBNET is not set
830CONFIG_USB_MON=y
831
832#
833# USB port drivers
834#
835
836#
837# USB Serial Converter support
838#
839# CONFIG_USB_SERIAL is not set
840
841#
842# USB Miscellaneous drivers
843#
844# CONFIG_USB_EMI62 is not set
845# CONFIG_USB_EMI26 is not set
846# CONFIG_USB_AUERSWALD is not set
847# CONFIG_USB_RIO500 is not set
848# CONFIG_USB_LEGOTOWER is not set
849# CONFIG_USB_LCD is not set
850# CONFIG_USB_LED is not set
851# CONFIG_USB_CY7C63 is not set
852# CONFIG_USB_CYTHERM is not set
853# CONFIG_USB_PHIDGETKIT is not set
854# CONFIG_USB_PHIDGETSERVO is not set
855# CONFIG_USB_IDMOUSE is not set
856# CONFIG_USB_APPLEDISPLAY is not set
857# CONFIG_USB_LD is not set
858# CONFIG_USB_TEST is not set
859
860#
861# USB DSL modem support
862#
863
864#
865# USB Gadget Support
866#
867CONFIG_USB_GADGET=y
868# CONFIG_USB_GADGET_DEBUG_FILES is not set
869CONFIG_USB_GADGET_SELECTED=y
870# CONFIG_USB_GADGET_NET2280 is not set
871# CONFIG_USB_GADGET_PXA2XX is not set
872# CONFIG_USB_GADGET_GOKU is not set
873# CONFIG_USB_GADGET_LH7A40X is not set
874# CONFIG_USB_GADGET_OMAP is not set
875CONFIG_USB_GADGET_AT91=y
876CONFIG_USB_AT91=y
877# CONFIG_USB_GADGET_DUMMY_HCD is not set
878# CONFIG_USB_GADGET_DUALSPEED is not set
879# CONFIG_USB_ZERO is not set
880# CONFIG_USB_ETH is not set
881# CONFIG_USB_GADGETFS is not set
882# CONFIG_USB_FILE_STORAGE is not set
883# CONFIG_USB_G_SERIAL is not set
884
885#
886# MMC/SD Card support
887#
888CONFIG_MMC=y
889# CONFIG_MMC_DEBUG is not set
890CONFIG_MMC_BLOCK=y
891CONFIG_MMC_AT91RM9200=y
892
893#
894# Real Time Clock
895#
896CONFIG_RTC_LIB=y
897# CONFIG_RTC_CLASS is not set
898
899#
900# File systems
901#
902CONFIG_EXT2_FS=y
903# CONFIG_EXT2_FS_XATTR is not set
904# CONFIG_EXT2_FS_XIP is not set
905# CONFIG_EXT3_FS is not set
906# CONFIG_REISERFS_FS is not set
907# CONFIG_JFS_FS is not set
908CONFIG_FS_POSIX_ACL=y
909# CONFIG_XFS_FS is not set
910# CONFIG_OCFS2_FS is not set
911# CONFIG_MINIX_FS is not set
912# CONFIG_ROMFS_FS is not set
913CONFIG_INOTIFY=y
914CONFIG_INOTIFY_USER=y
915# CONFIG_QUOTA is not set
916CONFIG_DNOTIFY=y
917# CONFIG_AUTOFS_FS is not set
918# CONFIG_AUTOFS4_FS is not set
919# CONFIG_FUSE_FS is not set
920
921#
922# CD-ROM/DVD Filesystems
923#
924# CONFIG_ISO9660_FS is not set
925# CONFIG_UDF_FS is not set
926
927#
928# DOS/FAT/NT Filesystems
929#
930# CONFIG_MSDOS_FS is not set
931# CONFIG_VFAT_FS is not set
932# CONFIG_NTFS_FS is not set
933
934#
935# Pseudo filesystems
936#
937CONFIG_PROC_FS=y
938CONFIG_SYSFS=y
939CONFIG_TMPFS=y
940# CONFIG_HUGETLB_PAGE is not set
941CONFIG_RAMFS=y
942# CONFIG_CONFIGFS_FS is not set
943
944#
945# Miscellaneous filesystems
946#
947# CONFIG_ADFS_FS is not set
948# CONFIG_AFFS_FS is not set
949# CONFIG_HFS_FS is not set
950# CONFIG_HFSPLUS_FS is not set
951# CONFIG_BEFS_FS is not set
952# CONFIG_BFS_FS is not set
953# CONFIG_EFS_FS is not set
954# CONFIG_JFFS_FS is not set
955# CONFIG_JFFS2_FS is not set
956CONFIG_CRAMFS=y
957# CONFIG_VXFS_FS is not set
958# CONFIG_HPFS_FS is not set
959# CONFIG_QNX4FS_FS is not set
960# CONFIG_SYSV_FS is not set
961# CONFIG_UFS_FS is not set
962
963#
964# Network File Systems
965#
966CONFIG_NFS_FS=y
967CONFIG_NFS_V3=y
968CONFIG_NFS_V3_ACL=y
969# CONFIG_NFS_V4 is not set
970# CONFIG_NFS_DIRECTIO is not set
971# CONFIG_NFSD is not set
972CONFIG_ROOT_NFS=y
973CONFIG_LOCKD=y
974CONFIG_LOCKD_V4=y
975CONFIG_NFS_ACL_SUPPORT=y
976CONFIG_NFS_COMMON=y
977CONFIG_SUNRPC=y
978# CONFIG_RPCSEC_GSS_KRB5 is not set
979# CONFIG_RPCSEC_GSS_SPKM3 is not set
980# CONFIG_SMB_FS is not set
981# CONFIG_CIFS is not set
982# CONFIG_NCP_FS is not set
983# CONFIG_CODA_FS is not set
984# CONFIG_AFS_FS is not set
985# CONFIG_9P_FS is not set
986
987#
988# Partition Types
989#
990# CONFIG_PARTITION_ADVANCED is not set
991CONFIG_MSDOS_PARTITION=y
992
993#
994# Native Language Support
995#
996# CONFIG_NLS is not set
997
998#
999# Profiling support
1000#
1001# CONFIG_PROFILING is not set
1002
1003#
1004# Kernel hacking
1005#
1006# CONFIG_PRINTK_TIME is not set
1007# CONFIG_MAGIC_SYSRQ is not set
1008CONFIG_DEBUG_KERNEL=y
1009CONFIG_LOG_BUF_SHIFT=14
1010CONFIG_DETECT_SOFTLOCKUP=y
1011# CONFIG_SCHEDSTATS is not set
1012# CONFIG_DEBUG_SLAB is not set
1013# CONFIG_DEBUG_MUTEXES is not set
1014# CONFIG_DEBUG_SPINLOCK is not set
1015# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1016# CONFIG_DEBUG_KOBJECT is not set
1017CONFIG_DEBUG_BUGVERBOSE=y
1018# CONFIG_DEBUG_INFO is not set
1019# CONFIG_DEBUG_FS is not set
1020# CONFIG_DEBUG_VM is not set
1021CONFIG_FRAME_POINTER=y
1022# CONFIG_UNWIND_INFO is not set
1023CONFIG_FORCED_INLINING=y
1024# CONFIG_RCU_TORTURE_TEST is not set
1025CONFIG_DEBUG_USER=y
1026# CONFIG_DEBUG_WAITQ is not set
1027# CONFIG_DEBUG_ERRORS is not set
1028CONFIG_DEBUG_LL=y
1029# CONFIG_DEBUG_ICEDCC is not set
1030
1031#
1032# Security options
1033#
1034# CONFIG_KEYS is not set
1035# CONFIG_SECURITY is not set
1036
1037#
1038# Cryptographic options
1039#
1040# CONFIG_CRYPTO is not set
1041
1042#
1043# Hardware crypto devices
1044#
1045
1046#
1047# Library routines
1048#
1049# CONFIG_CRC_CCITT is not set
1050# CONFIG_CRC16 is not set
1051CONFIG_CRC32=y
1052# CONFIG_LIBCRC32C is not set
1053CONFIG_ZLIB_INFLATE=y
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index e17661380096..f20814e6f497 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.17 3# Linux kernel version: 2.6.17-git9
4# Tue Jun 20 18:57:01 2006 4# Sun Jun 25 23:56:32 2006
5# 5#
6CONFIG_ARM=y 6CONFIG_ARM=y
7CONFIG_MMU=y 7CONFIG_MMU=y
@@ -49,7 +49,6 @@ CONFIG_SLAB=y
49# CONFIG_TINY_SHMEM is not set 49# CONFIG_TINY_SHMEM is not set
50CONFIG_BASE_SMALL=0 50CONFIG_BASE_SMALL=0
51# CONFIG_SLOB is not set 51# CONFIG_SLOB is not set
52CONFIG_OBSOLETE_INTERMODULE=y
53 52
54# 53#
55# Loadable module support 54# Loadable module support
@@ -81,18 +80,26 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
81# 80#
82# System Type 81# System Type
83# 82#
83# CONFIG_ARCH_AAEC2000 is not set
84# CONFIG_ARCH_INTEGRATOR is not set
85# CONFIG_ARCH_REALVIEW is not set
86# CONFIG_ARCH_VERSATILE is not set
87# CONFIG_ARCH_AT91RM9200 is not set
84# CONFIG_ARCH_CLPS7500 is not set 88# CONFIG_ARCH_CLPS7500 is not set
85# CONFIG_ARCH_CLPS711X is not set 89# CONFIG_ARCH_CLPS711X is not set
86# CONFIG_ARCH_CO285 is not set 90# CONFIG_ARCH_CO285 is not set
87# CONFIG_ARCH_EBSA110 is not set 91# CONFIG_ARCH_EBSA110 is not set
88# CONFIG_ARCH_EP93XX is not set 92# CONFIG_ARCH_EP93XX is not set
89# CONFIG_ARCH_FOOTBRIDGE is not set 93# CONFIG_ARCH_FOOTBRIDGE is not set
90# CONFIG_ARCH_INTEGRATOR is not set 94# CONFIG_ARCH_NETX is not set
95# CONFIG_ARCH_H720X is not set
96# CONFIG_ARCH_IMX is not set
91# CONFIG_ARCH_IOP3XX is not set 97# CONFIG_ARCH_IOP3XX is not set
92# CONFIG_ARCH_IXP4XX is not set 98# CONFIG_ARCH_IXP4XX is not set
93# CONFIG_ARCH_IXP2000 is not set 99# CONFIG_ARCH_IXP2000 is not set
94# CONFIG_ARCH_IXP23XX is not set 100# CONFIG_ARCH_IXP23XX is not set
95# CONFIG_ARCH_L7200 is not set 101# CONFIG_ARCH_L7200 is not set
102# CONFIG_ARCH_PNX4008 is not set
96# CONFIG_ARCH_PXA is not set 103# CONFIG_ARCH_PXA is not set
97# CONFIG_ARCH_RPC is not set 104# CONFIG_ARCH_RPC is not set
98# CONFIG_ARCH_SA1100 is not set 105# CONFIG_ARCH_SA1100 is not set
@@ -100,14 +107,6 @@ CONFIG_ARCH_S3C2410=y
100# CONFIG_ARCH_SHARK is not set 107# CONFIG_ARCH_SHARK is not set
101# CONFIG_ARCH_LH7A40X is not set 108# CONFIG_ARCH_LH7A40X is not set
102# CONFIG_ARCH_OMAP is not set 109# CONFIG_ARCH_OMAP is not set
103# CONFIG_ARCH_VERSATILE is not set
104# CONFIG_ARCH_REALVIEW is not set
105# CONFIG_ARCH_IMX is not set
106# CONFIG_ARCH_H720X is not set
107# CONFIG_ARCH_AAEC2000 is not set
108# CONFIG_ARCH_AT91RM9200 is not set
109# CONFIG_ARCH_PNX4008 is not set
110# CONFIG_ARCH_NETX is not set
111 110
112# 111#
113# S3C24XX Implementations 112# S3C24XX Implementations
@@ -123,11 +122,14 @@ CONFIG_ARCH_SMDK2410=y
123CONFIG_ARCH_S3C2440=y 122CONFIG_ARCH_S3C2440=y
124CONFIG_SMDK2440_CPU2440=y 123CONFIG_SMDK2440_CPU2440=y
125CONFIG_SMDK2440_CPU2442=y 124CONFIG_SMDK2440_CPU2442=y
125CONFIG_MACH_SMDK2413=y
126CONFIG_MACH_VR1000=y 126CONFIG_MACH_VR1000=y
127CONFIG_MACH_RX3715=y 127CONFIG_MACH_RX3715=y
128CONFIG_MACH_OTOM=y 128CONFIG_MACH_OTOM=y
129CONFIG_MACH_NEXCODER_2440=y 129CONFIG_MACH_NEXCODER_2440=y
130CONFIG_S3C2410_CLOCK=y
130CONFIG_CPU_S3C2410=y 131CONFIG_CPU_S3C2410=y
132CONFIG_CPU_S3C2412=y
131CONFIG_CPU_S3C244X=y 133CONFIG_CPU_S3C244X=y
132CONFIG_CPU_S3C2440=y 134CONFIG_CPU_S3C2440=y
133CONFIG_CPU_S3C2442=y 135CONFIG_CPU_S3C2442=y
@@ -153,8 +155,11 @@ CONFIG_S3C2410_LOWLEVEL_UART_PORT=0
153# 155#
154CONFIG_CPU_32=y 156CONFIG_CPU_32=y
155CONFIG_CPU_ARM920T=y 157CONFIG_CPU_ARM920T=y
158CONFIG_CPU_ARM926T=y
156CONFIG_CPU_32v4=y 159CONFIG_CPU_32v4=y
160CONFIG_CPU_32v5=y
157CONFIG_CPU_ABRT_EV4T=y 161CONFIG_CPU_ABRT_EV4T=y
162CONFIG_CPU_ABRT_EV5TJ=y
158CONFIG_CPU_CACHE_V4WT=y 163CONFIG_CPU_CACHE_V4WT=y
159CONFIG_CPU_CACHE_VIVT=y 164CONFIG_CPU_CACHE_VIVT=y
160CONFIG_CPU_COPY_V4WB=y 165CONFIG_CPU_COPY_V4WB=y
@@ -167,6 +172,7 @@ CONFIG_CPU_TLB_V4WBI=y
167# CONFIG_CPU_ICACHE_DISABLE is not set 172# CONFIG_CPU_ICACHE_DISABLE is not set
168# CONFIG_CPU_DCACHE_DISABLE is not set 173# CONFIG_CPU_DCACHE_DISABLE is not set
169# CONFIG_CPU_DCACHE_WRITETHROUGH is not set 174# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
175# CONFIG_CPU_CACHE_ROUND_ROBIN is not set
170 176
171# 177#
172# Bus support 178# Bus support
@@ -214,6 +220,7 @@ CONFIG_CMDLINE="root=/dev/hda1 ro init=/bin/bash console=ttySAC0"
214CONFIG_FPE_NWFPE=y 220CONFIG_FPE_NWFPE=y
215# CONFIG_FPE_NWFPE_XP is not set 221# CONFIG_FPE_NWFPE_XP is not set
216# CONFIG_FPE_FASTFPE is not set 222# CONFIG_FPE_FASTFPE is not set
223# CONFIG_VFP is not set
217 224
218# 225#
219# Userspace binary formats 226# Userspace binary formats
@@ -242,6 +249,8 @@ CONFIG_NET=y
242# CONFIG_NETDEBUG is not set 249# CONFIG_NETDEBUG is not set
243# CONFIG_PACKET is not set 250# CONFIG_PACKET is not set
244CONFIG_UNIX=y 251CONFIG_UNIX=y
252CONFIG_XFRM=y
253# CONFIG_XFRM_USER is not set
245# CONFIG_NET_KEY is not set 254# CONFIG_NET_KEY is not set
246CONFIG_INET=y 255CONFIG_INET=y
247# CONFIG_IP_MULTICAST is not set 256# CONFIG_IP_MULTICAST is not set
@@ -260,6 +269,8 @@ CONFIG_IP_PNP_BOOTP=y
260# CONFIG_INET_IPCOMP is not set 269# CONFIG_INET_IPCOMP is not set
261# CONFIG_INET_XFRM_TUNNEL is not set 270# CONFIG_INET_XFRM_TUNNEL is not set
262# CONFIG_INET_TUNNEL is not set 271# CONFIG_INET_TUNNEL is not set
272CONFIG_INET_XFRM_MODE_TRANSPORT=y
273CONFIG_INET_XFRM_MODE_TUNNEL=y
263CONFIG_INET_DIAG=y 274CONFIG_INET_DIAG=y
264CONFIG_INET_TCP_DIAG=y 275CONFIG_INET_TCP_DIAG=y
265# CONFIG_TCP_CONG_ADVANCED is not set 276# CONFIG_TCP_CONG_ADVANCED is not set
@@ -267,6 +278,7 @@ CONFIG_TCP_CONG_BIC=y
267# CONFIG_IPV6 is not set 278# CONFIG_IPV6 is not set
268# CONFIG_INET6_XFRM_TUNNEL is not set 279# CONFIG_INET6_XFRM_TUNNEL is not set
269# CONFIG_INET6_TUNNEL is not set 280# CONFIG_INET6_TUNNEL is not set
281# CONFIG_NETWORK_SECMARK is not set
270# CONFIG_NETFILTER is not set 282# CONFIG_NETFILTER is not set
271 283
272# 284#
@@ -321,6 +333,7 @@ CONFIG_STANDALONE=y
321CONFIG_PREVENT_FIRMWARE_BUILD=y 333CONFIG_PREVENT_FIRMWARE_BUILD=y
322# CONFIG_FW_LOADER is not set 334# CONFIG_FW_LOADER is not set
323# CONFIG_DEBUG_DRIVER is not set 335# CONFIG_DEBUG_DRIVER is not set
336# CONFIG_SYS_HYPERVISOR is not set
324 337
325# 338#
326# Connector - unified userspace <-> kernelspace linker 339# Connector - unified userspace <-> kernelspace linker
@@ -408,10 +421,12 @@ CONFIG_MTD_BAST_MAXSIZE=4
408# 421#
409CONFIG_MTD_NAND=y 422CONFIG_MTD_NAND=y
410# CONFIG_MTD_NAND_VERIFY_WRITE is not set 423# CONFIG_MTD_NAND_VERIFY_WRITE is not set
424# CONFIG_MTD_NAND_ECC_SMC is not set
411CONFIG_MTD_NAND_IDS=y 425CONFIG_MTD_NAND_IDS=y
412CONFIG_MTD_NAND_S3C2410=y 426CONFIG_MTD_NAND_S3C2410=y
413# CONFIG_MTD_NAND_S3C2410_DEBUG is not set 427# CONFIG_MTD_NAND_S3C2410_DEBUG is not set
414# CONFIG_MTD_NAND_S3C2410_HWECC is not set 428# CONFIG_MTD_NAND_S3C2410_HWECC is not set
429# CONFIG_MTD_NAND_S3C2410_CLKSTOP is not set
415# CONFIG_MTD_NAND_DISKONCHIP is not set 430# CONFIG_MTD_NAND_DISKONCHIP is not set
416# CONFIG_MTD_NAND_NANDSIM is not set 431# CONFIG_MTD_NAND_NANDSIM is not set
417 432
@@ -425,8 +440,8 @@ CONFIG_MTD_NAND_S3C2410=y
425# 440#
426CONFIG_PARPORT=y 441CONFIG_PARPORT=y
427# CONFIG_PARPORT_PC is not set 442# CONFIG_PARPORT_PC is not set
428# CONFIG_PARPORT_ARC is not set
429# CONFIG_PARPORT_GSC is not set 443# CONFIG_PARPORT_GSC is not set
444# CONFIG_PARPORT_AX88796 is not set
430CONFIG_PARPORT_1284=y 445CONFIG_PARPORT_1284=y
431 446
432# 447#
@@ -735,6 +750,7 @@ CONFIG_I2C_ALGOBIT=m
735# 750#
736# CONFIG_I2C_ELEKTOR is not set 751# CONFIG_I2C_ELEKTOR is not set
737CONFIG_I2C_ISA=m 752CONFIG_I2C_ISA=m
753# CONFIG_I2C_OCORES is not set
738# CONFIG_I2C_PARPORT is not set 754# CONFIG_I2C_PARPORT is not set
739# CONFIG_I2C_PARPORT_LIGHT is not set 755# CONFIG_I2C_PARPORT_LIGHT is not set
740CONFIG_I2C_S3C2410=y 756CONFIG_I2C_S3C2410=y
@@ -765,13 +781,13 @@ CONFIG_SENSORS_EEPROM=m
765# 781#
766# Dallas's 1-wire bus 782# Dallas's 1-wire bus
767# 783#
768# CONFIG_W1 is not set
769 784
770# 785#
771# Hardware Monitoring support 786# Hardware Monitoring support
772# 787#
773CONFIG_HWMON=y 788CONFIG_HWMON=y
774CONFIG_HWMON_VID=m 789CONFIG_HWMON_VID=m
790# CONFIG_SENSORS_ABITUGURU is not set
775# CONFIG_SENSORS_ADM1021 is not set 791# CONFIG_SENSORS_ADM1021 is not set
776# CONFIG_SENSORS_ADM1025 is not set 792# CONFIG_SENSORS_ADM1025 is not set
777# CONFIG_SENSORS_ADM1026 is not set 793# CONFIG_SENSORS_ADM1026 is not set
@@ -799,8 +815,10 @@ CONFIG_SENSORS_LM85=m
799# CONFIG_SENSORS_MAX1619 is not set 815# CONFIG_SENSORS_MAX1619 is not set
800# CONFIG_SENSORS_PC87360 is not set 816# CONFIG_SENSORS_PC87360 is not set
801# CONFIG_SENSORS_SMSC47M1 is not set 817# CONFIG_SENSORS_SMSC47M1 is not set
818# CONFIG_SENSORS_SMSC47M192 is not set
802# CONFIG_SENSORS_SMSC47B397 is not set 819# CONFIG_SENSORS_SMSC47B397 is not set
803# CONFIG_SENSORS_W83781D is not set 820# CONFIG_SENSORS_W83781D is not set
821# CONFIG_SENSORS_W83791D is not set
804# CONFIG_SENSORS_W83792D is not set 822# CONFIG_SENSORS_W83792D is not set
805# CONFIG_SENSORS_W83L785TS is not set 823# CONFIG_SENSORS_W83L785TS is not set
806# CONFIG_SENSORS_W83627HF is not set 824# CONFIG_SENSORS_W83627HF is not set
@@ -845,6 +863,7 @@ CONFIG_FB_CFB_COPYAREA=y
845CONFIG_FB_CFB_IMAGEBLIT=y 863CONFIG_FB_CFB_IMAGEBLIT=y
846# CONFIG_FB_MACMODES is not set 864# CONFIG_FB_MACMODES is not set
847CONFIG_FB_FIRMWARE_EDID=y 865CONFIG_FB_FIRMWARE_EDID=y
866# CONFIG_FB_BACKLIGHT is not set
848CONFIG_FB_MODE_HELPERS=y 867CONFIG_FB_MODE_HELPERS=y
849# CONFIG_FB_TILEBLITTING is not set 868# CONFIG_FB_TILEBLITTING is not set
850# CONFIG_FB_S1D13XXX is not set 869# CONFIG_FB_S1D13XXX is not set
@@ -976,10 +995,12 @@ CONFIG_USB_MON=y
976# CONFIG_USB_LEGOTOWER is not set 995# CONFIG_USB_LEGOTOWER is not set
977# CONFIG_USB_LCD is not set 996# CONFIG_USB_LCD is not set
978# CONFIG_USB_LED is not set 997# CONFIG_USB_LED is not set
998# CONFIG_USB_CY7C63 is not set
979# CONFIG_USB_CYTHERM is not set 999# CONFIG_USB_CYTHERM is not set
980# CONFIG_USB_PHIDGETKIT is not set 1000# CONFIG_USB_PHIDGETKIT is not set
981# CONFIG_USB_PHIDGETSERVO is not set 1001# CONFIG_USB_PHIDGETSERVO is not set
982# CONFIG_USB_IDMOUSE is not set 1002# CONFIG_USB_IDMOUSE is not set
1003# CONFIG_USB_APPLEDISPLAY is not set
983# CONFIG_USB_LD is not set 1004# CONFIG_USB_LD is not set
984# CONFIG_USB_TEST is not set 1005# CONFIG_USB_TEST is not set
985 1006
@@ -1024,6 +1045,7 @@ CONFIG_FS_MBCACHE=y
1024# CONFIG_MINIX_FS is not set 1045# CONFIG_MINIX_FS is not set
1025CONFIG_ROMFS_FS=y 1046CONFIG_ROMFS_FS=y
1026CONFIG_INOTIFY=y 1047CONFIG_INOTIFY=y
1048CONFIG_INOTIFY_USER=y
1027# CONFIG_QUOTA is not set 1049# CONFIG_QUOTA is not set
1028CONFIG_DNOTIFY=y 1050CONFIG_DNOTIFY=y
1029# CONFIG_AUTOFS_FS is not set 1051# CONFIG_AUTOFS_FS is not set
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index b5bcebca1cd6..75af6d6e2f28 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -340,7 +340,7 @@ sys_mmap2:
340 streq r5, [sp, #4] 340 streq r5, [sp, #4]
341 beq do_mmap2 341 beq do_mmap2
342 mov r0, #-EINVAL 342 mov r0, #-EINVAL
343 RETINSTR(mov,pc, lr) 343 mov pc, lr
344#else 344#else
345 str r5, [sp, #4] 345 str r5, [sp, #4]
346 b do_mmap2 346 b do_mmap2
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index adf62e5eaad7..2af7e44218af 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -39,7 +39,7 @@
39 __INIT 39 __INIT
40 .type stext, %function 40 .type stext, %function
41ENTRY(stext) 41ENTRY(stext)
42 msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC @ ensure svc mode 42 msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
43 @ and irqs disabled 43 @ and irqs disabled
44 mrc p15, 0, r9, c0, c0 @ get processor id 44 mrc p15, 0, r9, c0, c0 @ get processor id
45 bl __lookup_processor_type @ r5=procinfo r9=cpuid 45 bl __lookup_processor_type @ r5=procinfo r9=cpuid
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 04f7344e356a..330b9476c398 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -71,7 +71,7 @@
71 __INIT 71 __INIT
72 .type stext, %function 72 .type stext, %function
73ENTRY(stext) 73ENTRY(stext)
74 msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC @ ensure svc mode 74 msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
75 @ and irqs disabled 75 @ and irqs disabled
76 mrc p15, 0, r9, c0, c0 @ get processor id 76 mrc p15, 0, r9, c0, c0 @ get processor id
77 bl __lookup_processor_type @ r5=procinfo r9=cpuid 77 bl __lookup_processor_type @ r5=procinfo r9=cpuid
@@ -104,7 +104,7 @@ ENTRY(secondary_startup)
104 * the processor type - there is no need to check the machine type 104 * the processor type - there is no need to check the machine type
105 * as it has already been validated by the primary processor. 105 * as it has already been validated by the primary processor.
106 */ 106 */
107 msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC 107 msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
108 mrc p15, 0, r9, c0, c0 @ get processor id 108 mrc p15, 0, r9, c0, c0 @ get processor id
109 bl __lookup_processor_type 109 bl __lookup_processor_type
110 movs r10, r5 @ invalid processor? 110 movs r10, r5 @ invalid processor?
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 9fc9af88c60c..093ccba0503c 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -808,7 +808,7 @@ static int __init topology_init(void)
808 int cpu; 808 int cpu;
809 809
810 for_each_possible_cpu(cpu) 810 for_each_possible_cpu(cpu)
811 register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu, NULL); 811 register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
812 812
813 return 0; 813 return 0;
814} 814}
diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S
index 16153c86c3f8..058b80d72aa1 100644
--- a/arch/arm/lib/backtrace.S
+++ b/arch/arm/lib/backtrace.S
@@ -41,7 +41,7 @@ ENTRY(c_backtrace)
41 movne r0, #0 41 movne r0, #0
42 movs frame, r0 42 movs frame, r0
431: moveq r0, #-2 431: moveq r0, #-2
44 LOADREGS(eqfd, sp!, {r4 - r8, pc}) 44 ldmeqfd sp!, {r4 - r8, pc}
45 45
462: stmfd sp!, {pc} @ calculate offset of PC in STMIA instruction 462: stmfd sp!, {pc} @ calculate offset of PC in STMIA instruction
47 ldr r0, [sp], #4 47 ldr r0, [sp], #4
@@ -85,7 +85,7 @@ ENTRY(c_backtrace)
85 * A zero next framepointer means we're done. 85 * A zero next framepointer means we're done.
86 */ 86 */
87 teq next, #0 87 teq next, #0
88 LOADREGS(eqfd, sp!, {r4 - r8, pc}) 88 ldmeqfd sp!, {r4 - r8, pc}
89 89
90 /* 90 /*
91 * The next framepointer must be above the 91 * The next framepointer must be above the
@@ -104,7 +104,7 @@ ENTRY(c_backtrace)
1041007: ldr r0, =.Lbad 1041007: ldr r0, =.Lbad
105 mov r1, frame 105 mov r1, frame
106 bl printk 106 bl printk
107 LOADREGS(fd, sp!, {r4 - r8, pc}) 107 ldmfd sp!, {r4 - r8, pc}
108 .ltorg 108 .ltorg
109 .previous 109 .previous
110 110
@@ -145,7 +145,7 @@ ENTRY(c_backtrace)
145 adrne r0, .Lcr 145 adrne r0, .Lcr
146 blne printk 146 blne printk
147 mov r0, stack 147 mov r0, stack
148 LOADREGS(fd, sp!, {instr, reg, stack, r7, r8, pc}) 148 ldmfd sp!, {instr, reg, stack, r7, r8, pc}
149 149
150.Lfp: .asciz " r%d = %08X%c" 150.Lfp: .asciz " r%d = %08X%c"
151.Lcr: .asciz "\n" 151.Lcr: .asciz "\n"
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index 7ff9f831b3f9..ea435ae2e4a5 100644
--- a/arch/arm/lib/clear_user.S
+++ b/arch/arm/lib/clear_user.S
@@ -43,10 +43,10 @@ USER( strnebt r2, [r0], #1)
43 tst r1, #1 @ x1 x0 x1 x0 x1 x0 x1 43 tst r1, #1 @ x1 x0 x1 x0 x1 x0 x1
44USER( strnebt r2, [r0], #1) 44USER( strnebt r2, [r0], #1)
45 mov r0, #0 45 mov r0, #0
46 LOADREGS(fd,sp!, {r1, pc}) 46 ldmfd sp!, {r1, pc}
47 47
48 .section .fixup,"ax" 48 .section .fixup,"ax"
49 .align 0 49 .align 0
509001: LOADREGS(fd,sp!, {r0, pc}) 509001: ldmfd sp!, {r0, pc}
51 .previous 51 .previous
52 52
diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
index 68117968482b..666c99cc0744 100644
--- a/arch/arm/lib/copy_page.S
+++ b/arch/arm/lib/copy_page.S
@@ -43,4 +43,4 @@ ENTRY(copy_page)
43 bgt 1b @ 1 43 bgt 1b @ 1
44 PLD( ldmeqia r1!, {r3, r4, ip, lr} ) 44 PLD( ldmeqia r1!, {r3, r4, ip, lr} )
45 PLD( beq 2b ) 45 PLD( beq 2b )
46 LOADREGS(fd, sp!, {r4, pc}) @ 3 46 ldmfd sp!, {r4, pc} @ 3
diff --git a/arch/arm/lib/csumipv6.S b/arch/arm/lib/csumipv6.S
index 7065a20ee8ad..9621469beec1 100644
--- a/arch/arm/lib/csumipv6.S
+++ b/arch/arm/lib/csumipv6.S
@@ -28,5 +28,5 @@ ENTRY(__csum_ipv6_magic)
28 adcs r0, r0, r3 28 adcs r0, r0, r3
29 adcs r0, r0, r2 29 adcs r0, r0, r2
30 adcs r0, r0, #0 30 adcs r0, r0, #0
31 LOADREGS(fd, sp!, {pc}) 31 ldmfd sp!, {pc}
32 32
diff --git a/arch/arm/lib/delay.S b/arch/arm/lib/delay.S
index 9183b06c0e2f..930a70259220 100644
--- a/arch/arm/lib/delay.S
+++ b/arch/arm/lib/delay.S
@@ -31,7 +31,7 @@ ENTRY(__const_udelay) @ 0 <= r0 <= 0x7fffff06
31 mov r2, r2, lsr #10 @ max = 0x00007fff 31 mov r2, r2, lsr #10 @ max = 0x00007fff
32 mul r0, r2, r0 @ max = 2^32-1 32 mul r0, r2, r0 @ max = 2^32-1
33 movs r0, r0, lsr #6 33 movs r0, r0, lsr #6
34 RETINSTR(moveq,pc,lr) 34 moveq pc, lr
35 35
36/* 36/*
37 * loops = r0 * HZ * loops_per_jiffy / 1000000 37 * loops = r0 * HZ * loops_per_jiffy / 1000000
@@ -43,20 +43,20 @@ ENTRY(__const_udelay) @ 0 <= r0 <= 0x7fffff06
43ENTRY(__delay) 43ENTRY(__delay)
44 subs r0, r0, #1 44 subs r0, r0, #1
45#if 0 45#if 0
46 RETINSTR(movls,pc,lr) 46 movls pc, lr
47 subs r0, r0, #1 47 subs r0, r0, #1
48 RETINSTR(movls,pc,lr) 48 movls pc, lr
49 subs r0, r0, #1 49 subs r0, r0, #1
50 RETINSTR(movls,pc,lr) 50 movls pc, lr
51 subs r0, r0, #1 51 subs r0, r0, #1
52 RETINSTR(movls,pc,lr) 52 movls pc, lr
53 subs r0, r0, #1 53 subs r0, r0, #1
54 RETINSTR(movls,pc,lr) 54 movls pc, lr
55 subs r0, r0, #1 55 subs r0, r0, #1
56 RETINSTR(movls,pc,lr) 56 movls pc, lr
57 subs r0, r0, #1 57 subs r0, r0, #1
58 RETINSTR(movls,pc,lr) 58 movls pc, lr
59 subs r0, r0, #1 59 subs r0, r0, #1
60#endif 60#endif
61 bhi __delay 61 bhi __delay
62 RETINSTR(mov,pc,lr) 62 mov pc, lr
diff --git a/arch/arm/lib/ecard.S b/arch/arm/lib/ecard.S
index fb7b602a6f76..c55aaa2a2088 100644
--- a/arch/arm/lib/ecard.S
+++ b/arch/arm/lib/ecard.S
@@ -29,7 +29,7 @@ ENTRY(ecard_loader_read)
29 CPSR2SPSR(r0) 29 CPSR2SPSR(r0)
30 mov lr, pc 30 mov lr, pc
31 mov pc, r2 31 mov pc, r2
32 LOADREGS(fd, sp!, {r4 - r12, pc}) 32 ldmfd sp!, {r4 - r12, pc}
33 33
34@ Purpose: call an expansion card loader to reset the card 34@ Purpose: call an expansion card loader to reset the card
35@ Proto : void read_loader(int card_base, char *loader); 35@ Proto : void read_loader(int card_base, char *loader);
@@ -41,5 +41,5 @@ ENTRY(ecard_loader_reset)
41 CPSR2SPSR(r0) 41 CPSR2SPSR(r0)
42 mov lr, pc 42 mov lr, pc
43 add pc, r1, #8 43 add pc, r1, #8
44 LOADREGS(fd, sp!, {r4 - r12, pc}) 44 ldmfd sp!, {r4 - r12, pc}
45 45
diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S
index 6f8e27a58c78..a5ca0248aa4e 100644
--- a/arch/arm/lib/findbit.S
+++ b/arch/arm/lib/findbit.S
@@ -32,7 +32,7 @@ ENTRY(_find_first_zero_bit_le)
322: cmp r2, r1 @ any more? 322: cmp r2, r1 @ any more?
33 blo 1b 33 blo 1b
343: mov r0, r1 @ no free bits 343: mov r0, r1 @ no free bits
35 RETINSTR(mov,pc,lr) 35 mov pc, lr
36 36
37/* 37/*
38 * Purpose : Find next 'zero' bit 38 * Purpose : Find next 'zero' bit
@@ -66,7 +66,7 @@ ENTRY(_find_first_bit_le)
662: cmp r2, r1 @ any more? 662: cmp r2, r1 @ any more?
67 blo 1b 67 blo 1b
683: mov r0, r1 @ no free bits 683: mov r0, r1 @ no free bits
69 RETINSTR(mov,pc,lr) 69 mov pc, lr
70 70
71/* 71/*
72 * Purpose : Find next 'one' bit 72 * Purpose : Find next 'one' bit
@@ -98,7 +98,7 @@ ENTRY(_find_first_zero_bit_be)
982: cmp r2, r1 @ any more? 982: cmp r2, r1 @ any more?
99 blo 1b 99 blo 1b
1003: mov r0, r1 @ no free bits 1003: mov r0, r1 @ no free bits
101 RETINSTR(mov,pc,lr) 101 mov pc, lr
102 102
103ENTRY(_find_next_zero_bit_be) 103ENTRY(_find_next_zero_bit_be)
104 teq r1, #0 104 teq r1, #0
@@ -126,7 +126,7 @@ ENTRY(_find_first_bit_be)
1262: cmp r2, r1 @ any more? 1262: cmp r2, r1 @ any more?
127 blo 1b 127 blo 1b
1283: mov r0, r1 @ no free bits 1283: mov r0, r1 @ no free bits
129 RETINSTR(mov,pc,lr) 129 mov pc, lr
130 130
131ENTRY(_find_next_bit_be) 131ENTRY(_find_next_bit_be)
132 teq r1, #0 132 teq r1, #0
@@ -164,5 +164,5 @@ ENTRY(_find_next_bit_be)
164 addeq r2, r2, #1 164 addeq r2, r2, #1
165 mov r0, r2 165 mov r0, r2
166#endif 166#endif
167 RETINSTR(mov,pc,lr) 167 mov pc, lr
168 168
diff --git a/arch/arm/lib/io-readsb.S b/arch/arm/lib/io-readsb.S
index d3d8de71a2c8..fb966ad0276f 100644
--- a/arch/arm/lib/io-readsb.S
+++ b/arch/arm/lib/io-readsb.S
@@ -72,7 +72,7 @@ ENTRY(__raw_readsb)
72 bpl .Linsb_16_lp 72 bpl .Linsb_16_lp
73 73
74 tst r2, #15 74 tst r2, #15
75 LOADREGS(eqfd, sp!, {r4 - r6, pc}) 75 ldmeqfd sp!, {r4 - r6, pc}
76 76
77.Linsb_no_16: tst r2, #8 77.Linsb_no_16: tst r2, #8
78 beq .Linsb_no_8 78 beq .Linsb_no_8
@@ -109,7 +109,7 @@ ENTRY(__raw_readsb)
109 str r3, [r1], #4 109 str r3, [r1], #4
110 110
111.Linsb_no_4: ands r2, r2, #3 111.Linsb_no_4: ands r2, r2, #3
112 LOADREGS(eqfd, sp!, {r4 - r6, pc}) 112 ldmeqfd sp!, {r4 - r6, pc}
113 113
114 cmp r2, #2 114 cmp r2, #2
115 ldrb r3, [r0] 115 ldrb r3, [r0]
@@ -119,4 +119,4 @@ ENTRY(__raw_readsb)
119 ldrgtb r3, [r0] 119 ldrgtb r3, [r0]
120 strgtb r3, [r1] 120 strgtb r3, [r1]
121 121
122 LOADREGS(fd, sp!, {r4 - r6, pc}) 122 ldmfd sp!, {r4 - r6, pc}
diff --git a/arch/arm/lib/io-readsw-armv3.S b/arch/arm/lib/io-readsw-armv3.S
index 146d47c15455..4ef904185142 100644
--- a/arch/arm/lib/io-readsw-armv3.S
+++ b/arch/arm/lib/io-readsw-armv3.S
@@ -28,7 +28,7 @@
28 strb r3, [r1], #1 28 strb r3, [r1], #1
29 29
30 subs r2, r2, #1 30 subs r2, r2, #1
31 RETINSTR(moveq, pc, lr) 31 moveq pc, lr
32 32
33ENTRY(__raw_readsw) 33ENTRY(__raw_readsw)
34 teq r2, #0 @ do we have to check for the zero len? 34 teq r2, #0 @ do we have to check for the zero len?
@@ -69,7 +69,7 @@ ENTRY(__raw_readsw)
69 bpl .Linsw_8_lp 69 bpl .Linsw_8_lp
70 70
71 tst r2, #7 71 tst r2, #7
72 LOADREGS(eqfd, sp!, {r4, r5, r6, pc}) 72 ldmeqfd sp!, {r4, r5, r6, pc}
73 73
74.Lno_insw_8: tst r2, #4 74.Lno_insw_8: tst r2, #4
75 beq .Lno_insw_4 75 beq .Lno_insw_4
@@ -102,6 +102,6 @@ ENTRY(__raw_readsw)
102 movne r3, r3, lsr #8 102 movne r3, r3, lsr #8
103 strneb r3, [r1] 103 strneb r3, [r1]
104 104
105 LOADREGS(fd, sp!, {r4, r5, r6, pc}) 105 ldmfd sp!, {r4, r5, r6, pc}
106 106
107 107
diff --git a/arch/arm/lib/io-writesb.S b/arch/arm/lib/io-writesb.S
index 08209fc640ea..7eba2b6cc69f 100644
--- a/arch/arm/lib/io-writesb.S
+++ b/arch/arm/lib/io-writesb.S
@@ -64,7 +64,7 @@ ENTRY(__raw_writesb)
64 bpl .Loutsb_16_lp 64 bpl .Loutsb_16_lp
65 65
66 tst r2, #15 66 tst r2, #15
67 LOADREGS(eqfd, sp!, {r4, r5, pc}) 67 ldmeqfd sp!, {r4, r5, pc}
68 68
69.Loutsb_no_16: tst r2, #8 69.Loutsb_no_16: tst r2, #8
70 beq .Loutsb_no_8 70 beq .Loutsb_no_8
@@ -80,7 +80,7 @@ ENTRY(__raw_writesb)
80 outword r3 80 outword r3
81 81
82.Loutsb_no_4: ands r2, r2, #3 82.Loutsb_no_4: ands r2, r2, #3
83 LOADREGS(eqfd, sp!, {r4, r5, pc}) 83 ldmeqfd sp!, {r4, r5, pc}
84 84
85 cmp r2, #2 85 cmp r2, #2
86 ldrb r3, [r1], #1 86 ldrb r3, [r1], #1
@@ -90,4 +90,4 @@ ENTRY(__raw_writesb)
90 ldrgtb r3, [r1] 90 ldrgtb r3, [r1]
91 strgtb r3, [r0] 91 strgtb r3, [r0]
92 92
93 LOADREGS(fd, sp!, {r4, r5, pc}) 93 ldmfd sp!, {r4, r5, pc}
diff --git a/arch/arm/lib/io-writesw-armv3.S b/arch/arm/lib/io-writesw-armv3.S
index 52d62b481295..1607a29f49b7 100644
--- a/arch/arm/lib/io-writesw-armv3.S
+++ b/arch/arm/lib/io-writesw-armv3.S
@@ -29,7 +29,7 @@
29 orr r3, r3, r3, lsl #16 29 orr r3, r3, r3, lsl #16
30 str r3, [r0] 30 str r3, [r0]
31 subs r2, r2, #1 31 subs r2, r2, #1
32 RETINSTR(moveq, pc, lr) 32 moveq pc, lr
33 33
34ENTRY(__raw_writesw) 34ENTRY(__raw_writesw)
35 teq r2, #0 @ do we have to check for the zero len? 35 teq r2, #0 @ do we have to check for the zero len?
@@ -80,7 +80,7 @@ ENTRY(__raw_writesw)
80 bpl .Loutsw_8_lp 80 bpl .Loutsw_8_lp
81 81
82 tst r2, #7 82 tst r2, #7
83 LOADREGS(eqfd, sp!, {r4, r5, r6, pc}) 83 ldmeqfd sp!, {r4, r5, r6, pc}
84 84
85.Lno_outsw_8: tst r2, #4 85.Lno_outsw_8: tst r2, #4
86 beq .Lno_outsw_4 86 beq .Lno_outsw_4
@@ -124,4 +124,4 @@ ENTRY(__raw_writesw)
124 orrne ip, ip, ip, lsr #16 124 orrne ip, ip, ip, lsr #16
125 strne ip, [r0] 125 strne ip, [r0]
126 126
127 LOADREGS(fd, sp!, {r4, r5, r6, pc}) 127 ldmfd sp!, {r4, r5, r6, pc}
diff --git a/arch/arm/lib/memchr.S b/arch/arm/lib/memchr.S
index ac34fe55d21a..e7ab1ea8ebaa 100644
--- a/arch/arm/lib/memchr.S
+++ b/arch/arm/lib/memchr.S
@@ -22,4 +22,4 @@ ENTRY(memchr)
22 bne 1b 22 bne 1b
23 sub r0, r0, #1 23 sub r0, r0, #1
242: movne r0, #0 242: movne r0, #0
25 RETINSTR(mov,pc,lr) 25 mov pc, lr
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index a1795f599937..95b110b07a89 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -53,7 +53,7 @@ ENTRY(memset)
53 stmgeia r0!, {r1, r3, ip, lr} 53 stmgeia r0!, {r1, r3, ip, lr}
54 stmgeia r0!, {r1, r3, ip, lr} 54 stmgeia r0!, {r1, r3, ip, lr}
55 bgt 2b 55 bgt 2b
56 LOADREGS(eqfd, sp!, {pc}) @ Now <64 bytes to go. 56 ldmeqfd sp!, {pc} @ Now <64 bytes to go.
57/* 57/*
58 * No need to correct the count; we're only testing bits from now on 58 * No need to correct the count; we're only testing bits from now on
59 */ 59 */
@@ -77,4 +77,4 @@ ENTRY(memset)
77 strneb r1, [r0], #1 77 strneb r1, [r0], #1
78 tst r2, #1 78 tst r2, #1
79 strneb r1, [r0], #1 79 strneb r1, [r0], #1
80 RETINSTR(mov,pc,lr) 80 mov pc, lr
diff --git a/arch/arm/lib/memzero.S b/arch/arm/lib/memzero.S
index 51ccc60160fd..abf2508e8221 100644
--- a/arch/arm/lib/memzero.S
+++ b/arch/arm/lib/memzero.S
@@ -53,7 +53,7 @@ ENTRY(__memzero)
53 stmgeia r0!, {r2, r3, ip, lr} @ 4 53 stmgeia r0!, {r2, r3, ip, lr} @ 4
54 stmgeia r0!, {r2, r3, ip, lr} @ 4 54 stmgeia r0!, {r2, r3, ip, lr} @ 4
55 bgt 3b @ 1 55 bgt 3b @ 1
56 LOADREGS(eqfd, sp!, {pc}) @ 1/2 quick exit 56 ldmeqfd sp!, {pc} @ 1/2 quick exit
57/* 57/*
58 * No need to correct the count; we're only testing bits from now on 58 * No need to correct the count; we're only testing bits from now on
59 */ 59 */
@@ -77,4 +77,4 @@ ENTRY(__memzero)
77 strneb r2, [r0], #1 @ 1 77 strneb r2, [r0], #1 @ 1
78 tst r1, #1 @ 1 a byte left over 78 tst r1, #1 @ 1 a byte left over
79 strneb r2, [r0], #1 @ 1 79 strneb r2, [r0], #1 @ 1
80 RETINSTR(mov,pc,lr) @ 1 80 mov pc, lr @ 1
diff --git a/arch/arm/lib/strchr.S b/arch/arm/lib/strchr.S
index 5b9b493733fc..9f18d6fdee6a 100644
--- a/arch/arm/lib/strchr.S
+++ b/arch/arm/lib/strchr.S
@@ -23,4 +23,4 @@ ENTRY(strchr)
23 teq r2, r1 23 teq r2, r1
24 movne r0, #0 24 movne r0, #0
25 subeq r0, r0, #1 25 subeq r0, r0, #1
26 RETINSTR(mov,pc,lr) 26 mov pc, lr
diff --git a/arch/arm/lib/strncpy_from_user.S b/arch/arm/lib/strncpy_from_user.S
index 629cc8775276..35649f04fcac 100644
--- a/arch/arm/lib/strncpy_from_user.S
+++ b/arch/arm/lib/strncpy_from_user.S
@@ -21,7 +21,6 @@
21 * -EFAULT on exception, or "len" if we fill the whole buffer 21 * -EFAULT on exception, or "len" if we fill the whole buffer
22 */ 22 */
23ENTRY(__arch_strncpy_from_user) 23ENTRY(__arch_strncpy_from_user)
24 save_lr
25 mov ip, r1 24 mov ip, r1
261: subs r2, r2, #1 251: subs r2, r2, #1
27USER( ldrplbt r3, [r1], #1) 26USER( ldrplbt r3, [r1], #1)
@@ -31,13 +30,13 @@ USER( ldrplbt r3, [r1], #1)
31 bne 1b 30 bne 1b
32 sub r1, r1, #1 @ take NUL character out of count 31 sub r1, r1, #1 @ take NUL character out of count
332: sub r0, r1, ip 322: sub r0, r1, ip
34 restore_pc 33 mov pc, lr
35 34
36 .section .fixup,"ax" 35 .section .fixup,"ax"
37 .align 0 36 .align 0
389001: mov r3, #0 379001: mov r3, #0
39 strb r3, [r0, #0] @ null terminate 38 strb r3, [r0, #0] @ null terminate
40 mov r0, #-EFAULT 39 mov r0, #-EFAULT
41 restore_pc 40 mov pc, lr
42 .previous 41 .previous
43 42
diff --git a/arch/arm/lib/strnlen_user.S b/arch/arm/lib/strnlen_user.S
index 67bcd8268128..3668a15991ef 100644
--- a/arch/arm/lib/strnlen_user.S
+++ b/arch/arm/lib/strnlen_user.S
@@ -21,7 +21,6 @@
21 * or zero on exception, or n + 1 if too long 21 * or zero on exception, or n + 1 if too long
22 */ 22 */
23ENTRY(__arch_strnlen_user) 23ENTRY(__arch_strnlen_user)
24 save_lr
25 mov r2, r0 24 mov r2, r0
261: 251:
27USER( ldrbt r3, [r0], #1) 26USER( ldrbt r3, [r0], #1)
@@ -31,10 +30,10 @@ USER( ldrbt r3, [r0], #1)
31 bne 1b 30 bne 1b
32 add r0, r0, #1 31 add r0, r0, #1
332: sub r0, r0, r2 322: sub r0, r0, r2
34 restore_pc 33 mov pc, lr
35 34
36 .section .fixup,"ax" 35 .section .fixup,"ax"
37 .align 0 36 .align 0
389001: mov r0, #0 379001: mov r0, #0
39 restore_pc 38 mov pc, lr
40 .previous 39 .previous
diff --git a/arch/arm/lib/strrchr.S b/arch/arm/lib/strrchr.S
index fa923f026f15..538df220aa48 100644
--- a/arch/arm/lib/strrchr.S
+++ b/arch/arm/lib/strrchr.S
@@ -22,4 +22,4 @@ ENTRY(strrchr)
22 teq r2, #0 22 teq r2, #0
23 bne 1b 23 bne 1b
24 mov r0, r3 24 mov r0, r3
25 RETINSTR(mov,pc,lr) 25 mov pc, lr
diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
index 0cc450f863b6..1f1545d737be 100644
--- a/arch/arm/lib/uaccess.S
+++ b/arch/arm/lib/uaccess.S
@@ -105,7 +105,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
105 movs ip, r2 105 movs ip, r2
106 bne .Lc2u_nowords 106 bne .Lc2u_nowords
107.Lc2u_finished: mov r0, #0 107.Lc2u_finished: mov r0, #0
108 LOADREGS(fd,sp!,{r2, r4 - r7, pc}) 108 ldmfd sp!, {r2, r4 - r7, pc}
109 109
110.Lc2u_src_not_aligned: 110.Lc2u_src_not_aligned:
111 bic r1, r1, #3 111 bic r1, r1, #3
@@ -280,7 +280,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
280 280
281 .section .fixup,"ax" 281 .section .fixup,"ax"
282 .align 0 282 .align 0
2839001: LOADREGS(fd,sp!, {r0, r4 - r7, pc}) 2839001: ldmfd sp!, {r0, r4 - r7, pc}
284 .previous 284 .previous
285 285
286/* Prototype: unsigned long __arch_copy_from_user(void *to,const void *from,unsigned long n); 286/* Prototype: unsigned long __arch_copy_from_user(void *to,const void *from,unsigned long n);
@@ -369,7 +369,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
369 bne .Lcfu_nowords 369 bne .Lcfu_nowords
370.Lcfu_finished: mov r0, #0 370.Lcfu_finished: mov r0, #0
371 add sp, sp, #8 371 add sp, sp, #8
372 LOADREGS(fd,sp!,{r4 - r7, pc}) 372 ldmfd sp!, {r4 - r7, pc}
373 373
374.Lcfu_src_not_aligned: 374.Lcfu_src_not_aligned:
375 bic r1, r1, #3 375 bic r1, r1, #3
@@ -556,6 +556,6 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
556 movne r1, r4 556 movne r1, r4
557 blne __memzero 557 blne __memzero
558 mov r0, r4 558 mov r0, r4
559 LOADREGS(fd,sp!, {r4 - r7, pc}) 559 ldmfd sp!, {r4 - r7, pc}
560 .previous 560 .previous
561 561
diff --git a/arch/arm/mach-at91rm9200/Kconfig b/arch/arm/mach-at91rm9200/Kconfig
index 1ab5b7828318..70d402f76ce5 100644
--- a/arch/arm/mach-at91rm9200/Kconfig
+++ b/arch/arm/mach-at91rm9200/Kconfig
@@ -4,6 +4,12 @@ menu "AT91RM9200 Implementations"
4 4
5comment "AT91RM9200 Board Type" 5comment "AT91RM9200 Board Type"
6 6
7config MACH_ONEARM
8 bool "Ajeco 1ARM Single Board Computer"
9 depends on ARCH_AT91RM9200
10 help
11 Select this if you are using Ajeco's 1ARM Single Board Computer
12
7config ARCH_AT91RM9200DK 13config ARCH_AT91RM9200DK
8 bool "Atmel AT91RM9200-DK Development board" 14 bool "Atmel AT91RM9200-DK Development board"
9 depends on ARCH_AT91RM9200 15 depends on ARCH_AT91RM9200
diff --git a/arch/arm/mach-at91rm9200/Makefile b/arch/arm/mach-at91rm9200/Makefile
index 81ebc6684ad2..82db957322df 100644
--- a/arch/arm/mach-at91rm9200/Makefile
+++ b/arch/arm/mach-at91rm9200/Makefile
@@ -10,6 +10,7 @@ obj- :=
10obj-$(CONFIG_PM) += pm.o 10obj-$(CONFIG_PM) += pm.o
11 11
12# Board-specific support 12# Board-specific support
13obj-$(CONFIG_MACH_ONEARM) += board-1arm.o
13obj-$(CONFIG_ARCH_AT91RM9200DK) += board-dk.o 14obj-$(CONFIG_ARCH_AT91RM9200DK) += board-dk.o
14obj-$(CONFIG_MACH_AT91RM9200EK) += board-ek.o 15obj-$(CONFIG_MACH_AT91RM9200EK) += board-ek.o
15obj-$(CONFIG_MACH_CSB337) += board-csb337.o 16obj-$(CONFIG_MACH_CSB337) += board-csb337.o
diff --git a/arch/arm/mach-at91rm9200/board-1arm.c b/arch/arm/mach-at91rm9200/board-1arm.c
new file mode 100644
index 000000000000..dc79e0992af7
--- /dev/null
+++ b/arch/arm/mach-at91rm9200/board-1arm.c
@@ -0,0 +1,109 @@
1/*
2 * linux/arch/arm/mach-at91rm9200/board-1arm.c
3 *
4 * Copyright (C) 2005 SAN People
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/config.h>
22#include <linux/types.h>
23#include <linux/init.h>
24#include <linux/mm.h>
25#include <linux/module.h>
26#include <linux/platform_device.h>
27
28#include <asm/hardware.h>
29#include <asm/setup.h>
30#include <asm/mach-types.h>
31#include <asm/irq.h>
32
33#include <asm/mach/arch.h>
34#include <asm/mach/map.h>
35#include <asm/mach/irq.h>
36
37#include <asm/hardware.h>
38#include <asm/arch/board.h>
39#include <asm/arch/gpio.h>
40
41#include "generic.h"
42
43static void __init onearm_init_irq(void)
44{
45 /* Initialize AIC controller */
46 at91rm9200_init_irq(NULL);
47
48 /* Set up the GPIO interrupts */
49 at91_gpio_irq_setup(PQFP_GPIO_BANKS);
50}
51
52/*
53 * Serial port configuration.
54 * 0 .. 3 = USART0 .. USART3
55 * 4 = DBGU
56 */
57static struct at91_uart_config __initdata onearm_uart_config = {
58 .console_tty = 0, /* ttyS0 */
59 .nr_tty = 3,
60 .tty_map = { 4, 0, 1, -1, -1 }, /* ttyS0, ..., ttyS4 */
61};
62
63static void __init onearm_map_io(void)
64{
65 at91rm9200_map_io();
66
67 /* Initialize clocks: 18.432 MHz crystal */
68 at91_clock_init(18432000);
69
70 /* Setup the serial ports and console */
71 at91_init_serial(&onearm_uart_config);
72}
73
74static struct at91_eth_data __initdata onearm_eth_data = {
75 .phy_irq_pin = AT91_PIN_PC4,
76 .is_rmii = 1,
77};
78
79static struct at91_usbh_data __initdata onearm_usbh_data = {
80 .ports = 1,
81};
82
83static struct at91_udc_data __initdata onearm_udc_data = {
84 .vbus_pin = AT91_PIN_PC2,
85 .pullup_pin = AT91_PIN_PC3,
86};
87
88static void __init onearm_board_init(void)
89{
90 /* Serial */
91 at91_add_device_serial();
92 /* Ethernet */
93 at91_add_device_eth(&onearm_eth_data);
94 /* USB Host */
95 at91_add_device_usbh(&onearm_usbh_data);
96 /* USB Device */
97 at91_add_device_udc(&onearm_udc_data);
98}
99
100MACHINE_START(ONEARM, "Ajeco 1ARM single board computer")
101 /* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
102 .phys_io = AT91_BASE_SYS,
103 .io_pg_offst = (AT91_VA_BASE_SYS >> 18) & 0xfffc,
104 .boot_params = AT91_SDRAM_BASE + 0x100,
105 .timer = &at91rm9200_timer,
106 .map_io = onearm_map_io,
107 .init_irq = onearm_init_irq,
108 .init_machine = onearm_board_init,
109MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
index 3b23f43cb160..57f23b465392 100644
--- a/arch/arm/mach-ixp4xx/Kconfig
+++ b/arch/arm/mach-ixp4xx/Kconfig
@@ -35,7 +35,6 @@ config ARCH_ADI_COYOTE
35 35
36config ARCH_IXDP425 36config ARCH_IXDP425
37 bool "IXDP425" 37 bool "IXDP425"
38 select PCI
39 help 38 help
40 Say 'Y' here if you want your kernel to support Intel's 39 Say 'Y' here if you want your kernel to support Intel's
41 IXDP425 Development Platform (Also known as Richfield). 40 IXDP425 Development Platform (Also known as Richfield).
@@ -43,7 +42,6 @@ config ARCH_IXDP425
43 42
44config MACH_IXDPG425 43config MACH_IXDPG425
45 bool "IXDPG425" 44 bool "IXDPG425"
46 select PCI
47 help 45 help
48 Say 'Y' here if you want your kernel to support Intel's 46 Say 'Y' here if you want your kernel to support Intel's
49 IXDPG425 Development Platform (Also known as Montajade). 47 IXDPG425 Development Platform (Also known as Montajade).
@@ -51,7 +49,6 @@ config MACH_IXDPG425
51 49
52config MACH_IXDP465 50config MACH_IXDP465
53 bool "IXDP465" 51 bool "IXDP465"
54 select PCI
55 help 52 help
56 Say 'Y' here if you want your kernel to support Intel's 53 Say 'Y' here if you want your kernel to support Intel's
57 IXDP465 Development Platform (Also known as BMP). 54 IXDP465 Development Platform (Also known as BMP).
diff --git a/arch/arm/mach-ixp4xx/Makefile b/arch/arm/mach-ixp4xx/Makefile
index 5a4aaa0e0a09..640315d8b96a 100644
--- a/arch/arm/mach-ixp4xx/Makefile
+++ b/arch/arm/mach-ixp4xx/Makefile
@@ -2,13 +2,23 @@
2# Makefile for the linux kernel. 2# Makefile for the linux kernel.
3# 3#
4 4
5obj-pci-y :=
6obj-pci-n :=
7
8obj-pci-$(CONFIG_ARCH_IXDP4XX) += ixdp425-pci.o
9obj-pci-$(CONFIG_MACH_IXDPG425) += ixdpg425-pci.o
10obj-pci-$(CONFIG_ARCH_ADI_COYOTE) += coyote-pci.o
11obj-pci-$(CONFIG_MACH_GTWX5715) += gtwx5715-pci.o
12obj-pci-$(CONFIG_MACH_NSLU2) += nslu2-pci.o
13obj-pci-$(CONFIG_MACH_NAS100D) += nas100d-pci.o
14
5obj-y += common.o 15obj-y += common.o
6 16
7obj-$(CONFIG_PCI) += common-pci.o 17obj-$(CONFIG_ARCH_IXDP4XX) += ixdp425-setup.o
8obj-$(CONFIG_ARCH_IXDP4XX) += ixdp425-pci.o ixdp425-setup.o 18obj-$(CONFIG_MACH_IXDPG425) += coyote-setup.o
9obj-$(CONFIG_MACH_IXDPG425) += ixdpg425-pci.o coyote-setup.o 19obj-$(CONFIG_ARCH_ADI_COYOTE) += coyote-setup.o
10obj-$(CONFIG_ARCH_ADI_COYOTE) += coyote-pci.o coyote-setup.o 20obj-$(CONFIG_MACH_GTWX5715) += gtwx5715-setup.o
11obj-$(CONFIG_MACH_GTWX5715) += gtwx5715-pci.o gtwx5715-setup.o 21obj-$(CONFIG_MACH_NSLU2) += nslu2-setup.o nslu2-power.o
12obj-$(CONFIG_MACH_NSLU2) += nslu2-pci.o nslu2-setup.o nslu2-power.o 22obj-$(CONFIG_MACH_NAS100D) += nas100d-setup.o nas100d-power.o
13obj-$(CONFIG_MACH_NAS100D) += nas100d-pci.o nas100d-setup.o nas100d-power.o
14 23
24obj-$(CONFIG_PCI) += $(obj-pci-$(CONFIG_PCI)) common-pci.o
diff --git a/arch/arm/mach-pxa/sleep.S b/arch/arm/mach-pxa/sleep.S
index c9862688ff3d..0650bed3b96e 100644
--- a/arch/arm/mach-pxa/sleep.S
+++ b/arch/arm/mach-pxa/sleep.S
@@ -189,7 +189,7 @@ ENTRY(pxa_cpu_suspend)
189 .data 189 .data
190 .align 5 190 .align 5
191ENTRY(pxa_cpu_resume) 191ENTRY(pxa_cpu_resume)
192 mov r0, #PSR_I_BIT | PSR_F_BIT | MODE_SVC @ set SVC, irqs off 192 mov r0, #PSR_I_BIT | PSR_F_BIT | SVC_MODE @ set SVC, irqs off
193 msr cpsr_c, r0 193 msr cpsr_c, r0
194 194
195 ldr r0, sleep_save_sp @ stack phys addr 195 ldr r0, sleep_save_sp @ stack phys addr
diff --git a/arch/arm/mach-s3c2410/Kconfig b/arch/arm/mach-s3c2410/Kconfig
index f5d9cd498a5f..b4171dd43df0 100644
--- a/arch/arm/mach-s3c2410/Kconfig
+++ b/arch/arm/mach-s3c2410/Kconfig
@@ -71,13 +71,13 @@ config ARCH_S3C2440
71 Say Y here if you are using the SMDK2440. 71 Say Y here if you are using the SMDK2440.
72 72
73config SMDK2440_CPU2440 73config SMDK2440_CPU2440
74 bool "SMDK2440 with S3C2440 cpu module" 74 bool "SMDK2440 with S3C2440 CPU module"
75 depends on ARCH_S3C2440 75 depends on ARCH_S3C2440
76 default y if ARCH_S3C2440 76 default y if ARCH_S3C2440
77 select CPU_S3C2440 77 select CPU_S3C2440
78 78
79config SMDK2440_CPU2442 79config SMDK2440_CPU2442
 80	bool "SMDK2440 with S3C2442 cpu module" 80	bool "SMDK2440 with S3C2442 CPU module"
81 depends on ARCH_S3C2440 81 depends on ARCH_S3C2440
82 select CPU_S3C2442 82 select CPU_S3C2442
83 83
diff --git a/arch/arm/mach-s3c2410/sleep.S b/arch/arm/mach-s3c2410/sleep.S
index 5f6761ed96b2..dc27167f4d59 100644
--- a/arch/arm/mach-s3c2410/sleep.S
+++ b/arch/arm/mach-s3c2410/sleep.S
@@ -128,7 +128,7 @@ s3c2410_sleep_save_phys:
128 */ 128 */
129 129
130ENTRY(s3c2410_cpu_resume) 130ENTRY(s3c2410_cpu_resume)
131 mov r0, #PSR_I_BIT | PSR_F_BIT | MODE_SVC 131 mov r0, #PSR_I_BIT | PSR_F_BIT | SVC_MODE
132 msr cpsr_c, r0 132 msr cpsr_c, r0
133 133
134 @@ load UART to allow us to print the two characters for 134 @@ load UART to allow us to print the two characters for
diff --git a/arch/arm/mach-sa1100/sleep.S b/arch/arm/mach-sa1100/sleep.S
index 2fa1e289d177..5a84062f92af 100644
--- a/arch/arm/mach-sa1100/sleep.S
+++ b/arch/arm/mach-sa1100/sleep.S
@@ -177,7 +177,7 @@ sa1110_sdram_controller_fix:
177 .data 177 .data
178 .align 5 178 .align 5
179ENTRY(sa1100_cpu_resume) 179ENTRY(sa1100_cpu_resume)
180 mov r0, #PSR_F_BIT | PSR_I_BIT | MODE_SVC 180 mov r0, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
181 msr cpsr_c, r0 @ set SVC, irqs off 181 msr cpsr_c, r0 @ set SVC, irqs off
182 182
183 ldr r0, sleep_save_sp @ stack phys addr 183 ldr r0, sleep_save_sp @ stack phys addr
diff --git a/arch/arm/mm/copypage-v3.S b/arch/arm/mm/copypage-v3.S
index 3c58ebbf0359..2ee394b11bcb 100644
--- a/arch/arm/mm/copypage-v3.S
+++ b/arch/arm/mm/copypage-v3.S
@@ -35,7 +35,7 @@ ENTRY(v3_copy_user_page)
35 stmia r0!, {r3, r4, ip, lr} @ 4 35 stmia r0!, {r3, r4, ip, lr} @ 4
36 ldmneia r1!, {r3, r4, ip, lr} @ 4 36 ldmneia r1!, {r3, r4, ip, lr} @ 4
37 bne 1b @ 1 37 bne 1b @ 1
38 LOADREGS(fd, sp!, {r4, pc}) @ 3 38 ldmfd sp!, {r4, pc} @ 3
39 39
40 .align 5 40 .align 5
41/* 41/*
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index ee6f15298735..09b1a41a6de8 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -29,38 +29,6 @@
29#define TTB_RGN_WT (2 << 3) 29#define TTB_RGN_WT (2 << 3)
30#define TTB_RGN_WB (3 << 3) 30#define TTB_RGN_WB (3 << 3)
31 31
32 .macro cpsie, flags
33 .ifc \flags, f
34 .long 0xf1080040
35 .exitm
36 .endif
37 .ifc \flags, i
38 .long 0xf1080080
39 .exitm
40 .endif
41 .ifc \flags, if
42 .long 0xf10800c0
43 .exitm
44 .endif
45 .err
46 .endm
47
48 .macro cpsid, flags
49 .ifc \flags, f
50 .long 0xf10c0040
51 .exitm
52 .endif
53 .ifc \flags, i
54 .long 0xf10c0080
55 .exitm
56 .endif
57 .ifc \flags, if
58 .long 0xf10c00c0
59 .exitm
60 .endif
61 .err
62 .endm
63
64ENTRY(cpu_v6_proc_init) 32ENTRY(cpu_v6_proc_init)
65 mov pc, lr 33 mov pc, lr
66 34
diff --git a/arch/arm/nwfpe/entry26.S b/arch/arm/nwfpe/entry26.S
index 51940a96d6a6..3e6fb5d21d64 100644
--- a/arch/arm/nwfpe/entry26.S
+++ b/arch/arm/nwfpe/entry26.S
@@ -26,7 +26,7 @@
26It is called from the kernel with code similar to this: 26It is called from the kernel with code similar to this:
27 27
28 mov fp, #0 28 mov fp, #0
29 teqp pc, #PSR_I_BIT | MODE_SVC 29 teqp pc, #PSR_I_BIT | SVC_MODE
30 ldr r4, .LC2 30 ldr r4, .LC2
31 ldr pc, [r4] @ Call FP module USR entry point 31 ldr pc, [r4] @ Call FP module USR entry point
32 32
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 6d7de9c0412f..e1372a25311d 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -12,7 +12,7 @@
12# 12#
13# http://www.arm.linux.org.uk/developer/machines/?action=new 13# http://www.arm.linux.org.uk/developer/machines/?action=new
14# 14#
15# Last update: Mon May 8 20:11:05 2006 15# Last update: Mon Jun 26 22:26:08 2006
16# 16#
17# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number 17# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
18# 18#
@@ -566,8 +566,8 @@ switchgrass MACH_SWITCHGRASS SWITCHGRASS 549
566ens_cmu MACH_ENS_CMU ENS_CMU 550 566ens_cmu MACH_ENS_CMU ENS_CMU 550
567mm6_sdb MACH_MM6_SDB MM6_SDB 551 567mm6_sdb MACH_MM6_SDB MM6_SDB 551
568saturn MACH_SATURN SATURN 552 568saturn MACH_SATURN SATURN 552
569i30030evb MACH_ARGONPLUSEVB ARGONPLUSEVB 553 569i30030evb MACH_I30030EVB I30030EVB 553
570mxc27530evb MACH_SCMA11EVB SCMA11EVB 554 570mxc27530evb MACH_MXC27530EVB MXC27530EVB 554
571smdk2800 MACH_SMDK2800 SMDK2800 555 571smdk2800 MACH_SMDK2800 SMDK2800 555
572mtwilson MACH_MTWILSON MTWILSON 556 572mtwilson MACH_MTWILSON MTWILSON 556
573ziti MACH_ZITI ZITI 557 573ziti MACH_ZITI ZITI 557
@@ -647,7 +647,7 @@ sendt MACH_SENDT SENDT 630
647mx2jazz MACH_MX2JAZZ MX2JAZZ 631 647mx2jazz MACH_MX2JAZZ MX2JAZZ 631
648multiio MACH_MULTIIO MULTIIO 632 648multiio MACH_MULTIIO MULTIIO 632
649hrdisplay MACH_HRDISPLAY HRDISPLAY 633 649hrdisplay MACH_HRDISPLAY HRDISPLAY 633
650mxc27530ads MACH_SCMA11BB SCMA11BB 634 650mxc27530ads MACH_MXC27530ADS MXC27530ADS 634
651trizeps3 MACH_TRIZEPS3 TRIZEPS3 635 651trizeps3 MACH_TRIZEPS3 TRIZEPS3 635
652zefeerdza MACH_ZEFEERDZA ZEFEERDZA 636 652zefeerdza MACH_ZEFEERDZA ZEFEERDZA 636
653zefeerdzb MACH_ZEFEERDZB ZEFEERDZB 637 653zefeerdzb MACH_ZEFEERDZB ZEFEERDZB 637
@@ -721,7 +721,7 @@ gp32 MACH_GP32 GP32 706
721gem MACH_GEM GEM 707 721gem MACH_GEM GEM 707
722i858 MACH_I858 I858 708 722i858 MACH_I858 I858 708
723hx2750 MACH_HX2750 HX2750 709 723hx2750 MACH_HX2750 HX2750 709
724mxc91131evb MACH_ZEUSEVB ZEUSEVB 710 724mxc91131evb MACH_MXC91131EVB MXC91131EVB 710
725p700 MACH_P700 P700 711 725p700 MACH_P700 P700 711
726cpe MACH_CPE CPE 712 726cpe MACH_CPE CPE 712
727spitz MACH_SPITZ SPITZ 713 727spitz MACH_SPITZ SPITZ 713
@@ -802,7 +802,7 @@ cpuat91 MACH_CPUAT91 CPUAT91 787
802rea9200 MACH_REA9200 REA9200 788 802rea9200 MACH_REA9200 REA9200 788
803acts_pune_sa1110 MACH_ACTS_PUNE_SA1110 ACTS_PUNE_SA1110 789 803acts_pune_sa1110 MACH_ACTS_PUNE_SA1110 ACTS_PUNE_SA1110 789
804ixp425 MACH_IXP425 IXP425 790 804ixp425 MACH_IXP425 IXP425 790
805i30030ads MACH_ARGONPLUSODYSSEY ARGONPLUSODYSSEY 791 805i30030ads MACH_I30030ADS I30030ADS 791
806perch MACH_PERCH PERCH 792 806perch MACH_PERCH PERCH 792
807eis05r1 MACH_EIS05R1 EIS05R1 793 807eis05r1 MACH_EIS05R1 EIS05R1 793
808pepperpad MACH_PEPPERPAD PEPPERPAD 794 808pepperpad MACH_PEPPERPAD PEPPERPAD 794
@@ -930,7 +930,7 @@ netclient MACH_NETCLIENT NETCLIENT 916
930xscale_palmtt5 MACH_XSCALE_PALMTT5 XSCALE_PALMTT5 917 930xscale_palmtt5 MACH_XSCALE_PALMTT5 XSCALE_PALMTT5 917
931xscale_palmtc MACH_OMAP_PALMTC OMAP_PALMTC 918 931xscale_palmtc MACH_OMAP_PALMTC OMAP_PALMTC 918
932omap_apollon MACH_OMAP_APOLLON OMAP_APOLLON 919 932omap_apollon MACH_OMAP_APOLLON OMAP_APOLLON 919
933mxc30030evb MACH_ARGONLVEVB ARGONLVEVB 920 933mxc30030evb MACH_MXC30030EVB MXC30030EVB 920
934rea_2d MACH_REA_2D REA_2D 921 934rea_2d MACH_REA_2D REA_2D 921
935eti3e524 MACH_TI3E524 TI3E524 922 935eti3e524 MACH_TI3E524 TI3E524 922
936ateb9200 MACH_ATEB9200 ATEB9200 923 936ateb9200 MACH_ATEB9200 ATEB9200 923
@@ -986,7 +986,7 @@ redfox MACH_REDFOX REDFOX 972
986mysh_ep9315_1 MACH_MYSH_EP9315_1 MYSH_EP9315_1 973 986mysh_ep9315_1 MACH_MYSH_EP9315_1 MYSH_EP9315_1 973
987tpf106 MACH_TPF106 TPF106 974 987tpf106 MACH_TPF106 TPF106 974
988at91rm9200kg MACH_AT91RM9200KG AT91RM9200KG 975 988at91rm9200kg MACH_AT91RM9200KG AT91RM9200KG 975
989racemt2 MACH_SLEDB SLEDB 976 989rcmt2 MACH_SLEDB SLEDB 976
990ontrack MACH_ONTRACK ONTRACK 977 990ontrack MACH_ONTRACK ONTRACK 977
991pm1200 MACH_PM1200 PM1200 978 991pm1200 MACH_PM1200 PM1200 978
992ess24562 MACH_ESS24XXX ESS24XXX 979 992ess24562 MACH_ESS24XXX ESS24XXX 979
@@ -1022,7 +1022,7 @@ smdk2440 MACH_SMDK2440 SMDK2440 1008
1022smdk2412 MACH_SMDK2412 SMDK2412 1009 1022smdk2412 MACH_SMDK2412 SMDK2412 1009
1023webbox MACH_WEBBOX WEBBOX 1010 1023webbox MACH_WEBBOX WEBBOX 1010
1024cwwndp MACH_CWWNDP CWWNDP 1011 1024cwwndp MACH_CWWNDP CWWNDP 1011
1025dragon MACH_DRAGON DRAGON 1012 1025i839 MACH_DRAGON DRAGON 1012
1026opendo_cpu_board MACH_OPENDO_CPU_BOARD OPENDO_CPU_BOARD 1013 1026opendo_cpu_board MACH_OPENDO_CPU_BOARD OPENDO_CPU_BOARD 1013
1027ccm2200 MACH_CCM2200 CCM2200 1014 1027ccm2200 MACH_CCM2200 CCM2200 1014
1028etwarm MACH_ETWARM ETWARM 1015 1028etwarm MACH_ETWARM ETWARM 1015
@@ -1040,3 +1040,56 @@ edg79524 MACH_EDG79524 EDG79524 1026
1040ai2410 MACH_AI2410 AI2410 1027 1040ai2410 MACH_AI2410 AI2410 1027
1041ixp465 MACH_IXP465 IXP465 1028 1041ixp465 MACH_IXP465 IXP465 1028
1042balloon3 MACH_BALLOON3 BALLOON3 1029 1042balloon3 MACH_BALLOON3 BALLOON3 1029
1043heins MACH_HEINS HEINS 1030
1044mpluseva MACH_MPLUSEVA MPLUSEVA 1031
1045rt042 MACH_RT042 RT042 1032
1046cwiem MACH_CWIEM CWIEM 1033
1047cm_x270 MACH_CM_X270 CM_X270 1034
1048cm_x255 MACH_CM_X255 CM_X255 1035
1049esh_at91 MACH_ESH_AT91 ESH_AT91 1036
1050sandgate3 MACH_SANDGATE3 SANDGATE3 1037
1051primo MACH_PRIMO PRIMO 1038
1052gemstone MACH_GEMSTONE GEMSTONE 1039
1053pronghorn_metro MACH_PRONGHORNMETRO PRONGHORNMETRO 1040
1054sidewinder MACH_SIDEWINDER SIDEWINDER 1041
1055picomod1 MACH_PICOMOD1 PICOMOD1 1042
1056sg590 MACH_SG590 SG590 1043
1057akai9307 MACH_AKAI9307 AKAI9307 1044
1058fontaine MACH_FONTAINE FONTAINE 1045
1059wombat MACH_WOMBAT WOMBAT 1046
1060acq300 MACH_ACQ300 ACQ300 1047
1061mod_270 MACH_MOD_270 MOD_270 1048
1062vmc_vc0820 MACH_VC0820 VC0820 1049
1063ani_aim MACH_ANI_AIM ANI_AIM 1050
1064jellyfish MACH_JELLYFISH JELLYFISH 1051
1065amanita MACH_AMANITA AMANITA 1052
1066vlink MACH_VLINK VLINK 1053
1067dexflex MACH_DEXFLEX DEXFLEX 1054
1068eigen_ttq MACH_EIGEN_TTQ EIGEN_TTQ 1055
1069arcom_titan MACH_ARCOM_TITAN ARCOM_TITAN 1056
1070tabla MACH_TABLA TABLA 1057
1071mdirac3 MACH_MDIRAC3 MDIRAC3 1058
1072mrhfbp2 MACH_MRHFBP2 MRHFBP2 1059
1073at91rm9200rb MACH_AT91RM9200RB AT91RM9200RB 1060
1074ani_apm MACH_ANI_APM ANI_APM 1061
1075ella1 MACH_ELLA1 ELLA1 1062
1076inhand_pxa27x MACH_INHAND_PXA27X INHAND_PXA27X 1063
1077inhand_pxa25x MACH_INHAND_PXA25X INHAND_PXA25X 1064
1078empos_xm MACH_EMPOS_XM EMPOS_XM 1065
1079empos MACH_EMPOS EMPOS 1066
1080empos_tiny MACH_EMPOS_TINY EMPOS_TINY 1067
1081empos_sm MACH_EMPOS_SM EMPOS_SM 1068
1082egret MACH_EGRET EGRET 1069
1083ostrich MACH_OSTRICH OSTRICH 1070
1084n50 MACH_N50 N50 1071
1085ecbat91 MACH_ECBAT91 ECBAT91 1072
1086stareast MACH_STAREAST STAREAST 1073
1087dspg_dw MACH_DSPG_DW DSPG_DW 1074
1088onearm MACH_ONEARM ONEARM 1075
1089mrg110_6 MACH_MRG110_6 MRG110_6 1076
1090wrt300nv2 MACH_WRT300NV2 WRT300NV2 1077
1091xm_bulverde MACH_XM_BULVERDE XM_BULVERDE 1078
1092msm6100 MACH_MSM6100 MSM6100 1079
1093eti_b1 MACH_ETI_B1 ETI_B1 1080
1094za9l_series MACH_ZILOG_ZA9L ZILOG_ZA9L 1081
1095bit2440 MACH_BIT2440 BIT2440 1082
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 1596101cfaf8..3bb221db164a 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -14,6 +14,10 @@ config X86_32
14 486, 586, Pentiums, and various instruction-set-compatible chips by 14 486, 586, Pentiums, and various instruction-set-compatible chips by
15 AMD, Cyrix, and others. 15 AMD, Cyrix, and others.
16 16
17config GENERIC_TIME
18 bool
19 default y
20
17config SEMAPHORE_SLEEPERS 21config SEMAPHORE_SLEEPERS
18 bool 22 bool
19 default y 23 default y
@@ -229,7 +233,7 @@ config NR_CPUS
229 233
230config SCHED_SMT 234config SCHED_SMT
231 bool "SMT (Hyperthreading) scheduler support" 235 bool "SMT (Hyperthreading) scheduler support"
232 depends on SMP 236 depends on X86_HT
233 help 237 help
234 SMT scheduler support improves the CPU scheduler's decision making 238 SMT scheduler support improves the CPU scheduler's decision making
235 when dealing with Intel Pentium 4 chips with HyperThreading at a 239 when dealing with Intel Pentium 4 chips with HyperThreading at a
@@ -238,7 +242,7 @@ config SCHED_SMT
238 242
239config SCHED_MC 243config SCHED_MC
240 bool "Multi-core scheduler support" 244 bool "Multi-core scheduler support"
241 depends on SMP 245 depends on X86_HT
242 default y 246 default y
243 help 247 help
244 Multi-core scheduler support improves the CPU scheduler's decision 248 Multi-core scheduler support improves the CPU scheduler's decision
@@ -324,6 +328,15 @@ config X86_MCE_P4THERMAL
324 Enabling this feature will cause a message to be printed when the P4 328 Enabling this feature will cause a message to be printed when the P4
325 enters thermal throttling. 329 enters thermal throttling.
326 330
331config VM86
332 default y
333 bool "Enable VM86 support" if EMBEDDED
334 help
335 This option is required by programs like DOSEMU to run 16-bit legacy
336 code on X86 processors. It also may be needed by software like
337 XFree86 to initialize some video cards via BIOS. Disabling this
338 option saves about 6k.
339
327config TOSHIBA 340config TOSHIBA
328 tristate "Toshiba Laptop support" 341 tristate "Toshiba Laptop support"
329 ---help--- 342 ---help---
@@ -721,7 +734,7 @@ config KEXEC
721 help 734 help
722 kexec is a system call that implements the ability to shutdown your 735 kexec is a system call that implements the ability to shutdown your
723 current kernel, and to start another kernel. It is like a reboot 736 current kernel, and to start another kernel. It is like a reboot
724 but it is indepedent of the system firmware. And like a reboot 737 but it is independent of the system firmware. And like a reboot
725 you can start any kernel with it, not just Linux. 738 you can start any kernel with it, not just Linux.
726 739
 727	  The name comes from the similarity to the exec system call. 740	  The name comes from the similarity to the exec system call.
@@ -767,6 +780,17 @@ config HOTPLUG_CPU
767 enable suspend on SMP systems. CPUs can be controlled through 780 enable suspend on SMP systems. CPUs can be controlled through
768 /sys/devices/system/cpu. 781 /sys/devices/system/cpu.
769 782
783config COMPAT_VDSO
784 bool "Compat VDSO support"
785 default y
786 help
787 Map the VDSO to the predictable old-style address too.
 788
789 Say N here if you are running a sufficiently recent glibc
790 version (2.3.3 or later), to remove the high-mapped
791 VDSO mapping and to exclusively use the randomized VDSO.
792
793 If unsure, say Y.
770 794
 771endmenu 795endmenu
772 796
@@ -1046,13 +1070,27 @@ config SCx200
1046 tristate "NatSemi SCx200 support" 1070 tristate "NatSemi SCx200 support"
1047 depends on !X86_VOYAGER 1071 depends on !X86_VOYAGER
1048 help 1072 help
1049 This provides basic support for the National Semiconductor SCx200 1073 This provides basic support for National Semiconductor's
1050 processor. Right now this is just a driver for the GPIO pins. 1074 (now AMD's) Geode processors. The driver probes for the
 1075	  PCI-IDs of several on-chip devices, so it's a good dependency
1076 for other scx200_* drivers.
1051 1077
1052 If you don't know what to do here, say N. 1078 If compiled as a module, the driver is named scx200.
1053 1079
1054 This support is also available as a module. If compiled as a 1080config SCx200HR_TIMER
1055 module, it will be called scx200. 1081 tristate "NatSemi SCx200 27MHz High-Resolution Timer Support"
1082 depends on SCx200 && GENERIC_TIME
1083 default y
1084 help
1085 This driver provides a clocksource built upon the on-chip
 1086	  27MHz high-resolution timer. It's also a workaround for
1087 NSC Geode SC-1100's buggy TSC, which loses time when the
1088 processor goes idle (as is done by the scheduler). The
 1089	  other workaround is the idle=poll boot option.
1090
1091config K8_NB
1092 def_bool y
1093 depends on AGP_AMD64
1056 1094
1057source "drivers/pcmcia/Kconfig" 1095source "drivers/pcmcia/Kconfig"
1058 1096
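
The SCx200HR_TIMER option added above describes a clocksource driver layered on the Geode's on-chip 27MHz timer, which the new GENERIC_TIME infrastructure makes usable. As a rough sketch of what registering such a clocksource looks like with the clocksource API of this era (the device probing, I/O address, name, and rating below are illustrative assumptions, not the actual driver):

	/* Sketch only: a free-running 32-bit counter exposed as a clocksource. */
	#include <linux/clocksource.h>
	#include <linux/init.h>
	#include <linux/module.h>
	#include <asm/io.h>

	#define DEMO_HRT_FREQ	27000000	/* 27MHz input clock (assumed) */

	static unsigned long demo_hrt_port;	/* counter I/O address; probing omitted */

	static cycle_t demo_hrt_read(void)
	{
		return (cycle_t)inl(demo_hrt_port);
	}

	static struct clocksource demo_hrt_cs = {
		.name	= "demo_hrt",
		.rating	= 250,			/* preferred over the SC-1100's buggy TSC */
		.read	= demo_hrt_read,
		.mask	= CLOCKSOURCE_MASK(32),
		.shift	= 22,
	};

	static int __init demo_hrt_init(void)
	{
		if (!demo_hrt_port)		/* a real driver would probe the chip here */
			return -ENODEV;
		demo_hrt_cs.mult = clocksource_hz2mult(DEMO_HRT_FREQ, demo_hrt_cs.shift);
		return clocksource_register(&demo_hrt_cs);
	}
	module_init(demo_hrt_init);
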
diff --git a/arch/i386/Kconfig.cpu b/arch/i386/Kconfig.cpu
index eb130482ba18..21c9a4e71104 100644
--- a/arch/i386/Kconfig.cpu
+++ b/arch/i386/Kconfig.cpu
@@ -41,7 +41,7 @@ config M386
41 - "GeodeGX1" for Geode GX1 (Cyrix MediaGX). 41 - "GeodeGX1" for Geode GX1 (Cyrix MediaGX).
42 - "Geode GX/LX" For AMD Geode GX and LX processors. 42 - "Geode GX/LX" For AMD Geode GX and LX processors.
43 - "CyrixIII/VIA C3" for VIA Cyrix III or VIA C3. 43 - "CyrixIII/VIA C3" for VIA Cyrix III or VIA C3.
44 - "VIA C3-2 for VIA C3-2 "Nehemiah" (model 9 and above). 44 - "VIA C3-2" for VIA C3-2 "Nehemiah" (model 9 and above).
45 45
46 If you don't know what to do, choose "386". 46 If you don't know what to do, choose "386".
47 47
diff --git a/arch/i386/boot/Makefile b/arch/i386/boot/Makefile
index 33e55476381b..e97946626064 100644
--- a/arch/i386/boot/Makefile
+++ b/arch/i386/boot/Makefile
@@ -109,8 +109,13 @@ fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf
109isoimage: $(BOOTIMAGE) 109isoimage: $(BOOTIMAGE)
110 -rm -rf $(obj)/isoimage 110 -rm -rf $(obj)/isoimage
111 mkdir $(obj)/isoimage 111 mkdir $(obj)/isoimage
112 cp `echo /usr/lib*/syslinux/isolinux.bin | awk '{ print $1; }'` \ 112 for i in lib lib64 share end ; do \
113 $(obj)/isoimage 113 if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
114 cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
115 break ; \
116 fi ; \
117 if [ $$i = end ] ; then exit 1 ; fi ; \
118 done
114 cp $(BOOTIMAGE) $(obj)/isoimage/linux 119 cp $(BOOTIMAGE) $(obj)/isoimage/linux
115 echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg 120 echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg
116 if [ -f '$(FDINITRD)' ] ; then \ 121 if [ -f '$(FDINITRD)' ] ; then \
diff --git a/arch/i386/boot/compressed/misc.c b/arch/i386/boot/compressed/misc.c
index f19f3a7492a5..b2ccd543410d 100644
--- a/arch/i386/boot/compressed/misc.c
+++ b/arch/i386/boot/compressed/misc.c
@@ -24,14 +24,6 @@
24 24
25#undef memset 25#undef memset
26#undef memcpy 26#undef memcpy
27
28/*
29 * Why do we do this? Don't ask me..
30 *
31 * Incomprehensible are the ways of bootloaders.
32 */
33static void* memset(void *, int, size_t);
34static void* memcpy(void *, __const void *, size_t);
35#define memzero(s, n) memset ((s), 0, (n)) 27#define memzero(s, n) memset ((s), 0, (n))
36 28
37typedef unsigned char uch; 29typedef unsigned char uch;
@@ -93,7 +85,7 @@ static unsigned char *real_mode; /* Pointer to real-mode data */
93#endif 85#endif
94#define RM_SCREEN_INFO (*(struct screen_info *)(real_mode+0)) 86#define RM_SCREEN_INFO (*(struct screen_info *)(real_mode+0))
95 87
96extern char input_data[]; 88extern unsigned char input_data[];
97extern int input_len; 89extern int input_len;
98 90
99static long bytes_out = 0; 91static long bytes_out = 0;
@@ -103,6 +95,9 @@ static unsigned long output_ptr = 0;
103static void *malloc(int size); 95static void *malloc(int size);
104static void free(void *where); 96static void free(void *where);
105 97
98static void *memset(void *s, int c, unsigned n);
99static void *memcpy(void *dest, const void *src, unsigned n);
100
106static void putstr(const char *); 101static void putstr(const char *);
107 102
108extern int end; 103extern int end;
@@ -205,7 +200,7 @@ static void putstr(const char *s)
205 outb_p(0xff & (pos >> 1), vidport+1); 200 outb_p(0xff & (pos >> 1), vidport+1);
206} 201}
207 202
208static void* memset(void* s, int c, size_t n) 203static void* memset(void* s, int c, unsigned n)
209{ 204{
210 int i; 205 int i;
211 char *ss = (char*)s; 206 char *ss = (char*)s;
@@ -214,14 +209,13 @@ static void* memset(void* s, int c, size_t n)
214 return s; 209 return s;
215} 210}
216 211
217static void* memcpy(void* __dest, __const void* __src, 212static void* memcpy(void* dest, const void* src, unsigned n)
218 size_t __n)
219{ 213{
220 int i; 214 int i;
221 char *d = (char *)__dest, *s = (char *)__src; 215 char *d = (char *)dest, *s = (char *)src;
222 216
223 for (i=0;i<__n;i++) d[i] = s[i]; 217 for (i=0;i<n;i++) d[i] = s[i];
224 return __dest; 218 return dest;
225} 219}
226 220
227/* =========================================================================== 221/* ===========================================================================
@@ -309,7 +303,7 @@ static void setup_normal_output_buffer(void)
309#else 303#else
310 if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < 1024) error("Less than 2MB of memory"); 304 if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < 1024) error("Less than 2MB of memory");
311#endif 305#endif
312 output_data = (char *)__PHYSICAL_START; /* Normally Points to 1M */ 306 output_data = (unsigned char *)__PHYSICAL_START; /* Normally Points to 1M */
313 free_mem_end_ptr = (long)real_mode; 307 free_mem_end_ptr = (long)real_mode;
314} 308}
315 309
@@ -324,11 +318,9 @@ static void setup_output_buffer_if_we_run_high(struct moveparams *mv)
324#ifdef STANDARD_MEMORY_BIOS_CALL 318#ifdef STANDARD_MEMORY_BIOS_CALL
325 if (RM_EXT_MEM_K < (3*1024)) error("Less than 4MB of memory"); 319 if (RM_EXT_MEM_K < (3*1024)) error("Less than 4MB of memory");
326#else 320#else
327 if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < 321 if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < (3*1024)) error("Less than 4MB of memory");
328 (3*1024))
329 error("Less than 4MB of memory");
330#endif 322#endif
331 mv->low_buffer_start = output_data = (char *)LOW_BUFFER_START; 323 mv->low_buffer_start = output_data = (unsigned char *)LOW_BUFFER_START;
332 low_buffer_end = ((unsigned int)real_mode > LOW_BUFFER_MAX 324 low_buffer_end = ((unsigned int)real_mode > LOW_BUFFER_MAX
333 ? LOW_BUFFER_MAX : (unsigned int)real_mode) & ~0xfff; 325 ? LOW_BUFFER_MAX : (unsigned int)real_mode) & ~0xfff;
334 low_buffer_size = low_buffer_end - LOW_BUFFER_START; 326 low_buffer_size = low_buffer_end - LOW_BUFFER_START;
diff --git a/arch/i386/boot/video.S b/arch/i386/boot/video.S
index c9343c3a8082..8c2a6faeeae5 100644
--- a/arch/i386/boot/video.S
+++ b/arch/i386/boot/video.S
@@ -1929,7 +1929,7 @@ skip10: movb %ah, %al
1929 ret 1929 ret
1930 1930
1931store_edid: 1931store_edid:
1932#ifdef CONFIG_FB_FIRMWARE_EDID 1932#ifdef CONFIG_FIRMWARE_EDID
1933 pushw %es # just save all registers 1933 pushw %es # just save all registers
1934 pushw %ax 1934 pushw %ax
1935 pushw %bx 1935 pushw %bx
@@ -1947,6 +1947,22 @@ store_edid:
1947 rep 1947 rep
1948 stosl 1948 stosl
1949 1949
1950 pushw %es # save ES
1951 xorw %di, %di # Report Capability
1952 pushw %di
1953 popw %es # ES:DI must be 0:0
1954 movw $0x4f15, %ax
1955 xorw %bx, %bx
1956 xorw %cx, %cx
1957 int $0x10
1958 popw %es # restore ES
1959
1960 cmpb $0x00, %ah # call successful
1961 jne no_edid
1962
1963 cmpb $0x4f, %al # function supported
1964 jne no_edid
1965
1950 movw $0x4f15, %ax # do VBE/DDC 1966 movw $0x4f15, %ax # do VBE/DDC
1951 movw $0x01, %bx 1967 movw $0x01, %bx
1952 movw $0x00, %cx 1968 movw $0x00, %cx
@@ -1954,6 +1970,7 @@ store_edid:
1954 movw $0x140, %di 1970 movw $0x140, %di
1955 int $0x10 1971 int $0x10
1956 1972
1973no_edid:
1957 popw %di # restore all registers 1974 popw %di # restore all registers
1958 popw %dx 1975 popw %dx
1959 popw %cx 1976 popw %cx
diff --git a/arch/i386/crypto/aes-i586-asm.S b/arch/i386/crypto/aes-i586-asm.S
index 911b15377f2e..f942f0c8f630 100644
--- a/arch/i386/crypto/aes-i586-asm.S
+++ b/arch/i386/crypto/aes-i586-asm.S
@@ -36,22 +36,19 @@
36.file "aes-i586-asm.S" 36.file "aes-i586-asm.S"
37.text 37.text
38 38
39// aes_rval aes_enc_blk(const unsigned char in_blk[], unsigned char out_blk[], const aes_ctx cx[1])// 39#include <asm/asm-offsets.h>
40// aes_rval aes_dec_blk(const unsigned char in_blk[], unsigned char out_blk[], const aes_ctx cx[1])//
41
42#define tlen 1024 // length of each of 4 'xor' arrays (256 32-bit words)
43 40
44// offsets to parameters with one register pushed onto stack 41#define tlen 1024 // length of each of 4 'xor' arrays (256 32-bit words)
45
46#define in_blk 8 // input byte array address parameter
47#define out_blk 12 // output byte array address parameter
48#define ctx 16 // AES context structure
49 42
50// offsets in context structure 43/* offsets to parameters with one register pushed onto stack */
44#define tfm 8
45#define out_blk 12
46#define in_blk 16
51 47
52#define ekey 0 // encryption key schedule base address 48/* offsets in crypto_tfm structure */
53#define nrnd 256 // number of rounds 49#define ekey (crypto_tfm_ctx_offset + 0)
54#define dkey 260 // decryption key schedule base address 50#define nrnd (crypto_tfm_ctx_offset + 256)
51#define dkey (crypto_tfm_ctx_offset + 260)
55 52
56// register mapping for encrypt and decrypt subroutines 53// register mapping for encrypt and decrypt subroutines
57 54
@@ -220,6 +217,7 @@
220 do_col (table, r5,r0,r1,r4, r2,r3); /* idx=r5 */ 217 do_col (table, r5,r0,r1,r4, r2,r3); /* idx=r5 */
221 218
222// AES (Rijndael) Encryption Subroutine 219// AES (Rijndael) Encryption Subroutine
220/* void aes_enc_blk(struct crypto_tfm *tfm, u8 *out_blk, const u8 *in_blk) */
223 221
224.global aes_enc_blk 222.global aes_enc_blk
225 223
@@ -230,7 +228,7 @@
230 228
231aes_enc_blk: 229aes_enc_blk:
232 push %ebp 230 push %ebp
233 mov ctx(%esp),%ebp // pointer to context 231 mov tfm(%esp),%ebp
234 232
235// CAUTION: the order and the values used in these assigns 233// CAUTION: the order and the values used in these assigns
236// rely on the register mappings 234// rely on the register mappings
@@ -295,6 +293,7 @@ aes_enc_blk:
295 ret 293 ret
296 294
297// AES (Rijndael) Decryption Subroutine 295// AES (Rijndael) Decryption Subroutine
296/* void aes_dec_blk(struct crypto_tfm *tfm, u8 *out_blk, const u8 *in_blk) */
298 297
299.global aes_dec_blk 298.global aes_dec_blk
300 299
@@ -305,7 +304,7 @@ aes_enc_blk:
305 304
306aes_dec_blk: 305aes_dec_blk:
307 push %ebp 306 push %ebp
308 mov ctx(%esp),%ebp // pointer to context 307 mov tfm(%esp),%ebp
309 308
310// CAUTION: the order and the values used in these assigns 309// CAUTION: the order and the values used in these assigns
311// rely on the register mappings 310// rely on the register mappings
diff --git a/arch/i386/crypto/aes.c b/arch/i386/crypto/aes.c
index a50397b1d5c7..d3806daa3de3 100644
--- a/arch/i386/crypto/aes.c
+++ b/arch/i386/crypto/aes.c
@@ -45,8 +45,8 @@
45#include <linux/crypto.h> 45#include <linux/crypto.h>
46#include <linux/linkage.h> 46#include <linux/linkage.h>
47 47
48asmlinkage void aes_enc_blk(const u8 *src, u8 *dst, void *ctx); 48asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
49asmlinkage void aes_dec_blk(const u8 *src, u8 *dst, void *ctx); 49asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
50 50
51#define AES_MIN_KEY_SIZE 16 51#define AES_MIN_KEY_SIZE 16
52#define AES_MAX_KEY_SIZE 32 52#define AES_MAX_KEY_SIZE 32
@@ -378,12 +378,12 @@ static void gen_tabs(void)
378 k[8*(i)+11] = ss[3]; \ 378 k[8*(i)+11] = ss[3]; \
379} 379}
380 380
381static int 381static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
382aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) 382 unsigned int key_len, u32 *flags)
383{ 383{
384 int i; 384 int i;
385 u32 ss[8]; 385 u32 ss[8];
386 struct aes_ctx *ctx = ctx_arg; 386 struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
387 const __le32 *key = (const __le32 *)in_key; 387 const __le32 *key = (const __le32 *)in_key;
388 388
389 /* encryption schedule */ 389 /* encryption schedule */
@@ -464,16 +464,16 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
464 return 0; 464 return 0;
465} 465}
466 466
467static inline void aes_encrypt(void *ctx, u8 *dst, const u8 *src) 467static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
468{ 468{
469 aes_enc_blk(src, dst, ctx); 469 aes_enc_blk(tfm, dst, src);
470} 470}
471static inline void aes_decrypt(void *ctx, u8 *dst, const u8 *src) 471
472static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
472{ 473{
473 aes_dec_blk(src, dst, ctx); 474 aes_dec_blk(tfm, dst, src);
474} 475}
475 476
476
477static struct crypto_alg aes_alg = { 477static struct crypto_alg aes_alg = {
478 .cra_name = "aes", 478 .cra_name = "aes",
479 .cra_driver_name = "aes-i586", 479 .cra_driver_name = "aes-i586",
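
The aes.c hunks above follow the crypto-layer change that passes a struct crypto_tfm * into cipher callbacks instead of a bare context pointer, with the private context recovered via crypto_tfm_ctx(). A condensed sketch of a single-block cipher registered against that interface (the algorithm name, context layout, and the trivial XOR transform are placeholders, not real cryptography):

	#include <linux/crypto.h>
	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/string.h>

	#define DEMO_BLOCK_SIZE	16

	struct demo_ctx {
		u8 key[32];			/* placeholder key storage */
	};

	static int demo_setkey(struct crypto_tfm *tfm, const u8 *in_key,
			       unsigned int key_len, u32 *flags)
	{
		struct demo_ctx *ctx = crypto_tfm_ctx(tfm);

		if (key_len > sizeof(ctx->key)) {
			*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
			return -EINVAL;
		}
		memcpy(ctx->key, in_key, key_len);
		return 0;
	}

	static void demo_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
	{
		struct demo_ctx *ctx = crypto_tfm_ctx(tfm);
		int i;

		for (i = 0; i < DEMO_BLOCK_SIZE; i++)	/* stand-in for a real cipher */
			dst[i] = src[i] ^ ctx->key[i % sizeof(ctx->key)];
	}

	static void demo_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
	{
		demo_encrypt(tfm, dst, src);		/* XOR is its own inverse */
	}

	static struct crypto_alg demo_alg = {
		.cra_name	= "demo-xor",
		.cra_flags	= CRYPTO_ALG_TYPE_CIPHER,
		.cra_blocksize	= DEMO_BLOCK_SIZE,
		.cra_ctxsize	= sizeof(struct demo_ctx),
		.cra_module	= THIS_MODULE,
		.cra_list	= LIST_HEAD_INIT(demo_alg.cra_list),
		.cra_u		= { .cipher = {
			.cia_min_keysize	= 16,
			.cia_max_keysize	= 32,
			.cia_setkey		= demo_setkey,
			.cia_encrypt		= demo_encrypt,
			.cia_decrypt		= demo_decrypt } }
	};

	static int __init demo_init(void)
	{
		return crypto_register_alg(&demo_alg);
	}

	static void __exit demo_exit(void)
	{
		crypto_unregister_alg(&demo_alg);
	}

	module_init(demo_init);
	module_exit(demo_exit);
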
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index 96fb8a020af2..5e70c2fb273a 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -7,10 +7,9 @@ extra-y := head.o init_task.o vmlinux.lds
7obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \ 7obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \
8 ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \ 8 ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \
9 pci-dma.o i386_ksyms.o i387.o bootflag.o \ 9 pci-dma.o i386_ksyms.o i387.o bootflag.o \
10 quirks.o i8237.o topology.o alternative.o 10 quirks.o i8237.o topology.o alternative.o i8253.o tsc.o
11 11
12obj-y += cpu/ 12obj-y += cpu/
13obj-y += timers/
14obj-y += acpi/ 13obj-y += acpi/
15obj-$(CONFIG_X86_BIOS_REBOOT) += reboot.o 14obj-$(CONFIG_X86_BIOS_REBOOT) += reboot.o
16obj-$(CONFIG_MCA) += mca.o 15obj-$(CONFIG_MCA) += mca.o
@@ -37,6 +36,8 @@ obj-$(CONFIG_EFI) += efi.o efi_stub.o
37obj-$(CONFIG_DOUBLEFAULT) += doublefault.o 36obj-$(CONFIG_DOUBLEFAULT) += doublefault.o
38obj-$(CONFIG_VM86) += vm86.o 37obj-$(CONFIG_VM86) += vm86.o
39obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 38obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
39obj-$(CONFIG_HPET_TIMER) += hpet.o
40obj-$(CONFIG_K8_NB) += k8.o
40 41
41EXTRA_AFLAGS := -traditional 42EXTRA_AFLAGS := -traditional
42 43
@@ -76,3 +77,6 @@ SYSCFLAGS_vsyscall-syms.o = -r
76$(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \ 77$(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \
77 $(obj)/vsyscall-sysenter.o $(obj)/vsyscall-note.o FORCE 78 $(obj)/vsyscall-sysenter.o $(obj)/vsyscall-note.o FORCE
78 $(call if_changed,syscall) 79 $(call if_changed,syscall)
80
81k8-y += ../../x86_64/kernel/k8.o
82
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
index 5cbd6f99fb2a..50eb0e03777e 100644
--- a/arch/i386/kernel/alternative.c
+++ b/arch/i386/kernel/alternative.c
@@ -4,27 +4,41 @@
4#include <asm/alternative.h> 4#include <asm/alternative.h>
5#include <asm/sections.h> 5#include <asm/sections.h>
6 6
7#define DEBUG 0 7static int no_replacement = 0;
8#if DEBUG 8static int smp_alt_once = 0;
9# define DPRINTK(fmt, args...) printk(fmt, args) 9static int debug_alternative = 0;
10#else 10
11# define DPRINTK(fmt, args...) 11static int __init noreplacement_setup(char *s)
12#endif 12{
13 no_replacement = 1;
14 return 1;
15}
16static int __init bootonly(char *str)
17{
18 smp_alt_once = 1;
19 return 1;
20}
21static int __init debug_alt(char *str)
22{
23 debug_alternative = 1;
24 return 1;
25}
13 26
27__setup("noreplacement", noreplacement_setup);
28__setup("smp-alt-boot", bootonly);
29__setup("debug-alternative", debug_alt);
30
31#define DPRINTK(fmt, args...) if (debug_alternative) \
32 printk(KERN_DEBUG fmt, args)
33
34#ifdef GENERIC_NOP1
14/* Use inline assembly to define this because the nops are defined 35/* Use inline assembly to define this because the nops are defined
15 as inline assembly strings in the include files and we cannot 36 as inline assembly strings in the include files and we cannot
16 get them easily into strings. */ 37 get them easily into strings. */
17asm("\t.data\nintelnops: " 38asm("\t.data\nintelnops: "
18 GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6 39 GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
19 GENERIC_NOP7 GENERIC_NOP8); 40 GENERIC_NOP7 GENERIC_NOP8);
20asm("\t.data\nk8nops: " 41extern unsigned char intelnops[];
21 K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
22 K8_NOP7 K8_NOP8);
23asm("\t.data\nk7nops: "
24 K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
25 K7_NOP7 K7_NOP8);
26
27extern unsigned char intelnops[], k8nops[], k7nops[];
28static unsigned char *intel_nops[ASM_NOP_MAX+1] = { 42static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
29 NULL, 43 NULL,
30 intelnops, 44 intelnops,
@@ -36,6 +50,13 @@ static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
36 intelnops + 1 + 2 + 3 + 4 + 5 + 6, 50 intelnops + 1 + 2 + 3 + 4 + 5 + 6,
37 intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7, 51 intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
38}; 52};
53#endif
54
55#ifdef K8_NOP1
56asm("\t.data\nk8nops: "
57 K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
58 K8_NOP7 K8_NOP8);
59extern unsigned char k8nops[];
39static unsigned char *k8_nops[ASM_NOP_MAX+1] = { 60static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
40 NULL, 61 NULL,
41 k8nops, 62 k8nops,
@@ -47,6 +68,13 @@ static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
47 k8nops + 1 + 2 + 3 + 4 + 5 + 6, 68 k8nops + 1 + 2 + 3 + 4 + 5 + 6,
48 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, 69 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
49}; 70};
71#endif
72
73#ifdef K7_NOP1
74asm("\t.data\nk7nops: "
75 K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
76 K7_NOP7 K7_NOP8);
77extern unsigned char k7nops[];
50static unsigned char *k7_nops[ASM_NOP_MAX+1] = { 78static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
51 NULL, 79 NULL,
52 k7nops, 80 k7nops,
@@ -58,6 +86,18 @@ static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
58 k7nops + 1 + 2 + 3 + 4 + 5 + 6, 86 k7nops + 1 + 2 + 3 + 4 + 5 + 6,
59 k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, 87 k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
60}; 88};
89#endif
90
91#ifdef CONFIG_X86_64
92
93extern char __vsyscall_0;
94static inline unsigned char** find_nop_table(void)
95{
96 return k8_nops;
97}
98
99#else /* CONFIG_X86_64 */
100
61static struct nop { 101static struct nop {
62 int cpuid; 102 int cpuid;
63 unsigned char **noptable; 103 unsigned char **noptable;
@@ -67,14 +107,6 @@ static struct nop {
67 { -1, NULL } 107 { -1, NULL }
68}; 108};
69 109
70
71extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
72extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
73extern u8 *__smp_locks[], *__smp_locks_end[];
74
75extern u8 __smp_alt_begin[], __smp_alt_end[];
76
77
78static unsigned char** find_nop_table(void) 110static unsigned char** find_nop_table(void)
79{ 111{
80 unsigned char **noptable = intel_nops; 112 unsigned char **noptable = intel_nops;
@@ -89,6 +121,14 @@ static unsigned char** find_nop_table(void)
89 return noptable; 121 return noptable;
90} 122}
91 123
124#endif /* CONFIG_X86_64 */
125
126extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
127extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
128extern u8 *__smp_locks[], *__smp_locks_end[];
129
130extern u8 __smp_alt_begin[], __smp_alt_end[];
131
92/* Replace instructions with better alternatives for this CPU type. 132/* Replace instructions with better alternatives for this CPU type.
93 This runs before SMP is initialized to avoid SMP problems with 133 This runs before SMP is initialized to avoid SMP problems with
 94 self modifying code. This implies that asymmetric systems where 134 self modifying code. This implies that asymmetric systems where
@@ -99,6 +139,7 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
99{ 139{
100 unsigned char **noptable = find_nop_table(); 140 unsigned char **noptable = find_nop_table();
101 struct alt_instr *a; 141 struct alt_instr *a;
142 u8 *instr;
102 int diff, i, k; 143 int diff, i, k;
103 144
104 DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end); 145 DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
@@ -106,7 +147,16 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
106 BUG_ON(a->replacementlen > a->instrlen); 147 BUG_ON(a->replacementlen > a->instrlen);
107 if (!boot_cpu_has(a->cpuid)) 148 if (!boot_cpu_has(a->cpuid))
108 continue; 149 continue;
109 memcpy(a->instr, a->replacement, a->replacementlen); 150 instr = a->instr;
151#ifdef CONFIG_X86_64
152 /* vsyscall code is not mapped yet. resolve it manually. */
153 if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
154 instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
155 DPRINTK("%s: vsyscall fixup: %p => %p\n",
156 __FUNCTION__, a->instr, instr);
157 }
158#endif
159 memcpy(instr, a->replacement, a->replacementlen);
110 diff = a->instrlen - a->replacementlen; 160 diff = a->instrlen - a->replacementlen;
111 /* Pad the rest with nops */ 161 /* Pad the rest with nops */
112 for (i = a->replacementlen; diff > 0; diff -= k, i += k) { 162 for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
@@ -186,14 +236,6 @@ struct smp_alt_module {
186static LIST_HEAD(smp_alt_modules); 236static LIST_HEAD(smp_alt_modules);
187static DEFINE_SPINLOCK(smp_alt); 237static DEFINE_SPINLOCK(smp_alt);
188 238
189static int smp_alt_once = 0;
190static int __init bootonly(char *str)
191{
192 smp_alt_once = 1;
193 return 1;
194}
195__setup("smp-alt-boot", bootonly);
196
197void alternatives_smp_module_add(struct module *mod, char *name, 239void alternatives_smp_module_add(struct module *mod, char *name,
198 void *locks, void *locks_end, 240 void *locks, void *locks_end,
199 void *text, void *text_end) 241 void *text, void *text_end)
@@ -201,6 +243,9 @@ void alternatives_smp_module_add(struct module *mod, char *name,
201 struct smp_alt_module *smp; 243 struct smp_alt_module *smp;
202 unsigned long flags; 244 unsigned long flags;
203 245
246 if (no_replacement)
247 return;
248
204 if (smp_alt_once) { 249 if (smp_alt_once) {
205 if (boot_cpu_has(X86_FEATURE_UP)) 250 if (boot_cpu_has(X86_FEATURE_UP))
206 alternatives_smp_unlock(locks, locks_end, 251 alternatives_smp_unlock(locks, locks_end,
@@ -235,7 +280,7 @@ void alternatives_smp_module_del(struct module *mod)
235 struct smp_alt_module *item; 280 struct smp_alt_module *item;
236 unsigned long flags; 281 unsigned long flags;
237 282
238 if (smp_alt_once) 283 if (no_replacement || smp_alt_once)
239 return; 284 return;
240 285
241 spin_lock_irqsave(&smp_alt, flags); 286 spin_lock_irqsave(&smp_alt, flags);
@@ -256,7 +301,7 @@ void alternatives_smp_switch(int smp)
256 struct smp_alt_module *mod; 301 struct smp_alt_module *mod;
257 unsigned long flags; 302 unsigned long flags;
258 303
259 if (smp_alt_once) 304 if (no_replacement || smp_alt_once)
260 return; 305 return;
261 BUG_ON(!smp && (num_online_cpus() > 1)); 306 BUG_ON(!smp && (num_online_cpus() > 1));
262 307
@@ -285,6 +330,13 @@ void alternatives_smp_switch(int smp)
285 330
286void __init alternative_instructions(void) 331void __init alternative_instructions(void)
287{ 332{
333 if (no_replacement) {
334 printk(KERN_INFO "(SMP-)alternatives turned off\n");
335 free_init_pages("SMP alternatives",
336 (unsigned long)__smp_alt_begin,
337 (unsigned long)__smp_alt_end);
338 return;
339 }
288 apply_alternatives(__alt_instructions, __alt_instructions_end); 340 apply_alternatives(__alt_instructions, __alt_instructions_end);
289 341
290 /* switch to patch-once-at-boottime-only mode and free the 342 /* switch to patch-once-at-boottime-only mode and free the
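A minimal user-space sketch of the padding step in apply_alternatives() above: after the replacement bytes are copied over the original instruction, the remaining gap is filled with the longest NOP sequences available. The nop table here is fabricated (plain 0x90 bytes) and the replacement bytes are arbitrary; the kernel instead selects the GENERIC_NOP*/K8_NOP*/K7_NOP* strings via find_nop_table() and patches real instruction addresses.

#include <stdio.h>
#include <string.h>

#define ASM_NOP_MAX 8

static unsigned char nop_bytes[ASM_NOP_MAX + 1][ASM_NOP_MAX];
static unsigned char *noptable[ASM_NOP_MAX + 1];

static void init_fake_nops(void)
{
        int i;

        for (i = 1; i <= ASM_NOP_MAX; i++) {
                memset(nop_bytes[i], 0x90, i);  /* i one-byte NOPs */
                noptable[i] = nop_bytes[i];
        }
}

/* Copy 'replacementlen' bytes over 'instr' (length 'instrlen') and pad the
 * tail with NOPs, mirroring the loop in apply_alternatives(). */
static void patch_site(unsigned char *instr, int instrlen,
                       const unsigned char *replacement, int replacementlen)
{
        int diff, i, k;

        memcpy(instr, replacement, replacementlen);
        diff = instrlen - replacementlen;
        for (i = replacementlen; diff > 0; diff -= k, i += k) {
                k = diff > ASM_NOP_MAX ? ASM_NOP_MAX : diff;
                memcpy(instr + i, noptable[k], k);
        }
}

int main(void)
{
        unsigned char site[10] = { 0 };
        const unsigned char repl[3] = { 0x0f, 0x01, 0xf8 };
        int i;

        init_fake_nops();
        patch_site(site, sizeof(site), repl, sizeof(repl));
        for (i = 0; i < (int)sizeof(site); i++)
                printf("%02x ", site[i]);
        printf("\n");
        return 0;
}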
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 5ab59c12335b..7ce09492fc0c 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -36,6 +36,7 @@
36#include <asm/arch_hooks.h> 36#include <asm/arch_hooks.h>
37#include <asm/hpet.h> 37#include <asm/hpet.h>
38#include <asm/i8253.h> 38#include <asm/i8253.h>
39#include <asm/nmi.h>
39 40
40#include <mach_apic.h> 41#include <mach_apic.h>
41#include <mach_apicdef.h> 42#include <mach_apicdef.h>
@@ -156,7 +157,7 @@ void clear_local_APIC(void)
156 maxlvt = get_maxlvt(); 157 maxlvt = get_maxlvt();
157 158
158 /* 159 /*
159 * Masking an LVT entry on a P6 can trigger a local APIC error 160 * Masking an LVT entry can trigger a local APIC error
160 * if the vector is zero. Mask LVTERR first to prevent this. 161 * if the vector is zero. Mask LVTERR first to prevent this.
161 */ 162 */
162 if (maxlvt >= 3) { 163 if (maxlvt >= 3) {
@@ -1117,7 +1118,18 @@ void disable_APIC_timer(void)
1117 unsigned long v; 1118 unsigned long v;
1118 1119
1119 v = apic_read(APIC_LVTT); 1120 v = apic_read(APIC_LVTT);
1120 apic_write_around(APIC_LVTT, v | APIC_LVT_MASKED); 1121 /*
1122 * When an illegal vector value (0-15) is written to an LVT
1123 * entry and delivery mode is Fixed, the APIC may signal an
1124 * illegal vector error, without regard to whether the mask
1125 * bit is set or whether an interrupt is actually seen on input.
1126 *
1127 * Boot sequence might call this function when the LVTT has
1128 * '0' vector value. So make sure vector field is set to
1129 * valid value.
1130 */
1131 v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
1132 apic_write_around(APIC_LVTT, v);
1121 } 1133 }
1122} 1134}
1123 1135
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index 9e819eb68229..7c5729d1fd06 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -764,9 +764,9 @@ static int apm_do_idle(void)
764 int idled = 0; 764 int idled = 0;
765 int polling; 765 int polling;
766 766
767 polling = test_thread_flag(TIF_POLLING_NRFLAG); 767 polling = !!(current_thread_info()->status & TS_POLLING);
768 if (polling) { 768 if (polling) {
769 clear_thread_flag(TIF_POLLING_NRFLAG); 769 current_thread_info()->status &= ~TS_POLLING;
770 smp_mb__after_clear_bit(); 770 smp_mb__after_clear_bit();
771 } 771 }
772 if (!need_resched()) { 772 if (!need_resched()) {
@@ -774,7 +774,7 @@ static int apm_do_idle(void)
774 ret = apm_bios_call_simple(APM_FUNC_IDLE, 0, 0, &eax); 774 ret = apm_bios_call_simple(APM_FUNC_IDLE, 0, 0, &eax);
775 } 775 }
776 if (polling) 776 if (polling)
777 set_thread_flag(TIF_POLLING_NRFLAG); 777 current_thread_info()->status |= TS_POLLING;
778 778
779 if (!idled) 779 if (!idled)
780 return 0; 780 return 0;
diff --git a/arch/i386/kernel/asm-offsets.c b/arch/i386/kernel/asm-offsets.c
index 36d66e2077d0..c80271f8f084 100644
--- a/arch/i386/kernel/asm-offsets.c
+++ b/arch/i386/kernel/asm-offsets.c
@@ -4,6 +4,7 @@
4 * to extract and format the required data. 4 * to extract and format the required data.
5 */ 5 */
6 6
7#include <linux/crypto.h>
7#include <linux/sched.h> 8#include <linux/sched.h>
8#include <linux/signal.h> 9#include <linux/signal.h>
9#include <linux/personality.h> 10#include <linux/personality.h>
@@ -13,6 +14,7 @@
13#include <asm/fixmap.h> 14#include <asm/fixmap.h>
14#include <asm/processor.h> 15#include <asm/processor.h>
15#include <asm/thread_info.h> 16#include <asm/thread_info.h>
17#include <asm/elf.h>
16 18
17#define DEFINE(sym, val) \ 19#define DEFINE(sym, val) \
18 asm volatile("\n->" #sym " %0 " #val : : "i" (val)) 20 asm volatile("\n->" #sym " %0 " #val : : "i" (val))
@@ -53,6 +55,7 @@ void foo(void)
53 OFFSET(TI_preempt_count, thread_info, preempt_count); 55 OFFSET(TI_preempt_count, thread_info, preempt_count);
54 OFFSET(TI_addr_limit, thread_info, addr_limit); 56 OFFSET(TI_addr_limit, thread_info, addr_limit);
55 OFFSET(TI_restart_block, thread_info, restart_block); 57 OFFSET(TI_restart_block, thread_info, restart_block);
58 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
56 BLANK(); 59 BLANK();
57 60
58 OFFSET(EXEC_DOMAIN_handler, exec_domain, handler); 61 OFFSET(EXEC_DOMAIN_handler, exec_domain, handler);
@@ -68,5 +71,7 @@ void foo(void)
68 sizeof(struct tss_struct)); 71 sizeof(struct tss_struct));
69 72
70 DEFINE(PAGE_SIZE_asm, PAGE_SIZE); 73 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
71 DEFINE(VSYSCALL_BASE, __fix_to_virt(FIX_VSYSCALL)); 74 DEFINE(VDSO_PRELINK, VDSO_PRELINK);
75
76 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
72} 77}
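The TI_sysenter_return and crypto_tfm_ctx_offset additions rely on the asm-offsets technique: DEFINE()/OFFSET() force the compiler to print each constant into the generated assembly as a "->NAME value" marker line, which the build turns into a header usable from .S files. A stripped-down sketch follows; the structure and member names are stand-ins, and it is only ever compiled with gcc -S (the marker lines are not valid assembler input, which is why the kernel never assembles this file).

/* Build with: gcc -S asm-offsets-sketch.c, then inspect the .s output. */
#include <stddef.h>

struct thread_info_like {
        unsigned long flags;
        int preempt_count;
        unsigned long sysenter_return;
};

#define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

#define OFFSET(sym, str, mem) \
        DEFINE(sym, offsetof(struct str, mem))

void foo(void)
{
        OFFSET(TI_sysenter_return, thread_info_like, sysenter_return);
        DEFINE(PAGE_SIZE_asm, 4096);
}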
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index 786d1a57048b..e6a2d6b80cda 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -224,22 +224,26 @@ static void __init init_amd(struct cpuinfo_x86 *c)
224 224
225#ifdef CONFIG_X86_HT 225#ifdef CONFIG_X86_HT
226 /* 226 /*
 227 * On an AMD dual core setup the lower bits of the APIC id 227 * On an AMD multi core setup the lower bits of the APIC id
 228 * distinguish the cores. Assumes number of cores is a power 228 * distinguish the cores.
229 * of two.
230 */ 229 */
231 if (c->x86_max_cores > 1) { 230 if (c->x86_max_cores > 1) {
232 int cpu = smp_processor_id(); 231 int cpu = smp_processor_id();
233 unsigned bits = 0; 232 unsigned bits = (cpuid_ecx(0x80000008) >> 12) & 0xf;
234 while ((1 << bits) < c->x86_max_cores) 233
235 bits++; 234 if (bits == 0) {
236 cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<<bits)-1); 235 while ((1 << bits) < c->x86_max_cores)
237 phys_proc_id[cpu] >>= bits; 236 bits++;
237 }
238 c->cpu_core_id = c->phys_proc_id & ((1<<bits)-1);
239 c->phys_proc_id >>= bits;
238 printk(KERN_INFO "CPU %d(%d) -> Core %d\n", 240 printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
239 cpu, c->x86_max_cores, cpu_core_id[cpu]); 241 cpu, c->x86_max_cores, c->cpu_core_id);
240 } 242 }
241#endif 243#endif
242 244
245 if (cpuid_eax(0x80000000) >= 0x80000006)
246 num_cache_leaves = 3;
243} 247}
244 248
245static unsigned int amd_size_cache(struct cpuinfo_x86 * c, unsigned int size) 249static unsigned int amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
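A sketch of the core-numbering change above, with the CPUID values hard-coded rather than read from the hardware: the APIC-id core field width comes from CPUID 0x80000008 ECX[15:12] when the CPU reports it, otherwise the code falls back to rounding x86_max_cores up to a power of two; the low bits then select the core and the remaining bits the physical package.

#include <stdio.h>

int main(void)
{
        unsigned ecx_80000008 = 0x1001; /* pretend CPUID.80000008:ECX */
        unsigned max_cores = 2;         /* pretend cpuinfo->x86_max_cores */
        unsigned apicid = 0x05;         /* pretend initial APIC id */

        unsigned bits = (ecx_80000008 >> 12) & 0xf;
        if (bits == 0) {
                while ((1u << bits) < max_cores)
                        bits++;
        }

        unsigned cpu_core_id = apicid & ((1u << bits) - 1);
        unsigned phys_proc_id = apicid >> bits;

        printf("core %u, package %u (using %u core bits)\n",
               cpu_core_id, phys_proc_id, bits);
        return 0;
}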
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 44f2c5f2dda1..70c87de582c7 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -294,7 +294,7 @@ void __cpuinit generic_identify(struct cpuinfo_x86 * c)
294 if (c->x86 >= 0x6) 294 if (c->x86 >= 0x6)
295 c->x86_model += ((tfms >> 16) & 0xF) << 4; 295 c->x86_model += ((tfms >> 16) & 0xF) << 4;
296 c->x86_mask = tfms & 15; 296 c->x86_mask = tfms & 15;
297#ifdef CONFIG_SMP 297#ifdef CONFIG_X86_HT
298 c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0); 298 c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
299#else 299#else
300 c->apicid = (ebx >> 24) & 0xFF; 300 c->apicid = (ebx >> 24) & 0xFF;
@@ -319,7 +319,7 @@ void __cpuinit generic_identify(struct cpuinfo_x86 * c)
319 early_intel_workaround(c); 319 early_intel_workaround(c);
320 320
321#ifdef CONFIG_X86_HT 321#ifdef CONFIG_X86_HT
322 phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff; 322 c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
323#endif 323#endif
324} 324}
325 325
@@ -477,11 +477,9 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
477{ 477{
478 u32 eax, ebx, ecx, edx; 478 u32 eax, ebx, ecx, edx;
479 int index_msb, core_bits; 479 int index_msb, core_bits;
480 int cpu = smp_processor_id();
481 480
482 cpuid(1, &eax, &ebx, &ecx, &edx); 481 cpuid(1, &eax, &ebx, &ecx, &edx);
483 482
484
485 if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY)) 483 if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
486 return; 484 return;
487 485
@@ -492,16 +490,17 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
492 } else if (smp_num_siblings > 1 ) { 490 } else if (smp_num_siblings > 1 ) {
493 491
494 if (smp_num_siblings > NR_CPUS) { 492 if (smp_num_siblings > NR_CPUS) {
495 printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings); 493 printk(KERN_WARNING "CPU: Unsupported number of the "
494 "siblings %d", smp_num_siblings);
496 smp_num_siblings = 1; 495 smp_num_siblings = 1;
497 return; 496 return;
498 } 497 }
499 498
500 index_msb = get_count_order(smp_num_siblings); 499 index_msb = get_count_order(smp_num_siblings);
501 phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb); 500 c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
502 501
503 printk(KERN_INFO "CPU: Physical Processor ID: %d\n", 502 printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
504 phys_proc_id[cpu]); 503 c->phys_proc_id);
505 504
506 smp_num_siblings = smp_num_siblings / c->x86_max_cores; 505 smp_num_siblings = smp_num_siblings / c->x86_max_cores;
507 506
@@ -509,12 +508,12 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
509 508
510 core_bits = get_count_order(c->x86_max_cores); 509 core_bits = get_count_order(c->x86_max_cores);
511 510
512 cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) & 511 c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
513 ((1 << core_bits) - 1); 512 ((1 << core_bits) - 1);
514 513
515 if (c->x86_max_cores > 1) 514 if (c->x86_max_cores > 1)
516 printk(KERN_INFO "CPU: Processor Core ID: %d\n", 515 printk(KERN_INFO "CPU: Processor Core ID: %d\n",
517 cpu_core_id[cpu]); 516 c->cpu_core_id);
518 } 517 }
519} 518}
520#endif 519#endif
@@ -613,6 +612,12 @@ void __cpuinit cpu_init(void)
613 set_in_cr4(X86_CR4_TSD); 612 set_in_cr4(X86_CR4_TSD);
614 } 613 }
615 614
615 /* The CPU hotplug case */
616 if (cpu_gdt_descr->address) {
617 gdt = (struct desc_struct *)cpu_gdt_descr->address;
618 memset(gdt, 0, PAGE_SIZE);
619 goto old_gdt;
620 }
616 /* 621 /*
617 * This is a horrible hack to allocate the GDT. The problem 622 * This is a horrible hack to allocate the GDT. The problem
618 * is that cpu_init() is called really early for the boot CPU 623 * is that cpu_init() is called really early for the boot CPU
@@ -631,7 +636,7 @@ void __cpuinit cpu_init(void)
631 local_irq_enable(); 636 local_irq_enable();
632 } 637 }
633 } 638 }
634 639old_gdt:
635 /* 640 /*
636 * Initialize the per-CPU GDT with the boot GDT, 641 * Initialize the per-CPU GDT with the boot GDT,
637 * and set up the GDT descriptor: 642 * and set up the GDT descriptor:
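The detect_ht() rework keeps the same id-splitting arithmetic but stores the results in cpuinfo_x86 instead of the per-cpu arrays. That arithmetic, with a made-up APIC id and sibling count, and phys_pkg_id() reduced to a plain shift, looks like this:

#include <stdio.h>

/* ceil(log2(count)), as the kernel's get_count_order() computes */
static int get_count_order(unsigned int count)
{
        int order = 0;

        while ((1u << order) < count)
                order++;
        return order;
}

int main(void)
{
        unsigned apicid = 0x06;         /* pretend CPUID.1:EBX[31:24] */
        unsigned smp_num_siblings = 4;  /* logical CPUs per package */
        unsigned max_cores = 2;

        int index_msb = get_count_order(smp_num_siblings);
        unsigned phys_proc_id = apicid >> index_msb;

        unsigned siblings_per_core = smp_num_siblings / max_cores;
        index_msb = get_count_order(siblings_per_core);
        int core_bits = get_count_order(max_cores);
        unsigned cpu_core_id = (apicid >> index_msb) & ((1u << core_bits) - 1);

        printf("package %u, core %u\n", phys_proc_id, cpu_core_id);
        return 0;
}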
diff --git a/arch/i386/kernel/cpu/cyrix.c b/arch/i386/kernel/cpu/cyrix.c
index fc32c8028e24..f03b7f94c304 100644
--- a/arch/i386/kernel/cpu/cyrix.c
+++ b/arch/i386/kernel/cpu/cyrix.c
@@ -354,7 +354,7 @@ static void __init init_nsc(struct cpuinfo_x86 *c)
354 * This function only handles the GX processor, and kicks every 354 * This function only handles the GX processor, and kicks every
355 * thing else to the Cyrix init function above - that should 355 * thing else to the Cyrix init function above - that should
356 * cover any processors that might have been branded differently 356 * cover any processors that might have been branded differently
357 * after NSC aquired Cyrix. 357 * after NSC acquired Cyrix.
358 * 358 *
359 * If this breaks your GX1 horribly, please e-mail 359 * If this breaks your GX1 horribly, please e-mail
360 * info-linux@ldcmail.amd.com to tell us. 360 * info-linux@ldcmail.amd.com to tell us.
diff --git a/arch/i386/kernel/cpu/intel.c b/arch/i386/kernel/cpu/intel.c
index 5386b29bb5a5..10afc645c540 100644
--- a/arch/i386/kernel/cpu/intel.c
+++ b/arch/i386/kernel/cpu/intel.c
@@ -122,6 +122,12 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
122 122
123 select_idle_routine(c); 123 select_idle_routine(c);
124 l2 = init_intel_cacheinfo(c); 124 l2 = init_intel_cacheinfo(c);
125 if (c->cpuid_level > 9 ) {
126 unsigned eax = cpuid_eax(10);
127 /* Check for version and the number of counters */
128 if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
129 set_bit(X86_FEATURE_ARCH_PERFMON, c->x86_capability);
130 }
125 131
126 /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */ 132 /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */
127 if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633) 133 if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
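The new check above reads CPUID leaf 10 (0xA): EAX[7:0] carries the architectural-perfmon version and EAX[15:8] the number of general-purpose counters, and the feature bit is only set when the version is non-zero and more than one counter is present. A sketch with a hard-coded EAX value standing in for the real CPUID:

#include <stdio.h>

int main(void)
{
        unsigned eax = 0x07280202;      /* pretend CPUID.0A:EAX */
        unsigned version  = eax & 0xff;
        unsigned counters = (eax >> 8) & 0xff;

        if (version && counters > 1)
                printf("arch_perfmon: version %u, %u counters\n",
                       version, counters);
        else
                printf("arch_perfmon not usable\n");
        return 0;
}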
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index c8547a6fa7e6..e9f0b928b0a9 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -4,6 +4,7 @@
4 * Changes: 4 * Changes:
5 * Venkatesh Pallipadi : Adding cache identification through cpuid(4) 5 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
6 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure. 6 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
7 * Andi Kleen : CPUID4 emulation on AMD.
7 */ 8 */
8 9
9#include <linux/init.h> 10#include <linux/init.h>
@@ -130,25 +131,111 @@ struct _cpuid4_info {
130 cpumask_t shared_cpu_map; 131 cpumask_t shared_cpu_map;
131}; 132};
132 133
133static unsigned short num_cache_leaves; 134unsigned short num_cache_leaves;
135
136/* AMD doesn't have CPUID4. Emulate it here to report the same
137 information to the user. This makes some assumptions about the machine:
138 No L3, L2 not shared, no SMT etc. that is currently true on AMD CPUs.
139
140 In theory the TLBs could be reported as fake type (they are in "dummy").
141 Maybe later */
142union l1_cache {
143 struct {
144 unsigned line_size : 8;
145 unsigned lines_per_tag : 8;
146 unsigned assoc : 8;
147 unsigned size_in_kb : 8;
148 };
149 unsigned val;
150};
151
152union l2_cache {
153 struct {
154 unsigned line_size : 8;
155 unsigned lines_per_tag : 4;
156 unsigned assoc : 4;
157 unsigned size_in_kb : 16;
158 };
159 unsigned val;
160};
161
162static const unsigned short assocs[] = {
163 [1] = 1, [2] = 2, [4] = 4, [6] = 8,
164 [8] = 16,
165 [0xf] = 0xffff // ??
166 };
167static const unsigned char levels[] = { 1, 1, 2 };
168static const unsigned char types[] = { 1, 2, 3 };
169
170static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
171 union _cpuid4_leaf_ebx *ebx,
172 union _cpuid4_leaf_ecx *ecx)
173{
174 unsigned dummy;
175 unsigned line_size, lines_per_tag, assoc, size_in_kb;
176 union l1_cache l1i, l1d;
177 union l2_cache l2;
178
179 eax->full = 0;
180 ebx->full = 0;
181 ecx->full = 0;
182
183 cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
184 cpuid(0x80000006, &dummy, &dummy, &l2.val, &dummy);
185
186 if (leaf > 2 || !l1d.val || !l1i.val || !l2.val)
187 return;
188
189 eax->split.is_self_initializing = 1;
190 eax->split.type = types[leaf];
191 eax->split.level = levels[leaf];
192 eax->split.num_threads_sharing = 0;
193 eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
194
195 if (leaf <= 1) {
196 union l1_cache *l1 = leaf == 0 ? &l1d : &l1i;
197 assoc = l1->assoc;
198 line_size = l1->line_size;
199 lines_per_tag = l1->lines_per_tag;
200 size_in_kb = l1->size_in_kb;
201 } else {
202 assoc = l2.assoc;
203 line_size = l2.line_size;
204 lines_per_tag = l2.lines_per_tag;
205 /* cpu_data has errata corrections for K7 applied */
206 size_in_kb = current_cpu_data.x86_cache_size;
207 }
208
209 if (assoc == 0xf)
210 eax->split.is_fully_associative = 1;
211 ebx->split.coherency_line_size = line_size - 1;
212 ebx->split.ways_of_associativity = assocs[assoc] - 1;
213 ebx->split.physical_line_partition = lines_per_tag - 1;
214 ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
215 (ebx->split.ways_of_associativity + 1) - 1;
216}
134 217
135static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf) 218static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
136{ 219{
137 unsigned int eax, ebx, ecx, edx; 220 union _cpuid4_leaf_eax eax;
138 union _cpuid4_leaf_eax cache_eax; 221 union _cpuid4_leaf_ebx ebx;
222 union _cpuid4_leaf_ecx ecx;
223 unsigned edx;
139 224
140 cpuid_count(4, index, &eax, &ebx, &ecx, &edx); 225 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
141 cache_eax.full = eax; 226 amd_cpuid4(index, &eax, &ebx, &ecx);
142 if (cache_eax.split.type == CACHE_TYPE_NULL) 227 else
228 cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
229 if (eax.split.type == CACHE_TYPE_NULL)
143 return -EIO; /* better error ? */ 230 return -EIO; /* better error ? */
144 231
145 this_leaf->eax.full = eax; 232 this_leaf->eax = eax;
146 this_leaf->ebx.full = ebx; 233 this_leaf->ebx = ebx;
147 this_leaf->ecx.full = ecx; 234 this_leaf->ecx = ecx;
148 this_leaf->size = (this_leaf->ecx.split.number_of_sets + 1) * 235 this_leaf->size = (ecx.split.number_of_sets + 1) *
149 (this_leaf->ebx.split.coherency_line_size + 1) * 236 (ebx.split.coherency_line_size + 1) *
150 (this_leaf->ebx.split.physical_line_partition + 1) * 237 (ebx.split.physical_line_partition + 1) *
151 (this_leaf->ebx.split.ways_of_associativity + 1); 238 (ebx.split.ways_of_associativity + 1);
152 return 0; 239 return 0;
153} 240}
154 241
@@ -174,7 +261,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
174 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ 261 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
175 unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ 262 unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
176 unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb; 263 unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
177#ifdef CONFIG_SMP 264#ifdef CONFIG_X86_HT
178 unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data); 265 unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data);
179#endif 266#endif
180 267
@@ -296,14 +383,14 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
296 383
297 if (new_l2) { 384 if (new_l2) {
298 l2 = new_l2; 385 l2 = new_l2;
299#ifdef CONFIG_SMP 386#ifdef CONFIG_X86_HT
300 cpu_llc_id[cpu] = l2_id; 387 cpu_llc_id[cpu] = l2_id;
301#endif 388#endif
302 } 389 }
303 390
304 if (new_l3) { 391 if (new_l3) {
305 l3 = new_l3; 392 l3 = new_l3;
306#ifdef CONFIG_SMP 393#ifdef CONFIG_X86_HT
307 cpu_llc_id[cpu] = l3_id; 394 cpu_llc_id[cpu] = l3_id;
308#endif 395#endif
309 } 396 }
@@ -642,7 +729,7 @@ static void __cpuexit cache_remove_dev(struct sys_device * sys_dev)
642 return; 729 return;
643} 730}
644 731
645static int cacheinfo_cpu_callback(struct notifier_block *nfb, 732static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
646 unsigned long action, void *hcpu) 733 unsigned long action, void *hcpu)
647{ 734{
648 unsigned int cpu = (unsigned long)hcpu; 735 unsigned int cpu = (unsigned long)hcpu;
@@ -660,7 +747,7 @@ static int cacheinfo_cpu_callback(struct notifier_block *nfb,
660 return NOTIFY_OK; 747 return NOTIFY_OK;
661} 748}
662 749
663static struct notifier_block cacheinfo_cpu_notifier = 750static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
664{ 751{
665 .notifier_call = cacheinfo_cpu_callback, 752 .notifier_call = cacheinfo_cpu_callback,
666}; 753};
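The amd_cpuid4() emulation derives CPUID4-style fields from the legacy 0x80000005/0x80000006 leaves; every count field is stored minus one, and cpuid4_cache_lookup() multiplies the fields back together to recover the size. The round trip, with an illustrative 512 KB, 16-way, 64-byte-line L2 and the line-partition field left at zero:

#include <stdio.h>

int main(void)
{
        unsigned size_in_kb = 512;      /* pretend L2 size from 0x80000006 */
        unsigned line_size = 64;
        unsigned ways = 16;

        unsigned coherency_line_size = line_size - 1;
        unsigned ways_of_associativity = ways - 1;
        unsigned number_of_sets =
                (size_in_kb * 1024) / line_size /
                (ways_of_associativity + 1) - 1;

        /* Reversing the minus-one encoding reproduces the cache size,
         * exactly as cpuid4_cache_lookup() does. */
        unsigned size = (number_of_sets + 1) *
                        (coherency_line_size + 1) *
                        (ways_of_associativity + 1);

        printf("sets=%u, recovered size=%u bytes\n", number_of_sets, size);
        return 0;
}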
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index a19fcb262dbb..f54a15268ed7 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -18,7 +18,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
18 * applications want to get the raw CPUID data, they should access 18 * applications want to get the raw CPUID data, they should access
19 * /dev/cpu/<cpu_nr>/cpuid instead. 19 * /dev/cpu/<cpu_nr>/cpuid instead.
20 */ 20 */
21 static char *x86_cap_flags[] = { 21 static const char * const x86_cap_flags[] = {
22 /* Intel-defined */ 22 /* Intel-defined */
23 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", 23 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
24 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov", 24 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
@@ -62,7 +62,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
62 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 62 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
63 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 63 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
64 }; 64 };
65 static char *x86_power_flags[] = { 65 static const char * const x86_power_flags[] = {
66 "ts", /* temperature sensor */ 66 "ts", /* temperature sensor */
67 "fid", /* frequency id control */ 67 "fid", /* frequency id control */
68 "vid", /* voltage id control */ 68 "vid", /* voltage id control */
@@ -109,9 +109,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
109 seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); 109 seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
110#ifdef CONFIG_X86_HT 110#ifdef CONFIG_X86_HT
111 if (c->x86_max_cores * smp_num_siblings > 1) { 111 if (c->x86_max_cores * smp_num_siblings > 1) {
112 seq_printf(m, "physical id\t: %d\n", phys_proc_id[n]); 112 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
113 seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[n])); 113 seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[n]));
114 seq_printf(m, "core id\t\t: %d\n", cpu_core_id[n]); 114 seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
115 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); 115 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
116 } 116 }
117#endif 117#endif
diff --git a/arch/i386/kernel/cpuid.c b/arch/i386/kernel/cpuid.c
index 1d9a4abcdfc7..f6dfa9fb675c 100644
--- a/arch/i386/kernel/cpuid.c
+++ b/arch/i386/kernel/cpuid.c
@@ -183,7 +183,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long ac
183 return NOTIFY_OK; 183 return NOTIFY_OK;
184} 184}
185 185
186static struct notifier_block cpuid_class_cpu_notifier = 186static struct notifier_block __cpuinitdata cpuid_class_cpu_notifier =
187{ 187{
188 .notifier_call = cpuid_class_cpu_callback, 188 .notifier_call = cpuid_class_cpu_callback,
189}; 189};
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c
index 21dc1bbb8067..48f0f62f781c 100644
--- a/arch/i386/kernel/crash.c
+++ b/arch/i386/kernel/crash.c
@@ -120,14 +120,9 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
120 return 1; 120 return 1;
121} 121}
122 122
123/*
124 * By using the NMI code instead of a vector we just sneak thru the
125 * word generator coming out with just what we want. AND it does
126 * not matter if clustered_apic_mode is set or not.
127 */
128static void smp_send_nmi_allbutself(void) 123static void smp_send_nmi_allbutself(void)
129{ 124{
130 send_IPI_allbutself(APIC_DM_NMI); 125 send_IPI_allbutself(NMI_VECTOR);
131} 126}
132 127
133static void nmi_shootdown_cpus(void) 128static void nmi_shootdown_cpus(void)
@@ -163,7 +158,7 @@ static void nmi_shootdown_cpus(void)
163void machine_crash_shutdown(struct pt_regs *regs) 158void machine_crash_shutdown(struct pt_regs *regs)
164{ 159{
165 /* This function is only called after the system 160 /* This function is only called after the system
166 * has paniced or is otherwise in a critical state. 161 * has panicked or is otherwise in a critical state.
167 * The minimum amount of code to allow a kexec'd kernel 162 * The minimum amount of code to allow a kexec'd kernel
168 * to run successfully needs to happen here. 163 * to run successfully needs to happen here.
169 * 164 *
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index cfc683f153b9..fbdb933251b6 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -48,6 +48,7 @@
48#include <asm/smp.h> 48#include <asm/smp.h>
49#include <asm/page.h> 49#include <asm/page.h>
50#include <asm/desc.h> 50#include <asm/desc.h>
51#include <asm/dwarf2.h>
51#include "irq_vectors.h" 52#include "irq_vectors.h"
52 53
53#define nr_syscalls ((syscall_table_size)/4) 54#define nr_syscalls ((syscall_table_size)/4)
@@ -82,34 +83,76 @@ VM_MASK = 0x00020000
82#define resume_kernel restore_nocheck 83#define resume_kernel restore_nocheck
83#endif 84#endif
84 85
86#ifdef CONFIG_VM86
87#define resume_userspace_sig check_userspace
88#else
89#define resume_userspace_sig resume_userspace
90#endif
91
85#define SAVE_ALL \ 92#define SAVE_ALL \
86 cld; \ 93 cld; \
87 pushl %es; \ 94 pushl %es; \
95 CFI_ADJUST_CFA_OFFSET 4;\
96 /*CFI_REL_OFFSET es, 0;*/\
88 pushl %ds; \ 97 pushl %ds; \
98 CFI_ADJUST_CFA_OFFSET 4;\
99 /*CFI_REL_OFFSET ds, 0;*/\
89 pushl %eax; \ 100 pushl %eax; \
101 CFI_ADJUST_CFA_OFFSET 4;\
102 CFI_REL_OFFSET eax, 0;\
90 pushl %ebp; \ 103 pushl %ebp; \
104 CFI_ADJUST_CFA_OFFSET 4;\
105 CFI_REL_OFFSET ebp, 0;\
91 pushl %edi; \ 106 pushl %edi; \
107 CFI_ADJUST_CFA_OFFSET 4;\
108 CFI_REL_OFFSET edi, 0;\
92 pushl %esi; \ 109 pushl %esi; \
110 CFI_ADJUST_CFA_OFFSET 4;\
111 CFI_REL_OFFSET esi, 0;\
93 pushl %edx; \ 112 pushl %edx; \
113 CFI_ADJUST_CFA_OFFSET 4;\
114 CFI_REL_OFFSET edx, 0;\
94 pushl %ecx; \ 115 pushl %ecx; \
116 CFI_ADJUST_CFA_OFFSET 4;\
117 CFI_REL_OFFSET ecx, 0;\
95 pushl %ebx; \ 118 pushl %ebx; \
119 CFI_ADJUST_CFA_OFFSET 4;\
120 CFI_REL_OFFSET ebx, 0;\
96 movl $(__USER_DS), %edx; \ 121 movl $(__USER_DS), %edx; \
97 movl %edx, %ds; \ 122 movl %edx, %ds; \
98 movl %edx, %es; 123 movl %edx, %es;
99 124
100#define RESTORE_INT_REGS \ 125#define RESTORE_INT_REGS \
101 popl %ebx; \ 126 popl %ebx; \
127 CFI_ADJUST_CFA_OFFSET -4;\
128 CFI_RESTORE ebx;\
102 popl %ecx; \ 129 popl %ecx; \
130 CFI_ADJUST_CFA_OFFSET -4;\
131 CFI_RESTORE ecx;\
103 popl %edx; \ 132 popl %edx; \
133 CFI_ADJUST_CFA_OFFSET -4;\
134 CFI_RESTORE edx;\
104 popl %esi; \ 135 popl %esi; \
136 CFI_ADJUST_CFA_OFFSET -4;\
137 CFI_RESTORE esi;\
105 popl %edi; \ 138 popl %edi; \
139 CFI_ADJUST_CFA_OFFSET -4;\
140 CFI_RESTORE edi;\
106 popl %ebp; \ 141 popl %ebp; \
107 popl %eax 142 CFI_ADJUST_CFA_OFFSET -4;\
143 CFI_RESTORE ebp;\
144 popl %eax; \
145 CFI_ADJUST_CFA_OFFSET -4;\
146 CFI_RESTORE eax
108 147
109#define RESTORE_REGS \ 148#define RESTORE_REGS \
110 RESTORE_INT_REGS; \ 149 RESTORE_INT_REGS; \
1111: popl %ds; \ 1501: popl %ds; \
151 CFI_ADJUST_CFA_OFFSET -4;\
152 /*CFI_RESTORE ds;*/\
1122: popl %es; \ 1532: popl %es; \
154 CFI_ADJUST_CFA_OFFSET -4;\
155 /*CFI_RESTORE es;*/\
113.section .fixup,"ax"; \ 156.section .fixup,"ax"; \
1143: movl $0,(%esp); \ 1573: movl $0,(%esp); \
115 jmp 1b; \ 158 jmp 1b; \
@@ -122,13 +165,43 @@ VM_MASK = 0x00020000
122 .long 2b,4b; \ 165 .long 2b,4b; \
123.previous 166.previous
124 167
168#define RING0_INT_FRAME \
169 CFI_STARTPROC simple;\
170 CFI_DEF_CFA esp, 3*4;\
171 /*CFI_OFFSET cs, -2*4;*/\
172 CFI_OFFSET eip, -3*4
173
174#define RING0_EC_FRAME \
175 CFI_STARTPROC simple;\
176 CFI_DEF_CFA esp, 4*4;\
177 /*CFI_OFFSET cs, -2*4;*/\
178 CFI_OFFSET eip, -3*4
179
180#define RING0_PTREGS_FRAME \
181 CFI_STARTPROC simple;\
182 CFI_DEF_CFA esp, OLDESP-EBX;\
183 /*CFI_OFFSET cs, CS-OLDESP;*/\
184 CFI_OFFSET eip, EIP-OLDESP;\
185 /*CFI_OFFSET es, ES-OLDESP;*/\
186 /*CFI_OFFSET ds, DS-OLDESP;*/\
187 CFI_OFFSET eax, EAX-OLDESP;\
188 CFI_OFFSET ebp, EBP-OLDESP;\
189 CFI_OFFSET edi, EDI-OLDESP;\
190 CFI_OFFSET esi, ESI-OLDESP;\
191 CFI_OFFSET edx, EDX-OLDESP;\
192 CFI_OFFSET ecx, ECX-OLDESP;\
193 CFI_OFFSET ebx, EBX-OLDESP
125 194
126ENTRY(ret_from_fork) 195ENTRY(ret_from_fork)
196 CFI_STARTPROC
127 pushl %eax 197 pushl %eax
 198 CFI_ADJUST_CFA_OFFSET 4
128 call schedule_tail 199 call schedule_tail
129 GET_THREAD_INFO(%ebp) 200 GET_THREAD_INFO(%ebp)
130 popl %eax 201 popl %eax
202 CFI_ADJUST_CFA_OFFSET -4
131 jmp syscall_exit 203 jmp syscall_exit
204 CFI_ENDPROC
132 205
133/* 206/*
134 * Return to user mode is not as complex as all this looks, 207 * Return to user mode is not as complex as all this looks,
@@ -139,10 +212,12 @@ ENTRY(ret_from_fork)
139 212
140 # userspace resumption stub bypassing syscall exit tracing 213 # userspace resumption stub bypassing syscall exit tracing
141 ALIGN 214 ALIGN
215 RING0_PTREGS_FRAME
142ret_from_exception: 216ret_from_exception:
143 preempt_stop 217 preempt_stop
144ret_from_intr: 218ret_from_intr:
145 GET_THREAD_INFO(%ebp) 219 GET_THREAD_INFO(%ebp)
220check_userspace:
146 movl EFLAGS(%esp), %eax # mix EFLAGS and CS 221 movl EFLAGS(%esp), %eax # mix EFLAGS and CS
147 movb CS(%esp), %al 222 movb CS(%esp), %al
148 testl $(VM_MASK | 3), %eax 223 testl $(VM_MASK | 3), %eax
@@ -171,20 +246,38 @@ need_resched:
171 call preempt_schedule_irq 246 call preempt_schedule_irq
172 jmp need_resched 247 jmp need_resched
173#endif 248#endif
249 CFI_ENDPROC
174 250
175/* SYSENTER_RETURN points to after the "sysenter" instruction in 251/* SYSENTER_RETURN points to after the "sysenter" instruction in
 176 the vsyscall page. See vsyscall-sysenter.S, which defines the symbol. */ 252 the vsyscall page. See vsyscall-sysenter.S, which defines the symbol. */
177 253
178 # sysenter call handler stub 254 # sysenter call handler stub
179ENTRY(sysenter_entry) 255ENTRY(sysenter_entry)
256 CFI_STARTPROC simple
257 CFI_DEF_CFA esp, 0
258 CFI_REGISTER esp, ebp
180 movl TSS_sysenter_esp0(%esp),%esp 259 movl TSS_sysenter_esp0(%esp),%esp
181sysenter_past_esp: 260sysenter_past_esp:
182 sti 261 sti
183 pushl $(__USER_DS) 262 pushl $(__USER_DS)
263 CFI_ADJUST_CFA_OFFSET 4
264 /*CFI_REL_OFFSET ss, 0*/
184 pushl %ebp 265 pushl %ebp
266 CFI_ADJUST_CFA_OFFSET 4
267 CFI_REL_OFFSET esp, 0
185 pushfl 268 pushfl
269 CFI_ADJUST_CFA_OFFSET 4
186 pushl $(__USER_CS) 270 pushl $(__USER_CS)
187 pushl $SYSENTER_RETURN 271 CFI_ADJUST_CFA_OFFSET 4
272 /*CFI_REL_OFFSET cs, 0*/
273 /*
274 * Push current_thread_info()->sysenter_return to the stack.
275 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
276 * pushed above; +8 corresponds to copy_thread's esp0 setting.
277 */
278 pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
279 CFI_ADJUST_CFA_OFFSET 4
280 CFI_REL_OFFSET eip, 0
188 281
189/* 282/*
190 * Load the potential sixth argument from user stack. 283 * Load the potential sixth argument from user stack.
@@ -199,6 +292,7 @@ sysenter_past_esp:
199.previous 292.previous
200 293
201 pushl %eax 294 pushl %eax
295 CFI_ADJUST_CFA_OFFSET 4
202 SAVE_ALL 296 SAVE_ALL
203 GET_THREAD_INFO(%ebp) 297 GET_THREAD_INFO(%ebp)
204 298
@@ -219,11 +313,14 @@ sysenter_past_esp:
219 xorl %ebp,%ebp 313 xorl %ebp,%ebp
220 sti 314 sti
221 sysexit 315 sysexit
316 CFI_ENDPROC
222 317
223 318
224 # system call handler stub 319 # system call handler stub
225ENTRY(system_call) 320ENTRY(system_call)
321 RING0_INT_FRAME # can't unwind into user space anyway
226 pushl %eax # save orig_eax 322 pushl %eax # save orig_eax
323 CFI_ADJUST_CFA_OFFSET 4
227 SAVE_ALL 324 SAVE_ALL
228 GET_THREAD_INFO(%ebp) 325 GET_THREAD_INFO(%ebp)
229 testl $TF_MASK,EFLAGS(%esp) 326 testl $TF_MASK,EFLAGS(%esp)
@@ -256,10 +353,12 @@ restore_all:
256 movb CS(%esp), %al 353 movb CS(%esp), %al
257 andl $(VM_MASK | (4 << 8) | 3), %eax 354 andl $(VM_MASK | (4 << 8) | 3), %eax
258 cmpl $((4 << 8) | 3), %eax 355 cmpl $((4 << 8) | 3), %eax
356 CFI_REMEMBER_STATE
259 je ldt_ss # returning to user-space with LDT SS 357 je ldt_ss # returning to user-space with LDT SS
260restore_nocheck: 358restore_nocheck:
261 RESTORE_REGS 359 RESTORE_REGS
262 addl $4, %esp 360 addl $4, %esp
361 CFI_ADJUST_CFA_OFFSET -4
2631: iret 3621: iret
264.section .fixup,"ax" 363.section .fixup,"ax"
265iret_exc: 364iret_exc:
@@ -273,6 +372,7 @@ iret_exc:
273 .long 1b,iret_exc 372 .long 1b,iret_exc
274.previous 373.previous
275 374
375 CFI_RESTORE_STATE
276ldt_ss: 376ldt_ss:
277 larl OLDSS(%esp), %eax 377 larl OLDSS(%esp), %eax
278 jnz restore_nocheck 378 jnz restore_nocheck
@@ -285,11 +385,13 @@ ldt_ss:
285 * CPUs, which we can try to work around to make 385 * CPUs, which we can try to work around to make
286 * dosemu and wine happy. */ 386 * dosemu and wine happy. */
287 subl $8, %esp # reserve space for switch16 pointer 387 subl $8, %esp # reserve space for switch16 pointer
388 CFI_ADJUST_CFA_OFFSET 8
288 cli 389 cli
289 movl %esp, %eax 390 movl %esp, %eax
290 /* Set up the 16bit stack frame with switch32 pointer on top, 391 /* Set up the 16bit stack frame with switch32 pointer on top,
291 * and a switch16 pointer on top of the current frame. */ 392 * and a switch16 pointer on top of the current frame. */
292 call setup_x86_bogus_stack 393 call setup_x86_bogus_stack
394 CFI_ADJUST_CFA_OFFSET -8 # frame has moved
293 RESTORE_REGS 395 RESTORE_REGS
294 lss 20+4(%esp), %esp # switch to 16bit stack 396 lss 20+4(%esp), %esp # switch to 16bit stack
2951: iret 3971: iret
@@ -297,9 +399,11 @@ ldt_ss:
297 .align 4 399 .align 4
298 .long 1b,iret_exc 400 .long 1b,iret_exc
299.previous 401.previous
402 CFI_ENDPROC
300 403
301 # perform work that needs to be done immediately before resumption 404 # perform work that needs to be done immediately before resumption
302 ALIGN 405 ALIGN
406 RING0_PTREGS_FRAME # can't unwind into user space anyway
303work_pending: 407work_pending:
304 testb $_TIF_NEED_RESCHED, %cl 408 testb $_TIF_NEED_RESCHED, %cl
305 jz work_notifysig 409 jz work_notifysig
@@ -323,18 +427,20 @@ work_notifysig: # deal with pending signals and
323 # vm86-space 427 # vm86-space
324 xorl %edx, %edx 428 xorl %edx, %edx
325 call do_notify_resume 429 call do_notify_resume
326 jmp resume_userspace 430 jmp resume_userspace_sig
327 431
328 ALIGN 432 ALIGN
329work_notifysig_v86: 433work_notifysig_v86:
330#ifdef CONFIG_VM86 434#ifdef CONFIG_VM86
331 pushl %ecx # save ti_flags for do_notify_resume 435 pushl %ecx # save ti_flags for do_notify_resume
436 CFI_ADJUST_CFA_OFFSET 4
332 call save_v86_state # %eax contains pt_regs pointer 437 call save_v86_state # %eax contains pt_regs pointer
333 popl %ecx 438 popl %ecx
439 CFI_ADJUST_CFA_OFFSET -4
334 movl %eax, %esp 440 movl %eax, %esp
335 xorl %edx, %edx 441 xorl %edx, %edx
336 call do_notify_resume 442 call do_notify_resume
337 jmp resume_userspace 443 jmp resume_userspace_sig
338#endif 444#endif
339 445
340 # perform syscall exit tracing 446 # perform syscall exit tracing
@@ -363,19 +469,21 @@ syscall_exit_work:
363 movl $1, %edx 469 movl $1, %edx
364 call do_syscall_trace 470 call do_syscall_trace
365 jmp resume_userspace 471 jmp resume_userspace
472 CFI_ENDPROC
366 473
367 ALIGN 474 RING0_INT_FRAME # can't unwind into user space anyway
368syscall_fault: 475syscall_fault:
369 pushl %eax # save orig_eax 476 pushl %eax # save orig_eax
477 CFI_ADJUST_CFA_OFFSET 4
370 SAVE_ALL 478 SAVE_ALL
371 GET_THREAD_INFO(%ebp) 479 GET_THREAD_INFO(%ebp)
372 movl $-EFAULT,EAX(%esp) 480 movl $-EFAULT,EAX(%esp)
373 jmp resume_userspace 481 jmp resume_userspace
374 482
375 ALIGN
376syscall_badsys: 483syscall_badsys:
377 movl $-ENOSYS,EAX(%esp) 484 movl $-ENOSYS,EAX(%esp)
378 jmp resume_userspace 485 jmp resume_userspace
486 CFI_ENDPROC
379 487
380#define FIXUP_ESPFIX_STACK \ 488#define FIXUP_ESPFIX_STACK \
381 movl %esp, %eax; \ 489 movl %esp, %eax; \
@@ -387,16 +495,21 @@ syscall_badsys:
387 movl %eax, %esp; 495 movl %eax, %esp;
388#define UNWIND_ESPFIX_STACK \ 496#define UNWIND_ESPFIX_STACK \
389 pushl %eax; \ 497 pushl %eax; \
498 CFI_ADJUST_CFA_OFFSET 4; \
390 movl %ss, %eax; \ 499 movl %ss, %eax; \
391 /* see if on 16bit stack */ \ 500 /* see if on 16bit stack */ \
392 cmpw $__ESPFIX_SS, %ax; \ 501 cmpw $__ESPFIX_SS, %ax; \
393 jne 28f; \ 502 je 28f; \
394 movl $__KERNEL_DS, %edx; \ 50327: popl %eax; \
395 movl %edx, %ds; \ 504 CFI_ADJUST_CFA_OFFSET -4; \
396 movl %edx, %es; \ 505.section .fixup,"ax"; \
50628: movl $__KERNEL_DS, %eax; \
507 movl %eax, %ds; \
508 movl %eax, %es; \
397 /* switch to 32bit stack */ \ 509 /* switch to 32bit stack */ \
398 FIXUP_ESPFIX_STACK \ 510 FIXUP_ESPFIX_STACK; \
39928: popl %eax; 511 jmp 27b; \
512.previous
400 513
401/* 514/*
402 * Build the entry stubs and pointer table with 515 * Build the entry stubs and pointer table with
@@ -408,9 +521,14 @@ ENTRY(interrupt)
408 521
409vector=0 522vector=0
410ENTRY(irq_entries_start) 523ENTRY(irq_entries_start)
524 RING0_INT_FRAME
411.rept NR_IRQS 525.rept NR_IRQS
412 ALIGN 526 ALIGN
4131: pushl $vector-256 527 .if vector
528 CFI_ADJUST_CFA_OFFSET -4
529 .endif
5301: pushl $~(vector)
531 CFI_ADJUST_CFA_OFFSET 4
414 jmp common_interrupt 532 jmp common_interrupt
415.data 533.data
416 .long 1b 534 .long 1b
@@ -424,60 +542,99 @@ common_interrupt:
424 movl %esp,%eax 542 movl %esp,%eax
425 call do_IRQ 543 call do_IRQ
426 jmp ret_from_intr 544 jmp ret_from_intr
545 CFI_ENDPROC
427 546
428#define BUILD_INTERRUPT(name, nr) \ 547#define BUILD_INTERRUPT(name, nr) \
429ENTRY(name) \ 548ENTRY(name) \
430 pushl $nr-256; \ 549 RING0_INT_FRAME; \
431 SAVE_ALL \ 550 pushl $~(nr); \
551 CFI_ADJUST_CFA_OFFSET 4; \
552 SAVE_ALL; \
432 movl %esp,%eax; \ 553 movl %esp,%eax; \
433 call smp_/**/name; \ 554 call smp_/**/name; \
434 jmp ret_from_intr; 555 jmp ret_from_intr; \
556 CFI_ENDPROC
435 557
436/* The include is where all of the SMP etc. interrupts come from */ 558/* The include is where all of the SMP etc. interrupts come from */
437#include "entry_arch.h" 559#include "entry_arch.h"
438 560
439ENTRY(divide_error) 561ENTRY(divide_error)
562 RING0_INT_FRAME
440 pushl $0 # no error code 563 pushl $0 # no error code
564 CFI_ADJUST_CFA_OFFSET 4
441 pushl $do_divide_error 565 pushl $do_divide_error
566 CFI_ADJUST_CFA_OFFSET 4
442 ALIGN 567 ALIGN
443error_code: 568error_code:
444 pushl %ds 569 pushl %ds
570 CFI_ADJUST_CFA_OFFSET 4
571 /*CFI_REL_OFFSET ds, 0*/
445 pushl %eax 572 pushl %eax
573 CFI_ADJUST_CFA_OFFSET 4
574 CFI_REL_OFFSET eax, 0
446 xorl %eax, %eax 575 xorl %eax, %eax
447 pushl %ebp 576 pushl %ebp
577 CFI_ADJUST_CFA_OFFSET 4
578 CFI_REL_OFFSET ebp, 0
448 pushl %edi 579 pushl %edi
580 CFI_ADJUST_CFA_OFFSET 4
581 CFI_REL_OFFSET edi, 0
449 pushl %esi 582 pushl %esi
583 CFI_ADJUST_CFA_OFFSET 4
584 CFI_REL_OFFSET esi, 0
450 pushl %edx 585 pushl %edx
586 CFI_ADJUST_CFA_OFFSET 4
587 CFI_REL_OFFSET edx, 0
451 decl %eax # eax = -1 588 decl %eax # eax = -1
452 pushl %ecx 589 pushl %ecx
590 CFI_ADJUST_CFA_OFFSET 4
591 CFI_REL_OFFSET ecx, 0
453 pushl %ebx 592 pushl %ebx
593 CFI_ADJUST_CFA_OFFSET 4
594 CFI_REL_OFFSET ebx, 0
454 cld 595 cld
455 pushl %es 596 pushl %es
597 CFI_ADJUST_CFA_OFFSET 4
598 /*CFI_REL_OFFSET es, 0*/
456 UNWIND_ESPFIX_STACK 599 UNWIND_ESPFIX_STACK
457 popl %ecx 600 popl %ecx
601 CFI_ADJUST_CFA_OFFSET -4
602 /*CFI_REGISTER es, ecx*/
458 movl ES(%esp), %edi # get the function address 603 movl ES(%esp), %edi # get the function address
459 movl ORIG_EAX(%esp), %edx # get the error code 604 movl ORIG_EAX(%esp), %edx # get the error code
460 movl %eax, ORIG_EAX(%esp) 605 movl %eax, ORIG_EAX(%esp)
461 movl %ecx, ES(%esp) 606 movl %ecx, ES(%esp)
607 /*CFI_REL_OFFSET es, ES*/
462 movl $(__USER_DS), %ecx 608 movl $(__USER_DS), %ecx
463 movl %ecx, %ds 609 movl %ecx, %ds
464 movl %ecx, %es 610 movl %ecx, %es
465 movl %esp,%eax # pt_regs pointer 611 movl %esp,%eax # pt_regs pointer
466 call *%edi 612 call *%edi
467 jmp ret_from_exception 613 jmp ret_from_exception
614 CFI_ENDPROC
468 615
469ENTRY(coprocessor_error) 616ENTRY(coprocessor_error)
617 RING0_INT_FRAME
470 pushl $0 618 pushl $0
619 CFI_ADJUST_CFA_OFFSET 4
471 pushl $do_coprocessor_error 620 pushl $do_coprocessor_error
621 CFI_ADJUST_CFA_OFFSET 4
472 jmp error_code 622 jmp error_code
623 CFI_ENDPROC
473 624
474ENTRY(simd_coprocessor_error) 625ENTRY(simd_coprocessor_error)
626 RING0_INT_FRAME
475 pushl $0 627 pushl $0
628 CFI_ADJUST_CFA_OFFSET 4
476 pushl $do_simd_coprocessor_error 629 pushl $do_simd_coprocessor_error
630 CFI_ADJUST_CFA_OFFSET 4
477 jmp error_code 631 jmp error_code
632 CFI_ENDPROC
478 633
479ENTRY(device_not_available) 634ENTRY(device_not_available)
635 RING0_INT_FRAME
480 pushl $-1 # mark this as an int 636 pushl $-1 # mark this as an int
637 CFI_ADJUST_CFA_OFFSET 4
481 SAVE_ALL 638 SAVE_ALL
482 movl %cr0, %eax 639 movl %cr0, %eax
483 testl $0x4, %eax # EM (math emulation bit) 640 testl $0x4, %eax # EM (math emulation bit)
@@ -487,9 +644,12 @@ ENTRY(device_not_available)
487 jmp ret_from_exception 644 jmp ret_from_exception
488device_not_available_emulate: 645device_not_available_emulate:
489 pushl $0 # temporary storage for ORIG_EIP 646 pushl $0 # temporary storage for ORIG_EIP
647 CFI_ADJUST_CFA_OFFSET 4
490 call math_emulate 648 call math_emulate
491 addl $4, %esp 649 addl $4, %esp
650 CFI_ADJUST_CFA_OFFSET -4
492 jmp ret_from_exception 651 jmp ret_from_exception
652 CFI_ENDPROC
493 653
494/* 654/*
495 * Debug traps and NMI can happen at the one SYSENTER instruction 655 * Debug traps and NMI can happen at the one SYSENTER instruction
@@ -514,16 +674,19 @@ label: \
514 pushl $sysenter_past_esp 674 pushl $sysenter_past_esp
515 675
516KPROBE_ENTRY(debug) 676KPROBE_ENTRY(debug)
677 RING0_INT_FRAME
517 cmpl $sysenter_entry,(%esp) 678 cmpl $sysenter_entry,(%esp)
518 jne debug_stack_correct 679 jne debug_stack_correct
519 FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn) 680 FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
520debug_stack_correct: 681debug_stack_correct:
521 pushl $-1 # mark this as an int 682 pushl $-1 # mark this as an int
683 CFI_ADJUST_CFA_OFFSET 4
522 SAVE_ALL 684 SAVE_ALL
523 xorl %edx,%edx # error code 0 685 xorl %edx,%edx # error code 0
524 movl %esp,%eax # pt_regs pointer 686 movl %esp,%eax # pt_regs pointer
525 call do_debug 687 call do_debug
526 jmp ret_from_exception 688 jmp ret_from_exception
689 CFI_ENDPROC
527 .previous .text 690 .previous .text
528/* 691/*
529 * NMI is doubly nasty. It can happen _while_ we're handling 692 * NMI is doubly nasty. It can happen _while_ we're handling
@@ -534,14 +697,18 @@ debug_stack_correct:
534 * fault happened on the sysenter path. 697 * fault happened on the sysenter path.
535 */ 698 */
536ENTRY(nmi) 699ENTRY(nmi)
700 RING0_INT_FRAME
537 pushl %eax 701 pushl %eax
702 CFI_ADJUST_CFA_OFFSET 4
538 movl %ss, %eax 703 movl %ss, %eax
539 cmpw $__ESPFIX_SS, %ax 704 cmpw $__ESPFIX_SS, %ax
540 popl %eax 705 popl %eax
706 CFI_ADJUST_CFA_OFFSET -4
541 je nmi_16bit_stack 707 je nmi_16bit_stack
542 cmpl $sysenter_entry,(%esp) 708 cmpl $sysenter_entry,(%esp)
543 je nmi_stack_fixup 709 je nmi_stack_fixup
544 pushl %eax 710 pushl %eax
711 CFI_ADJUST_CFA_OFFSET 4
545 movl %esp,%eax 712 movl %esp,%eax
546 /* Do not access memory above the end of our stack page, 713 /* Do not access memory above the end of our stack page,
547 * it might not exist. 714 * it might not exist.
@@ -549,16 +716,19 @@ ENTRY(nmi)
549 andl $(THREAD_SIZE-1),%eax 716 andl $(THREAD_SIZE-1),%eax
550 cmpl $(THREAD_SIZE-20),%eax 717 cmpl $(THREAD_SIZE-20),%eax
551 popl %eax 718 popl %eax
719 CFI_ADJUST_CFA_OFFSET -4
552 jae nmi_stack_correct 720 jae nmi_stack_correct
553 cmpl $sysenter_entry,12(%esp) 721 cmpl $sysenter_entry,12(%esp)
554 je nmi_debug_stack_check 722 je nmi_debug_stack_check
555nmi_stack_correct: 723nmi_stack_correct:
556 pushl %eax 724 pushl %eax
725 CFI_ADJUST_CFA_OFFSET 4
557 SAVE_ALL 726 SAVE_ALL
558 xorl %edx,%edx # zero error code 727 xorl %edx,%edx # zero error code
559 movl %esp,%eax # pt_regs pointer 728 movl %esp,%eax # pt_regs pointer
560 call do_nmi 729 call do_nmi
561 jmp restore_all 730 jmp restore_all
731 CFI_ENDPROC
562 732
563nmi_stack_fixup: 733nmi_stack_fixup:
564 FIX_STACK(12,nmi_stack_correct, 1) 734 FIX_STACK(12,nmi_stack_correct, 1)
@@ -574,94 +744,177 @@ nmi_debug_stack_check:
574 jmp nmi_stack_correct 744 jmp nmi_stack_correct
575 745
576nmi_16bit_stack: 746nmi_16bit_stack:
747 RING0_INT_FRAME
577 /* create the pointer to lss back */ 748 /* create the pointer to lss back */
578 pushl %ss 749 pushl %ss
750 CFI_ADJUST_CFA_OFFSET 4
579 pushl %esp 751 pushl %esp
752 CFI_ADJUST_CFA_OFFSET 4
580 movzwl %sp, %esp 753 movzwl %sp, %esp
581 addw $4, (%esp) 754 addw $4, (%esp)
582 /* copy the iret frame of 12 bytes */ 755 /* copy the iret frame of 12 bytes */
583 .rept 3 756 .rept 3
584 pushl 16(%esp) 757 pushl 16(%esp)
758 CFI_ADJUST_CFA_OFFSET 4
585 .endr 759 .endr
586 pushl %eax 760 pushl %eax
761 CFI_ADJUST_CFA_OFFSET 4
587 SAVE_ALL 762 SAVE_ALL
588 FIXUP_ESPFIX_STACK # %eax == %esp 763 FIXUP_ESPFIX_STACK # %eax == %esp
764 CFI_ADJUST_CFA_OFFSET -20 # the frame has now moved
589 xorl %edx,%edx # zero error code 765 xorl %edx,%edx # zero error code
590 call do_nmi 766 call do_nmi
591 RESTORE_REGS 767 RESTORE_REGS
592 lss 12+4(%esp), %esp # back to 16bit stack 768 lss 12+4(%esp), %esp # back to 16bit stack
5931: iret 7691: iret
770 CFI_ENDPROC
594.section __ex_table,"a" 771.section __ex_table,"a"
595 .align 4 772 .align 4
596 .long 1b,iret_exc 773 .long 1b,iret_exc
597.previous 774.previous
598 775
599KPROBE_ENTRY(int3) 776KPROBE_ENTRY(int3)
777 RING0_INT_FRAME
600 pushl $-1 # mark this as an int 778 pushl $-1 # mark this as an int
779 CFI_ADJUST_CFA_OFFSET 4
601 SAVE_ALL 780 SAVE_ALL
602 xorl %edx,%edx # zero error code 781 xorl %edx,%edx # zero error code
603 movl %esp,%eax # pt_regs pointer 782 movl %esp,%eax # pt_regs pointer
604 call do_int3 783 call do_int3
605 jmp ret_from_exception 784 jmp ret_from_exception
785 CFI_ENDPROC
606 .previous .text 786 .previous .text
607 787
608ENTRY(overflow) 788ENTRY(overflow)
789 RING0_INT_FRAME
609 pushl $0 790 pushl $0
791 CFI_ADJUST_CFA_OFFSET 4
610 pushl $do_overflow 792 pushl $do_overflow
793 CFI_ADJUST_CFA_OFFSET 4
611 jmp error_code 794 jmp error_code
795 CFI_ENDPROC
612 796
613ENTRY(bounds) 797ENTRY(bounds)
798 RING0_INT_FRAME
614 pushl $0 799 pushl $0
800 CFI_ADJUST_CFA_OFFSET 4
615 pushl $do_bounds 801 pushl $do_bounds
802 CFI_ADJUST_CFA_OFFSET 4
616 jmp error_code 803 jmp error_code
804 CFI_ENDPROC
617 805
618ENTRY(invalid_op) 806ENTRY(invalid_op)
807 RING0_INT_FRAME
619 pushl $0 808 pushl $0
809 CFI_ADJUST_CFA_OFFSET 4
620 pushl $do_invalid_op 810 pushl $do_invalid_op
811 CFI_ADJUST_CFA_OFFSET 4
621 jmp error_code 812 jmp error_code
813 CFI_ENDPROC
622 814
623ENTRY(coprocessor_segment_overrun) 815ENTRY(coprocessor_segment_overrun)
816 RING0_INT_FRAME
624 pushl $0 817 pushl $0
818 CFI_ADJUST_CFA_OFFSET 4
625 pushl $do_coprocessor_segment_overrun 819 pushl $do_coprocessor_segment_overrun
820 CFI_ADJUST_CFA_OFFSET 4
626 jmp error_code 821 jmp error_code
822 CFI_ENDPROC
627 823
628ENTRY(invalid_TSS) 824ENTRY(invalid_TSS)
825 RING0_EC_FRAME
629 pushl $do_invalid_TSS 826 pushl $do_invalid_TSS
827 CFI_ADJUST_CFA_OFFSET 4
630 jmp error_code 828 jmp error_code
829 CFI_ENDPROC
631 830
632ENTRY(segment_not_present) 831ENTRY(segment_not_present)
832 RING0_EC_FRAME
633 pushl $do_segment_not_present 833 pushl $do_segment_not_present
834 CFI_ADJUST_CFA_OFFSET 4
634 jmp error_code 835 jmp error_code
836 CFI_ENDPROC
635 837
636ENTRY(stack_segment) 838ENTRY(stack_segment)
839 RING0_EC_FRAME
637 pushl $do_stack_segment 840 pushl $do_stack_segment
841 CFI_ADJUST_CFA_OFFSET 4
638 jmp error_code 842 jmp error_code
843 CFI_ENDPROC
639 844
640KPROBE_ENTRY(general_protection) 845KPROBE_ENTRY(general_protection)
846 RING0_EC_FRAME
641 pushl $do_general_protection 847 pushl $do_general_protection
848 CFI_ADJUST_CFA_OFFSET 4
642 jmp error_code 849 jmp error_code
850 CFI_ENDPROC
643 .previous .text 851 .previous .text
644 852
645ENTRY(alignment_check) 853ENTRY(alignment_check)
854 RING0_EC_FRAME
646 pushl $do_alignment_check 855 pushl $do_alignment_check
856 CFI_ADJUST_CFA_OFFSET 4
647 jmp error_code 857 jmp error_code
858 CFI_ENDPROC
648 859
649KPROBE_ENTRY(page_fault) 860KPROBE_ENTRY(page_fault)
861 RING0_EC_FRAME
650 pushl $do_page_fault 862 pushl $do_page_fault
863 CFI_ADJUST_CFA_OFFSET 4
651 jmp error_code 864 jmp error_code
865 CFI_ENDPROC
652 .previous .text 866 .previous .text
653 867
654#ifdef CONFIG_X86_MCE 868#ifdef CONFIG_X86_MCE
655ENTRY(machine_check) 869ENTRY(machine_check)
870 RING0_INT_FRAME
656 pushl $0 871 pushl $0
872 CFI_ADJUST_CFA_OFFSET 4
657 pushl machine_check_vector 873 pushl machine_check_vector
874 CFI_ADJUST_CFA_OFFSET 4
658 jmp error_code 875 jmp error_code
876 CFI_ENDPROC
659#endif 877#endif
660 878
661ENTRY(spurious_interrupt_bug) 879ENTRY(spurious_interrupt_bug)
880 RING0_INT_FRAME
662 pushl $0 881 pushl $0
882 CFI_ADJUST_CFA_OFFSET 4
663 pushl $do_spurious_interrupt_bug 883 pushl $do_spurious_interrupt_bug
884 CFI_ADJUST_CFA_OFFSET 4
664 jmp error_code 885 jmp error_code
886 CFI_ENDPROC
887
888#ifdef CONFIG_STACK_UNWIND
889ENTRY(arch_unwind_init_running)
890 CFI_STARTPROC
891 movl 4(%esp), %edx
892 movl (%esp), %ecx
893 leal 4(%esp), %eax
894 movl %ebx, EBX(%edx)
895 xorl %ebx, %ebx
896 movl %ebx, ECX(%edx)
897 movl %ebx, EDX(%edx)
898 movl %esi, ESI(%edx)
899 movl %edi, EDI(%edx)
900 movl %ebp, EBP(%edx)
901 movl %ebx, EAX(%edx)
902 movl $__USER_DS, DS(%edx)
903 movl $__USER_DS, ES(%edx)
904 movl %ebx, ORIG_EAX(%edx)
905 movl %ecx, EIP(%edx)
906 movl 12(%esp), %ecx
907 movl $__KERNEL_CS, CS(%edx)
908 movl %ebx, EFLAGS(%edx)
909 movl %eax, OLDESP(%edx)
910 movl 8(%esp), %eax
911 movl %ecx, 8(%esp)
912 movl EBX(%edx), %ebx
913 movl $__KERNEL_DS, OLDSS(%edx)
914 jmpl *%eax
915 CFI_ENDPROC
916ENDPROC(arch_unwind_init_running)
917#endif
665 918
666.section .rodata,"a" 919.section .rodata,"a"
667#include "syscall_table.S" 920#include "syscall_table.S"
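The trickiest arithmetic in the sysenter path above is the operand of the pushl that fetches current_thread_info()->sysenter_return: at that point %esp sits 4 pushed words plus the 8 bytes left by copy_thread's esp0 setting below the top of the THREAD_SIZE-aligned stack, and thread_info lives at the bottom of that stack. A sketch with made-up THREAD_SIZE and field offset, checking that the displacement lands on the field:

#include <stdio.h>

int main(void)
{
        unsigned long THREAD_SIZE = 8192;
        unsigned long TI_sysenter_return = 56;  /* made-up offsetof() value */

        unsigned long thread_info = 0xc1000000; /* bottom of kernel stack */
        unsigned long stack_top = thread_info + THREAD_SIZE;
        unsigned long esp = stack_top - 8 - 4 * 4;      /* esp0, minus 4 pushes */

        long disp = (long)TI_sysenter_return - (long)THREAD_SIZE + 8 + 4 * 4;
        unsigned long target = esp + disp;

        printf("field at %#lx, pushl reads %#lx\n",
               thread_info + TI_sysenter_return, target);
        return 0;
}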
diff --git a/arch/i386/kernel/hpet.c b/arch/i386/kernel/hpet.c
new file mode 100644
index 000000000000..c6737c35815d
--- /dev/null
+++ b/arch/i386/kernel/hpet.c
@@ -0,0 +1,67 @@
1#include <linux/clocksource.h>
2#include <linux/errno.h>
3#include <linux/hpet.h>
4#include <linux/init.h>
5
6#include <asm/hpet.h>
7#include <asm/io.h>
8
9#define HPET_MASK CLOCKSOURCE_MASK(32)
10#define HPET_SHIFT 22
11
12/* FSEC = 10^-15 NSEC = 10^-9 */
13#define FSEC_PER_NSEC 1000000
14
15static void *hpet_ptr;
16
17static cycle_t read_hpet(void)
18{
19 return (cycle_t)readl(hpet_ptr);
20}
21
22static struct clocksource clocksource_hpet = {
23 .name = "hpet",
24 .rating = 250,
25 .read = read_hpet,
26 .mask = HPET_MASK,
27 .mult = 0, /* set below */
28 .shift = HPET_SHIFT,
29 .is_continuous = 1,
30};
31
32static int __init init_hpet_clocksource(void)
33{
34 unsigned long hpet_period;
35 void __iomem* hpet_base;
36 u64 tmp;
37
38 if (!hpet_address)
39 return -ENODEV;
40
41 /* calculate the hpet address: */
42 hpet_base =
43 (void __iomem*)ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
44 hpet_ptr = hpet_base + HPET_COUNTER;
45
46 /* calculate the frequency: */
47 hpet_period = readl(hpet_base + HPET_PERIOD);
48
49 /*
50 * hpet period is in femtoseconds per cycle
51 * so we need to convert this to ns/cyc units
52 * approximated by mult/2^shift
53 *
54 * fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift
55 * fsec/cyc * 1ns/1000000fsec * 2^shift = mult
56 * fsec/cyc * 2^shift * 1nsec/1000000fsec = mult
57 * (fsec/cyc << shift)/1000000 = mult
58 * (hpet_period << shift)/FSEC_PER_NSEC = mult
59 */
60 tmp = (u64)hpet_period << HPET_SHIFT;
61 do_div(tmp, FSEC_PER_NSEC);
62 clocksource_hpet.mult = (u32)tmp;
63
64 return clocksource_register(&clocksource_hpet);
65}
66
67module_init(init_hpet_clocksource);
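The mult/shift computation in init_hpet_clocksource() turns the HPET period (femtoseconds per cycle) into a fixed-point ns-per-cycle factor so that ns = (cycles * mult) >> shift. A sketch with a typical but hard-coded 14.318 MHz period, checking that one second's worth of cycles converts back to roughly 10^9 ns:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t hpet_period_fs = 69841279;     /* fs per cycle, illustrative */
        unsigned shift = 22;

        uint64_t mult = (hpet_period_fs << shift) / 1000000;    /* fs -> ns */

        /* One second worth of cycles should convert back to ~1e9 ns. */
        uint64_t cycles_per_sec = 1000000000000000ull / hpet_period_fs;
        uint64_t ns = (cycles_per_sec * mult) >> shift;

        printf("mult=%llu, 1s of cycles -> %llu ns\n",
               (unsigned long long)mult, (unsigned long long)ns);
        return 0;
}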
diff --git a/arch/i386/kernel/i8253.c b/arch/i386/kernel/i8253.c
new file mode 100644
index 000000000000..477b24daff53
--- /dev/null
+++ b/arch/i386/kernel/i8253.c
@@ -0,0 +1,118 @@
1/*
2 * i8253.c 8253/PIT functions
3 *
4 */
5#include <linux/clocksource.h>
6#include <linux/spinlock.h>
7#include <linux/jiffies.h>
8#include <linux/sysdev.h>
9#include <linux/module.h>
10#include <linux/init.h>
11
12#include <asm/smp.h>
13#include <asm/delay.h>
14#include <asm/i8253.h>
15#include <asm/io.h>
16
17#include "io_ports.h"
18
19DEFINE_SPINLOCK(i8253_lock);
20EXPORT_SYMBOL(i8253_lock);
21
22void setup_pit_timer(void)
23{
24 unsigned long flags;
25
26 spin_lock_irqsave(&i8253_lock, flags);
27 outb_p(0x34,PIT_MODE); /* binary, mode 2, LSB/MSB, ch 0 */
28 udelay(10);
29 outb_p(LATCH & 0xff , PIT_CH0); /* LSB */
30 udelay(10);
31 outb(LATCH >> 8 , PIT_CH0); /* MSB */
32 spin_unlock_irqrestore(&i8253_lock, flags);
33}
34
35/*
36 * Since the PIT overflows every tick, it's not very useful
37 * to just read by itself. So use jiffies to emulate a free
38 * running counter:
39 */
40static cycle_t pit_read(void)
41{
42 unsigned long flags;
43 int count;
44 u32 jifs;
45 static int old_count;
46 static u32 old_jifs;
47
48 spin_lock_irqsave(&i8253_lock, flags);
49 /*
50 * Although our caller may have the read side of xtime_lock,
51 * this is now a seqlock, and we are cheating in this routine
52 * by having side effects on state that we cannot undo if
53 * there is a collision on the seqlock and our caller has to
54 * retry. (Namely, old_jifs and old_count.) So we must treat
55 * jiffies as volatile despite the lock. We read jiffies
56 * before latching the timer count to guarantee that although
57 * the jiffies value might be older than the count (that is,
58 * the counter may underflow between the last point where
59 * jiffies was incremented and the point where we latch the
60 * count), it cannot be newer.
61 */
62 jifs = jiffies;
63 outb_p(0x00, PIT_MODE); /* latch the count ASAP */
64 count = inb_p(PIT_CH0); /* read the latched count */
65 count |= inb_p(PIT_CH0) << 8;
66
67 /* VIA686a test code... reset the latch if count > max + 1 */
68 if (count > LATCH) {
69 outb_p(0x34, PIT_MODE);
70 outb_p(LATCH & 0xff, PIT_CH0);
71 outb(LATCH >> 8, PIT_CH0);
72 count = LATCH - 1;
73 }
74
75 /*
76 * It's possible for count to appear to go the wrong way for a
77 * couple of reasons:
78 *
79 * 1. The timer counter underflows, but we haven't handled the
80 * resulting interrupt and incremented jiffies yet.
81 * 2. Hardware problem with the timer, not giving us continuous time,
82 * the counter does small "jumps" upwards on some Pentium systems,
83 * (see c't 95/10 page 335 for Neptun bug.)
84 *
85 * Previous attempts to handle these cases intelligently were
86 * buggy, so we just do the simple thing now.
87 */
88 if (count > old_count && jifs == old_jifs) {
89 count = old_count;
90 }
91 old_count = count;
92 old_jifs = jifs;
93
94 spin_unlock_irqrestore(&i8253_lock, flags);
95
96 count = (LATCH - 1) - count;
97
98 return (cycle_t)(jifs * LATCH) + count;
99}
100
101static struct clocksource clocksource_pit = {
102 .name = "pit",
103 .rating = 110,
104 .read = pit_read,
105 .mask = CLOCKSOURCE_MASK(32),
106 .mult = 0,
107 .shift = 20,
108};
109
110static int __init init_pit_clocksource(void)
111{
112 if (num_possible_cpus() > 4) /* PIT does not scale! */
113 return 0;
114
115 clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20);
116 return clocksource_register(&clocksource_pit);
117}
118module_init(init_pit_clocksource);
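
The free-running value that pit_read() synthesizes from jiffies plus the latched count is plain arithmetic and can be reproduced outside the kernel. A small sketch, with an assumed LATCH value (not the kernel macro):

#include <stdio.h>
#include <stdint.h>

#define LATCH 11932	/* ~1193182 Hz / HZ=100; illustrative */

/* Mirror of the composition in pit_read(): the PIT counts down from
 * LATCH-1, so (LATCH - 1) - count is the elapsed fraction of the current
 * tick, and jiffies supplies the whole ticks. */
static uint64_t pit_compose(uint32_t jifs, int count)
{
	return (uint64_t)jifs * LATCH + ((LATCH - 1) - count);
}

int main(void)
{
	printf("mid-tick:  %llu\n", (unsigned long long)pit_compose(1000, LATCH / 2));
	printf("late-tick: %llu\n", (unsigned long long)pit_compose(1000, 3));
	return 0;
}

The second value is larger than the first even though jiffies has not advanced, which is the whole point of folding the latched count into the result.
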
diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c
index b7636b96e104..c1a42feba286 100644
--- a/arch/i386/kernel/i8259.c
+++ b/arch/i386/kernel/i8259.c
@@ -175,7 +175,7 @@ static void mask_and_ack_8259A(unsigned int irq)
175 * Lightweight spurious IRQ detection. We do not want 175 * Lightweight spurious IRQ detection. We do not want
176 * to overdo spurious IRQ handling - it's usually a sign 176 * to overdo spurious IRQ handling - it's usually a sign
177 * of hardware problems, so we only do the checks we can 177 * of hardware problems, so we only do the checks we can
178 * do without slowing down good hardware unnecesserily. 178 * do without slowing down good hardware unnecessarily.
179 * 179 *
180 * Note that IRQ7 and IRQ15 (the two spurious IRQs 180 * Note that IRQ7 and IRQ15 (the two spurious IRQs
181 * usually resulting from the 8259A-1|2 PICs) occur 181 * usually resulting from the 8259A-1|2 PICs) occur
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index a62df3e764c5..72ae414e4d49 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -38,6 +38,7 @@
38#include <asm/desc.h> 38#include <asm/desc.h>
39#include <asm/timer.h> 39#include <asm/timer.h>
40#include <asm/i8259.h> 40#include <asm/i8259.h>
41#include <asm/nmi.h>
41 42
42#include <mach_apic.h> 43#include <mach_apic.h>
43 44
@@ -50,6 +51,7 @@ atomic_t irq_mis_count;
50static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; 51static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
51 52
52static DEFINE_SPINLOCK(ioapic_lock); 53static DEFINE_SPINLOCK(ioapic_lock);
54static DEFINE_SPINLOCK(vector_lock);
53 55
54int timer_over_8254 __initdata = 1; 56int timer_over_8254 __initdata = 1;
55 57
@@ -1161,10 +1163,17 @@ u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
1161int assign_irq_vector(int irq) 1163int assign_irq_vector(int irq)
1162{ 1164{
1163 static int current_vector = FIRST_DEVICE_VECTOR, offset = 0; 1165 static int current_vector = FIRST_DEVICE_VECTOR, offset = 0;
1166 unsigned long flags;
1167 int vector;
1168
1169 BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
1164 1170
1165 BUG_ON(irq >= NR_IRQ_VECTORS); 1171 spin_lock_irqsave(&vector_lock, flags);
1166 if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) 1172
1173 if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
1174 spin_unlock_irqrestore(&vector_lock, flags);
1167 return IO_APIC_VECTOR(irq); 1175 return IO_APIC_VECTOR(irq);
1176 }
1168next: 1177next:
1169 current_vector += 8; 1178 current_vector += 8;
1170 if (current_vector == SYSCALL_VECTOR) 1179 if (current_vector == SYSCALL_VECTOR)
@@ -1172,16 +1181,21 @@ next:
1172 1181
1173 if (current_vector >= FIRST_SYSTEM_VECTOR) { 1182 if (current_vector >= FIRST_SYSTEM_VECTOR) {
1174 offset++; 1183 offset++;
1175 if (!(offset%8)) 1184 if (!(offset%8)) {
1185 spin_unlock_irqrestore(&vector_lock, flags);
1176 return -ENOSPC; 1186 return -ENOSPC;
1187 }
1177 current_vector = FIRST_DEVICE_VECTOR + offset; 1188 current_vector = FIRST_DEVICE_VECTOR + offset;
1178 } 1189 }
1179 1190
1180 vector_irq[current_vector] = irq; 1191 vector = current_vector;
1192 vector_irq[vector] = irq;
1181 if (irq != AUTO_ASSIGN) 1193 if (irq != AUTO_ASSIGN)
1182 IO_APIC_VECTOR(irq) = current_vector; 1194 IO_APIC_VECTOR(irq) = vector;
1183 1195
1184 return current_vector; 1196 spin_unlock_irqrestore(&vector_lock, flags);
1197
1198 return vector;
1185} 1199}
1186 1200
1187static struct hw_interrupt_type ioapic_level_type; 1201static struct hw_interrupt_type ioapic_level_type;
@@ -1193,21 +1207,14 @@ static struct hw_interrupt_type ioapic_edge_type;
1193 1207
1194static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger) 1208static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger)
1195{ 1209{
1196 if (use_pci_vector() && !platform_legacy_irq(irq)) { 1210 unsigned idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
1197 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || 1211
1198 trigger == IOAPIC_LEVEL) 1212 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
1199 irq_desc[vector].handler = &ioapic_level_type; 1213 trigger == IOAPIC_LEVEL)
1200 else 1214 irq_desc[idx].handler = &ioapic_level_type;
1201 irq_desc[vector].handler = &ioapic_edge_type; 1215 else
1202 set_intr_gate(vector, interrupt[vector]); 1216 irq_desc[idx].handler = &ioapic_edge_type;
1203 } else { 1217 set_intr_gate(vector, interrupt[idx]);
1204 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
1205 trigger == IOAPIC_LEVEL)
1206 irq_desc[irq].handler = &ioapic_level_type;
1207 else
1208 irq_desc[irq].handler = &ioapic_edge_type;
1209 set_intr_gate(vector, interrupt[irq]);
1210 }
1211} 1218}
1212 1219
1213static void __init setup_IO_APIC_irqs(void) 1220static void __init setup_IO_APIC_irqs(void)
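
The main point of the assign_irq_vector() rework above is that the allocator is now serialized by vector_lock and every return path drops the lock. This is not the kernel allocator itself, but a minimal userspace analogue of that pattern, with a pthread mutex standing in for the spinlock and a hypothetical NVEC table size:

#include <pthread.h>
#include <stdio.h>

#define NVEC 16

static pthread_mutex_t vector_lock = PTHREAD_MUTEX_INITIALIZER;
static int vector_irq[NVEC];
static int next_vector;

/* Hand out the next free vector; every exit path releases the lock. */
static int assign_vector(int irq)
{
	int vector;

	pthread_mutex_lock(&vector_lock);
	if (next_vector >= NVEC) {
		pthread_mutex_unlock(&vector_lock);
		return -1;			/* -ENOSPC analogue */
	}
	vector = next_vector++;
	vector_irq[vector] = irq;
	pthread_mutex_unlock(&vector_lock);
	return vector;
}

int main(void)
{
	printf("irq 9  -> vector %d\n", assign_vector(9));
	printf("irq 10 -> vector %d\n", assign_vector(10));
	return 0;
}
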
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index 49ce4c31b713..c703bc7b0880 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -53,8 +53,8 @@ static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
53 */ 53 */
54fastcall unsigned int do_IRQ(struct pt_regs *regs) 54fastcall unsigned int do_IRQ(struct pt_regs *regs)
55{ 55{
56 /* high bits used in ret_from_ code */ 56 /* high bit used in ret_from_ code */
57 int irq = regs->orig_eax & 0xff; 57 int irq = ~regs->orig_eax;
58#ifdef CONFIG_4KSTACKS 58#ifdef CONFIG_4KSTACKS
59 union irq_ctx *curctx, *irqctx; 59 union irq_ctx *curctx, *irqctx;
60 u32 *isp; 60 u32 *isp;
@@ -100,8 +100,8 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
100 * softirq checks work in the hardirq context. 100 * softirq checks work in the hardirq context.
101 */ 101 */
102 irqctx->tinfo.preempt_count = 102 irqctx->tinfo.preempt_count =
103 irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK | 103 (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
104 curctx->tinfo.preempt_count & SOFTIRQ_MASK; 104 (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
105 105
106 asm volatile( 106 asm volatile(
107 " xchgl %%ebx,%%esp \n" 107 " xchgl %%ebx,%%esp \n"
@@ -227,7 +227,7 @@ int show_interrupts(struct seq_file *p, void *v)
227 if (i == 0) { 227 if (i == 0) {
228 seq_printf(p, " "); 228 seq_printf(p, " ");
229 for_each_online_cpu(j) 229 for_each_online_cpu(j)
230 seq_printf(p, "CPU%d ",j); 230 seq_printf(p, "CPU%-8d",j);
231 seq_putc(p, '\n'); 231 seq_putc(p, '\n');
232 } 232 }
233 233
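
The switch from masking the low byte to a bitwise complement in do_IRQ() reflects the new encoding used by the entry code: the stored value is the complement of the IRQ number, so it stays negative and the return path can tell interrupt frames from syscall frames by the sign bits, while a second complement recovers the number. A trivial sketch of just that encoding round trip:

#include <stdio.h>

int main(void)
{
	int irq = 9;
	long orig_eax = ~(long)irq;	/* what the interrupt stub stores */

	printf("orig_eax = %ld (negative, so not a syscall number)\n", orig_eax);
	printf("recovered irq = %ld\n", ~orig_eax);
	return 0;
}
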
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 395a9a6dff88..727e419ad78a 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -57,34 +57,85 @@ static __always_inline void set_jmp_op(void *from, void *to)
57/* 57/*
58 * returns non-zero if opcodes can be boosted. 58 * returns non-zero if opcodes can be boosted.
59 */ 59 */
60static __always_inline int can_boost(kprobe_opcode_t opcode) 60static __always_inline int can_boost(kprobe_opcode_t *opcodes)
61{ 61{
62 switch (opcode & 0xf0 ) { 62#define W(row,b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,ba,bb,bc,bd,be,bf) \
63 (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \
64 (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) | \
65 (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) | \
66 (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf)) \
67 << (row % 32))
68 /*
69 * Undefined/reserved opcodes, conditional jump, Opcode Extension
70 * Groups, and some special opcodes can not be boost.
71 */
72 static const unsigned long twobyte_is_boostable[256 / 32] = {
73 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
74 /* ------------------------------- */
75 W(0x00, 0,0,1,1,0,0,1,0,1,1,0,0,0,0,0,0)| /* 00 */
76 W(0x10, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 10 */
77 W(0x20, 1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0)| /* 20 */
78 W(0x30, 0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 30 */
79 W(0x40, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 40 */
80 W(0x50, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 50 */
81 W(0x60, 1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1)| /* 60 */
82 W(0x70, 0,0,0,0,1,1,1,1,0,0,0,0,0,0,1,1), /* 70 */
83 W(0x80, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 80 */
84 W(0x90, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1), /* 90 */
85 W(0xa0, 1,1,0,1,1,1,0,0,1,1,0,1,1,1,0,1)| /* a0 */
86 W(0xb0, 1,1,1,1,1,1,1,1,0,0,0,1,1,1,1,1), /* b0 */
87 W(0xc0, 1,1,0,0,0,0,0,0,1,1,1,1,1,1,1,1)| /* c0 */
88 W(0xd0, 0,1,1,1,0,1,0,0,1,1,0,1,1,1,0,1), /* d0 */
89 W(0xe0, 0,1,1,0,0,1,0,0,1,1,0,1,1,1,0,1)| /* e0 */
90 W(0xf0, 0,1,1,1,0,1,0,0,1,1,1,0,1,1,1,0) /* f0 */
91 /* ------------------------------- */
92 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
93 };
94#undef W
95 kprobe_opcode_t opcode;
96 kprobe_opcode_t *orig_opcodes = opcodes;
97retry:
98 if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
99 return 0;
100 opcode = *(opcodes++);
101
102 /* 2nd-byte opcode */
103 if (opcode == 0x0f) {
104 if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
105 return 0;
106 return test_bit(*opcodes, twobyte_is_boostable);
107 }
108
109 switch (opcode & 0xf0) {
110 case 0x60:
111 if (0x63 < opcode && opcode < 0x67)
112 goto retry; /* prefixes */
113 /* can't boost Address-size override and bound */
114 return (opcode != 0x62 && opcode != 0x67);
63 case 0x70: 115 case 0x70:
64 return 0; /* can't boost conditional jump */ 116 return 0; /* can't boost conditional jump */
65 case 0x90:
66 /* can't boost call and pushf */
67 return opcode != 0x9a && opcode != 0x9c;
68 case 0xc0: 117 case 0xc0:
69 /* can't boost undefined opcodes and soft-interruptions */ 118 /* can't boost software interrupts */
70 return (0xc1 < opcode && opcode < 0xc6) || 119 return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
71 (0xc7 < opcode && opcode < 0xcc) || opcode == 0xcf;
72 case 0xd0: 120 case 0xd0:
73 /* can boost AA* and XLAT */ 121 /* can boost AA* and XLAT */
74 return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7); 122 return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
75 case 0xe0: 123 case 0xe0:
76 /* can boost in/out and (may be) jmps */ 124 /* can boost in/out and absolute jmps */
77 return (0xe3 < opcode && opcode != 0xe8); 125 return ((opcode & 0x04) || opcode == 0xea);
78 case 0xf0: 126 case 0xf0:
127 if ((opcode & 0x0c) == 0 && opcode != 0xf1)
128 goto retry; /* lock/rep(ne) prefix */
79 /* clear and set flags can be boost */ 129 /* clear and set flags can be boost */
80 return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe)); 130 return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
81 default: 131 default:
82 /* currently, can't boost 2 bytes opcodes */ 132 if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
83 return opcode != 0x0f; 133 goto retry; /* prefixes */
134 /* can't boost CS override and call */
135 return (opcode != 0x2e && opcode != 0x9a);
84 } 136 }
85} 137}
86 138
87
88/* 139/*
89 * returns non-zero if opcode modifies the interrupt flag. 140 * returns non-zero if opcode modifies the interrupt flag.
90 */ 141 */
@@ -109,7 +160,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
109 160
110 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); 161 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
111 p->opcode = *p->addr; 162 p->opcode = *p->addr;
112 if (can_boost(p->opcode)) { 163 if (can_boost(p->addr)) {
113 p->ainsn.boostable = 0; 164 p->ainsn.boostable = 0;
114 } else { 165 } else {
115 p->ainsn.boostable = -1; 166 p->ainsn.boostable = -1;
@@ -208,7 +259,9 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
208 struct kprobe_ctlblk *kcb; 259 struct kprobe_ctlblk *kcb;
209#ifdef CONFIG_PREEMPT 260#ifdef CONFIG_PREEMPT
210 unsigned pre_preempt_count = preempt_count(); 261 unsigned pre_preempt_count = preempt_count();
211#endif /* CONFIG_PREEMPT */ 262#else
263 unsigned pre_preempt_count = 1;
264#endif
212 265
213 addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t)); 266 addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));
214 267
@@ -285,22 +338,14 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
285 /* handler has already set things up, so skip ss setup */ 338 /* handler has already set things up, so skip ss setup */
286 return 1; 339 return 1;
287 340
288 if (p->ainsn.boostable == 1 && 341ss_probe:
289#ifdef CONFIG_PREEMPT 342 if (pre_preempt_count && p->ainsn.boostable == 1 && !p->post_handler){
290 !(pre_preempt_count) && /*
291 * This enables booster when the direct
292 * execution path aren't preempted.
293 */
294#endif /* CONFIG_PREEMPT */
295 !p->post_handler && !p->break_handler ) {
296 /* Boost up -- we can execute copied instructions directly */ 343 /* Boost up -- we can execute copied instructions directly */
297 reset_current_kprobe(); 344 reset_current_kprobe();
298 regs->eip = (unsigned long)p->ainsn.insn; 345 regs->eip = (unsigned long)p->ainsn.insn;
299 preempt_enable_no_resched(); 346 preempt_enable_no_resched();
300 return 1; 347 return 1;
301 } 348 }
302
303ss_probe:
304 prepare_singlestep(p, regs); 349 prepare_singlestep(p, regs);
305 kcb->kprobe_status = KPROBE_HIT_SS; 350 kcb->kprobe_status = KPROBE_HIT_SS;
306 return 1; 351 return 1;
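
The W() macro above packs one "boostable" flag per two-byte opcode into a 256-bit table, with two 16-opcode rows sharing each 32-bit chunk, so can_boost() answers with a single test_bit(). A compressed userspace illustration of the same packing and lookup; only the first two rows are populated, with bit values copied from the table in the patch:

#include <stdio.h>
#include <stdint.h>

#define W(row, b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,ba,bb,bc,bd,be,bf) \
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) | \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) | \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf)) \
	 << (row % 32))

static const unsigned long table[256 / 32] = {
	W(0x00, 0,0,1,1,0,0,1,0,1,1,0,0,0,0,0,0) |
	W(0x10, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
};

/* userspace stand-in for test_bit(opcode, table), using 32-bit chunks */
static int boostable(uint8_t opcode)
{
	return (table[opcode / 32] >> (opcode % 32)) & 1;
}

int main(void)
{
	printf("0x0f 0x02: %d\n", boostable(0x02));	/* 1 per the table above */
	printf("0x0f 0x10: %d\n", boostable(0x10));	/* 0 */
	return 0;
}
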
diff --git a/arch/i386/kernel/machine_kexec.c b/arch/i386/kernel/machine_kexec.c
index f73d7374a2ba..511abe52a94e 100644
--- a/arch/i386/kernel/machine_kexec.c
+++ b/arch/i386/kernel/machine_kexec.c
@@ -133,9 +133,9 @@ typedef asmlinkage NORET_TYPE void (*relocate_new_kernel_t)(
133 unsigned long start_address, 133 unsigned long start_address,
134 unsigned int has_pae) ATTRIB_NORET; 134 unsigned int has_pae) ATTRIB_NORET;
135 135
136const extern unsigned char relocate_new_kernel[]; 136extern const unsigned char relocate_new_kernel[];
137extern void relocate_new_kernel_end(void); 137extern void relocate_new_kernel_end(void);
138const extern unsigned int relocate_new_kernel_size; 138extern const unsigned int relocate_new_kernel_size;
139 139
140/* 140/*
141 * A architecture hook called to validate the 141 * A architecture hook called to validate the
diff --git a/arch/i386/kernel/msr.c b/arch/i386/kernel/msr.c
index 7a328230e540..d022cb8fd725 100644
--- a/arch/i386/kernel/msr.c
+++ b/arch/i386/kernel/msr.c
@@ -266,7 +266,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb, unsigned long acti
266 return NOTIFY_OK; 266 return NOTIFY_OK;
267} 267}
268 268
269static struct notifier_block msr_class_cpu_notifier = 269static struct notifier_block __cpuinitdata msr_class_cpu_notifier =
270{ 270{
271 .notifier_call = msr_class_cpu_callback, 271 .notifier_call = msr_class_cpu_callback,
272}; 272};
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index d43b498ec745..a76e93146585 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -14,21 +14,17 @@
14 */ 14 */
15 15
16#include <linux/config.h> 16#include <linux/config.h>
17#include <linux/mm.h>
18#include <linux/delay.h> 17#include <linux/delay.h>
19#include <linux/bootmem.h>
20#include <linux/smp_lock.h>
21#include <linux/interrupt.h> 18#include <linux/interrupt.h>
22#include <linux/mc146818rtc.h>
23#include <linux/kernel_stat.h>
24#include <linux/module.h> 19#include <linux/module.h>
25#include <linux/nmi.h> 20#include <linux/nmi.h>
26#include <linux/sysdev.h> 21#include <linux/sysdev.h>
27#include <linux/sysctl.h> 22#include <linux/sysctl.h>
23#include <linux/percpu.h>
28 24
29#include <asm/smp.h> 25#include <asm/smp.h>
30#include <asm/div64.h>
31#include <asm/nmi.h> 26#include <asm/nmi.h>
27#include <asm/intel_arch_perfmon.h>
32 28
33#include "mach_traps.h" 29#include "mach_traps.h"
34 30
@@ -100,6 +96,9 @@ int nmi_active;
100 (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \ 96 (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \
101 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE) 97 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
102 98
99#define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
100#define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
101
103#ifdef CONFIG_SMP 102#ifdef CONFIG_SMP
104/* The performance counters used by NMI_LOCAL_APIC don't trigger when 103/* The performance counters used by NMI_LOCAL_APIC don't trigger when
105 * the CPU is idle. To make sure the NMI watchdog really ticks on all 104 * the CPU is idle. To make sure the NMI watchdog really ticks on all
@@ -212,6 +211,8 @@ static int __init setup_nmi_watchdog(char *str)
212 211
213__setup("nmi_watchdog=", setup_nmi_watchdog); 212__setup("nmi_watchdog=", setup_nmi_watchdog);
214 213
214static void disable_intel_arch_watchdog(void);
215
215static void disable_lapic_nmi_watchdog(void) 216static void disable_lapic_nmi_watchdog(void)
216{ 217{
217 if (nmi_active <= 0) 218 if (nmi_active <= 0)
@@ -221,6 +222,10 @@ static void disable_lapic_nmi_watchdog(void)
221 wrmsr(MSR_K7_EVNTSEL0, 0, 0); 222 wrmsr(MSR_K7_EVNTSEL0, 0, 0);
222 break; 223 break;
223 case X86_VENDOR_INTEL: 224 case X86_VENDOR_INTEL:
225 if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
226 disable_intel_arch_watchdog();
227 break;
228 }
224 switch (boot_cpu_data.x86) { 229 switch (boot_cpu_data.x86) {
225 case 6: 230 case 6:
226 if (boot_cpu_data.x86_model > 0xd) 231 if (boot_cpu_data.x86_model > 0xd)
@@ -449,6 +454,53 @@ static int setup_p4_watchdog(void)
449 return 1; 454 return 1;
450} 455}
451 456
457static void disable_intel_arch_watchdog(void)
458{
459 unsigned ebx;
460
461 /*
462 * Check whether the Architectural PerfMon supports
463 * Unhalted Core Cycles Event or not.
464 * NOTE: Corresponding bit = 0 in ebx indicates event present.
465 */
466 ebx = cpuid_ebx(10);
467 if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
468 wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, 0, 0);
469}
470
471static int setup_intel_arch_watchdog(void)
472{
473 unsigned int evntsel;
474 unsigned ebx;
475
476 /*
477 * Check whether the Architectural PerfMon supports
478 * Unhalted Core Cycles Event or not.
479 * NOTE: Corresponding bit = 0 in ebx indicates event present.
480 */
481 ebx = cpuid_ebx(10);
482 if ((ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
483 return 0;
484
485 nmi_perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
486
487 clear_msr_range(MSR_ARCH_PERFMON_EVENTSEL0, 2);
488 clear_msr_range(MSR_ARCH_PERFMON_PERFCTR0, 2);
489
490 evntsel = ARCH_PERFMON_EVENTSEL_INT
491 | ARCH_PERFMON_EVENTSEL_OS
492 | ARCH_PERFMON_EVENTSEL_USR
493 | ARCH_PERFMON_NMI_EVENT_SEL
494 | ARCH_PERFMON_NMI_EVENT_UMASK;
495
496 wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
497 write_watchdog_counter("INTEL_ARCH_PERFCTR0");
498 apic_write(APIC_LVTPC, APIC_DM_NMI);
499 evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
500 wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
501 return 1;
502}
503
452void setup_apic_nmi_watchdog (void) 504void setup_apic_nmi_watchdog (void)
453{ 505{
454 switch (boot_cpu_data.x86_vendor) { 506 switch (boot_cpu_data.x86_vendor) {
@@ -458,6 +510,11 @@ void setup_apic_nmi_watchdog (void)
458 setup_k7_watchdog(); 510 setup_k7_watchdog();
459 break; 511 break;
460 case X86_VENDOR_INTEL: 512 case X86_VENDOR_INTEL:
513 if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
514 if (!setup_intel_arch_watchdog())
515 return;
516 break;
517 }
461 switch (boot_cpu_data.x86) { 518 switch (boot_cpu_data.x86) {
462 case 6: 519 case 6:
463 if (boot_cpu_data.x86_model > 0xd) 520 if (boot_cpu_data.x86_model > 0xd)
@@ -561,7 +618,8 @@ void nmi_watchdog_tick (struct pt_regs * regs)
561 wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0); 618 wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
562 apic_write(APIC_LVTPC, APIC_DM_NMI); 619 apic_write(APIC_LVTPC, APIC_DM_NMI);
563 } 620 }
564 else if (nmi_perfctr_msr == MSR_P6_PERFCTR0) { 621 else if (nmi_perfctr_msr == MSR_P6_PERFCTR0 ||
622 nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
565 /* Only P6 based Pentium M need to re-unmask 623 /* Only P6 based Pentium M need to re-unmask
566 * the apic vector but it doesn't hurt 624 * the apic vector but it doesn't hurt
567 * other P6 variant */ 625 * other P6 variant */
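
The detection logic in setup_intel_arch_watchdog() hinges on CPUID leaf 0x0A: a *clear* bit 0 in EBX means the Unhalted Core Cycles event is supported. The same check can be made from userspace on x86 with GCC/Clang's <cpuid.h>; this is only a sketch of the probe, not the watchdog programming itself:

#include <stdio.h>
#include <cpuid.h>

#define UNHALTED_CORE_CYCLES_PRESENT (1 << 0)

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x0a, &eax, &ebx, &ecx, &edx)) {
		printf("CPUID leaf 0x0a not available\n");
		return 1;
	}
	/* clear bit => event supported, same polarity as the kernel check */
	if (ebx & UNHALTED_CORE_CYCLES_PRESENT)
		printf("Unhalted Core Cycles event not available\n");
	else
		printf("Unhalted Core Cycles event available\n");
	return 0;
}
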
diff --git a/arch/i386/kernel/numaq.c b/arch/i386/kernel/numaq.c
index 5f5b075f860a..0caf14652bad 100644
--- a/arch/i386/kernel/numaq.c
+++ b/arch/i386/kernel/numaq.c
@@ -79,10 +79,12 @@ int __init get_memcfg_numaq(void)
79 return 1; 79 return 1;
80} 80}
81 81
82static int __init numaq_dsc_disable(void) 82static int __init numaq_tsc_disable(void)
83{ 83{
84 printk(KERN_DEBUG "NUMAQ: disabling TSC\n"); 84 if (num_online_nodes() > 1) {
85 tsc_disable = 1; 85 printk(KERN_DEBUG "NUMAQ: disabling TSC\n");
86 tsc_disable = 1;
87 }
86 return 0; 88 return 0;
87} 89}
88core_initcall(numaq_dsc_disable); 90arch_initcall(numaq_tsc_disable);
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 6259afea46d1..6946b06e2784 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -102,7 +102,7 @@ void default_idle(void)
102 local_irq_enable(); 102 local_irq_enable();
103 103
104 if (!hlt_counter && boot_cpu_data.hlt_works_ok) { 104 if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
105 clear_thread_flag(TIF_POLLING_NRFLAG); 105 current_thread_info()->status &= ~TS_POLLING;
106 smp_mb__after_clear_bit(); 106 smp_mb__after_clear_bit();
107 while (!need_resched()) { 107 while (!need_resched()) {
108 local_irq_disable(); 108 local_irq_disable();
@@ -111,7 +111,7 @@ void default_idle(void)
111 else 111 else
112 local_irq_enable(); 112 local_irq_enable();
113 } 113 }
114 set_thread_flag(TIF_POLLING_NRFLAG); 114 current_thread_info()->status |= TS_POLLING;
115 } else { 115 } else {
116 while (!need_resched()) 116 while (!need_resched())
117 cpu_relax(); 117 cpu_relax();
@@ -174,7 +174,7 @@ void cpu_idle(void)
174{ 174{
175 int cpu = smp_processor_id(); 175 int cpu = smp_processor_id();
176 176
177 set_thread_flag(TIF_POLLING_NRFLAG); 177 current_thread_info()->status |= TS_POLLING;
178 178
179 /* endless idle loop with no priority at all */ 179 /* endless idle loop with no priority at all */
180 while (1) { 180 while (1) {
@@ -312,7 +312,7 @@ void show_regs(struct pt_regs * regs)
312 cr3 = read_cr3(); 312 cr3 = read_cr3();
313 cr4 = read_cr4_safe(); 313 cr4 = read_cr4_safe();
314 printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4); 314 printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
315 show_trace(NULL, &regs->esp); 315 show_trace(NULL, regs, &regs->esp);
316} 316}
317 317
318/* 318/*
diff --git a/arch/i386/kernel/scx200.c b/arch/i386/kernel/scx200.c
index 321f5fd26e75..9bf590cefc7d 100644
--- a/arch/i386/kernel/scx200.c
+++ b/arch/i386/kernel/scx200.c
@@ -9,6 +9,7 @@
9#include <linux/errno.h> 9#include <linux/errno.h>
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/mutex.h>
12#include <linux/pci.h> 13#include <linux/pci.h>
13 14
14#include <linux/scx200.h> 15#include <linux/scx200.h>
@@ -45,11 +46,19 @@ static struct pci_driver scx200_pci_driver = {
45 .probe = scx200_probe, 46 .probe = scx200_probe,
46}; 47};
47 48
48static DEFINE_SPINLOCK(scx200_gpio_config_lock); 49static DEFINE_MUTEX(scx200_gpio_config_lock);
49 50
50static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 51static void __devinit scx200_init_shadow(void)
51{ 52{
52 int bank; 53 int bank;
54
55 /* read the current values driven on the GPIO signals */
56 for (bank = 0; bank < 2; ++bank)
57 scx200_gpio_shadow[bank] = inl(scx200_gpio_base + 0x10 * bank);
58}
59
60static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
61{
53 unsigned base; 62 unsigned base;
54 63
55 if (pdev->device == PCI_DEVICE_ID_NS_SCx200_BRIDGE || 64 if (pdev->device == PCI_DEVICE_ID_NS_SCx200_BRIDGE ||
@@ -63,10 +72,7 @@ static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_
63 } 72 }
64 73
65 scx200_gpio_base = base; 74 scx200_gpio_base = base;
66 75 scx200_init_shadow();
67 /* read the current values driven on the GPIO signals */
68 for (bank = 0; bank < 2; ++bank)
69 scx200_gpio_shadow[bank] = inl(scx200_gpio_base + 0x10 * bank);
70 76
71 } else { 77 } else {
72 /* find the base of the Configuration Block */ 78 /* find the base of the Configuration Block */
@@ -87,12 +93,11 @@ static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_
87 return 0; 93 return 0;
88} 94}
89 95
90u32 scx200_gpio_configure(int index, u32 mask, u32 bits) 96u32 scx200_gpio_configure(unsigned index, u32 mask, u32 bits)
91{ 97{
92 u32 config, new_config; 98 u32 config, new_config;
93 unsigned long flags;
94 99
95 spin_lock_irqsave(&scx200_gpio_config_lock, flags); 100 mutex_lock(&scx200_gpio_config_lock);
96 101
97 outl(index, scx200_gpio_base + 0x20); 102 outl(index, scx200_gpio_base + 0x20);
98 config = inl(scx200_gpio_base + 0x24); 103 config = inl(scx200_gpio_base + 0x24);
@@ -100,45 +105,11 @@ u32 scx200_gpio_configure(int index, u32 mask, u32 bits)
100 new_config = (config & mask) | bits; 105 new_config = (config & mask) | bits;
101 outl(new_config, scx200_gpio_base + 0x24); 106 outl(new_config, scx200_gpio_base + 0x24);
102 107
103 spin_unlock_irqrestore(&scx200_gpio_config_lock, flags); 108 mutex_unlock(&scx200_gpio_config_lock);
104 109
105 return config; 110 return config;
106} 111}
107 112
108#if 0
109void scx200_gpio_dump(unsigned index)
110{
111 u32 config = scx200_gpio_configure(index, ~0, 0);
112 printk(KERN_DEBUG "GPIO%02u: 0x%08lx", index, (unsigned long)config);
113
114 if (config & 1)
115 printk(" OE"); /* output enabled */
116 else
117 printk(" TS"); /* tristate */
118 if (config & 2)
119 printk(" PP"); /* push pull */
120 else
121 printk(" OD"); /* open drain */
122 if (config & 4)
123 printk(" PUE"); /* pull up enabled */
124 else
125 printk(" PUD"); /* pull up disabled */
126 if (config & 8)
127 printk(" LOCKED"); /* locked */
128 if (config & 16)
129 printk(" LEVEL"); /* level input */
130 else
131 printk(" EDGE"); /* edge input */
132 if (config & 32)
133 printk(" HI"); /* trigger on rising edge */
134 else
135 printk(" LO"); /* trigger on falling edge */
136 if (config & 64)
137 printk(" DEBOUNCE"); /* debounce */
138 printk("\n");
139}
140#endif /* 0 */
141
142static int __init scx200_init(void) 113static int __init scx200_init(void)
143{ 114{
144 printk(KERN_INFO NAME ": NatSemi SCx200 Driver\n"); 115 printk(KERN_INFO NAME ": NatSemi SCx200 Driver\n");
@@ -159,10 +130,3 @@ EXPORT_SYMBOL(scx200_gpio_base);
159EXPORT_SYMBOL(scx200_gpio_shadow); 130EXPORT_SYMBOL(scx200_gpio_shadow);
160EXPORT_SYMBOL(scx200_gpio_configure); 131EXPORT_SYMBOL(scx200_gpio_configure);
161EXPORT_SYMBOL(scx200_cb_base); 132EXPORT_SYMBOL(scx200_cb_base);
162
163/*
164 Local variables:
165 compile-command: "make -k -C ../../.. SUBDIRS=arch/i386/kernel modules"
166 c-basic-offset: 8
167 End:
168*/
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 6bef9273733e..4a65040cc624 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -1575,6 +1575,7 @@ void __init setup_arch(char **cmdline_p)
1575 conswitchp = &dummy_con; 1575 conswitchp = &dummy_con;
1576#endif 1576#endif
1577#endif 1577#endif
1578 tsc_init();
1578} 1579}
1579 1580
1580static __init int add_pcspkr(void) 1581static __init int add_pcspkr(void)
diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c
index 5c352c3a9e7f..43002cfb40c4 100644
--- a/arch/i386/kernel/signal.c
+++ b/arch/i386/kernel/signal.c
@@ -351,7 +351,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
351 goto give_sigsegv; 351 goto give_sigsegv;
352 } 352 }
353 353
354 restorer = &__kernel_sigreturn; 354 restorer = (void *)VDSO_SYM(&__kernel_sigreturn);
355 if (ka->sa.sa_flags & SA_RESTORER) 355 if (ka->sa.sa_flags & SA_RESTORER)
356 restorer = ka->sa.sa_restorer; 356 restorer = ka->sa.sa_restorer;
357 357
@@ -447,7 +447,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
447 goto give_sigsegv; 447 goto give_sigsegv;
448 448
449 /* Set up to return from userspace. */ 449 /* Set up to return from userspace. */
450 restorer = &__kernel_rt_sigreturn; 450 restorer = (void *)VDSO_SYM(&__kernel_rt_sigreturn);
451 if (ka->sa.sa_flags & SA_RESTORER) 451 if (ka->sa.sa_flags & SA_RESTORER)
452 restorer = ka->sa.sa_restorer; 452 restorer = ka->sa.sa_restorer;
453 err |= __put_user(restorer, &frame->pretcode); 453 err |= __put_user(restorer, &frame->pretcode);
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index d134e9643a58..c10789d7a9d3 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -114,7 +114,17 @@ DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_m
114 114
115static inline int __prepare_ICR (unsigned int shortcut, int vector) 115static inline int __prepare_ICR (unsigned int shortcut, int vector)
116{ 116{
117 return APIC_DM_FIXED | shortcut | vector | APIC_DEST_LOGICAL; 117 unsigned int icr = shortcut | APIC_DEST_LOGICAL;
118
119 switch (vector) {
120 default:
121 icr |= APIC_DM_FIXED | vector;
122 break;
123 case NMI_VECTOR:
124 icr |= APIC_DM_NMI;
125 break;
126 }
127 return icr;
118} 128}
119 129
120static inline int __prepare_ICR2 (unsigned int mask) 130static inline int __prepare_ICR2 (unsigned int mask)
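
The __prepare_ICR() change above special-cases NMI IPIs: instead of encoding a fixed vector, the ICR gets delivery mode NMI. A self-contained sketch of the bit composition; the constants mirror the i386 APIC definitions of this era but are hard-coded here purely for illustration:

#include <stdio.h>

#define APIC_DEST_LOGICAL 0x00800
#define APIC_DM_FIXED     0x00000
#define APIC_DM_NMI       0x00400
#define NMI_VECTOR        0x02

/* Mirror of the reworked __prepare_ICR(): NMI IPIs carry a delivery mode,
 * not a vector number. */
static unsigned int prepare_icr(unsigned int shortcut, int vector)
{
	unsigned int icr = shortcut | APIC_DEST_LOGICAL;

	if (vector == NMI_VECTOR)
		icr |= APIC_DM_NMI;
	else
		icr |= APIC_DM_FIXED | vector;
	return icr;
}

int main(void)
{
	printf("fixed-vector IPI: 0x%x\n", prepare_icr(0, 0xfd));	/* example vector */
	printf("NMI IPI:          0x%x\n", prepare_icr(0, NMI_VECTOR));
	return 0;
}
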
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index bd0ca5c9f053..89e7315e539c 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -52,6 +52,7 @@
52#include <asm/tlbflush.h> 52#include <asm/tlbflush.h>
53#include <asm/desc.h> 53#include <asm/desc.h>
54#include <asm/arch_hooks.h> 54#include <asm/arch_hooks.h>
55#include <asm/nmi.h>
55 56
56#include <mach_apic.h> 57#include <mach_apic.h>
57#include <mach_wakecpu.h> 58#include <mach_wakecpu.h>
@@ -66,12 +67,6 @@ int smp_num_siblings = 1;
66EXPORT_SYMBOL(smp_num_siblings); 67EXPORT_SYMBOL(smp_num_siblings);
67#endif 68#endif
68 69
69/* Package ID of each logical CPU */
70int phys_proc_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
71
72/* Core ID of each logical CPU */
73int cpu_core_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
74
75/* Last level cache ID of each logical CPU */ 70/* Last level cache ID of each logical CPU */
76int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID}; 71int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
77 72
@@ -453,10 +448,12 @@ cpumask_t cpu_coregroup_map(int cpu)
453 struct cpuinfo_x86 *c = cpu_data + cpu; 448 struct cpuinfo_x86 *c = cpu_data + cpu;
454 /* 449 /*
455 * For perf, we return last level cache shared map. 450 * For perf, we return last level cache shared map.
456 * TBD: when power saving sched policy is added, we will return 451 * And for power savings, we return cpu_core_map
457 * cpu_core_map when power saving policy is enabled
458 */ 452 */
459 return c->llc_shared_map; 453 if (sched_mc_power_savings || sched_smt_power_savings)
454 return cpu_core_map[cpu];
455 else
456 return c->llc_shared_map;
460} 457}
461 458
462/* representing cpus for which sibling maps can be computed */ 459/* representing cpus for which sibling maps can be computed */
@@ -472,8 +469,8 @@ set_cpu_sibling_map(int cpu)
472 469
473 if (smp_num_siblings > 1) { 470 if (smp_num_siblings > 1) {
474 for_each_cpu_mask(i, cpu_sibling_setup_map) { 471 for_each_cpu_mask(i, cpu_sibling_setup_map) {
475 if (phys_proc_id[cpu] == phys_proc_id[i] && 472 if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
476 cpu_core_id[cpu] == cpu_core_id[i]) { 473 c[cpu].cpu_core_id == c[i].cpu_core_id) {
477 cpu_set(i, cpu_sibling_map[cpu]); 474 cpu_set(i, cpu_sibling_map[cpu]);
478 cpu_set(cpu, cpu_sibling_map[i]); 475 cpu_set(cpu, cpu_sibling_map[i]);
479 cpu_set(i, cpu_core_map[cpu]); 476 cpu_set(i, cpu_core_map[cpu]);
@@ -500,7 +497,7 @@ set_cpu_sibling_map(int cpu)
500 cpu_set(i, c[cpu].llc_shared_map); 497 cpu_set(i, c[cpu].llc_shared_map);
501 cpu_set(cpu, c[i].llc_shared_map); 498 cpu_set(cpu, c[i].llc_shared_map);
502 } 499 }
503 if (phys_proc_id[cpu] == phys_proc_id[i]) { 500 if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
504 cpu_set(i, cpu_core_map[cpu]); 501 cpu_set(i, cpu_core_map[cpu]);
505 cpu_set(cpu, cpu_core_map[i]); 502 cpu_set(cpu, cpu_core_map[i]);
506 /* 503 /*
@@ -1055,6 +1052,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
1055 struct warm_boot_cpu_info info; 1052 struct warm_boot_cpu_info info;
1056 struct work_struct task; 1053 struct work_struct task;
1057 int apicid, ret; 1054 int apicid, ret;
1055 struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
1058 1056
1059 apicid = x86_cpu_to_apicid[cpu]; 1057 apicid = x86_cpu_to_apicid[cpu];
1060 if (apicid == BAD_APICID) { 1058 if (apicid == BAD_APICID) {
@@ -1062,6 +1060,18 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
1062 goto exit; 1060 goto exit;
1063 } 1061 }
1064 1062
1063 /*
1064 * the CPU isn't initialized at boot time, allocate gdt table here.
1065 * cpu_init will initialize it
1066 */
1067 if (!cpu_gdt_descr->address) {
1068 cpu_gdt_descr->address = get_zeroed_page(GFP_KERNEL);
1069 if (!cpu_gdt_descr->address)
1070 printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
1071 ret = -ENOMEM;
1072 goto exit;
1073 }
1074
1065 info.complete = &done; 1075 info.complete = &done;
1066 info.apicid = apicid; 1076 info.apicid = apicid;
1067 info.cpu = cpu; 1077 info.cpu = cpu;
@@ -1339,8 +1349,8 @@ remove_siblinginfo(int cpu)
1339 cpu_clear(cpu, cpu_sibling_map[sibling]); 1349 cpu_clear(cpu, cpu_sibling_map[sibling]);
1340 cpus_clear(cpu_sibling_map[cpu]); 1350 cpus_clear(cpu_sibling_map[cpu]);
1341 cpus_clear(cpu_core_map[cpu]); 1351 cpus_clear(cpu_core_map[cpu]);
1342 phys_proc_id[cpu] = BAD_APICID; 1352 c[cpu].phys_proc_id = 0;
1343 cpu_core_id[cpu] = BAD_APICID; 1353 c[cpu].cpu_core_id = 0;
1344 cpu_clear(cpu, cpu_sibling_setup_map); 1354 cpu_clear(cpu, cpu_sibling_setup_map);
1345} 1355}
1346 1356
diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c
index 0bada1870bdf..c60419dee018 100644
--- a/arch/i386/kernel/sysenter.c
+++ b/arch/i386/kernel/sysenter.c
@@ -2,6 +2,8 @@
2 * linux/arch/i386/kernel/sysenter.c 2 * linux/arch/i386/kernel/sysenter.c
3 * 3 *
4 * (C) Copyright 2002 Linus Torvalds 4 * (C) Copyright 2002 Linus Torvalds
5 * Portions based on the vdso-randomization code from exec-shield:
6 * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
5 * 7 *
6 * This file contains the needed initializations to support sysenter. 8 * This file contains the needed initializations to support sysenter.
7 */ 9 */
@@ -13,12 +15,31 @@
13#include <linux/gfp.h> 15#include <linux/gfp.h>
14#include <linux/string.h> 16#include <linux/string.h>
15#include <linux/elf.h> 17#include <linux/elf.h>
18#include <linux/mm.h>
19#include <linux/module.h>
16 20
17#include <asm/cpufeature.h> 21#include <asm/cpufeature.h>
18#include <asm/msr.h> 22#include <asm/msr.h>
19#include <asm/pgtable.h> 23#include <asm/pgtable.h>
20#include <asm/unistd.h> 24#include <asm/unistd.h>
21 25
26/*
27 * Should the kernel map a VDSO page into processes and pass its
28 * address down to glibc upon exec()?
29 */
30unsigned int __read_mostly vdso_enabled = 1;
31
32EXPORT_SYMBOL_GPL(vdso_enabled);
33
34static int __init vdso_setup(char *s)
35{
36 vdso_enabled = simple_strtoul(s, NULL, 0);
37
38 return 1;
39}
40
41__setup("vdso=", vdso_setup);
42
22extern asmlinkage void sysenter_entry(void); 43extern asmlinkage void sysenter_entry(void);
23 44
24void enable_sep_cpu(void) 45void enable_sep_cpu(void)
@@ -45,23 +66,122 @@ void enable_sep_cpu(void)
45 */ 66 */
46extern const char vsyscall_int80_start, vsyscall_int80_end; 67extern const char vsyscall_int80_start, vsyscall_int80_end;
47extern const char vsyscall_sysenter_start, vsyscall_sysenter_end; 68extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
69static void *syscall_page;
48 70
49int __init sysenter_setup(void) 71int __init sysenter_setup(void)
50{ 72{
51 void *page = (void *)get_zeroed_page(GFP_ATOMIC); 73 syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
52 74
53 __set_fixmap(FIX_VSYSCALL, __pa(page), PAGE_READONLY_EXEC); 75#ifdef CONFIG_COMPAT_VDSO
76 __set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_READONLY);
77 printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
78#else
79 /*
80 * In the non-compat case the ELF coredumping code needs the fixmap:
81 */
82 __set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_KERNEL_RO);
83#endif
54 84
55 if (!boot_cpu_has(X86_FEATURE_SEP)) { 85 if (!boot_cpu_has(X86_FEATURE_SEP)) {
56 memcpy(page, 86 memcpy(syscall_page,
57 &vsyscall_int80_start, 87 &vsyscall_int80_start,
58 &vsyscall_int80_end - &vsyscall_int80_start); 88 &vsyscall_int80_end - &vsyscall_int80_start);
59 return 0; 89 return 0;
60 } 90 }
61 91
62 memcpy(page, 92 memcpy(syscall_page,
63 &vsyscall_sysenter_start, 93 &vsyscall_sysenter_start,
64 &vsyscall_sysenter_end - &vsyscall_sysenter_start); 94 &vsyscall_sysenter_end - &vsyscall_sysenter_start);
65 95
66 return 0; 96 return 0;
67} 97}
98
99static struct page *syscall_nopage(struct vm_area_struct *vma,
100 unsigned long adr, int *type)
101{
102 struct page *p = virt_to_page(adr - vma->vm_start + syscall_page);
103 get_page(p);
104 return p;
105}
106
107/* Prevent VMA merging */
108static void syscall_vma_close(struct vm_area_struct *vma)
109{
110}
111
112static struct vm_operations_struct syscall_vm_ops = {
113 .close = syscall_vma_close,
114 .nopage = syscall_nopage,
115};
116
117/* Defined in vsyscall-sysenter.S */
118extern void SYSENTER_RETURN;
119
120/* Setup a VMA at program startup for the vsyscall page */
121int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
122{
123 struct vm_area_struct *vma;
124 struct mm_struct *mm = current->mm;
125 unsigned long addr;
126 int ret;
127
128 down_write(&mm->mmap_sem);
129 addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
130 if (IS_ERR_VALUE(addr)) {
131 ret = addr;
132 goto up_fail;
133 }
134
135 vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
136 if (!vma) {
137 ret = -ENOMEM;
138 goto up_fail;
139 }
140
141 vma->vm_start = addr;
142 vma->vm_end = addr + PAGE_SIZE;
143 /* MAYWRITE to allow gdb to COW and set breakpoints */
144 vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
145 vma->vm_flags |= mm->def_flags;
146 vma->vm_page_prot = protection_map[vma->vm_flags & 7];
147 vma->vm_ops = &syscall_vm_ops;
148 vma->vm_mm = mm;
149
150 ret = insert_vm_struct(mm, vma);
151 if (ret)
152 goto free_vma;
153
154 current->mm->context.vdso = (void *)addr;
155 current_thread_info()->sysenter_return =
156 (void *)VDSO_SYM(&SYSENTER_RETURN);
157 mm->total_vm++;
158up_fail:
159 up_write(&mm->mmap_sem);
160 return ret;
161
162free_vma:
163 kmem_cache_free(vm_area_cachep, vma);
164 return ret;
165}
166
167const char *arch_vma_name(struct vm_area_struct *vma)
168{
169 if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
170 return "[vdso]";
171 return NULL;
172}
173
174struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
175{
176 return NULL;
177}
178
179int in_gate_area(struct task_struct *task, unsigned long addr)
180{
181 return 0;
182}
183
184int in_gate_area_no_task(unsigned long addr)
185{
186 return 0;
187}
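
The vDSO mapping set up in arch_setup_additional_pages() is advertised to the new process through the ELF auxiliary vector, which is also how glibc finds it. A small Linux-only userspace check (getauxval() needs glibc 2.16 or later), unrelated to the kernel internals above but showing the visible end of the mechanism:

#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	unsigned long ehdr = getauxval(AT_SYSINFO_EHDR);

	if (ehdr)
		printf("[vdso] ELF header at 0x%lx\n", ehdr);
	else
		printf("no vDSO advertised in the auxiliary vector\n");
	return 0;
}
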
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index 9d3074759856..5f43d0410122 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -82,13 +82,6 @@ extern unsigned long wall_jiffies;
82DEFINE_SPINLOCK(rtc_lock); 82DEFINE_SPINLOCK(rtc_lock);
83EXPORT_SYMBOL(rtc_lock); 83EXPORT_SYMBOL(rtc_lock);
84 84
85#include <asm/i8253.h>
86
87DEFINE_SPINLOCK(i8253_lock);
88EXPORT_SYMBOL(i8253_lock);
89
90struct timer_opts *cur_timer __read_mostly = &timer_none;
91
92/* 85/*
93 * This is a special lock that is owned by the CPU and holds the index 86 * This is a special lock that is owned by the CPU and holds the index
94 * register we are working with. It is required for NMI access to the 87 * register we are working with. It is required for NMI access to the
@@ -118,99 +111,19 @@ void rtc_cmos_write(unsigned char val, unsigned char addr)
118} 111}
119EXPORT_SYMBOL(rtc_cmos_write); 112EXPORT_SYMBOL(rtc_cmos_write);
120 113
121/*
122 * This version of gettimeofday has microsecond resolution
123 * and better than microsecond precision on fast x86 machines with TSC.
124 */
125void do_gettimeofday(struct timeval *tv)
126{
127 unsigned long seq;
128 unsigned long usec, sec;
129 unsigned long max_ntp_tick;
130
131 do {
132 unsigned long lost;
133
134 seq = read_seqbegin(&xtime_lock);
135
136 usec = cur_timer->get_offset();
137 lost = jiffies - wall_jiffies;
138
139 /*
140 * If time_adjust is negative then NTP is slowing the clock
141 * so make sure not to go into next possible interval.
142 * Better to lose some accuracy than have time go backwards..
143 */
144 if (unlikely(time_adjust < 0)) {
145 max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
146 usec = min(usec, max_ntp_tick);
147
148 if (lost)
149 usec += lost * max_ntp_tick;
150 }
151 else if (unlikely(lost))
152 usec += lost * (USEC_PER_SEC / HZ);
153
154 sec = xtime.tv_sec;
155 usec += (xtime.tv_nsec / 1000);
156 } while (read_seqretry(&xtime_lock, seq));
157
158 while (usec >= 1000000) {
159 usec -= 1000000;
160 sec++;
161 }
162
163 tv->tv_sec = sec;
164 tv->tv_usec = usec;
165}
166
167EXPORT_SYMBOL(do_gettimeofday);
168
169int do_settimeofday(struct timespec *tv)
170{
171 time_t wtm_sec, sec = tv->tv_sec;
172 long wtm_nsec, nsec = tv->tv_nsec;
173
174 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
175 return -EINVAL;
176
177 write_seqlock_irq(&xtime_lock);
178 /*
179 * This is revolting. We need to set "xtime" correctly. However, the
180 * value in this location is the value at the most recent update of
181 * wall time. Discover what correction gettimeofday() would have
182 * made, and then undo it!
183 */
184 nsec -= cur_timer->get_offset() * NSEC_PER_USEC;
185 nsec -= (jiffies - wall_jiffies) * TICK_NSEC;
186
187 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
188 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
189
190 set_normalized_timespec(&xtime, sec, nsec);
191 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
192
193 ntp_clear();
194 write_sequnlock_irq(&xtime_lock);
195 clock_was_set();
196 return 0;
197}
198
199EXPORT_SYMBOL(do_settimeofday);
200
201static int set_rtc_mmss(unsigned long nowtime) 114static int set_rtc_mmss(unsigned long nowtime)
202{ 115{
203 int retval; 116 int retval;
204 117 unsigned long flags;
205 WARN_ON(irqs_disabled());
206 118
207 /* gets recalled with irq locally disabled */ 119 /* gets recalled with irq locally disabled */
208 spin_lock_irq(&rtc_lock); 120 /* XXX - does irqsave resolve this? -johnstul */
121 spin_lock_irqsave(&rtc_lock, flags);
209 if (efi_enabled) 122 if (efi_enabled)
210 retval = efi_set_rtc_mmss(nowtime); 123 retval = efi_set_rtc_mmss(nowtime);
211 else 124 else
212 retval = mach_set_rtc_mmss(nowtime); 125 retval = mach_set_rtc_mmss(nowtime);
213 spin_unlock_irq(&rtc_lock); 126 spin_unlock_irqrestore(&rtc_lock, flags);
214 127
215 return retval; 128 return retval;
216} 129}
@@ -218,16 +131,6 @@ static int set_rtc_mmss(unsigned long nowtime)
218 131
219int timer_ack; 132int timer_ack;
220 133
221/* monotonic_clock(): returns # of nanoseconds passed since time_init()
222 * Note: This function is required to return accurate
223 * time even in the absence of multiple timer ticks.
224 */
225unsigned long long monotonic_clock(void)
226{
227 return cur_timer->monotonic_clock();
228}
229EXPORT_SYMBOL(monotonic_clock);
230
231#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER) 134#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
232unsigned long profile_pc(struct pt_regs *regs) 135unsigned long profile_pc(struct pt_regs *regs)
233{ 136{
@@ -242,11 +145,21 @@ EXPORT_SYMBOL(profile_pc);
242#endif 145#endif
243 146
244/* 147/*
245 * timer_interrupt() needs to keep up the real-time clock, 148 * This is the same as the above, except we _also_ save the current
246 * as well as call the "do_timer()" routine every clocktick 149 * Time Stamp Counter value at the time of the timer interrupt, so that
150 * we later on can estimate the time of day more exactly.
247 */ 151 */
248static inline void do_timer_interrupt(int irq, struct pt_regs *regs) 152irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
249{ 153{
154 /*
155 * Here we are in the timer irq handler. We just have irqs locally
156 * disabled but we don't know if the timer_bh is running on the other
156 * CPU. We need to avoid an SMP race with it. NOTE: we don't need
158 * the irq version of write_lock because as just said we have irq
159 * locally disabled. -arca
160 */
161 write_seqlock(&xtime_lock);
162
250#ifdef CONFIG_X86_IO_APIC 163#ifdef CONFIG_X86_IO_APIC
251 if (timer_ack) { 164 if (timer_ack) {
252 /* 165 /*
@@ -279,27 +192,6 @@ static inline void do_timer_interrupt(int irq, struct pt_regs *regs)
279 irq = inb_p( 0x61 ); /* read the current state */ 192 irq = inb_p( 0x61 ); /* read the current state */
280 outb_p( irq|0x80, 0x61 ); /* reset the IRQ */ 193 outb_p( irq|0x80, 0x61 ); /* reset the IRQ */
281 } 194 }
282}
283
284/*
285 * This is the same as the above, except we _also_ save the current
286 * Time Stamp Counter value at the time of the timer interrupt, so that
287 * we later on can estimate the time of day more exactly.
288 */
289irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
290{
291 /*
292 * Here we are in the timer irq handler. We just have irqs locally
293 * disabled but we don't know if the timer_bh is running on the other
294 * CPU. We need to avoid to SMP race with it. NOTE: we don' t need
295 * the irq version of write_lock because as just said we have irq
296 * locally disabled. -arca
297 */
298 write_seqlock(&xtime_lock);
299
300 cur_timer->mark_offset();
301
302 do_timer_interrupt(irq, regs);
303 195
304 write_sequnlock(&xtime_lock); 196 write_sequnlock(&xtime_lock);
305 197
@@ -380,7 +272,6 @@ void notify_arch_cmos_timer(void)
380 272
381static long clock_cmos_diff, sleep_start; 273static long clock_cmos_diff, sleep_start;
382 274
383static struct timer_opts *last_timer;
384static int timer_suspend(struct sys_device *dev, pm_message_t state) 275static int timer_suspend(struct sys_device *dev, pm_message_t state)
385{ 276{
386 /* 277 /*
@@ -389,10 +280,6 @@ static int timer_suspend(struct sys_device *dev, pm_message_t state)
389 clock_cmos_diff = -get_cmos_time(); 280 clock_cmos_diff = -get_cmos_time();
390 clock_cmos_diff += get_seconds(); 281 clock_cmos_diff += get_seconds();
391 sleep_start = get_cmos_time(); 282 sleep_start = get_cmos_time();
392 last_timer = cur_timer;
393 cur_timer = &timer_none;
394 if (last_timer->suspend)
395 last_timer->suspend(state);
396 return 0; 283 return 0;
397} 284}
398 285
@@ -415,10 +302,6 @@ static int timer_resume(struct sys_device *dev)
415 jiffies_64 += sleep_length; 302 jiffies_64 += sleep_length;
416 wall_jiffies += sleep_length; 303 wall_jiffies += sleep_length;
417 write_sequnlock_irqrestore(&xtime_lock, flags); 304 write_sequnlock_irqrestore(&xtime_lock, flags);
418 if (last_timer->resume)
419 last_timer->resume();
420 cur_timer = last_timer;
421 last_timer = NULL;
422 touch_softlockup_watchdog(); 305 touch_softlockup_watchdog();
423 return 0; 306 return 0;
424} 307}
@@ -460,9 +343,6 @@ static void __init hpet_time_init(void)
460 printk("Using HPET for base-timer\n"); 343 printk("Using HPET for base-timer\n");
461 } 344 }
462 345
463 cur_timer = select_timer();
464 printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
465
466 time_init_hook(); 346 time_init_hook();
467} 347}
468#endif 348#endif
@@ -484,8 +364,5 @@ void __init time_init(void)
484 set_normalized_timespec(&wall_to_monotonic, 364 set_normalized_timespec(&wall_to_monotonic,
485 -xtime.tv_sec, -xtime.tv_nsec); 365 -xtime.tv_sec, -xtime.tv_nsec);
486 366
487 cur_timer = select_timer();
488 printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
489
490 time_init_hook(); 367 time_init_hook();
491} 368}
diff --git a/arch/i386/kernel/timers/Makefile b/arch/i386/kernel/timers/Makefile
deleted file mode 100644
index 8fa12be658dd..000000000000
--- a/arch/i386/kernel/timers/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
1#
2# Makefile for x86 timers
3#
4
5obj-y := timer.o timer_none.o timer_tsc.o timer_pit.o common.o
6
7obj-$(CONFIG_X86_CYCLONE_TIMER) += timer_cyclone.o
8obj-$(CONFIG_HPET_TIMER) += timer_hpet.o
9obj-$(CONFIG_X86_PM_TIMER) += timer_pm.o
diff --git a/arch/i386/kernel/timers/common.c b/arch/i386/kernel/timers/common.c
deleted file mode 100644
index 8163fe0cf1f0..000000000000
--- a/arch/i386/kernel/timers/common.c
+++ /dev/null
@@ -1,172 +0,0 @@
1/*
2 * Common functions used across the timers go here
3 */
4
5#include <linux/init.h>
6#include <linux/timex.h>
7#include <linux/errno.h>
8#include <linux/jiffies.h>
9#include <linux/module.h>
10
11#include <asm/io.h>
12#include <asm/timer.h>
13#include <asm/hpet.h>
14
15#include "mach_timer.h"
16
17/* ------ Calibrate the TSC -------
18 * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset().
19 * Too much 64-bit arithmetic here to do this cleanly in C, and for
20 * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2)
21 * output busy loop as low as possible. We avoid reading the CTC registers
22 * directly because of the awkward 8-bit access mechanism of the 82C54
23 * device.
24 */
25
26#define CALIBRATE_TIME (5 * 1000020/HZ)
27
28unsigned long calibrate_tsc(void)
29{
30 mach_prepare_counter();
31
32 {
33 unsigned long startlow, starthigh;
34 unsigned long endlow, endhigh;
35 unsigned long count;
36
37 rdtsc(startlow,starthigh);
38 mach_countup(&count);
39 rdtsc(endlow,endhigh);
40
41
42 /* Error: ECTCNEVERSET */
43 if (count <= 1)
44 goto bad_ctc;
45
46 /* 64-bit subtract - gcc just messes up with long longs */
47 __asm__("subl %2,%0\n\t"
48 "sbbl %3,%1"
49 :"=a" (endlow), "=d" (endhigh)
50 :"g" (startlow), "g" (starthigh),
51 "0" (endlow), "1" (endhigh));
52
53 /* Error: ECPUTOOFAST */
54 if (endhigh)
55 goto bad_ctc;
56
57 /* Error: ECPUTOOSLOW */
58 if (endlow <= CALIBRATE_TIME)
59 goto bad_ctc;
60
61 __asm__("divl %2"
62 :"=a" (endlow), "=d" (endhigh)
63 :"r" (endlow), "0" (0), "1" (CALIBRATE_TIME));
64
65 return endlow;
66 }
67
68 /*
69 * The CTC wasn't reliable: we got a hit on the very first read,
70 * or the CPU was so fast/slow that the quotient wouldn't fit in
71 * 32 bits..
72 */
73bad_ctc:
74 return 0;
75}
76
77#ifdef CONFIG_HPET_TIMER
78/* ------ Calibrate the TSC using HPET -------
79 * Return 2^32 * (1 / (TSC clocks per usec)) for getting the CPU freq.
80 * Second output is parameter 1 (when non NULL)
81 * Set 2^32 * (1 / (tsc per HPET clk)) for delay_hpet().
82 * calibrate_tsc() calibrates the processor TSC by comparing
83 * it to the HPET timer of known frequency.
84 * Too much 64-bit arithmetic here to do this cleanly in C
85 */
86#define CALIBRATE_CNT_HPET (5 * hpet_tick)
87#define CALIBRATE_TIME_HPET (5 * KERNEL_TICK_USEC)
88
89unsigned long __devinit calibrate_tsc_hpet(unsigned long *tsc_hpet_quotient_ptr)
90{
91 unsigned long tsc_startlow, tsc_starthigh;
92 unsigned long tsc_endlow, tsc_endhigh;
93 unsigned long hpet_start, hpet_end;
94 unsigned long result, remain;
95
96 hpet_start = hpet_readl(HPET_COUNTER);
97 rdtsc(tsc_startlow, tsc_starthigh);
98 do {
99 hpet_end = hpet_readl(HPET_COUNTER);
100 } while ((hpet_end - hpet_start) < CALIBRATE_CNT_HPET);
101 rdtsc(tsc_endlow, tsc_endhigh);
102
103 /* 64-bit subtract - gcc just messes up with long longs */
104 __asm__("subl %2,%0\n\t"
105 "sbbl %3,%1"
106 :"=a" (tsc_endlow), "=d" (tsc_endhigh)
107 :"g" (tsc_startlow), "g" (tsc_starthigh),
108 "0" (tsc_endlow), "1" (tsc_endhigh));
109
110 /* Error: ECPUTOOFAST */
111 if (tsc_endhigh)
112 goto bad_calibration;
113
114 /* Error: ECPUTOOSLOW */
115 if (tsc_endlow <= CALIBRATE_TIME_HPET)
116 goto bad_calibration;
117
118 ASM_DIV64_REG(result, remain, tsc_endlow, 0, CALIBRATE_TIME_HPET);
119 if (remain > (tsc_endlow >> 1))
120 result++; /* rounding the result */
121
122 if (tsc_hpet_quotient_ptr) {
123 unsigned long tsc_hpet_quotient;
124
125 ASM_DIV64_REG(tsc_hpet_quotient, remain, tsc_endlow, 0,
126 CALIBRATE_CNT_HPET);
127 if (remain > (tsc_endlow >> 1))
128 tsc_hpet_quotient++; /* rounding the result */
129 *tsc_hpet_quotient_ptr = tsc_hpet_quotient;
130 }
131
132 return result;
133bad_calibration:
134 /*
135 * the CPU was so fast/slow that the quotient wouldn't fit in
136 * 32 bits..
137 */
138 return 0;
139}
140#endif
141
142
143unsigned long read_timer_tsc(void)
144{
145 unsigned long retval;
146 rdtscl(retval);
147 return retval;
148}
149
150
151/* calculate cpu_khz */
152void init_cpu_khz(void)
153{
154 if (cpu_has_tsc) {
155 unsigned long tsc_quotient = calibrate_tsc();
156 if (tsc_quotient) {
157 /* report CPU clock rate in Hz.
158 * The formula is (10^6 * 2^32) / (2^32 * 1 / (clocks/us)) =
159 * clock/second. Our precision is about 100 ppm.
160 */
161 { unsigned long eax=0, edx=1000;
162 __asm__("divl %2"
163 :"=a" (cpu_khz), "=d" (edx)
164 :"r" (tsc_quotient),
165 "0" (eax), "1" (edx));
166 printk("Detected %u.%03u MHz processor.\n",
167 cpu_khz / 1000, cpu_khz % 1000);
168 }
169 }
170 }
171}
172
diff --git a/arch/i386/kernel/timers/timer.c b/arch/i386/kernel/timers/timer.c
deleted file mode 100644
index 7e39ed8e33f8..000000000000
--- a/arch/i386/kernel/timers/timer.c
+++ /dev/null
@@ -1,75 +0,0 @@
1#include <linux/init.h>
2#include <linux/kernel.h>
3#include <linux/string.h>
4#include <asm/timer.h>
5
6#ifdef CONFIG_HPET_TIMER
7/*
8 * HPET memory read is slower than tsc reads, but is more dependable as it
9 * always runs at constant frequency and reduces complexity due to
10 * cpufreq. So, we prefer HPET timer to tsc based one. Also, we cannot use
11 * timer_pit when HPET is active. So, we default to timer_tsc.
12 */
13#endif
14/* list of timers, ordered by preference, NULL terminated */
15static struct init_timer_opts* __initdata timers[] = {
16#ifdef CONFIG_X86_CYCLONE_TIMER
17 &timer_cyclone_init,
18#endif
19#ifdef CONFIG_HPET_TIMER
20 &timer_hpet_init,
21#endif
22#ifdef CONFIG_X86_PM_TIMER
23 &timer_pmtmr_init,
24#endif
25 &timer_tsc_init,
26 &timer_pit_init,
27 NULL,
28};
29
30static char clock_override[10] __initdata;
31
32static int __init clock_setup(char* str)
33{
34 if (str)
35 strlcpy(clock_override, str, sizeof(clock_override));
36 return 1;
37}
38__setup("clock=", clock_setup);
39
40
41/* The chosen timesource has been found to be bad.
42 * Fall back to a known good timesource (the PIT)
43 */
44void clock_fallback(void)
45{
46 cur_timer = &timer_pit;
47}
48
49/* iterates through the list of timers, returning the first
50 * one that initializes successfully.
51 */
52struct timer_opts* __init select_timer(void)
53{
54 int i = 0;
55
56 /* find most preferred working timer */
57 while (timers[i]) {
58 if (timers[i]->init)
59 if (timers[i]->init(clock_override) == 0)
60 return timers[i]->opts;
61 ++i;
62 }
63
64 panic("select_timer: Cannot find a suitable timer\n");
65 return NULL;
66}
67
68int read_current_timer(unsigned long *timer_val)
69{
70 if (cur_timer->read_timer) {
71 *timer_val = cur_timer->read_timer();
72 return 0;
73 }
74 return -1;
75}
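For orientation, a new timesource would hook into the timers[] list above by providing an init_timer_opts whose init() honours the clock= override, exactly as the drivers below do. A hypothetical sketch (names are illustrative; the remaining timer_opts hooks would be filled in like the real drivers):

	static struct timer_opts timer_example;	/* .mark_offset etc. as in the drivers below */

	static int __init init_example(char *override)
	{
		/* respect clock=<name>, like the other init routines */
		if (override[0] && strncmp(override, "example", 7))
			return -ENODEV;
		/* hardware probe/calibration would go here */
		return 0;
	}

	struct init_timer_opts __initdata timer_example_init = {
		.init	= init_example,
		.opts	= &timer_example,
	};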
diff --git a/arch/i386/kernel/timers/timer_cyclone.c b/arch/i386/kernel/timers/timer_cyclone.c
deleted file mode 100644
index 13892a65c941..000000000000
--- a/arch/i386/kernel/timers/timer_cyclone.c
+++ /dev/null
@@ -1,259 +0,0 @@
1/* Cyclone-timer:
2 * This code implements timer_ops for the cyclone counter found
3 * on IBM x440, x360, and other Summit based systems.
4 *
5 * Copyright (C) 2002 IBM, John Stultz (johnstul@us.ibm.com)
6 */
7
8
9#include <linux/spinlock.h>
10#include <linux/init.h>
11#include <linux/timex.h>
12#include <linux/errno.h>
13#include <linux/string.h>
14#include <linux/jiffies.h>
15
16#include <asm/timer.h>
17#include <asm/io.h>
18#include <asm/pgtable.h>
19#include <asm/fixmap.h>
20#include <asm/i8253.h>
21
22#include "io_ports.h"
23
24/* Number of usecs that the last interrupt was delayed */
25static int delay_at_last_interrupt;
26
27#define CYCLONE_CBAR_ADDR 0xFEB00CD0
28#define CYCLONE_PMCC_OFFSET 0x51A0
29#define CYCLONE_MPMC_OFFSET 0x51D0
30#define CYCLONE_MPCS_OFFSET 0x51A8
31#define CYCLONE_TIMER_FREQ 100000000
32#define CYCLONE_TIMER_MASK (((u64)1<<40)-1) /* 40 bit mask */
33int use_cyclone = 0;
34
35static u32* volatile cyclone_timer; /* Cyclone MPMC0 register */
36static u32 last_cyclone_low;
37static u32 last_cyclone_high;
38static unsigned long long monotonic_base;
39static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
40
41/* helper macro to atomically read both cyclone counter registers */
42#define read_cyclone_counter(low,high) \
43 do{ \
44 high = cyclone_timer[1]; low = cyclone_timer[0]; \
45 } while (high != cyclone_timer[1]);
46
47
48static void mark_offset_cyclone(void)
49{
50 unsigned long lost, delay;
51 unsigned long delta = last_cyclone_low;
52 int count;
53 unsigned long long this_offset, last_offset;
54
55 write_seqlock(&monotonic_lock);
56 last_offset = ((unsigned long long)last_cyclone_high<<32)|last_cyclone_low;
57
58 spin_lock(&i8253_lock);
59 read_cyclone_counter(last_cyclone_low,last_cyclone_high);
60
61 /* read values for delay_at_last_interrupt */
62 outb_p(0x00, 0x43); /* latch the count ASAP */
63
64 count = inb_p(0x40); /* read the latched count */
65 count |= inb(0x40) << 8;
66
67 /*
68 * VIA686a test code... reset the latch if count > max + 1
69 * from timer_pit.c - cjb
70 */
71 if (count > LATCH) {
72 outb_p(0x34, PIT_MODE);
73 outb_p(LATCH & 0xff, PIT_CH0);
74 outb(LATCH >> 8, PIT_CH0);
75 count = LATCH - 1;
76 }
77 spin_unlock(&i8253_lock);
78
79 /* lost tick compensation */
80 delta = last_cyclone_low - delta;
81 delta /= (CYCLONE_TIMER_FREQ/1000000);
82 delta += delay_at_last_interrupt;
83 lost = delta/(1000000/HZ);
84 delay = delta%(1000000/HZ);
85 if (lost >= 2)
86 jiffies_64 += lost-1;
87
88 /* update the monotonic base value */
89 this_offset = ((unsigned long long)last_cyclone_high<<32)|last_cyclone_low;
90 monotonic_base += (this_offset - last_offset) & CYCLONE_TIMER_MASK;
91 write_sequnlock(&monotonic_lock);
92
93 /* calculate delay_at_last_interrupt */
94 count = ((LATCH-1) - count) * TICK_SIZE;
95 delay_at_last_interrupt = (count + LATCH/2) / LATCH;
96
97
98 * catch corner case where tick rollover occurred
99 * between cyclone and pit reads (as noted when
100 * usec delta is > 90% # of usecs/tick)
101 */
102 if (lost && abs(delay - delay_at_last_interrupt) > (900000/HZ))
103 jiffies_64++;
104}
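A worked example of the lost-tick arithmetic above (ignoring delay_at_last_interrupt): with CYCLONE_TIMER_FREQ = 100 MHz (100 ticks per usec) and, say, HZ = 1000, a raw cyclone delta of 250,000 ticks is 2,500 usec, giving lost = 2 and delay = 500 usec, so one extra jiffy (lost - 1) is added back.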
105
106static unsigned long get_offset_cyclone(void)
107{
108 u32 offset;
109
110 if(!cyclone_timer)
111 return delay_at_last_interrupt;
112
113 /* Read the cyclone timer */
114 offset = cyclone_timer[0];
115
116 /* .. relative to previous jiffy */
117 offset = offset - last_cyclone_low;
118
119 /* convert cyclone ticks to microseconds */
120 /* XXX slow, can we speed this up? */
121 offset = offset/(CYCLONE_TIMER_FREQ/1000000);
122
123 /* our adjusted time offset in microseconds */
124 return delay_at_last_interrupt + offset;
125}
126
127static unsigned long long monotonic_clock_cyclone(void)
128{
129 u32 now_low, now_high;
130 unsigned long long last_offset, this_offset, base;
131 unsigned long long ret;
132 unsigned seq;
133
134 /* atomically read monotonic base & last_offset */
135 do {
136 seq = read_seqbegin(&monotonic_lock);
137 last_offset = ((unsigned long long)last_cyclone_high<<32)|last_cyclone_low;
138 base = monotonic_base;
139 } while (read_seqretry(&monotonic_lock, seq));
140
141
142 /* Read the cyclone counter */
143 read_cyclone_counter(now_low,now_high);
144 this_offset = ((unsigned long long)now_high<<32)|now_low;
145
146 /* convert to nanoseconds */
147 ret = base + ((this_offset - last_offset)&CYCLONE_TIMER_MASK);
148 return ret * (1000000000 / CYCLONE_TIMER_FREQ);
149}
150
151static int __init init_cyclone(char* override)
152{
153 u32* reg;
154 u32 base; /* saved cyclone base address */
155 u32 pageaddr; /* page that contains cyclone_timer register */
156 u32 offset; /* offset from pageaddr to cyclone_timer register */
157 int i;
158
159 /* check clock override */
160 if (override[0] && strncmp(override,"cyclone",7))
161 return -ENODEV;
162
163 /*make sure we're on a summit box*/
164 if(!use_cyclone) return -ENODEV;
165
166 printk(KERN_INFO "Summit chipset: Starting Cyclone Counter.\n");
167
168 /* find base address */
169 pageaddr = (CYCLONE_CBAR_ADDR)&PAGE_MASK;
170 offset = (CYCLONE_CBAR_ADDR)&(~PAGE_MASK);
171 set_fixmap_nocache(FIX_CYCLONE_TIMER, pageaddr);
172 reg = (u32*)(fix_to_virt(FIX_CYCLONE_TIMER) + offset);
173 if(!reg){
174 printk(KERN_ERR "Summit chipset: Could not find valid CBAR register.\n");
175 return -ENODEV;
176 }
177 base = *reg;
178 if(!base){
179 printk(KERN_ERR "Summit chipset: Could not find valid CBAR value.\n");
180 return -ENODEV;
181 }
182
183 /* setup PMCC */
184 pageaddr = (base + CYCLONE_PMCC_OFFSET)&PAGE_MASK;
185 offset = (base + CYCLONE_PMCC_OFFSET)&(~PAGE_MASK);
186 set_fixmap_nocache(FIX_CYCLONE_TIMER, pageaddr);
187 reg = (u32*)(fix_to_virt(FIX_CYCLONE_TIMER) + offset);
188 if(!reg){
189 printk(KERN_ERR "Summit chipset: Could not find valid PMCC register.\n");
190 return -ENODEV;
191 }
192 reg[0] = 0x00000001;
193
194 /* setup MPCS */
195 pageaddr = (base + CYCLONE_MPCS_OFFSET)&PAGE_MASK;
196 offset = (base + CYCLONE_MPCS_OFFSET)&(~PAGE_MASK);
197 set_fixmap_nocache(FIX_CYCLONE_TIMER, pageaddr);
198 reg = (u32*)(fix_to_virt(FIX_CYCLONE_TIMER) + offset);
199 if(!reg){
200 printk(KERN_ERR "Summit chipset: Could not find valid MPCS register.\n");
201 return -ENODEV;
202 }
203 reg[0] = 0x00000001;
204
205 /* map in cyclone_timer */
206 pageaddr = (base + CYCLONE_MPMC_OFFSET)&PAGE_MASK;
207 offset = (base + CYCLONE_MPMC_OFFSET)&(~PAGE_MASK);
208 set_fixmap_nocache(FIX_CYCLONE_TIMER, pageaddr);
209 cyclone_timer = (u32*)(fix_to_virt(FIX_CYCLONE_TIMER) + offset);
210 if(!cyclone_timer){
211 printk(KERN_ERR "Summit chipset: Could not find valid MPMC register.\n");
212 return -ENODEV;
213 }
214
215 /*quick test to make sure it's ticking*/
216 for(i=0; i<3; i++){
217 u32 old = cyclone_timer[0];
218 int stall = 100;
219 while(stall--) barrier();
220 if(cyclone_timer[0] == old){
221 printk(KERN_ERR "Summit chipset: Counter not counting! DISABLED\n");
222 cyclone_timer = 0;
223 return -ENODEV;
224 }
225 }
226
227 init_cpu_khz();
228
229 /* Everything looks good! */
230 return 0;
231}
232
233
234static void delay_cyclone(unsigned long loops)
235{
236 unsigned long bclock, now;
237 if(!cyclone_timer)
238 return;
239 bclock = cyclone_timer[0];
240 do {
241 rep_nop();
242 now = cyclone_timer[0];
243 } while ((now-bclock) < loops);
244}
245/************************************************************/
246
247/* cyclone timer_opts struct */
248static struct timer_opts timer_cyclone = {
249 .name = "cyclone",
250 .mark_offset = mark_offset_cyclone,
251 .get_offset = get_offset_cyclone,
252 .monotonic_clock = monotonic_clock_cyclone,
253 .delay = delay_cyclone,
254};
255
256struct init_timer_opts __initdata timer_cyclone_init = {
257 .init = init_cyclone,
258 .opts = &timer_cyclone,
259};
diff --git a/arch/i386/kernel/timers/timer_hpet.c b/arch/i386/kernel/timers/timer_hpet.c
deleted file mode 100644
index 17a6fe7166e7..000000000000
--- a/arch/i386/kernel/timers/timer_hpet.c
+++ /dev/null
@@ -1,217 +0,0 @@
1/*
2 * This code largely moved from arch/i386/kernel/time.c.
3 * See comments there for proper credits.
4 */
5
6#include <linux/spinlock.h>
7#include <linux/init.h>
8#include <linux/timex.h>
9#include <linux/errno.h>
10#include <linux/string.h>
11#include <linux/jiffies.h>
12
13#include <asm/timer.h>
14#include <asm/io.h>
15#include <asm/processor.h>
16
17#include "io_ports.h"
18#include "mach_timer.h"
19#include <asm/hpet.h>
20
21static unsigned long hpet_usec_quotient __read_mostly; /* convert hpet clks to usec */
22static unsigned long tsc_hpet_quotient __read_mostly; /* convert tsc to hpet clks */
23static unsigned long hpet_last; /* hpet counter value at last tick*/
24static unsigned long last_tsc_low; /* lsb 32 bits of Time Stamp Counter */
25static unsigned long last_tsc_high; /* msb 32 bits of Time Stamp Counter */
26static unsigned long long monotonic_base;
27static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
28
29/* convert from cycles(64bits) => nanoseconds (64bits)
30 * basic equation:
31 * ns = cycles / (freq / ns_per_sec)
32 * ns = cycles * (ns_per_sec / freq)
33 * ns = cycles * (10^9 / (cpu_khz * 10^3))
34 * ns = cycles * (10^6 / cpu_khz)
35 *
36 * Then we use scaling math (suggested by george@mvista.com) to get:
37 * ns = cycles * (10^6 * SC / cpu_khz) / SC
38 * ns = cycles * cyc2ns_scale / SC
39 *
40 * And since SC is a constant power of two, we can convert the div
41 * into a shift.
42 *
43 * We can use khz divisor instead of mhz to keep a better precision, since
44 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
45 * (mathieu.desnoyers@polymtl.ca)
46 *
47 * -johnstul@us.ibm.com "math is hard, lets go shopping!"
48 */
49static unsigned long cyc2ns_scale __read_mostly;
50#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
51
52static inline void set_cyc2ns_scale(unsigned long cpu_khz)
53{
54 cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
55}
56
57static inline unsigned long long cycles_2_ns(unsigned long long cyc)
58{
59 return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
60}
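As a worked example of the scaling above: with cpu_khz = 2,000,000 (a 2 GHz CPU), cyc2ns_scale = (1,000,000 << 10) / 2,000,000 = 512, so cycles_2_ns(c) = (c * 512) >> 10 = c / 2, i.e. the expected 0.5 ns per cycle.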
61
62static unsigned long long monotonic_clock_hpet(void)
63{
64 unsigned long long last_offset, this_offset, base;
65 unsigned seq;
66
67 /* atomically read monotonic base & last_offset */
68 do {
69 seq = read_seqbegin(&monotonic_lock);
70 last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
71 base = monotonic_base;
72 } while (read_seqretry(&monotonic_lock, seq));
73
74 /* Read the Time Stamp Counter */
75 rdtscll(this_offset);
76
77 /* return the value in ns */
78 return base + cycles_2_ns(this_offset - last_offset);
79}
80
81static unsigned long get_offset_hpet(void)
82{
83 register unsigned long eax, edx;
84
85 eax = hpet_readl(HPET_COUNTER);
86 eax -= hpet_last; /* hpet delta */
87 eax = min(hpet_tick, eax);
88 /*
89 * Time offset = (hpet delta) * ( usecs per HPET clock )
90 * = (hpet delta) * ( usecs per tick / HPET clocks per tick)
91 * = (hpet delta) * ( hpet_usec_quotient ) / (2^32)
92 *
93 * Where,
94 * hpet_usec_quotient = (2^32 * usecs per tick)/HPET clocks per tick
95 *
96 * Using a mull instead of a divl saves some cycles in critical path.
97 */
98 ASM_MUL64_REG(eax, edx, hpet_usec_quotient, eax);
99
100 /* our adjusted time offset in microseconds */
101 return edx;
102}
103
104static void mark_offset_hpet(void)
105{
106 unsigned long long this_offset, last_offset;
107 unsigned long offset;
108
109 write_seqlock(&monotonic_lock);
110 last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
111 rdtsc(last_tsc_low, last_tsc_high);
112
113 if (hpet_use_timer)
114 offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
115 else
116 offset = hpet_readl(HPET_COUNTER);
117 if (unlikely(((offset - hpet_last) >= (2*hpet_tick)) && (hpet_last != 0))) {
118 int lost_ticks = ((offset - hpet_last) / hpet_tick) - 1;
119 jiffies_64 += lost_ticks;
120 }
121 hpet_last = offset;
122
123 /* update the monotonic base value */
124 this_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
125 monotonic_base += cycles_2_ns(this_offset - last_offset);
126 write_sequnlock(&monotonic_lock);
127}
128
129static void delay_hpet(unsigned long loops)
130{
131 unsigned long hpet_start, hpet_end;
132 unsigned long eax;
133
134 /* loops is the number of cpu cycles. Convert it to hpet clocks */
135 ASM_MUL64_REG(eax, loops, tsc_hpet_quotient, loops);
136
137 hpet_start = hpet_readl(HPET_COUNTER);
138 do {
139 rep_nop();
140 hpet_end = hpet_readl(HPET_COUNTER);
141 } while ((hpet_end - hpet_start) < (loops));
142}
143
144static struct timer_opts timer_hpet;
145
146static int __init init_hpet(char* override)
147{
148 unsigned long result, remain;
149
150 /* check clock override */
151 if (override[0] && strncmp(override,"hpet",4))
152 return -ENODEV;
153
154 if (!is_hpet_enabled())
155 return -ENODEV;
156
157 printk("Using HPET for gettimeofday\n");
158 if (cpu_has_tsc) {
159 unsigned long tsc_quotient = calibrate_tsc_hpet(&tsc_hpet_quotient);
160 if (tsc_quotient) {
161 /* report CPU clock rate in Hz.
162 * The formula is (10^6 * 2^32) / (2^32 * 1 / (clocks/us)) =
163 * clock/second. Our precision is about 100 ppm.
164 */
165 { unsigned long eax=0, edx=1000;
166 ASM_DIV64_REG(cpu_khz, edx, tsc_quotient,
167 eax, edx);
168 printk("Detected %u.%03u MHz processor.\n",
169 cpu_khz / 1000, cpu_khz % 1000);
170 }
171 set_cyc2ns_scale(cpu_khz);
172 }
173 /* set this only when cpu_has_tsc */
174 timer_hpet.read_timer = read_timer_tsc;
175 }
176
177 /*
178 * Math to calculate hpet to usec multiplier
179 * Look for the comments at get_offset_hpet()
180 */
181 ASM_DIV64_REG(result, remain, hpet_tick, 0, KERNEL_TICK_USEC);
182 if (remain > (hpet_tick >> 1))
183 result++; /* rounding the result */
184 hpet_usec_quotient = result;
185
186 return 0;
187}
188
189static int hpet_resume(void)
190{
191 write_seqlock(&monotonic_lock);
192 /* Assume this is the last mark offset time */
193 rdtsc(last_tsc_low, last_tsc_high);
194
195 if (hpet_use_timer)
196 hpet_last = hpet_readl(HPET_T0_CMP) - hpet_tick;
197 else
198 hpet_last = hpet_readl(HPET_COUNTER);
199 write_sequnlock(&monotonic_lock);
200 return 0;
201}
202/************************************************************/
203
204/* tsc timer_opts struct */
205static struct timer_opts timer_hpet __read_mostly = {
206 .name = "hpet",
207 .mark_offset = mark_offset_hpet,
208 .get_offset = get_offset_hpet,
209 .monotonic_clock = monotonic_clock_hpet,
210 .delay = delay_hpet,
211 .resume = hpet_resume,
212};
213
214struct init_timer_opts __initdata timer_hpet_init = {
215 .init = init_hpet,
216 .opts = &timer_hpet,
217};
diff --git a/arch/i386/kernel/timers/timer_none.c b/arch/i386/kernel/timers/timer_none.c
deleted file mode 100644
index 4ea2f414dbbd..000000000000
--- a/arch/i386/kernel/timers/timer_none.c
+++ /dev/null
@@ -1,39 +0,0 @@
1#include <linux/init.h>
2#include <asm/timer.h>
3
4static void mark_offset_none(void)
5{
6 /* nothing needed */
7}
8
9static unsigned long get_offset_none(void)
10{
11 return 0;
12}
13
14static unsigned long long monotonic_clock_none(void)
15{
16 return 0;
17}
18
19static void delay_none(unsigned long loops)
20{
21 int d0;
22 __asm__ __volatile__(
23 "\tjmp 1f\n"
24 ".align 16\n"
25 "1:\tjmp 2f\n"
26 ".align 16\n"
27 "2:\tdecl %0\n\tjns 2b"
28 :"=&a" (d0)
29 :"0" (loops));
30}
31
32/* none timer_opts struct */
33struct timer_opts timer_none = {
34 .name = "none",
35 .mark_offset = mark_offset_none,
36 .get_offset = get_offset_none,
37 .monotonic_clock = monotonic_clock_none,
38 .delay = delay_none,
39};
diff --git a/arch/i386/kernel/timers/timer_pit.c b/arch/i386/kernel/timers/timer_pit.c
deleted file mode 100644
index b9b6bd56b9ba..000000000000
--- a/arch/i386/kernel/timers/timer_pit.c
+++ /dev/null
@@ -1,177 +0,0 @@
1/*
2 * This code largely moved from arch/i386/kernel/time.c.
3 * See comments there for proper credits.
4 */
5
6#include <linux/spinlock.h>
7#include <linux/module.h>
8#include <linux/device.h>
9#include <linux/sysdev.h>
10#include <linux/timex.h>
11#include <asm/delay.h>
12#include <asm/mpspec.h>
13#include <asm/timer.h>
14#include <asm/smp.h>
15#include <asm/io.h>
16#include <asm/arch_hooks.h>
17#include <asm/i8253.h>
18
19#include "do_timer.h"
20#include "io_ports.h"
21
22static int count_p; /* counter in get_offset_pit() */
23
24static int __init init_pit(char* override)
25{
26 /* check clock override */
27 if (override[0] && strncmp(override,"pit",3))
28 printk(KERN_ERR "Warning: clock= override failed. Defaulting "
29 "to PIT\n");
30 init_cpu_khz();
31 count_p = LATCH;
32 return 0;
33}
34
35static void mark_offset_pit(void)
36{
37 /* nothing needed */
38}
39
40static unsigned long long monotonic_clock_pit(void)
41{
42 return 0;
43}
44
45static void delay_pit(unsigned long loops)
46{
47 int d0;
48 __asm__ __volatile__(
49 "\tjmp 1f\n"
50 ".align 16\n"
51 "1:\tjmp 2f\n"
52 ".align 16\n"
53 "2:\tdecl %0\n\tjns 2b"
54 :"=&a" (d0)
55 :"0" (loops));
56}
57
58
59/* This function must be called with xtime_lock held.
60 * It was inspired by Steve McCanne's microtime-i386 for BSD. -- jrs
61 *
62 * However, the pc-audio speaker driver changes the divisor so that
63 * it gets interrupted rather more often - it loads 64 into the
64 * counter rather than 11932! This has an adverse impact on
65 * do_gettimeoffset() -- it stops working! What is also not
66 * good is that the interval that our timer function gets called
67 * is no longer 10.0002 ms, but 9.9767 ms. To get around this
68 * would require using a different timing source. Maybe someone
69 * could use the RTC - I know that this can interrupt at frequencies
70 * ranging from 8192Hz to 2Hz. If I had the energy, I'd somehow fix
71 * it so that at startup, the timer code in sched.c would select
72 * using either the RTC or the 8253 timer. The decision would be
73 * based on whether there was any other device around that needed
74 * to trample on the 8253. I'd set up the RTC to interrupt at 1024 Hz,
75 * and then do some jiggery to have a version of do_timer that
76 * advanced the clock by 1/1024 s. Every time that reached over 1/100
77 * of a second, then do all the old code. If the time was kept correct
78 * then do_gettimeoffset could just return 0 - there is no low order
79 * divider that can be accessed.
80 *
81 * Ideally, you would be able to use the RTC for the speaker driver,
82 * but it appears that the speaker driver really needs interrupt more
83 * often than every 120 us or so.
84 *
85 * Anyway, this needs more thought.... pjsg (1993-08-28)
86 *
87 * If you are really that interested, you should be reading
88 * comp.protocols.time.ntp!
89 */
90
91static unsigned long get_offset_pit(void)
92{
93 int count;
94 unsigned long flags;
95 static unsigned long jiffies_p = 0;
96
97 /*
98 * cache volatile jiffies temporarily; we have xtime_lock.
99 */
100 unsigned long jiffies_t;
101
102 spin_lock_irqsave(&i8253_lock, flags);
103 /* timer count may underflow right here */
104 outb_p(0x00, PIT_MODE); /* latch the count ASAP */
105
106 count = inb_p(PIT_CH0); /* read the latched count */
107
108 /*
109 * We do this guaranteed double memory access instead of a _p
110 * postfix in the previous port access. Wheee, hackady hack
111 */
112 jiffies_t = jiffies;
113
114 count |= inb_p(PIT_CH0) << 8;
115
116 /* VIA686a test code... reset the latch if count > max + 1 */
117 if (count > LATCH) {
118 outb_p(0x34, PIT_MODE);
119 outb_p(LATCH & 0xff, PIT_CH0);
120 outb(LATCH >> 8, PIT_CH0);
121 count = LATCH - 1;
122 }
123
124 /*
125 * avoiding timer inconsistencies (they are rare, but they happen)...
126 * there are two kinds of problems that must be avoided here:
127 * 1. the timer counter underflows
128 * 2. hardware problem with the timer, not giving us continuous time,
129 * the counter does small "jumps" upwards on some Pentium systems,
130 * (see c't 95/10 page 335 for Neptun bug.)
131 */
132
133 if( jiffies_t == jiffies_p ) {
134 if( count > count_p ) {
135 /* the nutcase */
136 count = do_timer_overflow(count);
137 }
138 } else
139 jiffies_p = jiffies_t;
140
141 count_p = count;
142
143 spin_unlock_irqrestore(&i8253_lock, flags);
144
145 count = ((LATCH-1) - count) * TICK_SIZE;
146 count = (count + LATCH/2) / LATCH;
147
148 return count;
149}
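A worked example of the conversion at the end of get_offset_pit(): at HZ = 100, LATCH is 11932 and TICK_SIZE is roughly 10000 usec, so a latched count of 5966 (about half the period remaining) gives ((11931 - 5966) * 10000 + 5966) / 11932, roughly 5000 usec, i.e. half a tick has elapsed.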
150
151
152/* tsc timer_opts struct */
153struct timer_opts timer_pit = {
154 .name = "pit",
155 .mark_offset = mark_offset_pit,
156 .get_offset = get_offset_pit,
157 .monotonic_clock = monotonic_clock_pit,
158 .delay = delay_pit,
159};
160
161struct init_timer_opts __initdata timer_pit_init = {
162 .init = init_pit,
163 .opts = &timer_pit,
164};
165
166void setup_pit_timer(void)
167{
168 unsigned long flags;
169
170 spin_lock_irqsave(&i8253_lock, flags);
171 outb_p(0x34,PIT_MODE); /* binary, mode 2, LSB/MSB, ch 0 */
172 udelay(10);
173 outb_p(LATCH & 0xff , PIT_CH0); /* LSB */
174 udelay(10);
175 outb(LATCH >> 8 , PIT_CH0); /* MSB */
176 spin_unlock_irqrestore(&i8253_lock, flags);
177}
diff --git a/arch/i386/kernel/timers/timer_pm.c b/arch/i386/kernel/timers/timer_pm.c
deleted file mode 100644
index 144e94a04933..000000000000
--- a/arch/i386/kernel/timers/timer_pm.c
+++ /dev/null
@@ -1,342 +0,0 @@
1/*
2 * (C) Dominik Brodowski <linux@brodo.de> 2003
3 *
4 * Driver to use the Power Management Timer (PMTMR) available in some
5 * southbridges as primary timing source for the Linux kernel.
6 *
7 * Based on parts of linux/drivers/acpi/hardware/hwtimer.c, timer_pit.c,
8 * timer_hpet.c, and on Arjan van de Ven's implementation for 2.4.
9 *
10 * This file is licensed under the GPL v2.
11 */
12
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/device.h>
17#include <linux/init.h>
18#include <linux/pci.h>
19#include <asm/types.h>
20#include <asm/timer.h>
21#include <asm/smp.h>
22#include <asm/io.h>
23#include <asm/arch_hooks.h>
24
25#include <linux/timex.h>
26#include "mach_timer.h"
27
28/* Number of PMTMR ticks expected during calibration run */
29#define PMTMR_TICKS_PER_SEC 3579545
30#define PMTMR_EXPECTED_RATE \
31 ((CALIBRATE_LATCH * (PMTMR_TICKS_PER_SEC >> 10)) / (CLOCK_TICK_RATE>>10))
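Since the PM timer's 3.579545 MHz clock is exactly three times the PIT's 1.193182 MHz (CLOCK_TICK_RATE), PMTMR_EXPECTED_RATE works out to roughly 3 * CALIBRATE_LATCH, i.e. three PM-timer ticks for every PIT tick counted during the calibration window.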
32
33
34/* The I/O port the PMTMR resides at.
35 * The location is detected during setup_arch(),
36 * in arch/i386/acpi/boot.c */
37u32 pmtmr_ioport = 0;
38
39
40/* value of the Power timer at last timer interrupt */
41static u32 offset_tick;
42static u32 offset_delay;
43
44static unsigned long long monotonic_base;
45static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
46
47#define ACPI_PM_MASK 0xFFFFFF /* limit it to 24 bits */
48
49static int pmtmr_need_workaround __read_mostly = 1;
50
51/*helper function to safely read acpi pm timesource*/
52static inline u32 read_pmtmr(void)
53{
54 if (pmtmr_need_workaround) {
55 u32 v1, v2, v3;
56
57 /* It has been reported that on various broken
58 * chipsets (ICH4, PIIX4 and PIIX4E) the ACPI PM time
59 * source is not latched, so it must be read multiple
60 * times to ensure a safe value is read.
61 */
62 do {
63 v1 = inl(pmtmr_ioport);
64 v2 = inl(pmtmr_ioport);
65 v3 = inl(pmtmr_ioport);
66 } while ((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1)
67 || (v3 > v1 && v3 < v2));
68
69 /* mask the output to 24 bits */
70 return v2 & ACPI_PM_MASK;
71 }
72
73 return inl(pmtmr_ioport) & ACPI_PM_MASK;
74}
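In other words, the loop above retries until three successive reads look like a monotonically increasing (possibly wrapping) counter and then trusts the middle one; a single read could land in the chipset's non-latched window and return garbage.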
75
76
77/*
78 * Some boards have the PMTMR running way too fast. We check
79 * the PMTMR rate against PIT channel 2 to catch these cases.
80 */
81static int verify_pmtmr_rate(void)
82{
83 u32 value1, value2;
84 unsigned long count, delta;
85
86 mach_prepare_counter();
87 value1 = read_pmtmr();
88 mach_countup(&count);
89 value2 = read_pmtmr();
90 delta = (value2 - value1) & ACPI_PM_MASK;
91
92 /* Check that the PMTMR delta is within 5% of what we expect */
93 if (delta < (PMTMR_EXPECTED_RATE * 19) / 20 ||
94 delta > (PMTMR_EXPECTED_RATE * 21) / 20) {
95 printk(KERN_INFO "PM-Timer running at invalid rate: %lu%% of normal - aborting.\n", 100UL * delta / PMTMR_EXPECTED_RATE);
96 return -1;
97 }
98
99 return 0;
100}
101
102
103static int init_pmtmr(char* override)
104{
105 u32 value1, value2;
106 unsigned int i;
107
108 if (override[0] && strncmp(override,"pmtmr",5))
109 return -ENODEV;
110
111 if (!pmtmr_ioport)
112 return -ENODEV;
113
114 /* we use the TSC for delay_pmtmr, so make sure it exists */
115 if (!cpu_has_tsc)
116 return -ENODEV;
117
118 /* "verify" this timing source */
119 value1 = read_pmtmr();
120 for (i = 0; i < 10000; i++) {
121 value2 = read_pmtmr();
122 if (value2 == value1)
123 continue;
124 if (value2 > value1)
125 goto pm_good;
126 if ((value2 < value1) && ((value2) < 0xFFF))
127 goto pm_good;
128 printk(KERN_INFO "PM-Timer had inconsistent results: 0x%#x, 0x%#x - aborting.\n", value1, value2);
129 return -EINVAL;
130 }
131 printk(KERN_INFO "PM-Timer had no reasonable result: 0x%#x - aborting.\n", value1);
132 return -ENODEV;
133
134pm_good:
135 if (verify_pmtmr_rate() != 0)
136 return -ENODEV;
137
138 init_cpu_khz();
139 return 0;
140}
141
142static inline u32 cyc2us(u32 cycles)
143{
144 /* The Power Management Timer ticks at 3.579545 ticks per microsecond.
145 * 1 / PM_TIMER_FREQUENCY == 0.27936511 =~ 286/1024 [error: 0.024%]
146 *
147 * Even with HZ = 100, delta is at maximum 35796 ticks, so it can
148 * easily be multiplied with 286 (=0x11E) without having to fear
149 * u32 overflows.
150 */
151 cycles *= 286;
152 return (cycles >> 10);
153}
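Checking the approximation above: 286/1024 = 0.27930 versus 1/3.579545 = 0.27937, an error of roughly 0.024%; and with HZ = 100 the largest per-tick delta is about 35,796 PM ticks, so multiplying by 286 stays near 1.0e7, far below u32 overflow.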
154
155/*
156 * this gets called during each timer interrupt
157 * - Called while holding the writer xtime_lock
158 */
159static void mark_offset_pmtmr(void)
160{
161 u32 lost, delta, last_offset;
162 static int first_run = 1;
163 last_offset = offset_tick;
164
165 write_seqlock(&monotonic_lock);
166
167 offset_tick = read_pmtmr();
168
169 /* calculate tick interval */
170 delta = (offset_tick - last_offset) & ACPI_PM_MASK;
171
172 /* convert to usecs */
173 delta = cyc2us(delta);
174
175 /* update the monotonic base value */
176 monotonic_base += delta * NSEC_PER_USEC;
177 write_sequnlock(&monotonic_lock);
178
179 /* convert to ticks */
180 delta += offset_delay;
181 lost = delta / (USEC_PER_SEC / HZ);
182 offset_delay = delta % (USEC_PER_SEC / HZ);
183
184
185 /* compensate for lost ticks */
186 if (lost >= 2)
187 jiffies_64 += lost - 1;
188
189 /* don't calculate delay for first run,
190 or if we've got less than a tick */
191 if (first_run || (lost < 1)) {
192 first_run = 0;
193 offset_delay = 0;
194 }
195}
196
197static int pmtmr_resume(void)
198{
199 write_seqlock(&monotonic_lock);
200 /* Assume this is the last mark offset time */
201 offset_tick = read_pmtmr();
202 write_sequnlock(&monotonic_lock);
203 return 0;
204}
205
206static unsigned long long monotonic_clock_pmtmr(void)
207{
208 u32 last_offset, this_offset;
209 unsigned long long base, ret;
210 unsigned seq;
211
212
213 /* atomically read monotonic base & last_offset */
214 do {
215 seq = read_seqbegin(&monotonic_lock);
216 last_offset = offset_tick;
217 base = monotonic_base;
218 } while (read_seqretry(&monotonic_lock, seq));
219
220 /* Read the pmtmr */
221 this_offset = read_pmtmr();
222
223 /* convert to nanoseconds */
224 ret = (this_offset - last_offset) & ACPI_PM_MASK;
225 ret = base + (cyc2us(ret) * NSEC_PER_USEC);
226 return ret;
227}
228
229static void delay_pmtmr(unsigned long loops)
230{
231 unsigned long bclock, now;
232
233 rdtscl(bclock);
234 do
235 {
236 rep_nop();
237 rdtscl(now);
238 } while ((now-bclock) < loops);
239}
240
241
242/*
243 * get the offset (in microseconds) from the last call to mark_offset()
244 * - Called holding a reader xtime_lock
245 */
246static unsigned long get_offset_pmtmr(void)
247{
248 u32 now, offset, delta = 0;
249
250 offset = offset_tick;
251 now = read_pmtmr();
252 delta = (now - offset)&ACPI_PM_MASK;
253
254 return (unsigned long) offset_delay + cyc2us(delta);
255}
256
257
258/* acpi timer_opts struct */
259static struct timer_opts timer_pmtmr = {
260 .name = "pmtmr",
261 .mark_offset = mark_offset_pmtmr,
262 .get_offset = get_offset_pmtmr,
263 .monotonic_clock = monotonic_clock_pmtmr,
264 .delay = delay_pmtmr,
265 .read_timer = read_timer_tsc,
266 .resume = pmtmr_resume,
267};
268
269struct init_timer_opts __initdata timer_pmtmr_init = {
270 .init = init_pmtmr,
271 .opts = &timer_pmtmr,
272};
273
274#ifdef CONFIG_PCI
275/*
276 * PIIX4 Errata:
277 *
278 * The power management timer may return improper results when read.
279 * Although the timer value settles properly after incrementing,
280 * while incrementing there is a 3 ns window every 69.8 ns where the
281 * timer value is indeterminate (a 4.2% chance that the data will be
282 * incorrect when read). As a result, the ACPI free running count up
283 * timer specification is violated due to erroneous reads.
284 */
285static int __init pmtmr_bug_check(void)
286{
287 static struct pci_device_id gray_list[] __initdata = {
288 /* these chipsets may have bug. */
289 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
290 PCI_DEVICE_ID_INTEL_82801DB_0) },
291 { },
292 };
293 struct pci_dev *dev;
294 int pmtmr_has_bug = 0;
295 u8 rev;
296
297 if (cur_timer != &timer_pmtmr || !pmtmr_need_workaround)
298 return 0;
299
300 dev = pci_get_device(PCI_VENDOR_ID_INTEL,
301 PCI_DEVICE_ID_INTEL_82371AB_3, NULL);
302 if (dev) {
303 pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
304 /* the bug has been fixed in PIIX4M */
305 if (rev < 3) {
306 printk(KERN_WARNING "* Found PM-Timer Bug on this "
307 "chipset. Due to workarounds for a bug,\n"
308 "* this time source is slow. Consider trying "
309 "other time sources (clock=)\n");
310 pmtmr_has_bug = 1;
311 }
312 pci_dev_put(dev);
313 }
314
315 if (pci_dev_present(gray_list)) {
316 printk(KERN_WARNING "* This chipset may have PM-Timer Bug. Due"
317 " to workarounds for a bug,\n"
318 "* this time source is slow. If you are sure your timer"
319 " does not have\n"
320 "* this bug, please use \"pmtmr_good\" to disable the "
321 "workaround\n");
322 pmtmr_has_bug = 1;
323 }
324
325 if (!pmtmr_has_bug)
326 pmtmr_need_workaround = 0;
327
328 return 0;
329}
330device_initcall(pmtmr_bug_check);
331#endif
332
333static int __init pmtr_good_setup(char *__str)
334{
335 pmtmr_need_workaround = 0;
336 return 1;
337}
338__setup("pmtmr_good", pmtr_good_setup);
339
340MODULE_LICENSE("GPL");
341MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
342MODULE_DESCRIPTION("Power Management Timer (PMTMR) as primary timing source for x86");
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c
deleted file mode 100644
index f1187ddb0d0f..000000000000
--- a/arch/i386/kernel/timers/timer_tsc.c
+++ /dev/null
@@ -1,617 +0,0 @@
1/*
2 * This code largely moved from arch/i386/kernel/time.c.
3 * See comments there for proper credits.
4 *
5 * 2004-06-25 Jesper Juhl
6 * moved mark_offset_tsc below cpufreq_delayed_get to avoid gcc 3.4
7 * failing to inline.
8 */
9
10#include <linux/spinlock.h>
11#include <linux/init.h>
12#include <linux/timex.h>
13#include <linux/errno.h>
14#include <linux/cpufreq.h>
15#include <linux/string.h>
16#include <linux/jiffies.h>
17
18#include <asm/timer.h>
19#include <asm/io.h>
20/* processor.h for tsc_disable flag */
21#include <asm/processor.h>
22
23#include "io_ports.h"
24#include "mach_timer.h"
25
26#include <asm/hpet.h>
27#include <asm/i8253.h>
28
29#ifdef CONFIG_HPET_TIMER
30static unsigned long hpet_usec_quotient;
31static unsigned long hpet_last;
32static struct timer_opts timer_tsc;
33#endif
34
35static inline void cpufreq_delayed_get(void);
36
37int tsc_disable __devinitdata = 0;
38
39static int use_tsc;
40/* Number of usecs that the last interrupt was delayed */
41static int delay_at_last_interrupt;
42
43static unsigned long last_tsc_low; /* lsb 32 bits of Time Stamp Counter */
44static unsigned long last_tsc_high; /* msb 32 bits of Time Stamp Counter */
45static unsigned long long monotonic_base;
46static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
47
48/* Avoid compensating for lost ticks before TSCs are synched */
49static int detect_lost_ticks;
50static int __init start_lost_tick_compensation(void)
51{
52 detect_lost_ticks = 1;
53 return 0;
54}
55late_initcall(start_lost_tick_compensation);
56
57/* convert from cycles(64bits) => nanoseconds (64bits)
58 * basic equation:
59 * ns = cycles / (freq / ns_per_sec)
60 * ns = cycles * (ns_per_sec / freq)
61 * ns = cycles * (10^9 / (cpu_khz * 10^3))
62 * ns = cycles * (10^6 / cpu_khz)
63 *
64 * Then we use scaling math (suggested by george@mvista.com) to get:
65 * ns = cycles * (10^6 * SC / cpu_khz) / SC
66 * ns = cycles * cyc2ns_scale / SC
67 *
68 * And since SC is a constant power of two, we can convert the div
69 * into a shift.
70 *
71 * We can use khz divisor instead of mhz to keep a better precision, since
72 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
73 * (mathieu.desnoyers@polymtl.ca)
74 *
75 * -johnstul@us.ibm.com "math is hard, lets go shopping!"
76 */
77static unsigned long cyc2ns_scale __read_mostly;
78#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
79
80static inline void set_cyc2ns_scale(unsigned long cpu_khz)
81{
82 cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
83}
84
85static inline unsigned long long cycles_2_ns(unsigned long long cyc)
86{
87 return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
88}
89
90static int count2; /* counter for mark_offset_tsc() */
91
92/* Cached *multiplier* to convert TSC counts to microseconds.
93 * (see the equation below).
94 * Equal to 2^32 * (1 / (clocks per usec) ).
95 * Initialized in time_init.
96 */
97static unsigned long fast_gettimeoffset_quotient;
98
99static unsigned long get_offset_tsc(void)
100{
101 register unsigned long eax, edx;
102
103 /* Read the Time Stamp Counter */
104
105 rdtsc(eax,edx);
106
107 /* .. relative to previous jiffy (32 bits is enough) */
108 eax -= last_tsc_low; /* tsc_low delta */
109
110 /*
111 * Time offset = (tsc_low delta) * fast_gettimeoffset_quotient
112 * = (tsc_low delta) * (usecs_per_clock)
113 * = (tsc_low delta) * (usecs_per_jiffy / clocks_per_jiffy)
114 *
115 * Using a mull instead of a divl saves up to 31 clock cycles
116 * in the critical path.
117 */
118
119 __asm__("mull %2"
120 :"=a" (eax), "=d" (edx)
121 :"rm" (fast_gettimeoffset_quotient),
122 "0" (eax));
123
124 /* our adjusted time offset in microseconds */
125 return delay_at_last_interrupt + edx;
126}
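A worked example of the mull above: on a 2 GHz CPU there are 2000 TSC clocks per usec, so fast_gettimeoffset_quotient = 2^32 / 2000, about 2,147,484; a tsc_low delta of 1,000,000 cycles then gives edx = (1,000,000 * 2,147,484) >> 32, about 500, i.e. 500 usec or half a millisecond.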
127
128static unsigned long long monotonic_clock_tsc(void)
129{
130 unsigned long long last_offset, this_offset, base;
131 unsigned seq;
132
133 /* atomically read monotonic base & last_offset */
134 do {
135 seq = read_seqbegin(&monotonic_lock);
136 last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
137 base = monotonic_base;
138 } while (read_seqretry(&monotonic_lock, seq));
139
140 /* Read the Time Stamp Counter */
141 rdtscll(this_offset);
142
143 /* return the value in ns */
144 return base + cycles_2_ns(this_offset - last_offset);
145}
146
147/*
148 * Scheduler clock - returns current time in nanosec units.
149 */
150unsigned long long sched_clock(void)
151{
152 unsigned long long this_offset;
153
154 /*
155 * In the NUMA case we don't use the TSC as they are not
156 * synchronized across all CPUs.
157 */
158#ifndef CONFIG_NUMA
159 if (!use_tsc)
160#endif
161 /* no locking but a rare wrong value is not a big deal */
162 return jiffies_64 * (1000000000 / HZ);
163
164 /* Read the Time Stamp Counter */
165 rdtscll(this_offset);
166
167 /* return the value in ns */
168 return cycles_2_ns(this_offset);
169}
170
171static void delay_tsc(unsigned long loops)
172{
173 unsigned long bclock, now;
174
175 rdtscl(bclock);
176 do
177 {
178 rep_nop();
179 rdtscl(now);
180 } while ((now-bclock) < loops);
181}
182
183#ifdef CONFIG_HPET_TIMER
184static void mark_offset_tsc_hpet(void)
185{
186 unsigned long long this_offset, last_offset;
187 unsigned long offset, temp, hpet_current;
188
189 write_seqlock(&monotonic_lock);
190 last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
191 /*
192 * It is important that these two operations happen almost at
193 * the same time. We do the RDTSC stuff first, since it's
194 * faster. To avoid any inconsistencies, we need interrupts
195 * disabled locally.
196 */
197 /*
198 * Interrupts are just disabled locally since the timer irq
199 * has the SA_INTERRUPT flag set. -arca
200 */
201 /* read Pentium cycle counter */
202
203 hpet_current = hpet_readl(HPET_COUNTER);
204 rdtsc(last_tsc_low, last_tsc_high);
205
206 /* lost tick compensation */
207 offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
208 if (unlikely(((offset - hpet_last) > hpet_tick) && (hpet_last != 0))
209 && detect_lost_ticks) {
210 int lost_ticks = (offset - hpet_last) / hpet_tick;
211 jiffies_64 += lost_ticks;
212 }
213 hpet_last = hpet_current;
214
215 /* update the monotonic base value */
216 this_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
217 monotonic_base += cycles_2_ns(this_offset - last_offset);
218 write_sequnlock(&monotonic_lock);
219
220 /* calculate delay_at_last_interrupt */
221 /*
222 * Time offset = (hpet delta) * ( usecs per HPET clock )
223 * = (hpet delta) * ( usecs per tick / HPET clocks per tick)
224 * = (hpet delta) * ( hpet_usec_quotient ) / (2^32)
225 * Where,
226 * hpet_usec_quotient = (2^32 * usecs per tick)/HPET clocks per tick
227 */
228 delay_at_last_interrupt = hpet_current - offset;
229 ASM_MUL64_REG(temp, delay_at_last_interrupt,
230 hpet_usec_quotient, delay_at_last_interrupt);
231}
232#endif
233
234
235#ifdef CONFIG_CPU_FREQ
236#include <linux/workqueue.h>
237
238static unsigned int cpufreq_delayed_issched = 0;
239static unsigned int cpufreq_init = 0;
240static struct work_struct cpufreq_delayed_get_work;
241
242static void handle_cpufreq_delayed_get(void *v)
243{
244 unsigned int cpu;
245 for_each_online_cpu(cpu) {
246 cpufreq_get(cpu);
247 }
248 cpufreq_delayed_issched = 0;
249}
250
251/* if we notice lost ticks, schedule a call to cpufreq_get() as it tries
252 * to verify the CPU frequency the timing core thinks the CPU is running
253 * at is still correct.
254 */
255static inline void cpufreq_delayed_get(void)
256{
257 if (cpufreq_init && !cpufreq_delayed_issched) {
258 cpufreq_delayed_issched = 1;
259 printk(KERN_DEBUG "Losing some ticks... checking if CPU frequency changed.\n");
260 schedule_work(&cpufreq_delayed_get_work);
261 }
262}
263
264/* If the CPU frequency is scaled, TSC-based delays will need a different
265 * loops_per_jiffy value to function properly.
266 */
267
268static unsigned int ref_freq = 0;
269static unsigned long loops_per_jiffy_ref = 0;
270
271#ifndef CONFIG_SMP
272static unsigned long fast_gettimeoffset_ref = 0;
273static unsigned int cpu_khz_ref = 0;
274#endif
275
276static int
277time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
278 void *data)
279{
280 struct cpufreq_freqs *freq = data;
281
282 if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
283 write_seqlock_irq(&xtime_lock);
284 if (!ref_freq) {
285 if (!freq->old){
286 ref_freq = freq->new;
287 goto end;
288 }
289 ref_freq = freq->old;
290 loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
291#ifndef CONFIG_SMP
292 fast_gettimeoffset_ref = fast_gettimeoffset_quotient;
293 cpu_khz_ref = cpu_khz;
294#endif
295 }
296
297 if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
298 (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
299 (val == CPUFREQ_RESUMECHANGE)) {
300 if (!(freq->flags & CPUFREQ_CONST_LOOPS))
301 cpu_data[freq->cpu].loops_per_jiffy = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
302#ifndef CONFIG_SMP
303 if (cpu_khz)
304 cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
305 if (use_tsc) {
306 if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
307 fast_gettimeoffset_quotient = cpufreq_scale(fast_gettimeoffset_ref, freq->new, ref_freq);
308 set_cyc2ns_scale(cpu_khz);
309 }
310 }
311#endif
312 }
313
314end:
315 if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
316 write_sequnlock_irq(&xtime_lock);
317
318 return 0;
319}
320
321static struct notifier_block time_cpufreq_notifier_block = {
322 .notifier_call = time_cpufreq_notifier
323};
324
325
326static int __init cpufreq_tsc(void)
327{
328 int ret;
329 INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
330 ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
331 CPUFREQ_TRANSITION_NOTIFIER);
332 if (!ret)
333 cpufreq_init = 1;
334 return ret;
335}
336core_initcall(cpufreq_tsc);
337
338#else /* CONFIG_CPU_FREQ */
339static inline void cpufreq_delayed_get(void) { return; }
340#endif
341
342int recalibrate_cpu_khz(void)
343{
344#ifndef CONFIG_SMP
345 unsigned int cpu_khz_old = cpu_khz;
346
347 if (cpu_has_tsc) {
348 local_irq_disable();
349 init_cpu_khz();
350 local_irq_enable();
351 cpu_data[0].loops_per_jiffy =
352 cpufreq_scale(cpu_data[0].loops_per_jiffy,
353 cpu_khz_old,
354 cpu_khz);
355 return 0;
356 } else
357 return -ENODEV;
358#else
359 return -ENODEV;
360#endif
361}
362EXPORT_SYMBOL(recalibrate_cpu_khz);
363
364static void mark_offset_tsc(void)
365{
366 unsigned long lost,delay;
367 unsigned long delta = last_tsc_low;
368 int count;
369 int countmp;
370 static int count1 = 0;
371 unsigned long long this_offset, last_offset;
372 static int lost_count = 0;
373
374 write_seqlock(&monotonic_lock);
375 last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
376 /*
377 * It is important that these two operations happen almost at
378 * the same time. We do the RDTSC stuff first, since it's
379 * faster. To avoid any inconsistencies, we need interrupts
380 * disabled locally.
381 */
382
383 /*
384 * Interrupts are just disabled locally since the timer irq
385 * has the SA_INTERRUPT flag set. -arca
386 */
387
388 /* read Pentium cycle counter */
389
390 rdtsc(last_tsc_low, last_tsc_high);
391
392 spin_lock(&i8253_lock);
393 outb_p(0x00, PIT_MODE); /* latch the count ASAP */
394
395 count = inb_p(PIT_CH0); /* read the latched count */
396 count |= inb(PIT_CH0) << 8;
397
398 /*
399 * VIA686a test code... reset the latch if count > max + 1
400 * from timer_pit.c - cjb
401 */
402 if (count > LATCH) {
403 outb_p(0x34, PIT_MODE);
404 outb_p(LATCH & 0xff, PIT_CH0);
405 outb(LATCH >> 8, PIT_CH0);
406 count = LATCH - 1;
407 }
408
409 spin_unlock(&i8253_lock);
410
411 if (pit_latch_buggy) {
412 /* get the median of the last 3 timer latch reads */
413 if ((count2 >= count && count >= count1)
414 || (count1 >= count && count >= count2)) {
415 count2 = count1; count1 = count;
416 } else if ((count1 >= count2 && count2 >= count)
417 || (count >= count2 && count2 >= count1)) {
418 countmp = count;count = count2;
419 count2 = count1;count1 = countmp;
420 } else {
421 count2 = count1; count1 = count; count = count1;
422 }
423 }
424
425 /* lost tick compensation */
426 delta = last_tsc_low - delta;
427 {
428 register unsigned long eax, edx;
429 eax = delta;
430 __asm__("mull %2"
431 :"=a" (eax), "=d" (edx)
432 :"rm" (fast_gettimeoffset_quotient),
433 "0" (eax));
434 delta = edx;
435 }
436 delta += delay_at_last_interrupt;
437 lost = delta/(1000000/HZ);
438 delay = delta%(1000000/HZ);
439 if (lost >= 2 && detect_lost_ticks) {
440 jiffies_64 += lost-1;
441
442 /* sanity check to ensure we're not always losing ticks */
443 if (lost_count++ > 100) {
444 printk(KERN_WARNING "Losing too many ticks!\n");
445 printk(KERN_WARNING "TSC cannot be used as a timesource. \n");
446 printk(KERN_WARNING "Possible reasons for this are:\n");
447 printk(KERN_WARNING " You're running with Speedstep,\n");
448 printk(KERN_WARNING " You don't have DMA enabled for your hard disk (see hdparm),\n");
449 printk(KERN_WARNING " Incorrect TSC synchronization on an SMP system (see dmesg).\n");
450 printk(KERN_WARNING "Falling back to a sane timesource now.\n");
451
452 clock_fallback();
453 }
454 /* ... but give the TSC a fair chance */
455 if (lost_count > 25)
456 cpufreq_delayed_get();
457 } else
458 lost_count = 0;
459 /* update the monotonic base value */
460 this_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
461 monotonic_base += cycles_2_ns(this_offset - last_offset);
462 write_sequnlock(&monotonic_lock);
463
464 /* calculate delay_at_last_interrupt */
465 count = ((LATCH-1) - count) * TICK_SIZE;
466 delay_at_last_interrupt = (count + LATCH/2) / LATCH;
467
468 /* catch corner case where tick rollover occurred
469 * between tsc and pit reads (as noted when
470 * usec delta is > 90% of usecs/tick)
471 */
472 if (lost && abs(delay - delay_at_last_interrupt) > (900000/HZ))
473 jiffies_64++;
474}
475
476static int __init init_tsc(char* override)
477{
478
479 /* check clock override */
480 if (override[0] && strncmp(override,"tsc",3)) {
481#ifdef CONFIG_HPET_TIMER
482 if (is_hpet_enabled()) {
483 printk(KERN_ERR "Warning: clock= override failed. Defaulting to tsc\n");
484 } else
485#endif
486 {
487 return -ENODEV;
488 }
489 }
490
491 /*
492 * If we have APM enabled or the CPU clock speed is variable
493 * (CPU stops clock on HLT or slows clock to save power)
494 * then the TSC timestamps may diverge by up to 1 jiffy from
495 * 'real time' but nothing will break.
496 * The most frequent case is that the CPU is "woken" from a halt
497 * state by the timer interrupt itself, so we get 0 error. In the
498 * rare cases where a driver would "wake" the CPU and request a
499 * timestamp, the maximum error is < 1 jiffy. But timestamps are
500 * still perfectly ordered.
501 * Note that the TSC counter will be reset if APM suspends
502 * to disk; this won't break the kernel, though, 'cuz we're
503 * smart. See arch/i386/kernel/apm.c.
504 */
505 /*
506 * Firstly we have to do a CPU check for chips with
507 * a potentially buggy TSC. At this point we haven't run
508 * the ident/bugs checks so we must run this hook as it
509 * may turn off the TSC flag.
510 *
511 * NOTE: this doesn't yet handle SMP 486 machines where only
512 * some CPUs have a TSC. That's never worked and nobody has
513 * moaned if you have the only one in the world - you fix it!
514 */
515
516 count2 = LATCH; /* initialize counter for mark_offset_tsc() */
517
518 if (cpu_has_tsc) {
519 unsigned long tsc_quotient;
520#ifdef CONFIG_HPET_TIMER
521 if (is_hpet_enabled() && hpet_use_timer) {
522 unsigned long result, remain;
523 printk("Using TSC for gettimeofday\n");
524 tsc_quotient = calibrate_tsc_hpet(NULL);
525 timer_tsc.mark_offset = &mark_offset_tsc_hpet;
526 /*
527 * Math to calculate hpet to usec multiplier
528 * Look for the comments at get_offset_tsc_hpet()
529 */
530 ASM_DIV64_REG(result, remain, hpet_tick,
531 0, KERNEL_TICK_USEC);
532 if (remain > (hpet_tick >> 1))
533 result++; /* rounding the result */
534
535 hpet_usec_quotient = result;
536 } else
537#endif
538 {
539 tsc_quotient = calibrate_tsc();
540 }
541
542 if (tsc_quotient) {
543 fast_gettimeoffset_quotient = tsc_quotient;
544 use_tsc = 1;
545 /*
546 * We could be more selective here I suspect
547 * and just enable this for the next intel chips ?
548 */
549 /* report CPU clock rate in Hz.
550 * The formula is (10^6 * 2^32) / (2^32 * 1 / (clocks/us)) =
551 * clock/second. Our precision is about 100 ppm.
552 */
553 { unsigned long eax=0, edx=1000;
554 __asm__("divl %2"
555 :"=a" (cpu_khz), "=d" (edx)
556 :"r" (tsc_quotient),
557 "0" (eax), "1" (edx));
558 printk("Detected %u.%03u MHz processor.\n",
559 cpu_khz / 1000, cpu_khz % 1000);
560 }
561 set_cyc2ns_scale(cpu_khz);
562 return 0;
563 }
564 }
565 return -ENODEV;
566}
567
568static int tsc_resume(void)
569{
570 write_seqlock(&monotonic_lock);
571 /* Assume this is the last mark offset time */
572 rdtsc(last_tsc_low, last_tsc_high);
573#ifdef CONFIG_HPET_TIMER
574 if (is_hpet_enabled() && hpet_use_timer)
575 hpet_last = hpet_readl(HPET_COUNTER);
576#endif
577 write_sequnlock(&monotonic_lock);
578 return 0;
579}
580
581#ifndef CONFIG_X86_TSC
582/* disable flag for tsc. Takes effect by clearing the TSC cpu flag
583 * in cpu/common.c */
584static int __init tsc_setup(char *str)
585{
586 tsc_disable = 1;
587 return 1;
588}
589#else
590static int __init tsc_setup(char *str)
591{
592 printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
593 "cannot disable TSC.\n");
594 return 1;
595}
596#endif
597__setup("notsc", tsc_setup);
598
599
600
601/************************************************************/
602
603/* tsc timer_opts struct */
604static struct timer_opts timer_tsc = {
605 .name = "tsc",
606 .mark_offset = mark_offset_tsc,
607 .get_offset = get_offset_tsc,
608 .monotonic_clock = monotonic_clock_tsc,
609 .delay = delay_tsc,
610 .read_timer = read_timer_tsc,
611 .resume = tsc_resume,
612};
613
614struct init_timer_opts __initdata timer_tsc_init = {
615 .init = init_tsc,
616 .opts = &timer_tsc,
617};
diff --git a/arch/i386/kernel/topology.c b/arch/i386/kernel/topology.c
index 296355292c7c..e2e281d4bcc8 100644
--- a/arch/i386/kernel/topology.c
+++ b/arch/i386/kernel/topology.c
@@ -32,15 +32,8 @@
32 32
33static struct i386_cpu cpu_devices[NR_CPUS]; 33static struct i386_cpu cpu_devices[NR_CPUS];
34 34
35int arch_register_cpu(int num){ 35int arch_register_cpu(int num)
36 struct node *parent = NULL; 36{
37
38#ifdef CONFIG_NUMA
39 int node = cpu_to_node(num);
40 if (node_online(node))
41 parent = &node_devices[node].node;
42#endif /* CONFIG_NUMA */
43
44 /* 37 /*
45 * CPU0 cannot be offlined due to several 38 * CPU0 cannot be offlined due to several
46 * restrictions and assumptions in kernel. This basically 39 * restrictions and assumptions in kernel. This basically
@@ -50,21 +43,13 @@ int arch_register_cpu(int num){
50 if (!num) 43 if (!num)
51 cpu_devices[num].cpu.no_control = 1; 44 cpu_devices[num].cpu.no_control = 1;
52 45
53 return register_cpu(&cpu_devices[num].cpu, num, parent); 46 return register_cpu(&cpu_devices[num].cpu, num);
54} 47}
55 48
56#ifdef CONFIG_HOTPLUG_CPU 49#ifdef CONFIG_HOTPLUG_CPU
57 50
58void arch_unregister_cpu(int num) { 51void arch_unregister_cpu(int num) {
59 struct node *parent = NULL; 52 return unregister_cpu(&cpu_devices[num].cpu);
60
61#ifdef CONFIG_NUMA
62 int node = cpu_to_node(num);
63 if (node_online(node))
64 parent = &node_devices[node].node;
65#endif /* CONFIG_NUMA */
66
67 return unregister_cpu(&cpu_devices[num].cpu, parent);
68} 53}
69EXPORT_SYMBOL(arch_register_cpu); 54EXPORT_SYMBOL(arch_register_cpu);
70EXPORT_SYMBOL(arch_unregister_cpu); 55EXPORT_SYMBOL(arch_unregister_cpu);
@@ -74,16 +59,13 @@ EXPORT_SYMBOL(arch_unregister_cpu);
74 59
75#ifdef CONFIG_NUMA 60#ifdef CONFIG_NUMA
76#include <linux/mmzone.h> 61#include <linux/mmzone.h>
77#include <asm/node.h>
78
79struct i386_node node_devices[MAX_NUMNODES];
80 62
81static int __init topology_init(void) 63static int __init topology_init(void)
82{ 64{
83 int i; 65 int i;
84 66
85 for_each_online_node(i) 67 for_each_online_node(i)
86 arch_register_node(i); 68 register_one_node(i);
87 69
88 for_each_present_cpu(i) 70 for_each_present_cpu(i)
89 arch_register_cpu(i); 71 arch_register_cpu(i);
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index dcc14477af1f..78464097470a 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -28,6 +28,7 @@
28#include <linux/utsname.h> 28#include <linux/utsname.h>
29#include <linux/kprobes.h> 29#include <linux/kprobes.h>
30#include <linux/kexec.h> 30#include <linux/kexec.h>
31#include <linux/unwind.h>
31 32
32#ifdef CONFIG_EISA 33#ifdef CONFIG_EISA
33#include <linux/ioport.h> 34#include <linux/ioport.h>
@@ -47,7 +48,7 @@
47#include <asm/desc.h> 48#include <asm/desc.h>
48#include <asm/i387.h> 49#include <asm/i387.h>
49#include <asm/nmi.h> 50#include <asm/nmi.h>
50 51#include <asm/unwind.h>
51#include <asm/smp.h> 52#include <asm/smp.h>
52#include <asm/arch_hooks.h> 53#include <asm/arch_hooks.h>
53#include <asm/kdebug.h> 54#include <asm/kdebug.h>
@@ -92,6 +93,7 @@ asmlinkage void spurious_interrupt_bug(void);
92asmlinkage void machine_check(void); 93asmlinkage void machine_check(void);
93 94
94static int kstack_depth_to_print = 24; 95static int kstack_depth_to_print = 24;
96static int call_trace = 1;
95ATOMIC_NOTIFIER_HEAD(i386die_chain); 97ATOMIC_NOTIFIER_HEAD(i386die_chain);
96 98
97int register_die_notifier(struct notifier_block *nb) 99int register_die_notifier(struct notifier_block *nb)
@@ -170,7 +172,23 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo,
170 return ebp; 172 return ebp;
171} 173}
172 174
173static void show_trace_log_lvl(struct task_struct *task, 175static asmlinkage int show_trace_unwind(struct unwind_frame_info *info, void *log_lvl)
176{
177 int n = 0;
178 int printed = 0; /* nr of entries already printed on current line */
179
180 while (unwind(info) == 0 && UNW_PC(info)) {
181 ++n;
182 printed = print_addr_and_symbol(UNW_PC(info), log_lvl, printed);
183 if (arch_unw_user_mode(info))
184 break;
185 }
186 if (printed)
187 printk("\n");
188 return n;
189}
190
191static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
174 unsigned long *stack, char *log_lvl) 192 unsigned long *stack, char *log_lvl)
175{ 193{
176 unsigned long ebp; 194 unsigned long ebp;
@@ -178,6 +196,26 @@ static void show_trace_log_lvl(struct task_struct *task,
178 if (!task) 196 if (!task)
179 task = current; 197 task = current;
180 198
199 if (call_trace >= 0) {
200 int unw_ret = 0;
201 struct unwind_frame_info info;
202
203 if (regs) {
204 if (unwind_init_frame_info(&info, task, regs) == 0)
205 unw_ret = show_trace_unwind(&info, log_lvl);
206 } else if (task == current)
207 unw_ret = unwind_init_running(&info, show_trace_unwind, log_lvl);
208 else {
209 if (unwind_init_blocked(&info, task) == 0)
210 unw_ret = show_trace_unwind(&info, log_lvl);
211 }
212 if (unw_ret > 0) {
213 if (call_trace > 0)
214 return;
215 printk("%sLegacy call trace:\n", log_lvl);
216 }
217 }
218
181 if (task == current) { 219 if (task == current) {
182 /* Grab ebp right from our regs */ 220 /* Grab ebp right from our regs */
183 asm ("movl %%ebp, %0" : "=r" (ebp) : ); 221 asm ("movl %%ebp, %0" : "=r" (ebp) : );
@@ -198,13 +236,13 @@ static void show_trace_log_lvl(struct task_struct *task,
198 } 236 }
199} 237}
200 238
201void show_trace(struct task_struct *task, unsigned long * stack) 239void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long * stack)
202{ 240{
203 show_trace_log_lvl(task, stack, ""); 241 show_trace_log_lvl(task, regs, stack, "");
204} 242}
205 243
206static void show_stack_log_lvl(struct task_struct *task, unsigned long *esp, 244static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
207 char *log_lvl) 245 unsigned long *esp, char *log_lvl)
208{ 246{
209 unsigned long *stack; 247 unsigned long *stack;
210 int i; 248 int i;
@@ -225,13 +263,13 @@ static void show_stack_log_lvl(struct task_struct *task, unsigned long *esp,
225 printk("%08lx ", *stack++); 263 printk("%08lx ", *stack++);
226 } 264 }
227 printk("\n%sCall Trace:\n", log_lvl); 265 printk("\n%sCall Trace:\n", log_lvl);
228 show_trace_log_lvl(task, esp, log_lvl); 266 show_trace_log_lvl(task, regs, esp, log_lvl);
229} 267}
230 268
231void show_stack(struct task_struct *task, unsigned long *esp) 269void show_stack(struct task_struct *task, unsigned long *esp)
232{ 270{
233 printk(" "); 271 printk(" ");
234 show_stack_log_lvl(task, esp, ""); 272 show_stack_log_lvl(task, NULL, esp, "");
235} 273}
236 274
237/* 275/*
@@ -241,7 +279,7 @@ void dump_stack(void)
241{ 279{
242 unsigned long stack; 280 unsigned long stack;
243 281
244 show_trace(current, &stack); 282 show_trace(current, NULL, &stack);
245} 283}
246 284
247EXPORT_SYMBOL(dump_stack); 285EXPORT_SYMBOL(dump_stack);
@@ -285,7 +323,7 @@ void show_registers(struct pt_regs *regs)
285 u8 __user *eip; 323 u8 __user *eip;
286 324
287 printk("\n" KERN_EMERG "Stack: "); 325 printk("\n" KERN_EMERG "Stack: ");
288 show_stack_log_lvl(NULL, (unsigned long *)esp, KERN_EMERG); 326 show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG);
289 327
290 printk(KERN_EMERG "Code: "); 328 printk(KERN_EMERG "Code: ");
291 329
@@ -1215,3 +1253,15 @@ static int __init kstack_setup(char *s)
1215 return 1; 1253 return 1;
1216} 1254}
1217__setup("kstack=", kstack_setup); 1255__setup("kstack=", kstack_setup);
1256
1257static int __init call_trace_setup(char *s)
1258{
1259 if (strcmp(s, "old") == 0)
1260 call_trace = -1;
1261 else if (strcmp(s, "both") == 0)
1262 call_trace = 0;
1263 else if (strcmp(s, "new") == 0)
1264 call_trace = 1;
1265 return 1;
1266}
1267__setup("call_trace=", call_trace_setup);
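
In practice the new knob is driven from the kernel command line: call_trace=new (the default) prints only the unwinder-generated backtrace whenever it yields entries, call_trace=both additionally appends the old frame-pointer scan under a "Legacy call trace:" header, and call_trace=old skips the unwinder entirely. As an illustration (the image path and root device below are placeholders, not taken from this patch):

	kernel /boot/vmlinuz ro root=/dev/sda1 call_trace=both
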
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
new file mode 100644
index 000000000000..7e0d8dab2075
--- /dev/null
+++ b/arch/i386/kernel/tsc.c
@@ -0,0 +1,478 @@
1/*
2 * This code largely moved from arch/i386/kernel/timer/timer_tsc.c
3 * which was originally moved from arch/i386/kernel/time.c.
4 * See comments there for proper credits.
5 */
6
7#include <linux/clocksource.h>
8#include <linux/workqueue.h>
9#include <linux/cpufreq.h>
10#include <linux/jiffies.h>
11#include <linux/init.h>
12#include <linux/dmi.h>
13
14#include <asm/delay.h>
15#include <asm/tsc.h>
16#include <asm/delay.h>
17#include <asm/io.h>
18
19#include "mach_timer.h"
20
21/*
22 * On some systems the TSC frequency does not
23 * change with the cpu frequency. So we need
24 * an extra value to store the TSC freq
25 */
26unsigned int tsc_khz;
27
28int tsc_disable __cpuinitdata = 0;
29
30#ifdef CONFIG_X86_TSC
31static int __init tsc_setup(char *str)
32{
33 printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
34 "cannot disable TSC.\n");
35 return 1;
36}
37#else
38/*
39 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
40 * in cpu/common.c
41 */
42static int __init tsc_setup(char *str)
43{
44 tsc_disable = 1;
45
46 return 1;
47}
48#endif
49
50__setup("notsc", tsc_setup);
51
52/*
53 * code to mark and check if the TSC is unstable
54 * due to cpufreq or due to unsynced TSCs
55 */
56static int tsc_unstable;
57
58static inline int check_tsc_unstable(void)
59{
60 return tsc_unstable;
61}
62
63void mark_tsc_unstable(void)
64{
65 tsc_unstable = 1;
66}
67EXPORT_SYMBOL_GPL(mark_tsc_unstable);
68
69/* Accelerators for sched_clock()
70 * convert from cycles(64bits) => nanoseconds (64bits)
71 * basic equation:
72 * ns = cycles / (freq / ns_per_sec)
73 * ns = cycles * (ns_per_sec / freq)
74 * ns = cycles * (10^9 / (cpu_khz * 10^3))
75 * ns = cycles * (10^6 / cpu_khz)
76 *
77 * Then we use scaling math (suggested by george@mvista.com) to get:
78 * ns = cycles * (10^6 * SC / cpu_khz) / SC
79 * ns = cycles * cyc2ns_scale / SC
80 *
81 * And since SC is a constant power of two, we can convert the div
82 * into a shift.
83 *
84 * We can use a khz divisor instead of mhz to keep better precision, since
85 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
86 * (mathieu.desnoyers@polymtl.ca)
87 *
88 * -johnstul@us.ibm.com "math is hard, lets go shopping!"
89 */
90static unsigned long cyc2ns_scale __read_mostly;
91
92#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
93
94static inline void set_cyc2ns_scale(unsigned long cpu_khz)
95{
96 cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
97}
98
99static inline unsigned long long cycles_2_ns(unsigned long long cyc)
100{
101 return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
102}
103
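
To make the scaling above concrete, here is a small stand-alone check of the same arithmetic (user-space C, not part of the patch; the 2 GHz figure is only an assumed example):

#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10	/* same 2^10 factor as above */

int main(void)
{
	unsigned long cpu_khz = 2000000;		/* assume a 2 GHz CPU */
	unsigned long cyc2ns_scale =
		(1000000UL << CYC2NS_SCALE_FACTOR) / cpu_khz;	/* = 512 */
	unsigned long long cycles = 2000000000ULL;	/* one second worth of cycles */

	/* ns = cycles * cyc2ns_scale >> CYC2NS_SCALE_FACTOR */
	printf("scale=%lu ns=%llu\n", cyc2ns_scale,
	       (cycles * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR);	/* prints 1000000000 */
	return 0;
}
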
104/*
105 * Scheduler clock - returns current time in nanosec units.
106 */
107unsigned long long sched_clock(void)
108{
109 unsigned long long this_offset;
110
111 /*
112 * in the NUMA case we don't use the TSC, since the TSCs are not
113 * synchronized across all CPUs.
114 */
115#ifndef CONFIG_NUMA
116 if (!cpu_khz || check_tsc_unstable())
117#endif
118 /* no locking but a rare wrong value is not a big deal */
119 return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
120
121 /* read the Time Stamp Counter: */
122 rdtscll(this_offset);
123
124 /* return the value in ns */
125 return cycles_2_ns(this_offset);
126}
127
128static unsigned long calculate_cpu_khz(void)
129{
130 unsigned long long start, end;
131 unsigned long count;
132 u64 delta64;
133 int i;
134 unsigned long flags;
135
136 local_irq_save(flags);
137
138 /* run 3 times to ensure the cache is warm */
139 for (i = 0; i < 3; i++) {
140 mach_prepare_counter();
141 rdtscll(start);
142 mach_countup(&count);
143 rdtscll(end);
144 }
145 /*
146 * Error: ECTCNEVERSET
147 * The CTC wasn't reliable: we got a hit on the very first read,
148 * or the CPU was so fast/slow that the quotient wouldn't fit in
149 * 32 bits..
150 */
151 if (count <= 1)
152 goto err;
153
154 delta64 = end - start;
155
156 /* cpu freq too fast: */
157 if (delta64 > (1ULL<<32))
158 goto err;
159
160 /* cpu freq too slow: */
161 if (delta64 <= CALIBRATE_TIME_MSEC)
162 goto err;
163
164 delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */
165 do_div(delta64,CALIBRATE_TIME_MSEC);
166
167 local_irq_restore(flags);
168 return (unsigned long)delta64;
169err:
170 local_irq_restore(flags);
171 return 0;
172}
173
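
The calibration above boils down to "TSC cycles counted during a fixed PIT window, divided by the window length in milliseconds, equals kHz". A stand-alone sketch of that division (the 30 ms window and the cycle count are assumed example values; the real window comes from CALIBRATE_TIME_MSEC in mach_timer.h):

#include <stdio.h>

int main(void)
{
	unsigned long long delta64 = 60000000ULL;	/* TSC cycles seen in the window */
	unsigned long window_msec = 30;			/* assumed calibration window */

	delta64 += window_msec / 2;			/* round for the divide */
	delta64 /= window_msec;				/* cycles per ms == kHz */

	printf("cpu_khz = %llu\n", delta64);		/* prints 2000000, i.e. 2 GHz */
	return 0;
}
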
174int recalibrate_cpu_khz(void)
175{
176#ifndef CONFIG_SMP
177 unsigned long cpu_khz_old = cpu_khz;
178
179 if (cpu_has_tsc) {
180 cpu_khz = calculate_cpu_khz();
181 tsc_khz = cpu_khz;
182 cpu_data[0].loops_per_jiffy =
183 cpufreq_scale(cpu_data[0].loops_per_jiffy,
184 cpu_khz_old, cpu_khz);
185 return 0;
186 } else
187 return -ENODEV;
188#else
189 return -ENODEV;
190#endif
191}
192
193EXPORT_SYMBOL(recalibrate_cpu_khz);
194
195void tsc_init(void)
196{
197 if (!cpu_has_tsc || tsc_disable)
198 return;
199
200 cpu_khz = calculate_cpu_khz();
201 tsc_khz = cpu_khz;
202
203 if (!cpu_khz)
204 return;
205
206 printk("Detected %lu.%03lu MHz processor.\n",
207 (unsigned long)cpu_khz / 1000,
208 (unsigned long)cpu_khz % 1000);
209
210 set_cyc2ns_scale(cpu_khz);
211 use_tsc_delay();
212}
213
214#ifdef CONFIG_CPU_FREQ
215
216static unsigned int cpufreq_delayed_issched = 0;
217static unsigned int cpufreq_init = 0;
218static struct work_struct cpufreq_delayed_get_work;
219
220static void handle_cpufreq_delayed_get(void *v)
221{
222 unsigned int cpu;
223
224 for_each_online_cpu(cpu)
225 cpufreq_get(cpu);
226
227 cpufreq_delayed_issched = 0;
228}
229
230/*
231 * If we notice cpufreq oddness, schedule a call to cpufreq_get(), which
232 * verifies that the CPU frequency the timing core thinks the CPU is
233 * running at is still correct.
234 */
235static inline void cpufreq_delayed_get(void)
236{
237 if (cpufreq_init && !cpufreq_delayed_issched) {
238 cpufreq_delayed_issched = 1;
239 printk(KERN_DEBUG "Checking if CPU frequency changed.\n");
240 schedule_work(&cpufreq_delayed_get_work);
241 }
242}
243
244/*
245 * if the CPU frequency is scaled, TSC-based delays will need a different
246 * loops_per_jiffy value to function properly.
247 */
248static unsigned int ref_freq = 0;
249static unsigned long loops_per_jiffy_ref = 0;
250static unsigned long cpu_khz_ref = 0;
251
252static int
253time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
254{
255 struct cpufreq_freqs *freq = data;
256
257 if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
258 write_seqlock_irq(&xtime_lock);
259
260 if (!ref_freq) {
261 if (!freq->old){
262 ref_freq = freq->new;
263 goto end;
264 }
265 ref_freq = freq->old;
266 loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
267 cpu_khz_ref = cpu_khz;
268 }
269
270 if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
271 (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
272 (val == CPUFREQ_RESUMECHANGE)) {
273 if (!(freq->flags & CPUFREQ_CONST_LOOPS))
274 cpu_data[freq->cpu].loops_per_jiffy =
275 cpufreq_scale(loops_per_jiffy_ref,
276 ref_freq, freq->new);
277
278 if (cpu_khz) {
279
280 if (num_online_cpus() == 1)
281 cpu_khz = cpufreq_scale(cpu_khz_ref,
282 ref_freq, freq->new);
283 if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
284 tsc_khz = cpu_khz;
285 set_cyc2ns_scale(cpu_khz);
286 /*
287 * TSC based sched_clock turns
288 * to junk w/ cpufreq
289 */
290 mark_tsc_unstable();
291 }
292 }
293 }
294end:
295 if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
296 write_sequnlock_irq(&xtime_lock);
297
298 return 0;
299}
300
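
cpufreq_scale() used above just rescales a reference value in proportion to the frequency change. A stand-alone sketch of that proportion (the frequencies and the loops_per_jiffy value are made-up examples, and the helper below only approximates the kernel's rounding):

#include <stdio.h>

static unsigned long scale(unsigned long ref, unsigned int ref_freq,
			   unsigned int new_freq)
{
	return (unsigned long)(((unsigned long long)ref * new_freq) / ref_freq);
}

int main(void)
{
	unsigned long lpj_ref = 3194880;	/* loops_per_jiffy measured at ref_freq */
	unsigned int ref_freq = 1600000;	/* 1.6 GHz, in kHz */
	unsigned int new_freq = 800000;		/* throttled to 800 MHz */

	printf("loops_per_jiffy: %lu -> %lu\n",
	       lpj_ref, scale(lpj_ref, ref_freq, new_freq));	/* halves to 1597440 */
	printf("cpu_khz: %u -> %lu\n",
	       ref_freq, scale(ref_freq, ref_freq, new_freq));	/* 800000 */
	return 0;
}
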
301static struct notifier_block time_cpufreq_notifier_block = {
302 .notifier_call = time_cpufreq_notifier
303};
304
305static int __init cpufreq_tsc(void)
306{
307 int ret;
308
309 INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
310 ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
311 CPUFREQ_TRANSITION_NOTIFIER);
312 if (!ret)
313 cpufreq_init = 1;
314
315 return ret;
316}
317
318core_initcall(cpufreq_tsc);
319
320#endif
321
322/* clock source code */
323
324static unsigned long current_tsc_khz = 0;
325static int tsc_update_callback(void);
326
327static cycle_t read_tsc(void)
328{
329 cycle_t ret;
330
331 rdtscll(ret);
332
333 return ret;
334}
335
336static struct clocksource clocksource_tsc = {
337 .name = "tsc",
338 .rating = 300,
339 .read = read_tsc,
340 .mask = CLOCKSOURCE_MASK(64),
341 .mult = 0, /* to be set */
342 .shift = 22,
343 .update_callback = tsc_update_callback,
344 .is_continuous = 1,
345};
346
347static int tsc_update_callback(void)
348{
349 int change = 0;
350
351 /* check to see if we should switch to the safe clocksource: */
352 if (clocksource_tsc.rating != 50 && check_tsc_unstable()) {
353 clocksource_tsc.rating = 50;
354 clocksource_reselect();
355 change = 1;
356 }
357
358 /* only update if tsc_khz has changed: */
359 if (current_tsc_khz != tsc_khz) {
360 current_tsc_khz = tsc_khz;
361 clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
362 clocksource_tsc.shift);
363 change = 1;
364 }
365
366 return change;
367}
368
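
The mult/shift pair is how the clocksource core turns raw TSC deltas into nanoseconds: delta_ns = (delta_cycles * mult) >> shift, with mult chosen so one cycle contributes 10^6/tsc_khz ns. A stand-alone approximation of what clocksource_khz2mult() computes (the 2 GHz value is an assumed example, and the kernel helper also rounds):

#include <stdio.h>

int main(void)
{
	unsigned int shift = 22;		/* same shift as clocksource_tsc */
	unsigned long tsc_khz = 2000000;	/* assume a 2 GHz TSC */
	unsigned long mult = (unsigned long)
		(((unsigned long long)1000000 << shift) / tsc_khz);	/* ns per cycle, << shift */
	unsigned long long cycles = 2000000000ULL;	/* one second worth of cycles */

	printf("mult=%lu ns=%llu\n", mult,
	       (cycles * mult) >> shift);	/* mult=2097152, ns=1000000000 */
	return 0;
}
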
369static int __init dmi_mark_tsc_unstable(struct dmi_system_id *d)
370{
371 printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
372 d->ident);
373 mark_tsc_unstable();
374 return 0;
375}
376
377/* List of systems that have known TSC problems */
378static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
379 {
380 .callback = dmi_mark_tsc_unstable,
381 .ident = "IBM Thinkpad 380XD",
382 .matches = {
383 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
384 DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
385 },
386 },
387 {}
388};
389
390#define TSC_FREQ_CHECK_INTERVAL (10*MSEC_PER_SEC) /* 10 seconds, in milliseconds */
391static struct timer_list verify_tsc_freq_timer;
392
393/* XXX - Probably should add locking */
394static void verify_tsc_freq(unsigned long unused)
395{
396 static u64 last_tsc;
397 static unsigned long last_jiffies;
398
399 u64 now_tsc, interval_tsc;
400 unsigned long now_jiffies, interval_jiffies;
401
402
403 if (check_tsc_unstable())
404 return;
405
406 rdtscll(now_tsc);
407 now_jiffies = jiffies;
408
409 if (!last_jiffies) {
410 goto out;
411 }
412
413 interval_jiffies = now_jiffies - last_jiffies;
414 interval_tsc = now_tsc - last_tsc;
415 interval_tsc *= HZ;
416 do_div(interval_tsc, cpu_khz*1000);
417
418 if (interval_tsc < (interval_jiffies * 3 / 4)) {
419 printk("TSC appears to be running slowly. "
420 "Marking it as unstable\n");
421 mark_tsc_unstable();
422 return;
423 }
424
425out:
426 last_tsc = now_tsc;
427 last_jiffies = now_jiffies;
428 /* set us up to go off on the next interval: */
429 mod_timer(&verify_tsc_freq_timer,
430 jiffies + msecs_to_jiffies(TSC_FREQ_CHECK_INTERVAL));
431}
432
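
The watchdog arithmetic above converts the elapsed TSC cycles into jiffies (cycles * HZ / (cpu_khz * 1000)) and flags the TSC if that comes out below three quarters of the jiffies that actually passed. A stand-alone run-through with assumed numbers (HZ=1000 and a 2 GHz TSC that only advanced 14e9 cycles across a 10 s interval):

#include <stdio.h>

int main(void)
{
	unsigned long hz = 1000;
	unsigned long cpu_khz = 2000000;
	unsigned long interval_jiffies = 10000;			/* 10 s of wall time at HZ=1000 */
	unsigned long long interval_tsc = 14000000000ULL;	/* cycles the TSC advanced */

	interval_tsc *= hz;
	interval_tsc /= (unsigned long long)cpu_khz * 1000;	/* cycles -> jiffies */

	printf("tsc says %llu jiffies, wall clock says %lu\n",
	       interval_tsc, interval_jiffies);			/* 7000 vs 10000 */
	if (interval_tsc < interval_jiffies * 3 / 4)
		printf("TSC running slowly, would be marked unstable\n");
	return 0;
}
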
433/*
434 * Make an educated guess whether the TSC is trustworthy and synchronized
435 * over all CPUs.
436 */
437static __init int unsynchronized_tsc(void)
438{
439 /*
440 * Intel systems are normally all synchronized.
441 * Exceptions must mark TSC as unstable:
442 */
443 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
444 return 0;
445
446 /* assume multi socket systems are not synchronized: */
447 return num_possible_cpus() > 1;
448}
449
450static int __init init_tsc_clocksource(void)
451{
452
453 if (cpu_has_tsc && tsc_khz && !tsc_disable) {
454 /* check blacklist */
455 dmi_check_system(bad_tsc_dmi_table);
456
457 if (unsynchronized_tsc()) /* mark unstable if unsynced */
458 mark_tsc_unstable();
459 current_tsc_khz = tsc_khz;
460 clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
461 clocksource_tsc.shift);
462 /* lower the rating if we already know it's unstable: */
463 if (check_tsc_unstable())
464 clocksource_tsc.rating = 50;
465
466 init_timer(&verify_tsc_freq_timer);
467 verify_tsc_freq_timer.function = verify_tsc_freq;
468 verify_tsc_freq_timer.expires =
469 jiffies + msecs_to_jiffies(TSC_FREQ_CHECK_INTERVAL);
470 add_timer(&verify_tsc_freq_timer);
471
472 return clocksource_register(&clocksource_tsc);
473 }
474
475 return 0;
476}
477
478module_init(init_tsc_clocksource);
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index 7512f39c9f25..2d4f1386e2b1 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -71,6 +71,15 @@ SECTIONS
71 .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) { *(.data.read_mostly) } 71 .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) { *(.data.read_mostly) }
72 _edata = .; /* End of data section */ 72 _edata = .; /* End of data section */
73 73
74#ifdef CONFIG_STACK_UNWIND
75 . = ALIGN(4);
76 .eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) {
77 __start_unwind = .;
78 *(.eh_frame)
79 __end_unwind = .;
80 }
81#endif
82
74 . = ALIGN(THREAD_SIZE); /* init_task */ 83 . = ALIGN(THREAD_SIZE); /* init_task */
75 .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) { 84 .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
76 *(.data.init_task) 85 *(.data.init_task)
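
The new CONFIG_STACK_UNWIND section simply keeps the compiler-emitted .eh_frame data in the image and brackets it with two linker symbols so the in-kernel DWARF unwinder can locate it. Presumably it is consumed roughly as sketched below (the setup function name is invented for illustration; only the two symbols come from this patch):

extern const char __start_unwind[], __end_unwind[];

/* conceptually, at unwinder init time: */
/*	setup_unwind_table(__start_unwind, __end_unwind - __start_unwind);	*/
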
diff --git a/arch/i386/kernel/vsyscall-sysenter.S b/arch/i386/kernel/vsyscall-sysenter.S
index 3b62baa6a371..1a36d26e15eb 100644
--- a/arch/i386/kernel/vsyscall-sysenter.S
+++ b/arch/i386/kernel/vsyscall-sysenter.S
@@ -42,10 +42,10 @@ __kernel_vsyscall:
42 /* 7: align return point with nop's to make disassembly easier */ 42 /* 7: align return point with nop's to make disassembly easier */
43 .space 7,0x90 43 .space 7,0x90
44 44
45 /* 14: System call restart point is here! (SYSENTER_RETURN - 2) */ 45 /* 14: System call restart point is here! (SYSENTER_RETURN-2) */
46 jmp .Lenter_kernel 46 jmp .Lenter_kernel
47 /* 16: System call normal return point is here! */ 47 /* 16: System call normal return point is here! */
48 .globl SYSENTER_RETURN /* Symbol used by entry.S. */ 48 .globl SYSENTER_RETURN /* Symbol used by sysenter.c */
49SYSENTER_RETURN: 49SYSENTER_RETURN:
50 pop %ebp 50 pop %ebp
51.Lpop_ebp: 51.Lpop_ebp:
diff --git a/arch/i386/kernel/vsyscall.lds.S b/arch/i386/kernel/vsyscall.lds.S
index 98699ca6e52d..e26975fc68b6 100644
--- a/arch/i386/kernel/vsyscall.lds.S
+++ b/arch/i386/kernel/vsyscall.lds.S
@@ -7,7 +7,7 @@
7 7
8SECTIONS 8SECTIONS
9{ 9{
10 . = VSYSCALL_BASE + SIZEOF_HEADERS; 10 . = VDSO_PRELINK + SIZEOF_HEADERS;
11 11
12 .hash : { *(.hash) } :text 12 .hash : { *(.hash) } :text
13 .dynsym : { *(.dynsym) } 13 .dynsym : { *(.dynsym) }
@@ -20,7 +20,7 @@ SECTIONS
20 For the layouts to match, we need to skip more than enough 20 For the layouts to match, we need to skip more than enough
21 space for the dynamic symbol table et al. If this amount 21 space for the dynamic symbol table et al. If this amount
22 is insufficient, ld -shared will barf. Just increase it here. */ 22 is insufficient, ld -shared will barf. Just increase it here. */
23 . = VSYSCALL_BASE + 0x400; 23 . = VDSO_PRELINK + 0x400;
24 24
25 .text : { *(.text) } :text =0x90909090 25 .text : { *(.text) } :text =0x90909090
26 .note : { *(.note.*) } :text :note 26 .note : { *(.note.*) } :text :note
diff --git a/arch/i386/lib/delay.c b/arch/i386/lib/delay.c
index c49a6acbee56..3c0714c4b669 100644
--- a/arch/i386/lib/delay.c
+++ b/arch/i386/lib/delay.c
@@ -10,43 +10,92 @@
10 * we have to worry about. 10 * we have to worry about.
11 */ 11 */
12 12
13#include <linux/module.h>
13#include <linux/config.h> 14#include <linux/config.h>
14#include <linux/sched.h> 15#include <linux/sched.h>
15#include <linux/delay.h> 16#include <linux/delay.h>
16#include <linux/module.h> 17
17#include <asm/processor.h> 18#include <asm/processor.h>
18#include <asm/delay.h> 19#include <asm/delay.h>
19#include <asm/timer.h> 20#include <asm/timer.h>
20 21
21#ifdef CONFIG_SMP 22#ifdef CONFIG_SMP
22#include <asm/smp.h> 23# include <asm/smp.h>
23#endif 24#endif
24 25
25extern struct timer_opts* timer; 26/* simple loop based delay: */
27static void delay_loop(unsigned long loops)
28{
29 int d0;
30
31 __asm__ __volatile__(
32 "\tjmp 1f\n"
33 ".align 16\n"
34 "1:\tjmp 2f\n"
35 ".align 16\n"
36 "2:\tdecl %0\n\tjns 2b"
37 :"=&a" (d0)
38 :"0" (loops));
39}
40
41/* TSC based delay: */
42static void delay_tsc(unsigned long loops)
43{
44 unsigned long bclock, now;
45
46 rdtscl(bclock);
47 do {
48 rep_nop();
49 rdtscl(now);
50 } while ((now-bclock) < loops);
51}
52
53/*
54 * Since we calibrate only once at boot, this
55 * function pointer should be set once at boot and not changed
56 */
57static void (*delay_fn)(unsigned long) = delay_loop;
58
59void use_tsc_delay(void)
60{
61 delay_fn = delay_tsc;
62}
63
64int read_current_timer(unsigned long *timer_val)
65{
66 if (delay_fn == delay_tsc) {
67 rdtscl(*timer_val);
68 return 0;
69 }
70 return -1;
71}
26 72
27void __delay(unsigned long loops) 73void __delay(unsigned long loops)
28{ 74{
29 cur_timer->delay(loops); 75 delay_fn(loops);
30} 76}
31 77
32inline void __const_udelay(unsigned long xloops) 78inline void __const_udelay(unsigned long xloops)
33{ 79{
34 int d0; 80 int d0;
81
35 xloops *= 4; 82 xloops *= 4;
36 __asm__("mull %0" 83 __asm__("mull %0"
37 :"=d" (xloops), "=&a" (d0) 84 :"=d" (xloops), "=&a" (d0)
38 :"1" (xloops),"0" (cpu_data[raw_smp_processor_id()].loops_per_jiffy * (HZ/4))); 85 :"1" (xloops), "0"
39 __delay(++xloops); 86 (cpu_data[raw_smp_processor_id()].loops_per_jiffy * (HZ/4)));
87
88 __delay(++xloops);
40} 89}
41 90
42void __udelay(unsigned long usecs) 91void __udelay(unsigned long usecs)
43{ 92{
44 __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */ 93 __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
45} 94}
46 95
47void __ndelay(unsigned long nsecs) 96void __ndelay(unsigned long nsecs)
48{ 97{
49 __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */ 98 __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
50} 99}
51 100
52EXPORT_SYMBOL(__delay); 101EXPORT_SYMBOL(__delay);
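
The magic constants in __udelay()/__ndelay() are just 2^32 divided by the target unit, rounded up so a delay can only err on the long side: 0x000010c7 = ceil(2^32 / 10^6) and 0x00005 = ceil(2^32 / 10^9). The mull in __const_udelay() then multiplies by loops-per-second (loops_per_jiffy * HZ) and keeps the top 32 bits of the product, which cancels the 2^32. A quick stand-alone check of the two constants (nothing kernel-specific assumed):

#include <stdio.h>

int main(void)
{
	unsigned long long two32 = 1ULL << 32;

	/* usec factor: 2^32 / 10^6, rounded up */
	printf("0x%llx\n", (two32 + 1000000 - 1) / 1000000);		/* 0x10c7 */
	/* nsec factor: 2^32 / 10^9, rounded up */
	printf("0x%llx\n", (two32 + 1000000000 - 1) / 1000000000);	/* 0x5 */
	return 0;
}
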
diff --git a/arch/i386/mach-voyager/setup.c b/arch/i386/mach-voyager/setup.c
index 0e225054e222..defc6ebbd565 100644
--- a/arch/i386/mach-voyager/setup.c
+++ b/arch/i386/mach-voyager/setup.c
@@ -5,10 +5,10 @@
5#include <linux/config.h> 5#include <linux/config.h>
6#include <linux/init.h> 6#include <linux/init.h>
7#include <linux/interrupt.h> 7#include <linux/interrupt.h>
8#include <asm/acpi.h>
9#include <asm/arch_hooks.h> 8#include <asm/arch_hooks.h>
10#include <asm/voyager.h> 9#include <asm/voyager.h>
11#include <asm/e820.h> 10#include <asm/e820.h>
11#include <asm/io.h>
12#include <asm/setup.h> 12#include <asm/setup.h>
13 13
14void __init pre_intr_init_hook(void) 14void __init pre_intr_init_hook(void)
@@ -27,8 +27,7 @@ void __init intr_init_hook(void)
27 smp_intr_init(); 27 smp_intr_init();
28#endif 28#endif
29 29
30 if (!acpi_ioapic) 30 setup_irq(2, &irq2);
31 setup_irq(2, &irq2);
32} 31}
33 32
34void __init pre_setup_arch_hook(void) 33void __init pre_setup_arch_hook(void)
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index 70e560a1b79a..8242af9ebc6f 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -661,6 +661,7 @@ do_boot_cpu(__u8 cpu)
661 print_cpu_info(&cpu_data[cpu]); 661 print_cpu_info(&cpu_data[cpu]);
662 wmb(); 662 wmb();
663 cpu_set(cpu, cpu_callout_map); 663 cpu_set(cpu, cpu_callout_map);
664 cpu_set(cpu, cpu_present_map);
664 } 665 }
665 else { 666 else {
666 printk("CPU%d FAILED TO BOOT: ", cpu); 667 printk("CPU%d FAILED TO BOOT: ", cpu);
@@ -1912,6 +1913,7 @@ void __devinit smp_prepare_boot_cpu(void)
1912 cpu_set(smp_processor_id(), cpu_online_map); 1913 cpu_set(smp_processor_id(), cpu_online_map);
1913 cpu_set(smp_processor_id(), cpu_callout_map); 1914 cpu_set(smp_processor_id(), cpu_callout_map);
1914 cpu_set(smp_processor_id(), cpu_possible_map); 1915 cpu_set(smp_processor_id(), cpu_possible_map);
1916 cpu_set(smp_processor_id(), cpu_present_map);
1915} 1917}
1916 1918
1917int __devinit 1919int __devinit
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index bd6fe96cc16d..6ee7faaf2c1b 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -30,6 +30,40 @@
30 30
31extern void die(const char *,struct pt_regs *,long); 31extern void die(const char *,struct pt_regs *,long);
32 32
33#ifdef CONFIG_KPROBES
34ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
35int register_page_fault_notifier(struct notifier_block *nb)
36{
37 vmalloc_sync_all();
38 return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
39}
40
41int unregister_page_fault_notifier(struct notifier_block *nb)
42{
43 return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
44}
45
46static inline int notify_page_fault(enum die_val val, const char *str,
47 struct pt_regs *regs, long err, int trap, int sig)
48{
49 struct die_args args = {
50 .regs = regs,
51 .str = str,
52 .err = err,
53 .trapnr = trap,
54 .signr = sig
55 };
56 return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
57}
58#else
59static inline int notify_page_fault(enum die_val val, const char *str,
60 struct pt_regs *regs, long err, int trap, int sig)
61{
62 return NOTIFY_DONE;
63}
64#endif
65
66
33/* 67/*
34 * Unlock any spinlocks which will prevent us from getting the 68 * Unlock any spinlocks which will prevent us from getting the
35 * message out 69 * message out
@@ -324,7 +358,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
324 if (unlikely(address >= TASK_SIZE)) { 358 if (unlikely(address >= TASK_SIZE)) {
325 if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0) 359 if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
326 return; 360 return;
327 if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14, 361 if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
328 SIGSEGV) == NOTIFY_STOP) 362 SIGSEGV) == NOTIFY_STOP)
329 return; 363 return;
330 /* 364 /*
@@ -334,7 +368,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
334 goto bad_area_nosemaphore; 368 goto bad_area_nosemaphore;
335 } 369 }
336 370
337 if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14, 371 if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
338 SIGSEGV) == NOTIFY_STOP) 372 SIGSEGV) == NOTIFY_STOP)
339 return; 373 return;
340 374
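
The switch from notify_die() to a dedicated notify_page_fault() chain means only code that explicitly registered for page faults (in practice kprobes) pays for the callback on this hot path. A rough sketch of how a client would attach to it, using only the symbols introduced above (the handler and variable names are made up):

static int my_pf_handler(struct notifier_block *nb, unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val == DIE_PAGE_FAULT) {
		/* inspect args->regs, args->err, args->trapnr here */
	}
	return NOTIFY_DONE;
}

static struct notifier_block my_pf_nb = {
	.notifier_call = my_pf_handler,
};

/* module init:  register_page_fault_notifier(&my_pf_nb);   */
/* module exit:  unregister_page_fault_notifier(&my_pf_nb); */
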
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index bf19513f0cea..f84b16e007ff 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -23,6 +23,7 @@
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/highmem.h> 24#include <linux/highmem.h>
25#include <linux/pagemap.h> 25#include <linux/pagemap.h>
26#include <linux/poison.h>
26#include <linux/bootmem.h> 27#include <linux/bootmem.h>
27#include <linux/slab.h> 28#include <linux/slab.h>
28#include <linux/proc_fs.h> 29#include <linux/proc_fs.h>
@@ -654,7 +655,7 @@ void __init mem_init(void)
654 */ 655 */
655#ifdef CONFIG_MEMORY_HOTPLUG 656#ifdef CONFIG_MEMORY_HOTPLUG
656#ifndef CONFIG_NEED_MULTIPLE_NODES 657#ifndef CONFIG_NEED_MULTIPLE_NODES
657int add_memory(u64 start, u64 size) 658int arch_add_memory(int nid, u64 start, u64 size)
658{ 659{
659 struct pglist_data *pgdata = &contig_page_data; 660 struct pglist_data *pgdata = &contig_page_data;
660 struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1; 661 struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
@@ -753,7 +754,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
753 for (addr = begin; addr < end; addr += PAGE_SIZE) { 754 for (addr = begin; addr < end; addr += PAGE_SIZE) {
754 ClearPageReserved(virt_to_page(addr)); 755 ClearPageReserved(virt_to_page(addr));
755 init_page_count(virt_to_page(addr)); 756 init_page_count(virt_to_page(addr));
756 memset((void *)addr, 0xcc, PAGE_SIZE); 757 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
757 free_page(addr); 758 free_page(addr);
758 totalram_pages++; 759 totalram_pages++;
759 } 760 }
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index 0887b34bc59b..353a836ed63c 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -229,8 +229,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
229 if (PageHighMem(page)) 229 if (PageHighMem(page))
230 return; 230 return;
231 if (!enable) 231 if (!enable)
232 mutex_debug_check_no_locks_freed(page_address(page), 232 debug_check_no_locks_freed(page_address(page),
233 numpages * PAGE_SIZE); 233 numpages * PAGE_SIZE);
234 234
235 /* the return value is ignored - the calls cannot fail, 235 /* the return value is ignored - the calls cannot fail,
236 * large pages are disabled at boot time. 236 * large pages are disabled at boot time.
diff --git a/arch/i386/oprofile/nmi_int.c b/arch/i386/oprofile/nmi_int.c
index ec0fd3cfa774..fa8a37bcb391 100644
--- a/arch/i386/oprofile/nmi_int.c
+++ b/arch/i386/oprofile/nmi_int.c
@@ -281,9 +281,9 @@ static int nmi_create_files(struct super_block * sb, struct dentry * root)
281 281
282 for (i = 0; i < model->num_counters; ++i) { 282 for (i = 0; i < model->num_counters; ++i) {
283 struct dentry * dir; 283 struct dentry * dir;
284 char buf[2]; 284 char buf[4];
285 285
286 snprintf(buf, 2, "%d", i); 286 snprintf(buf, sizeof(buf), "%d", i);
287 dir = oprofilefs_mkdir(sb, root, buf); 287 dir = oprofilefs_mkdir(sb, root, buf);
288 oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled); 288 oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
289 oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event); 289 oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
diff --git a/arch/i386/oprofile/op_model_athlon.c b/arch/i386/oprofile/op_model_athlon.c
index 3ad9a72a5036..693bdea4a52b 100644
--- a/arch/i386/oprofile/op_model_athlon.c
+++ b/arch/i386/oprofile/op_model_athlon.c
@@ -13,6 +13,7 @@
13#include <linux/oprofile.h> 13#include <linux/oprofile.h>
14#include <asm/ptrace.h> 14#include <asm/ptrace.h>
15#include <asm/msr.h> 15#include <asm/msr.h>
16#include <asm/nmi.h>
16 17
17#include "op_x86_model.h" 18#include "op_x86_model.h"
18#include "op_counter.h" 19#include "op_counter.h"
diff --git a/arch/i386/oprofile/op_model_p4.c b/arch/i386/oprofile/op_model_p4.c
index ac8a066035c2..7c61d357b82b 100644
--- a/arch/i386/oprofile/op_model_p4.c
+++ b/arch/i386/oprofile/op_model_p4.c
@@ -14,6 +14,7 @@
14#include <asm/ptrace.h> 14#include <asm/ptrace.h>
15#include <asm/fixmap.h> 15#include <asm/fixmap.h>
16#include <asm/apic.h> 16#include <asm/apic.h>
17#include <asm/nmi.h>
17 18
18#include "op_x86_model.h" 19#include "op_x86_model.h"
19#include "op_counter.h" 20#include "op_counter.h"
diff --git a/arch/i386/oprofile/op_model_ppro.c b/arch/i386/oprofile/op_model_ppro.c
index d719015fc044..5c3ab4b027ad 100644
--- a/arch/i386/oprofile/op_model_ppro.c
+++ b/arch/i386/oprofile/op_model_ppro.c
@@ -14,6 +14,7 @@
14#include <asm/ptrace.h> 14#include <asm/ptrace.h>
15#include <asm/msr.h> 15#include <asm/msr.h>
16#include <asm/apic.h> 16#include <asm/apic.h>
17#include <asm/nmi.h>
17 18
18#include "op_x86_model.h" 19#include "op_x86_model.h"
19#include "op_counter.h" 20#include "op_counter.h"
diff --git a/arch/i386/pci/pcbios.c b/arch/i386/pci/pcbios.c
index 1eec0868f4b3..ed1512a175ab 100644
--- a/arch/i386/pci/pcbios.c
+++ b/arch/i386/pci/pcbios.c
@@ -371,8 +371,7 @@ void __devinit pcibios_sort(void)
371 list_for_each(ln, &pci_devices) { 371 list_for_each(ln, &pci_devices) {
372 d = pci_dev_g(ln); 372 d = pci_dev_g(ln);
373 if (d->bus->number == bus && d->devfn == devfn) { 373 if (d->bus->number == bus && d->devfn == devfn) {
374 list_del(&d->global_list); 374 list_move_tail(&d->global_list, &sorted_devices);
375 list_add_tail(&d->global_list, &sorted_devices);
376 if (d == dev) 375 if (d == dev)
377 found = 1; 376 found = 1;
378 break; 377 break;
@@ -390,8 +389,7 @@ void __devinit pcibios_sort(void)
390 if (!found) { 389 if (!found) {
391 printk(KERN_WARNING "PCI: Device %s not found by BIOS\n", 390 printk(KERN_WARNING "PCI: Device %s not found by BIOS\n",
392 pci_name(dev)); 391 pci_name(dev));
393 list_del(&dev->global_list); 392 list_move_tail(&dev->global_list, &sorted_devices);
394 list_add_tail(&dev->global_list, &sorted_devices);
395 } 393 }
396 } 394 }
397 list_splice(&sorted_devices, &pci_devices); 395 list_splice(&sorted_devices, &pci_devices);
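
The pcbios.c hunks above (and the m68k ones further down) are behaviour-preserving: list_move_tail() is just the fused form of the list_del()/list_add_tail() pair it replaces. Roughly, as provided by <linux/list.h> (quoted from memory, so treat the body as a sketch):

static inline void list_move_tail(struct list_head *list, struct list_head *head)
{
	__list_del(list->prev, list->next);	/* unlink, without poisoning the entry */
	list_add_tail(list, head);		/* append at the tail of 'head' */
}
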
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 18318749884b..a56df7bf022d 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -374,6 +374,10 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
374 def_bool y 374 def_bool y
375 depends on NEED_MULTIPLE_NODES 375 depends on NEED_MULTIPLE_NODES
376 376
377config HAVE_ARCH_NODEDATA_EXTENSION
378 def_bool y
379 depends on NUMA
380
377config IA32_SUPPORT 381config IA32_SUPPORT
378 bool "Support for Linux/x86 binaries" 382 bool "Support for Linux/x86 binaries"
379 help 383 help
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 859fb37ff49b..303a9afcf2a1 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -959,7 +959,7 @@ remove_palinfo_proc_entries(unsigned int hcpu)
959 } 959 }
960} 960}
961 961
962static int palinfo_cpu_callback(struct notifier_block *nfb, 962static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
963 unsigned long action, 963 unsigned long action,
964 void *hcpu) 964 void *hcpu)
965{ 965{
@@ -978,7 +978,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
978 return NOTIFY_OK; 978 return NOTIFY_OK;
979} 979}
980 980
981static struct notifier_block palinfo_cpu_notifier = 981static struct notifier_block __cpuinitdata palinfo_cpu_notifier =
982{ 982{
983 .notifier_call = palinfo_cpu_callback, 983 .notifier_call = palinfo_cpu_callback,
984 .priority = 0, 984 .priority = 0,
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 355d57970ba3..b045c279136c 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -272,9 +272,9 @@ cpu_idle (void)
272 /* endless idle loop with no priority at all */ 272 /* endless idle loop with no priority at all */
273 while (1) { 273 while (1) {
274 if (can_do_pal_halt) 274 if (can_do_pal_halt)
275 clear_thread_flag(TIF_POLLING_NRFLAG); 275 current_thread_info()->status &= ~TS_POLLING;
276 else 276 else
277 set_thread_flag(TIF_POLLING_NRFLAG); 277 current_thread_info()->status |= TS_POLLING;
278 278
279 if (!need_resched()) { 279 if (!need_resched()) {
280 void (*idle)(void); 280 void (*idle)(void);
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 663a186ad194..9065f0f01ba3 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -572,7 +572,7 @@ static struct file_operations salinfo_data_fops = {
572}; 572};
573 573
574#ifdef CONFIG_HOTPLUG_CPU 574#ifdef CONFIG_HOTPLUG_CPU
575static int 575static int __devinit
576salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) 576salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
577{ 577{
578 unsigned int i, cpu = (unsigned long)hcpu; 578 unsigned int i, cpu = (unsigned long)hcpu;
@@ -673,9 +673,7 @@ salinfo_init(void)
673 salinfo_timer.function = &salinfo_timeout; 673 salinfo_timer.function = &salinfo_timeout;
674 add_timer(&salinfo_timer); 674 add_timer(&salinfo_timer);
675 675
676#ifdef CONFIG_HOTPLUG_CPU 676 register_hotcpu_notifier(&salinfo_cpu_notifier);
677 register_cpu_notifier(&salinfo_cpu_notifier);
678#endif
679 677
680 return 0; 678 return 0;
681} 679}
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 879edb51d1e0..5511d9c6c701 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -26,19 +26,10 @@
26#include <asm/numa.h> 26#include <asm/numa.h>
27#include <asm/cpu.h> 27#include <asm/cpu.h>
28 28
29#ifdef CONFIG_NUMA
30static struct node *sysfs_nodes;
31#endif
32static struct ia64_cpu *sysfs_cpus; 29static struct ia64_cpu *sysfs_cpus;
33 30
34int arch_register_cpu(int num) 31int arch_register_cpu(int num)
35{ 32{
36 struct node *parent = NULL;
37
38#ifdef CONFIG_NUMA
39 parent = &sysfs_nodes[cpu_to_node(num)];
40#endif /* CONFIG_NUMA */
41
42#if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU) 33#if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU)
43 /* 34 /*
44 * If CPEI cannot be re-targeted, and this is 35 * If CPEI cannot be re-targeted, and this is
@@ -48,21 +39,14 @@ int arch_register_cpu(int num)
48 sysfs_cpus[num].cpu.no_control = 1; 39 sysfs_cpus[num].cpu.no_control = 1;
49#endif 40#endif
50 41
51 return register_cpu(&sysfs_cpus[num].cpu, num, parent); 42 return register_cpu(&sysfs_cpus[num].cpu, num);
52} 43}
53 44
54#ifdef CONFIG_HOTPLUG_CPU 45#ifdef CONFIG_HOTPLUG_CPU
55 46
56void arch_unregister_cpu(int num) 47void arch_unregister_cpu(int num)
57{ 48{
58 struct node *parent = NULL; 49 return unregister_cpu(&sysfs_cpus[num].cpu);
59
60#ifdef CONFIG_NUMA
61 int node = cpu_to_node(num);
62 parent = &sysfs_nodes[node];
63#endif /* CONFIG_NUMA */
64
65 return unregister_cpu(&sysfs_cpus[num].cpu, parent);
66} 50}
67EXPORT_SYMBOL(arch_register_cpu); 51EXPORT_SYMBOL(arch_register_cpu);
68EXPORT_SYMBOL(arch_unregister_cpu); 52EXPORT_SYMBOL(arch_unregister_cpu);
@@ -74,17 +58,11 @@ static int __init topology_init(void)
74 int i, err = 0; 58 int i, err = 0;
75 59
76#ifdef CONFIG_NUMA 60#ifdef CONFIG_NUMA
77 sysfs_nodes = kzalloc(sizeof(struct node) * MAX_NUMNODES, GFP_KERNEL);
78 if (!sysfs_nodes) {
79 err = -ENOMEM;
80 goto out;
81 }
82
83 /* 61 /*
84 * MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes? 62 * MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes?
85 */ 63 */
86 for_each_online_node(i) { 64 for_each_online_node(i) {
87 if ((err = register_node(&sysfs_nodes[i], i, 0))) 65 if ((err = register_one_node(i)))
88 goto out; 66 goto out;
89 } 67 }
90#endif 68#endif
@@ -426,7 +404,7 @@ static int __cpuinit cache_remove_dev(struct sys_device * sys_dev)
426 * When a cpu is hot-plugged, do a check and initiate 404 * When a cpu is hot-plugged, do a check and initiate
427 * cache kobject if necessary 405 * cache kobject if necessary
428 */ 406 */
429static int cache_cpu_callback(struct notifier_block *nfb, 407static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
430 unsigned long action, void *hcpu) 408 unsigned long action, void *hcpu)
431{ 409{
432 unsigned int cpu = (unsigned long)hcpu; 410 unsigned int cpu = (unsigned long)hcpu;
@@ -444,7 +422,7 @@ static int cache_cpu_callback(struct notifier_block *nfb,
444 return NOTIFY_OK; 422 return NOTIFY_OK;
445} 423}
446 424
447static struct notifier_block cache_cpu_notifier = 425static struct notifier_block __cpuinitdata cache_cpu_notifier =
448{ 426{
449 .notifier_call = cache_cpu_callback 427 .notifier_call = cache_cpu_callback
450}; 428};
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index b6bcc9fa3603..525b082eb661 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -33,7 +33,6 @@
33 */ 33 */
34struct early_node_data { 34struct early_node_data {
35 struct ia64_node_data *node_data; 35 struct ia64_node_data *node_data;
36 pg_data_t *pgdat;
37 unsigned long pernode_addr; 36 unsigned long pernode_addr;
38 unsigned long pernode_size; 37 unsigned long pernode_size;
39 struct bootmem_data bootmem_data; 38 struct bootmem_data bootmem_data;
@@ -46,6 +45,8 @@ struct early_node_data {
46static struct early_node_data mem_data[MAX_NUMNODES] __initdata; 45static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
47static nodemask_t memory_less_mask __initdata; 46static nodemask_t memory_less_mask __initdata;
48 47
48static pg_data_t *pgdat_list[MAX_NUMNODES];
49
49/* 50/*
50 * To prevent cache aliasing effects, align per-node structures so that they 51 * To prevent cache aliasing effects, align per-node structures so that they
51 * start at addresses that are strided by node number. 52 * start at addresses that are strided by node number.
@@ -99,7 +100,7 @@ static int __init build_node_maps(unsigned long start, unsigned long len,
99 * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been 100 * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
100 * called yet. Note that node 0 will also count all non-existent cpus. 101 * called yet. Note that node 0 will also count all non-existent cpus.
101 */ 102 */
102static int __init early_nr_cpus_node(int node) 103static int __meminit early_nr_cpus_node(int node)
103{ 104{
104 int cpu, n = 0; 105 int cpu, n = 0;
105 106
@@ -114,7 +115,7 @@ static int __init early_nr_cpus_node(int node)
114 * compute_pernodesize - compute size of pernode data 115 * compute_pernodesize - compute size of pernode data
115 * @node: the node id. 116 * @node: the node id.
116 */ 117 */
117static unsigned long __init compute_pernodesize(int node) 118static unsigned long __meminit compute_pernodesize(int node)
118{ 119{
119 unsigned long pernodesize = 0, cpus; 120 unsigned long pernodesize = 0, cpus;
120 121
@@ -175,13 +176,13 @@ static void __init fill_pernode(int node, unsigned long pernode,
175 pernode += PERCPU_PAGE_SIZE * cpus; 176 pernode += PERCPU_PAGE_SIZE * cpus;
176 pernode += node * L1_CACHE_BYTES; 177 pernode += node * L1_CACHE_BYTES;
177 178
178 mem_data[node].pgdat = __va(pernode); 179 pgdat_list[node] = __va(pernode);
179 pernode += L1_CACHE_ALIGN(sizeof(pg_data_t)); 180 pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
180 181
181 mem_data[node].node_data = __va(pernode); 182 mem_data[node].node_data = __va(pernode);
182 pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data)); 183 pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
183 184
184 mem_data[node].pgdat->bdata = bdp; 185 pgdat_list[node]->bdata = bdp;
185 pernode += L1_CACHE_ALIGN(sizeof(pg_data_t)); 186 pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
186 187
187 cpu_data = per_cpu_node_setup(cpu_data, node); 188 cpu_data = per_cpu_node_setup(cpu_data, node);
@@ -268,7 +269,7 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
268static int __init free_node_bootmem(unsigned long start, unsigned long len, 269static int __init free_node_bootmem(unsigned long start, unsigned long len,
269 int node) 270 int node)
270{ 271{
271 free_bootmem_node(mem_data[node].pgdat, start, len); 272 free_bootmem_node(pgdat_list[node], start, len);
272 273
273 return 0; 274 return 0;
274} 275}
@@ -287,7 +288,7 @@ static void __init reserve_pernode_space(void)
287 int node; 288 int node;
288 289
289 for_each_online_node(node) { 290 for_each_online_node(node) {
290 pg_data_t *pdp = mem_data[node].pgdat; 291 pg_data_t *pdp = pgdat_list[node];
291 292
292 if (node_isset(node, memory_less_mask)) 293 if (node_isset(node, memory_less_mask))
293 continue; 294 continue;
@@ -307,6 +308,17 @@ static void __init reserve_pernode_space(void)
307 } 308 }
308} 309}
309 310
311static void __meminit scatter_node_data(void)
312{
313 pg_data_t **dst;
314 int node;
315
316 for_each_online_node(node) {
317 dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs;
318 memcpy(dst, pgdat_list, sizeof(pgdat_list));
319 }
320}
321
310/** 322/**
311 * initialize_pernode_data - fixup per-cpu & per-node pointers 323 * initialize_pernode_data - fixup per-cpu & per-node pointers
312 * 324 *
@@ -317,17 +329,10 @@ static void __init reserve_pernode_space(void)
317 */ 329 */
318static void __init initialize_pernode_data(void) 330static void __init initialize_pernode_data(void)
319{ 331{
320 pg_data_t *pgdat_list[MAX_NUMNODES];
321 int cpu, node; 332 int cpu, node;
322 333
323 for_each_online_node(node) 334 scatter_node_data();
324 pgdat_list[node] = mem_data[node].pgdat;
325 335
326 /* Copy the pg_data_t list to each node and init the node field */
327 for_each_online_node(node) {
328 memcpy(mem_data[node].node_data->pg_data_ptrs, pgdat_list,
329 sizeof(pgdat_list));
330 }
331#ifdef CONFIG_SMP 336#ifdef CONFIG_SMP
332 /* Set the node_data pointer for each per-cpu struct */ 337 /* Set the node_data pointer for each per-cpu struct */
333 for (cpu = 0; cpu < NR_CPUS; cpu++) { 338 for (cpu = 0; cpu < NR_CPUS; cpu++) {
@@ -372,7 +377,7 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
372 if (bestnode == -1) 377 if (bestnode == -1)
373 bestnode = anynode; 378 bestnode = anynode;
374 379
375 ptr = __alloc_bootmem_node(mem_data[bestnode].pgdat, pernodesize, 380 ptr = __alloc_bootmem_node(pgdat_list[bestnode], pernodesize,
376 PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); 381 PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
377 382
378 return ptr; 383 return ptr;
@@ -476,7 +481,7 @@ void __init find_memory(void)
476 pernodesize = mem_data[node].pernode_size; 481 pernodesize = mem_data[node].pernode_size;
477 map = pernode + pernodesize; 482 map = pernode + pernodesize;
478 483
479 init_bootmem_node(mem_data[node].pgdat, 484 init_bootmem_node(pgdat_list[node],
480 map>>PAGE_SHIFT, 485 map>>PAGE_SHIFT,
481 bdp->node_boot_start>>PAGE_SHIFT, 486 bdp->node_boot_start>>PAGE_SHIFT,
482 bdp->node_low_pfn); 487 bdp->node_low_pfn);
@@ -786,3 +791,21 @@ void __init paging_init(void)
786 791
787 zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page)); 792 zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
788} 793}
794
795pg_data_t *arch_alloc_nodedata(int nid)
796{
797 unsigned long size = compute_pernodesize(nid);
798
799 return kzalloc(size, GFP_KERNEL);
800}
801
802void arch_free_nodedata(pg_data_t *pgdat)
803{
804 kfree(pgdat);
805}
806
807void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
808{
809 pgdat_list[update_node] = update_pgdat;
810 scatter_node_data();
811}
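
Together with scatter_node_data(), the three arch_*_nodedata() hooks let generic memory hotplug bring up a brand-new node: allocate a correctly sized pg_data_t, let the core initialize it, then republish the pgdat_list[] copy held on every online node. A hedged sketch of that calling order (the surrounding caller is assumed, not part of this patch):

	pg_data_t *pgdat = arch_alloc_nodedata(nid);	/* kzalloc'd, per-node sized */
	if (!pgdat)
		return -ENOMEM;
	/* ... generic hotplug code fills in the new pgdat ... */
	arch_refresh_nodedata(nid, pgdat);	/* update pgdat_list[] and re-scatter it */
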
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index d98ec49570b8..14ef7cceb208 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -19,6 +19,40 @@
19 19
20extern void die (char *, struct pt_regs *, long); 20extern void die (char *, struct pt_regs *, long);
21 21
22#ifdef CONFIG_KPROBES
23ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
24
25/* Hook to register for page fault notifications */
26int register_page_fault_notifier(struct notifier_block *nb)
27{
28 return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
29}
30
31int unregister_page_fault_notifier(struct notifier_block *nb)
32{
33 return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
34}
35
36static inline int notify_page_fault(enum die_val val, const char *str,
37 struct pt_regs *regs, long err, int trap, int sig)
38{
39 struct die_args args = {
40 .regs = regs,
41 .str = str,
42 .err = err,
43 .trapnr = trap,
44 .signr = sig
45 };
46 return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
47}
48#else
49static inline int notify_page_fault(enum die_val val, const char *str,
50 struct pt_regs *regs, long err, int trap, int sig)
51{
52 return NOTIFY_DONE;
53}
54#endif
55
22/* 56/*
23 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment 57 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
24 * (inside region 5, on ia64) and that page is present. 58 * (inside region 5, on ia64) and that page is present.
@@ -84,7 +118,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
84 /* 118 /*
85 * This is to handle the kprobes on user space access instructions 119 * This is to handle the kprobes on user space access instructions
86 */ 120 */
87 if (notify_die(DIE_PAGE_FAULT, "page fault", regs, code, TRAP_BRKPT, 121 if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, code, TRAP_BRKPT,
88 SIGSEGV) == NOTIFY_STOP) 122 SIGSEGV) == NOTIFY_STOP)
89 return; 123 return;
90 124
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 11f08001f8c2..38306e98f04b 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -652,7 +652,7 @@ void online_page(struct page *page)
652 num_physpages++; 652 num_physpages++;
653} 653}
654 654
655int add_memory(u64 start, u64 size) 655int arch_add_memory(int nid, u64 start, u64 size)
656{ 656{
657 pg_data_t *pgdat; 657 pg_data_t *pgdat;
658 struct zone *zone; 658 struct zone *zone;
@@ -660,7 +660,7 @@ int add_memory(u64 start, u64 size)
660 unsigned long nr_pages = size >> PAGE_SHIFT; 660 unsigned long nr_pages = size >> PAGE_SHIFT;
661 int ret; 661 int ret;
662 662
663 pgdat = NODE_DATA(0); 663 pgdat = NODE_DATA(nid);
664 664
665 zone = pgdat->node_zones + ZONE_NORMAL; 665 zone = pgdat->node_zones + ZONE_NORMAL;
666 ret = __add_pages(zone, start_pfn, nr_pages); 666 ret = __add_pages(zone, start_pfn, nr_pages);
@@ -671,7 +671,6 @@ int add_memory(u64 start, u64 size)
671 671
672 return ret; 672 return ret;
673} 673}
674EXPORT_SYMBOL_GPL(add_memory);
675 674
676int remove_memory(u64 start, u64 size) 675int remove_memory(u64 start, u64 size)
677{ 676{
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index dc8e2b696713..677c6c0fd661 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -27,7 +27,7 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
27int sn_force_interrupt_flag = 1; 27int sn_force_interrupt_flag = 1;
28extern int sn_ioif_inited; 28extern int sn_ioif_inited;
29struct list_head **sn_irq_lh; 29struct list_head **sn_irq_lh;
30static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */ 30static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */
31 31
32u64 sn_intr_alloc(nasid_t local_nasid, int local_widget, 32u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
33 struct sn_irq_info *sn_irq_info, 33 struct sn_irq_info *sn_irq_info,
diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c
index 3cd3c2988a48..1ff483c8a4c9 100644
--- a/arch/m32r/kernel/setup.c
+++ b/arch/m32r/kernel/setup.c
@@ -275,7 +275,7 @@ static int __init topology_init(void)
275 int i; 275 int i;
276 276
277 for_each_present_cpu(i) 277 for_each_present_cpu(i)
278 register_cpu(&cpu_devices[i], i, NULL); 278 register_cpu(&cpu_devices[i], i);
279 279
280 return 0; 280 return 0;
281} 281}
diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c
index d6d582a5abb0..a226668f20c3 100644
--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -94,8 +94,7 @@ pmd_t *get_pointer_table (void)
94 PD_MARKBITS(dp) = mask & ~tmp; 94 PD_MARKBITS(dp) = mask & ~tmp;
95 if (!PD_MARKBITS(dp)) { 95 if (!PD_MARKBITS(dp)) {
96 /* move to end of list */ 96 /* move to end of list */
97 list_del(dp); 97 list_move_tail(dp, &ptable_list);
98 list_add_tail(dp, &ptable_list);
99 } 98 }
100 return (pmd_t *) (page_address(PD_PAGE(dp)) + off); 99 return (pmd_t *) (page_address(PD_PAGE(dp)) + off);
101} 100}
@@ -123,8 +122,7 @@ int free_pointer_table (pmd_t *ptable)
123 * move this descriptor to the front of the list, since 122 * move this descriptor to the front of the list, since
124 * it has one or more free tables. 123 * it has one or more free tables.
125 */ 124 */
126 list_del(dp); 125 list_move(dp, &ptable_list);
127 list_add(dp, &ptable_list);
128 } 126 }
129 return 0; 127 return 0;
130} 128}
diff --git a/arch/m68k/sun3/sun3dvma.c b/arch/m68k/sun3/sun3dvma.c
index f04a1d25f1a2..97c7bfde8ae8 100644
--- a/arch/m68k/sun3/sun3dvma.c
+++ b/arch/m68k/sun3/sun3dvma.c
@@ -119,8 +119,7 @@ static inline int refill(void)
119 if(hole->end == prev->start) { 119 if(hole->end == prev->start) {
120 hole->size += prev->size; 120 hole->size += prev->size;
121 hole->end = prev->end; 121 hole->end = prev->end;
122 list_del(&(prev->list)); 122 list_move(&(prev->list), &hole_cache);
123 list_add(&(prev->list), &hole_cache);
124 ret++; 123 ret++;
125 } 124 }
126 125
@@ -182,8 +181,7 @@ static inline unsigned long get_baddr(int len, unsigned long align)
182#endif 181#endif
183 return hole->end; 182 return hole->end;
184 } else if(hole->size == newlen) { 183 } else if(hole->size == newlen) {
185 list_del(&(hole->list)); 184 list_move(&(hole->list), &hole_cache);
186 list_add(&(hole->list), &hole_cache);
187 dvma_entry_use(hole->start) = newlen; 185 dvma_entry_use(hole->start) = newlen;
188#ifdef DVMA_DEBUG 186#ifdef DVMA_DEBUG
189 dvma_allocs++; 187 dvma_allocs++;
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index 6c6980b9b6d4..8b6e723eb82b 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -472,38 +472,46 @@ config 4KSTACKS
472 running more threads on a system and also reduces the pressure 472 running more threads on a system and also reduces the pressure
473 on the VM subsystem for higher order allocations. 473 on the VM subsystem for higher order allocations.
474 474
475choice 475comment "RAM configuration"
476 prompt "RAM size" 476
477 default AUTO 477config RAMBASE
478 478 hex "Address of the base of RAM"
479config RAMAUTO 479 default "0"
480 bool "AUTO" 480 help
481 ---help--- 481 Define the address that RAM starts at. On many platforms this is
482 Configure the RAM size on your platform. Many platforms can auto 482 0, the base of the address space, and this is the default. Some
483 detect this, on those choose the AUTO option. Otherwise set the 483 platforms choose to set up their RAM at other addresses within the
484 RAM size you intend using. 484 processor address space.
485 485
486config RAM4MB 486config RAMSIZE
487 bool "4MiB" 487 hex "Size of RAM (in bytes)"
488 help 488 default "0x400000"
489 Set RAM size to be 4MiB. 489 help
490 490 Define the size of the system RAM. If you select 0 then the
491config RAM8MB 491 kernel will try to probe the RAM size at runtime. This is not
492 bool "8MiB" 492 supported on all CPU types.
493 help 493
494 Set RAM size to be 8MiB. 494config VECTORBASE
495 495 hex "Address of the base of system vectors"
496config RAM16MB 496 default "0"
497 bool "16MiB" 497 help
498 help 498 Define the address of the system vectors. Commonly this is
499 Set RAM size to be 16MiB. 499 put at the start of RAM, but it doesn't have to be. On ColdFire
500 500 platforms this address is programmed into the VBR register, thus
501config RAM32MB 501 actually setting the address to use.
502 bool "32MiB" 502
503 help 503config KERNELBASE
504 Set RAM size to be 32MiB. 504 hex "Address of the base of kernel code"
505 505 default "0x400"
506endchoice 506 help
507 Typically on m68k systems the kernel will not start at the base
508 of RAM, but usually at some small offset from it. Define the start
509 address of the kernel here. The most common setup will have the
510 processor vectors at the base of RAM and then the start of the
511 kernel. On some platforms some RAM is reserved for boot loaders
512 and the kernel starts after that. The 0x400 default was based on
513 a system with the RAM based at address 0, and leaving enough room
514 for the theoretical maximum number of 256 vectors.
507 515
508choice 516choice
509 prompt "RAM bus width" 517 prompt "RAM bus width"
@@ -511,7 +519,7 @@ choice
511 519
512config RAMAUTOBIT 520config RAMAUTOBIT
513 bool "AUTO" 521 bool "AUTO"
514 ---help--- 522 help
515 Select the physical RAM data bus size. Not needed on most platforms, 523 Select the physical RAM data bus size. Not needed on most platforms,
516 so you can generally choose AUTO. 524 so you can generally choose AUTO.
517 525
@@ -545,7 +553,9 @@ config RAMKERNEL
545config ROMKERNEL 553config ROMKERNEL
546 bool "ROM" 554 bool "ROM"
547 help 555 help
548 The kernel will be resident in FLASH/ROM when running. 556 The kernel will be resident in FLASH/ROM when running. This is
557 often referred to as Execute-in-Place (XIP), since the kernel
558 code executes from the position it is stored in the FLASH/ROM.
549 559
550endchoice 560endchoice
551 561
diff --git a/arch/m68knommu/Makefile b/arch/m68knommu/Makefile
index 6f880cbff1c8..8951793fd8d4 100644
--- a/arch/m68knommu/Makefile
+++ b/arch/m68knommu/Makefile
@@ -21,6 +21,7 @@ platform-$(CONFIG_M527x) := 527x
21platform-$(CONFIG_M5272) := 5272 21platform-$(CONFIG_M5272) := 5272
22platform-$(CONFIG_M528x) := 528x 22platform-$(CONFIG_M528x) := 528x
23platform-$(CONFIG_M5307) := 5307 23platform-$(CONFIG_M5307) := 5307
24platform-$(CONFIG_M532x) := 532x
24platform-$(CONFIG_M5407) := 5407 25platform-$(CONFIG_M5407) := 5407
25PLATFORM := $(platform-y) 26PLATFORM := $(platform-y)
26 27
@@ -44,6 +45,7 @@ board-$(CONFIG_senTec) := senTec
44board-$(CONFIG_SNEHA) := SNEHA 45board-$(CONFIG_SNEHA) := SNEHA
45board-$(CONFIG_M5208EVB) := M5208EVB 46board-$(CONFIG_M5208EVB) := M5208EVB
46board-$(CONFIG_MOD5272) := MOD5272 47board-$(CONFIG_MOD5272) := MOD5272
48board-$(CONFIG_AVNET) := AVNET
47BOARD := $(board-y) 49BOARD := $(board-y)
48 50
49model-$(CONFIG_RAMKERNEL) := ram 51model-$(CONFIG_RAMKERNEL) := ram
@@ -65,6 +67,7 @@ cpuclass-$(CONFIG_M527x) := 5307
65cpuclass-$(CONFIG_M5272) := 5307 67cpuclass-$(CONFIG_M5272) := 5307
66cpuclass-$(CONFIG_M528x) := 5307 68cpuclass-$(CONFIG_M528x) := 5307
67cpuclass-$(CONFIG_M5307) := 5307 69cpuclass-$(CONFIG_M5307) := 5307
70cpuclass-$(CONFIG_M532x) := 5307
68cpuclass-$(CONFIG_M5407) := 5307 71cpuclass-$(CONFIG_M5407) := 5307
69cpuclass-$(CONFIG_M68328) := 68328 72cpuclass-$(CONFIG_M68328) := 68328
70cpuclass-$(CONFIG_M68EZ328) := 68328 73cpuclass-$(CONFIG_M68EZ328) := 68328
@@ -81,16 +84,17 @@ export PLATFORM BOARD MODEL CPUCLASS
81# 84#
82# Some CFLAG additions based on specific CPU type. 85# Some CFLAG additions based on specific CPU type.
83# 86#
84cflags-$(CONFIG_M5206) := -m5200 -Wa,-S -Wa,-m5200 87cflags-$(CONFIG_M5206) := -m5200
85cflags-$(CONFIG_M5206e) := -m5200 -Wa,-S -Wa,-m5200 88cflags-$(CONFIG_M5206e) := -m5200
86cflags-$(CONFIG_M520x) := -m5307 -Wa,-S -Wa,-m5307 89cflags-$(CONFIG_M520x) := -m5307
87cflags-$(CONFIG_M523x) := -m5307 -Wa,-S -Wa,-m5307 90cflags-$(CONFIG_M523x) := -m5307
88cflags-$(CONFIG_M5249) := -m5200 -Wa,-S -Wa,-m5200 91cflags-$(CONFIG_M5249) := -m5200
89cflags-$(CONFIG_M527x) := -m5307 -Wa,-S -Wa,-m5307 92cflags-$(CONFIG_M527x) := -m5307
90cflags-$(CONFIG_M5272) := -m5307 -Wa,-S -Wa,-m5307 93cflags-$(CONFIG_M5272) := -m5307
91cflags-$(CONFIG_M528x) := -m5307 -Wa,-S -Wa,-m5307 94cflags-$(CONFIG_M528x) := -m5307
92cflags-$(CONFIG_M5307) := -m5307 -Wa,-S -Wa,-m5307 95cflags-$(CONFIG_M5307) := -m5307
93cflags-$(CONFIG_M5407) := -m5200 -Wa,-S -Wa,-m5200 96cflags-$(CONFIG_M532x) := -m5307
97cflags-$(CONFIG_M5407) := -m5200
94cflags-$(CONFIG_M68328) := -m68000 98cflags-$(CONFIG_M68328) := -m68000
95cflags-$(CONFIG_M68EZ328) := -m68000 99cflags-$(CONFIG_M68EZ328) := -m68000
96cflags-$(CONFIG_M68VZ328) := -m68000 100cflags-$(CONFIG_M68VZ328) := -m68000
diff --git a/arch/m68knommu/defconfig b/arch/m68knommu/defconfig
index 2d59ba1a79ba..3891de09ac23 100644
--- a/arch/m68knommu/defconfig
+++ b/arch/m68knommu/defconfig
@@ -1,21 +1,22 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.13-uc0 3# Linux kernel version: 2.6.17
4# Wed Aug 31 15:03:26 2005 4# Tue Jun 27 12:57:06 2006
5# 5#
6CONFIG_M68KNOMMU=y 6CONFIG_M68K=y
7# CONFIG_MMU is not set 7# CONFIG_MMU is not set
8# CONFIG_FPU is not set 8# CONFIG_FPU is not set
9CONFIG_UID16=y
10CONFIG_RWSEM_GENERIC_SPINLOCK=y 9CONFIG_RWSEM_GENERIC_SPINLOCK=y
11# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set 10# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
11CONFIG_GENERIC_FIND_NEXT_BIT=y
12CONFIG_GENERIC_HWEIGHT=y
12CONFIG_GENERIC_CALIBRATE_DELAY=y 13CONFIG_GENERIC_CALIBRATE_DELAY=y
14CONFIG_TIME_LOW_RES=y
13 15
14# 16#
15# Code maturity level options 17# Code maturity level options
16# 18#
17CONFIG_EXPERIMENTAL=y 19CONFIG_EXPERIMENTAL=y
18CONFIG_CLEAN_COMPILE=y
19CONFIG_BROKEN_ON_SMP=y 20CONFIG_BROKEN_ON_SMP=y
20CONFIG_INIT_ENV_ARG_LIMIT=32 21CONFIG_INIT_ENV_ARG_LIMIT=32
21 22
@@ -23,26 +24,30 @@ CONFIG_INIT_ENV_ARG_LIMIT=32
23# General setup 24# General setup
24# 25#
25CONFIG_LOCALVERSION="" 26CONFIG_LOCALVERSION=""
27CONFIG_LOCALVERSION_AUTO=y
28# CONFIG_SYSVIPC is not set
26# CONFIG_POSIX_MQUEUE is not set 29# CONFIG_POSIX_MQUEUE is not set
27# CONFIG_BSD_PROCESS_ACCT is not set 30# CONFIG_BSD_PROCESS_ACCT is not set
28# CONFIG_SYSCTL is not set 31# CONFIG_SYSCTL is not set
29# CONFIG_AUDIT is not set 32# CONFIG_AUDIT is not set
30# CONFIG_HOTPLUG is not set
31# CONFIG_KOBJECT_UEVENT is not set
32# CONFIG_IKCONFIG is not set 33# CONFIG_IKCONFIG is not set
34# CONFIG_RELAY is not set
35CONFIG_INITRAMFS_SOURCE=""
36CONFIG_UID16=y
37# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
33CONFIG_EMBEDDED=y 38CONFIG_EMBEDDED=y
34# CONFIG_KALLSYMS is not set 39# CONFIG_KALLSYMS is not set
40# CONFIG_HOTPLUG is not set
35CONFIG_PRINTK=y 41CONFIG_PRINTK=y
36CONFIG_BUG=y 42CONFIG_BUG=y
43CONFIG_ELF_CORE=y
37CONFIG_BASE_FULL=y 44CONFIG_BASE_FULL=y
38# CONFIG_FUTEX is not set 45# CONFIG_FUTEX is not set
39# CONFIG_EPOLL is not set 46# CONFIG_EPOLL is not set
40# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 47CONFIG_SLAB=y
41CONFIG_CC_ALIGN_FUNCTIONS=0 48CONFIG_TINY_SHMEM=y
42CONFIG_CC_ALIGN_LABELS=0
43CONFIG_CC_ALIGN_LOOPS=0
44CONFIG_CC_ALIGN_JUMPS=0
45CONFIG_BASE_SMALL=0 49CONFIG_BASE_SMALL=0
50# CONFIG_SLOB is not set
46 51
47# 52#
48# Loadable module support 53# Loadable module support
@@ -50,6 +55,24 @@ CONFIG_BASE_SMALL=0
50# CONFIG_MODULES is not set 55# CONFIG_MODULES is not set
51 56
52# 57#
58# Block layer
59#
60# CONFIG_BLK_DEV_IO_TRACE is not set
61
62#
63# IO Schedulers
64#
65CONFIG_IOSCHED_NOOP=y
66# CONFIG_IOSCHED_AS is not set
67# CONFIG_IOSCHED_DEADLINE is not set
68# CONFIG_IOSCHED_CFQ is not set
69# CONFIG_DEFAULT_AS is not set
70# CONFIG_DEFAULT_DEADLINE is not set
71# CONFIG_DEFAULT_CFQ is not set
72CONFIG_DEFAULT_NOOP=y
73CONFIG_DEFAULT_IOSCHED="noop"
74
75#
53# Processor type and features 76# Processor type and features
54# 77#
55# CONFIG_M68328 is not set 78# CONFIG_M68328 is not set
@@ -58,6 +81,7 @@ CONFIG_BASE_SMALL=0
58# CONFIG_M68360 is not set 81# CONFIG_M68360 is not set
59# CONFIG_M5206 is not set 82# CONFIG_M5206 is not set
60# CONFIG_M5206e is not set 83# CONFIG_M5206e is not set
84# CONFIG_M520x is not set
61# CONFIG_M523x is not set 85# CONFIG_M523x is not set
62# CONFIG_M5249 is not set 86# CONFIG_M5249 is not set
63# CONFIG_M5271 is not set 87# CONFIG_M5271 is not set
@@ -65,29 +89,12 @@ CONFIG_M5272=y
65# CONFIG_M5275 is not set 89# CONFIG_M5275 is not set
66# CONFIG_M528x is not set 90# CONFIG_M528x is not set
67# CONFIG_M5307 is not set 91# CONFIG_M5307 is not set
92# CONFIG_M532x is not set
68# CONFIG_M5407 is not set 93# CONFIG_M5407 is not set
69CONFIG_COLDFIRE=y 94CONFIG_COLDFIRE=y
70# CONFIG_CLOCK_AUTO is not set 95CONFIG_CLOCK_SET=y
71# CONFIG_CLOCK_11MHz is not set 96CONFIG_CLOCK_FREQ=66666666
72# CONFIG_CLOCK_16MHz is not set 97CONFIG_CLOCK_DIV=1
73# CONFIG_CLOCK_20MHz is not set
74# CONFIG_CLOCK_24MHz is not set
75# CONFIG_CLOCK_25MHz is not set
76# CONFIG_CLOCK_33MHz is not set
77# CONFIG_CLOCK_40MHz is not set
78# CONFIG_CLOCK_45MHz is not set
79# CONFIG_CLOCK_48MHz is not set
80# CONFIG_CLOCK_50MHz is not set
81# CONFIG_CLOCK_54MHz is not set
82# CONFIG_CLOCK_60MHz is not set
83# CONFIG_CLOCK_62_5MHz is not set
84# CONFIG_CLOCK_64MHz is not set
85CONFIG_CLOCK_66MHz=y
86# CONFIG_CLOCK_70MHz is not set
87# CONFIG_CLOCK_100MHz is not set
88# CONFIG_CLOCK_140MHz is not set
89# CONFIG_CLOCK_150MHz is not set
90# CONFIG_CLOCK_166MHz is not set
91 98
92# 99#
93# Platform 100# Platform
@@ -102,11 +109,14 @@ CONFIG_M5272C3=y
102CONFIG_FREESCALE=y 109CONFIG_FREESCALE=y
103# CONFIG_LARGE_ALLOCS is not set 110# CONFIG_LARGE_ALLOCS is not set
104CONFIG_4KSTACKS=y 111CONFIG_4KSTACKS=y
105CONFIG_RAMAUTO=y 112
106# CONFIG_RAM4MB is not set 113#
107# CONFIG_RAM8MB is not set 114# RAM configuration
108# CONFIG_RAM16MB is not set 115#
109# CONFIG_RAM32MB is not set 116CONFIG_RAMBASE=0x0
117CONFIG_RAMSIZE=0x800000
118CONFIG_VECTORBASE=0x0
119CONFIG_KERNELBASE=0x20000
110CONFIG_RAMAUTOBIT=y 120CONFIG_RAMAUTOBIT=y
111# CONFIG_RAM8BIT is not set 121# CONFIG_RAM8BIT is not set
112# CONFIG_RAM16BIT is not set 122# CONFIG_RAM16BIT is not set
@@ -119,6 +129,8 @@ CONFIG_FLATMEM_MANUAL=y
119# CONFIG_SPARSEMEM_MANUAL is not set 129# CONFIG_SPARSEMEM_MANUAL is not set
120CONFIG_FLATMEM=y 130CONFIG_FLATMEM=y
121CONFIG_FLAT_NODE_MEM_MAP=y 131CONFIG_FLAT_NODE_MEM_MAP=y
132# CONFIG_SPARSEMEM_STATIC is not set
133CONFIG_SPLIT_PTLOCK_CPUS=4
122 134
123# 135#
124# Bus options (PCI, PCMCIA, EISA, MCA, ISA) 136# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
@@ -140,6 +152,7 @@ CONFIG_FLAT_NODE_MEM_MAP=y
140CONFIG_BINFMT_FLAT=y 152CONFIG_BINFMT_FLAT=y
141# CONFIG_BINFMT_ZFLAT is not set 153# CONFIG_BINFMT_ZFLAT is not set
142# CONFIG_BINFMT_SHARED_FLAT is not set 154# CONFIG_BINFMT_SHARED_FLAT is not set
155# CONFIG_BINFMT_AOUT is not set
143# CONFIG_BINFMT_MISC is not set 156# CONFIG_BINFMT_MISC is not set
144 157
145# 158#
@@ -155,6 +168,7 @@ CONFIG_NET=y
155# 168#
156# Networking options 169# Networking options
157# 170#
171# CONFIG_NETDEBUG is not set
158CONFIG_PACKET=y 172CONFIG_PACKET=y
159# CONFIG_PACKET_MMAP is not set 173# CONFIG_PACKET_MMAP is not set
160CONFIG_UNIX=y 174CONFIG_UNIX=y
@@ -171,18 +185,30 @@ CONFIG_IP_FIB_HASH=y
171# CONFIG_INET_AH is not set 185# CONFIG_INET_AH is not set
172# CONFIG_INET_ESP is not set 186# CONFIG_INET_ESP is not set
173# CONFIG_INET_IPCOMP is not set 187# CONFIG_INET_IPCOMP is not set
188# CONFIG_INET_XFRM_TUNNEL is not set
174# CONFIG_INET_TUNNEL is not set 189# CONFIG_INET_TUNNEL is not set
175# CONFIG_IP_TCPDIAG is not set 190# CONFIG_INET_DIAG is not set
176# CONFIG_IP_TCPDIAG_IPV6 is not set
177# CONFIG_TCP_CONG_ADVANCED is not set 191# CONFIG_TCP_CONG_ADVANCED is not set
178CONFIG_TCP_CONG_BIC=y 192CONFIG_TCP_CONG_BIC=y
179# CONFIG_IPV6 is not set 193# CONFIG_IPV6 is not set
194# CONFIG_INET6_XFRM_TUNNEL is not set
195# CONFIG_INET6_TUNNEL is not set
180# CONFIG_NETFILTER is not set 196# CONFIG_NETFILTER is not set
181 197
182# 198#
199# DCCP Configuration (EXPERIMENTAL)
200#
201# CONFIG_IP_DCCP is not set
202
203#
183# SCTP Configuration (EXPERIMENTAL) 204# SCTP Configuration (EXPERIMENTAL)
184# 205#
185# CONFIG_IP_SCTP is not set 206# CONFIG_IP_SCTP is not set
207
208#
209# TIPC Configuration (EXPERIMENTAL)
210#
211# CONFIG_TIPC is not set
186# CONFIG_ATM is not set 212# CONFIG_ATM is not set
187# CONFIG_BRIDGE is not set 213# CONFIG_BRIDGE is not set
188# CONFIG_VLAN_8021Q is not set 214# CONFIG_VLAN_8021Q is not set
@@ -195,8 +221,11 @@ CONFIG_TCP_CONG_BIC=y
195# CONFIG_NET_DIVERT is not set 221# CONFIG_NET_DIVERT is not set
196# CONFIG_ECONET is not set 222# CONFIG_ECONET is not set
197# CONFIG_WAN_ROUTER is not set 223# CONFIG_WAN_ROUTER is not set
224
225#
226# QoS and/or fair queueing
227#
198# CONFIG_NET_SCHED is not set 228# CONFIG_NET_SCHED is not set
199# CONFIG_NET_CLS_ROUTE is not set
200 229
201# 230#
202# Network testing 231# Network testing
@@ -205,6 +234,7 @@ CONFIG_TCP_CONG_BIC=y
205# CONFIG_HAMRADIO is not set 234# CONFIG_HAMRADIO is not set
206# CONFIG_IRDA is not set 235# CONFIG_IRDA is not set
207# CONFIG_BT is not set 236# CONFIG_BT is not set
237# CONFIG_IEEE80211 is not set
208 238
209# 239#
210# Device Drivers 240# Device Drivers
@@ -218,6 +248,11 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
218# CONFIG_FW_LOADER is not set 248# CONFIG_FW_LOADER is not set
219 249
220# 250#
251# Connector - unified userspace <-> kernelspace linker
252#
253# CONFIG_CONNECTOR is not set
254
255#
221# Memory Technology Devices (MTD) 256# Memory Technology Devices (MTD)
222# 257#
223CONFIG_MTD=y 258CONFIG_MTD=y
@@ -235,6 +270,7 @@ CONFIG_MTD_BLOCK=y
235# CONFIG_FTL is not set 270# CONFIG_FTL is not set
236# CONFIG_NFTL is not set 271# CONFIG_NFTL is not set
237# CONFIG_INFTL is not set 272# CONFIG_INFTL is not set
273# CONFIG_RFD_FTL is not set
238 274
239# 275#
240# RAM/ROM/Flash chip drivers 276# RAM/ROM/Flash chip drivers
@@ -254,13 +290,13 @@ CONFIG_MTD_CFI_I2=y
254CONFIG_MTD_RAM=y 290CONFIG_MTD_RAM=y
255# CONFIG_MTD_ROM is not set 291# CONFIG_MTD_ROM is not set
256# CONFIG_MTD_ABSENT is not set 292# CONFIG_MTD_ABSENT is not set
293# CONFIG_MTD_OBSOLETE_CHIPS is not set
257 294
258# 295#
259# Mapping drivers for chip access 296# Mapping drivers for chip access
260# 297#
261# CONFIG_MTD_COMPLEX_MAPPINGS is not set 298# CONFIG_MTD_COMPLEX_MAPPINGS is not set
262CONFIG_MTD_UCLINUX=y 299CONFIG_MTD_UCLINUX=y
263# CONFIG_MTD_SNAPGEARuC is not set
264# CONFIG_MTD_PLATRAM is not set 300# CONFIG_MTD_PLATRAM is not set
265 301
266# 302#
@@ -269,7 +305,6 @@ CONFIG_MTD_UCLINUX=y
269# CONFIG_MTD_SLRAM is not set 305# CONFIG_MTD_SLRAM is not set
270# CONFIG_MTD_PHRAM is not set 306# CONFIG_MTD_PHRAM is not set
271# CONFIG_MTD_MTDRAM is not set 307# CONFIG_MTD_MTDRAM is not set
272# CONFIG_MTD_BLKMTD is not set
273# CONFIG_MTD_BLOCK2MTD is not set 308# CONFIG_MTD_BLOCK2MTD is not set
274 309
275# 310#
@@ -285,6 +320,11 @@ CONFIG_MTD_UCLINUX=y
285# CONFIG_MTD_NAND is not set 320# CONFIG_MTD_NAND is not set
286 321
287# 322#
323# OneNAND Flash Device Drivers
324#
325# CONFIG_MTD_ONENAND is not set
326
327#
288# Parallel port support 328# Parallel port support
289# 329#
290# CONFIG_PARPORT is not set 330# CONFIG_PARPORT is not set
@@ -296,7 +336,6 @@ CONFIG_MTD_UCLINUX=y
296# 336#
297# Block devices 337# Block devices
298# 338#
299# CONFIG_BLK_DEV_FD is not set
300# CONFIG_BLK_DEV_COW_COMMON is not set 339# CONFIG_BLK_DEV_COW_COMMON is not set
301# CONFIG_BLK_DEV_LOOP is not set 340# CONFIG_BLK_DEV_LOOP is not set
302# CONFIG_BLK_DEV_NBD is not set 341# CONFIG_BLK_DEV_NBD is not set
@@ -304,16 +343,7 @@ CONFIG_BLK_DEV_RAM=y
304CONFIG_BLK_DEV_RAM_COUNT=16 343CONFIG_BLK_DEV_RAM_COUNT=16
305CONFIG_BLK_DEV_RAM_SIZE=4096 344CONFIG_BLK_DEV_RAM_SIZE=4096
306# CONFIG_BLK_DEV_INITRD is not set 345# CONFIG_BLK_DEV_INITRD is not set
307CONFIG_INITRAMFS_SOURCE=""
308# CONFIG_CDROM_PKTCDVD is not set 346# CONFIG_CDROM_PKTCDVD is not set
309
310#
311# IO Schedulers
312#
313CONFIG_IOSCHED_NOOP=y
314# CONFIG_IOSCHED_AS is not set
315# CONFIG_IOSCHED_DEADLINE is not set
316# CONFIG_IOSCHED_CFQ is not set
317# CONFIG_ATA_OVER_ETH is not set 347# CONFIG_ATA_OVER_ETH is not set
318 348
319# 349#
@@ -324,6 +354,7 @@ CONFIG_IOSCHED_NOOP=y
324# 354#
325# SCSI device support 355# SCSI device support
326# 356#
357# CONFIG_RAID_ATTRS is not set
327# CONFIG_SCSI is not set 358# CONFIG_SCSI is not set
328 359
329# 360#
@@ -354,13 +385,15 @@ CONFIG_NETDEVICES=y
354# CONFIG_TUN is not set 385# CONFIG_TUN is not set
355 386
356# 387#
388# PHY device support
389#
390# CONFIG_PHYLIB is not set
391
392#
357# Ethernet (10 or 100Mbit) 393# Ethernet (10 or 100Mbit)
358# 394#
359CONFIG_NET_ETHERNET=y 395CONFIG_NET_ETHERNET=y
360# CONFIG_MII is not set 396# CONFIG_MII is not set
361# CONFIG_NET_VENDOR_SMC is not set
362# CONFIG_NE2000 is not set
363# CONFIG_NET_PCI is not set
364CONFIG_FEC=y 397CONFIG_FEC=y
365# CONFIG_FEC2 is not set 398# CONFIG_FEC2 is not set
366 399
@@ -392,6 +425,7 @@ CONFIG_PPP=y
392# CONFIG_PPP_SYNC_TTY is not set 425# CONFIG_PPP_SYNC_TTY is not set
393# CONFIG_PPP_DEFLATE is not set 426# CONFIG_PPP_DEFLATE is not set
394# CONFIG_PPP_BSDCOMP is not set 427# CONFIG_PPP_BSDCOMP is not set
428# CONFIG_PPP_MPPE is not set
395# CONFIG_PPPOE is not set 429# CONFIG_PPPOE is not set
396# CONFIG_SLIP is not set 430# CONFIG_SLIP is not set
397# CONFIG_SHAPER is not set 431# CONFIG_SHAPER is not set
@@ -425,8 +459,6 @@ CONFIG_PPP=y
425# 459#
426# CONFIG_VT is not set 460# CONFIG_VT is not set
427# CONFIG_SERIAL_NONSTANDARD is not set 461# CONFIG_SERIAL_NONSTANDARD is not set
428# CONFIG_LEDMAN is not set
429# CONFIG_RESETSWITCH is not set
430 462
431# 463#
432# Serial drivers 464# Serial drivers
@@ -450,8 +482,6 @@ CONFIG_LEGACY_PTY_COUNT=256
450# Watchdog Cards 482# Watchdog Cards
451# 483#
452# CONFIG_WATCHDOG is not set 484# CONFIG_WATCHDOG is not set
453# CONFIG_MCFWATCHDOG is not set
454# CONFIG_RTC is not set
455# CONFIG_GEN_RTC is not set 485# CONFIG_GEN_RTC is not set
456# CONFIG_DTLK is not set 486# CONFIG_DTLK is not set
457# CONFIG_R3964 is not set 487# CONFIG_R3964 is not set
@@ -464,14 +494,19 @@ CONFIG_LEGACY_PTY_COUNT=256
464# 494#
465# TPM devices 495# TPM devices
466# 496#
467# CONFIG_MCF_QSPI is not set 497# CONFIG_TCG_TPM is not set
468# CONFIG_M41T11M6 is not set 498# CONFIG_TELCLOCK is not set
469 499
470# 500#
471# I2C support 501# I2C support
472# 502#
473# CONFIG_I2C is not set 503# CONFIG_I2C is not set
474# CONFIG_I2C_SENSOR is not set 504
505#
506# SPI support
507#
508# CONFIG_SPI is not set
509# CONFIG_SPI_MASTER is not set
475 510
476# 511#
477# Dallas's 1-wire bus 512# Dallas's 1-wire bus
@@ -482,6 +517,7 @@ CONFIG_LEGACY_PTY_COUNT=256
482# Hardware Monitoring support 517# Hardware Monitoring support
483# 518#
484# CONFIG_HWMON is not set 519# CONFIG_HWMON is not set
520# CONFIG_HWMON_VID is not set
485 521
486# 522#
487# Misc devices 523# Misc devices
@@ -491,6 +527,7 @@ CONFIG_LEGACY_PTY_COUNT=256
491# Multimedia devices 527# Multimedia devices
492# 528#
493# CONFIG_VIDEO_DEV is not set 529# CONFIG_VIDEO_DEV is not set
530CONFIG_VIDEO_V4L2=y
494 531
495# 532#
496# Digital Video Broadcasting Devices 533# Digital Video Broadcasting Devices
@@ -503,11 +540,6 @@ CONFIG_LEGACY_PTY_COUNT=256
503# CONFIG_FB is not set 540# CONFIG_FB is not set
504 541
505# 542#
506# SPI support
507#
508# CONFIG_SPI is not set
509
510#
511# Sound 543# Sound
512# 544#
513# CONFIG_SOUND is not set 545# CONFIG_SOUND is not set
@@ -517,6 +549,11 @@ CONFIG_LEGACY_PTY_COUNT=256
517# 549#
518# CONFIG_USB_ARCH_HAS_HCD is not set 550# CONFIG_USB_ARCH_HAS_HCD is not set
519# CONFIG_USB_ARCH_HAS_OHCI is not set 551# CONFIG_USB_ARCH_HAS_OHCI is not set
552# CONFIG_USB_ARCH_HAS_EHCI is not set
553
554#
555# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
556#
520 557
521# 558#
522# USB Gadget Support 559# USB Gadget Support
@@ -529,29 +566,43 @@ CONFIG_LEGACY_PTY_COUNT=256
529# CONFIG_MMC is not set 566# CONFIG_MMC is not set
530 567
531# 568#
569# LED devices
570#
571# CONFIG_NEW_LEDS is not set
572
573#
574# LED drivers
575#
576
577#
578# LED Triggers
579#
580
581#
532# InfiniBand support 582# InfiniBand support
533# 583#
534 584
535# 585#
536# SN Devices 586# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
537# 587#
538 588
539# 589#
590# Real Time Clock
591#
592# CONFIG_RTC_CLASS is not set
593
594#
540# File systems 595# File systems
541# 596#
542CONFIG_EXT2_FS=y 597CONFIG_EXT2_FS=y
543# CONFIG_EXT2_FS_XATTR is not set 598# CONFIG_EXT2_FS_XATTR is not set
544# CONFIG_EXT2_FS_XIP is not set 599# CONFIG_EXT2_FS_XIP is not set
545# CONFIG_EXT3_FS is not set 600# CONFIG_EXT3_FS is not set
546# CONFIG_JBD is not set
547# CONFIG_REISERFS_FS is not set 601# CONFIG_REISERFS_FS is not set
548# CONFIG_JFS_FS is not set 602# CONFIG_JFS_FS is not set
549# CONFIG_FS_POSIX_ACL is not set 603# CONFIG_FS_POSIX_ACL is not set
550
551#
552# XFS support
553#
554# CONFIG_XFS_FS is not set 604# CONFIG_XFS_FS is not set
605# CONFIG_OCFS2_FS is not set
555# CONFIG_MINIX_FS is not set 606# CONFIG_MINIX_FS is not set
556CONFIG_ROMFS_FS=y 607CONFIG_ROMFS_FS=y
557# CONFIG_INOTIFY is not set 608# CONFIG_INOTIFY is not set
@@ -559,6 +610,7 @@ CONFIG_ROMFS_FS=y
559# CONFIG_DNOTIFY is not set 610# CONFIG_DNOTIFY is not set
560# CONFIG_AUTOFS_FS is not set 611# CONFIG_AUTOFS_FS is not set
561# CONFIG_AUTOFS4_FS is not set 612# CONFIG_AUTOFS4_FS is not set
613# CONFIG_FUSE_FS is not set
562 614
563# 615#
564# CD-ROM/DVD Filesystems 616# CD-ROM/DVD Filesystems
@@ -581,6 +633,7 @@ CONFIG_SYSFS=y
581# CONFIG_TMPFS is not set 633# CONFIG_TMPFS is not set
582# CONFIG_HUGETLB_PAGE is not set 634# CONFIG_HUGETLB_PAGE is not set
583CONFIG_RAMFS=y 635CONFIG_RAMFS=y
636# CONFIG_CONFIGFS_FS is not set
584 637
585# 638#
586# Miscellaneous filesystems 639# Miscellaneous filesystems
@@ -611,6 +664,7 @@ CONFIG_RAMFS=y
611# CONFIG_NCP_FS is not set 664# CONFIG_NCP_FS is not set
612# CONFIG_CODA_FS is not set 665# CONFIG_CODA_FS is not set
613# CONFIG_AFS_FS is not set 666# CONFIG_AFS_FS is not set
667# CONFIG_9P_FS is not set
614 668
615# 669#
616# Partition Types 670# Partition Types
@@ -627,8 +681,12 @@ CONFIG_MSDOS_PARTITION=y
627# Kernel hacking 681# Kernel hacking
628# 682#
629# CONFIG_PRINTK_TIME is not set 683# CONFIG_PRINTK_TIME is not set
684# CONFIG_MAGIC_SYSRQ is not set
630# CONFIG_DEBUG_KERNEL is not set 685# CONFIG_DEBUG_KERNEL is not set
631CONFIG_LOG_BUF_SHIFT=14 686CONFIG_LOG_BUF_SHIFT=14
687# CONFIG_DEBUG_BUGVERBOSE is not set
688# CONFIG_DEBUG_FS is not set
689# CONFIG_UNWIND_INFO is not set
632# CONFIG_FULLDEBUG is not set 690# CONFIG_FULLDEBUG is not set
633# CONFIG_HIGHPROFILE is not set 691# CONFIG_HIGHPROFILE is not set
634# CONFIG_BOOTPARAM is not set 692# CONFIG_BOOTPARAM is not set
@@ -655,5 +713,6 @@ CONFIG_LOG_BUF_SHIFT=14
655# Library routines 713# Library routines
656# 714#
657# CONFIG_CRC_CCITT is not set 715# CONFIG_CRC_CCITT is not set
716# CONFIG_CRC16 is not set
658# CONFIG_CRC32 is not set 717# CONFIG_CRC32 is not set
659# CONFIG_LIBCRC32C is not set 718# CONFIG_LIBCRC32C is not set
diff --git a/arch/m68knommu/kernel/vmlinux.lds.S b/arch/m68knommu/kernel/vmlinux.lds.S
index a331cc90797c..6a2f0c693254 100644
--- a/arch/m68knommu/kernel/vmlinux.lds.S
+++ b/arch/m68knommu/kernel/vmlinux.lds.S
@@ -1,7 +1,7 @@
1/* 1/*
2 * vmlinux.lds.S -- master linker script for m68knommu arch 2 * vmlinux.lds.S -- master linker script for m68knommu arch
3 * 3 *
4 * (C) Copyright 2002-2004, Greg Ungerer <gerg@snapgear.com> 4 * (C) Copyright 2002-2006, Greg Ungerer <gerg@snapgear.com>
5 * 5 *
6 * This ends up looking compilcated, because of the number of 6 * This ends up looking compilcated, because of the number of
7 * address variations for ram and rom/flash layouts. The real 7 * address variations for ram and rom/flash layouts. The real
@@ -22,13 +22,7 @@
22#define ROM_START 0x10c10400 22#define ROM_START 0x10c10400
23#define ROM_LENGTH 0xfec00 23#define ROM_LENGTH 0xfec00
24#define ROM_END 0x10d00000 24#define ROM_END 0x10d00000
25#define RAMVEC_START 0x00000000 25#define DATA_ADDR CONFIG_KERNELBASE
26#define RAMVEC_LENGTH 0x400
27#define RAM_START 0x10000400
28#define RAM_LENGTH 0xffc00
29#define RAM_END 0x10100000
30#define _ramend _ram_end_notused
31#define DATA_ADDR RAM_START
32#endif 26#endif
33 27
34/* 28/*
@@ -41,11 +35,6 @@
41#define ROM_START 0x10c10400 35#define ROM_START 0x10c10400
42#define ROM_LENGTH 0x1efc00 36#define ROM_LENGTH 0x1efc00
43#define ROM_END 0x10e00000 37#define ROM_END 0x10e00000
44#define RAMVEC_START 0x00000000
45#define RAMVEC_LENGTH 0x400
46#define RAM_START 0x00020400
47#define RAM_LENGTH 0x7dfc00
48#define RAM_END 0x00800000
49#endif 38#endif
50#ifdef CONFIG_ROMKERNEL 39#ifdef CONFIG_ROMKERNEL
51#define ROMVEC_START 0x10c10000 40#define ROMVEC_START 0x10c10000
@@ -53,11 +42,6 @@
53#define ROM_START 0x10c10400 42#define ROM_START 0x10c10400
54#define ROM_LENGTH 0x1efc00 43#define ROM_LENGTH 0x1efc00
55#define ROM_END 0x10e00000 44#define ROM_END 0x10e00000
56#define RAMVEC_START 0x00000000
57#define RAMVEC_LENGTH 0x400
58#define RAM_START 0x00020000
59#define RAM_LENGTH 0x600000
60#define RAM_END 0x00800000
61#endif 45#endif
62#ifdef CONFIG_HIMEMKERNEL 46#ifdef CONFIG_HIMEMKERNEL
63#define ROMVEC_START 0x00600000 47#define ROMVEC_START 0x00600000
@@ -65,141 +49,28 @@
65#define ROM_START 0x00600400 49#define ROM_START 0x00600400
66#define ROM_LENGTH 0x1efc00 50#define ROM_LENGTH 0x1efc00
67#define ROM_END 0x007f0000 51#define ROM_END 0x007f0000
68#define RAMVEC_START 0x00000000
69#define RAMVEC_LENGTH 0x400
70#define RAM_START 0x00020000
71#define RAM_LENGTH 0x5e0000
72#define RAM_END 0x00600000
73#endif 52#endif
74#endif 53#endif
75 54
76#ifdef CONFIG_DRAGEN2
77#define RAM_START 0x10000
78#define RAM_LENGTH 0x7f0000
79#endif
80
81#ifdef CONFIG_UCQUICC 55#ifdef CONFIG_UCQUICC
82#define ROMVEC_START 0x00000000 56#define ROMVEC_START 0x00000000
83#define ROMVEC_LENGTH 0x404 57#define ROMVEC_LENGTH 0x404
84#define ROM_START 0x00000404 58#define ROM_START 0x00000404
85#define ROM_LENGTH 0x1ff6fc 59#define ROM_LENGTH 0x1ff6fc
86#define ROM_END 0x00200000 60#define ROM_END 0x00200000
87#define RAMVEC_START 0x00200000
88#define RAMVEC_LENGTH 0x404
89#define RAM_START 0x00200404
90#define RAM_LENGTH 0x1ff6fc
91#define RAM_END 0x00400000
92#endif
93
94/*
95 * The standard Arnewsh 5206 board only has 1MiB of ram. Not normally
96 * enough to be useful. Assume the user has fitted something larger,
97 * at least 4MiB in size. No point in not letting the kernel completely
98 * link, it will be obvious if it is too big when they go to load it.
99 */
100#if defined(CONFIG_ARN5206)
101#define RAM_START 0x10000
102#define RAM_LENGTH 0x3f0000
103#endif
104
105/*
106 * The Motorola 5206eLITE board only has 1MiB of static RAM.
107 */
108#if defined(CONFIG_ELITE)
109#define RAM_START 0x30020000
110#define RAM_LENGTH 0xe0000
111#endif
112
113/*
114 * All the Motorola eval boards have the same basic arrangement.
115 * The end of RAM will vary depending on how much ram is fitted,
116 * but this isn't important here, we assume at least 4MiB.
117 */
118#if defined(CONFIG_M5206eC3) || defined(CONFIG_M5249C3) || \
119 defined(CONFIG_M5272C3) || defined(CONFIG_M5307C3) || \
120 defined(CONFIG_ARN5307) || defined(CONFIG_M5407C3) || \
121 defined(CONFIG_M5271EVB) || defined(CONFIG_M5275EVB) || \
122 defined(CONFIG_M5235EVB)
123#define RAM_START 0x20000
124#define RAM_LENGTH 0x3e0000
125#endif
126
127/*
128 * The Freescale 5208EVB board has 32MB of RAM.
129 */
130#if defined(CONFIG_M5208EVB)
131#define RAM_START 0x40020000
132#define RAM_LENGTH 0x01fe0000
133#endif
134
135/*
136 * The senTec COBRA5272 board has nearly the same memory layout as
137 * the M5272C3. We assume 16MiB ram.
138 */
139#if defined(CONFIG_COBRA5272)
140#define RAM_START 0x20000
141#define RAM_LENGTH 0xfe0000
142#endif
143
144#if defined(CONFIG_M5282EVB)
145#define RAM_START 0x10000
146#define RAM_LENGTH 0x3f0000
147#endif
148
149/*
150 * The senTec COBRA5282 board has the same memory layout as the M5282EVB.
151 */
152#if defined(CONFIG_COBRA5282)
153#define RAM_START 0x10000
154#define RAM_LENGTH 0x3f0000
155#endif
156
157
158/*
159 * The EMAC SoM-5282EM module.
160 */
161#if defined(CONFIG_SOM5282EM)
162#define RAM_START 0x10000
163#define RAM_LENGTH 0xff0000
164#endif
165
166
167/*
168 * These flash boot boards use all of ram for operation. Again the
169 * actual memory size is not important here, assume at least 4MiB.
170 * They currently have no support for running in flash.
171 */
172#if defined(CONFIG_NETtel) || defined(CONFIG_eLIA) || \
173 defined(CONFIG_DISKtel) || defined(CONFIG_SECUREEDGEMP3) || \
174 defined(CONFIG_HW_FEITH)
175#define RAM_START 0x400
176#define RAM_LENGTH 0x3ffc00
177#endif
178
179/*
180 * Sneha Boards mimimun memory
181 * The end of RAM will vary depending on how much ram is fitted,
182 * but this isn't important here, we assume at least 4MiB.
183 */
184#if defined(CONFIG_CPU16B)
185#define RAM_START 0x20000
186#define RAM_LENGTH 0x3e0000
187#endif
188
189#if defined(CONFIG_MOD5272)
190#define RAM_START 0x02000000
191#define RAM_LENGTH 0x00800000
192#define RAMVEC_START 0x20000000
193#define RAMVEC_LENGTH 0x00000400
194#endif 61#endif
195 62
196#if defined(CONFIG_RAMKERNEL) 63#if defined(CONFIG_RAMKERNEL)
64#define RAM_START CONFIG_KERNELBASE
65#define RAM_LENGTH (CONFIG_RAMBASE + CONFIG_RAMSIZE - CONFIG_KERNELBASE)
197#define TEXT ram 66#define TEXT ram
198#define DATA ram 67#define DATA ram
199#define INIT ram 68#define INIT ram
200#define BSS ram 69#define BSS ram
201#endif 70#endif
202#if defined(CONFIG_ROMKERNEL) || defined(CONFIG_HIMEMKERNEL) 71#if defined(CONFIG_ROMKERNEL) || defined(CONFIG_HIMEMKERNEL)
72#define RAM_START CONFIG_RAMBASE
73#define RAM_LENGTH CONFIG_RAMSIZE
203#define TEXT rom 74#define TEXT rom
204#define DATA ram 75#define DATA ram
205#define INIT ram 76#define INIT ram
@@ -215,13 +86,7 @@ OUTPUT_ARCH(m68k)
215ENTRY(_start) 86ENTRY(_start)
216 87
217MEMORY { 88MEMORY {
218#ifdef RAMVEC_START
219 ramvec : ORIGIN = RAMVEC_START, LENGTH = RAMVEC_LENGTH
220#endif
221 ram : ORIGIN = RAM_START, LENGTH = RAM_LENGTH 89 ram : ORIGIN = RAM_START, LENGTH = RAM_LENGTH
222#ifdef RAM_END
223 eram : ORIGIN = RAM_END, LENGTH = 0
224#endif
225#ifdef ROM_START 90#ifdef ROM_START
226 romvec : ORIGIN = ROMVEC_START, LENGTH = ROMVEC_LENGTH 91 romvec : ORIGIN = ROMVEC_START, LENGTH = ROMVEC_LENGTH
227 rom : ORIGIN = ROM_START, LENGTH = ROM_LENGTH 92 rom : ORIGIN = ROM_START, LENGTH = ROM_LENGTH
@@ -308,12 +173,6 @@ SECTIONS {
308 __rom_end = . ; 173 __rom_end = . ;
309 } > erom 174 } > erom
310#endif 175#endif
311#ifdef RAMVEC_START
312 . = RAMVEC_START ;
313 .ramvec : {
314 __ramvec = .;
315 } > ramvec
316#endif
317 176
318 .data DATA_ADDR : { 177 .data DATA_ADDR : {
319 . = ALIGN(4); 178 . = ALIGN(4);
@@ -373,12 +232,5 @@ SECTIONS {
373 _ebss = . ; 232 _ebss = . ;
374 } > BSS 233 } > BSS
375 234
376#ifdef RAM_END
377 . = RAM_END ;
378 .eram : {
379 __ramend = . ;
380 _ramend = . ;
381 } > eram
382#endif
383} 235}
384 236
diff --git a/arch/m68knommu/platform/5307/head.S b/arch/m68knommu/platform/5307/head.S
index c30c462b99b1..1d9eb301d7ac 100644
--- a/arch/m68knommu/platform/5307/head.S
+++ b/arch/m68knommu/platform/5307/head.S
@@ -3,7 +3,7 @@
3/* 3/*
4 * head.S -- common startup code for ColdFire CPUs. 4 * head.S -- common startup code for ColdFire CPUs.
5 * 5 *
6 * (C) Copyright 1999-2004, Greg Ungerer (gerg@snapgear.com). 6 * (C) Copyright 1999-2006, Greg Ungerer <gerg@snapgear.com>.
7 */ 7 */
8 8
9/*****************************************************************************/ 9/*****************************************************************************/
@@ -19,47 +19,15 @@
19/*****************************************************************************/ 19/*****************************************************************************/
20 20
21/* 21/*
22 * Define fixed memory sizes. Configuration of a fixed memory size 22 * If we don't have a fixed memory size, then lets build in code
23 * overrides everything else. If the user defined a size we just
24 * blindly use it (they know what they are doing right :-)
25 */
26#if defined(CONFIG_RAM32MB)
27#define MEM_SIZE 0x02000000 /* memory size 32Mb */
28#elif defined(CONFIG_RAM16MB)
29#define MEM_SIZE 0x01000000 /* memory size 16Mb */
30#elif defined(CONFIG_RAM8MB)
31#define MEM_SIZE 0x00800000 /* memory size 8Mb */
32#elif defined(CONFIG_RAM4MB)
33#define MEM_SIZE 0x00400000 /* memory size 4Mb */
34#elif defined(CONFIG_RAM1MB)
35#define MEM_SIZE 0x00100000 /* memory size 1Mb */
36#endif
37
38/*
39 * Memory size exceptions for special cases. Some boards may be set
40 * for auto memory sizing, but we can't do it that way for some reason.
41 * For example the 5206eLITE board has static RAM, and auto-detecting
42 * the SDRAM will do you no good at all. Same goes for the MOD5272.
43 */
44#ifdef CONFIG_RAMAUTO
45#if defined(CONFIG_M5206eLITE)
46#define MEM_SIZE 0x00100000 /* 1MiB default memory */
47#endif
48#if defined(CONFIG_MOD5272)
49#define MEM_SIZE 0x00800000 /* 8MiB default memory */
50#endif
51#endif /* CONFIG_RAMAUTO */
52
53
54/*
55 * If we don't have a fixed memory size now, then lets build in code
56 * to auto detect the DRAM size. Obviously this is the prefered 23 * to auto detect the DRAM size. Obviously this is the prefered
57 * method, and should work for most boards (it won't work for those 24 * method, and should work for most boards. It won't work for those
58 * that do not have their RAM starting at address 0). 25 * that do not have their RAM starting at address 0, and it only
26 * works on SDRAM (not boards fitted with SRAM).
59 */ 27 */
60#if defined(MEM_SIZE) 28#if CONFIG_RAMSIZE != 0
61.macro GET_MEM_SIZE 29.macro GET_MEM_SIZE
62 movel #MEM_SIZE,%d0 /* hard coded memory size */ 30 movel #CONFIG_RAMSIZE,%d0 /* hard coded memory size */
63.endm 31.endm
64 32
65#elif defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \ 33#elif defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \
@@ -98,37 +66,7 @@
98.endm 66.endm
99 67
100#else 68#else
101#error "ERROR: I don't know how to determine your boards memory size?" 69#error "ERROR: I don't know how to probe your boards memory size?"
102#endif
103
104
105/*
106 * Most ColdFire boards have their DRAM starting at address 0.
107 * Notable exception is the 5206eLITE board, another is the MOD5272.
108 */
109#if defined(CONFIG_M5206eLITE)
110#define MEM_BASE 0x30000000
111#endif
112#if defined(CONFIG_MOD5272)
113#define MEM_BASE 0x02000000
114#define VBR_BASE 0x20000000 /* vectors in SRAM */
115#endif
116#if defined(CONFIG_M5208EVB)
117#define MEM_BASE 0x40000000
118#endif
119
120#ifndef MEM_BASE
121#define MEM_BASE 0x00000000 /* memory base at address 0 */
122#endif
123
124/*
125 * The default location for the vectors is at the base of RAM.
126 * Some boards might like to use internal SRAM or something like
127 * that. If no board specific header defines an alternative then
128 * use the base of RAM.
129 */
130#ifndef VBR_BASE
131#define VBR_BASE MEM_BASE /* vector address */
132#endif 70#endif
133 71
134/*****************************************************************************/ 72/*****************************************************************************/
@@ -191,11 +129,11 @@ _start:
191 * Create basic memory configuration. Set VBR accordingly, 129 * Create basic memory configuration. Set VBR accordingly,
192 * and size memory. 130 * and size memory.
193 */ 131 */
194 movel #VBR_BASE,%a7 132 movel #CONFIG_VECTORBASE,%a7
195 movec %a7,%VBR /* set vectors addr */ 133 movec %a7,%VBR /* set vectors addr */
196 movel %a7,_ramvec 134 movel %a7,_ramvec
197 135
198 movel #MEM_BASE,%a7 /* mark the base of RAM */ 136 movel #CONFIG_RAMBASE,%a7 /* mark the base of RAM */
199 movel %a7,_rambase 137 movel %a7,_rambase
200 138
201 GET_MEM_SIZE /* macro code determines size */ 139 GET_MEM_SIZE /* macro code determines size */
diff --git a/arch/m68knommu/platform/68328/head-pilot.S b/arch/m68knommu/platform/68328/head-pilot.S
index c46775fe04be..46b3604f999c 100644
--- a/arch/m68knommu/platform/68328/head-pilot.S
+++ b/arch/m68knommu/platform/68328/head-pilot.S
@@ -21,7 +21,6 @@
21.global _start 21.global _start
22 22
23.global _rambase 23.global _rambase
24.global __ramvec
25.global _ramvec 24.global _ramvec
26.global _ramstart 25.global _ramstart
27.global _ramend 26.global _ramend
@@ -121,7 +120,7 @@ L0:
121 DBG_PUTC('B') 120 DBG_PUTC('B')
122 121
123 /* Copy command line from beginning of RAM (+16) to end of bss */ 122 /* Copy command line from beginning of RAM (+16) to end of bss */
124 movel #__ramvec, %d7 123 movel #CONFIG_VECTORBASE, %d7
125 addl #16, %d7 124 addl #16, %d7
126 moveal %d7, %a0 125 moveal %d7, %a0
127 moveal #_ebss, %a1 126 moveal #_ebss, %a1
diff --git a/arch/m68knommu/platform/68328/head-ram.S b/arch/m68knommu/platform/68328/head-ram.S
index 6bdc9bce43f2..e8dc9241ff96 100644
--- a/arch/m68knommu/platform/68328/head-ram.S
+++ b/arch/m68knommu/platform/68328/head-ram.S
@@ -1,10 +1,7 @@
1#include <linux/config.h> 1#include <linux/config.h>
2 2
3 .global __main 3 .global __main
4 .global __ram_start
5 .global __ram_end
6 .global __rom_start 4 .global __rom_start
7 .global __rom_end
8 5
9 .global _rambase 6 .global _rambase
10 .global _ramstart 7 .global _ramstart
@@ -12,6 +9,7 @@
12 .global splash_bits 9 .global splash_bits
13 .global _start 10 .global _start
14 .global _stext 11 .global _stext
12 .global _edata
15 13
16#define DEBUG 14#define DEBUG
17#define ROM_OFFSET 0x10C00000 15#define ROM_OFFSET 0x10C00000
@@ -73,7 +71,7 @@ pclp1:
73#ifdef CONFIG_RELOCATE 71#ifdef CONFIG_RELOCATE
74 /* Copy me to RAM */ 72 /* Copy me to RAM */
75 moveal #__rom_start, %a0 73 moveal #__rom_start, %a0
76 moveal #__ram_start, %a1 74 moveal #_stext, %a1
77 moveal #_edata, %a2 75 moveal #_edata, %a2
78 76
79 /* Copy %a0 to %a1 until %a1 == %a2 */ 77 /* Copy %a0 to %a1 until %a1 == %a2 */
diff --git a/arch/m68knommu/platform/68328/head-rom.S b/arch/m68knommu/platform/68328/head-rom.S
index 2b448a297011..234430b9551c 100644
--- a/arch/m68knommu/platform/68328/head-rom.S
+++ b/arch/m68knommu/platform/68328/head-rom.S
@@ -28,6 +28,8 @@ _ramstart:
28_ramend: 28_ramend:
29.long 0 29.long 0
30 30
31#define RAMEND (CONFIG_RAMBASE + CONFIG_RAMSIZE)
32
31#ifdef CONFIG_INIT_LCD 33#ifdef CONFIG_INIT_LCD
32splash_bits: 34splash_bits:
33#include "bootlogo.rh" 35#include "bootlogo.rh"
@@ -48,7 +50,7 @@ _stext: movew #0x2700,%sr
48 moveb #0x81, 0xfffffA27 /* LCKCON */ 50 moveb #0x81, 0xfffffA27 /* LCKCON */
49 movew #0xff00, 0xfffff412 /* LCD pins */ 51 movew #0xff00, 0xfffff412 /* LCD pins */
50#endif 52#endif
51 moveal #__ramend-CONFIG_MEMORY_RESERVE*0x100000 - 0x10, %sp 53 moveal #RAMEND-CONFIG_MEMORY_RESERVE*0x100000 - 0x10, %sp
52 movew #32767, %d0 /* PLL settle wait loop */ 54 movew #32767, %d0 /* PLL settle wait loop */
531: subq #1, %d0 551: subq #1, %d0
54 bne 1b 56 bne 1b
@@ -73,13 +75,13 @@ _stext: movew #0x2700,%sr
73 bhi 1b 75 bhi 1b
74 76
75 movel #_sdata, %d0 77 movel #_sdata, %d0
76 movel %d0, _rambase 78 movel %d0, _rambase
77 movel #_ebss, %d0 79 movel #_ebss, %d0
78 movel %d0, _ramstart 80 movel %d0, _ramstart
79 movel #__ramend-CONFIG_MEMORY_RESERVE*0x100000, %d0 81 movel #RAMEND-CONFIG_MEMORY_RESERVE*0x100000, %d0
80 movel %d0, _ramend 82 movel %d0, _ramend
81 movel #__ramvec, %d0 83 movel #CONFIG_VECTORBASE, %d0
82 movel %d0, _ramvec 84 movel %d0, _ramvec
83 85
84/* 86/*
85 * load the current task pointer and stack 87 * load the current task pointer and stack
diff --git a/arch/m68knommu/platform/68360/head-ram.S b/arch/m68knommu/platform/68360/head-ram.S
index a5c639a51eef..f497713a4ec7 100644
--- a/arch/m68knommu/platform/68360/head-ram.S
+++ b/arch/m68knommu/platform/68360/head-ram.S
@@ -18,7 +18,6 @@
18.global _start 18.global _start
19 19
20.global _rambase 20.global _rambase
21.global __ramvec
22.global _ramvec 21.global _ramvec
23.global _ramstart 22.global _ramstart
24.global _ramend 23.global _ramend
@@ -26,6 +25,8 @@
26.global _quicc_base 25.global _quicc_base
27.global _periph_base 26.global _periph_base
28 27
28#define RAMEND (CONFIG_RAMBASE + CONFIG_RAMSIZE)
29
29#define REGB 0x1000 30#define REGB 0x1000
30#define PEPAR (_dprbase + REGB + 0x0016) 31#define PEPAR (_dprbase + REGB + 0x0016)
31#define GMR (_dprbase + REGB + 0x0040) 32#define GMR (_dprbase + REGB + 0x0040)
@@ -103,7 +104,7 @@ _stext:
103 nop 104 nop
104 ori.w #MCU_DISABLE_INTRPTS, %sr /* disable interrupts: */ 105 ori.w #MCU_DISABLE_INTRPTS, %sr /* disable interrupts: */
105 /* We should not need to setup the boot stack the reset should do it. */ 106 /* We should not need to setup the boot stack the reset should do it. */
106 movea.l #__ramend, %sp /*set up stack at the end of DRAM:*/ 107 movea.l #RAMEND, %sp /*set up stack at the end of DRAM:*/
107 108
108set_mbar_register: 109set_mbar_register:
109 moveq.l #0x07, %d1 /* Setup MBAR */ 110 moveq.l #0x07, %d1 /* Setup MBAR */
@@ -163,7 +164,7 @@ configure_memory_controller:
163 move.l %d0, GMR 164 move.l %d0, GMR
164 165
165configure_chip_select_0: 166configure_chip_select_0:
166 move.l #__ramend, %d0 167 move.l #RAMEND, %d0
167 subi.l #__ramstart, %d0 168 subi.l #__ramstart, %d0
168 subq.l #0x01, %d0 169 subq.l #0x01, %d0
169 eori.l #SIM_OR_MASK, %d0 170 eori.l #SIM_OR_MASK, %d0
@@ -234,16 +235,10 @@ store_ram_size:
234 /* Set ram size information */ 235 /* Set ram size information */
235 move.l #_sdata, _rambase 236 move.l #_sdata, _rambase
236 move.l #_ebss, _ramstart 237 move.l #_ebss, _ramstart
237 move.l #__ramend, %d0 238 move.l #RAMEND, %d0
238 sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/ 239 sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/
239 move.l %d0, _ramend /* Different from __ramend.*/ 240 move.l %d0, _ramend /* Different from RAMEND.*/
240 241
241store_flash_size:
242 /* Set rom size information */
243 move.l #__rom_end, %d0
244 sub.l #__rom_start, %d0
245 move.l %d0, rom_length
246
247 pea 0 242 pea 0
248 pea env 243 pea env
249 pea %sp@(4) 244 pea %sp@(4)
@@ -286,7 +281,7 @@ _dprbase:
286 */ 281 */
287 282
288.section ".data.initvect","awx" 283.section ".data.initvect","awx"
289 .long __ramend /* Reset: Initial Stack Pointer - 0. */ 284 .long RAMEND /* Reset: Initial Stack Pointer - 0. */
290 .long _start /* Reset: Initial Program Counter - 1. */ 285 .long _start /* Reset: Initial Program Counter - 1. */
291 .long buserr /* Bus Error - 2. */ 286 .long buserr /* Bus Error - 2. */
292 .long trap /* Address Error - 3. */ 287 .long trap /* Address Error - 3. */
diff --git a/arch/m68knommu/platform/68360/head-rom.S b/arch/m68knommu/platform/68360/head-rom.S
index 0da357a4cfee..2d28c3e19a88 100644
--- a/arch/m68knommu/platform/68360/head-rom.S
+++ b/arch/m68knommu/platform/68360/head-rom.S
@@ -18,7 +18,6 @@
18.global _start 18.global _start
19 19
20.global _rambase 20.global _rambase
21.global __ramvec
22.global _ramvec 21.global _ramvec
23.global _ramstart 22.global _ramstart
24.global _ramend 23.global _ramend
@@ -26,6 +25,8 @@
26.global _quicc_base 25.global _quicc_base
27.global _periph_base 26.global _periph_base
28 27
28#define RAMEND (CONFIG_RAMBASE + CONFIG_RAMSIZE)
29
29#define REGB 0x1000 30#define REGB 0x1000
30#define PEPAR (_dprbase + REGB + 0x0016) 31#define PEPAR (_dprbase + REGB + 0x0016)
31#define GMR (_dprbase + REGB + 0x0040) 32#define GMR (_dprbase + REGB + 0x0040)
@@ -115,7 +116,7 @@ _stext:
115 nop 116 nop
116 ori.w #MCU_DISABLE_INTRPTS, %sr /* disable interrupts: */ 117 ori.w #MCU_DISABLE_INTRPTS, %sr /* disable interrupts: */
117 /* We should not need to setup the boot stack the reset should do it. */ 118 /* We should not need to setup the boot stack the reset should do it. */
118 movea.l #__ramend, %sp /* set up stack at the end of DRAM:*/ 119 movea.l #RAMEND, %sp /* set up stack at the end of DRAM:*/
119 120
120 121
121set_mbar_register: 122set_mbar_register:
@@ -245,16 +246,10 @@ store_ram_size:
245 /* Set ram size information */ 246 /* Set ram size information */
246 move.l #_sdata, _rambase 247 move.l #_sdata, _rambase
247 move.l #_ebss, _ramstart 248 move.l #_ebss, _ramstart
248 move.l #__ramend, %d0 249 move.l #RAMEND, %d0
249 sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/ 250 sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/
250 move.l %d0, _ramend /* Different from __ramend.*/ 251 move.l %d0, _ramend /* Different from RAMEND.*/
251 252
252store_flash_size:
253 /* Set rom size information */
254 move.l #__rom_end, %d0
255 sub.l #__rom_start, %d0
256 move.l %d0, rom_length
257
258 pea 0 253 pea 0
259 pea env 254 pea env
260 pea %sp@(4) 255 pea %sp@(4)
@@ -298,7 +293,7 @@ _dprbase:
298 */ 293 */
299 294
300.section ".data.initvect","awx" 295.section ".data.initvect","awx"
301 .long __ramend /* Reset: Initial Stack Pointer - 0. */ 296 .long RAMEND /* Reset: Initial Stack Pointer - 0. */
302 .long _start /* Reset: Initial Program Counter - 1. */ 297 .long _start /* Reset: Initial Program Counter - 1. */
303 .long buserr /* Bus Error - 2. */ 298 .long buserr /* Bus Error - 2. */
304 .long trap /* Address Error - 3. */ 299 .long trap /* Address Error - 3. */
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 298f82fe8440..9096a5ea4229 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -446,7 +446,7 @@ static int __init topology_init(void)
446 int ret; 446 int ret;
447 447
448 for_each_present_cpu(cpu) { 448 for_each_present_cpu(cpu) {
449 ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL); 449 ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu);
450 if (ret) 450 if (ret)
451 printk(KERN_WARNING "topology_init: register_cpu %d " 451 printk(KERN_WARNING "topology_init: register_cpu %d "
452 "failed (%d)\n", cpu, ret); 452 "failed (%d)\n", cpu, ret);
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 2e8e52c135e6..70cf09afdf56 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -367,7 +367,7 @@ void mipsmt_prepare_cpus(void)
367 dvpe(); 367 dvpe();
368 dmt(); 368 dmt();
369 369
370 freeIPIq.lock = SPIN_LOCK_UNLOCKED; 370 spin_lock_init(&freeIPIq.lock);
371 371
372 /* 372 /*
373 * We probably don't have as many VPEs as we do SMP "CPUs", 373 * We probably don't have as many VPEs as we do SMP "CPUs",
@@ -375,7 +375,7 @@ void mipsmt_prepare_cpus(void)
375 */ 375 */
376 for (i=0; i<NR_CPUS; i++) { 376 for (i=0; i<NR_CPUS; i++) {
377 IPIQ[i].head = IPIQ[i].tail = NULL; 377 IPIQ[i].head = IPIQ[i].tail = NULL;
378 IPIQ[i].lock = SPIN_LOCK_UNLOCKED; 378 spin_lock_init(&IPIQ[i].lock);
379 IPIQ[i].depth = 0; 379 IPIQ[i].depth = 0;
380 ipi_timer_latch[i] = 0; 380 ipi_timer_latch[i] = 0;
381 } 381 }
diff --git a/arch/mips/momentum/ocelot_g/gt-irq.c b/arch/mips/momentum/ocelot_g/gt-irq.c
index e5eceed1beff..8bd9b844fa9e 100644
--- a/arch/mips/momentum/ocelot_g/gt-irq.c
+++ b/arch/mips/momentum/ocelot_g/gt-irq.c
@@ -59,7 +59,7 @@ void hook_irq_handler(int int_cause, int bit_num, void *isr_ptr)
59 * bit_num - Indicates which bit number in the cause register 59 * bit_num - Indicates which bit number in the cause register
60 * 60 *
61 * Outputs : 61 * Outputs :
62 * 1 if succesful, 0 if failure 62 * 1 if successful, 0 if failure
63 */ 63 */
64int enable_galileo_irq(int int_cause, int bit_num) 64int enable_galileo_irq(int int_cause, int bit_num)
65{ 65{
@@ -83,7 +83,7 @@ int enable_galileo_irq(int int_cause, int bit_num)
83 * bit_num - Indicates which bit number in the cause register 83 * bit_num - Indicates which bit number in the cause register
84 * 84 *
85 * Outputs : 85 * Outputs :
86 * 1 if succesful, 0 if failure 86 * 1 if successful, 0 if failure
87 */ 87 */
88int disable_galileo_irq(int int_cause, int bit_num) 88int disable_galileo_irq(int int_cause, int bit_num)
89{ 89{
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index c31e4cff64e0..65eb55400d77 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -38,7 +38,7 @@ static int op_mips_create_files(struct super_block * sb, struct dentry * root)
38 38
39 for (i = 0; i < model->num_counters; ++i) { 39 for (i = 0; i < model->num_counters; ++i) {
40 struct dentry *dir; 40 struct dentry *dir;
41 char buf[3]; 41 char buf[4];
42 42
43 snprintf(buf, sizeof buf, "%d", i); 43 snprintf(buf, sizeof buf, "%d", i);
44 dir = oprofilefs_mkdir(sb, root, buf); 44 dir = oprofilefs_mkdir(sb, root, buf);
diff --git a/arch/mips/sgi-ip22/ip22-reset.c b/arch/mips/sgi-ip22/ip22-reset.c
index a9c58e067b53..8134220ed600 100644
--- a/arch/mips/sgi-ip22/ip22-reset.c
+++ b/arch/mips/sgi-ip22/ip22-reset.c
@@ -34,7 +34,7 @@
34#define POWERDOWN_TIMEOUT 120 34#define POWERDOWN_TIMEOUT 120
35 35
36/* 36/*
37 * Blink frequency during reboot grace period and when paniced. 37 * Blink frequency during reboot grace period and when panicked.
38 */ 38 */
39#define POWERDOWN_FREQ (HZ / 4) 39#define POWERDOWN_FREQ (HZ / 4)
40#define PANIC_FREQ (HZ / 8) 40#define PANIC_FREQ (HZ / 8)
diff --git a/arch/mips/sgi-ip32/ip32-reset.c b/arch/mips/sgi-ip32/ip32-reset.c
index ab9d9cef089e..79ddb4605659 100644
--- a/arch/mips/sgi-ip32/ip32-reset.c
+++ b/arch/mips/sgi-ip32/ip32-reset.c
@@ -28,13 +28,13 @@
28 28
29#define POWERDOWN_TIMEOUT 120 29#define POWERDOWN_TIMEOUT 120
30/* 30/*
31 * Blink frequency during reboot grace period and when paniced. 31 * Blink frequency during reboot grace period and when panicked.
32 */ 32 */
33#define POWERDOWN_FREQ (HZ / 4) 33#define POWERDOWN_FREQ (HZ / 4)
34#define PANIC_FREQ (HZ / 8) 34#define PANIC_FREQ (HZ / 8)
35 35
36static struct timer_list power_timer, blink_timer, debounce_timer; 36static struct timer_list power_timer, blink_timer, debounce_timer;
37static int has_paniced, shuting_down; 37static int has_panicked, shuting_down;
38 38
39static void ip32_machine_restart(char *command) __attribute__((noreturn)); 39static void ip32_machine_restart(char *command) __attribute__((noreturn));
40static void ip32_machine_halt(void) __attribute__((noreturn)); 40static void ip32_machine_halt(void) __attribute__((noreturn));
@@ -109,7 +109,7 @@ static void debounce(unsigned long data)
109 } 109 }
110 CMOS_WRITE(reg_a & ~DS_REGA_DV0, RTC_REG_A); 110 CMOS_WRITE(reg_a & ~DS_REGA_DV0, RTC_REG_A);
111 111
112 if (has_paniced) 112 if (has_panicked)
113 ip32_machine_restart(NULL); 113 ip32_machine_restart(NULL);
114 114
115 enable_irq(MACEISA_RTC_IRQ); 115 enable_irq(MACEISA_RTC_IRQ);
@@ -117,7 +117,7 @@ static void debounce(unsigned long data)
117 117
118static inline void ip32_power_button(void) 118static inline void ip32_power_button(void)
119{ 119{
120 if (has_paniced) 120 if (has_panicked)
121 return; 121 return;
122 122
123 if (shuting_down || kill_proc(1, SIGINT, 1)) { 123 if (shuting_down || kill_proc(1, SIGINT, 1)) {
@@ -161,9 +161,9 @@ static int panic_event(struct notifier_block *this, unsigned long event,
161{ 161{
162 unsigned long led; 162 unsigned long led;
163 163
164 if (has_paniced) 164 if (has_panicked)
165 return NOTIFY_DONE; 165 return NOTIFY_DONE;
166 has_paniced = 1; 166 has_panicked = 1;
167 167
168 /* turn off the green LED */ 168 /* turn off the green LED */
169 led = mace->perif.ctrl.misc | MACEISA_LED_GREEN; 169 led = mace->perif.ctrl.misc | MACEISA_LED_GREEN;
diff --git a/arch/parisc/kernel/topology.c b/arch/parisc/kernel/topology.c
index 3ba040050e4c..068b20d822e7 100644
--- a/arch/parisc/kernel/topology.c
+++ b/arch/parisc/kernel/topology.c
@@ -26,11 +26,10 @@ static struct cpu cpu_devices[NR_CPUS] __read_mostly;
26 26
27static int __init topology_init(void) 27static int __init topology_init(void)
28{ 28{
29 struct node *parent = NULL;
30 int num; 29 int num;
31 30
32 for_each_present_cpu(num) { 31 for_each_present_cpu(num) {
33 register_cpu(&cpu_devices[num], num, parent); 32 register_cpu(&cpu_devices[num], num);
34 } 33 }
35 return 0; 34 return 0;
36} 35}
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index dbcb85994f46..e253a45dcf10 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -179,7 +179,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
179 179
180 /* 180 /*
181 * This function is only called after the system 181 * This function is only called after the system
182 * has paniced or is otherwise in a critical state. 182 * has panicked or is otherwise in a critical state.
183 * The minimum amount of code to allow a kexec'd kernel 183 * The minimum amount of code to allow a kexec'd kernel
184 * to run successfully needs to happen here. 184 * to run successfully needs to happen here.
185 * 185 *
diff --git a/arch/powerpc/kernel/machine_kexec_32.c b/arch/powerpc/kernel/machine_kexec_32.c
index 443606134dff..cbaa34196797 100644
--- a/arch/powerpc/kernel/machine_kexec_32.c
+++ b/arch/powerpc/kernel/machine_kexec_32.c
@@ -30,8 +30,8 @@ typedef NORET_TYPE void (*relocate_new_kernel_t)(
30 */ 30 */
31void default_machine_kexec(struct kimage *image) 31void default_machine_kexec(struct kimage *image)
32{ 32{
33 const extern unsigned char relocate_new_kernel[]; 33 extern const unsigned char relocate_new_kernel[];
34 const extern unsigned int relocate_new_kernel_size; 34 extern const unsigned int relocate_new_kernel_size;
35 unsigned long page_list; 35 unsigned long page_list;
36 unsigned long reboot_code_buffer, reboot_code_buffer_phys; 36 unsigned long reboot_code_buffer, reboot_code_buffer_phys;
37 relocate_new_kernel_t rnk; 37 relocate_new_kernel_t rnk;
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index e5a44812441a..0932a62a1c96 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -215,7 +215,7 @@ int __init ppc_init(void)
215 215
216 /* register CPU devices */ 216 /* register CPU devices */
217 for_each_possible_cpu(i) 217 for_each_possible_cpu(i)
218 register_cpu(&cpu_devices[i], i, NULL); 218 register_cpu(&cpu_devices[i], i);
219 219
220 /* call platform init */ 220 /* call platform init */
221 if (ppc_md.init != NULL) { 221 if (ppc_md.init != NULL) {
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 5bc2585c8036..4662b580efa1 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -279,7 +279,7 @@ static void unregister_cpu_online(unsigned int cpu)
279} 279}
280#endif /* CONFIG_HOTPLUG_CPU */ 280#endif /* CONFIG_HOTPLUG_CPU */
281 281
282static int sysfs_cpu_notify(struct notifier_block *self, 282static int __devinit sysfs_cpu_notify(struct notifier_block *self,
283 unsigned long action, void *hcpu) 283 unsigned long action, void *hcpu)
284{ 284{
285 unsigned int cpu = (unsigned int)(long)hcpu; 285 unsigned int cpu = (unsigned int)(long)hcpu;
@@ -297,30 +297,19 @@ static int sysfs_cpu_notify(struct notifier_block *self,
297 return NOTIFY_OK; 297 return NOTIFY_OK;
298} 298}
299 299
300static struct notifier_block sysfs_cpu_nb = { 300static struct notifier_block __devinitdata sysfs_cpu_nb = {
301 .notifier_call = sysfs_cpu_notify, 301 .notifier_call = sysfs_cpu_notify,
302}; 302};
303 303
304/* NUMA stuff */ 304/* NUMA stuff */
305 305
306#ifdef CONFIG_NUMA 306#ifdef CONFIG_NUMA
307static struct node node_devices[MAX_NUMNODES];
308
309static void register_nodes(void) 307static void register_nodes(void)
310{ 308{
311 int i; 309 int i;
312 310
313 for (i = 0; i < MAX_NUMNODES; i++) { 311 for (i = 0; i < MAX_NUMNODES; i++)
314 if (node_online(i)) { 312 register_one_node(i);
315 int p_node = parent_node(i);
316 struct node *parent = NULL;
317
318 if (p_node != i)
319 parent = &node_devices[p_node];
320
321 register_node(&node_devices[i], i, parent);
322 }
323 }
324} 313}
325 314
326int sysfs_add_device_to_node(struct sys_device *dev, int nid) 315int sysfs_add_device_to_node(struct sys_device *dev, int nid)
@@ -359,23 +348,13 @@ static SYSDEV_ATTR(physical_id, 0444, show_physical_id, NULL);
359static int __init topology_init(void) 348static int __init topology_init(void)
360{ 349{
361 int cpu; 350 int cpu;
362 struct node *parent = NULL;
363 351
364 register_nodes(); 352 register_nodes();
365
366 register_cpu_notifier(&sysfs_cpu_nb); 353 register_cpu_notifier(&sysfs_cpu_nb);
367 354
368 for_each_possible_cpu(cpu) { 355 for_each_possible_cpu(cpu) {
369 struct cpu *c = &per_cpu(cpu_devices, cpu); 356 struct cpu *c = &per_cpu(cpu_devices, cpu);
370 357
371#ifdef CONFIG_NUMA
372 /* The node to which a cpu belongs can't be known
373 * until the cpu is made present.
374 */
375 parent = NULL;
376 if (cpu_present(cpu))
377 parent = &node_devices[cpu_to_node(cpu)];
378#endif
379 /* 358 /*
380 * For now, we just see if the system supports making 359 * For now, we just see if the system supports making
381 * the RTAS calls for CPU hotplug. But, there may be a 360 * the RTAS calls for CPU hotplug. But, there may be a
@@ -387,7 +366,7 @@ static int __init topology_init(void)
387 c->no_control = 1; 366 c->no_control = 1;
388 367
389 if (cpu_online(cpu) || (c->no_control == 0)) { 368 if (cpu_online(cpu) || (c->no_control == 0)) {
390 register_cpu(c, cpu, parent); 369 register_cpu(c, cpu);
391 370
392 sysdev_create_file(&c->sysdev, &attr_physical_id); 371 sysdev_create_file(&c->sysdev, &attr_physical_id);
393 } 372 }
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index d20907561f46..7dd5dab789a1 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -102,7 +102,7 @@ EXPORT_SYMBOL(tb_ticks_per_sec); /* for cputime_t conversions */
102u64 tb_to_xs; 102u64 tb_to_xs;
103unsigned tb_to_us; 103unsigned tb_to_us;
104 104
105#define TICKLEN_SCALE (SHIFT_SCALE - 10) 105#define TICKLEN_SCALE TICK_LENGTH_SHIFT
106u64 last_tick_len; /* units are ns / 2^TICKLEN_SCALE */ 106u64 last_tick_len; /* units are ns / 2^TICKLEN_SCALE */
107u64 ticklen_to_xs; /* 0.64 fraction */ 107u64 ticklen_to_xs; /* 0.64 fraction */
108 108
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index fdbba4206d59..a0a9e1e0061e 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -40,6 +40,40 @@
40#include <asm/kdebug.h> 40#include <asm/kdebug.h>
41#include <asm/siginfo.h> 41#include <asm/siginfo.h>
42 42
43#ifdef CONFIG_KPROBES
44ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
45
46/* Hook to register for page fault notifications */
47int register_page_fault_notifier(struct notifier_block *nb)
48{
49 return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
50}
51
52int unregister_page_fault_notifier(struct notifier_block *nb)
53{
54 return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
55}
56
57static inline int notify_page_fault(enum die_val val, const char *str,
58 struct pt_regs *regs, long err, int trap, int sig)
59{
60 struct die_args args = {
61 .regs = regs,
62 .str = str,
63 .err = err,
64 .trapnr = trap,
65 .signr = sig
66 };
67 return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
68}
69#else
70static inline int notify_page_fault(enum die_val val, const char *str,
71 struct pt_regs *regs, long err, int trap, int sig)
72{
73 return NOTIFY_DONE;
74}
75#endif
76
43/* 77/*
44 * Check whether the instruction at regs->nip is a store using 78 * Check whether the instruction at regs->nip is a store using
45 * an update addressing form which will update r1. 79 * an update addressing form which will update r1.
@@ -142,7 +176,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
142 is_write = error_code & ESR_DST; 176 is_write = error_code & ESR_DST;
143#endif /* CONFIG_4xx || CONFIG_BOOKE */ 177#endif /* CONFIG_4xx || CONFIG_BOOKE */
144 178
145 if (notify_die(DIE_PAGE_FAULT, "page_fault", regs, error_code, 179 if (notify_page_fault(DIE_PAGE_FAULT, "page_fault", regs, error_code,
146 11, SIGSEGV) == NOTIFY_STOP) 180 11, SIGSEGV) == NOTIFY_STOP)
147 return 0; 181 return 0;
148 182
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 9e30f968c184..d454caada265 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -41,6 +41,7 @@
41#include <linux/idr.h> 41#include <linux/idr.h>
42#include <linux/nodemask.h> 42#include <linux/nodemask.h>
43#include <linux/module.h> 43#include <linux/module.h>
44#include <linux/poison.h>
44 45
45#include <asm/pgalloc.h> 46#include <asm/pgalloc.h>
46#include <asm/page.h> 47#include <asm/page.h>
@@ -90,7 +91,7 @@ void free_initmem(void)
90 91
91 addr = (unsigned long)__init_begin; 92 addr = (unsigned long)__init_begin;
92 for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) { 93 for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
93 memset((void *)addr, 0xcc, PAGE_SIZE); 94 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
94 ClearPageReserved(virt_to_page(addr)); 95 ClearPageReserved(virt_to_page(addr));
95 init_page_count(virt_to_page(addr)); 96 init_page_count(virt_to_page(addr));
96 free_page(addr); 97 free_page(addr);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 69f3b9a20beb..089d939a0b3e 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -114,15 +114,20 @@ void online_page(struct page *page)
114 num_physpages++; 114 num_physpages++;
115} 115}
116 116
117int __devinit add_memory(u64 start, u64 size) 117#ifdef CONFIG_NUMA
118int memory_add_physaddr_to_nid(u64 start)
119{
120 return hot_add_scn_to_nid(start);
121}
122#endif
123
124int __devinit arch_add_memory(int nid, u64 start, u64 size)
118{ 125{
119 struct pglist_data *pgdata; 126 struct pglist_data *pgdata;
120 struct zone *zone; 127 struct zone *zone;
121 int nid;
122 unsigned long start_pfn = start >> PAGE_SHIFT; 128 unsigned long start_pfn = start >> PAGE_SHIFT;
123 unsigned long nr_pages = size >> PAGE_SHIFT; 129 unsigned long nr_pages = size >> PAGE_SHIFT;
124 130
125 nid = hot_add_scn_to_nid(start);
126 pgdata = NODE_DATA(nid); 131 pgdata = NODE_DATA(nid);
127 132
128 start = (unsigned long)__va(start); 133 start = (unsigned long)__va(start);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index aa98cb3b59d8..fbe23933f731 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -334,7 +334,7 @@ out:
334 return nid; 334 return nid;
335} 335}
336 336
337static int cpu_numa_callback(struct notifier_block *nfb, 337static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
338 unsigned long action, 338 unsigned long action,
339 void *hcpu) 339 void *hcpu)
340{ 340{
@@ -609,14 +609,15 @@ static void __init *careful_allocation(int nid, unsigned long size,
609 return (void *)ret; 609 return (void *)ret;
610} 610}
611 611
612static struct notifier_block __cpuinitdata ppc64_numa_nb = {
613 .notifier_call = cpu_numa_callback,
614 .priority = 1 /* Must run before sched domains notifier. */
615};
616
612void __init do_init_bootmem(void) 617void __init do_init_bootmem(void)
613{ 618{
614 int nid; 619 int nid;
615 unsigned int i; 620 unsigned int i;
616 static struct notifier_block ppc64_numa_nb = {
617 .notifier_call = cpu_numa_callback,
618 .priority = 1 /* Must run before sched domains notifier. */
619 };
620 621
621 min_low_pfn = 0; 622 min_low_pfn = 0;
622 max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT; 623 max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
index 27ad56bd227e..fd0bbbe7a4de 100644
--- a/arch/powerpc/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -94,7 +94,7 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
94 94
95 for (i = 0; i < model->num_counters; ++i) { 95 for (i = 0; i < model->num_counters; ++i) {
96 struct dentry *dir; 96 struct dentry *dir;
97 char buf[3]; 97 char buf[4];
98 98
99 snprintf(buf, sizeof buf, "%d", i); 99 snprintf(buf, sizeof buf, "%d", i);
100 dir = oprofilefs_mkdir(sb, root, buf); 100 dir = oprofilefs_mkdir(sb, root, buf);
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index b30e55dab832..a656d810a44a 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -2100,7 +2100,7 @@ EXPORT_SYMBOL_GPL(spu_save);
2100 * @spu: pointer to SPU iomem structure. 2100 * @spu: pointer to SPU iomem structure.
2101 * 2101 *
2102 * Perform harvest + restore, as we may not be coming 2102 * Perform harvest + restore, as we may not be coming
2103 * from a previous succesful save operation, and the 2103 * from a previous successful save operation, and the
2104 * hardware state is unknown. 2104 * hardware state is unknown.
2105 */ 2105 */
2106int spu_restore(struct spu_state *new, struct spu *spu) 2106int spu_restore(struct spu_state *new, struct spu *spu)
@@ -2203,7 +2203,7 @@ void spu_init_csa(struct spu_state *csa)
2203 2203
2204 memset(lscsa, 0, sizeof(struct spu_lscsa)); 2204 memset(lscsa, 0, sizeof(struct spu_lscsa));
2205 csa->lscsa = lscsa; 2205 csa->lscsa = lscsa;
2206 csa->register_lock = SPIN_LOCK_UNLOCKED; 2206 spin_lock_init(&csa->register_lock);
2207 2207
2208 /* Set LS pages reserved to allow for user-space mapping. */ 2208 /* Set LS pages reserved to allow for user-space mapping. */
2209 for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE) 2209 for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE)
diff --git a/arch/powerpc/platforms/powermac/pfunc_core.c b/arch/powerpc/platforms/powermac/pfunc_core.c
index 047f954a89eb..93e7505debc5 100644
--- a/arch/powerpc/platforms/powermac/pfunc_core.c
+++ b/arch/powerpc/platforms/powermac/pfunc_core.c
@@ -546,7 +546,7 @@ struct pmf_device {
546}; 546};
547 547
548static LIST_HEAD(pmf_devices); 548static LIST_HEAD(pmf_devices);
549static spinlock_t pmf_lock = SPIN_LOCK_UNLOCKED; 549static DEFINE_SPINLOCK(pmf_lock);
550static DEFINE_MUTEX(pmf_irq_mutex); 550static DEFINE_MUTEX(pmf_irq_mutex);
551 551
552static void pmf_release_device(struct kref *kref) 552static void pmf_release_device(struct kref *kref)
diff --git a/arch/powerpc/platforms/pseries/eeh_cache.c b/arch/powerpc/platforms/pseries/eeh_cache.c
index 98c23aec85be..c37a8497c60f 100644
--- a/arch/powerpc/platforms/pseries/eeh_cache.c
+++ b/arch/powerpc/platforms/pseries/eeh_cache.c
@@ -287,7 +287,7 @@ void pci_addr_cache_remove_device(struct pci_dev *dev)
287 * find the pci device that corresponds to a given address. 287 * find the pci device that corresponds to a given address.
288 * This routine scans all pci busses to build the cache. 288 * This routine scans all pci busses to build the cache.
289 * Must be run late in boot process, after the pci controllers 289 * Must be run late in boot process, after the pci controllers
290 * have been scaned for devices (after all device resources are known). 290 * have been scanned for devices (after all device resources are known).
291 */ 291 */
292void __init pci_addr_cache_build(void) 292void __init pci_addr_cache_build(void)
293{ 293{
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
index 8f2d12935b99..45ccc687e57c 100644
--- a/arch/powerpc/platforms/pseries/eeh_event.c
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -35,7 +35,7 @@
35 */ 35 */
36 36
37/* EEH event workqueue setup. */ 37/* EEH event workqueue setup. */
38static spinlock_t eeh_eventlist_lock = SPIN_LOCK_UNLOCKED; 38static DEFINE_SPINLOCK(eeh_eventlist_lock);
39LIST_HEAD(eeh_eventlist); 39LIST_HEAD(eeh_eventlist);
40static void eeh_thread_launcher(void *); 40static void eeh_thread_launcher(void *);
41DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL); 41DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL);
diff --git a/arch/powerpc/sysdev/mmio_nvram.c b/arch/powerpc/sysdev/mmio_nvram.c
index 74e0d31a3559..615350d46b52 100644
--- a/arch/powerpc/sysdev/mmio_nvram.c
+++ b/arch/powerpc/sysdev/mmio_nvram.c
@@ -32,7 +32,7 @@
32 32
33static void __iomem *mmio_nvram_start; 33static void __iomem *mmio_nvram_start;
34static long mmio_nvram_len; 34static long mmio_nvram_len;
35static spinlock_t mmio_nvram_lock = SPIN_LOCK_UNLOCKED; 35static DEFINE_SPINLOCK(mmio_nvram_lock);
36 36
37static ssize_t mmio_nvram_read(char *buf, size_t count, loff_t *index) 37static ssize_t mmio_nvram_read(char *buf, size_t count, loff_t *index)
38{ 38{
diff --git a/arch/ppc/kernel/machine_kexec.c b/arch/ppc/kernel/machine_kexec.c
index 84d65a87191e..a469ba438cbe 100644
--- a/arch/ppc/kernel/machine_kexec.c
+++ b/arch/ppc/kernel/machine_kexec.c
@@ -25,8 +25,8 @@ typedef NORET_TYPE void (*relocate_new_kernel_t)(
25 unsigned long reboot_code_buffer, 25 unsigned long reboot_code_buffer,
26 unsigned long start_address) ATTRIB_NORET; 26 unsigned long start_address) ATTRIB_NORET;
27 27
28const extern unsigned char relocate_new_kernel[]; 28extern const unsigned char relocate_new_kernel[];
29const extern unsigned int relocate_new_kernel_size; 29extern const unsigned int relocate_new_kernel_size;
30 30
31void machine_shutdown(void) 31void machine_shutdown(void)
32{ 32{
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index 1f79e84ab464..4b4607d89bfa 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -475,7 +475,7 @@ int __init ppc_init(void)
475 475
476 /* register CPU devices */ 476 /* register CPU devices */
477 for_each_possible_cpu(i) 477 for_each_possible_cpu(i)
478 register_cpu(&cpu_devices[i], i, NULL); 478 register_cpu(&cpu_devices[i], i);
479 479
480 /* call platform init */ 480 /* call platform init */
481 if (ppc_md.init != NULL) { 481 if (ppc_md.init != NULL) {
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 9a22434a580c..54d35c130907 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -652,7 +652,7 @@ appldata_cpu_notify(struct notifier_block *self,
652 return NOTIFY_OK; 652 return NOTIFY_OK;
653} 653}
654 654
655static struct notifier_block appldata_nb = { 655static struct notifier_block __devinitdata appldata_nb = {
656 .notifier_call = appldata_cpu_notify, 656 .notifier_call = appldata_cpu_notify,
657}; 657};
658 658
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index c5ca2dc5d428..5713c7e5bd16 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -37,10 +37,10 @@ struct s390_aes_ctx {
37 int key_len; 37 int key_len;
38}; 38};
39 39
40static int aes_set_key(void *ctx, const u8 *in_key, unsigned int key_len, 40static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
41 u32 *flags) 41 unsigned int key_len, u32 *flags)
42{ 42{
43 struct s390_aes_ctx *sctx = ctx; 43 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
44 44
45 switch (key_len) { 45 switch (key_len) {
46 case 16: 46 case 16:
@@ -70,9 +70,9 @@ fail:
70 return -EINVAL; 70 return -EINVAL;
71} 71}
72 72
73static void aes_encrypt(void *ctx, u8 *out, const u8 *in) 73static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
74{ 74{
75 const struct s390_aes_ctx *sctx = ctx; 75 const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
76 76
77 switch (sctx->key_len) { 77 switch (sctx->key_len) {
78 case 16: 78 case 16:
@@ -90,9 +90,9 @@ static void aes_encrypt(void *ctx, u8 *out, const u8 *in)
90 } 90 }
91} 91}
92 92
93static void aes_decrypt(void *ctx, u8 *out, const u8 *in) 93static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
94{ 94{
95 const struct s390_aes_ctx *sctx = ctx; 95 const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
96 96
97 switch (sctx->key_len) { 97 switch (sctx->key_len) {
98 case 16: 98 case 16:
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index e3c37aa0a199..b3f7496a79b4 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -44,10 +44,10 @@ struct crypt_s390_des3_192_ctx {
44 u8 key[DES3_192_KEY_SIZE]; 44 u8 key[DES3_192_KEY_SIZE];
45}; 45};
46 46
47static int des_setkey(void *ctx, const u8 *key, unsigned int keylen, 47static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
48 u32 *flags) 48 unsigned int keylen, u32 *flags)
49{ 49{
50 struct crypt_s390_des_ctx *dctx = ctx; 50 struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm);
51 int ret; 51 int ret;
52 52
53 /* test if key is valid (not a weak key) */ 53 /* test if key is valid (not a weak key) */
@@ -57,16 +57,16 @@ static int des_setkey(void *ctx, const u8 *key, unsigned int keylen,
57 return ret; 57 return ret;
58} 58}
59 59
60static void des_encrypt(void *ctx, u8 *out, const u8 *in) 60static void des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
61{ 61{
62 struct crypt_s390_des_ctx *dctx = ctx; 62 struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm);
63 63
64 crypt_s390_km(KM_DEA_ENCRYPT, dctx->key, out, in, DES_BLOCK_SIZE); 64 crypt_s390_km(KM_DEA_ENCRYPT, dctx->key, out, in, DES_BLOCK_SIZE);
65} 65}
66 66
67static void des_decrypt(void *ctx, u8 *out, const u8 *in) 67static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
68{ 68{
69 struct crypt_s390_des_ctx *dctx = ctx; 69 struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm);
70 70
71 crypt_s390_km(KM_DEA_DECRYPT, dctx->key, out, in, DES_BLOCK_SIZE); 71 crypt_s390_km(KM_DEA_DECRYPT, dctx->key, out, in, DES_BLOCK_SIZE);
72} 72}
@@ -166,11 +166,11 @@ static struct crypto_alg des_alg = {
166 * Implementers MUST reject keys that exhibit this property. 166 * Implementers MUST reject keys that exhibit this property.
167 * 167 *
168 */ 168 */
169static int des3_128_setkey(void *ctx, const u8 *key, unsigned int keylen, 169static int des3_128_setkey(struct crypto_tfm *tfm, const u8 *key,
170 u32 *flags) 170 unsigned int keylen, u32 *flags)
171{ 171{
172 int i, ret; 172 int i, ret;
173 struct crypt_s390_des3_128_ctx *dctx = ctx; 173 struct crypt_s390_des3_128_ctx *dctx = crypto_tfm_ctx(tfm);
174 const u8* temp_key = key; 174 const u8* temp_key = key;
175 175
176 if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE))) { 176 if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE))) {
@@ -186,17 +186,17 @@ static int des3_128_setkey(void *ctx, const u8 *key, unsigned int keylen,
186 return 0; 186 return 0;
187} 187}
188 188
189static void des3_128_encrypt(void *ctx, u8 *dst, const u8 *src) 189static void des3_128_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
190{ 190{
191 struct crypt_s390_des3_128_ctx *dctx = ctx; 191 struct crypt_s390_des3_128_ctx *dctx = crypto_tfm_ctx(tfm);
192 192
193 crypt_s390_km(KM_TDEA_128_ENCRYPT, dctx->key, dst, (void*)src, 193 crypt_s390_km(KM_TDEA_128_ENCRYPT, dctx->key, dst, (void*)src,
194 DES3_128_BLOCK_SIZE); 194 DES3_128_BLOCK_SIZE);
195} 195}
196 196
197static void des3_128_decrypt(void *ctx, u8 *dst, const u8 *src) 197static void des3_128_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
198{ 198{
199 struct crypt_s390_des3_128_ctx *dctx = ctx; 199 struct crypt_s390_des3_128_ctx *dctx = crypto_tfm_ctx(tfm);
200 200
201 crypt_s390_km(KM_TDEA_128_DECRYPT, dctx->key, dst, (void*)src, 201 crypt_s390_km(KM_TDEA_128_DECRYPT, dctx->key, dst, (void*)src,
202 DES3_128_BLOCK_SIZE); 202 DES3_128_BLOCK_SIZE);
@@ -302,11 +302,11 @@ static struct crypto_alg des3_128_alg = {
302 * property. 302 * property.
303 * 303 *
304 */ 304 */
305static int des3_192_setkey(void *ctx, const u8 *key, unsigned int keylen, 305static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key,
306 u32 *flags) 306 unsigned int keylen, u32 *flags)
307{ 307{
308 int i, ret; 308 int i, ret;
309 struct crypt_s390_des3_192_ctx *dctx = ctx; 309 struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
310 const u8* temp_key = key; 310 const u8* temp_key = key;
311 311
312 if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) && 312 if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
@@ -325,17 +325,17 @@ static int des3_192_setkey(void *ctx, const u8 *key, unsigned int keylen,
325 return 0; 325 return 0;
326} 326}
327 327
328static void des3_192_encrypt(void *ctx, u8 *dst, const u8 *src) 328static void des3_192_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
329{ 329{
330 struct crypt_s390_des3_192_ctx *dctx = ctx; 330 struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
331 331
332 crypt_s390_km(KM_TDEA_192_ENCRYPT, dctx->key, dst, (void*)src, 332 crypt_s390_km(KM_TDEA_192_ENCRYPT, dctx->key, dst, (void*)src,
333 DES3_192_BLOCK_SIZE); 333 DES3_192_BLOCK_SIZE);
334} 334}
335 335
336static void des3_192_decrypt(void *ctx, u8 *dst, const u8 *src) 336static void des3_192_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
337{ 337{
338 struct crypt_s390_des3_192_ctx *dctx = ctx; 338 struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
339 339
340 crypt_s390_km(KM_TDEA_192_DECRYPT, dctx->key, dst, (void*)src, 340 crypt_s390_km(KM_TDEA_192_DECRYPT, dctx->key, dst, (void*)src,
341 DES3_192_BLOCK_SIZE); 341 DES3_192_BLOCK_SIZE);
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index 98c896b86dcd..9d34a35b1aa5 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -40,28 +40,29 @@ struct crypt_s390_sha1_ctx {
40 u8 buffer[2 * SHA1_BLOCK_SIZE]; 40 u8 buffer[2 * SHA1_BLOCK_SIZE];
41}; 41};
42 42
43static void 43static void sha1_init(struct crypto_tfm *tfm)
44sha1_init(void *ctx)
45{ 44{
46 static const struct crypt_s390_sha1_ctx initstate = { 45 struct crypt_s390_sha1_ctx *ctx = crypto_tfm_ctx(tfm);
47 .state = { 46 static const u32 initstate[5] = {
48 0x67452301, 47 0x67452301,
49 0xEFCDAB89, 48 0xEFCDAB89,
50 0x98BADCFE, 49 0x98BADCFE,
51 0x10325476, 50 0x10325476,
52 0xC3D2E1F0 51 0xC3D2E1F0
53 },
54 }; 52 };
55 memcpy(ctx, &initstate, sizeof(initstate)); 53
54 ctx->count = 0;
55 memcpy(ctx->state, &initstate, sizeof(initstate));
56 ctx->buf_len = 0;
56} 57}
57 58
58static void 59static void sha1_update(struct crypto_tfm *tfm, const u8 *data,
59sha1_update(void *ctx, const u8 *data, unsigned int len) 60 unsigned int len)
60{ 61{
61 struct crypt_s390_sha1_ctx *sctx; 62 struct crypt_s390_sha1_ctx *sctx;
62 long imd_len; 63 long imd_len;
63 64
64 sctx = ctx; 65 sctx = crypto_tfm_ctx(tfm);
65 sctx->count += len * 8; //message bit length 66 sctx->count += len * 8; //message bit length
66 67
67 //anything in buffer yet? -> must be completed 68 //anything in buffer yet? -> must be completed
@@ -110,10 +111,9 @@ pad_message(struct crypt_s390_sha1_ctx* sctx)
110} 111}
111 112
112/* Add padding and return the message digest. */ 113/* Add padding and return the message digest. */
113static void 114static void sha1_final(struct crypto_tfm *tfm, u8 *out)
114sha1_final(void* ctx, u8 *out)
115{ 115{
116 struct crypt_s390_sha1_ctx *sctx = ctx; 116 struct crypt_s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
117 117
118 //must perform manual padding 118 //must perform manual padding
119 pad_message(sctx); 119 pad_message(sctx);
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 1ec5e92b3454..f573df30f31d 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -31,9 +31,9 @@ struct s390_sha256_ctx {
31 u8 buf[2 * SHA256_BLOCK_SIZE]; 31 u8 buf[2 * SHA256_BLOCK_SIZE];
32}; 32};
33 33
34static void sha256_init(void *ctx) 34static void sha256_init(struct crypto_tfm *tfm)
35{ 35{
36 struct s390_sha256_ctx *sctx = ctx; 36 struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm);
37 37
38 sctx->state[0] = 0x6a09e667; 38 sctx->state[0] = 0x6a09e667;
39 sctx->state[1] = 0xbb67ae85; 39 sctx->state[1] = 0xbb67ae85;
@@ -44,12 +44,12 @@ static void sha256_init(void *ctx)
44 sctx->state[6] = 0x1f83d9ab; 44 sctx->state[6] = 0x1f83d9ab;
45 sctx->state[7] = 0x5be0cd19; 45 sctx->state[7] = 0x5be0cd19;
46 sctx->count = 0; 46 sctx->count = 0;
47 memset(sctx->buf, 0, sizeof(sctx->buf));
48} 47}
49 48
50static void sha256_update(void *ctx, const u8 *data, unsigned int len) 49static void sha256_update(struct crypto_tfm *tfm, const u8 *data,
50 unsigned int len)
51{ 51{
52 struct s390_sha256_ctx *sctx = ctx; 52 struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm);
53 unsigned int index; 53 unsigned int index;
54 int ret; 54 int ret;
55 55
@@ -108,9 +108,9 @@ static void pad_message(struct s390_sha256_ctx* sctx)
108} 108}
109 109
110/* Add padding and return the message digest */ 110/* Add padding and return the message digest */
111static void sha256_final(void* ctx, u8 *out) 111static void sha256_final(struct crypto_tfm *tfm, u8 *out)
112{ 112{
113 struct s390_sha256_ctx *sctx = ctx; 113 struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm);
114 114
115 /* must perform manual padding */ 115 /* must perform manual padding */
116 pad_message(sctx); 116 pad_message(sctx);
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index bad81b5832db..fbde6a915264 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -27,8 +27,8 @@ static void kexec_halt_all_cpus(void *);
27 27
28typedef void (*relocate_kernel_t) (kimage_entry_t *, unsigned long); 28typedef void (*relocate_kernel_t) (kimage_entry_t *, unsigned long);
29 29
30const extern unsigned char relocate_kernel[]; 30extern const unsigned char relocate_kernel[];
31const extern unsigned long long relocate_kernel_len; 31extern const unsigned long long relocate_kernel_len;
32 32
33int 33int
34machine_kexec_prepare(struct kimage *image) 34machine_kexec_prepare(struct kimage *image)
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 343120c9223d..8e03219eea76 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -869,7 +869,7 @@ static int __init topology_init(void)
869 int ret; 869 int ret;
870 870
871 for_each_possible_cpu(cpu) { 871 for_each_possible_cpu(cpu) {
872 ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL); 872 ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu);
873 if (ret) 873 if (ret)
874 printk(KERN_WARNING "topology_init: register_cpu %d " 874 printk(KERN_WARNING "topology_init: register_cpu %d "
875 "failed (%d)\n", cpu, ret); 875 "failed (%d)\n", cpu, ret);
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index dfe6f0856617..1f0439dc245a 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -356,7 +356,7 @@ static void internal_add_vtimer(struct vtimer_list *timer)
356 356
357 set_vtimer(event->expires); 357 set_vtimer(event->expires);
358 spin_unlock_irqrestore(&vt_list->lock, flags); 358 spin_unlock_irqrestore(&vt_list->lock, flags);
359 /* release CPU aquired in prepare_vtimer or mod_virt_timer() */ 359 /* release CPU acquired in prepare_vtimer or mod_virt_timer() */
360 put_cpu(); 360 put_cpu();
361} 361}
362 362
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index c72e17a96eed..e467a450662b 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -147,7 +147,7 @@ endif
147# them changed. We use .arch and .mach to indicate when they were 147# them changed. We use .arch and .mach to indicate when they were
148# updated last, otherwise make uses the target directory mtime. 148# updated last, otherwise make uses the target directory mtime.
149 149
150include/asm-sh/.cpu: $(wildcard include/config/cpu/*.h) include/config/MARKER 150include/asm-sh/.cpu: $(wildcard include/config/cpu/*.h) include/config/auto.conf
151 @echo ' SYMLINK include/asm-sh/cpu -> include/asm-sh/$(cpuincdir-y)' 151 @echo ' SYMLINK include/asm-sh/cpu -> include/asm-sh/$(cpuincdir-y)'
152 $(Q)if [ ! -d include/asm-sh ]; then mkdir -p include/asm-sh; fi 152 $(Q)if [ ! -d include/asm-sh ]; then mkdir -p include/asm-sh; fi
153 $(Q)ln -fsn $(incdir-prefix)$(cpuincdir-y) include/asm-sh/cpu 153 $(Q)ln -fsn $(incdir-prefix)$(cpuincdir-y) include/asm-sh/cpu
@@ -157,7 +157,7 @@ include/asm-sh/.cpu: $(wildcard include/config/cpu/*.h) include/config/MARKER
157# don't, just reference the parent directory so the semantics are 157# don't, just reference the parent directory so the semantics are
158# kept roughly the same. 158# kept roughly the same.
159 159
160include/asm-sh/.mach: $(wildcard include/config/sh/*.h) include/config/MARKER 160include/asm-sh/.mach: $(wildcard include/config/sh/*.h) include/config/auto.conf
161 @echo -n ' SYMLINK include/asm-sh/mach -> ' 161 @echo -n ' SYMLINK include/asm-sh/mach -> '
162 $(Q)if [ ! -d include/asm-sh ]; then mkdir -p include/asm-sh; fi 162 $(Q)if [ ! -d include/asm-sh ]; then mkdir -p include/asm-sh; fi
163 $(Q)if [ -d $(incdir-prefix)$(incdir-y) ]; then \ 163 $(Q)if [ -d $(incdir-prefix)$(incdir-y) ]; then \
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index 43546525f28f..6bcd8d92399f 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -25,8 +25,8 @@ typedef NORET_TYPE void (*relocate_new_kernel_t)(
25 unsigned long start_address, 25 unsigned long start_address,
26 unsigned long vbr_reg) ATTRIB_NORET; 26 unsigned long vbr_reg) ATTRIB_NORET;
27 27
28const extern unsigned char relocate_new_kernel[]; 28extern const unsigned char relocate_new_kernel[];
29const extern unsigned int relocate_new_kernel_size; 29extern const unsigned int relocate_new_kernel_size;
30extern void *gdb_vbr_vector; 30extern void *gdb_vbr_vector;
31 31
32/* 32/*
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index bb229ef030f3..9af22116c9a2 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -402,7 +402,7 @@ static int __init topology_init(void)
402 int cpu_id; 402 int cpu_id;
403 403
404 for_each_possible_cpu(cpu_id) 404 for_each_possible_cpu(cpu_id)
405 register_cpu(&cpu[cpu_id], cpu_id, NULL); 405 register_cpu(&cpu[cpu_id], cpu_id);
406 406
407 return 0; 407 return 0;
408} 408}
diff --git a/arch/sh/oprofile/op_model_sh7750.c b/arch/sh/oprofile/op_model_sh7750.c
index 5ec9ddcc4b0b..c265185b22a7 100644
--- a/arch/sh/oprofile/op_model_sh7750.c
+++ b/arch/sh/oprofile/op_model_sh7750.c
@@ -198,7 +198,7 @@ static int sh7750_perf_counter_create_files(struct super_block *sb, struct dentr
198 198
199 for (i = 0; i < NR_CNTRS; i++) { 199 for (i = 0; i < NR_CNTRS; i++) {
200 struct dentry *dir; 200 struct dentry *dir;
201 char buf[3]; 201 char buf[4];
202 202
203 snprintf(buf, sizeof(buf), "%d", i); 203 snprintf(buf, sizeof(buf), "%d", i);
204 dir = oprofilefs_mkdir(sb, root, buf); 204 dir = oprofilefs_mkdir(sb, root, buf);
diff --git a/arch/sh64/kernel/setup.c b/arch/sh64/kernel/setup.c
index d2711c9c9d13..da98d8dbcf95 100644
--- a/arch/sh64/kernel/setup.c
+++ b/arch/sh64/kernel/setup.c
@@ -309,7 +309,7 @@ static struct cpu cpu[1];
309 309
310static int __init topology_init(void) 310static int __init topology_init(void)
311{ 311{
312 return register_cpu(cpu, 0, NULL); 312 return register_cpu(cpu, 0);
313} 313}
314 314
315subsys_initcall(topology_init); 315subsys_initcall(topology_init);
diff --git a/arch/sparc/kernel/of_device.c b/arch/sparc/kernel/of_device.c
index 001b8673b4bd..80a809478781 100644
--- a/arch/sparc/kernel/of_device.c
+++ b/arch/sparc/kernel/of_device.c
@@ -138,6 +138,7 @@ struct bus_type ebus_bus_type = {
138 .suspend = of_device_suspend, 138 .suspend = of_device_suspend,
139 .resume = of_device_resume, 139 .resume = of_device_resume,
140}; 140};
141EXPORT_SYMBOL(ebus_bus_type);
141#endif 142#endif
142 143
143#ifdef CONFIG_SBUS 144#ifdef CONFIG_SBUS
@@ -149,6 +150,7 @@ struct bus_type sbus_bus_type = {
149 .suspend = of_device_suspend, 150 .suspend = of_device_suspend,
150 .resume = of_device_resume, 151 .resume = of_device_resume,
151}; 152};
153EXPORT_SYMBOL(sbus_bus_type);
152#endif 154#endif
153 155
154static int __init of_bus_driver_init(void) 156static int __init of_bus_driver_init(void)
diff --git a/arch/sparc/kernel/prom.c b/arch/sparc/kernel/prom.c
index 63b2b9bd778e..946ce6d15819 100644
--- a/arch/sparc/kernel/prom.c
+++ b/arch/sparc/kernel/prom.c
@@ -27,6 +27,11 @@
27 27
28static struct device_node *allnodes; 28static struct device_node *allnodes;
29 29
30/* use when traversing tree through the allnext, child, sibling,
31 * or parent members of struct device_node.
32 */
33static DEFINE_RWLOCK(devtree_lock);
34
30int of_device_is_compatible(struct device_node *device, const char *compat) 35int of_device_is_compatible(struct device_node *device, const char *compat)
31{ 36{
32 const char* cp; 37 const char* cp;
@@ -185,6 +190,54 @@ int of_getintprop_default(struct device_node *np, const char *name, int def)
185} 190}
186EXPORT_SYMBOL(of_getintprop_default); 191EXPORT_SYMBOL(of_getintprop_default);
187 192
193int of_set_property(struct device_node *dp, const char *name, void *val, int len)
194{
195 struct property **prevp;
196 void *new_val;
197 int err;
198
199 new_val = kmalloc(len, GFP_KERNEL);
200 if (!new_val)
201 return -ENOMEM;
202
203 memcpy(new_val, val, len);
204
205 err = -ENODEV;
206
207 write_lock(&devtree_lock);
208 prevp = &dp->properties;
209 while (*prevp) {
210 struct property *prop = *prevp;
211
212 if (!strcmp(prop->name, name)) {
213 void *old_val = prop->value;
214 int ret;
215
216 ret = prom_setprop(dp->node, name, val, len);
217 err = -EINVAL;
218 if (ret >= 0) {
219 prop->value = new_val;
220 prop->length = len;
221
222 if (OF_IS_DYNAMIC(prop))
223 kfree(old_val);
224
225 OF_MARK_DYNAMIC(prop);
226
227 err = 0;
228 }
229 break;
230 }
231 prevp = &(*prevp)->next;
232 }
233 write_unlock(&devtree_lock);
234
235	/* XXX Update procfs if necessary... */
236
237 return err;
238}
239EXPORT_SYMBOL(of_set_property);
240
188static unsigned int prom_early_allocated; 241static unsigned int prom_early_allocated;
189 242
190static void * __init prom_early_alloc(unsigned long size) 243static void * __init prom_early_alloc(unsigned long size)
@@ -354,7 +407,9 @@ static char * __init build_full_name(struct device_node *dp)
354 return n; 407 return n;
355} 408}
356 409
357static struct property * __init build_one_prop(phandle node, char *prev) 410static unsigned int unique_id;
411
412static struct property * __init build_one_prop(phandle node, char *prev, char *special_name, void *special_val, int special_len)
358{ 413{
359 static struct property *tmp = NULL; 414 static struct property *tmp = NULL;
360 struct property *p; 415 struct property *p;
@@ -364,25 +419,34 @@ static struct property * __init build_one_prop(phandle node, char *prev)
364 p = tmp; 419 p = tmp;
365 memset(p, 0, sizeof(*p) + 32); 420 memset(p, 0, sizeof(*p) + 32);
366 tmp = NULL; 421 tmp = NULL;
367 } else 422 } else {
368 p = prom_early_alloc(sizeof(struct property) + 32); 423 p = prom_early_alloc(sizeof(struct property) + 32);
424 p->unique_id = unique_id++;
425 }
369 426
370 p->name = (char *) (p + 1); 427 p->name = (char *) (p + 1);
371 if (prev == NULL) { 428 if (special_name) {
372 prom_firstprop(node, p->name); 429 p->length = special_len;
430 p->value = prom_early_alloc(special_len);
431 memcpy(p->value, special_val, special_len);
373 } else { 432 } else {
374 prom_nextprop(node, prev, p->name); 433 if (prev == NULL) {
375 } 434 prom_firstprop(node, p->name);
376 if (strlen(p->name) == 0) { 435 } else {
377 tmp = p; 436 prom_nextprop(node, prev, p->name);
378 return NULL; 437 }
379 } 438 if (strlen(p->name) == 0) {
380 p->length = prom_getproplen(node, p->name); 439 tmp = p;
381 if (p->length <= 0) { 440 return NULL;
382 p->length = 0; 441 }
383 } else { 442 p->length = prom_getproplen(node, p->name);
384 p->value = prom_early_alloc(p->length); 443 if (p->length <= 0) {
385 len = prom_getproperty(node, p->name, p->value, p->length); 444 p->length = 0;
445 } else {
446 p->value = prom_early_alloc(p->length + 1);
447 prom_getproperty(node, p->name, p->value, p->length);
448 ((unsigned char *)p->value)[p->length] = '\0';
449 }
386 } 450 }
387 return p; 451 return p;
388} 452}
@@ -391,9 +455,14 @@ static struct property * __init build_prop_list(phandle node)
391{ 455{
392 struct property *head, *tail; 456 struct property *head, *tail;
393 457
394 head = tail = build_one_prop(node, NULL); 458 head = tail = build_one_prop(node, NULL,
459 ".node", &node, sizeof(node));
460
461 tail->next = build_one_prop(node, NULL, NULL, NULL, 0);
462 tail = tail->next;
395 while(tail) { 463 while(tail) {
396 tail->next = build_one_prop(node, tail->name); 464 tail->next = build_one_prop(node, tail->name,
465 NULL, NULL, 0);
397 tail = tail->next; 466 tail = tail->next;
398 } 467 }
399 468
@@ -422,6 +491,7 @@ static struct device_node * __init create_node(phandle node)
422 return NULL; 491 return NULL;
423 492
424 dp = prom_early_alloc(sizeof(*dp)); 493 dp = prom_early_alloc(sizeof(*dp));
494 dp->unique_id = unique_id++;
425 495
426 kref_init(&dp->kref); 496 kref_init(&dp->kref);
427 497
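
A minimal usage sketch of the of_set_property() helper added to arch/sparc/kernel/prom.c above; the caller, node pointer, property name and value are hypothetical and only illustrate the call, they are not part of the patch.

/* Hypothetical caller: replace the value of an existing "example-prop". */
static int example_update_prop(struct device_node *dp)
{
	u32 newval = 42;	/* hypothetical payload */

	/* of_set_property() copies the value and pushes it to the PROM too. */
	return of_set_property(dp, "example-prop", &newval, sizeof(newval));
}
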
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index fa5006946062..5db7e1d85385 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -9,3 +9,5 @@ lib-y := mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o memcpy.o memset.o \
9 strncpy_from_user.o divdi3.o udivdi3.o strlen_user.o \ 9 strncpy_from_user.o divdi3.o udivdi3.o strlen_user.o \
10 copy_user.o locks.o atomic.o atomic32.o bitops.o \ 10 copy_user.o locks.o atomic.o atomic32.o bitops.o \
11 lshrdi3.o ashldi3.o rwsem.o muldi3.o bitext.o 11 lshrdi3.o ashldi3.o rwsem.o muldi3.o bitext.o
12
13obj-y += iomap.o
diff --git a/arch/sparc/lib/iomap.c b/arch/sparc/lib/iomap.c
new file mode 100644
index 000000000000..54501c1ca785
--- /dev/null
+++ b/arch/sparc/lib/iomap.c
@@ -0,0 +1,48 @@
1/*
2 * Implement the sparc iomap interfaces
3 */
4#include <linux/pci.h>
5#include <linux/module.h>
6#include <asm/io.h>
7
8/* Create a virtual mapping cookie for an IO port range */
9void __iomem *ioport_map(unsigned long port, unsigned int nr)
10{
11 return (void __iomem *) (unsigned long) port;
12}
13
14void ioport_unmap(void __iomem *addr)
15{
16 /* Nothing to do */
17}
18EXPORT_SYMBOL(ioport_map);
19EXPORT_SYMBOL(ioport_unmap);
20
21/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
22void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
23{
24 unsigned long start = pci_resource_start(dev, bar);
25 unsigned long len = pci_resource_len(dev, bar);
26 unsigned long flags = pci_resource_flags(dev, bar);
27
28 if (!len || !start)
29 return NULL;
30 if (maxlen && len > maxlen)
31 len = maxlen;
32 if (flags & IORESOURCE_IO)
33 return ioport_map(start, len);
34 if (flags & IORESOURCE_MEM) {
35 if (flags & IORESOURCE_CACHEABLE)
36 return ioremap(start, len);
37 return ioremap_nocache(start, len);
38 }
39 /* What? */
40 return NULL;
41}
42
43void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
44{
45 /* nothing to do */
46}
47EXPORT_SYMBOL(pci_iomap);
48EXPORT_SYMBOL(pci_iounmap);
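
A short sketch of how a driver might consume the sparc iomap helpers introduced in the new file above. The BAR index, register offset and the generic ioread32() accessor are assumptions drawn from the common iomap interface, not from this patch.

/* Hypothetical probe path: map BAR 0 and peek at a status register. */
static int example_probe(struct pci_dev *pdev)
{
	void __iomem *regs;
	u32 status;

	regs = pci_iomap(pdev, 0, 0);		/* maxlen 0 == map the whole BAR */
	if (!regs)
		return -ENOMEM;

	status = ioread32(regs + 0x10);		/* hypothetical register offset */
	pci_iounmap(pdev, regs);

	return status ? 0 : -EIO;
}
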
diff --git a/arch/sparc64/kernel/auxio.c b/arch/sparc64/kernel/auxio.c
index 2c42894b188f..c2c69c167d18 100644
--- a/arch/sparc64/kernel/auxio.c
+++ b/arch/sparc64/kernel/auxio.c
@@ -6,6 +6,7 @@
6 */ 6 */
7 7
8#include <linux/config.h> 8#include <linux/config.h>
9#include <linux/module.h>
9#include <linux/kernel.h> 10#include <linux/kernel.h>
10#include <linux/init.h> 11#include <linux/init.h>
11#include <linux/ioport.h> 12#include <linux/ioport.h>
@@ -16,8 +17,8 @@
16#include <asm/ebus.h> 17#include <asm/ebus.h>
17#include <asm/auxio.h> 18#include <asm/auxio.h>
18 19
19/* This cannot be static, as it is referenced in irq.c */
20void __iomem *auxio_register = NULL; 20void __iomem *auxio_register = NULL;
21EXPORT_SYMBOL(auxio_register);
21 22
22enum auxio_type { 23enum auxio_type {
23 AUXIO_TYPE_NODEV, 24 AUXIO_TYPE_NODEV,
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 31e0fbb0d82c..cc89b06d0178 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -563,67 +563,6 @@ void handler_irq(int irq, struct pt_regs *regs)
563 irq_exit(); 563 irq_exit();
564} 564}
565 565
566#ifdef CONFIG_BLK_DEV_FD
567extern irqreturn_t floppy_interrupt(int, void *, struct pt_regs *);
568
569/* XXX No easy way to include asm/floppy.h XXX */
570extern unsigned char *pdma_vaddr;
571extern unsigned long pdma_size;
572extern volatile int doing_pdma;
573extern unsigned long fdc_status;
574
575irqreturn_t sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
576{
577 if (likely(doing_pdma)) {
578 void __iomem *stat = (void __iomem *) fdc_status;
579 unsigned char *vaddr = pdma_vaddr;
580 unsigned long size = pdma_size;
581 u8 val;
582
583 while (size) {
584 val = readb(stat);
585 if (unlikely(!(val & 0x80))) {
586 pdma_vaddr = vaddr;
587 pdma_size = size;
588 return IRQ_HANDLED;
589 }
590 if (unlikely(!(val & 0x20))) {
591 pdma_vaddr = vaddr;
592 pdma_size = size;
593 doing_pdma = 0;
594 goto main_interrupt;
595 }
596 if (val & 0x40) {
597 /* read */
598 *vaddr++ = readb(stat + 1);
599 } else {
600 unsigned char data = *vaddr++;
601
602 /* write */
603 writeb(data, stat + 1);
604 }
605 size--;
606 }
607
608 pdma_vaddr = vaddr;
609 pdma_size = size;
610
611 /* Send Terminal Count pulse to floppy controller. */
612 val = readb(auxio_register);
613 val |= AUXIO_AUX1_FTCNT;
614 writeb(val, auxio_register);
615 val &= ~AUXIO_AUX1_FTCNT;
616 writeb(val, auxio_register);
617
618 doing_pdma = 0;
619 }
620
621main_interrupt:
622 return floppy_interrupt(irq, dev_cookie, regs);
623}
624EXPORT_SYMBOL(sparc_floppy_irq);
625#endif
626
627struct sun5_timer { 566struct sun5_timer {
628 u64 count0; 567 u64 count0;
629 u64 limit0; 568 u64 limit0;
diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c
index 566aa343aa62..768475bbce82 100644
--- a/arch/sparc64/kernel/of_device.c
+++ b/arch/sparc64/kernel/of_device.c
@@ -138,6 +138,7 @@ struct bus_type isa_bus_type = {
138 .suspend = of_device_suspend, 138 .suspend = of_device_suspend,
139 .resume = of_device_resume, 139 .resume = of_device_resume,
140}; 140};
141EXPORT_SYMBOL(isa_bus_type);
141 142
142struct bus_type ebus_bus_type = { 143struct bus_type ebus_bus_type = {
143 .name = "ebus", 144 .name = "ebus",
@@ -147,6 +148,7 @@ struct bus_type ebus_bus_type = {
147 .suspend = of_device_suspend, 148 .suspend = of_device_suspend,
148 .resume = of_device_resume, 149 .resume = of_device_resume,
149}; 150};
151EXPORT_SYMBOL(ebus_bus_type);
150#endif 152#endif
151 153
152#ifdef CONFIG_SBUS 154#ifdef CONFIG_SBUS
@@ -158,6 +160,7 @@ struct bus_type sbus_bus_type = {
158 .suspend = of_device_suspend, 160 .suspend = of_device_suspend,
159 .resume = of_device_resume, 161 .resume = of_device_resume,
160}; 162};
163EXPORT_SYMBOL(sbus_bus_type);
161#endif 164#endif
162 165
163static int __init of_bus_driver_init(void) 166static int __init of_bus_driver_init(void)
diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c
index e9d703eea806..8e87e7ea0325 100644
--- a/arch/sparc64/kernel/prom.c
+++ b/arch/sparc64/kernel/prom.c
@@ -27,6 +27,11 @@
27 27
28static struct device_node *allnodes; 28static struct device_node *allnodes;
29 29
30/* use when traversing tree through the allnext, child, sibling,
31 * or parent members of struct device_node.
32 */
33static DEFINE_RWLOCK(devtree_lock);
34
30int of_device_is_compatible(struct device_node *device, const char *compat) 35int of_device_is_compatible(struct device_node *device, const char *compat)
31{ 36{
32 const char* cp; 37 const char* cp;
@@ -185,6 +190,54 @@ int of_getintprop_default(struct device_node *np, const char *name, int def)
185} 190}
186EXPORT_SYMBOL(of_getintprop_default); 191EXPORT_SYMBOL(of_getintprop_default);
187 192
193int of_set_property(struct device_node *dp, const char *name, void *val, int len)
194{
195 struct property **prevp;
196 void *new_val;
197 int err;
198
199 new_val = kmalloc(len, GFP_KERNEL);
200 if (!new_val)
201 return -ENOMEM;
202
203 memcpy(new_val, val, len);
204
205 err = -ENODEV;
206
207 write_lock(&devtree_lock);
208 prevp = &dp->properties;
209 while (*prevp) {
210 struct property *prop = *prevp;
211
212 if (!strcmp(prop->name, name)) {
213 void *old_val = prop->value;
214 int ret;
215
216 ret = prom_setprop(dp->node, name, val, len);
217 err = -EINVAL;
218 if (ret >= 0) {
219 prop->value = new_val;
220 prop->length = len;
221
222 if (OF_IS_DYNAMIC(prop))
223 kfree(old_val);
224
225 OF_MARK_DYNAMIC(prop);
226
227 err = 0;
228 }
229 break;
230 }
231 prevp = &(*prevp)->next;
232 }
233 write_unlock(&devtree_lock);
234
235	/* XXX Update procfs if necessary... */
236
237 return err;
238}
239EXPORT_SYMBOL(of_set_property);
240
188static unsigned int prom_early_allocated; 241static unsigned int prom_early_allocated;
189 242
190static void * __init prom_early_alloc(unsigned long size) 243static void * __init prom_early_alloc(unsigned long size)
@@ -531,7 +584,9 @@ static char * __init build_full_name(struct device_node *dp)
531 return n; 584 return n;
532} 585}
533 586
534static struct property * __init build_one_prop(phandle node, char *prev) 587static unsigned int unique_id;
588
589static struct property * __init build_one_prop(phandle node, char *prev, char *special_name, void *special_val, int special_len)
535{ 590{
536 static struct property *tmp = NULL; 591 static struct property *tmp = NULL;
537 struct property *p; 592 struct property *p;
@@ -540,25 +595,35 @@ static struct property * __init build_one_prop(phandle node, char *prev)
540 p = tmp; 595 p = tmp;
541 memset(p, 0, sizeof(*p) + 32); 596 memset(p, 0, sizeof(*p) + 32);
542 tmp = NULL; 597 tmp = NULL;
543 } else 598 } else {
544 p = prom_early_alloc(sizeof(struct property) + 32); 599 p = prom_early_alloc(sizeof(struct property) + 32);
600 p->unique_id = unique_id++;
601 }
545 602
546 p->name = (char *) (p + 1); 603 p->name = (char *) (p + 1);
547 if (prev == NULL) { 604 if (special_name) {
548 prom_firstprop(node, p->name); 605 strcpy(p->name, special_name);
606 p->length = special_len;
607 p->value = prom_early_alloc(special_len);
608 memcpy(p->value, special_val, special_len);
549 } else { 609 } else {
550 prom_nextprop(node, prev, p->name); 610 if (prev == NULL) {
551 } 611 prom_firstprop(node, p->name);
552 if (strlen(p->name) == 0) { 612 } else {
553 tmp = p; 613 prom_nextprop(node, prev, p->name);
554 return NULL; 614 }
555 } 615 if (strlen(p->name) == 0) {
556 p->length = prom_getproplen(node, p->name); 616 tmp = p;
557 if (p->length <= 0) { 617 return NULL;
558 p->length = 0; 618 }
559 } else { 619 p->length = prom_getproplen(node, p->name);
560 p->value = prom_early_alloc(p->length); 620 if (p->length <= 0) {
561 prom_getproperty(node, p->name, p->value, p->length); 621 p->length = 0;
622 } else {
623 p->value = prom_early_alloc(p->length + 1);
624 prom_getproperty(node, p->name, p->value, p->length);
625 ((unsigned char *)p->value)[p->length] = '\0';
626 }
562 } 627 }
563 return p; 628 return p;
564} 629}
@@ -567,9 +632,14 @@ static struct property * __init build_prop_list(phandle node)
567{ 632{
568 struct property *head, *tail; 633 struct property *head, *tail;
569 634
570 head = tail = build_one_prop(node, NULL); 635 head = tail = build_one_prop(node, NULL,
636 ".node", &node, sizeof(node));
637
638 tail->next = build_one_prop(node, NULL, NULL, NULL, 0);
639 tail = tail->next;
571 while(tail) { 640 while(tail) {
572 tail->next = build_one_prop(node, tail->name); 641 tail->next = build_one_prop(node, tail->name,
642 NULL, NULL, 0);
573 tail = tail->next; 643 tail = tail->next;
574 } 644 }
575 645
@@ -598,6 +668,7 @@ static struct device_node * __init create_node(phandle node)
598 return NULL; 668 return NULL;
599 669
600 dp = prom_early_alloc(sizeof(*dp)); 670 dp = prom_early_alloc(sizeof(*dp));
671 dp->unique_id = unique_id++;
601 672
602 kref_init(&dp->kref); 673 kref_init(&dp->kref);
603 674
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index a6a7d8168346..116d9632002d 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -537,7 +537,7 @@ static int __init topology_init(void)
537 for_each_possible_cpu(i) { 537 for_each_possible_cpu(i) {
538 struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL); 538 struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
539 if (p) { 539 if (p) {
540 register_cpu(p, i, NULL); 540 register_cpu(p, i);
541 err = 0; 541 err = 0;
542 } 542 }
543 } 543 }
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index 6e002aacb961..1605967cce91 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -31,6 +31,40 @@
31#include <asm/kdebug.h> 31#include <asm/kdebug.h>
32#include <asm/mmu_context.h> 32#include <asm/mmu_context.h>
33 33
34#ifdef CONFIG_KPROBES
35ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
36
37/* Hook to register for page fault notifications */
38int register_page_fault_notifier(struct notifier_block *nb)
39{
40 return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
41}
42
43int unregister_page_fault_notifier(struct notifier_block *nb)
44{
45 return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
46}
47
48static inline int notify_page_fault(enum die_val val, const char *str,
49 struct pt_regs *regs, long err, int trap, int sig)
50{
51 struct die_args args = {
52 .regs = regs,
53 .str = str,
54 .err = err,
55 .trapnr = trap,
56 .signr = sig
57 };
58 return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
59}
60#else
61static inline int notify_page_fault(enum die_val val, const char *str,
62 struct pt_regs *regs, long err, int trap, int sig)
63{
64 return NOTIFY_DONE;
65}
66#endif
67
34/* 68/*
35 * To debug kernel to catch accesses to certain virtual/physical addresses. 69 * To debug kernel to catch accesses to certain virtual/physical addresses.
36 * Mode = 0 selects physical watchpoints, mode = 1 selects virtual watchpoints. 70 * Mode = 0 selects physical watchpoints, mode = 1 selects virtual watchpoints.
@@ -263,7 +297,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
263 297
264 fault_code = get_thread_fault_code(); 298 fault_code = get_thread_fault_code();
265 299
266 if (notify_die(DIE_PAGE_FAULT, "page_fault", regs, 300 if (notify_page_fault(DIE_PAGE_FAULT, "page_fault", regs,
267 fault_code, 0, SIGSEGV) == NOTIFY_STOP) 301 fault_code, 0, SIGSEGV) == NOTIFY_STOP)
268 return; 302 return;
269 303
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 513993414747..cb75a27adb51 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -18,6 +18,7 @@
18#include <linux/initrd.h> 18#include <linux/initrd.h>
19#include <linux/swap.h> 19#include <linux/swap.h>
20#include <linux/pagemap.h> 20#include <linux/pagemap.h>
21#include <linux/poison.h>
21#include <linux/fs.h> 22#include <linux/fs.h>
22#include <linux/seq_file.h> 23#include <linux/seq_file.h>
23#include <linux/kprobes.h> 24#include <linux/kprobes.h>
@@ -1520,7 +1521,7 @@ void free_initmem(void)
1520 page = (addr + 1521 page = (addr +
1521 ((unsigned long) __va(kern_base)) - 1522 ((unsigned long) __va(kern_base)) -
1522 ((unsigned long) KERNBASE)); 1523 ((unsigned long) KERNBASE));
1523 memset((void *)addr, 0xcc, PAGE_SIZE); 1524 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
1524 p = virt_to_page(page); 1525 p = virt_to_page(page);
1525 1526
1526 ClearPageReserved(p); 1527 ClearPageReserved(p);
@@ -1568,6 +1569,7 @@ pgprot_t PAGE_EXEC __read_mostly;
1568unsigned long pg_iobits __read_mostly; 1569unsigned long pg_iobits __read_mostly;
1569 1570
1570unsigned long _PAGE_IE __read_mostly; 1571unsigned long _PAGE_IE __read_mostly;
1572EXPORT_SYMBOL(_PAGE_IE);
1571 1573
1572unsigned long _PAGE_E __read_mostly; 1574unsigned long _PAGE_E __read_mostly;
1573EXPORT_SYMBOL(_PAGE_E); 1575EXPORT_SYMBOL(_PAGE_E);
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 0897852b09a3..290cec6d69e2 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -1222,7 +1222,7 @@ int open_ubd_file(char *file, struct openflags *openflags, int shared,
1222 } 1222 }
1223 } 1223 }
1224 1224
1225 /* Succesful return case! */ 1225 /* Successful return case! */
1226 if(backing_file_out == NULL) 1226 if(backing_file_out == NULL)
1227 return(fd); 1227 return(fd);
1228 1228
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index af44130f0d65..ccc4a7fb97a3 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -386,24 +386,45 @@ config HPET_EMULATE_RTC
386 bool "Provide RTC interrupt" 386 bool "Provide RTC interrupt"
387 depends on HPET_TIMER && RTC=y 387 depends on HPET_TIMER && RTC=y
388 388
389config GART_IOMMU 389# Mark as embedded because too many people got it wrong.
390 bool "K8 GART IOMMU support" 390# The code disables itself when not needed.
391config IOMMU
392 bool "IOMMU support" if EMBEDDED
391 default y 393 default y
392 select SWIOTLB 394 select SWIOTLB
393 select AGP 395 select AGP
394 depends on PCI 396 depends on PCI
395 help 397 help
396 Support for hardware IOMMU in AMD's Opteron/Athlon64 Processors 398 Support for full DMA access of devices with 32bit memory access only
397 and for the bounce buffering software IOMMU. 399 on systems with more than 3GB. This is usually needed for USB,
398 Needed to run systems with more than 3GB of memory properly with 400 sound, many IDE/SATA chipsets and some other devices.
399 32-bit PCI devices that do not support DAC (Double Address Cycle). 401 Provides a driver for the AMD Athlon64/Opteron/Turion/Sempron GART
400 The IOMMU can be turned off at runtime with the iommu=off parameter. 402 based IOMMU and a software bounce buffer based IOMMU used on Intel
401 Normally the kernel will take the right choice by itself. 403 systems and as fallback.
402 This option includes a driver for the AMD Opteron/Athlon64 IOMMU 404 The code is only active when needed (enough memory and limited
403 northbridge and a software emulation used on other systems without 405 device) unless CONFIG_IOMMU_DEBUG or iommu=force is specified
404 hardware IOMMU. If unsure, say Y. 406 too.
405 407
406# need this always selected by GART_IOMMU for the VIA workaround 408config CALGARY_IOMMU
409 bool "IBM Calgary IOMMU support"
410 default y
411 select SWIOTLB
412 depends on PCI && EXPERIMENTAL
413 help
414 Support for hardware IOMMUs in IBM's xSeries x366 and x460
415 systems. Needed to run systems with more than 3GB of memory
416 properly with 32-bit PCI devices that do not support DAC
417 (Double Address Cycle). Calgary also supports bus level
418 isolation, where all DMAs pass through the IOMMU. This
419 prevents them from going anywhere except their intended
420 destination. This catches hard-to-find kernel bugs and
421 mis-behaving drivers and devices that do not use the DMA-API
422 properly to set up their DMA buffers. The IOMMU can be
423 turned off at boot time with the iommu=off parameter.
424 Normally the kernel will make the right choice by itself.
425 If unsure, say Y.
426
427# need this always selected by IOMMU for the VIA workaround
407config SWIOTLB 428config SWIOTLB
408 bool 429 bool
409 430
@@ -501,6 +522,10 @@ config REORDER
501 optimal TLB usage. If you have pretty much any version of binutils, 522 optimal TLB usage. If you have pretty much any version of binutils,
502 this can increase your kernel build time by roughly one minute. 523 this can increase your kernel build time by roughly one minute.
503 524
525config K8_NB
526 def_bool y
527 depends on AGP_AMD64 || IOMMU || (PCI && NUMA)
528
504endmenu 529endmenu
505 530
506# 531#
diff --git a/arch/x86_64/Kconfig.debug b/arch/x86_64/Kconfig.debug
index ea31b4c62105..1d92ab56c0f9 100644
--- a/arch/x86_64/Kconfig.debug
+++ b/arch/x86_64/Kconfig.debug
@@ -13,7 +13,7 @@ config DEBUG_RODATA
13 If in doubt, say "N". 13 If in doubt, say "N".
14 14
15config IOMMU_DEBUG 15config IOMMU_DEBUG
16 depends on GART_IOMMU && DEBUG_KERNEL 16 depends on IOMMU && DEBUG_KERNEL
17 bool "Enable IOMMU debugging" 17 bool "Enable IOMMU debugging"
18 help 18 help
19 Force the IOMMU to on even when you have less than 4GB of 19 Force the IOMMU to on even when you have less than 4GB of
@@ -35,6 +35,22 @@ config IOMMU_LEAK
35 Add a simple leak tracer to the IOMMU code. This is useful when you 35 Add a simple leak tracer to the IOMMU code. This is useful when you
36 are debugging a buggy device driver that leaks IOMMU mappings. 36 are debugging a buggy device driver that leaks IOMMU mappings.
37 37
38config DEBUG_STACKOVERFLOW
39 bool "Check for stack overflows"
40 depends on DEBUG_KERNEL
41 help
42 This option will cause messages to be printed if free stack space
43 drops below a certain limit.
44
45config DEBUG_STACK_USAGE
46 bool "Stack utilization instrumentation"
47 depends on DEBUG_KERNEL
48 help
49 Enables the display of the minimum amount of free stack which each
50 task has ever had available in the sysrq-T and sysrq-P debug output.
51
52 This option will slow down process creation somewhat.
53
38#config X86_REMOTE_DEBUG 54#config X86_REMOTE_DEBUG
39# bool "kgdb debugging stub" 55# bool "kgdb debugging stub"
40 56
diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile
index e573e2ab5510..431bb4bc36cd 100644
--- a/arch/x86_64/Makefile
+++ b/arch/x86_64/Makefile
@@ -27,6 +27,7 @@ LDFLAGS_vmlinux :=
27CHECKFLAGS += -D__x86_64__ -m64 27CHECKFLAGS += -D__x86_64__ -m64
28 28
29cflags-y := 29cflags-y :=
30cflags-kernel-y :=
30cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8) 31cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
31cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona) 32cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
32cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic) 33cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
@@ -35,7 +36,7 @@ cflags-y += -m64
35cflags-y += -mno-red-zone 36cflags-y += -mno-red-zone
36cflags-y += -mcmodel=kernel 37cflags-y += -mcmodel=kernel
37cflags-y += -pipe 38cflags-y += -pipe
38cflags-$(CONFIG_REORDER) += -ffunction-sections 39cflags-kernel-$(CONFIG_REORDER) += -ffunction-sections
39# this makes reading assembly source easier, but produces worse code 40# this makes reading assembly source easier, but produces worse code
40# actually it makes the kernel smaller too. 41# actually it makes the kernel smaller too.
41cflags-y += -fno-reorder-blocks 42cflags-y += -fno-reorder-blocks
@@ -55,6 +56,7 @@ cflags-y += $(call cc-option,-funit-at-a-time)
55cflags-y += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,) 56cflags-y += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,)
56 57
57CFLAGS += $(cflags-y) 58CFLAGS += $(cflags-y)
59CFLAGS_KERNEL += $(cflags-kernel-y)
58AFLAGS += -m64 60AFLAGS += -m64
59 61
60head-y := arch/x86_64/kernel/head.o arch/x86_64/kernel/head64.o arch/x86_64/kernel/init_task.o 62head-y := arch/x86_64/kernel/head.o arch/x86_64/kernel/head64.o arch/x86_64/kernel/init_task.o
diff --git a/arch/x86_64/boot/Makefile b/arch/x86_64/boot/Makefile
index 43ee6c50c277..deb063e7762d 100644
--- a/arch/x86_64/boot/Makefile
+++ b/arch/x86_64/boot/Makefile
@@ -107,8 +107,13 @@ fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf
107isoimage: $(BOOTIMAGE) 107isoimage: $(BOOTIMAGE)
108 -rm -rf $(obj)/isoimage 108 -rm -rf $(obj)/isoimage
109 mkdir $(obj)/isoimage 109 mkdir $(obj)/isoimage
110 cp `echo /usr/lib*/syslinux/isolinux.bin | awk '{ print $1; }'` \ 110 for i in lib lib64 share end ; do \
111 $(obj)/isoimage 111 if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
112 cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
113 break ; \
114 fi ; \
115 if [ $$i = end ] ; then exit 1 ; fi ; \
116 done
112 cp $(BOOTIMAGE) $(obj)/isoimage/linux 117 cp $(BOOTIMAGE) $(obj)/isoimage/linux
113 echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg 118 echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg
114 if [ -f '$(FDINITRD)' ] ; then \ 119 if [ -f '$(FDINITRD)' ] ; then \
diff --git a/arch/x86_64/boot/compressed/misc.c b/arch/x86_64/boot/compressed/misc.c
index cf4b88c416dc..3755b2e394d0 100644
--- a/arch/x86_64/boot/compressed/misc.c
+++ b/arch/x86_64/boot/compressed/misc.c
@@ -77,11 +77,11 @@ static void gzip_release(void **);
77 */ 77 */
78static unsigned char *real_mode; /* Pointer to real-mode data */ 78static unsigned char *real_mode; /* Pointer to real-mode data */
79 79
80#define EXT_MEM_K (*(unsigned short *)(real_mode + 0x2)) 80#define RM_EXT_MEM_K (*(unsigned short *)(real_mode + 0x2))
81#ifndef STANDARD_MEMORY_BIOS_CALL 81#ifndef STANDARD_MEMORY_BIOS_CALL
82#define ALT_MEM_K (*(unsigned long *)(real_mode + 0x1e0)) 82#define RM_ALT_MEM_K (*(unsigned long *)(real_mode + 0x1e0))
83#endif 83#endif
84#define SCREEN_INFO (*(struct screen_info *)(real_mode+0)) 84#define RM_SCREEN_INFO (*(struct screen_info *)(real_mode+0))
85 85
86extern unsigned char input_data[]; 86extern unsigned char input_data[];
87extern int input_len; 87extern int input_len;
@@ -92,9 +92,9 @@ static unsigned long output_ptr = 0;
92 92
93static void *malloc(int size); 93static void *malloc(int size);
94static void free(void *where); 94static void free(void *where);
95 95
96void* memset(void* s, int c, unsigned n); 96static void *memset(void *s, int c, unsigned n);
97void* memcpy(void* dest, const void* src, unsigned n); 97static void *memcpy(void *dest, const void *src, unsigned n);
98 98
99static void putstr(const char *); 99static void putstr(const char *);
100 100
@@ -162,8 +162,8 @@ static void putstr(const char *s)
162 int x,y,pos; 162 int x,y,pos;
163 char c; 163 char c;
164 164
165 x = SCREEN_INFO.orig_x; 165 x = RM_SCREEN_INFO.orig_x;
166 y = SCREEN_INFO.orig_y; 166 y = RM_SCREEN_INFO.orig_y;
167 167
168 while ( ( c = *s++ ) != '\0' ) { 168 while ( ( c = *s++ ) != '\0' ) {
169 if ( c == '\n' ) { 169 if ( c == '\n' ) {
@@ -184,8 +184,8 @@ static void putstr(const char *s)
184 } 184 }
185 } 185 }
186 186
187 SCREEN_INFO.orig_x = x; 187 RM_SCREEN_INFO.orig_x = x;
188 SCREEN_INFO.orig_y = y; 188 RM_SCREEN_INFO.orig_y = y;
189 189
190 pos = (x + cols * y) * 2; /* Update cursor position */ 190 pos = (x + cols * y) * 2; /* Update cursor position */
191 outb_p(14, vidport); 191 outb_p(14, vidport);
@@ -194,7 +194,7 @@ static void putstr(const char *s)
194 outb_p(0xff & (pos >> 1), vidport+1); 194 outb_p(0xff & (pos >> 1), vidport+1);
195} 195}
196 196
197void* memset(void* s, int c, unsigned n) 197static void* memset(void* s, int c, unsigned n)
198{ 198{
199 int i; 199 int i;
200 char *ss = (char*)s; 200 char *ss = (char*)s;
@@ -203,7 +203,7 @@ void* memset(void* s, int c, unsigned n)
203 return s; 203 return s;
204} 204}
205 205
206void* memcpy(void* dest, const void* src, unsigned n) 206static void* memcpy(void* dest, const void* src, unsigned n)
207{ 207{
208 int i; 208 int i;
209 char *d = (char *)dest, *s = (char *)src; 209 char *d = (char *)dest, *s = (char *)src;
@@ -278,15 +278,15 @@ static void error(char *x)
278 putstr(x); 278 putstr(x);
279 putstr("\n\n -- System halted"); 279 putstr("\n\n -- System halted");
280 280
281 while(1); 281 while(1); /* Halt */
282} 282}
283 283
284void setup_normal_output_buffer(void) 284static void setup_normal_output_buffer(void)
285{ 285{
286#ifdef STANDARD_MEMORY_BIOS_CALL 286#ifdef STANDARD_MEMORY_BIOS_CALL
287 if (EXT_MEM_K < 1024) error("Less than 2MB of memory"); 287 if (RM_EXT_MEM_K < 1024) error("Less than 2MB of memory");
288#else 288#else
289 if ((ALT_MEM_K > EXT_MEM_K ? ALT_MEM_K : EXT_MEM_K) < 1024) error("Less than 2MB of memory"); 289 if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < 1024) error("Less than 2MB of memory");
290#endif 290#endif
291 output_data = (unsigned char *)__PHYSICAL_START; /* Normally Points to 1M */ 291 output_data = (unsigned char *)__PHYSICAL_START; /* Normally Points to 1M */
292 free_mem_end_ptr = (long)real_mode; 292 free_mem_end_ptr = (long)real_mode;
@@ -297,13 +297,13 @@ struct moveparams {
297 uch *high_buffer_start; int hcount; 297 uch *high_buffer_start; int hcount;
298}; 298};
299 299
300void setup_output_buffer_if_we_run_high(struct moveparams *mv) 300static void setup_output_buffer_if_we_run_high(struct moveparams *mv)
301{ 301{
302 high_buffer_start = (uch *)(((ulg)&end) + HEAP_SIZE); 302 high_buffer_start = (uch *)(((ulg)&end) + HEAP_SIZE);
303#ifdef STANDARD_MEMORY_BIOS_CALL 303#ifdef STANDARD_MEMORY_BIOS_CALL
304 if (EXT_MEM_K < (3*1024)) error("Less than 4MB of memory"); 304 if (RM_EXT_MEM_K < (3*1024)) error("Less than 4MB of memory");
305#else 305#else
306 if ((ALT_MEM_K > EXT_MEM_K ? ALT_MEM_K : EXT_MEM_K) < (3*1024)) error("Less than 4MB of memory"); 306 if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < (3*1024)) error("Less than 4MB of memory");
307#endif 307#endif
308 mv->low_buffer_start = output_data = (unsigned char *)LOW_BUFFER_START; 308 mv->low_buffer_start = output_data = (unsigned char *)LOW_BUFFER_START;
309 low_buffer_end = ((unsigned int)real_mode > LOW_BUFFER_MAX 309 low_buffer_end = ((unsigned int)real_mode > LOW_BUFFER_MAX
@@ -319,7 +319,7 @@ void setup_output_buffer_if_we_run_high(struct moveparams *mv)
319 mv->high_buffer_start = high_buffer_start; 319 mv->high_buffer_start = high_buffer_start;
320} 320}
321 321
322void close_output_buffer_if_we_run_high(struct moveparams *mv) 322static void close_output_buffer_if_we_run_high(struct moveparams *mv)
323{ 323{
324 if (bytes_out > low_buffer_size) { 324 if (bytes_out > low_buffer_size) {
325 mv->lcount = low_buffer_size; 325 mv->lcount = low_buffer_size;
@@ -335,7 +335,7 @@ int decompress_kernel(struct moveparams *mv, void *rmode)
335{ 335{
336 real_mode = rmode; 336 real_mode = rmode;
337 337
338 if (SCREEN_INFO.orig_video_mode == 7) { 338 if (RM_SCREEN_INFO.orig_video_mode == 7) {
339 vidmem = (char *) 0xb0000; 339 vidmem = (char *) 0xb0000;
340 vidport = 0x3b4; 340 vidport = 0x3b4;
341 } else { 341 } else {
@@ -343,8 +343,8 @@ int decompress_kernel(struct moveparams *mv, void *rmode)
343 vidport = 0x3d4; 343 vidport = 0x3d4;
344 } 344 }
345 345
346 lines = SCREEN_INFO.orig_video_lines; 346 lines = RM_SCREEN_INFO.orig_video_lines;
347 cols = SCREEN_INFO.orig_video_cols; 347 cols = RM_SCREEN_INFO.orig_video_cols;
348 348
349 if (free_mem_ptr < 0x100000) setup_normal_output_buffer(); 349 if (free_mem_ptr < 0x100000) setup_normal_output_buffer();
350 else setup_output_buffer_if_we_run_high(mv); 350 else setup_output_buffer_if_we_run_high(mv);
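The SCREEN_INFO/EXT_MEM_K/ALT_MEM_K references become RM_SCREEN_INFO/RM_EXT_MEM_K/RM_ALT_MEM_K, i.e. the decompressor now reads its boot parameters through the real_mode pointer handed to decompress_kernel() rather than through fixed addresses. A minimal sketch of how such accessors can be defined, assuming real_mode points at the boot parameter block and the traditional offsets; the macro bodies below are an assumption, not quoted from this patch:

    /* Sketch only: accessors over the boot parameter block ("zero page").
     * Offsets follow the traditional setup layout and are illustrative. */
    static char *real_mode;        /* set from decompress_kernel(mv, rmode) */

    #define RM_SCREEN_INFO (*(struct screen_info *)(real_mode + 0x0))
    #define RM_EXT_MEM_K   (*(unsigned short *)(real_mode + 0x02))
    #define RM_ALT_MEM_K   (*(unsigned long *)(real_mode + 0x1e0))
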
diff --git a/arch/x86_64/boot/tools/build.c b/arch/x86_64/boot/tools/build.c
index c44f5e2ec100..eae86691709a 100644
--- a/arch/x86_64/boot/tools/build.c
+++ b/arch/x86_64/boot/tools/build.c
@@ -149,10 +149,8 @@ int main(int argc, char ** argv)
149 sz = sb.st_size; 149 sz = sb.st_size;
150 fprintf (stderr, "System is %d kB\n", sz/1024); 150 fprintf (stderr, "System is %d kB\n", sz/1024);
151 sys_size = (sz + 15) / 16; 151 sys_size = (sz + 15) / 16;
152 /* 0x40000*16 = 4.0 MB, reasonable estimate for the current maximum */ 152 if (!is_big_kernel && sys_size > DEF_SYSSIZE)
153 if (sys_size > (is_big_kernel ? 0x40000 : DEF_SYSSIZE)) 153 die("System is too big. Try using bzImage or modules.");
154 die("System is too big. Try using %smodules.",
155 is_big_kernel ? "" : "bzImage or ");
156 while (sz > 0) { 154 while (sz > 0) {
157 int l, n; 155 int l, n;
158 156
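After this change only non-big (zImage) kernels are checked against DEF_SYSSIZE; bzImage is no longer capped at the old 4 MB estimate here. Since sys_size is counted in 16-byte paragraphs, the remaining limit is easy to compute. A small standalone check, assuming DEF_SYSSIZE carries the traditional value 0x7F00 from the i386 boot headers (that value is an assumption, it is not shown in this hunk):

    #include <stdio.h>

    int main(void)
    {
            unsigned long def_syssize = 0x7F00;  /* assumed value, in 16-byte paragraphs */

            /* sys_size = (sz + 15) / 16, so the zImage payload limit is: */
            printf("zImage system limit: %lu bytes (about %lu kB)\n",
                   def_syssize * 16, def_syssize * 16 / 1024);
            return 0;
    }
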
diff --git a/arch/x86_64/boot/video.S b/arch/x86_64/boot/video.S
index 32327bb37aff..2aa565c136e5 100644
--- a/arch/x86_64/boot/video.S
+++ b/arch/x86_64/boot/video.S
@@ -1929,6 +1929,7 @@ skip10: movb %ah, %al
1929 ret 1929 ret
1930 1930
1931store_edid: 1931store_edid:
1932#ifdef CONFIG_FIRMWARE_EDID
1932 pushw %es # just save all registers 1933 pushw %es # just save all registers
1933 pushw %ax 1934 pushw %ax
1934 pushw %bx 1935 pushw %bx
@@ -1946,6 +1947,22 @@ store_edid:
1946 rep 1947 rep
1947 stosl 1948 stosl
1948 1949
1950 pushw %es # save ES
1951 xorw %di, %di # Report Capability
1952 pushw %di
1953 popw %es # ES:DI must be 0:0
1954 movw $0x4f15, %ax
1955 xorw %bx, %bx
1956 xorw %cx, %cx
1957 int $0x10
1958 popw %es # restore ES
1959
1960 cmpb $0x00, %ah # call successful
1961 jne no_edid
1962
1963 cmpb $0x4f, %al # function supported
1964 jne no_edid
1965
1949 movw $0x4f15, %ax # do VBE/DDC 1966 movw $0x4f15, %ax # do VBE/DDC
1950 movw $0x01, %bx 1967 movw $0x01, %bx
1951 movw $0x00, %cx 1968 movw $0x00, %cx
@@ -1953,12 +1970,14 @@ store_edid:
1953 movw $0x140, %di 1970 movw $0x140, %di
1954 int $0x10 1971 int $0x10
1955 1972
1973no_edid:
1956 popw %di # restore all registers 1974 popw %di # restore all registers
1957 popw %dx 1975 popw %dx
1958 popw %cx 1976 popw %cx
1959 popw %bx 1977 popw %bx
1960 popw %ax 1978 popw %ax
1961 popw %es 1979 popw %es
1980#endif
1962 ret 1981 ret
1963 1982
1964# VIDEO_SELECT-only variables 1983# VIDEO_SELECT-only variables
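The added block first calls VBE/DDC function 0x4f15 with BL=0 ("report DDC capabilities", ES:DI forced to 0:0) and only proceeds to the BL=1 EDID read when the BIOS returns AH=0 and AL=0x4f; the whole routine is also compiled out unless CONFIG_FIRMWARE_EDID is set. A C-style sketch of the same control flow; the int10() helper and its register struct are hypothetical stand-ins for the real-mode INT 10h call:

    /* Hypothetical real-mode INT 10h wrapper, used only for illustration. */
    struct biosregs { unsigned short ax, bx, cx, di, es; };
    extern void int10(struct biosregs *r);

    static int probe_and_read_edid(void)
    {
            struct biosregs r = { .ax = 0x4f15, .bx = 0, .cx = 0, .di = 0, .es = 0 };

            int10(&r);                             /* report DDC capabilities */
            if ((r.ax >> 8) != 0x00 || (r.ax & 0xff) != 0x4f)
                    return -1;                     /* no usable DDC support */

            r.ax = 0x4f15;                         /* read EDID block 0 */
            r.bx = 0x01;
            r.cx = 0;
            r.di = 0x140;                          /* ES:DI -> EDID buffer, as in the asm */
            int10(&r);
            return 0;
    }
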
diff --git a/arch/x86_64/crypto/aes-x86_64-asm.S b/arch/x86_64/crypto/aes-x86_64-asm.S
index 483cbb23ab8d..26b40de4d0b0 100644
--- a/arch/x86_64/crypto/aes-x86_64-asm.S
+++ b/arch/x86_64/crypto/aes-x86_64-asm.S
@@ -15,6 +15,10 @@
15 15
16.text 16.text
17 17
18#include <asm/asm-offsets.h>
19
20#define BASE crypto_tfm_ctx_offset
21
18#define R1 %rax 22#define R1 %rax
19#define R1E %eax 23#define R1E %eax
20#define R1X %ax 24#define R1X %ax
@@ -46,19 +50,19 @@
46#define R10 %r10 50#define R10 %r10
47#define R11 %r11 51#define R11 %r11
48 52
49#define prologue(FUNC,BASE,B128,B192,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11) \ 53#define prologue(FUNC,KEY,B128,B192,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11) \
50 .global FUNC; \ 54 .global FUNC; \
51 .type FUNC,@function; \ 55 .type FUNC,@function; \
52 .align 8; \ 56 .align 8; \
53FUNC: movq r1,r2; \ 57FUNC: movq r1,r2; \
54 movq r3,r4; \ 58 movq r3,r4; \
55 leaq BASE+52(r8),r9; \ 59 leaq BASE+KEY+52(r8),r9; \
56 movq r10,r11; \ 60 movq r10,r11; \
57 movl (r7),r5 ## E; \ 61 movl (r7),r5 ## E; \
58 movl 4(r7),r1 ## E; \ 62 movl 4(r7),r1 ## E; \
59 movl 8(r7),r6 ## E; \ 63 movl 8(r7),r6 ## E; \
60 movl 12(r7),r7 ## E; \ 64 movl 12(r7),r7 ## E; \
61 movl (r8),r10 ## E; \ 65 movl BASE(r8),r10 ## E; \
62 xorl -48(r9),r5 ## E; \ 66 xorl -48(r9),r5 ## E; \
63 xorl -44(r9),r1 ## E; \ 67 xorl -44(r9),r1 ## E; \
64 xorl -40(r9),r6 ## E; \ 68 xorl -40(r9),r6 ## E; \
@@ -128,8 +132,8 @@ FUNC: movq r1,r2; \
128 movl r3 ## E,r1 ## E; \ 132 movl r3 ## E,r1 ## E; \
129 movl r4 ## E,r2 ## E; 133 movl r4 ## E,r2 ## E;
130 134
131#define entry(FUNC,BASE,B128,B192) \ 135#define entry(FUNC,KEY,B128,B192) \
132 prologue(FUNC,BASE,B128,B192,R2,R8,R7,R9,R1,R3,R4,R6,R10,R5,R11) 136 prologue(FUNC,KEY,B128,B192,R2,R8,R7,R9,R1,R3,R4,R6,R10,R5,R11)
133 137
134#define return epilogue(R8,R2,R9,R7,R5,R6,R3,R4,R11) 138#define return epilogue(R8,R2,R9,R7,R5,R6,R3,R4,R11)
135 139
@@ -147,9 +151,9 @@ FUNC: movq r1,r2; \
147#define decrypt_final(TAB,OFFSET) \ 151#define decrypt_final(TAB,OFFSET) \
148 round(TAB,OFFSET,R2,R1,R4,R3,R6,R5,R7,R10,R5,R6,R3,R4) 152 round(TAB,OFFSET,R2,R1,R4,R3,R6,R5,R7,R10,R5,R6,R3,R4)
149 153
150/* void aes_encrypt(void *ctx, u8 *out, const u8 *in) */ 154/* void aes_enc_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in) */
151 155
152 entry(aes_encrypt,0,enc128,enc192) 156 entry(aes_enc_blk,0,enc128,enc192)
153 encrypt_round(aes_ft_tab,-96) 157 encrypt_round(aes_ft_tab,-96)
154 encrypt_round(aes_ft_tab,-80) 158 encrypt_round(aes_ft_tab,-80)
155enc192: encrypt_round(aes_ft_tab,-64) 159enc192: encrypt_round(aes_ft_tab,-64)
@@ -166,9 +170,9 @@ enc128: encrypt_round(aes_ft_tab,-32)
166 encrypt_final(aes_fl_tab,112) 170 encrypt_final(aes_fl_tab,112)
167 return 171 return
168 172
169/* void aes_decrypt(void *ctx, u8 *out, const u8 *in) */ 173/* void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in) */
170 174
171 entry(aes_decrypt,240,dec128,dec192) 175 entry(aes_dec_blk,240,dec128,dec192)
172 decrypt_round(aes_it_tab,-96) 176 decrypt_round(aes_it_tab,-96)
173 decrypt_round(aes_it_tab,-80) 177 decrypt_round(aes_it_tab,-80)
174dec192: decrypt_round(aes_it_tab,-64) 178dec192: decrypt_round(aes_it_tab,-64)
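The macro parameter formerly called BASE (0 for the encryption key schedule, 240 for decryption) is renamed KEY, and BASE is now #defined to crypto_tfm_ctx_offset, so the assembly addresses the per-transform context relative to the struct crypto_tfm pointer it receives as its first argument. The equivalent C-level relation, as a sketch:

    /* crypto_tfm_ctx_offset is emitted by asm-offsets.c further down in this
     * patch as offsetof(struct crypto_tfm, __crt_ctx). */
    static inline void *tfm_ctx(struct crypto_tfm *tfm)
    {
            /* what leaq BASE+KEY+52(r8) builds on: ctx starts BASE bytes past tfm */
            return (char *)tfm + crypto_tfm_ctx_offset;
    }
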
diff --git a/arch/x86_64/crypto/aes.c b/arch/x86_64/crypto/aes.c
index 6f77e7700d32..68866fab37aa 100644
--- a/arch/x86_64/crypto/aes.c
+++ b/arch/x86_64/crypto/aes.c
@@ -227,10 +227,10 @@ static void __init gen_tabs(void)
227 t ^= E_KEY[8 * i + 7]; E_KEY[8 * i + 15] = t; \ 227 t ^= E_KEY[8 * i + 7]; E_KEY[8 * i + 15] = t; \
228} 228}
229 229
230static int aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, 230static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
231 u32 *flags) 231 unsigned int key_len, u32 *flags)
232{ 232{
233 struct aes_ctx *ctx = ctx_arg; 233 struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
234 const __le32 *key = (const __le32 *)in_key; 234 const __le32 *key = (const __le32 *)in_key;
235 u32 i, j, t, u, v, w; 235 u32 i, j, t, u, v, w;
236 236
@@ -283,8 +283,18 @@ static int aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len,
283 return 0; 283 return 0;
284} 284}
285 285
286extern void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in); 286asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in);
287extern void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in); 287asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in);
288
289static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
290{
291 aes_enc_blk(tfm, dst, src);
292}
293
294static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
295{
296 aes_dec_blk(tfm, dst, src);
297}
288 298
289static struct crypto_alg aes_alg = { 299static struct crypto_alg aes_alg = {
290 .cra_name = "aes", 300 .cra_name = "aes",
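With the asmlinkage aes_enc_blk()/aes_dec_blk() wrapped by C helpers that take a struct crypto_tfm *, the cipher callbacks in aes_alg point at those wrappers. A sketch of the typical registration in this generation of the crypto API; the sizes and exact field values are illustrative, not copied from the hunk above, which ends right after the struct opens:

    /* Illustrative wiring only; see the real aes_alg in the file itself. */
    static struct crypto_alg aes_alg_sketch = {
            .cra_name       = "aes",
            .cra_flags      = CRYPTO_ALG_TYPE_CIPHER,
            .cra_blocksize  = 16,                      /* AES block size */
            .cra_ctxsize    = sizeof(struct aes_ctx),
            .cra_module     = THIS_MODULE,
            .cra_u          = {
                    .cipher = {
                            .cia_min_keysize = 16,
                            .cia_max_keysize = 32,
                            .cia_setkey      = aes_set_key,
                            .cia_encrypt     = aes_encrypt,
                            .cia_decrypt     = aes_decrypt,
                    }
            }
    };
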
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig
index 69db0c0721d1..e69d403949c8 100644
--- a/arch/x86_64/defconfig
+++ b/arch/x86_64/defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.17-rc1-git11 3# Linux kernel version: 2.6.17-git6
4# Sun Apr 16 07:22:36 2006 4# Sat Jun 24 00:52:28 2006
5# 5#
6CONFIG_X86_64=y 6CONFIG_X86_64=y
7CONFIG_64BIT=y 7CONFIG_64BIT=y
@@ -42,7 +42,6 @@ CONFIG_IKCONFIG_PROC=y
42# CONFIG_RELAY is not set 42# CONFIG_RELAY is not set
43CONFIG_INITRAMFS_SOURCE="" 43CONFIG_INITRAMFS_SOURCE=""
44CONFIG_UID16=y 44CONFIG_UID16=y
45CONFIG_VM86=y
46CONFIG_CC_OPTIMIZE_FOR_SIZE=y 45CONFIG_CC_OPTIMIZE_FOR_SIZE=y
47# CONFIG_EMBEDDED is not set 46# CONFIG_EMBEDDED is not set
48CONFIG_KALLSYMS=y 47CONFIG_KALLSYMS=y
@@ -57,7 +56,6 @@ CONFIG_FUTEX=y
57CONFIG_EPOLL=y 56CONFIG_EPOLL=y
58CONFIG_SHMEM=y 57CONFIG_SHMEM=y
59CONFIG_SLAB=y 58CONFIG_SLAB=y
60CONFIG_DOUBLEFAULT=y
61# CONFIG_TINY_SHMEM is not set 59# CONFIG_TINY_SHMEM is not set
62CONFIG_BASE_SMALL=0 60CONFIG_BASE_SMALL=0
63# CONFIG_SLOB is not set 61# CONFIG_SLOB is not set
@@ -144,7 +142,8 @@ CONFIG_NR_CPUS=32
144CONFIG_HOTPLUG_CPU=y 142CONFIG_HOTPLUG_CPU=y
145CONFIG_HPET_TIMER=y 143CONFIG_HPET_TIMER=y
146CONFIG_HPET_EMULATE_RTC=y 144CONFIG_HPET_EMULATE_RTC=y
147CONFIG_GART_IOMMU=y 145CONFIG_IOMMU=y
146# CONFIG_CALGARY_IOMMU is not set
148CONFIG_SWIOTLB=y 147CONFIG_SWIOTLB=y
149CONFIG_X86_MCE=y 148CONFIG_X86_MCE=y
150CONFIG_X86_MCE_INTEL=y 149CONFIG_X86_MCE_INTEL=y
@@ -158,6 +157,7 @@ CONFIG_HZ_250=y
158# CONFIG_HZ_1000 is not set 157# CONFIG_HZ_1000 is not set
159CONFIG_HZ=250 158CONFIG_HZ=250
160# CONFIG_REORDER is not set 159# CONFIG_REORDER is not set
160CONFIG_K8_NB=y
161CONFIG_GENERIC_HARDIRQS=y 161CONFIG_GENERIC_HARDIRQS=y
162CONFIG_GENERIC_IRQ_PROBE=y 162CONFIG_GENERIC_IRQ_PROBE=y
163CONFIG_ISA_DMA_API=y 163CONFIG_ISA_DMA_API=y
@@ -293,6 +293,8 @@ CONFIG_IP_PNP_DHCP=y
293# CONFIG_INET_IPCOMP is not set 293# CONFIG_INET_IPCOMP is not set
294# CONFIG_INET_XFRM_TUNNEL is not set 294# CONFIG_INET_XFRM_TUNNEL is not set
295# CONFIG_INET_TUNNEL is not set 295# CONFIG_INET_TUNNEL is not set
296# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
297# CONFIG_INET_XFRM_MODE_TUNNEL is not set
296CONFIG_INET_DIAG=y 298CONFIG_INET_DIAG=y
297CONFIG_INET_TCP_DIAG=y 299CONFIG_INET_TCP_DIAG=y
298# CONFIG_TCP_CONG_ADVANCED is not set 300# CONFIG_TCP_CONG_ADVANCED is not set
@@ -305,7 +307,10 @@ CONFIG_IPV6=y
305# CONFIG_INET6_IPCOMP is not set 307# CONFIG_INET6_IPCOMP is not set
306# CONFIG_INET6_XFRM_TUNNEL is not set 308# CONFIG_INET6_XFRM_TUNNEL is not set
307# CONFIG_INET6_TUNNEL is not set 309# CONFIG_INET6_TUNNEL is not set
310# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
311# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
308# CONFIG_IPV6_TUNNEL is not set 312# CONFIG_IPV6_TUNNEL is not set
313# CONFIG_NETWORK_SECMARK is not set
309# CONFIG_NETFILTER is not set 314# CONFIG_NETFILTER is not set
310 315
311# 316#
@@ -344,6 +349,7 @@ CONFIG_IPV6=y
344# Network testing 349# Network testing
345# 350#
346# CONFIG_NET_PKTGEN is not set 351# CONFIG_NET_PKTGEN is not set
352# CONFIG_NET_TCPPROBE is not set
347# CONFIG_HAMRADIO is not set 353# CONFIG_HAMRADIO is not set
348# CONFIG_IRDA is not set 354# CONFIG_IRDA is not set
349# CONFIG_BT is not set 355# CONFIG_BT is not set
@@ -360,6 +366,7 @@ CONFIG_STANDALONE=y
360CONFIG_PREVENT_FIRMWARE_BUILD=y 366CONFIG_PREVENT_FIRMWARE_BUILD=y
361CONFIG_FW_LOADER=y 367CONFIG_FW_LOADER=y
362# CONFIG_DEBUG_DRIVER is not set 368# CONFIG_DEBUG_DRIVER is not set
369# CONFIG_SYS_HYPERVISOR is not set
363 370
364# 371#
365# Connector - unified userspace <-> kernelspace linker 372# Connector - unified userspace <-> kernelspace linker
@@ -526,6 +533,7 @@ CONFIG_SCSI_ATA_PIIX=y
526# CONFIG_SCSI_SATA_MV is not set 533# CONFIG_SCSI_SATA_MV is not set
527CONFIG_SCSI_SATA_NV=y 534CONFIG_SCSI_SATA_NV=y
528# CONFIG_SCSI_PDC_ADMA is not set 535# CONFIG_SCSI_PDC_ADMA is not set
536# CONFIG_SCSI_HPTIOP is not set
529# CONFIG_SCSI_SATA_QSTOR is not set 537# CONFIG_SCSI_SATA_QSTOR is not set
530# CONFIG_SCSI_SATA_PROMISE is not set 538# CONFIG_SCSI_SATA_PROMISE is not set
531# CONFIG_SCSI_SATA_SX4 is not set 539# CONFIG_SCSI_SATA_SX4 is not set
@@ -591,10 +599,7 @@ CONFIG_IEEE1394=y
591# 599#
592# Device Drivers 600# Device Drivers
593# 601#
594 602# CONFIG_IEEE1394_PCILYNX is not set
595#
596# Texas Instruments PCILynx requires I2C
597#
598CONFIG_IEEE1394_OHCI1394=y 603CONFIG_IEEE1394_OHCI1394=y
599 604
600# 605#
@@ -645,7 +650,16 @@ CONFIG_VORTEX=y
645# 650#
646# Tulip family network device support 651# Tulip family network device support
647# 652#
648# CONFIG_NET_TULIP is not set 653CONFIG_NET_TULIP=y
654# CONFIG_DE2104X is not set
655CONFIG_TULIP=y
656# CONFIG_TULIP_MWI is not set
657# CONFIG_TULIP_MMIO is not set
658# CONFIG_TULIP_NAPI is not set
659# CONFIG_DE4X5 is not set
660# CONFIG_WINBOND_840 is not set
661# CONFIG_DM9102 is not set
662# CONFIG_ULI526X is not set
649# CONFIG_HP100 is not set 663# CONFIG_HP100 is not set
650CONFIG_NET_PCI=y 664CONFIG_NET_PCI=y
651# CONFIG_PCNET32 is not set 665# CONFIG_PCNET32 is not set
@@ -697,6 +711,7 @@ CONFIG_TIGON3=y
697# CONFIG_IXGB is not set 711# CONFIG_IXGB is not set
698CONFIG_S2IO=m 712CONFIG_S2IO=m
699# CONFIG_S2IO_NAPI is not set 713# CONFIG_S2IO_NAPI is not set
714# CONFIG_MYRI10GE is not set
700 715
701# 716#
702# Token Ring devices 717# Token Ring devices
@@ -887,7 +902,56 @@ CONFIG_HPET_MMAP=y
887# 902#
888# I2C support 903# I2C support
889# 904#
890# CONFIG_I2C is not set 905CONFIG_I2C=m
906CONFIG_I2C_CHARDEV=m
907
908#
909# I2C Algorithms
910#
911# CONFIG_I2C_ALGOBIT is not set
912# CONFIG_I2C_ALGOPCF is not set
913# CONFIG_I2C_ALGOPCA is not set
914
915#
916# I2C Hardware Bus support
917#
918# CONFIG_I2C_ALI1535 is not set
919# CONFIG_I2C_ALI1563 is not set
920# CONFIG_I2C_ALI15X3 is not set
921# CONFIG_I2C_AMD756 is not set
922# CONFIG_I2C_AMD8111 is not set
923# CONFIG_I2C_I801 is not set
924# CONFIG_I2C_I810 is not set
925# CONFIG_I2C_PIIX4 is not set
926CONFIG_I2C_ISA=m
927# CONFIG_I2C_NFORCE2 is not set
928# CONFIG_I2C_OCORES is not set
929# CONFIG_I2C_PARPORT_LIGHT is not set
930# CONFIG_I2C_PROSAVAGE is not set
931# CONFIG_I2C_SAVAGE4 is not set
932# CONFIG_I2C_SIS5595 is not set
933# CONFIG_I2C_SIS630 is not set
934# CONFIG_I2C_SIS96X is not set
935# CONFIG_I2C_STUB is not set
936# CONFIG_I2C_VIA is not set
937# CONFIG_I2C_VIAPRO is not set
938# CONFIG_I2C_VOODOO3 is not set
939# CONFIG_I2C_PCA_ISA is not set
940
941#
942# Miscellaneous I2C Chip support
943#
944# CONFIG_SENSORS_DS1337 is not set
945# CONFIG_SENSORS_DS1374 is not set
946# CONFIG_SENSORS_EEPROM is not set
947# CONFIG_SENSORS_PCF8574 is not set
948# CONFIG_SENSORS_PCA9539 is not set
949# CONFIG_SENSORS_PCF8591 is not set
950# CONFIG_SENSORS_MAX6875 is not set
951# CONFIG_I2C_DEBUG_CORE is not set
952# CONFIG_I2C_DEBUG_ALGO is not set
953# CONFIG_I2C_DEBUG_BUS is not set
954# CONFIG_I2C_DEBUG_CHIP is not set
891 955
892# 956#
893# SPI support 957# SPI support
@@ -898,14 +962,51 @@ CONFIG_HPET_MMAP=y
898# 962#
899# Dallas's 1-wire bus 963# Dallas's 1-wire bus
900# 964#
901# CONFIG_W1 is not set
902 965
903# 966#
904# Hardware Monitoring support 967# Hardware Monitoring support
905# 968#
906CONFIG_HWMON=y 969CONFIG_HWMON=y
907# CONFIG_HWMON_VID is not set 970# CONFIG_HWMON_VID is not set
971# CONFIG_SENSORS_ABITUGURU is not set
972# CONFIG_SENSORS_ADM1021 is not set
973# CONFIG_SENSORS_ADM1025 is not set
974# CONFIG_SENSORS_ADM1026 is not set
975# CONFIG_SENSORS_ADM1031 is not set
976# CONFIG_SENSORS_ADM9240 is not set
977# CONFIG_SENSORS_ASB100 is not set
978# CONFIG_SENSORS_ATXP1 is not set
979# CONFIG_SENSORS_DS1621 is not set
908# CONFIG_SENSORS_F71805F is not set 980# CONFIG_SENSORS_F71805F is not set
981# CONFIG_SENSORS_FSCHER is not set
982# CONFIG_SENSORS_FSCPOS is not set
983# CONFIG_SENSORS_GL518SM is not set
984# CONFIG_SENSORS_GL520SM is not set
985# CONFIG_SENSORS_IT87 is not set
986# CONFIG_SENSORS_LM63 is not set
987# CONFIG_SENSORS_LM75 is not set
988# CONFIG_SENSORS_LM77 is not set
989# CONFIG_SENSORS_LM78 is not set
990# CONFIG_SENSORS_LM80 is not set
991# CONFIG_SENSORS_LM83 is not set
992# CONFIG_SENSORS_LM85 is not set
993# CONFIG_SENSORS_LM87 is not set
994# CONFIG_SENSORS_LM90 is not set
995# CONFIG_SENSORS_LM92 is not set
996# CONFIG_SENSORS_MAX1619 is not set
997# CONFIG_SENSORS_PC87360 is not set
998# CONFIG_SENSORS_SIS5595 is not set
999# CONFIG_SENSORS_SMSC47M1 is not set
1000# CONFIG_SENSORS_SMSC47M192 is not set
1001CONFIG_SENSORS_SMSC47B397=m
1002# CONFIG_SENSORS_VIA686A is not set
1003# CONFIG_SENSORS_VT8231 is not set
1004# CONFIG_SENSORS_W83781D is not set
1005# CONFIG_SENSORS_W83791D is not set
1006# CONFIG_SENSORS_W83792D is not set
1007# CONFIG_SENSORS_W83L785TS is not set
1008# CONFIG_SENSORS_W83627HF is not set
1009# CONFIG_SENSORS_W83627EHF is not set
909# CONFIG_SENSORS_HDAPS is not set 1010# CONFIG_SENSORS_HDAPS is not set
910# CONFIG_HWMON_DEBUG_CHIP is not set 1011# CONFIG_HWMON_DEBUG_CHIP is not set
911 1012
@@ -918,6 +1019,7 @@ CONFIG_HWMON=y
918# Multimedia devices 1019# Multimedia devices
919# 1020#
920# CONFIG_VIDEO_DEV is not set 1021# CONFIG_VIDEO_DEV is not set
1022CONFIG_VIDEO_V4L2=y
921 1023
922# 1024#
923# Digital Video Broadcasting Devices 1025# Digital Video Broadcasting Devices
@@ -953,28 +1055,17 @@ CONFIG_SOUND=y
953# Open Sound System 1055# Open Sound System
954# 1056#
955CONFIG_SOUND_PRIME=y 1057CONFIG_SOUND_PRIME=y
956CONFIG_OBSOLETE_OSS_DRIVER=y
957# CONFIG_SOUND_BT878 is not set 1058# CONFIG_SOUND_BT878 is not set
958# CONFIG_SOUND_CMPCI is not set
959# CONFIG_SOUND_EMU10K1 is not set 1059# CONFIG_SOUND_EMU10K1 is not set
960# CONFIG_SOUND_FUSION is not set 1060# CONFIG_SOUND_FUSION is not set
961# CONFIG_SOUND_CS4281 is not set
962# CONFIG_SOUND_ES1370 is not set
963# CONFIG_SOUND_ES1371 is not set 1061# CONFIG_SOUND_ES1371 is not set
964# CONFIG_SOUND_ESSSOLO1 is not set
965# CONFIG_SOUND_MAESTRO is not set
966# CONFIG_SOUND_MAESTRO3 is not set
967CONFIG_SOUND_ICH=y 1062CONFIG_SOUND_ICH=y
968# CONFIG_SOUND_SONICVIBES is not set
969# CONFIG_SOUND_TRIDENT is not set 1063# CONFIG_SOUND_TRIDENT is not set
970# CONFIG_SOUND_MSNDCLAS is not set 1064# CONFIG_SOUND_MSNDCLAS is not set
971# CONFIG_SOUND_MSNDPIN is not set 1065# CONFIG_SOUND_MSNDPIN is not set
972# CONFIG_SOUND_VIA82CXXX is not set 1066# CONFIG_SOUND_VIA82CXXX is not set
973# CONFIG_SOUND_OSS is not set 1067# CONFIG_SOUND_OSS is not set
974# CONFIG_SOUND_ALI5455 is not set 1068# CONFIG_SOUND_TVMIXER is not set
975# CONFIG_SOUND_FORTE is not set
976# CONFIG_SOUND_RME96XX is not set
977# CONFIG_SOUND_AD1980 is not set
978 1069
979# 1070#
980# USB support 1071# USB support
@@ -1000,6 +1091,7 @@ CONFIG_USB_DEVICEFS=y
1000CONFIG_USB_EHCI_HCD=y 1091CONFIG_USB_EHCI_HCD=y
1001# CONFIG_USB_EHCI_SPLIT_ISO is not set 1092# CONFIG_USB_EHCI_SPLIT_ISO is not set
1002# CONFIG_USB_EHCI_ROOT_HUB_TT is not set 1093# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
1094# CONFIG_USB_EHCI_TT_NEWSCHED is not set
1003# CONFIG_USB_ISP116X_HCD is not set 1095# CONFIG_USB_ISP116X_HCD is not set
1004CONFIG_USB_OHCI_HCD=y 1096CONFIG_USB_OHCI_HCD=y
1005# CONFIG_USB_OHCI_BIG_ENDIAN is not set 1097# CONFIG_USB_OHCI_BIG_ENDIAN is not set
@@ -1089,10 +1181,12 @@ CONFIG_USB_MON=y
1089# CONFIG_USB_LEGOTOWER is not set 1181# CONFIG_USB_LEGOTOWER is not set
1090# CONFIG_USB_LCD is not set 1182# CONFIG_USB_LCD is not set
1091# CONFIG_USB_LED is not set 1183# CONFIG_USB_LED is not set
1184# CONFIG_USB_CY7C63 is not set
1092# CONFIG_USB_CYTHERM is not set 1185# CONFIG_USB_CYTHERM is not set
1093# CONFIG_USB_PHIDGETKIT is not set 1186# CONFIG_USB_PHIDGETKIT is not set
1094# CONFIG_USB_PHIDGETSERVO is not set 1187# CONFIG_USB_PHIDGETSERVO is not set
1095# CONFIG_USB_IDMOUSE is not set 1188# CONFIG_USB_IDMOUSE is not set
1189# CONFIG_USB_APPLEDISPLAY is not set
1096# CONFIG_USB_SISUSBVGA is not set 1190# CONFIG_USB_SISUSBVGA is not set
1097# CONFIG_USB_LD is not set 1191# CONFIG_USB_LD is not set
1098# CONFIG_USB_TEST is not set 1192# CONFIG_USB_TEST is not set
@@ -1141,6 +1235,19 @@ CONFIG_USB_MON=y
1141# CONFIG_RTC_CLASS is not set 1235# CONFIG_RTC_CLASS is not set
1142 1236
1143# 1237#
1238# DMA Engine support
1239#
1240# CONFIG_DMA_ENGINE is not set
1241
1242#
1243# DMA Clients
1244#
1245
1246#
1247# DMA Devices
1248#
1249
1250#
1144# Firmware Drivers 1251# Firmware Drivers
1145# 1252#
1146# CONFIG_EDD is not set 1253# CONFIG_EDD is not set
@@ -1175,6 +1282,7 @@ CONFIG_FS_POSIX_ACL=y
1175# CONFIG_MINIX_FS is not set 1282# CONFIG_MINIX_FS is not set
1176# CONFIG_ROMFS_FS is not set 1283# CONFIG_ROMFS_FS is not set
1177CONFIG_INOTIFY=y 1284CONFIG_INOTIFY=y
1285CONFIG_INOTIFY_USER=y
1178# CONFIG_QUOTA is not set 1286# CONFIG_QUOTA is not set
1179CONFIG_DNOTIFY=y 1287CONFIG_DNOTIFY=y
1180CONFIG_AUTOFS_FS=y 1288CONFIG_AUTOFS_FS=y
@@ -1331,7 +1439,8 @@ CONFIG_DETECT_SOFTLOCKUP=y
1331CONFIG_DEBUG_FS=y 1439CONFIG_DEBUG_FS=y
1332# CONFIG_DEBUG_VM is not set 1440# CONFIG_DEBUG_VM is not set
1333# CONFIG_FRAME_POINTER is not set 1441# CONFIG_FRAME_POINTER is not set
1334# CONFIG_UNWIND_INFO is not set 1442CONFIG_UNWIND_INFO=y
1443CONFIG_STACK_UNWIND=y
1335# CONFIG_FORCED_INLINING is not set 1444# CONFIG_FORCED_INLINING is not set
1336# CONFIG_RCU_TORTURE_TEST is not set 1445# CONFIG_RCU_TORTURE_TEST is not set
1337# CONFIG_DEBUG_RODATA is not set 1446# CONFIG_DEBUG_RODATA is not set
diff --git a/arch/x86_64/ia32/fpu32.c b/arch/x86_64/ia32/fpu32.c
index 1c23095f1813..2c8209a3605a 100644
--- a/arch/x86_64/ia32/fpu32.c
+++ b/arch/x86_64/ia32/fpu32.c
@@ -2,7 +2,6 @@
2 * Copyright 2002 Andi Kleen, SuSE Labs. 2 * Copyright 2002 Andi Kleen, SuSE Labs.
3 * FXSAVE<->i387 conversion support. Based on code by Gareth Hughes. 3 * FXSAVE<->i387 conversion support. Based on code by Gareth Hughes.
4 * This is used for ptrace, signals and coredumps in 32bit emulation. 4 * This is used for ptrace, signals and coredumps in 32bit emulation.
5 * $Id: fpu32.c,v 1.1 2002/03/21 14:16:32 ak Exp $
6 */ 5 */
7 6
8#include <linux/sched.h> 7#include <linux/sched.h>
diff --git a/arch/x86_64/ia32/ia32_signal.c b/arch/x86_64/ia32/ia32_signal.c
index e0a92439f634..25e5ca22204c 100644
--- a/arch/x86_64/ia32/ia32_signal.c
+++ b/arch/x86_64/ia32/ia32_signal.c
@@ -6,8 +6,6 @@
6 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson 6 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
7 * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes 7 * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes
8 * 2000-12-* x86-64 compatibility mode signal handling by Andi Kleen 8 * 2000-12-* x86-64 compatibility mode signal handling by Andi Kleen
9 *
10 * $Id: ia32_signal.c,v 1.22 2002/07/29 10:34:03 ak Exp $
11 */ 9 */
12 10
13#include <linux/sched.h> 11#include <linux/sched.h>
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index 4ec594ab1a98..c536fa98ea37 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -155,6 +155,7 @@ sysenter_tracesys:
155 .previous 155 .previous
156 jmp sysenter_do_call 156 jmp sysenter_do_call
157 CFI_ENDPROC 157 CFI_ENDPROC
158ENDPROC(ia32_sysenter_target)
158 159
159/* 160/*
160 * 32bit SYSCALL instruction entry. 161 * 32bit SYSCALL instruction entry.
@@ -178,7 +179,7 @@ sysenter_tracesys:
178 */ 179 */
179ENTRY(ia32_cstar_target) 180ENTRY(ia32_cstar_target)
180 CFI_STARTPROC32 simple 181 CFI_STARTPROC32 simple
181 CFI_DEF_CFA rsp,0 182 CFI_DEF_CFA rsp,PDA_STACKOFFSET
182 CFI_REGISTER rip,rcx 183 CFI_REGISTER rip,rcx
183 /*CFI_REGISTER rflags,r11*/ 184 /*CFI_REGISTER rflags,r11*/
184 swapgs 185 swapgs
@@ -249,6 +250,7 @@ cstar_tracesys:
249 .quad 1b,ia32_badarg 250 .quad 1b,ia32_badarg
250 .previous 251 .previous
251 jmp cstar_do_call 252 jmp cstar_do_call
253END(ia32_cstar_target)
252 254
253ia32_badarg: 255ia32_badarg:
254 movq $-EFAULT,%rax 256 movq $-EFAULT,%rax
@@ -314,16 +316,13 @@ ia32_tracesys:
314 LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */ 316 LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
315 RESTORE_REST 317 RESTORE_REST
316 jmp ia32_do_syscall 318 jmp ia32_do_syscall
319END(ia32_syscall)
317 320
318ia32_badsys: 321ia32_badsys:
319 movq $0,ORIG_RAX-ARGOFFSET(%rsp) 322 movq $0,ORIG_RAX-ARGOFFSET(%rsp)
320 movq $-ENOSYS,RAX-ARGOFFSET(%rsp) 323 movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
321 jmp int_ret_from_sys_call 324 jmp int_ret_from_sys_call
322 325
323ni_syscall:
324 movq %rax,%rdi
325 jmp sys32_ni_syscall
326
327quiet_ni_syscall: 326quiet_ni_syscall:
328 movq $-ENOSYS,%rax 327 movq $-ENOSYS,%rax
329 ret 328 ret
@@ -370,10 +369,10 @@ ENTRY(ia32_ptregs_common)
370 RESTORE_REST 369 RESTORE_REST
371 jmp ia32_sysret /* misbalances the return cache */ 370 jmp ia32_sysret /* misbalances the return cache */
372 CFI_ENDPROC 371 CFI_ENDPROC
372END(ia32_ptregs_common)
373 373
374 .section .rodata,"a" 374 .section .rodata,"a"
375 .align 8 375 .align 8
376 .globl ia32_sys_call_table
377ia32_sys_call_table: 376ia32_sys_call_table:
378 .quad sys_restart_syscall 377 .quad sys_restart_syscall
379 .quad sys_exit 378 .quad sys_exit
diff --git a/arch/x86_64/ia32/ptrace32.c b/arch/x86_64/ia32/ptrace32.c
index 23a4515a73b4..a590b7a0d92d 100644
--- a/arch/x86_64/ia32/ptrace32.c
+++ b/arch/x86_64/ia32/ptrace32.c
@@ -7,8 +7,6 @@
7 * 7 *
8 * This allows to access 64bit processes too; but there is no way to see the extended 8 * This allows to access 64bit processes too; but there is no way to see the extended
9 * register contents. 9 * register contents.
10 *
11 * $Id: ptrace32.c,v 1.16 2003/03/14 16:06:35 ak Exp $
12 */ 10 */
13 11
14#include <linux/kernel.h> 12#include <linux/kernel.h>
@@ -27,6 +25,7 @@
27#include <asm/debugreg.h> 25#include <asm/debugreg.h>
28#include <asm/i387.h> 26#include <asm/i387.h>
29#include <asm/fpu32.h> 27#include <asm/fpu32.h>
28#include <asm/ia32.h>
30 29
31/* 30/*
32 * Determines which flags the user has access to [1 = access, 0 = no access]. 31 * Determines which flags the user has access to [1 = access, 0 = no access].
@@ -199,6 +198,24 @@ static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
199 198
200#undef R32 199#undef R32
201 200
201static long ptrace32_siginfo(unsigned request, u32 pid, u32 addr, u32 data)
202{
203 int ret;
204 compat_siginfo_t *si32 = (compat_siginfo_t *)compat_ptr(data);
205 siginfo_t *si = compat_alloc_user_space(sizeof(siginfo_t));
206 if (request == PTRACE_SETSIGINFO) {
207 ret = copy_siginfo_from_user32(si, si32);
208 if (ret)
209 return ret;
210 }
211 ret = sys_ptrace(request, pid, addr, (unsigned long)si);
212 if (ret)
213 return ret;
214 if (request == PTRACE_GETSIGINFO)
215 ret = copy_siginfo_to_user32(si32, si);
216 return ret;
217}
218
202asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data) 219asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
203{ 220{
204 struct task_struct *child; 221 struct task_struct *child;
@@ -208,9 +225,19 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
208 __u32 val; 225 __u32 val;
209 226
210 switch (request) { 227 switch (request) {
211 default: 228 case PTRACE_TRACEME:
229 case PTRACE_ATTACH:
230 case PTRACE_KILL:
231 case PTRACE_CONT:
232 case PTRACE_SINGLESTEP:
233 case PTRACE_DETACH:
234 case PTRACE_SYSCALL:
235 case PTRACE_SETOPTIONS:
212 return sys_ptrace(request, pid, addr, data); 236 return sys_ptrace(request, pid, addr, data);
213 237
238 default:
239 return -EINVAL;
240
214 case PTRACE_PEEKTEXT: 241 case PTRACE_PEEKTEXT:
215 case PTRACE_PEEKDATA: 242 case PTRACE_PEEKDATA:
216 case PTRACE_POKEDATA: 243 case PTRACE_POKEDATA:
@@ -225,10 +252,11 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
225 case PTRACE_GETFPXREGS: 252 case PTRACE_GETFPXREGS:
226 case PTRACE_GETEVENTMSG: 253 case PTRACE_GETEVENTMSG:
227 break; 254 break;
228 }
229 255
230 if (request == PTRACE_TRACEME) 256 case PTRACE_SETSIGINFO:
231 return ptrace_traceme(); 257 case PTRACE_GETSIGINFO:
258 return ptrace32_siginfo(request, pid, addr, data);
259 }
232 260
233 child = ptrace_get_task_struct(pid); 261 child = ptrace_get_task_struct(pid);
234 if (IS_ERR(child)) 262 if (IS_ERR(child))
@@ -349,8 +377,7 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
349 break; 377 break;
350 378
351 default: 379 default:
352 ret = -EINVAL; 380 BUG();
353 break;
354 } 381 }
355 382
356 out: 383 out:
diff --git a/arch/x86_64/ia32/sys_ia32.c b/arch/x86_64/ia32/sys_ia32.c
index f182b20858e2..dc88154c412b 100644
--- a/arch/x86_64/ia32/sys_ia32.c
+++ b/arch/x86_64/ia32/sys_ia32.c
@@ -508,19 +508,6 @@ sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr, int options)
508 return compat_sys_wait4(pid, stat_addr, options, NULL); 508 return compat_sys_wait4(pid, stat_addr, options, NULL);
509} 509}
510 510
511int sys32_ni_syscall(int call)
512{
513 struct task_struct *me = current;
514 static char lastcomm[sizeof(me->comm)];
515
516 if (strncmp(lastcomm, me->comm, sizeof(lastcomm))) {
517 printk(KERN_INFO "IA32 syscall %d from %s not implemented\n",
518 call, me->comm);
519 strncpy(lastcomm, me->comm, sizeof(lastcomm));
520 }
521 return -ENOSYS;
522}
523
524/* 32-bit timeval and related flotsam. */ 511/* 32-bit timeval and related flotsam. */
525 512
526asmlinkage long 513asmlinkage long
@@ -916,7 +903,7 @@ long sys32_vm86_warning(void)
916 struct task_struct *me = current; 903 struct task_struct *me = current;
917 static char lastcomm[sizeof(me->comm)]; 904 static char lastcomm[sizeof(me->comm)];
918 if (strncmp(lastcomm, me->comm, sizeof(lastcomm))) { 905 if (strncmp(lastcomm, me->comm, sizeof(lastcomm))) {
919 printk(KERN_INFO "%s: vm86 mode not supported on 64 bit kernel\n", 906 compat_printk(KERN_INFO "%s: vm86 mode not supported on 64 bit kernel\n",
920 me->comm); 907 me->comm);
921 strncpy(lastcomm, me->comm, sizeof(lastcomm)); 908 strncpy(lastcomm, me->comm, sizeof(lastcomm));
922 } 909 }
@@ -929,13 +916,3 @@ long sys32_lookup_dcookie(u32 addr_low, u32 addr_high,
929 return sys_lookup_dcookie(((u64)addr_high << 32) | addr_low, buf, len); 916 return sys_lookup_dcookie(((u64)addr_high << 32) | addr_low, buf, len);
930} 917}
931 918
932static int __init ia32_init (void)
933{
934 printk("IA32 emulation $Id: sys_ia32.c,v 1.32 2002/03/24 13:02:28 ak Exp $\n");
935 return 0;
936}
937
938__initcall(ia32_init);
939
940extern unsigned long ia32_sys_call_table[];
941EXPORT_SYMBOL(ia32_sys_call_table);
diff --git a/arch/x86_64/kernel/Makefile b/arch/x86_64/kernel/Makefile
index 059c88313f4e..aeb9c560be88 100644
--- a/arch/x86_64/kernel/Makefile
+++ b/arch/x86_64/kernel/Makefile
@@ -8,7 +8,7 @@ obj-y := process.o signal.o entry.o traps.o irq.o \
8 ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_x86_64.o \ 8 ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_x86_64.o \
9 x8664_ksyms.o i387.o syscall.o vsyscall.o \ 9 x8664_ksyms.o i387.o syscall.o vsyscall.o \
10 setup64.o bootflag.o e820.o reboot.o quirks.o i8237.o \ 10 setup64.o bootflag.o e820.o reboot.o quirks.o i8237.o \
11 pci-dma.o pci-nommu.o 11 pci-dma.o pci-nommu.o alternative.o
12 12
13obj-$(CONFIG_X86_MCE) += mce.o 13obj-$(CONFIG_X86_MCE) += mce.o
14obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o 14obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o
@@ -28,11 +28,13 @@ obj-$(CONFIG_PM) += suspend.o
28obj-$(CONFIG_SOFTWARE_SUSPEND) += suspend_asm.o 28obj-$(CONFIG_SOFTWARE_SUSPEND) += suspend_asm.o
29obj-$(CONFIG_CPU_FREQ) += cpufreq/ 29obj-$(CONFIG_CPU_FREQ) += cpufreq/
30obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 30obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
31obj-$(CONFIG_GART_IOMMU) += pci-gart.o aperture.o 31obj-$(CONFIG_IOMMU) += pci-gart.o aperture.o
32obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary.o tce.o
32obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o 33obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
33obj-$(CONFIG_KPROBES) += kprobes.o 34obj-$(CONFIG_KPROBES) += kprobes.o
34obj-$(CONFIG_X86_PM_TIMER) += pmtimer.o 35obj-$(CONFIG_X86_PM_TIMER) += pmtimer.o
35obj-$(CONFIG_X86_VSMP) += vsmp.o 36obj-$(CONFIG_X86_VSMP) += vsmp.o
37obj-$(CONFIG_K8_NB) += k8.o
36 38
37obj-$(CONFIG_MODULES) += module.o 39obj-$(CONFIG_MODULES) += module.o
38 40
@@ -49,3 +51,5 @@ intel_cacheinfo-y += ../../i386/kernel/cpu/intel_cacheinfo.o
49quirks-y += ../../i386/kernel/quirks.o 51quirks-y += ../../i386/kernel/quirks.o
50i8237-y += ../../i386/kernel/i8237.o 52i8237-y += ../../i386/kernel/i8237.o
51msr-$(subst m,y,$(CONFIG_X86_MSR)) += ../../i386/kernel/msr.o 53msr-$(subst m,y,$(CONFIG_X86_MSR)) += ../../i386/kernel/msr.o
54alternative-y += ../../i386/kernel/alternative.o
55
diff --git a/arch/x86_64/kernel/aperture.c b/arch/x86_64/kernel/aperture.c
index 70b9d21ed675..a195ef06ec55 100644
--- a/arch/x86_64/kernel/aperture.c
+++ b/arch/x86_64/kernel/aperture.c
@@ -8,7 +8,6 @@
8 * because only the bootmem allocator can allocate 32+MB. 8 * because only the bootmem allocator can allocate 32+MB.
9 * 9 *
10 * Copyright 2002 Andi Kleen, SuSE Labs. 10 * Copyright 2002 Andi Kleen, SuSE Labs.
11 * $Id: aperture.c,v 1.7 2003/08/01 03:36:18 ak Exp $
12 */ 11 */
13#include <linux/config.h> 12#include <linux/config.h>
14#include <linux/kernel.h> 13#include <linux/kernel.h>
@@ -24,6 +23,7 @@
24#include <asm/proto.h> 23#include <asm/proto.h>
25#include <asm/pci-direct.h> 24#include <asm/pci-direct.h>
26#include <asm/dma.h> 25#include <asm/dma.h>
26#include <asm/k8.h>
27 27
28int iommu_aperture; 28int iommu_aperture;
29int iommu_aperture_disabled __initdata = 0; 29int iommu_aperture_disabled __initdata = 0;
@@ -37,8 +37,6 @@ int fix_aperture __initdata = 1;
37/* This code runs before the PCI subsystem is initialized, so just 37/* This code runs before the PCI subsystem is initialized, so just
38 access the northbridge directly. */ 38 access the northbridge directly. */
39 39
40#define NB_ID_3 (PCI_VENDOR_ID_AMD | (0x1103<<16))
41
42static u32 __init allocate_aperture(void) 40static u32 __init allocate_aperture(void)
43{ 41{
44 pg_data_t *nd0 = NODE_DATA(0); 42 pg_data_t *nd0 = NODE_DATA(0);
@@ -68,20 +66,20 @@ static u32 __init allocate_aperture(void)
68 return (u32)__pa(p); 66 return (u32)__pa(p);
69} 67}
70 68
71static int __init aperture_valid(char *name, u64 aper_base, u32 aper_size) 69static int __init aperture_valid(u64 aper_base, u32 aper_size)
72{ 70{
73 if (!aper_base) 71 if (!aper_base)
74 return 0; 72 return 0;
75 if (aper_size < 64*1024*1024) { 73 if (aper_size < 64*1024*1024) {
76 printk("Aperture from %s too small (%d MB)\n", name, aper_size>>20); 74 printk("Aperture too small (%d MB)\n", aper_size>>20);
77 return 0; 75 return 0;
78 } 76 }
79 if (aper_base + aper_size >= 0xffffffff) { 77 if (aper_base + aper_size >= 0xffffffff) {
80 printk("Aperture from %s beyond 4GB. Ignoring.\n",name); 78 printk("Aperture beyond 4GB. Ignoring.\n");
81 return 0; 79 return 0;
82 } 80 }
83 if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) { 81 if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
84 printk("Aperture from %s pointing to e820 RAM. Ignoring.\n",name); 82 printk("Aperture pointing to e820 RAM. Ignoring.\n");
85 return 0; 83 return 0;
86 } 84 }
87 return 1; 85 return 1;
@@ -140,7 +138,7 @@ static __u32 __init read_agp(int num, int slot, int func, int cap, u32 *order)
140 printk("Aperture from AGP @ %Lx size %u MB (APSIZE %x)\n", 138 printk("Aperture from AGP @ %Lx size %u MB (APSIZE %x)\n",
141 aper, 32 << *order, apsizereg); 139 aper, 32 << *order, apsizereg);
142 140
143 if (!aperture_valid("AGP bridge", aper, (32*1024*1024) << *order)) 141 if (!aperture_valid(aper, (32*1024*1024) << *order))
144 return 0; 142 return 0;
145 return (u32)aper; 143 return (u32)aper;
146} 144}
@@ -208,10 +206,10 @@ void __init iommu_hole_init(void)
208 206
209 fix = 0; 207 fix = 0;
210 for (num = 24; num < 32; num++) { 208 for (num = 24; num < 32; num++) {
211 char name[30]; 209 if (!early_is_k8_nb(read_pci_config(0, num, 3, 0x00)))
212 if (read_pci_config(0, num, 3, 0x00) != NB_ID_3) 210 continue;
213 continue;
214 211
212 iommu_detected = 1;
215 iommu_aperture = 1; 213 iommu_aperture = 1;
216 214
217 aper_order = (read_pci_config(0, num, 3, 0x90) >> 1) & 7; 215 aper_order = (read_pci_config(0, num, 3, 0x90) >> 1) & 7;
@@ -222,9 +220,7 @@ void __init iommu_hole_init(void)
222 printk("CPU %d: aperture @ %Lx size %u MB\n", num-24, 220 printk("CPU %d: aperture @ %Lx size %u MB\n", num-24,
223 aper_base, aper_size>>20); 221 aper_base, aper_size>>20);
224 222
225 sprintf(name, "northbridge cpu %d", num-24); 223 if (!aperture_valid(aper_base, aper_size)) {
226
227 if (!aperture_valid(name, aper_base, aper_size)) {
228 fix = 1; 224 fix = 1;
229 break; 225 break;
230 } 226 }
@@ -273,7 +269,7 @@ void __init iommu_hole_init(void)
273 269
274 /* Fix up the north bridges */ 270 /* Fix up the north bridges */
275 for (num = 24; num < 32; num++) { 271 for (num = 24; num < 32; num++) {
276 if (read_pci_config(0, num, 3, 0x00) != NB_ID_3) 272 if (!early_is_k8_nb(read_pci_config(0, num, 3, 0x00)))
277 continue; 273 continue;
278 274
279 /* Don't enable translation yet. That is done later. 275 /* Don't enable translation yet. That is done later.
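Instead of comparing the config dword against the single NB_ID_3 value, the scan now asks early_is_k8_nb(), provided by the new k8.o helper added in the Makefile above, so additional K8 northbridge IDs can be recognized in one place. A hedged sketch of such a predicate; the ID table beyond 0x1103 is an assumption:

    /* Sketch: classify the vendor/device dword read from PCI config offset 0
     * (vendor in the low 16 bits, device in the high 16 bits). */
    #define AMD_VENDOR_ID 0x1022

    static const unsigned short k8nb_ids_sketch[] = {
            0x1103,                 /* K8 northbridge, function 3 (misc control) */
            /* further K8 NB device IDs would go here */
    };

    static int early_is_k8_nb_sketch(unsigned int device_vendor)
    {
            unsigned int i;

            if ((device_vendor & 0xffff) != AMD_VENDOR_ID)
                    return 0;
            for (i = 0; i < sizeof(k8nb_ids_sketch) / sizeof(k8nb_ids_sketch[0]); i++)
                    if ((device_vendor >> 16) == k8nb_ids_sketch[i])
                            return 1;
            return 0;
    }
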
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index 29ef99001e05..b2ead91df218 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -100,7 +100,7 @@ void clear_local_APIC(void)
100 maxlvt = get_maxlvt(); 100 maxlvt = get_maxlvt();
101 101
102 /* 102 /*
103 * Masking an LVT entry on a P6 can trigger a local APIC error 103 * Masking an LVT entry can trigger a local APIC error
104 * if the vector is zero. Mask LVTERR first to prevent this. 104 * if the vector is zero. Mask LVTERR first to prevent this.
105 */ 105 */
106 if (maxlvt >= 3) { 106 if (maxlvt >= 3) {
@@ -851,7 +851,18 @@ void disable_APIC_timer(void)
851 unsigned long v; 851 unsigned long v;
852 852
853 v = apic_read(APIC_LVTT); 853 v = apic_read(APIC_LVTT);
854 apic_write(APIC_LVTT, v | APIC_LVT_MASKED); 854 /*
855 * When an illegal vector value (0-15) is written to an LVT
856 * entry and delivery mode is Fixed, the APIC may signal an
 857 * illegal vector error, without regard to whether the mask
858 * bit is set or whether an interrupt is actually seen on input.
859 *
860 * Boot sequence might call this function when the LVTT has
861 * '0' vector value. So make sure vector field is set to
862 * valid value.
863 */
864 v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
865 apic_write(APIC_LVTT, v);
855 } 866 }
856} 867}
857 868
@@ -909,15 +920,13 @@ int setup_profiling_timer(unsigned int multiplier)
909 return -EINVAL; 920 return -EINVAL;
910} 921}
911 922
912#ifdef CONFIG_X86_MCE_AMD 923void setup_APIC_extened_lvt(unsigned char lvt_off, unsigned char vector,
913void setup_threshold_lvt(unsigned long lvt_off) 924 unsigned char msg_type, unsigned char mask)
914{ 925{
915 unsigned int v = 0; 926 unsigned long reg = (lvt_off << 4) + K8_APIC_EXT_LVT_BASE;
916 unsigned long reg = (lvt_off << 4) + 0x500; 927 unsigned int v = (mask << 16) | (msg_type << 8) | vector;
917 v |= THRESHOLD_APIC_VECTOR;
918 apic_write(reg, v); 928 apic_write(reg, v);
919} 929}
920#endif /* CONFIG_X86_MCE_AMD */
921 930
922#undef APIC_DIVISOR 931#undef APIC_DIVISOR
923 932
@@ -983,7 +992,7 @@ void smp_apic_timer_interrupt(struct pt_regs *regs)
983} 992}
984 993
985/* 994/*
986 * oem_force_hpet_timer -- force HPET mode for some boxes. 995 * apic_is_clustered_box() -- Check if we can expect good TSC
987 * 996 *
988 * Thus far, the major user of this is IBM's Summit2 series: 997 * Thus far, the major user of this is IBM's Summit2 series:
989 * 998 *
@@ -991,7 +1000,7 @@ void smp_apic_timer_interrupt(struct pt_regs *regs)
991 * multi-chassis. Use available data to take a good guess. 1000 * multi-chassis. Use available data to take a good guess.
992 * If in doubt, go HPET. 1001 * If in doubt, go HPET.
993 */ 1002 */
994__cpuinit int oem_force_hpet_timer(void) 1003__cpuinit int apic_is_clustered_box(void)
995{ 1004{
996 int i, clusters, zeros; 1005 int i, clusters, zeros;
997 unsigned id; 1006 unsigned id;
@@ -1022,8 +1031,7 @@ __cpuinit int oem_force_hpet_timer(void)
1022 } 1031 }
1023 1032
1024 /* 1033 /*
1025 * If clusters > 2, then should be multi-chassis. Return 1 for HPET. 1034 * If clusters > 2, then should be multi-chassis.
1026 * Else return 0 to use TSC.
1027 * May have to revisit this when multi-core + hyperthreaded CPUs come 1035 * May have to revisit this when multi-core + hyperthreaded CPUs come
1028 * out, but AFAIK this will work even for them. 1036 * out, but AFAIK this will work even for them.
1029 */ 1037 */
diff --git a/arch/x86_64/kernel/asm-offsets.c b/arch/x86_64/kernel/asm-offsets.c
index 38834bbbae11..96687e2beb2c 100644
--- a/arch/x86_64/kernel/asm-offsets.c
+++ b/arch/x86_64/kernel/asm-offsets.c
@@ -4,6 +4,7 @@
4 * and format the required data. 4 * and format the required data.
5 */ 5 */
6 6
7#include <linux/crypto.h>
7#include <linux/sched.h> 8#include <linux/sched.h>
8#include <linux/stddef.h> 9#include <linux/stddef.h>
9#include <linux/errno.h> 10#include <linux/errno.h>
@@ -68,5 +69,7 @@ int main(void)
68 DEFINE(pbe_next, offsetof(struct pbe, next)); 69 DEFINE(pbe_next, offsetof(struct pbe, next));
69 BLANK(); 70 BLANK();
70 DEFINE(TSS_ist, offsetof(struct tss_struct, ist)); 71 DEFINE(TSS_ist, offsetof(struct tss_struct, ist));
72 BLANK();
73 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
71 return 0; 74 return 0;
72} 75}
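The new crypto_tfm_ctx_offset entry is what turns offsetof(struct crypto_tfm, __crt_ctx) into the assembler-visible constant used by the AES code above. The asm-offsets mechanism itself is a small trick: DEFINE() makes the compiler emit the constant into its assembly output, and a later sed pass converts those marker lines into asm-offsets.h. A sketch of the usual macros, assumed to match the standard kernel pattern:

    /* Standard asm-offsets idiom (sketch): "->NAME value" markers in the
     * compiler's .s output are rewritten into #define lines at build time. */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))
    #define BLANK() \
            asm volatile("\n->" : : )
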
diff --git a/arch/x86_64/kernel/crash.c b/arch/x86_64/kernel/crash.c
index 4e6c3b729e39..d8d5750d6106 100644
--- a/arch/x86_64/kernel/crash.c
+++ b/arch/x86_64/kernel/crash.c
@@ -111,14 +111,14 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
111 atomic_dec(&waiting_for_crash_ipi); 111 atomic_dec(&waiting_for_crash_ipi);
112 /* Assume hlt works */ 112 /* Assume hlt works */
113 for(;;) 113 for(;;)
114 asm("hlt"); 114 halt();
115 115
116 return 1; 116 return 1;
117} 117}
118 118
119static void smp_send_nmi_allbutself(void) 119static void smp_send_nmi_allbutself(void)
120{ 120{
121 send_IPI_allbutself(APIC_DM_NMI); 121 send_IPI_allbutself(NMI_VECTOR);
122} 122}
123 123
124/* 124/*
@@ -161,7 +161,7 @@ void machine_crash_shutdown(struct pt_regs *regs)
161{ 161{
162 /* 162 /*
163 * This function is only called after the system 163 * This function is only called after the system
164 * has paniced or is otherwise in a critical state. 164 * has panicked or is otherwise in a critical state.
165 * The minimum amount of code to allow a kexec'd kernel 165 * The minimum amount of code to allow a kexec'd kernel
166 * to run successfully needs to happen here. 166 * to run successfully needs to happen here.
167 * 167 *
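Switching from a raw asm("hlt") to halt() and from APIC_DM_NMI to NMI_VECTOR keeps the crash path consistent with the reworked IPI code in genapic_flat.c further down, which special-cases NMI_VECTOR so NMIs are always delivered per-CPU rather than via a shortcut. halt() itself is a thin wrapper; a sketch, assuming the usual definition from the asm headers:

    /* Assumed definition of the helper; included only for context. */
    #define halt() __asm__ __volatile__("hlt" : : : "memory")

    static void park_cpu_sketch(void)
    {
            for (;;)
                    halt();      /* matches the crash_nmi_callback() loop above */
    }
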
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index 1ef6028f721e..9e94d834624b 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -1,7 +1,6 @@
1/* 1/*
2 * Handle the memory map. 2 * Handle the memory map.
3 * The functions here do the job until bootmem takes over. 3 * The functions here do the job until bootmem takes over.
4 * $Id: e820.c,v 1.4 2002/09/19 19:25:32 ak Exp $
5 * 4 *
6 * Getting sanitize_e820_map() in sync with i386 version by applying change: 5 * Getting sanitize_e820_map() in sync with i386 version by applying change:
7 * - Provisions for empty E820 memory regions (reported by certain BIOSes). 6 * - Provisions for empty E820 memory regions (reported by certain BIOSes).
@@ -621,6 +620,7 @@ void __init parse_memmapopt(char *p, char **from)
621} 620}
622 621
623unsigned long pci_mem_start = 0xaeedbabe; 622unsigned long pci_mem_start = 0xaeedbabe;
623EXPORT_SYMBOL(pci_mem_start);
624 624
625/* 625/*
626 * Search for the biggest gap in the low 32 bits of the e820 626 * Search for the biggest gap in the low 32 bits of the e820
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index 586b34c00c48..22cac4487b57 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -154,6 +154,7 @@ rff_trace:
154 GET_THREAD_INFO(%rcx) 154 GET_THREAD_INFO(%rcx)
155 jmp rff_action 155 jmp rff_action
156 CFI_ENDPROC 156 CFI_ENDPROC
157END(ret_from_fork)
157 158
158/* 159/*
159 * System call entry. Upto 6 arguments in registers are supported. 160 * System call entry. Upto 6 arguments in registers are supported.
@@ -188,7 +189,7 @@ rff_trace:
188 189
189ENTRY(system_call) 190ENTRY(system_call)
190 CFI_STARTPROC simple 191 CFI_STARTPROC simple
191 CFI_DEF_CFA rsp,0 192 CFI_DEF_CFA rsp,PDA_STACKOFFSET
192 CFI_REGISTER rip,rcx 193 CFI_REGISTER rip,rcx
193 /*CFI_REGISTER rflags,r11*/ 194 /*CFI_REGISTER rflags,r11*/
194 swapgs 195 swapgs
@@ -285,6 +286,7 @@ tracesys:
285 /* Use IRET because user could have changed frame */ 286 /* Use IRET because user could have changed frame */
286 jmp int_ret_from_sys_call 287 jmp int_ret_from_sys_call
287 CFI_ENDPROC 288 CFI_ENDPROC
289END(system_call)
288 290
289/* 291/*
290 * Syscall return path ending with IRET. 292 * Syscall return path ending with IRET.
@@ -364,6 +366,7 @@ int_restore_rest:
364 cli 366 cli
365 jmp int_with_check 367 jmp int_with_check
366 CFI_ENDPROC 368 CFI_ENDPROC
369END(int_ret_from_sys_call)
367 370
368/* 371/*
369 * Certain special system calls that need to save a complete full stack frame. 372 * Certain special system calls that need to save a complete full stack frame.
@@ -375,6 +378,7 @@ int_restore_rest:
375 leaq \func(%rip),%rax 378 leaq \func(%rip),%rax
376 leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */ 379 leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
377 jmp ptregscall_common 380 jmp ptregscall_common
381END(\label)
378 .endm 382 .endm
379 383
380 CFI_STARTPROC 384 CFI_STARTPROC
@@ -404,6 +408,7 @@ ENTRY(ptregscall_common)
404 CFI_REL_OFFSET rip, 0 408 CFI_REL_OFFSET rip, 0
405 ret 409 ret
406 CFI_ENDPROC 410 CFI_ENDPROC
411END(ptregscall_common)
407 412
408ENTRY(stub_execve) 413ENTRY(stub_execve)
409 CFI_STARTPROC 414 CFI_STARTPROC
@@ -418,6 +423,7 @@ ENTRY(stub_execve)
418 RESTORE_REST 423 RESTORE_REST
419 jmp int_ret_from_sys_call 424 jmp int_ret_from_sys_call
420 CFI_ENDPROC 425 CFI_ENDPROC
426END(stub_execve)
421 427
422/* 428/*
423 * sigreturn is special because it needs to restore all registers on return. 429 * sigreturn is special because it needs to restore all registers on return.
@@ -435,6 +441,7 @@ ENTRY(stub_rt_sigreturn)
435 RESTORE_REST 441 RESTORE_REST
436 jmp int_ret_from_sys_call 442 jmp int_ret_from_sys_call
437 CFI_ENDPROC 443 CFI_ENDPROC
444END(stub_rt_sigreturn)
438 445
439/* 446/*
440 * initial frame state for interrupts and exceptions 447 * initial frame state for interrupts and exceptions
@@ -466,29 +473,18 @@ ENTRY(stub_rt_sigreturn)
466/* 0(%rsp): interrupt number */ 473/* 0(%rsp): interrupt number */
467 .macro interrupt func 474 .macro interrupt func
468 cld 475 cld
469#ifdef CONFIG_DEBUG_INFO
470 SAVE_ALL
471 movq %rsp,%rdi
472 /*
473 * Setup a stack frame pointer. This allows gdb to trace
474 * back to the original stack.
475 */
476 movq %rsp,%rbp
477 CFI_DEF_CFA_REGISTER rbp
478#else
479 SAVE_ARGS 476 SAVE_ARGS
480 leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler 477 leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler
481#endif 478 pushq %rbp
479 CFI_ADJUST_CFA_OFFSET 8
480 CFI_REL_OFFSET rbp, 0
481 movq %rsp,%rbp
482 CFI_DEF_CFA_REGISTER rbp
482 testl $3,CS(%rdi) 483 testl $3,CS(%rdi)
483 je 1f 484 je 1f
484 swapgs 485 swapgs
4851: incl %gs:pda_irqcount # RED-PEN should check preempt count 4861: incl %gs:pda_irqcount # RED-PEN should check preempt count
486 movq %gs:pda_irqstackptr,%rax 487 cmoveq %gs:pda_irqstackptr,%rsp
487 cmoveq %rax,%rsp /*todo This needs CFI annotation! */
488 pushq %rdi # save old stack
489#ifndef CONFIG_DEBUG_INFO
490 CFI_ADJUST_CFA_OFFSET 8
491#endif
492 call \func 488 call \func
493 .endm 489 .endm
494 490
@@ -497,17 +493,11 @@ ENTRY(common_interrupt)
497 interrupt do_IRQ 493 interrupt do_IRQ
498 /* 0(%rsp): oldrsp-ARGOFFSET */ 494 /* 0(%rsp): oldrsp-ARGOFFSET */
499ret_from_intr: 495ret_from_intr:
500 popq %rdi
501#ifndef CONFIG_DEBUG_INFO
502 CFI_ADJUST_CFA_OFFSET -8
503#endif
504 cli 496 cli
505 decl %gs:pda_irqcount 497 decl %gs:pda_irqcount
506#ifdef CONFIG_DEBUG_INFO 498 leaveq
507 movq RBP(%rdi),%rbp
508 CFI_DEF_CFA_REGISTER rsp 499 CFI_DEF_CFA_REGISTER rsp
509#endif 500 CFI_ADJUST_CFA_OFFSET -8
510 leaq ARGOFFSET(%rdi),%rsp /*todo This needs CFI annotation! */
511exit_intr: 501exit_intr:
512 GET_THREAD_INFO(%rcx) 502 GET_THREAD_INFO(%rcx)
513 testl $3,CS-ARGOFFSET(%rsp) 503 testl $3,CS-ARGOFFSET(%rsp)
@@ -589,14 +579,16 @@ retint_kernel:
589 call preempt_schedule_irq 579 call preempt_schedule_irq
590 jmp exit_intr 580 jmp exit_intr
591#endif 581#endif
582
592 CFI_ENDPROC 583 CFI_ENDPROC
584END(common_interrupt)
593 585
594/* 586/*
595 * APIC interrupts. 587 * APIC interrupts.
596 */ 588 */
597 .macro apicinterrupt num,func 589 .macro apicinterrupt num,func
598 INTR_FRAME 590 INTR_FRAME
599 pushq $\num-256 591 pushq $~(\num)
600 CFI_ADJUST_CFA_OFFSET 8 592 CFI_ADJUST_CFA_OFFSET 8
601 interrupt \func 593 interrupt \func
602 jmp ret_from_intr 594 jmp ret_from_intr
@@ -605,17 +597,21 @@ retint_kernel:
605 597
606ENTRY(thermal_interrupt) 598ENTRY(thermal_interrupt)
607 apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt 599 apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
600END(thermal_interrupt)
608 601
609ENTRY(threshold_interrupt) 602ENTRY(threshold_interrupt)
610 apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt 603 apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
604END(threshold_interrupt)
611 605
612#ifdef CONFIG_SMP 606#ifdef CONFIG_SMP
613ENTRY(reschedule_interrupt) 607ENTRY(reschedule_interrupt)
614 apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt 608 apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
609END(reschedule_interrupt)
615 610
616 .macro INVALIDATE_ENTRY num 611 .macro INVALIDATE_ENTRY num
617ENTRY(invalidate_interrupt\num) 612ENTRY(invalidate_interrupt\num)
618 apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt 613 apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
614END(invalidate_interrupt\num)
619 .endm 615 .endm
620 616
621 INVALIDATE_ENTRY 0 617 INVALIDATE_ENTRY 0
@@ -629,17 +625,21 @@ ENTRY(invalidate_interrupt\num)
629 625
630ENTRY(call_function_interrupt) 626ENTRY(call_function_interrupt)
631 apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt 627 apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
628END(call_function_interrupt)
632#endif 629#endif
633 630
634#ifdef CONFIG_X86_LOCAL_APIC 631#ifdef CONFIG_X86_LOCAL_APIC
635ENTRY(apic_timer_interrupt) 632ENTRY(apic_timer_interrupt)
636 apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt 633 apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
634END(apic_timer_interrupt)
637 635
638ENTRY(error_interrupt) 636ENTRY(error_interrupt)
639 apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt 637 apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
638END(error_interrupt)
640 639
641ENTRY(spurious_interrupt) 640ENTRY(spurious_interrupt)
642 apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt 641 apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
642END(spurious_interrupt)
643#endif 643#endif
644 644
645/* 645/*
@@ -777,6 +777,7 @@ error_kernelspace:
777 cmpq $gs_change,RIP(%rsp) 777 cmpq $gs_change,RIP(%rsp)
778 je error_swapgs 778 je error_swapgs
779 jmp error_sti 779 jmp error_sti
780END(error_entry)
780 781
781 /* Reload gs selector with exception handling */ 782 /* Reload gs selector with exception handling */
782 /* edi: new selector */ 783 /* edi: new selector */
@@ -794,6 +795,7 @@ gs_change:
794 CFI_ADJUST_CFA_OFFSET -8 795 CFI_ADJUST_CFA_OFFSET -8
795 ret 796 ret
796 CFI_ENDPROC 797 CFI_ENDPROC
798ENDPROC(load_gs_index)
797 799
798 .section __ex_table,"a" 800 .section __ex_table,"a"
799 .align 8 801 .align 8
@@ -847,7 +849,7 @@ ENTRY(kernel_thread)
847 UNFAKE_STACK_FRAME 849 UNFAKE_STACK_FRAME
848 ret 850 ret
849 CFI_ENDPROC 851 CFI_ENDPROC
850 852ENDPROC(kernel_thread)
851 853
852child_rip: 854child_rip:
853 /* 855 /*
@@ -860,6 +862,7 @@ child_rip:
860 # exit 862 # exit
861 xorl %edi, %edi 863 xorl %edi, %edi
862 call do_exit 864 call do_exit
865ENDPROC(child_rip)
863 866
864/* 867/*
865 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly. 868 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
@@ -889,19 +892,24 @@ ENTRY(execve)
889 UNFAKE_STACK_FRAME 892 UNFAKE_STACK_FRAME
890 ret 893 ret
891 CFI_ENDPROC 894 CFI_ENDPROC
895ENDPROC(execve)
892 896
893KPROBE_ENTRY(page_fault) 897KPROBE_ENTRY(page_fault)
894 errorentry do_page_fault 898 errorentry do_page_fault
899END(page_fault)
895 .previous .text 900 .previous .text
896 901
897ENTRY(coprocessor_error) 902ENTRY(coprocessor_error)
898 zeroentry do_coprocessor_error 903 zeroentry do_coprocessor_error
904END(coprocessor_error)
899 905
900ENTRY(simd_coprocessor_error) 906ENTRY(simd_coprocessor_error)
901 zeroentry do_simd_coprocessor_error 907 zeroentry do_simd_coprocessor_error
908END(simd_coprocessor_error)
902 909
903ENTRY(device_not_available) 910ENTRY(device_not_available)
904 zeroentry math_state_restore 911 zeroentry math_state_restore
912END(device_not_available)
905 913
906 /* runs on exception stack */ 914 /* runs on exception stack */
907KPROBE_ENTRY(debug) 915KPROBE_ENTRY(debug)
@@ -911,6 +919,7 @@ KPROBE_ENTRY(debug)
911 paranoidentry do_debug, DEBUG_STACK 919 paranoidentry do_debug, DEBUG_STACK
912 jmp paranoid_exit 920 jmp paranoid_exit
913 CFI_ENDPROC 921 CFI_ENDPROC
922END(debug)
914 .previous .text 923 .previous .text
915 924
916 /* runs on exception stack */ 925 /* runs on exception stack */
@@ -961,6 +970,7 @@ paranoid_schedule:
961 cli 970 cli
962 jmp paranoid_userspace 971 jmp paranoid_userspace
963 CFI_ENDPROC 972 CFI_ENDPROC
973END(nmi)
964 .previous .text 974 .previous .text
965 975
966KPROBE_ENTRY(int3) 976KPROBE_ENTRY(int3)
@@ -970,22 +980,28 @@ KPROBE_ENTRY(int3)
970 paranoidentry do_int3, DEBUG_STACK 980 paranoidentry do_int3, DEBUG_STACK
971 jmp paranoid_exit 981 jmp paranoid_exit
972 CFI_ENDPROC 982 CFI_ENDPROC
983END(int3)
973 .previous .text 984 .previous .text
974 985
975ENTRY(overflow) 986ENTRY(overflow)
976 zeroentry do_overflow 987 zeroentry do_overflow
988END(overflow)
977 989
978ENTRY(bounds) 990ENTRY(bounds)
979 zeroentry do_bounds 991 zeroentry do_bounds
992END(bounds)
980 993
981ENTRY(invalid_op) 994ENTRY(invalid_op)
982 zeroentry do_invalid_op 995 zeroentry do_invalid_op
996END(invalid_op)
983 997
984ENTRY(coprocessor_segment_overrun) 998ENTRY(coprocessor_segment_overrun)
985 zeroentry do_coprocessor_segment_overrun 999 zeroentry do_coprocessor_segment_overrun
1000END(coprocessor_segment_overrun)
986 1001
987ENTRY(reserved) 1002ENTRY(reserved)
988 zeroentry do_reserved 1003 zeroentry do_reserved
1004END(reserved)
989 1005
990 /* runs on exception stack */ 1006 /* runs on exception stack */
991ENTRY(double_fault) 1007ENTRY(double_fault)
@@ -993,12 +1009,15 @@ ENTRY(double_fault)
993 paranoidentry do_double_fault 1009 paranoidentry do_double_fault
994 jmp paranoid_exit 1010 jmp paranoid_exit
995 CFI_ENDPROC 1011 CFI_ENDPROC
1012END(double_fault)
996 1013
997ENTRY(invalid_TSS) 1014ENTRY(invalid_TSS)
998 errorentry do_invalid_TSS 1015 errorentry do_invalid_TSS
1016END(invalid_TSS)
999 1017
1000ENTRY(segment_not_present) 1018ENTRY(segment_not_present)
1001 errorentry do_segment_not_present 1019 errorentry do_segment_not_present
1020END(segment_not_present)
1002 1021
1003 /* runs on exception stack */ 1022 /* runs on exception stack */
1004ENTRY(stack_segment) 1023ENTRY(stack_segment)
@@ -1006,19 +1025,24 @@ ENTRY(stack_segment)
1006 paranoidentry do_stack_segment 1025 paranoidentry do_stack_segment
1007 jmp paranoid_exit 1026 jmp paranoid_exit
1008 CFI_ENDPROC 1027 CFI_ENDPROC
1028END(stack_segment)
1009 1029
1010KPROBE_ENTRY(general_protection) 1030KPROBE_ENTRY(general_protection)
1011 errorentry do_general_protection 1031 errorentry do_general_protection
1032END(general_protection)
1012 .previous .text 1033 .previous .text
1013 1034
1014ENTRY(alignment_check) 1035ENTRY(alignment_check)
1015 errorentry do_alignment_check 1036 errorentry do_alignment_check
1037END(alignment_check)
1016 1038
1017ENTRY(divide_error) 1039ENTRY(divide_error)
1018 zeroentry do_divide_error 1040 zeroentry do_divide_error
1041END(divide_error)
1019 1042
1020ENTRY(spurious_interrupt_bug) 1043ENTRY(spurious_interrupt_bug)
1021 zeroentry do_spurious_interrupt_bug 1044 zeroentry do_spurious_interrupt_bug
1045END(spurious_interrupt_bug)
1022 1046
1023#ifdef CONFIG_X86_MCE 1047#ifdef CONFIG_X86_MCE
1024 /* runs on exception stack */ 1048 /* runs on exception stack */
@@ -1029,6 +1053,7 @@ ENTRY(machine_check)
1029 paranoidentry do_machine_check 1053 paranoidentry do_machine_check
1030 jmp paranoid_exit 1054 jmp paranoid_exit
1031 CFI_ENDPROC 1055 CFI_ENDPROC
1056END(machine_check)
1032#endif 1057#endif
1033 1058
1034ENTRY(call_softirq) 1059ENTRY(call_softirq)
@@ -1046,3 +1071,37 @@ ENTRY(call_softirq)
1046 decl %gs:pda_irqcount 1071 decl %gs:pda_irqcount
1047 ret 1072 ret
1048 CFI_ENDPROC 1073 CFI_ENDPROC
1074ENDPROC(call_softirq)
1075
1076#ifdef CONFIG_STACK_UNWIND
1077ENTRY(arch_unwind_init_running)
1078 CFI_STARTPROC
1079 movq %r15, R15(%rdi)
1080 movq %r14, R14(%rdi)
1081 xchgq %rsi, %rdx
1082 movq %r13, R13(%rdi)
1083 movq %r12, R12(%rdi)
1084 xorl %eax, %eax
1085 movq %rbp, RBP(%rdi)
1086 movq %rbx, RBX(%rdi)
1087 movq (%rsp), %rcx
1088 movq %rax, R11(%rdi)
1089 movq %rax, R10(%rdi)
1090 movq %rax, R9(%rdi)
1091 movq %rax, R8(%rdi)
1092 movq %rax, RAX(%rdi)
1093 movq %rax, RCX(%rdi)
1094 movq %rax, RDX(%rdi)
1095 movq %rax, RSI(%rdi)
1096 movq %rax, RDI(%rdi)
1097 movq %rax, ORIG_RAX(%rdi)
1098 movq %rcx, RIP(%rdi)
1099 leaq 8(%rsp), %rcx
1100 movq $__KERNEL_CS, CS(%rdi)
1101 movq %rax, EFLAGS(%rdi)
1102 movq %rcx, RSP(%rdi)
1103 movq $__KERNEL_DS, SS(%rdi)
1104 jmpq *%rdx
1105 CFI_ENDPROC
1106ENDPROC(arch_unwind_init_running)
1107#endif
diff --git a/arch/x86_64/kernel/genapic_flat.c b/arch/x86_64/kernel/genapic_flat.c
index 1a2ab825be98..21c7066e236a 100644
--- a/arch/x86_64/kernel/genapic_flat.c
+++ b/arch/x86_64/kernel/genapic_flat.c
@@ -78,22 +78,29 @@ static void flat_send_IPI_mask(cpumask_t cpumask, int vector)
78 78
79static void flat_send_IPI_allbutself(int vector) 79static void flat_send_IPI_allbutself(int vector)
80{ 80{
81#ifndef CONFIG_HOTPLUG_CPU 81#ifdef CONFIG_HOTPLUG_CPU
82 if (((num_online_cpus()) - 1) >= 1) 82 int hotplug = 1;
83 __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL);
84#else 83#else
85 cpumask_t allbutme = cpu_online_map; 84 int hotplug = 0;
85#endif
86 if (hotplug || vector == NMI_VECTOR) {
87 cpumask_t allbutme = cpu_online_map;
86 88
87 cpu_clear(smp_processor_id(), allbutme); 89 cpu_clear(smp_processor_id(), allbutme);
88 90
89 if (!cpus_empty(allbutme)) 91 if (!cpus_empty(allbutme))
90 flat_send_IPI_mask(allbutme, vector); 92 flat_send_IPI_mask(allbutme, vector);
91#endif 93 } else if (num_online_cpus() > 1) {
94 __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL);
95 }
92} 96}
93 97
94static void flat_send_IPI_all(int vector) 98static void flat_send_IPI_all(int vector)
95{ 99{
96 __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL); 100 if (vector == NMI_VECTOR)
101 flat_send_IPI_mask(cpu_online_map, vector);
102 else
103 __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
97} 104}
98 105
99static int flat_apic_id_registered(void) 106static int flat_apic_id_registered(void)
@@ -108,10 +115,7 @@ static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask)
108 115
109static unsigned int phys_pkg_id(int index_msb) 116static unsigned int phys_pkg_id(int index_msb)
110{ 117{
111 u32 ebx; 118 return hard_smp_processor_id() >> index_msb;
112
113 ebx = cpuid_ebx(1);
114 return ((ebx >> 24) & 0xFF) >> index_msb;
115} 119}
116 120
117struct genapic apic_flat = { 121struct genapic apic_flat = {
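
The flat_send_IPI_allbutself() rework above avoids the APIC broadcast shortcut whenever CPU hotplug is configured or an NMI is being delivered, and falls back to an explicit mask of online CPUs. A minimal userspace sketch of that decision (not kernel code; the NMI_VECTOR value and the 64-bit CPU mask are placeholders for illustration only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NMI_VECTOR 2 /* placeholder value, for illustration only */

/* Model cpu_online_map as a 64-bit mask and "sending" as printing. */
static void send_ipi_mask(uint64_t mask, int vector)
{
    printf("IPI vector 0x%x to mask 0x%llx\n", vector, (unsigned long long)mask);
}

static void send_ipi_shortcut_allbutself(int vector)
{
    printf("IPI vector 0x%x via ALLBUT shortcut\n", vector);
}

static void send_ipi_allbutself(uint64_t online, int self, int vector, bool hotplug)
{
    if (hotplug || vector == NMI_VECTOR) {
        uint64_t allbutme = online & ~(1ull << self); /* cpu_clear(self, ...) */
        if (allbutme)
            send_ipi_mask(allbutme, vector);
    } else if (__builtin_popcountll(online) > 1) {
        send_ipi_shortcut_allbutself(vector);
    }
}

int main(void)
{
    send_ipi_allbutself(0xf, 0, NMI_VECTOR, false); /* NMI -> per-CPU mask */
    send_ipi_allbutself(0xf, 0, 0x30, false);       /* normal -> shortcut */
    return 0;
}

The same reasoning drives the flat_send_IPI_all() change above: an NMI always goes out as a per-CPU mask send rather than through the ALLINC shortcut.
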
diff --git a/arch/x86_64/kernel/head64.c b/arch/x86_64/kernel/head64.c
index cea20a66c150..e6a71c9556d9 100644
--- a/arch/x86_64/kernel/head64.c
+++ b/arch/x86_64/kernel/head64.c
@@ -2,8 +2,6 @@
2 * linux/arch/x86_64/kernel/head64.c -- prepare to run common code 2 * linux/arch/x86_64/kernel/head64.c -- prepare to run common code
3 * 3 *
4 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE 4 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
5 *
6 * $Id: head64.c,v 1.22 2001/07/06 14:28:20 ak Exp $
7 */ 5 */
8 6
9#include <linux/init.h> 7#include <linux/init.h>
diff --git a/arch/x86_64/kernel/i8259.c b/arch/x86_64/kernel/i8259.c
index 5ecd34ab8c2b..86b2c1e197aa 100644
--- a/arch/x86_64/kernel/i8259.c
+++ b/arch/x86_64/kernel/i8259.c
@@ -44,11 +44,11 @@
44 BI(x,8) BI(x,9) BI(x,a) BI(x,b) \ 44 BI(x,8) BI(x,9) BI(x,a) BI(x,b) \
45 BI(x,c) BI(x,d) BI(x,e) BI(x,f) 45 BI(x,c) BI(x,d) BI(x,e) BI(x,f)
46 46
47#define BUILD_14_IRQS(x) \ 47#define BUILD_15_IRQS(x) \
48 BI(x,0) BI(x,1) BI(x,2) BI(x,3) \ 48 BI(x,0) BI(x,1) BI(x,2) BI(x,3) \
49 BI(x,4) BI(x,5) BI(x,6) BI(x,7) \ 49 BI(x,4) BI(x,5) BI(x,6) BI(x,7) \
50 BI(x,8) BI(x,9) BI(x,a) BI(x,b) \ 50 BI(x,8) BI(x,9) BI(x,a) BI(x,b) \
51 BI(x,c) BI(x,d) 51 BI(x,c) BI(x,d) BI(x,e)
52 52
53/* 53/*
54 * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts: 54 * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
@@ -73,13 +73,13 @@ BUILD_16_IRQS(0x8) BUILD_16_IRQS(0x9) BUILD_16_IRQS(0xa) BUILD_16_IRQS(0xb)
73BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd) 73BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd)
74 74
75#ifdef CONFIG_PCI_MSI 75#ifdef CONFIG_PCI_MSI
76 BUILD_14_IRQS(0xe) 76 BUILD_15_IRQS(0xe)
77#endif 77#endif
78 78
79#endif 79#endif
80 80
81#undef BUILD_16_IRQS 81#undef BUILD_16_IRQS
82#undef BUILD_14_IRQS 82#undef BUILD_15_IRQS
83#undef BI 83#undef BI
84 84
85 85
@@ -92,11 +92,11 @@ BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd)
92 IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \ 92 IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \
93 IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f) 93 IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f)
94 94
95#define IRQLIST_14(x) \ 95#define IRQLIST_15(x) \
96 IRQ(x,0), IRQ(x,1), IRQ(x,2), IRQ(x,3), \ 96 IRQ(x,0), IRQ(x,1), IRQ(x,2), IRQ(x,3), \
97 IRQ(x,4), IRQ(x,5), IRQ(x,6), IRQ(x,7), \ 97 IRQ(x,4), IRQ(x,5), IRQ(x,6), IRQ(x,7), \
98 IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \ 98 IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \
99 IRQ(x,c), IRQ(x,d) 99 IRQ(x,c), IRQ(x,d), IRQ(x,e)
100 100
101void (*interrupt[NR_IRQS])(void) = { 101void (*interrupt[NR_IRQS])(void) = {
102 IRQLIST_16(0x0), 102 IRQLIST_16(0x0),
@@ -108,7 +108,7 @@ void (*interrupt[NR_IRQS])(void) = {
108 IRQLIST_16(0xc), IRQLIST_16(0xd) 108 IRQLIST_16(0xc), IRQLIST_16(0xd)
109 109
110#ifdef CONFIG_PCI_MSI 110#ifdef CONFIG_PCI_MSI
111 , IRQLIST_14(0xe) 111 , IRQLIST_15(0xe)
112#endif 112#endif
113 113
114#endif 114#endif
@@ -278,7 +278,7 @@ static void mask_and_ack_8259A(unsigned int irq)
278 * Lightweight spurious IRQ detection. We do not want 278 * Lightweight spurious IRQ detection. We do not want
279 * to overdo spurious IRQ handling - it's usually a sign 279 * to overdo spurious IRQ handling - it's usually a sign
280 * of hardware problems, so we only do the checks we can 280 * of hardware problems, so we only do the checks we can
281 * do without slowing down good hardware unnecesserily. 281 * do without slowing down good hardware unnecessarily.
282 * 282 *
283 * Note that IRQ7 and IRQ15 (the two spurious IRQs 283 * Note that IRQ7 and IRQ15 (the two spurious IRQs
284 * usually resulting from the 8259A-1|2 PICs) occur 284 * usually resulting from the 8259A-1|2 PICs) occur
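
The rename from BUILD_14_IRQS/IRQLIST_14 to BUILD_15_IRQS/IRQLIST_15 extends the 0xe block by one stub, ending at vector 0xee, and the two macros must stay in lock-step so the interrupt[] table matches the generated stubs. A small standalone sketch of the counting pattern (BI() here is a stand-in that pastes tokens into hex constants, purely for illustration):

#include <stdio.h>

#define BI(x, y) x##y,
#define BUILD_15(x) \
    BI(x,0) BI(x,1) BI(x,2) BI(x,3) BI(x,4) BI(x,5) BI(x,6) BI(x,7) \
    BI(x,8) BI(x,9) BI(x,a) BI(x,b) BI(x,c) BI(x,d) BI(x,e)

/* fifteen entries: 0xe0 .. 0xee */
static const int vecs[] = { BUILD_15(0xe) };

int main(void)
{
    printf("entries: %zu, last: 0x%x\n",
           sizeof(vecs) / sizeof(vecs[0]), vecs[14]);
    return 0;
}
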
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 9cc7031b7151..c768d8a036d0 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -41,6 +41,7 @@
41#include <asm/mach_apic.h> 41#include <asm/mach_apic.h>
42#include <asm/acpi.h> 42#include <asm/acpi.h>
43#include <asm/dma.h> 43#include <asm/dma.h>
44#include <asm/nmi.h>
44 45
45#define __apicdebuginit __init 46#define __apicdebuginit __init
46 47
@@ -56,6 +57,7 @@ int timer_over_8254 __initdata = 0;
56static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; 57static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
57 58
58static DEFINE_SPINLOCK(ioapic_lock); 59static DEFINE_SPINLOCK(ioapic_lock);
60static DEFINE_SPINLOCK(vector_lock);
59 61
60/* 62/*
61 * # of IRQ routing registers 63 * # of IRQ routing registers
@@ -317,7 +319,7 @@ void __init check_ioapic(void)
317 vendor &= 0xffff; 319 vendor &= 0xffff;
318 switch (vendor) { 320 switch (vendor) {
319 case PCI_VENDOR_ID_VIA: 321 case PCI_VENDOR_ID_VIA:
320#ifdef CONFIG_GART_IOMMU 322#ifdef CONFIG_IOMMU
321 if ((end_pfn > MAX_DMA32_PFN || 323 if ((end_pfn > MAX_DMA32_PFN ||
322 force_iommu) && 324 force_iommu) &&
323 !iommu_aperture_allowed) { 325 !iommu_aperture_allowed) {
@@ -834,10 +836,17 @@ u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
834int assign_irq_vector(int irq) 836int assign_irq_vector(int irq)
835{ 837{
836 static int current_vector = FIRST_DEVICE_VECTOR, offset = 0; 838 static int current_vector = FIRST_DEVICE_VECTOR, offset = 0;
839 unsigned long flags;
840 int vector;
837 841
838 BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS); 842 BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
839 if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) 843
844 spin_lock_irqsave(&vector_lock, flags);
845
846 if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
847 spin_unlock_irqrestore(&vector_lock, flags);
840 return IO_APIC_VECTOR(irq); 848 return IO_APIC_VECTOR(irq);
849 }
841next: 850next:
842 current_vector += 8; 851 current_vector += 8;
843 if (current_vector == IA32_SYSCALL_VECTOR) 852 if (current_vector == IA32_SYSCALL_VECTOR)
@@ -849,11 +858,14 @@ next:
849 current_vector = FIRST_DEVICE_VECTOR + offset; 858 current_vector = FIRST_DEVICE_VECTOR + offset;
850 } 859 }
851 860
852 vector_irq[current_vector] = irq; 861 vector = current_vector;
862 vector_irq[vector] = irq;
853 if (irq != AUTO_ASSIGN) 863 if (irq != AUTO_ASSIGN)
854 IO_APIC_VECTOR(irq) = current_vector; 864 IO_APIC_VECTOR(irq) = vector;
865
866 spin_unlock_irqrestore(&vector_lock, flags);
855 867
856 return current_vector; 868 return vector;
857} 869}
858 870
859extern void (*interrupt[NR_IRQS])(void); 871extern void (*interrupt[NR_IRQS])(void);
@@ -866,21 +878,14 @@ static struct hw_interrupt_type ioapic_edge_type;
866 878
867static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger) 879static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger)
868{ 880{
869 if (use_pci_vector() && !platform_legacy_irq(irq)) { 881 unsigned idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
870 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || 882
871 trigger == IOAPIC_LEVEL) 883 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
872 irq_desc[vector].handler = &ioapic_level_type; 884 trigger == IOAPIC_LEVEL)
873 else 885 irq_desc[idx].handler = &ioapic_level_type;
874 irq_desc[vector].handler = &ioapic_edge_type; 886 else
875 set_intr_gate(vector, interrupt[vector]); 887 irq_desc[idx].handler = &ioapic_edge_type;
876 } else { 888 set_intr_gate(vector, interrupt[idx]);
877 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
878 trigger == IOAPIC_LEVEL)
879 irq_desc[irq].handler = &ioapic_level_type;
880 else
881 irq_desc[irq].handler = &ioapic_edge_type;
882 set_intr_gate(vector, interrupt[irq]);
883 }
884} 889}
885 890
886static void __init setup_IO_APIC_irqs(void) 891static void __init setup_IO_APIC_irqs(void)
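
assign_irq_vector() above now serializes its static allocator state behind the new vector_lock, and an IRQ that already owns a vector returns it without advancing current_vector. A hedged userspace model of that pattern (a pthread mutex standing in for the spinlock; the FIRST_DEVICE_VECTOR value is assumed here):

#include <pthread.h>
#include <stdio.h>

#define NR_IRQS 32
#define FIRST_DEVICE_VECTOR 0x31 /* assumed value, for illustration */

static pthread_mutex_t vector_lock = PTHREAD_MUTEX_INITIALIZER;
static int irq_vector[NR_IRQS];  /* 0 = not yet assigned */
static int current_vector = FIRST_DEVICE_VECTOR;

static int assign_irq_vector(int irq)
{
    int vector;

    pthread_mutex_lock(&vector_lock);
    if (irq_vector[irq] > 0) {            /* already assigned: reuse it */
        vector = irq_vector[irq];
        pthread_mutex_unlock(&vector_lock);
        return vector;
    }
    current_vector += 8;                  /* same stride as the kernel */
    vector = current_vector;
    irq_vector[irq] = vector;
    pthread_mutex_unlock(&vector_lock);
    return vector;
}

int main(void)
{
    printf("irq 4 -> 0x%x\n", assign_irq_vector(4));
    printf("irq 4 -> 0x%x (cached)\n", assign_irq_vector(4));
    printf("irq 5 -> 0x%x\n", assign_irq_vector(5));
    return 0;
}

Copying into a local `vector` before dropping the lock, as the hunk does, is what keeps the return value consistent even if another CPU advances current_vector immediately afterwards.
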
diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c
index d8bd0b345b1e..3be0a7e4bf08 100644
--- a/arch/x86_64/kernel/irq.c
+++ b/arch/x86_64/kernel/irq.c
@@ -26,6 +26,30 @@ atomic_t irq_mis_count;
26#endif 26#endif
27#endif 27#endif
28 28
29#ifdef CONFIG_DEBUG_STACKOVERFLOW
30/*
31 * Probabilistic stack overflow check:
32 *
33 * Only check the stack in process context, because everything else
34 * runs on the big interrupt stacks. Checking reliably is too expensive,
35 * so we just check from interrupts.
36 */
37static inline void stack_overflow_check(struct pt_regs *regs)
38{
39 u64 curbase = (u64) current->thread_info;
40 static unsigned long warned = -60*HZ;
41
42 if (regs->rsp >= curbase && regs->rsp <= curbase + THREAD_SIZE &&
43 regs->rsp < curbase + sizeof(struct thread_info) + 128 &&
44 time_after(jiffies, warned + 60*HZ)) {
45 printk("do_IRQ: %s near stack overflow (cur:%Lx,rsp:%lx)\n",
46 current->comm, curbase, regs->rsp);
47 show_stack(NULL,NULL);
48 warned = jiffies;
49 }
50}
51#endif
52
29/* 53/*
30 * Generic, controller-independent functions: 54 * Generic, controller-independent functions:
31 */ 55 */
@@ -39,7 +63,7 @@ int show_interrupts(struct seq_file *p, void *v)
39 if (i == 0) { 63 if (i == 0) {
40 seq_printf(p, " "); 64 seq_printf(p, " ");
41 for_each_online_cpu(j) 65 for_each_online_cpu(j)
42 seq_printf(p, "CPU%d ",j); 66 seq_printf(p, "CPU%-8d",j);
43 seq_putc(p, '\n'); 67 seq_putc(p, '\n');
44 } 68 }
45 69
@@ -91,12 +115,14 @@ skip:
91 */ 115 */
92asmlinkage unsigned int do_IRQ(struct pt_regs *regs) 116asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
93{ 117{
94 /* high bits used in ret_from_ code */ 118 /* high bit used in ret_from_ code */
95 unsigned irq = regs->orig_rax & 0xff; 119 unsigned irq = ~regs->orig_rax;
96 120
97 exit_idle(); 121 exit_idle();
98 irq_enter(); 122 irq_enter();
99 123#ifdef CONFIG_DEBUG_STACKOVERFLOW
124 stack_overflow_check(regs);
125#endif
100 __do_IRQ(irq, regs); 126 __do_IRQ(irq, regs);
101 irq_exit(); 127 irq_exit();
102 128
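
The new stack_overflow_check() only runs from interrupt context on process stacks and warns when %rsp has sunk to within sizeof(struct thread_info) + 128 bytes of the stack base, rate-limited to once a minute. A standalone model of just the bounds test (THREAD_SIZE and the thread_info size are assumed values; the jiffies rate limit is omitted):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE       8192   /* assumed 8 KiB kernel stack */
#define THREAD_INFO_SIZE  64     /* placeholder for sizeof(struct thread_info) */

static bool near_stack_overflow(uint64_t curbase, uint64_t rsp)
{
    /* thread_info lives at the bottom of the stack; rsp grows down toward it */
    return rsp >= curbase &&
           rsp <= curbase + THREAD_SIZE &&
           rsp <  curbase + THREAD_INFO_SIZE + 128;
}

int main(void)
{
    uint64_t base = 0xffff810012340000ull;

    printf("%d\n", near_stack_overflow(base, base + 4096)); /* plenty left: 0 */
    printf("%d\n", near_stack_overflow(base, base + 100));  /* nearly gone: 1 */
    return 0;
}
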
diff --git a/arch/x86_64/kernel/k8.c b/arch/x86_64/kernel/k8.c
new file mode 100644
index 000000000000..6416682d33d0
--- /dev/null
+++ b/arch/x86_64/kernel/k8.c
@@ -0,0 +1,118 @@
1/*
2 * Shared support code for AMD K8 northbridges and derivates.
3 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
4 */
5#include <linux/gfp.h>
6#include <linux/types.h>
7#include <linux/init.h>
8#include <linux/errno.h>
9#include <linux/module.h>
10#include <linux/spinlock.h>
11#include <asm/k8.h>
12
13int num_k8_northbridges;
14EXPORT_SYMBOL(num_k8_northbridges);
15
16static u32 *flush_words;
17
18struct pci_device_id k8_nb_ids[] = {
19 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
20 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },
21 {}
22};
23EXPORT_SYMBOL(k8_nb_ids);
24
25struct pci_dev **k8_northbridges;
26EXPORT_SYMBOL(k8_northbridges);
27
28static struct pci_dev *next_k8_northbridge(struct pci_dev *dev)
29{
30 do {
31 dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
32 if (!dev)
33 break;
34 } while (!pci_match_id(&k8_nb_ids[0], dev));
35 return dev;
36}
37
38int cache_k8_northbridges(void)
39{
40 int i;
41 struct pci_dev *dev;
42 if (num_k8_northbridges)
43 return 0;
44
45 num_k8_northbridges = 0;
46 dev = NULL;
47 while ((dev = next_k8_northbridge(dev)) != NULL)
48 num_k8_northbridges++;
49
50 k8_northbridges = kmalloc((num_k8_northbridges + 1) * sizeof(void *),
51 GFP_KERNEL);
52 if (!k8_northbridges)
53 return -ENOMEM;
54
55 flush_words = kmalloc(num_k8_northbridges * sizeof(u32), GFP_KERNEL);
56 if (!flush_words) {
57 kfree(k8_northbridges);
58 return -ENOMEM;
59 }
60
61 dev = NULL;
62 i = 0;
63 while ((dev = next_k8_northbridge(dev)) != NULL) {
64 k8_northbridges[i++] = dev;
65 pci_read_config_dword(dev, 0x9c, &flush_words[i]);
66 }
67 k8_northbridges[i] = NULL;
68 return 0;
69}
70EXPORT_SYMBOL_GPL(cache_k8_northbridges);
71
72/* Ignores subdevice/subvendor but as far as I can figure out
73 they're useless anyways */
74int __init early_is_k8_nb(u32 device)
75{
76 struct pci_device_id *id;
77 u32 vendor = device & 0xffff;
78 device >>= 16;
79 for (id = k8_nb_ids; id->vendor; id++)
80 if (vendor == id->vendor && device == id->device)
81 return 1;
82 return 0;
83}
84
85void k8_flush_garts(void)
86{
87 int flushed, i;
88 unsigned long flags;
89 static DEFINE_SPINLOCK(gart_lock);
90
91 /* Avoid races between AGP and IOMMU. In theory it's not needed
92 but I'm not sure if the hardware won't lose flush requests
93 when another is pending. This whole thing is so expensive anyways
94 that it doesn't matter to serialize more. -AK */
95 spin_lock_irqsave(&gart_lock, flags);
96 flushed = 0;
97 for (i = 0; i < num_k8_northbridges; i++) {
98 pci_write_config_dword(k8_northbridges[i], 0x9c,
99 flush_words[i]|1);
100 flushed++;
101 }
102 for (i = 0; i < num_k8_northbridges; i++) {
103 u32 w;
104 /* Make sure the hardware actually executed the flush*/
105 for (;;) {
106 pci_read_config_dword(k8_northbridges[i],
107 0x9c, &w);
108 if (!(w & 1))
109 break;
110 cpu_relax();
111 }
112 }
113 spin_unlock_irqrestore(&gart_lock, flags);
114 if (!flushed)
115 printk("nothing to flush?\n");
116}
117EXPORT_SYMBOL_GPL(k8_flush_garts);
118
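
k8_flush_garts() in the new file kicks off the GART flush on every northbridge first and only then polls each one until the hardware clears the flush bit, so the flushes run in parallel rather than back to back. A toy userspace model of that write-then-poll shape (flush_reg[] stands in for PCI config register 0x9c; hw_tick() fakes hardware progress):

#include <stdio.h>

#define NB_COUNT 4

static unsigned int flush_reg[NB_COUNT];     /* models PCI config reg 0x9c */

static void hw_tick(void)                    /* pretend hardware progress */
{
    for (int i = 0; i < NB_COUNT; i++)
        flush_reg[i] &= ~1u;
}

int main(void)
{
    for (int i = 0; i < NB_COUNT; i++)       /* kick off all flushes */
        flush_reg[i] |= 1u;

    for (int i = 0; i < NB_COUNT; i++) {     /* then wait for each to finish */
        while (flush_reg[i] & 1u)
            hw_tick();                       /* cpu_relax() in the kernel */
        printf("northbridge %d flushed\n", i);
    }
    return 0;
}
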
diff --git a/arch/x86_64/kernel/machine_kexec.c b/arch/x86_64/kernel/machine_kexec.c
index 25ac8a3faae6..83fb24a02821 100644
--- a/arch/x86_64/kernel/machine_kexec.c
+++ b/arch/x86_64/kernel/machine_kexec.c
@@ -149,8 +149,8 @@ typedef NORET_TYPE void (*relocate_new_kernel_t)(unsigned long indirection_page,
149 unsigned long start_address, 149 unsigned long start_address,
150 unsigned long pgtable) ATTRIB_NORET; 150 unsigned long pgtable) ATTRIB_NORET;
151 151
152const extern unsigned char relocate_new_kernel[]; 152extern const unsigned char relocate_new_kernel[];
153const extern unsigned long relocate_new_kernel_size; 153extern const unsigned long relocate_new_kernel_size;
154 154
155int machine_kexec_prepare(struct kimage *image) 155int machine_kexec_prepare(struct kimage *image)
156{ 156{
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index c69fc43cee7b..88845674c661 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -562,7 +562,7 @@ static struct sysdev_class mce_sysclass = {
562 set_kset_name("machinecheck"), 562 set_kset_name("machinecheck"),
563}; 563};
564 564
565static DEFINE_PER_CPU(struct sys_device, device_mce); 565DEFINE_PER_CPU(struct sys_device, device_mce);
566 566
567/* Why are there no generic functions for this? */ 567/* Why are there no generic functions for this? */
568#define ACCESSOR(name, var, start) \ 568#define ACCESSOR(name, var, start) \
@@ -629,7 +629,7 @@ static __cpuinit void mce_remove_device(unsigned int cpu)
629#endif 629#endif
630 630
631/* Get notified when a cpu comes on/off. Be hotplug friendly. */ 631/* Get notified when a cpu comes on/off. Be hotplug friendly. */
632static int 632static __cpuinit int
633mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) 633mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
634{ 634{
635 unsigned int cpu = (unsigned long)hcpu; 635 unsigned int cpu = (unsigned long)hcpu;
@@ -647,7 +647,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
647 return NOTIFY_OK; 647 return NOTIFY_OK;
648} 648}
649 649
650static struct notifier_block mce_cpu_notifier = { 650static struct notifier_block __cpuinitdata mce_cpu_notifier = {
651 .notifier_call = mce_cpu_callback, 651 .notifier_call = mce_cpu_callback,
652}; 652};
653 653
diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c
index d13b241ad094..335200aa2737 100644
--- a/arch/x86_64/kernel/mce_amd.c
+++ b/arch/x86_64/kernel/mce_amd.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * (c) 2005 Advanced Micro Devices, Inc. 2 * (c) 2005, 2006 Advanced Micro Devices, Inc.
3 * Your use of this code is subject to the terms and conditions of the 3 * Your use of this code is subject to the terms and conditions of the
4 * GNU general public license version 2. See "COPYING" or 4 * GNU general public license version 2. See "COPYING" or
5 * http://www.gnu.org/licenses/gpl.html 5 * http://www.gnu.org/licenses/gpl.html
@@ -8,9 +8,10 @@
8 * 8 *
9 * Support : jacob.shin@amd.com 9 * Support : jacob.shin@amd.com
10 * 10 *
11 * MC4_MISC0 DRAM ECC Error Threshold available under AMD K8 Rev F. 11 * April 2006
12 * MC4_MISC0 exists per physical processor. 12 * - added support for AMD Family 0x10 processors
13 * 13 *
14 * All MC4_MISCi registers are shared between multi-cores
14 */ 15 */
15 16
16#include <linux/cpu.h> 17#include <linux/cpu.h>
@@ -29,32 +30,45 @@
29#include <asm/percpu.h> 30#include <asm/percpu.h>
30#include <asm/idle.h> 31#include <asm/idle.h>
31 32
32#define PFX "mce_threshold: " 33#define PFX "mce_threshold: "
33#define VERSION "version 1.00.9" 34#define VERSION "version 1.1.1"
34#define NR_BANKS 5 35#define NR_BANKS 6
35#define THRESHOLD_MAX 0xFFF 36#define NR_BLOCKS 9
36#define INT_TYPE_APIC 0x00020000 37#define THRESHOLD_MAX 0xFFF
37#define MASK_VALID_HI 0x80000000 38#define INT_TYPE_APIC 0x00020000
38#define MASK_LVTOFF_HI 0x00F00000 39#define MASK_VALID_HI 0x80000000
39#define MASK_COUNT_EN_HI 0x00080000 40#define MASK_LVTOFF_HI 0x00F00000
40#define MASK_INT_TYPE_HI 0x00060000 41#define MASK_COUNT_EN_HI 0x00080000
41#define MASK_OVERFLOW_HI 0x00010000 42#define MASK_INT_TYPE_HI 0x00060000
43#define MASK_OVERFLOW_HI 0x00010000
42#define MASK_ERR_COUNT_HI 0x00000FFF 44#define MASK_ERR_COUNT_HI 0x00000FFF
43#define MASK_OVERFLOW 0x0001000000000000L 45#define MASK_BLKPTR_LO 0xFF000000
46#define MCG_XBLK_ADDR 0xC0000400
44 47
45struct threshold_bank { 48struct threshold_block {
49 unsigned int block;
50 unsigned int bank;
46 unsigned int cpu; 51 unsigned int cpu;
47 u8 bank; 52 u32 address;
48 u8 interrupt_enable; 53 u16 interrupt_enable;
49 u16 threshold_limit; 54 u16 threshold_limit;
50 struct kobject kobj; 55 struct kobject kobj;
56 struct list_head miscj;
51}; 57};
52 58
53static struct threshold_bank threshold_defaults = { 59/* defaults used early on boot */
60static struct threshold_block threshold_defaults = {
54 .interrupt_enable = 0, 61 .interrupt_enable = 0,
55 .threshold_limit = THRESHOLD_MAX, 62 .threshold_limit = THRESHOLD_MAX,
56}; 63};
57 64
65struct threshold_bank {
66 struct kobject kobj;
67 struct threshold_block *blocks;
68 cpumask_t cpus;
69};
70static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
71
58#ifdef CONFIG_SMP 72#ifdef CONFIG_SMP
59static unsigned char shared_bank[NR_BANKS] = { 73static unsigned char shared_bank[NR_BANKS] = {
60 0, 0, 0, 0, 1 74 0, 0, 0, 0, 1
@@ -68,12 +82,12 @@ static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
68 */ 82 */
69 83
70/* must be called with correct cpu affinity */ 84/* must be called with correct cpu affinity */
71static void threshold_restart_bank(struct threshold_bank *b, 85static void threshold_restart_bank(struct threshold_block *b,
72 int reset, u16 old_limit) 86 int reset, u16 old_limit)
73{ 87{
74 u32 mci_misc_hi, mci_misc_lo; 88 u32 mci_misc_hi, mci_misc_lo;
75 89
76 rdmsr(MSR_IA32_MC0_MISC + b->bank * 4, mci_misc_lo, mci_misc_hi); 90 rdmsr(b->address, mci_misc_lo, mci_misc_hi);
77 91
78 if (b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX)) 92 if (b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX))
79 reset = 1; /* limit cannot be lower than err count */ 93 reset = 1; /* limit cannot be lower than err count */
@@ -94,35 +108,57 @@ static void threshold_restart_bank(struct threshold_bank *b,
94 (mci_misc_hi &= ~MASK_INT_TYPE_HI); 108 (mci_misc_hi &= ~MASK_INT_TYPE_HI);
95 109
96 mci_misc_hi |= MASK_COUNT_EN_HI; 110 mci_misc_hi |= MASK_COUNT_EN_HI;
97 wrmsr(MSR_IA32_MC0_MISC + b->bank * 4, mci_misc_lo, mci_misc_hi); 111 wrmsr(b->address, mci_misc_lo, mci_misc_hi);
98} 112}
99 113
114/* cpu init entry point, called from mce.c with preempt off */
100void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c) 115void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
101{ 116{
102 int bank; 117 unsigned int bank, block;
103 u32 mci_misc_lo, mci_misc_hi;
104 unsigned int cpu = smp_processor_id(); 118 unsigned int cpu = smp_processor_id();
119 u32 low = 0, high = 0, address = 0;
105 120
106 for (bank = 0; bank < NR_BANKS; ++bank) { 121 for (bank = 0; bank < NR_BANKS; ++bank) {
107 rdmsr(MSR_IA32_MC0_MISC + bank * 4, mci_misc_lo, mci_misc_hi); 122 for (block = 0; block < NR_BLOCKS; ++block) {
123 if (block == 0)
124 address = MSR_IA32_MC0_MISC + bank * 4;
125 else if (block == 1)
126 address = MCG_XBLK_ADDR
127 + ((low & MASK_BLKPTR_LO) >> 21);
128 else
129 ++address;
130
131 if (rdmsr_safe(address, &low, &high))
132 continue;
108 133
109 /* !valid, !counter present, bios locked */ 134 if (!(high & MASK_VALID_HI)) {
110 if (!(mci_misc_hi & MASK_VALID_HI) || 135 if (block)
111 !(mci_misc_hi & MASK_VALID_HI >> 1) || 136 continue;
112 (mci_misc_hi & MASK_VALID_HI >> 2)) 137 else
113 continue; 138 break;
139 }
114 140
115 per_cpu(bank_map, cpu) |= (1 << bank); 141 if (!(high & MASK_VALID_HI >> 1) ||
142 (high & MASK_VALID_HI >> 2))
143 continue;
116 144
145 if (!block)
146 per_cpu(bank_map, cpu) |= (1 << bank);
117#ifdef CONFIG_SMP 147#ifdef CONFIG_SMP
118 if (shared_bank[bank] && cpu_core_id[cpu]) 148 if (shared_bank[bank] && c->cpu_core_id)
119 continue; 149 break;
120#endif 150#endif
151 high &= ~MASK_LVTOFF_HI;
152 high |= K8_APIC_EXT_LVT_ENTRY_THRESHOLD << 20;
153 wrmsr(address, low, high);
121 154
122 setup_threshold_lvt((mci_misc_hi & MASK_LVTOFF_HI) >> 20); 155 setup_APIC_extened_lvt(K8_APIC_EXT_LVT_ENTRY_THRESHOLD,
123 threshold_defaults.cpu = cpu; 156 THRESHOLD_APIC_VECTOR,
124 threshold_defaults.bank = bank; 157 K8_APIC_EXT_INT_MSG_FIX, 0);
125 threshold_restart_bank(&threshold_defaults, 0, 0); 158
159 threshold_defaults.address = address;
160 threshold_restart_bank(&threshold_defaults, 0, 0);
161 }
126 } 162 }
127} 163}
128 164
@@ -137,8 +173,9 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
137 */ 173 */
138asmlinkage void mce_threshold_interrupt(void) 174asmlinkage void mce_threshold_interrupt(void)
139{ 175{
140 int bank; 176 unsigned int bank, block;
141 struct mce m; 177 struct mce m;
178 u32 low = 0, high = 0, address = 0;
142 179
143 ack_APIC_irq(); 180 ack_APIC_irq();
144 exit_idle(); 181 exit_idle();
@@ -150,15 +187,42 @@ asmlinkage void mce_threshold_interrupt(void)
150 187
151 /* assume first bank caused it */ 188 /* assume first bank caused it */
152 for (bank = 0; bank < NR_BANKS; ++bank) { 189 for (bank = 0; bank < NR_BANKS; ++bank) {
153 m.bank = MCE_THRESHOLD_BASE + bank; 190 for (block = 0; block < NR_BLOCKS; ++block) {
154 rdmsrl(MSR_IA32_MC0_MISC + bank * 4, m.misc); 191 if (block == 0)
192 address = MSR_IA32_MC0_MISC + bank * 4;
193 else if (block == 1)
194 address = MCG_XBLK_ADDR
195 + ((low & MASK_BLKPTR_LO) >> 21);
196 else
197 ++address;
198
199 if (rdmsr_safe(address, &low, &high))
200 continue;
155 201
156 if (m.misc & MASK_OVERFLOW) { 202 if (!(high & MASK_VALID_HI)) {
157 mce_log(&m); 203 if (block)
158 goto out; 204 continue;
205 else
206 break;
207 }
208
209 if (!(high & MASK_VALID_HI >> 1) ||
210 (high & MASK_VALID_HI >> 2))
211 continue;
212
213 if (high & MASK_OVERFLOW_HI) {
214 rdmsrl(address, m.misc);
215 rdmsrl(MSR_IA32_MC0_STATUS + bank * 4,
216 m.status);
217 m.bank = K8_MCE_THRESHOLD_BASE
218 + bank * NR_BLOCKS
219 + block;
220 mce_log(&m);
221 goto out;
222 }
159 } 223 }
160 } 224 }
161 out: 225out:
162 irq_exit(); 226 irq_exit();
163} 227}
164 228
@@ -166,20 +230,12 @@ asmlinkage void mce_threshold_interrupt(void)
166 * Sysfs Interface 230 * Sysfs Interface
167 */ 231 */
168 232
169static struct sysdev_class threshold_sysclass = {
170 set_kset_name("threshold"),
171};
172
173static DEFINE_PER_CPU(struct sys_device, device_threshold);
174
175struct threshold_attr { 233struct threshold_attr {
176 struct attribute attr; 234 struct attribute attr;
177 ssize_t(*show) (struct threshold_bank *, char *); 235 ssize_t(*show) (struct threshold_block *, char *);
178 ssize_t(*store) (struct threshold_bank *, const char *, size_t count); 236 ssize_t(*store) (struct threshold_block *, const char *, size_t count);
179}; 237};
180 238
181static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
182
183static cpumask_t affinity_set(unsigned int cpu) 239static cpumask_t affinity_set(unsigned int cpu)
184{ 240{
185 cpumask_t oldmask = current->cpus_allowed; 241 cpumask_t oldmask = current->cpus_allowed;
@@ -194,15 +250,15 @@ static void affinity_restore(cpumask_t oldmask)
194 set_cpus_allowed(current, oldmask); 250 set_cpus_allowed(current, oldmask);
195} 251}
196 252
197#define SHOW_FIELDS(name) \ 253#define SHOW_FIELDS(name) \
198 static ssize_t show_ ## name(struct threshold_bank * b, char *buf) \ 254static ssize_t show_ ## name(struct threshold_block * b, char *buf) \
199 { \ 255{ \
200 return sprintf(buf, "%lx\n", (unsigned long) b->name); \ 256 return sprintf(buf, "%lx\n", (unsigned long) b->name); \
201 } 257}
202SHOW_FIELDS(interrupt_enable) 258SHOW_FIELDS(interrupt_enable)
203SHOW_FIELDS(threshold_limit) 259SHOW_FIELDS(threshold_limit)
204 260
205static ssize_t store_interrupt_enable(struct threshold_bank *b, 261static ssize_t store_interrupt_enable(struct threshold_block *b,
206 const char *buf, size_t count) 262 const char *buf, size_t count)
207{ 263{
208 char *end; 264 char *end;
@@ -219,7 +275,7 @@ static ssize_t store_interrupt_enable(struct threshold_bank *b,
219 return end - buf; 275 return end - buf;
220} 276}
221 277
222static ssize_t store_threshold_limit(struct threshold_bank *b, 278static ssize_t store_threshold_limit(struct threshold_block *b,
223 const char *buf, size_t count) 279 const char *buf, size_t count)
224{ 280{
225 char *end; 281 char *end;
@@ -242,18 +298,18 @@ static ssize_t store_threshold_limit(struct threshold_bank *b,
242 return end - buf; 298 return end - buf;
243} 299}
244 300
245static ssize_t show_error_count(struct threshold_bank *b, char *buf) 301static ssize_t show_error_count(struct threshold_block *b, char *buf)
246{ 302{
247 u32 high, low; 303 u32 high, low;
248 cpumask_t oldmask; 304 cpumask_t oldmask;
249 oldmask = affinity_set(b->cpu); 305 oldmask = affinity_set(b->cpu);
250 rdmsr(MSR_IA32_MC0_MISC + b->bank * 4, low, high); /* ignore low 32 */ 306 rdmsr(b->address, low, high);
251 affinity_restore(oldmask); 307 affinity_restore(oldmask);
252 return sprintf(buf, "%x\n", 308 return sprintf(buf, "%x\n",
253 (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit)); 309 (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit));
254} 310}
255 311
256static ssize_t store_error_count(struct threshold_bank *b, 312static ssize_t store_error_count(struct threshold_block *b,
257 const char *buf, size_t count) 313 const char *buf, size_t count)
258{ 314{
259 cpumask_t oldmask; 315 cpumask_t oldmask;
@@ -269,13 +325,13 @@ static ssize_t store_error_count(struct threshold_bank *b,
269 .store = _store, \ 325 .store = _store, \
270}; 326};
271 327
272#define ATTR_FIELDS(name) \ 328#define RW_ATTR(name) \
273 static struct threshold_attr name = \ 329static struct threshold_attr name = \
274 THRESHOLD_ATTR(name, 0644, show_## name, store_## name) 330 THRESHOLD_ATTR(name, 0644, show_## name, store_## name)
275 331
276ATTR_FIELDS(interrupt_enable); 332RW_ATTR(interrupt_enable);
277ATTR_FIELDS(threshold_limit); 333RW_ATTR(threshold_limit);
278ATTR_FIELDS(error_count); 334RW_ATTR(error_count);
279 335
280static struct attribute *default_attrs[] = { 336static struct attribute *default_attrs[] = {
281 &interrupt_enable.attr, 337 &interrupt_enable.attr,
@@ -284,12 +340,12 @@ static struct attribute *default_attrs[] = {
284 NULL 340 NULL
285}; 341};
286 342
287#define to_bank(k) container_of(k,struct threshold_bank,kobj) 343#define to_block(k) container_of(k, struct threshold_block, kobj)
288#define to_attr(a) container_of(a,struct threshold_attr,attr) 344#define to_attr(a) container_of(a, struct threshold_attr, attr)
289 345
290static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) 346static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
291{ 347{
292 struct threshold_bank *b = to_bank(kobj); 348 struct threshold_block *b = to_block(kobj);
293 struct threshold_attr *a = to_attr(attr); 349 struct threshold_attr *a = to_attr(attr);
294 ssize_t ret; 350 ssize_t ret;
295 ret = a->show ? a->show(b, buf) : -EIO; 351 ret = a->show ? a->show(b, buf) : -EIO;
@@ -299,7 +355,7 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
299static ssize_t store(struct kobject *kobj, struct attribute *attr, 355static ssize_t store(struct kobject *kobj, struct attribute *attr,
300 const char *buf, size_t count) 356 const char *buf, size_t count)
301{ 357{
302 struct threshold_bank *b = to_bank(kobj); 358 struct threshold_block *b = to_block(kobj);
303 struct threshold_attr *a = to_attr(attr); 359 struct threshold_attr *a = to_attr(attr);
304 ssize_t ret; 360 ssize_t ret;
305 ret = a->store ? a->store(b, buf, count) : -EIO; 361 ret = a->store ? a->store(b, buf, count) : -EIO;
@@ -316,69 +372,174 @@ static struct kobj_type threshold_ktype = {
316 .default_attrs = default_attrs, 372 .default_attrs = default_attrs,
317}; 373};
318 374
375static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
376 unsigned int bank,
377 unsigned int block,
378 u32 address)
379{
380 int err;
381 u32 low, high;
382 struct threshold_block *b = NULL;
383
384 if ((bank >= NR_BANKS) || (block >= NR_BLOCKS))
385 return 0;
386
387 if (rdmsr_safe(address, &low, &high))
388 goto recurse;
389
390 if (!(high & MASK_VALID_HI)) {
391 if (block)
392 goto recurse;
393 else
394 return 0;
395 }
396
397 if (!(high & MASK_VALID_HI >> 1) ||
398 (high & MASK_VALID_HI >> 2))
399 goto recurse;
400
401 b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
402 if (!b)
403 return -ENOMEM;
404 memset(b, 0, sizeof(struct threshold_block));
405
406 b->block = block;
407 b->bank = bank;
408 b->cpu = cpu;
409 b->address = address;
410 b->interrupt_enable = 0;
411 b->threshold_limit = THRESHOLD_MAX;
412
413 INIT_LIST_HEAD(&b->miscj);
414
415 if (per_cpu(threshold_banks, cpu)[bank]->blocks)
416 list_add(&b->miscj,
417 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
418 else
419 per_cpu(threshold_banks, cpu)[bank]->blocks = b;
420
421 kobject_set_name(&b->kobj, "misc%i", block);
422 b->kobj.parent = &per_cpu(threshold_banks, cpu)[bank]->kobj;
423 b->kobj.ktype = &threshold_ktype;
424 err = kobject_register(&b->kobj);
425 if (err)
426 goto out_free;
427recurse:
428 if (!block) {
429 address = (low & MASK_BLKPTR_LO) >> 21;
430 if (!address)
431 return 0;
432 address += MCG_XBLK_ADDR;
433 } else
434 ++address;
435
436 err = allocate_threshold_blocks(cpu, bank, ++block, address);
437 if (err)
438 goto out_free;
439
440 return err;
441
442out_free:
443 if (b) {
444 kobject_unregister(&b->kobj);
445 kfree(b);
446 }
447 return err;
448}
449
319/* symlinks sibling shared banks to first core. first core owns dir/files. */ 450/* symlinks sibling shared banks to first core. first core owns dir/files. */
320static __cpuinit int threshold_create_bank(unsigned int cpu, int bank) 451static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
321{ 452{
322 int err = 0; 453 int i, err = 0;
323 struct threshold_bank *b = NULL; 454 struct threshold_bank *b = NULL;
455 cpumask_t oldmask = CPU_MASK_NONE;
456 char name[32];
457
458 sprintf(name, "threshold_bank%i", bank);
324 459
325#ifdef CONFIG_SMP 460#ifdef CONFIG_SMP
326 if (cpu_core_id[cpu] && shared_bank[bank]) { /* symlink */ 461 if (cpu_data[cpu].cpu_core_id && shared_bank[bank]) { /* symlink */
327 char name[16]; 462 i = first_cpu(cpu_core_map[cpu]);
328 unsigned lcpu = first_cpu(cpu_core_map[cpu]); 463
329 if (cpu_core_id[lcpu]) 464 /* first core not up yet */
330 goto out; /* first core not up yet */ 465 if (cpu_data[i].cpu_core_id)
466 goto out;
467
468 /* already linked */
469 if (per_cpu(threshold_banks, cpu)[bank])
470 goto out;
471
472 b = per_cpu(threshold_banks, i)[bank];
331 473
332 b = per_cpu(threshold_banks, lcpu)[bank];
333 if (!b) 474 if (!b)
334 goto out; 475 goto out;
335 sprintf(name, "bank%i", bank); 476
336 err = sysfs_create_link(&per_cpu(device_threshold, cpu).kobj, 477 err = sysfs_create_link(&per_cpu(device_mce, cpu).kobj,
337 &b->kobj, name); 478 &b->kobj, name);
338 if (err) 479 if (err)
339 goto out; 480 goto out;
481
482 b->cpus = cpu_core_map[cpu];
340 per_cpu(threshold_banks, cpu)[bank] = b; 483 per_cpu(threshold_banks, cpu)[bank] = b;
341 goto out; 484 goto out;
342 } 485 }
343#endif 486#endif
344 487
345 b = kmalloc(sizeof(struct threshold_bank), GFP_KERNEL); 488 b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
346 if (!b) { 489 if (!b) {
347 err = -ENOMEM; 490 err = -ENOMEM;
348 goto out; 491 goto out;
349 } 492 }
350 memset(b, 0, sizeof(struct threshold_bank)); 493 memset(b, 0, sizeof(struct threshold_bank));
351 494
352 b->cpu = cpu; 495 kobject_set_name(&b->kobj, "threshold_bank%i", bank);
353 b->bank = bank; 496 b->kobj.parent = &per_cpu(device_mce, cpu).kobj;
354 b->interrupt_enable = 0; 497#ifndef CONFIG_SMP
355 b->threshold_limit = THRESHOLD_MAX; 498 b->cpus = CPU_MASK_ALL;
356 kobject_set_name(&b->kobj, "bank%i", bank); 499#else
357 b->kobj.parent = &per_cpu(device_threshold, cpu).kobj; 500 b->cpus = cpu_core_map[cpu];
358 b->kobj.ktype = &threshold_ktype; 501#endif
359
360 err = kobject_register(&b->kobj); 502 err = kobject_register(&b->kobj);
361 if (err) { 503 if (err)
362 kfree(b); 504 goto out_free;
363 goto out; 505
364 }
365 per_cpu(threshold_banks, cpu)[bank] = b; 506 per_cpu(threshold_banks, cpu)[bank] = b;
366 out: 507
508 oldmask = affinity_set(cpu);
509 err = allocate_threshold_blocks(cpu, bank, 0,
510 MSR_IA32_MC0_MISC + bank * 4);
511 affinity_restore(oldmask);
512
513 if (err)
514 goto out_free;
515
516 for_each_cpu_mask(i, b->cpus) {
517 if (i == cpu)
518 continue;
519
520 err = sysfs_create_link(&per_cpu(device_mce, i).kobj,
521 &b->kobj, name);
522 if (err)
523 goto out;
524
525 per_cpu(threshold_banks, i)[bank] = b;
526 }
527
528 goto out;
529
530out_free:
531 per_cpu(threshold_banks, cpu)[bank] = NULL;
532 kfree(b);
533out:
367 return err; 534 return err;
368} 535}
369 536
370/* create dir/files for all valid threshold banks */ 537/* create dir/files for all valid threshold banks */
371static __cpuinit int threshold_create_device(unsigned int cpu) 538static __cpuinit int threshold_create_device(unsigned int cpu)
372{ 539{
373 int bank; 540 unsigned int bank;
374 int err = 0; 541 int err = 0;
375 542
376 per_cpu(device_threshold, cpu).id = cpu;
377 per_cpu(device_threshold, cpu).cls = &threshold_sysclass;
378 err = sysdev_register(&per_cpu(device_threshold, cpu));
379 if (err)
380 goto out;
381
382 for (bank = 0; bank < NR_BANKS; ++bank) { 543 for (bank = 0; bank < NR_BANKS; ++bank) {
383 if (!(per_cpu(bank_map, cpu) & 1 << bank)) 544 if (!(per_cpu(bank_map, cpu) & 1 << bank))
384 continue; 545 continue;
@@ -386,7 +547,7 @@ static __cpuinit int threshold_create_device(unsigned int cpu)
386 if (err) 547 if (err)
387 goto out; 548 goto out;
388 } 549 }
389 out: 550out:
390 return err; 551 return err;
391} 552}
392 553
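
threshold_create_bank() above keeps one kobject tree per shared bank: the first core of a package allocates it, records the sharing cores in b->cpus, and every sibling merely gets a sysfs symlink plus a pointer in its per-CPU array. A toy model of that ownership scheme (the two-cores-per-package topology is assumed purely for the example):

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct bank { int owner; };

static struct bank *per_cpu_bank[NR_CPUS];

/* cpu 0/1 form one package, cpu 2/3 the other (assumed topology) */
static int first_core_of(int cpu) { return cpu & ~1; }

static void create_bank(int cpu)
{
    int first = first_core_of(cpu);

    if (cpu != first) {                       /* sibling: link to the owner */
        per_cpu_bank[cpu] = per_cpu_bank[first];
        return;
    }
    per_cpu_bank[cpu] = calloc(1, sizeof(struct bank));
    per_cpu_bank[cpu]->owner = cpu;
}

int main(void)
{
    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        create_bank(cpu);
    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        printf("cpu%d -> bank owned by cpu%d\n",
               cpu, per_cpu_bank[cpu]->owner);
    return 0;
}
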
@@ -397,92 +558,85 @@ static __cpuinit int threshold_create_device(unsigned int cpu)
397 * of shared sysfs dir/files, and rest of the cores will be symlinked to it. 558 * of shared sysfs dir/files, and rest of the cores will be symlinked to it.
398 */ 559 */
399 560
400/* cpu hotplug call removes all symlinks before first core dies */ 561static __cpuinit void deallocate_threshold_block(unsigned int cpu,
562 unsigned int bank)
563{
564 struct threshold_block *pos = NULL;
565 struct threshold_block *tmp = NULL;
566 struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];
567
568 if (!head)
569 return;
570
571 list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
572 kobject_unregister(&pos->kobj);
573 list_del(&pos->miscj);
574 kfree(pos);
575 }
576
577 kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
578 per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
579}
580
401static __cpuinit void threshold_remove_bank(unsigned int cpu, int bank) 581static __cpuinit void threshold_remove_bank(unsigned int cpu, int bank)
402{ 582{
583 int i = 0;
403 struct threshold_bank *b; 584 struct threshold_bank *b;
404 char name[16]; 585 char name[32];
405 586
406 b = per_cpu(threshold_banks, cpu)[bank]; 587 b = per_cpu(threshold_banks, cpu)[bank];
588
407 if (!b) 589 if (!b)
408 return; 590 return;
409 if (shared_bank[bank] && atomic_read(&b->kobj.kref.refcount) > 2) { 591
410 sprintf(name, "bank%i", bank); 592 if (!b->blocks)
411 sysfs_remove_link(&per_cpu(device_threshold, cpu).kobj, name); 593 goto free_out;
412 per_cpu(threshold_banks, cpu)[bank] = NULL; 594
413 } else { 595 sprintf(name, "threshold_bank%i", bank);
414 kobject_unregister(&b->kobj); 596
415 kfree(per_cpu(threshold_banks, cpu)[bank]); 597 /* sibling symlink */
598 if (shared_bank[bank] && b->blocks->cpu != cpu) {
599 sysfs_remove_link(&per_cpu(device_mce, cpu).kobj, name);
600 per_cpu(threshold_banks, i)[bank] = NULL;
601 return;
602 }
603
604 /* remove all sibling symlinks before unregistering */
605 for_each_cpu_mask(i, b->cpus) {
606 if (i == cpu)
607 continue;
608
609 sysfs_remove_link(&per_cpu(device_mce, i).kobj, name);
610 per_cpu(threshold_banks, i)[bank] = NULL;
416 } 611 }
612
613 deallocate_threshold_block(cpu, bank);
614
615free_out:
616 kobject_unregister(&b->kobj);
617 kfree(b);
618 per_cpu(threshold_banks, cpu)[bank] = NULL;
417} 619}
418 620
419static __cpuinit void threshold_remove_device(unsigned int cpu) 621static __cpuinit void threshold_remove_device(unsigned int cpu)
420{ 622{
421 int bank; 623 unsigned int bank;
422 624
423 for (bank = 0; bank < NR_BANKS; ++bank) { 625 for (bank = 0; bank < NR_BANKS; ++bank) {
424 if (!(per_cpu(bank_map, cpu) & 1 << bank)) 626 if (!(per_cpu(bank_map, cpu) & 1 << bank))
425 continue; 627 continue;
426 threshold_remove_bank(cpu, bank); 628 threshold_remove_bank(cpu, bank);
427 } 629 }
428 sysdev_unregister(&per_cpu(device_threshold, cpu));
429} 630}
430 631
431/* link all existing siblings when first core comes up */
432static __cpuinit int threshold_create_symlinks(unsigned int cpu)
433{
434 int bank, err = 0;
435 unsigned int lcpu = 0;
436
437 if (cpu_core_id[cpu])
438 return 0;
439 for_each_cpu_mask(lcpu, cpu_core_map[cpu]) {
440 if (lcpu == cpu)
441 continue;
442 for (bank = 0; bank < NR_BANKS; ++bank) {
443 if (!(per_cpu(bank_map, cpu) & 1 << bank))
444 continue;
445 if (!shared_bank[bank])
446 continue;
447 err = threshold_create_bank(lcpu, bank);
448 }
449 }
450 return err;
451}
452
453/* remove all symlinks before first core dies. */
454static __cpuinit void threshold_remove_symlinks(unsigned int cpu)
455{
456 int bank;
457 unsigned int lcpu = 0;
458 if (cpu_core_id[cpu])
459 return;
460 for_each_cpu_mask(lcpu, cpu_core_map[cpu]) {
461 if (lcpu == cpu)
462 continue;
463 for (bank = 0; bank < NR_BANKS; ++bank) {
464 if (!(per_cpu(bank_map, cpu) & 1 << bank))
465 continue;
466 if (!shared_bank[bank])
467 continue;
468 threshold_remove_bank(lcpu, bank);
469 }
470 }
471}
472#else /* !CONFIG_HOTPLUG_CPU */ 632#else /* !CONFIG_HOTPLUG_CPU */
473static __cpuinit void threshold_create_symlinks(unsigned int cpu)
474{
475}
476static __cpuinit void threshold_remove_symlinks(unsigned int cpu)
477{
478}
479static void threshold_remove_device(unsigned int cpu) 633static void threshold_remove_device(unsigned int cpu)
480{ 634{
481} 635}
482#endif 636#endif
483 637
484/* get notified when a cpu comes on/off */ 638/* get notified when a cpu comes on/off */
485static int threshold_cpu_callback(struct notifier_block *nfb, 639static int __cpuinit threshold_cpu_callback(struct notifier_block *nfb,
486 unsigned long action, void *hcpu) 640 unsigned long action, void *hcpu)
487{ 641{
488 /* cpu was unsigned int to begin with */ 642 /* cpu was unsigned int to begin with */
@@ -494,13 +648,6 @@ static int threshold_cpu_callback(struct notifier_block *nfb,
494 switch (action) { 648 switch (action) {
495 case CPU_ONLINE: 649 case CPU_ONLINE:
496 threshold_create_device(cpu); 650 threshold_create_device(cpu);
497 threshold_create_symlinks(cpu);
498 break;
499 case CPU_DOWN_PREPARE:
500 threshold_remove_symlinks(cpu);
501 break;
502 case CPU_DOWN_FAILED:
503 threshold_create_symlinks(cpu);
504 break; 651 break;
505 case CPU_DEAD: 652 case CPU_DEAD:
506 threshold_remove_device(cpu); 653 threshold_remove_device(cpu);
@@ -512,29 +659,22 @@ static int threshold_cpu_callback(struct notifier_block *nfb,
512 return NOTIFY_OK; 659 return NOTIFY_OK;
513} 660}
514 661
515static struct notifier_block threshold_cpu_notifier = { 662static struct notifier_block threshold_cpu_notifier __cpuinitdata = {
516 .notifier_call = threshold_cpu_callback, 663 .notifier_call = threshold_cpu_callback,
517}; 664};
518 665
519static __init int threshold_init_device(void) 666static __init int threshold_init_device(void)
520{ 667{
521 int err; 668 unsigned lcpu = 0;
522 int lcpu = 0;
523
524 err = sysdev_class_register(&threshold_sysclass);
525 if (err)
526 goto out;
527 669
528 /* to hit CPUs online before the notifier is up */ 670 /* to hit CPUs online before the notifier is up */
529 for_each_online_cpu(lcpu) { 671 for_each_online_cpu(lcpu) {
530 err = threshold_create_device(lcpu); 672 int err = threshold_create_device(lcpu);
531 if (err) 673 if (err)
532 goto out; 674 return err;
533 } 675 }
534 register_cpu_notifier(&threshold_cpu_notifier); 676 register_cpu_notifier(&threshold_cpu_notifier);
535 677 return 0;
536 out:
537 return err;
538} 678}
539 679
540device_initcall(threshold_init_device); 680device_initcall(threshold_init_device);
diff --git a/arch/x86_64/kernel/module.c b/arch/x86_64/kernel/module.c
index bac195c74bcc..9d0958ff547f 100644
--- a/arch/x86_64/kernel/module.c
+++ b/arch/x86_64/kernel/module.c
@@ -145,26 +145,38 @@ int apply_relocate(Elf_Shdr *sechdrs,
145 return -ENOSYS; 145 return -ENOSYS;
146} 146}
147 147
148extern void apply_alternatives(void *start, void *end);
149
150int module_finalize(const Elf_Ehdr *hdr, 148int module_finalize(const Elf_Ehdr *hdr,
151 const Elf_Shdr *sechdrs, 149 const Elf_Shdr *sechdrs,
152 struct module *me) 150 struct module *me)
153{ 151{
154 const Elf_Shdr *s; 152 const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL;
155 char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; 153 char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
156 154
157 /* look for .altinstructions to patch */ 155 for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
158 for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { 156 if (!strcmp(".text", secstrings + s->sh_name))
159 void *seg; 157 text = s;
160 if (strcmp(".altinstructions", secstrings + s->sh_name)) 158 if (!strcmp(".altinstructions", secstrings + s->sh_name))
161 continue; 159 alt = s;
162 seg = (void *)s->sh_addr; 160 if (!strcmp(".smp_locks", secstrings + s->sh_name))
163 apply_alternatives(seg, seg + s->sh_size); 161 locks= s;
164 } 162 }
163
164 if (alt) {
165 /* patch .altinstructions */
166 void *aseg = (void *)alt->sh_addr;
167 apply_alternatives(aseg, aseg + alt->sh_size);
168 }
169 if (locks && text) {
170 void *lseg = (void *)locks->sh_addr;
171 void *tseg = (void *)text->sh_addr;
172 alternatives_smp_module_add(me, me->name,
173 lseg, lseg + locks->sh_size,
174 tseg, tseg + text->sh_size);
175 }
165 return 0; 176 return 0;
166} 177}
167 178
168void module_arch_cleanup(struct module *mod) 179void module_arch_cleanup(struct module *mod)
169{ 180{
181 alternatives_smp_module_del(mod);
170} 182}
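
module_finalize() now records .text, .altinstructions and .smp_locks in a single pass over the section headers, patches alternatives from the former and hands the lock list plus the text bounds to alternatives_smp_module_add(). A standalone sketch of that one-pass scan (struct shdr here is a stripped-down stand-in for Elf_Shdr, for illustration only):

#include <stdio.h>
#include <string.h>

struct shdr { const char *name; };

int main(void)
{
    struct shdr sections[] = {
        { ".symtab" }, { ".text" }, { ".altinstructions" },
        { ".smp_locks" }, { ".data" },
    };
    const struct shdr *text = NULL, *alt = NULL, *locks = NULL;

    /* one walk over the section headers, remembering the three we care about */
    for (size_t i = 0; i < sizeof(sections) / sizeof(sections[0]); i++) {
        if (!strcmp(".text", sections[i].name))
            text = &sections[i];
        if (!strcmp(".altinstructions", sections[i].name))
            alt = &sections[i];
        if (!strcmp(".smp_locks", sections[i].name))
            locks = &sections[i];
    }
    printf("text=%s alt=%s locks=%s\n",
           text ? "found" : "-", alt ? "found" : "-", locks ? "found" : "-");
    return 0;
}
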
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 4e6357fe0ec3..399489c93132 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -15,11 +15,7 @@
15#include <linux/config.h> 15#include <linux/config.h>
16#include <linux/mm.h> 16#include <linux/mm.h>
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/bootmem.h>
19#include <linux/smp_lock.h>
20#include <linux/interrupt.h> 18#include <linux/interrupt.h>
21#include <linux/mc146818rtc.h>
22#include <linux/kernel_stat.h>
23#include <linux/module.h> 19#include <linux/module.h>
24#include <linux/sysdev.h> 20#include <linux/sysdev.h>
25#include <linux/nmi.h> 21#include <linux/nmi.h>
@@ -27,14 +23,11 @@
27#include <linux/kprobes.h> 23#include <linux/kprobes.h>
28 24
29#include <asm/smp.h> 25#include <asm/smp.h>
30#include <asm/mtrr.h>
31#include <asm/mpspec.h>
32#include <asm/nmi.h> 26#include <asm/nmi.h>
33#include <asm/msr.h>
34#include <asm/proto.h> 27#include <asm/proto.h>
35#include <asm/kdebug.h> 28#include <asm/kdebug.h>
36#include <asm/local.h>
37#include <asm/mce.h> 29#include <asm/mce.h>
30#include <asm/intel_arch_perfmon.h>
38 31
39/* 32/*
40 * lapic_nmi_owner tracks the ownership of the lapic NMI hardware: 33 * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
@@ -74,6 +67,9 @@ static unsigned int nmi_p4_cccr_val;
74#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING 0x76 67#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING 0x76
75#define K7_NMI_EVENT K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING 68#define K7_NMI_EVENT K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
76 69
70#define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
71#define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
72
77#define MSR_P4_MISC_ENABLE 0x1A0 73#define MSR_P4_MISC_ENABLE 0x1A0
78#define MSR_P4_MISC_ENABLE_PERF_AVAIL (1<<7) 74#define MSR_P4_MISC_ENABLE_PERF_AVAIL (1<<7)
79#define MSR_P4_MISC_ENABLE_PEBS_UNAVAIL (1<<12) 75#define MSR_P4_MISC_ENABLE_PEBS_UNAVAIL (1<<12)
@@ -105,7 +101,10 @@ static __cpuinit inline int nmi_known_cpu(void)
105 case X86_VENDOR_AMD: 101 case X86_VENDOR_AMD:
106 return boot_cpu_data.x86 == 15; 102 return boot_cpu_data.x86 == 15;
107 case X86_VENDOR_INTEL: 103 case X86_VENDOR_INTEL:
108 return boot_cpu_data.x86 == 15; 104 if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
105 return 1;
106 else
107 return (boot_cpu_data.x86 == 15);
109 } 108 }
110 return 0; 109 return 0;
111} 110}
@@ -211,6 +210,8 @@ int __init setup_nmi_watchdog(char *str)
211 210
212__setup("nmi_watchdog=", setup_nmi_watchdog); 211__setup("nmi_watchdog=", setup_nmi_watchdog);
213 212
213static void disable_intel_arch_watchdog(void);
214
214static void disable_lapic_nmi_watchdog(void) 215static void disable_lapic_nmi_watchdog(void)
215{ 216{
216 if (nmi_active <= 0) 217 if (nmi_active <= 0)
@@ -223,6 +224,8 @@ static void disable_lapic_nmi_watchdog(void)
223 if (boot_cpu_data.x86 == 15) { 224 if (boot_cpu_data.x86 == 15) {
224 wrmsr(MSR_P4_IQ_CCCR0, 0, 0); 225 wrmsr(MSR_P4_IQ_CCCR0, 0, 0);
225 wrmsr(MSR_P4_CRU_ESCR0, 0, 0); 226 wrmsr(MSR_P4_CRU_ESCR0, 0, 0);
227 } else if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
228 disable_intel_arch_watchdog();
226 } 229 }
227 break; 230 break;
228 } 231 }
@@ -375,6 +378,53 @@ static void setup_k7_watchdog(void)
375 wrmsr(MSR_K7_EVNTSEL0, evntsel, 0); 378 wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
376} 379}
377 380
381static void disable_intel_arch_watchdog(void)
382{
383 unsigned ebx;
384
385 /*
386 * Check whether the Architectural PerfMon supports
387 * Unhalted Core Cycles Event or not.
388 * NOTE: Corresponding bit = 0 in ebp indicates event present.
389 */
390 ebx = cpuid_ebx(10);
391 if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
392 wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, 0, 0);
393}
394
395static int setup_intel_arch_watchdog(void)
396{
397 unsigned int evntsel;
398 unsigned ebx;
399
400 /*
401 * Check whether the Architectural PerfMon supports
402 * Unhalted Core Cycles Event or not.
403 * NOTE: Corresponding bit = 0 in ebp indicates event present.
404 */
405 ebx = cpuid_ebx(10);
406 if ((ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
407 return 0;
408
409 nmi_perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
410
411 clear_msr_range(MSR_ARCH_PERFMON_EVENTSEL0, 2);
412 clear_msr_range(MSR_ARCH_PERFMON_PERFCTR0, 2);
413
414 evntsel = ARCH_PERFMON_EVENTSEL_INT
415 | ARCH_PERFMON_EVENTSEL_OS
416 | ARCH_PERFMON_EVENTSEL_USR
417 | ARCH_PERFMON_NMI_EVENT_SEL
418 | ARCH_PERFMON_NMI_EVENT_UMASK;
419
420 wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
421 wrmsrl(MSR_ARCH_PERFMON_PERFCTR0, -((u64)cpu_khz * 1000 / nmi_hz));
422 apic_write(APIC_LVTPC, APIC_DM_NMI);
423 evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
424 wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
425 return 1;
426}
427
378 428
379static int setup_p4_watchdog(void) 429static int setup_p4_watchdog(void)
380{ 430{
@@ -428,10 +478,16 @@ void setup_apic_nmi_watchdog(void)
428 setup_k7_watchdog(); 478 setup_k7_watchdog();
429 break; 479 break;
430 case X86_VENDOR_INTEL: 480 case X86_VENDOR_INTEL:
431 if (boot_cpu_data.x86 != 15) 481 if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
432 return; 482 if (!setup_intel_arch_watchdog())
433 if (!setup_p4_watchdog()) 483 return;
484 } else if (boot_cpu_data.x86 == 15) {
485 if (!setup_p4_watchdog())
486 return;
487 } else {
434 return; 488 return;
489 }
490
435 break; 491 break;
436 492
437 default: 493 default:
@@ -516,7 +572,14 @@ void __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
516 */ 572 */
517 wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0); 573 wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
518 apic_write(APIC_LVTPC, APIC_DM_NMI); 574 apic_write(APIC_LVTPC, APIC_DM_NMI);
519 } 575 } else if (nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
576 /*
577 * For Intel based architectural perfmon
578 * - LVTPC is masked on interrupt and must be
579 * unmasked by the LVTPC handler.
580 */
581 apic_write(APIC_LVTPC, APIC_DM_NMI);
582 }
520 wrmsrl(nmi_perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz)); 583 wrmsrl(nmi_perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
521 } 584 }
522} 585}
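
The architectural-perfmon watchdog added above keys off CPUID leaf 10: bit 0 of EBX being clear means the Unhalted Core Cycles event is implemented, and the counter is preloaded with a negative count sized so it overflows (and raises the NMI) roughly nmi_hz times per second. A small standalone sketch of those two calculations (the constants and the 2 GHz figure are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define UNHALTED_CORE_CYCLES_PRESENT (1u << 0) /* bit clear => event exists */

static int arch_perfmon_usable(uint32_t cpuid_0a_ebx)
{
    return !(cpuid_0a_ebx & UNHALTED_CORE_CYCLES_PRESENT);
}

static int64_t perfctr_preload(uint64_t cpu_khz, unsigned nmi_hz)
{
    /* negative count: the counter overflows after cpu_khz*1000/nmi_hz cycles */
    return -(int64_t)(cpu_khz * 1000 / nmi_hz);
}

int main(void)
{
    printf("usable(ebx=0x0): %d\n", arch_perfmon_usable(0x0));
    printf("usable(ebx=0x1): %d\n", arch_perfmon_usable(0x1));
    printf("preload @2GHz, 1Hz: %lld\n",
           (long long)perfctr_preload(2000000, 1));
    return 0;
}
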
diff --git a/arch/x86_64/kernel/pci-calgary.c b/arch/x86_64/kernel/pci-calgary.c
new file mode 100644
index 000000000000..d91cb843f54d
--- /dev/null
+++ b/arch/x86_64/kernel/pci-calgary.c
@@ -0,0 +1,1018 @@
1/*
2 * Derived from arch/powerpc/kernel/iommu.c
3 *
4 * Copyright (C) 2006 Jon Mason <jdmason@us.ibm.com>, IBM Corporation
5 * Copyright (C) 2006 Muli Ben-Yehuda <muli@il.ibm.com>, IBM Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/config.h>
23#include <linux/kernel.h>
24#include <linux/init.h>
25#include <linux/types.h>
26#include <linux/slab.h>
27#include <linux/mm.h>
28#include <linux/spinlock.h>
29#include <linux/string.h>
30#include <linux/dma-mapping.h>
31#include <linux/init.h>
32#include <linux/bitops.h>
33#include <linux/pci_ids.h>
34#include <linux/pci.h>
35#include <linux/delay.h>
36#include <asm/proto.h>
37#include <asm/calgary.h>
38#include <asm/tce.h>
39#include <asm/pci-direct.h>
40#include <asm/system.h>
41#include <asm/dma.h>
42
43#define PCI_DEVICE_ID_IBM_CALGARY 0x02a1
44#define PCI_VENDOR_DEVICE_ID_CALGARY \
45 (PCI_VENDOR_ID_IBM | PCI_DEVICE_ID_IBM_CALGARY << 16)
46
47/* we need these for register space address calculation */
48#define START_ADDRESS 0xfe000000
49#define CHASSIS_BASE 0
50#define ONE_BASED_CHASSIS_NUM 1
51
52/* register offsets inside the host bridge space */
53#define PHB_CSR_OFFSET 0x0110
54#define PHB_PLSSR_OFFSET 0x0120
55#define PHB_CONFIG_RW_OFFSET 0x0160
56#define PHB_IOBASE_BAR_LOW 0x0170
57#define PHB_IOBASE_BAR_HIGH 0x0180
58#define PHB_MEM_1_LOW 0x0190
59#define PHB_MEM_1_HIGH 0x01A0
60#define PHB_IO_ADDR_SIZE 0x01B0
61#define PHB_MEM_1_SIZE 0x01C0
62#define PHB_MEM_ST_OFFSET 0x01D0
63#define PHB_AER_OFFSET 0x0200
64#define PHB_CONFIG_0_HIGH 0x0220
65#define PHB_CONFIG_0_LOW 0x0230
66#define PHB_CONFIG_0_END 0x0240
67#define PHB_MEM_2_LOW 0x02B0
68#define PHB_MEM_2_HIGH 0x02C0
69#define PHB_MEM_2_SIZE_HIGH 0x02D0
70#define PHB_MEM_2_SIZE_LOW 0x02E0
71#define PHB_DOSHOLE_OFFSET 0x08E0
72
73/* PHB_CONFIG_RW */
74#define PHB_TCE_ENABLE 0x20000000
75#define PHB_SLOT_DISABLE 0x1C000000
76#define PHB_DAC_DISABLE 0x01000000
77#define PHB_MEM2_ENABLE 0x00400000
78#define PHB_MCSR_ENABLE 0x00100000
79/* TAR (Table Address Register) */
80#define TAR_SW_BITS 0x0000ffffffff800fUL
81#define TAR_VALID 0x0000000000000008UL
82/* CSR (Channel/DMA Status Register) */
83#define CSR_AGENT_MASK 0xffe0ffff
84
85#define MAX_NUM_OF_PHBS 8 /* how many PHBs in total? */
86#define MAX_PHB_BUS_NUM (MAX_NUM_OF_PHBS * 2) /* max dev->bus->number */
87#define PHBS_PER_CALGARY 4
88
89/* register offsets in Calgary's internal register space */
90static const unsigned long tar_offsets[] = {
91 0x0580 /* TAR0 */,
92 0x0588 /* TAR1 */,
93 0x0590 /* TAR2 */,
94 0x0598 /* TAR3 */
95};
96
97static const unsigned long split_queue_offsets[] = {
98 0x4870 /* SPLIT QUEUE 0 */,
99 0x5870 /* SPLIT QUEUE 1 */,
100 0x6870 /* SPLIT QUEUE 2 */,
101 0x7870 /* SPLIT QUEUE 3 */
102};
103
104static const unsigned long phb_offsets[] = {
105 0x8000 /* PHB0 */,
106 0x9000 /* PHB1 */,
107 0xA000 /* PHB2 */,
108 0xB000 /* PHB3 */
109};
110
111void* tce_table_kva[MAX_NUM_OF_PHBS * MAX_NUMNODES];
112unsigned int specified_table_size = TCE_TABLE_SIZE_UNSPECIFIED;
113static int translate_empty_slots __read_mostly = 0;
114static int calgary_detected __read_mostly = 0;
115
116/*
117 * the bitmap of PHBs the user requested that we disable
118 * translation on.
119 */
120static DECLARE_BITMAP(translation_disabled, MAX_NUMNODES * MAX_PHB_BUS_NUM);
121
122static void tce_cache_blast(struct iommu_table *tbl);
123
124/* enable this to stress test the chip's TCE cache */
125#ifdef CONFIG_IOMMU_DEBUG
126static inline void tce_cache_blast_stress(struct iommu_table *tbl)
127{
128 tce_cache_blast(tbl);
129}
130#else
131static inline void tce_cache_blast_stress(struct iommu_table *tbl)
132{
133}
134#endif /* CONFIG_IOMMU_DEBUG */
135
136static inline unsigned int num_dma_pages(unsigned long dma, unsigned int dmalen)
137{
138 unsigned int npages;
139
140 npages = PAGE_ALIGN(dma + dmalen) - (dma & PAGE_MASK);
141 npages >>= PAGE_SHIFT;
142
143 return npages;
144}
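As a quick illustration of the arithmetic in num_dma_pages() -- round the end of the buffer up to a page boundary, the start down, and count whole pages -- here is a minimal userspace restatement (editor's sketch, not part of the patch; it assumes 4 KB pages and redefines the page macros locally):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	/* a 100-byte buffer starting 60 bytes before a page boundary */
	unsigned long dma = 0x0fc4;
	unsigned int dmalen = 100;

	/* same computation as num_dma_pages() */
	unsigned int npages = (PAGE_ALIGN(dma + dmalen) - (dma & PAGE_MASK)) >> PAGE_SHIFT;

	printf("npages = %u\n", npages);	/* prints 2: the buffer straddles two pages */
	return 0;
}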
145
146static inline int translate_phb(struct pci_dev* dev)
147{
148 int disabled = test_bit(dev->bus->number, translation_disabled);
149 return !disabled;
150}
151
152static void iommu_range_reserve(struct iommu_table *tbl,
153 unsigned long start_addr, unsigned int npages)
154{
155 unsigned long index;
156 unsigned long end;
157
158 index = start_addr >> PAGE_SHIFT;
159
160 /* bail out if we're asked to reserve a region we don't cover */
161 if (index >= tbl->it_size)
162 return;
163
164 end = index + npages;
165 if (end > tbl->it_size) /* don't go off the table */
166 end = tbl->it_size;
167
168 while (index < end) {
169 if (test_bit(index, tbl->it_map))
170 printk(KERN_ERR "Calgary: entry already allocated at "
171 "0x%lx tbl %p dma 0x%lx npages %u\n",
172 index, tbl, start_addr, npages);
173 ++index;
174 }
175 set_bit_string(tbl->it_map, start_addr >> PAGE_SHIFT, end - (start_addr >> PAGE_SHIFT));
176}
177
178static unsigned long iommu_range_alloc(struct iommu_table *tbl,
179 unsigned int npages)
180{
181 unsigned long offset;
182
183 BUG_ON(npages == 0);
184
185 offset = find_next_zero_string(tbl->it_map, tbl->it_hint,
186 tbl->it_size, npages);
187 if (offset == ~0UL) {
188 tce_cache_blast(tbl);
189 offset = find_next_zero_string(tbl->it_map, 0,
190 tbl->it_size, npages);
191 if (offset == ~0UL) {
192 printk(KERN_WARNING "Calgary: IOMMU full.\n");
193 if (panic_on_overflow)
194 panic("Calgary: fix the allocator.\n");
195 else
196 return bad_dma_address;
197 }
198 }
199
200 set_bit_string(tbl->it_map, offset, npages);
201 tbl->it_hint = offset + npages;
202 BUG_ON(tbl->it_hint > tbl->it_size);
203
204 return offset;
205}
206
207static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *vaddr,
208 unsigned int npages, int direction)
209{
210 unsigned long entry, flags;
211 dma_addr_t ret = bad_dma_address;
212
213 spin_lock_irqsave(&tbl->it_lock, flags);
214
215 entry = iommu_range_alloc(tbl, npages);
216
217 if (unlikely(entry == bad_dma_address))
218 goto error;
219
220 /* set the return dma address */
221 ret = (entry << PAGE_SHIFT) | ((unsigned long)vaddr & ~PAGE_MASK);
222
223 /* put the TCEs in the HW table */
224 tce_build(tbl, entry, npages, (unsigned long)vaddr & PAGE_MASK,
225 direction);
226
227 spin_unlock_irqrestore(&tbl->it_lock, flags);
228
229 return ret;
230
231error:
232 spin_unlock_irqrestore(&tbl->it_lock, flags);
233 printk(KERN_WARNING "Calgary: failed to allocate %u pages in "
234 "iommu %p\n", npages, tbl);
235 return bad_dma_address;
236}
237
238static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
239 unsigned int npages)
240{
241 unsigned long entry;
242 unsigned long i;
243
244 entry = dma_addr >> PAGE_SHIFT;
245
246 BUG_ON(entry + npages > tbl->it_size);
247
248 tce_free(tbl, entry, npages);
249
250 for (i = 0; i < npages; ++i) {
251 if (!test_bit(entry + i, tbl->it_map))
252 printk(KERN_ERR "Calgary: bit is off at 0x%lx "
253 "tbl %p dma 0x%Lx entry 0x%lx npages %u\n",
254 entry + i, tbl, dma_addr, entry, npages);
255 }
256
257 __clear_bit_string(tbl->it_map, entry, npages);
258
259 tce_cache_blast_stress(tbl);
260}
261
262static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
263 unsigned int npages)
264{
265 unsigned long flags;
266
267 spin_lock_irqsave(&tbl->it_lock, flags);
268
269 __iommu_free(tbl, dma_addr, npages);
270
271 spin_unlock_irqrestore(&tbl->it_lock, flags);
272}
273
274static void __calgary_unmap_sg(struct iommu_table *tbl,
275 struct scatterlist *sglist, int nelems, int direction)
276{
277 while (nelems--) {
278 unsigned int npages;
279 dma_addr_t dma = sglist->dma_address;
280 unsigned int dmalen = sglist->dma_length;
281
282 if (dmalen == 0)
283 break;
284
285 npages = num_dma_pages(dma, dmalen);
286 __iommu_free(tbl, dma, npages);
287 sglist++;
288 }
289}
290
291void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist,
292 int nelems, int direction)
293{
294 unsigned long flags;
295 struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata;
296
297 if (!translate_phb(to_pci_dev(dev)))
298 return;
299
300 spin_lock_irqsave(&tbl->it_lock, flags);
301
302 __calgary_unmap_sg(tbl, sglist, nelems, direction);
303
304 spin_unlock_irqrestore(&tbl->it_lock, flags);
305}
306
307static int calgary_nontranslate_map_sg(struct device* dev,
308 struct scatterlist *sg, int nelems, int direction)
309{
310 int i;
311
312 for (i = 0; i < nelems; i++ ) {
313 struct scatterlist *s = &sg[i];
314 BUG_ON(!s->page);
315 s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
316 s->dma_length = s->length;
317 }
318 return nelems;
319}
320
321int calgary_map_sg(struct device *dev, struct scatterlist *sg,
322 int nelems, int direction)
323{
324 struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata;
325 unsigned long flags;
326 unsigned long vaddr;
327 unsigned int npages;
328 unsigned long entry;
329 int i;
330
331 if (!translate_phb(to_pci_dev(dev)))
332 return calgary_nontranslate_map_sg(dev, sg, nelems, direction);
333
334 spin_lock_irqsave(&tbl->it_lock, flags);
335
336 for (i = 0; i < nelems; i++ ) {
337 struct scatterlist *s = &sg[i];
338 BUG_ON(!s->page);
339
340 vaddr = (unsigned long)page_address(s->page) + s->offset;
341 npages = num_dma_pages(vaddr, s->length);
342
343 entry = iommu_range_alloc(tbl, npages);
344 if (entry == bad_dma_address) {
345 /* makes sure unmap knows to stop */
346 s->dma_length = 0;
347 goto error;
348 }
349
350 s->dma_address = (entry << PAGE_SHIFT) | s->offset;
351
352 /* insert into HW table */
353 tce_build(tbl, entry, npages, vaddr & PAGE_MASK,
354 direction);
355
356 s->dma_length = s->length;
357 }
358
359 spin_unlock_irqrestore(&tbl->it_lock, flags);
360
361 return nelems;
362error:
363 __calgary_unmap_sg(tbl, sg, nelems, direction);
364 for (i = 0; i < nelems; i++) {
365 sg[i].dma_address = bad_dma_address;
366 sg[i].dma_length = 0;
367 }
368 spin_unlock_irqrestore(&tbl->it_lock, flags);
369 return 0;
370}
371
372dma_addr_t calgary_map_single(struct device *dev, void *vaddr,
373 size_t size, int direction)
374{
375 dma_addr_t dma_handle = bad_dma_address;
376 unsigned long uaddr;
377 unsigned int npages;
378 struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata;
379
380 uaddr = (unsigned long)vaddr;
381 npages = num_dma_pages(uaddr, size);
382
383 if (translate_phb(to_pci_dev(dev)))
384 dma_handle = iommu_alloc(tbl, vaddr, npages, direction);
385 else
386 dma_handle = virt_to_bus(vaddr);
387
388 return dma_handle;
389}
390
391void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
392 size_t size, int direction)
393{
394 struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata;
395 unsigned int npages;
396
397 if (!translate_phb(to_pci_dev(dev)))
398 return;
399
400 npages = num_dma_pages(dma_handle, size);
401 iommu_free(tbl, dma_handle, npages);
402}
403
404void* calgary_alloc_coherent(struct device *dev, size_t size,
405 dma_addr_t *dma_handle, gfp_t flag)
406{
407 void *ret = NULL;
408 dma_addr_t mapping;
409 unsigned int npages, order;
410 struct iommu_table *tbl;
411
412 tbl = to_pci_dev(dev)->bus->self->sysdata;
413
414 size = PAGE_ALIGN(size); /* size rounded up to full pages */
415 npages = size >> PAGE_SHIFT;
416 order = get_order(size);
417
418 /* alloc enough pages (and possibly more) */
419 ret = (void *)__get_free_pages(flag, order);
420 if (!ret)
421 goto error;
422 memset(ret, 0, size);
423
424 if (translate_phb(to_pci_dev(dev))) {
425 /* set up tces to cover the allocated range */
426 mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL);
427 if (mapping == bad_dma_address)
428 goto free;
429
430 *dma_handle = mapping;
431 } else /* non translated slot */
432 *dma_handle = virt_to_bus(ret);
433
434 return ret;
435
436free:
437 free_pages((unsigned long)ret, get_order(size));
438 ret = NULL;
439error:
440 return ret;
441}
442
443static struct dma_mapping_ops calgary_dma_ops = {
444 .alloc_coherent = calgary_alloc_coherent,
445 .map_single = calgary_map_single,
446 .unmap_single = calgary_unmap_single,
447 .map_sg = calgary_map_sg,
448 .unmap_sg = calgary_unmap_sg,
449};
450
451static inline int busno_to_phbid(unsigned char num)
452{
453 return bus_to_phb(num) % PHBS_PER_CALGARY;
454}
455
456static inline unsigned long split_queue_offset(unsigned char num)
457{
458 size_t idx = busno_to_phbid(num);
459
460 return split_queue_offsets[idx];
461}
462
463static inline unsigned long tar_offset(unsigned char num)
464{
465 size_t idx = busno_to_phbid(num);
466
467 return tar_offsets[idx];
468}
469
470static inline unsigned long phb_offset(unsigned char num)
471{
472 size_t idx = busno_to_phbid(num);
473
474 return phb_offsets[idx];
475}
476
477static inline void __iomem* calgary_reg(void __iomem *bar, unsigned long offset)
478{
479 unsigned long target = ((unsigned long)bar) | offset;
480 return (void __iomem*)target;
481}
482
483static void tce_cache_blast(struct iommu_table *tbl)
484{
485 u64 val;
486 u32 aer;
487 int i = 0;
488 void __iomem *bbar = tbl->bbar;
489 void __iomem *target;
490
491 /* disable arbitration on the bus */
492 target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_AER_OFFSET);
493 aer = readl(target);
494 writel(0, target);
495
496 /* read plssr to ensure it got there */
497 target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_PLSSR_OFFSET);
498 val = readl(target);
499
500 /* poll split queues until all DMA activity is done */
501 target = calgary_reg(bbar, split_queue_offset(tbl->it_busno));
502 do {
503 val = readq(target);
504 i++;
505 } while ((val & 0xff) != 0xff && i < 100);
506 if (i == 100)
507 printk(KERN_WARNING "Calgary: PCI bus not quiesced, "
508 "continuing anyway\n");
509
510 /* invalidate TCE cache */
511 target = calgary_reg(bbar, tar_offset(tbl->it_busno));
512 writeq(tbl->tar_val, target);
513
514 /* enable arbitration */
515 target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_AER_OFFSET);
516 writel(aer, target);
517 (void)readl(target); /* flush */
518}
519
520static void __init calgary_reserve_mem_region(struct pci_dev *dev, u64 start,
521 u64 limit)
522{
523 unsigned int numpages;
524
525 limit = limit | 0xfffff;
526 limit++;
527
528 numpages = ((limit - start) >> PAGE_SHIFT);
529 iommu_range_reserve(dev->sysdata, start, numpages);
530}
531
532static void __init calgary_reserve_peripheral_mem_1(struct pci_dev *dev)
533{
534 void __iomem *target;
535 u64 low, high, sizelow;
536 u64 start, limit;
537 struct iommu_table *tbl = dev->sysdata;
538 unsigned char busnum = dev->bus->number;
539 void __iomem *bbar = tbl->bbar;
540
541 /* peripheral MEM_1 region */
542 target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_LOW);
543 low = be32_to_cpu(readl(target));
544 target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_HIGH);
545 high = be32_to_cpu(readl(target));
546 target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_SIZE);
547 sizelow = be32_to_cpu(readl(target));
548
549 start = (high << 32) | low;
550 limit = sizelow;
551
552 calgary_reserve_mem_region(dev, start, limit);
553}
554
555static void __init calgary_reserve_peripheral_mem_2(struct pci_dev *dev)
556{
557 void __iomem *target;
558 u32 val32;
559 u64 low, high, sizelow, sizehigh;
560 u64 start, limit;
561 struct iommu_table *tbl = dev->sysdata;
562 unsigned char busnum = dev->bus->number;
563 void __iomem *bbar = tbl->bbar;
564
565 /* is it enabled? */
566 target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET);
567 val32 = be32_to_cpu(readl(target));
568 if (!(val32 & PHB_MEM2_ENABLE))
569 return;
570
571 target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_LOW);
572 low = be32_to_cpu(readl(target));
573 target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_HIGH);
574 high = be32_to_cpu(readl(target));
575 target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_SIZE_LOW);
576 sizelow = be32_to_cpu(readl(target));
577 target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_SIZE_HIGH);
578 sizehigh = be32_to_cpu(readl(target));
579
580 start = (high << 32) | low;
581 limit = (sizehigh << 32) | sizelow;
582
583 calgary_reserve_mem_region(dev, start, limit);
584}
585
586/*
587 * some regions of the IO address space do not get translated, so we
588 * must not give devices IO addresses in those regions. The regions
589 * are the 640KB-1MB region and the two PCI peripheral memory holes.
590 * Reserve all of them in the IOMMU bitmap to avoid giving them out
591 * later.
592 */
593static void __init calgary_reserve_regions(struct pci_dev *dev)
594{
595 unsigned int npages;
596 void __iomem *bbar;
597 unsigned char busnum;
598 u64 start;
599 struct iommu_table *tbl = dev->sysdata;
600
601 bbar = tbl->bbar;
602 busnum = dev->bus->number;
603
604 /* reserve bad_dma_address in case it's a legal address */
605 iommu_range_reserve(tbl, bad_dma_address, 1);
606
607 /* avoid the BIOS/VGA first 640KB-1MB region */
608 start = (640 * 1024);
609 npages = ((1024 - 640) * 1024) >> PAGE_SHIFT;
610 iommu_range_reserve(tbl, start, npages);
611
612 /* reserve the two PCI peripheral memory regions in IO space */
613 calgary_reserve_peripheral_mem_1(dev);
614 calgary_reserve_peripheral_mem_2(dev);
615}
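For concreteness (editor's illustration, assuming 4 KB pages): the BIOS/VGA hole reserved above runs from 0xa0000 to 0xfffff, i.e. 96 IOMMU pages.

#include <stdio.h>

int main(void)
{
	unsigned long start = 640 * 1024;			/* 0xa0000 */
	unsigned int npages = ((1024 - 640) * 1024) >> 12;	/* 384 KB / 4 KB */

	printf("reserve %u pages starting at 0x%lx\n", npages, start);	/* 96 pages */
	return 0;
}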
616
617static int __init calgary_setup_tar(struct pci_dev *dev, void __iomem *bbar)
618{
619 u64 val64;
620 u64 table_phys;
621 void __iomem *target;
622 int ret;
623 struct iommu_table *tbl;
624
625 /* build TCE tables for each PHB */
626 ret = build_tce_table(dev, bbar);
627 if (ret)
628 return ret;
629
630 calgary_reserve_regions(dev);
631
632 /* set TARs for each PHB */
633 target = calgary_reg(bbar, tar_offset(dev->bus->number));
634 val64 = be64_to_cpu(readq(target));
635
636 /* zero out all TAR bits under sw control */
637 val64 &= ~TAR_SW_BITS;
638
639 tbl = dev->sysdata;
640 table_phys = (u64)__pa(tbl->it_base);
641 val64 |= table_phys;
642
643 BUG_ON(specified_table_size > TCE_TABLE_SIZE_8M);
644 val64 |= (u64) specified_table_size;
645
646 tbl->tar_val = cpu_to_be64(val64);
647 writeq(tbl->tar_val, target);
648 readq(target); /* flush */
649
650 return 0;
651}
652
653static void __init calgary_free_tar(struct pci_dev *dev)
654{
655 u64 val64;
656 struct iommu_table *tbl = dev->sysdata;
657 void __iomem *target;
658
659 target = calgary_reg(tbl->bbar, tar_offset(dev->bus->number));
660 val64 = be64_to_cpu(readq(target));
661 val64 &= ~TAR_SW_BITS;
662 writeq(cpu_to_be64(val64), target);
663 readq(target); /* flush */
664
665 kfree(tbl);
666 dev->sysdata = NULL;
667}
668
669static void calgary_watchdog(unsigned long data)
670{
671 struct pci_dev *dev = (struct pci_dev *)data;
672 struct iommu_table *tbl = dev->sysdata;
673 void __iomem *bbar = tbl->bbar;
674 u32 val32;
675 void __iomem *target;
676
677 target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_CSR_OFFSET);
678 val32 = be32_to_cpu(readl(target));
679
680 /* If no error, the agent ID in the CSR is not valid */
681 if (val32 & CSR_AGENT_MASK) {
682 printk(KERN_EMERG "calgary_watchdog: DMA error on bus %d, "
683 "CSR = %#x\n", dev->bus->number, val32);
684 writel(0, target);
685
686 /* Disable bus that caused the error */
687 target = calgary_reg(bbar, phb_offset(tbl->it_busno) |
688 PHB_CONFIG_RW_OFFSET);
689 val32 = be32_to_cpu(readl(target));
690 val32 |= PHB_SLOT_DISABLE;
691 writel(cpu_to_be32(val32), target);
692 readl(target); /* flush */
693 } else {
694 /* Reset the timer */
695 mod_timer(&tbl->watchdog_timer, jiffies + 2 * HZ);
696 }
697}
698
699static void __init calgary_enable_translation(struct pci_dev *dev)
700{
701 u32 val32;
702 unsigned char busnum;
703 void __iomem *target;
704 void __iomem *bbar;
705 struct iommu_table *tbl;
706
707 busnum = dev->bus->number;
708 tbl = dev->sysdata;
709 bbar = tbl->bbar;
710
711 /* enable TCE in PHB Config Register */
712 target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET);
713 val32 = be32_to_cpu(readl(target));
714 val32 |= PHB_TCE_ENABLE | PHB_DAC_DISABLE | PHB_MCSR_ENABLE;
715
716 printk(KERN_INFO "Calgary: enabling translation on PHB %d\n", busnum);
717 printk(KERN_INFO "Calgary: errant DMAs will now be prevented on this "
718 "bus.\n");
719
720 writel(cpu_to_be32(val32), target);
721 readl(target); /* flush */
722
723 init_timer(&tbl->watchdog_timer);
724 tbl->watchdog_timer.function = &calgary_watchdog;
725 tbl->watchdog_timer.data = (unsigned long)dev;
726 mod_timer(&tbl->watchdog_timer, jiffies);
727}
728
729static void __init calgary_disable_translation(struct pci_dev *dev)
730{
731 u32 val32;
732 unsigned char busnum;
733 void __iomem *target;
734 void __iomem *bbar;
735 struct iommu_table *tbl;
736
737 busnum = dev->bus->number;
738 tbl = dev->sysdata;
739 bbar = tbl->bbar;
740
741 /* disable TCE in PHB Config Register */
742 target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET);
743 val32 = be32_to_cpu(readl(target));
744 val32 &= ~(PHB_TCE_ENABLE | PHB_DAC_DISABLE | PHB_MCSR_ENABLE);
745
746 printk(KERN_INFO "Calgary: disabling translation on PHB %d!\n", busnum);
747 writel(cpu_to_be32(val32), target);
748 readl(target); /* flush */
749
750 del_timer_sync(&tbl->watchdog_timer);
751}
752
753static inline unsigned int __init locate_register_space(struct pci_dev *dev)
754{
755 int rionodeid;
756 u32 address;
757
758 rionodeid = (dev->bus->number % 15 > 4) ? 3 : 2;
759 /*
760 * register space address calculation as follows:
761 * FE0MB-8MB*OneBasedChassisNumber+1MB*(RioNodeId-ChassisBase)
762 * ChassisBase is always zero for x366/x260/x460
763 * RioNodeId is 2 for first Calgary, 3 for second Calgary
764 */
765 address = START_ADDRESS -
766 (0x800000 * (ONE_BASED_CHASSIS_NUM + dev->bus->number / 15)) +
767 (0x100000) * (rionodeid - CHASSIS_BASE);
768 return address;
769}
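The "FE0MB - 8MB*OneBasedChassisNumber + 1MB*(RioNodeId - ChassisBase)" formula above is easier to follow with numbers plugged in; the sketch below (editor's illustration, not part of the patch) repeats the calculation for the first chassis, where the first Calgary serves buses 0-4 and the second serves buses 5-14:

#include <stdio.h>

#define START_ADDRESS		0xfe000000u	/* the "FE0MB" base */
#define CHASSIS_BASE		0
#define ONE_BASED_CHASSIS_NUM	1

static unsigned int register_space(unsigned int busno)
{
	int rionodeid = (busno % 15 > 4) ? 3 : 2;

	return START_ADDRESS -
		(0x800000 * (ONE_BASED_CHASSIS_NUM + busno / 15)) +
		(0x100000 * (rionodeid - CHASSIS_BASE));
}

int main(void)
{
	/* bus 0 -> 0xfe000000 - 8MB + 2MB = 0xfda00000 (first Calgary)  */
	/* bus 5 -> 0xfe000000 - 8MB + 3MB = 0xfdb00000 (second Calgary) */
	printf("%#x %#x\n", register_space(0), register_space(5));
	return 0;
}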
770
771static int __init calgary_init_one_nontranslated(struct pci_dev *dev)
772{
773 dev->sysdata = NULL;
774 dev->bus->self = dev;
775
776 return 0;
777}
778
779static int __init calgary_init_one(struct pci_dev *dev)
780{
781 u32 address;
782 void __iomem *bbar;
783 int ret;
784
785 address = locate_register_space(dev);
786 /* map entire 1MB of Calgary config space */
787 bbar = ioremap_nocache(address, 1024 * 1024);
788 if (!bbar) {
789 ret = -ENODATA;
790 goto done;
791 }
792
793 ret = calgary_setup_tar(dev, bbar);
794 if (ret)
795 goto iounmap;
796
797 dev->bus->self = dev;
798 calgary_enable_translation(dev);
799
800 return 0;
801
802iounmap:
803 iounmap(bbar);
804done:
805 return ret;
806}
807
808static int __init calgary_init(void)
809{
810 int i, ret = -ENODEV;
811 struct pci_dev *dev = NULL;
812
813 for (i = 0; i <= num_online_nodes() * MAX_NUM_OF_PHBS; i++) {
814 dev = pci_get_device(PCI_VENDOR_ID_IBM,
815 PCI_DEVICE_ID_IBM_CALGARY,
816 dev);
817 if (!dev)
818 break;
819 if (!translate_phb(dev)) {
820 calgary_init_one_nontranslated(dev);
821 continue;
822 }
823 if (!tce_table_kva[i] && !translate_empty_slots) {
824 pci_dev_put(dev);
825 continue;
826 }
827 ret = calgary_init_one(dev);
828 if (ret)
829 goto error;
830 }
831
832 return ret;
833
834error:
835 for (i--; i >= 0; i--) {
836 dev = pci_find_device_reverse(PCI_VENDOR_ID_IBM,
837 PCI_DEVICE_ID_IBM_CALGARY,
838 dev);
839 if (!translate_phb(dev)) {
840 pci_dev_put(dev);
841 continue;
842 }
843 if (!tce_table_kva[i] && !translate_empty_slots)
844 continue;
845 calgary_disable_translation(dev);
846 calgary_free_tar(dev);
847 pci_dev_put(dev);
848 }
849
850 return ret;
851}
852
853static inline int __init determine_tce_table_size(u64 ram)
854{
855 int ret;
856
857 if (specified_table_size != TCE_TABLE_SIZE_UNSPECIFIED)
858 return specified_table_size;
859
860 /*
861 * Table sizes are from 0 to 7 (TCE_TABLE_SIZE_64K to
862 * TCE_TABLE_SIZE_8M). Table size 0 has 8K entries and each
863 * larger table size has twice as many entries, so shift the
864 * max ram address by 13 to divide by 8K and then look at the
865 * order of the result to choose between 0-7.
866 */
867 ret = get_order(ram >> 13);
868 if (ret > TCE_TABLE_SIZE_8M)
869 ret = TCE_TABLE_SIZE_8M;
870
871 return ret;
872}
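To make the sizing comment concrete: table size index n holds 8K << n entries, and (per the names accepted by calgary_parse_options() below) the table itself grows from 64K at index 0 to 8M at index 7. A rough userspace sketch of the heuristic (editor's illustration; the order calculation is re-implemented locally to match the kernel helper's behaviour):

#include <stdio.h>

#define TCE_TABLE_SIZE_8M 7

/* smallest order such that (4 KB << order) covers size */
static int order_of(unsigned long long size)
{
	int order = 0;

	size = (size - 1) >> 12;
	while (size) {
		size >>= 1;
		order++;
	}
	return order;
}

int main(void)
{
	unsigned long long ram;

	for (ram = 1ULL << 30; ram <= 1ULL << 33; ram <<= 1) {
		int ret = order_of(ram >> 13);

		if (ret > TCE_TABLE_SIZE_8M)
			ret = TCE_TABLE_SIZE_8M;
		/* 1 GB -> 5 ("2M"), 2 GB -> 6 ("4M"), 4 GB and up -> 7 ("8M") */
		printf("%llu GB of RAM -> table size %d\n", ram >> 30, ret);
	}
	return 0;
}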
873
874void __init detect_calgary(void)
875{
876 u32 val;
877 int bus, table_idx;
878 void *tbl;
879 int detected = 0;
880
881 /*
882 * if the user specified iommu=off or iommu=soft or we found
883 * another HW IOMMU already, bail out.
884 */
885 if (swiotlb || no_iommu || iommu_detected)
886 return;
887
888 specified_table_size = determine_tce_table_size(end_pfn * PAGE_SIZE);
889
890 for (bus = 0, table_idx = 0;
891 bus <= num_online_nodes() * MAX_PHB_BUS_NUM;
892 bus++) {
893 BUG_ON(bus > MAX_NUMNODES * MAX_PHB_BUS_NUM);
894 if (read_pci_config(bus, 0, 0, 0) != PCI_VENDOR_DEVICE_ID_CALGARY)
895 continue;
896 if (test_bit(bus, translation_disabled)) {
897 printk(KERN_INFO "Calgary: translation is disabled for "
898 "PHB 0x%x\n", bus);
899 /* skip this phb, don't allocate a tbl for it */
900 tce_table_kva[table_idx] = NULL;
901 table_idx++;
902 continue;
903 }
904 /*
905 * scan the first slot of the PCI bus to see if there
906 * are any devices present
907 */
908 val = read_pci_config(bus, 1, 0, 0);
909 if (val != 0xffffffff || translate_empty_slots) {
910 tbl = alloc_tce_table();
911 if (!tbl)
912 goto cleanup;
913 detected = 1;
914 } else
915 tbl = NULL;
916
917 tce_table_kva[table_idx] = tbl;
918 table_idx++;
919 }
920
921 if (detected) {
922 iommu_detected = 1;
923 calgary_detected = 1;
924 printk(KERN_INFO "PCI-DMA: Calgary IOMMU detected. "
925 "TCE table spec is %d.\n", specified_table_size);
926 }
927 return;
928
929cleanup:
930 for (--table_idx; table_idx >= 0; --table_idx)
931 if (tce_table_kva[table_idx])
932 free_tce_table(tce_table_kva[table_idx]);
933}
934
935int __init calgary_iommu_init(void)
936{
937 int ret;
938
939 if (no_iommu || swiotlb)
940 return -ENODEV;
941
942 if (!calgary_detected)
943 return -ENODEV;
944
945 /* ok, we're trying to use Calgary - let's roll */
946 printk(KERN_INFO "PCI-DMA: Using Calgary IOMMU\n");
947
948 ret = calgary_init();
949 if (ret) {
950 printk(KERN_ERR "PCI-DMA: Calgary init failed %d, "
951 "falling back to no_iommu\n", ret);
952 if (end_pfn > MAX_DMA32_PFN)
953 printk(KERN_ERR "WARNING more than 4GB of memory, "
954 "32bit PCI may malfunction.\n");
955 return ret;
956 }
957
958 force_iommu = 1;
959 dma_ops = &calgary_dma_ops;
960
961 return 0;
962}
963
964static int __init calgary_parse_options(char *p)
965{
966 unsigned int bridge;
967 size_t len;
968 char* endp;
969
970 while (*p) {
971 if (!strncmp(p, "64k", 3))
972 specified_table_size = TCE_TABLE_SIZE_64K;
973 else if (!strncmp(p, "128k", 4))
974 specified_table_size = TCE_TABLE_SIZE_128K;
975 else if (!strncmp(p, "256k", 4))
976 specified_table_size = TCE_TABLE_SIZE_256K;
977 else if (!strncmp(p, "512k", 4))
978 specified_table_size = TCE_TABLE_SIZE_512K;
979 else if (!strncmp(p, "1M", 2))
980 specified_table_size = TCE_TABLE_SIZE_1M;
981 else if (!strncmp(p, "2M", 2))
982 specified_table_size = TCE_TABLE_SIZE_2M;
983 else if (!strncmp(p, "4M", 2))
984 specified_table_size = TCE_TABLE_SIZE_4M;
985 else if (!strncmp(p, "8M", 2))
986 specified_table_size = TCE_TABLE_SIZE_8M;
987
988 len = strlen("translate_empty_slots");
989 if (!strncmp(p, "translate_empty_slots", len))
990 translate_empty_slots = 1;
991
992 len = strlen("disable");
993 if (!strncmp(p, "disable", len)) {
994 p += len;
995 if (*p == '=')
996 ++p;
997 if (*p == '\0')
998 break;
999 bridge = simple_strtol(p, &endp, 0);
1000 if (p == endp)
1001 break;
1002
1003 if (bridge <= (num_online_nodes() * MAX_PHB_BUS_NUM)) {
1004 printk(KERN_INFO "Calgary: disabling "
1005 "translation for PHB 0x%x\n", bridge);
1006 set_bit(bridge, translation_disabled);
1007 }
1008 }
1009
1010 p = strpbrk(p, ",");
1011 if (!p)
1012 break;
1013
1014 p++; /* skip ',' */
1015 }
1016 return 1;
1017}
1018__setup("calgary=", calgary_parse_options);
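As a usage illustration only (the bus number and table size are arbitrary examples, not taken from the patch), the options recognised by the parser above combine on the kernel command line separated by commas:

	calgary=256k,translate_empty_slots,disable=14

which requests a 256 KB TCE table, enables translation for PHBs with empty slots, and turns translation off for PHB/bus number 14.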
diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c
index a9275c9557cf..9c44f4f2433d 100644
--- a/arch/x86_64/kernel/pci-dma.c
+++ b/arch/x86_64/kernel/pci-dma.c
@@ -9,6 +9,7 @@
9#include <linux/module.h> 9#include <linux/module.h>
10#include <asm/io.h> 10#include <asm/io.h>
11#include <asm/proto.h> 11#include <asm/proto.h>
12#include <asm/calgary.h>
12 13
13int iommu_merge __read_mostly = 0; 14int iommu_merge __read_mostly = 0;
14EXPORT_SYMBOL(iommu_merge); 15EXPORT_SYMBOL(iommu_merge);
@@ -33,12 +34,15 @@ int panic_on_overflow __read_mostly = 0;
33int force_iommu __read_mostly= 0; 34int force_iommu __read_mostly= 0;
34#endif 35#endif
35 36
37/* Set this to 1 if there is a HW IOMMU in the system */
38int iommu_detected __read_mostly = 0;
39
36/* Dummy device used for NULL arguments (normally ISA). Better would 40/* Dummy device used for NULL arguments (normally ISA). Better would
37 be probably a smaller DMA mask, but this is bug-to-bug compatible 41 be probably a smaller DMA mask, but this is bug-to-bug compatible
38 to i386. */ 42 to i386. */
39struct device fallback_dev = { 43struct device fallback_dev = {
40 .bus_id = "fallback device", 44 .bus_id = "fallback device",
41 .coherent_dma_mask = 0xffffffff, 45 .coherent_dma_mask = DMA_32BIT_MASK,
42 .dma_mask = &fallback_dev.coherent_dma_mask, 46 .dma_mask = &fallback_dev.coherent_dma_mask,
43}; 47};
44 48
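For reference, the DMA_*BIT_MASK constants substituted for the literal masks in this file come from <linux/dma-mapping.h>; they are plain inclusive bit masks along these lines (editor's sketch of their likely form, not copied from this tree):

#define DMA_64BIT_MASK	0xffffffffffffffffULL
#define DMA_40BIT_MASK	0x000000ffffffffffULL
#define DMA_32BIT_MASK	0x00000000ffffffffULL
#define DMA_24BIT_MASK	0x0000000000ffffffULL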
@@ -77,7 +81,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
77 dev = &fallback_dev; 81 dev = &fallback_dev;
78 dma_mask = dev->coherent_dma_mask; 82 dma_mask = dev->coherent_dma_mask;
79 if (dma_mask == 0) 83 if (dma_mask == 0)
80 dma_mask = 0xffffffff; 84 dma_mask = DMA_32BIT_MASK;
81 85
82 /* Don't invoke OOM killer */ 86 /* Don't invoke OOM killer */
83 gfp |= __GFP_NORETRY; 87 gfp |= __GFP_NORETRY;
@@ -90,7 +94,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
90 larger than 16MB and in this case we have a chance of 94 larger than 16MB and in this case we have a chance of
91 finding fitting memory in the next higher zone first. If 95 finding fitting memory in the next higher zone first. If
92 not retry with true GFP_DMA. -AK */ 96 not retry with true GFP_DMA. -AK */
93 if (dma_mask <= 0xffffffff) 97 if (dma_mask <= DMA_32BIT_MASK)
94 gfp |= GFP_DMA32; 98 gfp |= GFP_DMA32;
95 99
96 again: 100 again:
@@ -111,7 +115,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
111 115
112 /* Don't use the 16MB ZONE_DMA unless absolutely 116 /* Don't use the 16MB ZONE_DMA unless absolutely
113 needed. It's better to use remapping first. */ 117 needed. It's better to use remapping first. */
114 if (dma_mask < 0xffffffff && !(gfp & GFP_DMA)) { 118 if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
115 gfp = (gfp & ~GFP_DMA32) | GFP_DMA; 119 gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
116 goto again; 120 goto again;
117 } 121 }
@@ -174,7 +178,7 @@ int dma_supported(struct device *dev, u64 mask)
174 /* Copied from i386. Doesn't make much sense, because it will 178 /* Copied from i386. Doesn't make much sense, because it will
175 only work for pci_alloc_coherent. 179 only work for pci_alloc_coherent.
176 The caller just has to use GFP_DMA in this case. */ 180 The caller just has to use GFP_DMA in this case. */
177 if (mask < 0x00ffffff) 181 if (mask < DMA_24BIT_MASK)
178 return 0; 182 return 0;
179 183
180 /* Tell the device to use SAC when IOMMU force is on. This 184 /* Tell the device to use SAC when IOMMU force is on. This
@@ -189,7 +193,7 @@ int dma_supported(struct device *dev, u64 mask)
189 SAC for these. Assume all masks <= 40 bits are of this 193 SAC for these. Assume all masks <= 40 bits are of this
190 type. Normally this doesn't make any difference, but gives 194 type. Normally this doesn't make any difference, but gives
191 more gentle handling of IOMMU overflow. */ 195 more gentle handling of IOMMU overflow. */
192 if (iommu_sac_force && (mask >= 0xffffffffffULL)) { 196 if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
193 printk(KERN_INFO "%s: Force SAC with mask %Lx\n", dev->bus_id,mask); 197 printk(KERN_INFO "%s: Force SAC with mask %Lx\n", dev->bus_id,mask);
194 return 0; 198 return 0;
195 } 199 }
@@ -266,7 +270,7 @@ __init int iommu_setup(char *p)
266 swiotlb = 1; 270 swiotlb = 1;
267#endif 271#endif
268 272
269#ifdef CONFIG_GART_IOMMU 273#ifdef CONFIG_IOMMU
270 gart_parse_options(p); 274 gart_parse_options(p);
271#endif 275#endif
272 276
@@ -276,3 +280,40 @@ __init int iommu_setup(char *p)
276 } 280 }
277 return 1; 281 return 1;
278} 282}
283__setup("iommu=", iommu_setup);
284
285void __init pci_iommu_alloc(void)
286{
287 /*
288 * The order of these functions is important for
289 * fall-back/fail-over reasons
290 */
291#ifdef CONFIG_IOMMU
292 iommu_hole_init();
293#endif
294
295#ifdef CONFIG_CALGARY_IOMMU
296 detect_calgary();
297#endif
298
299#ifdef CONFIG_SWIOTLB
300 pci_swiotlb_init();
301#endif
302}
303
304static int __init pci_iommu_init(void)
305{
306#ifdef CONFIG_CALGARY_IOMMU
307 calgary_iommu_init();
308#endif
309
310#ifdef CONFIG_IOMMU
311 gart_iommu_init();
312#endif
313
314 no_iommu_init();
315 return 0;
316}
317
318/* Must execute after PCI subsystem */
319fs_initcall(pci_iommu_init);
diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c
index 82a7c9bfdfa0..4ca674d16b09 100644
--- a/arch/x86_64/kernel/pci-gart.c
+++ b/arch/x86_64/kernel/pci-gart.c
@@ -32,6 +32,7 @@
32#include <asm/kdebug.h> 32#include <asm/kdebug.h>
33#include <asm/swiotlb.h> 33#include <asm/swiotlb.h>
34#include <asm/dma.h> 34#include <asm/dma.h>
35#include <asm/k8.h>
35 36
36unsigned long iommu_bus_base; /* GART remapping area (physical) */ 37unsigned long iommu_bus_base; /* GART remapping area (physical) */
37static unsigned long iommu_size; /* size of remapping area bytes */ 38static unsigned long iommu_size; /* size of remapping area bytes */
@@ -46,8 +47,6 @@ u32 *iommu_gatt_base; /* Remapping table */
46 also seen with Qlogic at least). */ 47 also seen with Qlogic at least). */
47int iommu_fullflush = 1; 48int iommu_fullflush = 1;
48 49
49#define MAX_NB 8
50
51/* Allocation bitmap for the remapping area */ 50/* Allocation bitmap for the remapping area */
52static DEFINE_SPINLOCK(iommu_bitmap_lock); 51static DEFINE_SPINLOCK(iommu_bitmap_lock);
53static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */ 52static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */
@@ -63,13 +62,6 @@ static u32 gart_unmapped_entry;
63#define to_pages(addr,size) \ 62#define to_pages(addr,size) \
64 (round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT) 63 (round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
65 64
66#define for_all_nb(dev) \
67 dev = NULL; \
68 while ((dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1103, dev))!=NULL)
69
70static struct pci_dev *northbridges[MAX_NB];
71static u32 northbridge_flush_word[MAX_NB];
72
73#define EMERGENCY_PAGES 32 /* = 128KB */ 65#define EMERGENCY_PAGES 32 /* = 128KB */
74 66
75#ifdef CONFIG_AGP 67#ifdef CONFIG_AGP
@@ -93,7 +85,7 @@ static unsigned long alloc_iommu(int size)
93 offset = find_next_zero_string(iommu_gart_bitmap,next_bit,iommu_pages,size); 85 offset = find_next_zero_string(iommu_gart_bitmap,next_bit,iommu_pages,size);
94 if (offset == -1) { 86 if (offset == -1) {
95 need_flush = 1; 87 need_flush = 1;
96 offset = find_next_zero_string(iommu_gart_bitmap,0,next_bit,size); 88 offset = find_next_zero_string(iommu_gart_bitmap,0,iommu_pages,size);
97 } 89 }
98 if (offset != -1) { 90 if (offset != -1) {
99 set_bit_string(iommu_gart_bitmap, offset, size); 91 set_bit_string(iommu_gart_bitmap, offset, size);
@@ -120,44 +112,17 @@ static void free_iommu(unsigned long offset, int size)
120/* 112/*
121 * Use global flush state to avoid races with multiple flushers. 113 * Use global flush state to avoid races with multiple flushers.
122 */ 114 */
123static void flush_gart(struct device *dev) 115static void flush_gart(void)
124{ 116{
125 unsigned long flags; 117 unsigned long flags;
126 int flushed = 0;
127 int i, max;
128
129 spin_lock_irqsave(&iommu_bitmap_lock, flags); 118 spin_lock_irqsave(&iommu_bitmap_lock, flags);
130 if (need_flush) { 119 if (need_flush) {
131 max = 0; 120 k8_flush_garts();
132 for (i = 0; i < MAX_NB; i++) {
133 if (!northbridges[i])
134 continue;
135 pci_write_config_dword(northbridges[i], 0x9c,
136 northbridge_flush_word[i] | 1);
137 flushed++;
138 max = i;
139 }
140 for (i = 0; i <= max; i++) {
141 u32 w;
142 if (!northbridges[i])
143 continue;
144 /* Make sure the hardware actually executed the flush. */
145 for (;;) {
146 pci_read_config_dword(northbridges[i], 0x9c, &w);
147 if (!(w & 1))
148 break;
149 cpu_relax();
150 }
151 }
152 if (!flushed)
153 printk("nothing to flush?\n");
154 need_flush = 0; 121 need_flush = 0;
155 } 122 }
156 spin_unlock_irqrestore(&iommu_bitmap_lock, flags); 123 spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
157} 124}
158 125
159
160
161#ifdef CONFIG_IOMMU_LEAK 126#ifdef CONFIG_IOMMU_LEAK
162 127
163#define SET_LEAK(x) if (iommu_leak_tab) \ 128#define SET_LEAK(x) if (iommu_leak_tab) \
@@ -266,7 +231,7 @@ static dma_addr_t gart_map_simple(struct device *dev, char *buf,
266 size_t size, int dir) 231 size_t size, int dir)
267{ 232{
268 dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir); 233 dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);
269 flush_gart(dev); 234 flush_gart();
270 return map; 235 return map;
271} 236}
272 237
@@ -289,6 +254,28 @@ dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir)
289} 254}
290 255
291/* 256/*
257 * Free a DMA mapping.
258 */
259void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
260 size_t size, int direction)
261{
262 unsigned long iommu_page;
263 int npages;
264 int i;
265
266 if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
267 dma_addr >= iommu_bus_base + iommu_size)
268 return;
269 iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
270 npages = to_pages(dma_addr, size);
271 for (i = 0; i < npages; i++) {
272 iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
273 CLEAR_LEAK(iommu_page + i);
274 }
275 free_iommu(iommu_page, npages);
276}
277
278/*
292 * Wrapper for pci_unmap_single working with scatterlists. 279 * Wrapper for pci_unmap_single working with scatterlists.
293 */ 280 */
294void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) 281void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
@@ -299,7 +286,7 @@ void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int di
299 struct scatterlist *s = &sg[i]; 286 struct scatterlist *s = &sg[i];
300 if (!s->dma_length || !s->length) 287 if (!s->dma_length || !s->length)
301 break; 288 break;
302 dma_unmap_single(dev, s->dma_address, s->dma_length, dir); 289 gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
303 } 290 }
304} 291}
305 292
@@ -329,7 +316,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
329 s->dma_address = addr; 316 s->dma_address = addr;
330 s->dma_length = s->length; 317 s->dma_length = s->length;
331 } 318 }
332 flush_gart(dev); 319 flush_gart();
333 return nents; 320 return nents;
334} 321}
335 322
@@ -436,13 +423,13 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
436 if (dma_map_cont(sg, start, i, sg+out, pages, need) < 0) 423 if (dma_map_cont(sg, start, i, sg+out, pages, need) < 0)
437 goto error; 424 goto error;
438 out++; 425 out++;
439 flush_gart(dev); 426 flush_gart();
440 if (out < nents) 427 if (out < nents)
441 sg[out].dma_length = 0; 428 sg[out].dma_length = 0;
442 return out; 429 return out;
443 430
444error: 431error:
445 flush_gart(NULL); 432 flush_gart();
446 gart_unmap_sg(dev, sg, nents, dir); 433 gart_unmap_sg(dev, sg, nents, dir);
447 /* When it was forced or merged try again in a dumb way */ 434 /* When it was forced or merged try again in a dumb way */
448 if (force_iommu || iommu_merge) { 435 if (force_iommu || iommu_merge) {
@@ -458,28 +445,6 @@ error:
458 return 0; 445 return 0;
459} 446}
460 447
461/*
462 * Free a DMA mapping.
463 */
464void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
465 size_t size, int direction)
466{
467 unsigned long iommu_page;
468 int npages;
469 int i;
470
471 if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
472 dma_addr >= iommu_bus_base + iommu_size)
473 return;
474 iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
475 npages = to_pages(dma_addr, size);
476 for (i = 0; i < npages; i++) {
477 iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
478 CLEAR_LEAK(iommu_page + i);
479 }
480 free_iommu(iommu_page, npages);
481}
482
483static int no_agp; 448static int no_agp;
484 449
485static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size) 450static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
@@ -532,10 +497,13 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
532 void *gatt; 497 void *gatt;
533 unsigned aper_base, new_aper_base; 498 unsigned aper_base, new_aper_base;
534 unsigned aper_size, gatt_size, new_aper_size; 499 unsigned aper_size, gatt_size, new_aper_size;
535 500 int i;
501
536 printk(KERN_INFO "PCI-DMA: Disabling AGP.\n"); 502 printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
537 aper_size = aper_base = info->aper_size = 0; 503 aper_size = aper_base = info->aper_size = 0;
538 for_all_nb(dev) { 504 dev = NULL;
505 for (i = 0; i < num_k8_northbridges; i++) {
506 dev = k8_northbridges[i];
539 new_aper_base = read_aperture(dev, &new_aper_size); 507 new_aper_base = read_aperture(dev, &new_aper_size);
540 if (!new_aper_base) 508 if (!new_aper_base)
541 goto nommu; 509 goto nommu;
@@ -558,11 +526,12 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
558 panic("Cannot allocate GATT table"); 526 panic("Cannot allocate GATT table");
559 memset(gatt, 0, gatt_size); 527 memset(gatt, 0, gatt_size);
560 agp_gatt_table = gatt; 528 agp_gatt_table = gatt;
561 529
562 for_all_nb(dev) { 530 for (i = 0; i < num_k8_northbridges; i++) {
563 u32 ctl; 531 u32 ctl;
564 u32 gatt_reg; 532 u32 gatt_reg;
565 533
534 dev = k8_northbridges[i];
566 gatt_reg = __pa(gatt) >> 12; 535 gatt_reg = __pa(gatt) >> 12;
567 gatt_reg <<= 4; 536 gatt_reg <<= 4;
568 pci_write_config_dword(dev, 0x98, gatt_reg); 537 pci_write_config_dword(dev, 0x98, gatt_reg);
@@ -573,7 +542,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
573 542
574 pci_write_config_dword(dev, 0x90, ctl); 543 pci_write_config_dword(dev, 0x90, ctl);
575 } 544 }
576 flush_gart(NULL); 545 flush_gart();
577 546
578 printk("PCI-DMA: aperture base @ %x size %u KB\n",aper_base, aper_size>>10); 547 printk("PCI-DMA: aperture base @ %x size %u KB\n",aper_base, aper_size>>10);
579 return 0; 548 return 0;
@@ -602,15 +571,19 @@ static struct dma_mapping_ops gart_dma_ops = {
602 .unmap_sg = gart_unmap_sg, 571 .unmap_sg = gart_unmap_sg,
603}; 572};
604 573
605static int __init pci_iommu_init(void) 574void __init gart_iommu_init(void)
606{ 575{
607 struct agp_kern_info info; 576 struct agp_kern_info info;
608 unsigned long aper_size; 577 unsigned long aper_size;
609 unsigned long iommu_start; 578 unsigned long iommu_start;
610 struct pci_dev *dev;
611 unsigned long scratch; 579 unsigned long scratch;
612 long i; 580 long i;
613 581
582 if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
583 printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
584 return;
585 }
586
614#ifndef CONFIG_AGP_AMD64 587#ifndef CONFIG_AGP_AMD64
615 no_agp = 1; 588 no_agp = 1;
616#else 589#else
@@ -622,7 +595,11 @@ static int __init pci_iommu_init(void)
622#endif 595#endif
623 596
624 if (swiotlb) 597 if (swiotlb)
625 return -1; 598 return;
599
600 /* Did we detect a different HW IOMMU? */
601 if (iommu_detected && !iommu_aperture)
602 return;
626 603
627 if (no_iommu || 604 if (no_iommu ||
628 (!force_iommu && end_pfn <= MAX_DMA32_PFN) || 605 (!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
@@ -634,15 +611,7 @@ static int __init pci_iommu_init(void)
634 "but IOMMU not available.\n" 611 "but IOMMU not available.\n"
635 KERN_ERR "WARNING 32bit PCI may malfunction.\n"); 612 KERN_ERR "WARNING 32bit PCI may malfunction.\n");
636 } 613 }
637 return -1; 614 return;
638 }
639
640 i = 0;
641 for_all_nb(dev)
642 i++;
643 if (i > MAX_NB) {
644 printk(KERN_ERR "PCI-GART: Too many northbridges (%ld). Disabled\n", i);
645 return -1;
646 } 615 }
647 616
648 printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n"); 617 printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
@@ -707,26 +676,10 @@ static int __init pci_iommu_init(void)
707 for (i = EMERGENCY_PAGES; i < iommu_pages; i++) 676 for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
708 iommu_gatt_base[i] = gart_unmapped_entry; 677 iommu_gatt_base[i] = gart_unmapped_entry;
709 678
710 for_all_nb(dev) { 679 flush_gart();
711 u32 flag;
712 int cpu = PCI_SLOT(dev->devfn) - 24;
713 if (cpu >= MAX_NB)
714 continue;
715 northbridges[cpu] = dev;
716 pci_read_config_dword(dev, 0x9c, &flag); /* cache flush word */
717 northbridge_flush_word[cpu] = flag;
718 }
719
720 flush_gart(NULL);
721
722 dma_ops = &gart_dma_ops; 680 dma_ops = &gart_dma_ops;
723
724 return 0;
725} 681}
726 682
727/* Must execute after PCI subsystem */
728fs_initcall(pci_iommu_init);
729
730void gart_parse_options(char *p) 683void gart_parse_options(char *p)
731{ 684{
732 int arg; 685 int arg;
diff --git a/arch/x86_64/kernel/pci-nommu.c b/arch/x86_64/kernel/pci-nommu.c
index 1f6ecc62061d..c4c3cc36ac5b 100644
--- a/arch/x86_64/kernel/pci-nommu.c
+++ b/arch/x86_64/kernel/pci-nommu.c
@@ -4,6 +4,8 @@
4#include <linux/init.h> 4#include <linux/init.h>
5#include <linux/pci.h> 5#include <linux/pci.h>
6#include <linux/string.h> 6#include <linux/string.h>
7#include <linux/dma-mapping.h>
8
7#include <asm/proto.h> 9#include <asm/proto.h>
8#include <asm/processor.h> 10#include <asm/processor.h>
9#include <asm/dma.h> 11#include <asm/dma.h>
@@ -12,10 +14,11 @@ static int
12check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) 14check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
13{ 15{
14 if (hwdev && bus + size > *hwdev->dma_mask) { 16 if (hwdev && bus + size > *hwdev->dma_mask) {
15 if (*hwdev->dma_mask >= 0xffffffffULL) 17 if (*hwdev->dma_mask >= DMA_32BIT_MASK)
16 printk(KERN_ERR 18 printk(KERN_ERR
17 "nommu_%s: overflow %Lx+%lu of device mask %Lx\n", 19 "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
18 name, (long long)bus, size, (long long)*hwdev->dma_mask); 20 name, (long long)bus, size,
21 (long long)*hwdev->dma_mask);
19 return 0; 22 return 0;
20 } 23 }
21 return 1; 24 return 1;
diff --git a/arch/x86_64/kernel/pci-swiotlb.c b/arch/x86_64/kernel/pci-swiotlb.c
index 990ed67896f2..ebdb77fe2057 100644
--- a/arch/x86_64/kernel/pci-swiotlb.c
+++ b/arch/x86_64/kernel/pci-swiotlb.c
@@ -31,7 +31,7 @@ struct dma_mapping_ops swiotlb_dma_ops = {
31void pci_swiotlb_init(void) 31void pci_swiotlb_init(void)
32{ 32{
33 /* don't initialize swiotlb if iommu=off (no_iommu=1) */ 33 /* don't initialize swiotlb if iommu=off (no_iommu=1) */
34 if (!iommu_aperture && !no_iommu && 34 if (!iommu_detected && !no_iommu &&
35 (end_pfn > MAX_DMA32_PFN || force_iommu)) 35 (end_pfn > MAX_DMA32_PFN || force_iommu))
36 swiotlb = 1; 36 swiotlb = 1;
37 if (swiotlb) { 37 if (swiotlb) {
diff --git a/arch/x86_64/kernel/pmtimer.c b/arch/x86_64/kernel/pmtimer.c
index bf421ed26808..7554458dc9cb 100644
--- a/arch/x86_64/kernel/pmtimer.c
+++ b/arch/x86_64/kernel/pmtimer.c
@@ -27,7 +27,7 @@
27/* The I/O port the PMTMR resides at. 27/* The I/O port the PMTMR resides at.
28 * The location is detected during setup_arch(), 28 * The location is detected during setup_arch(),
29 * in arch/i386/kernel/acpi/boot.c */ 29 * in arch/i386/kernel/acpi/boot.c */
30u32 pmtmr_ioport; 30u32 pmtmr_ioport __read_mostly;
31 31
32/* value of the Power timer at last timer interrupt */ 32/* value of the Power timer at last timer interrupt */
33static u32 offset_delay; 33static u32 offset_delay;
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index fb903e65e079..ca56e19b8b6e 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -10,7 +10,6 @@
10 * Andi Kleen. 10 * Andi Kleen.
11 * 11 *
12 * CPU hotplug support - ashok.raj@intel.com 12 * CPU hotplug support - ashok.raj@intel.com
13 * $Id: process.c,v 1.38 2002/01/15 10:08:03 ak Exp $
14 */ 13 */
15 14
16/* 15/*
@@ -64,6 +63,7 @@ EXPORT_SYMBOL(boot_option_idle_override);
64 * Powermanagement idle function, if any.. 63 * Powermanagement idle function, if any..
65 */ 64 */
66void (*pm_idle)(void); 65void (*pm_idle)(void);
66EXPORT_SYMBOL(pm_idle);
67static DEFINE_PER_CPU(unsigned int, cpu_idle_state); 67static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
68 68
69static ATOMIC_NOTIFIER_HEAD(idle_notifier); 69static ATOMIC_NOTIFIER_HEAD(idle_notifier);
@@ -111,7 +111,7 @@ static void default_idle(void)
111{ 111{
112 local_irq_enable(); 112 local_irq_enable();
113 113
114 clear_thread_flag(TIF_POLLING_NRFLAG); 114 current_thread_info()->status &= ~TS_POLLING;
115 smp_mb__after_clear_bit(); 115 smp_mb__after_clear_bit();
116 while (!need_resched()) { 116 while (!need_resched()) {
117 local_irq_disable(); 117 local_irq_disable();
@@ -120,7 +120,7 @@ static void default_idle(void)
120 else 120 else
121 local_irq_enable(); 121 local_irq_enable();
122 } 122 }
123 set_thread_flag(TIF_POLLING_NRFLAG); 123 current_thread_info()->status |= TS_POLLING;
124} 124}
125 125
126/* 126/*
@@ -203,8 +203,7 @@ static inline void play_dead(void)
203 */ 203 */
204void cpu_idle (void) 204void cpu_idle (void)
205{ 205{
206 set_thread_flag(TIF_POLLING_NRFLAG); 206 current_thread_info()->status |= TS_POLLING;
207
208 /* endless idle loop with no priority at all */ 207 /* endless idle loop with no priority at all */
209 while (1) { 208 while (1) {
210 while (!need_resched()) { 209 while (!need_resched()) {
@@ -335,7 +334,7 @@ void show_regs(struct pt_regs *regs)
335{ 334{
336 printk("CPU %d:", smp_processor_id()); 335 printk("CPU %d:", smp_processor_id());
337 __show_regs(regs); 336 __show_regs(regs);
338 show_trace(&regs->rsp); 337 show_trace(NULL, regs, (void *)(regs + 1));
339} 338}
340 339
341/* 340/*
@@ -365,8 +364,11 @@ void flush_thread(void)
365 struct task_struct *tsk = current; 364 struct task_struct *tsk = current;
366 struct thread_info *t = current_thread_info(); 365 struct thread_info *t = current_thread_info();
367 366
368 if (t->flags & _TIF_ABI_PENDING) 367 if (t->flags & _TIF_ABI_PENDING) {
369 t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32); 368 t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
369 if (t->flags & _TIF_IA32)
370 current_thread_info()->status |= TS_COMPAT;
371 }
370 372
371 tsk->thread.debugreg0 = 0; 373 tsk->thread.debugreg0 = 0;
372 tsk->thread.debugreg1 = 0; 374 tsk->thread.debugreg1 = 0;
diff --git a/arch/x86_64/kernel/reboot.c b/arch/x86_64/kernel/reboot.c
index 57117b8beb2b..2d6769847456 100644
--- a/arch/x86_64/kernel/reboot.c
+++ b/arch/x86_64/kernel/reboot.c
@@ -20,6 +20,7 @@
20 * Power off function, if any 20 * Power off function, if any
21 */ 21 */
22void (*pm_power_off)(void); 22void (*pm_power_off)(void);
23EXPORT_SYMBOL(pm_power_off);
23 24
24static long no_idt[3]; 25static long no_idt[3];
25static enum { 26static enum {
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index fb850b52b4da..1129918ede82 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * Nov 2001 Dave Jones <davej@suse.de> 6 * Nov 2001 Dave Jones <davej@suse.de>
7 * Forked from i386 setup code. 7 * Forked from i386 setup code.
8 *
9 * $Id$
10 */ 8 */
11 9
12/* 10/*
@@ -65,9 +63,7 @@
65#include <asm/setup.h> 63#include <asm/setup.h>
66#include <asm/mach_apic.h> 64#include <asm/mach_apic.h>
67#include <asm/numa.h> 65#include <asm/numa.h>
68#include <asm/swiotlb.h>
69#include <asm/sections.h> 66#include <asm/sections.h>
70#include <asm/gart-mapping.h>
71#include <asm/dmi.h> 67#include <asm/dmi.h>
72 68
73/* 69/*
@@ -75,6 +71,7 @@
75 */ 71 */
76 72
77struct cpuinfo_x86 boot_cpu_data __read_mostly; 73struct cpuinfo_x86 boot_cpu_data __read_mostly;
74EXPORT_SYMBOL(boot_cpu_data);
78 75
79unsigned long mmu_cr4_features; 76unsigned long mmu_cr4_features;
80 77
@@ -103,12 +100,14 @@ char dmi_alloc_data[DMI_MAX_DATA];
103 * Setup options 100 * Setup options
104 */ 101 */
105struct screen_info screen_info; 102struct screen_info screen_info;
103EXPORT_SYMBOL(screen_info);
106struct sys_desc_table_struct { 104struct sys_desc_table_struct {
107 unsigned short length; 105 unsigned short length;
108 unsigned char table[0]; 106 unsigned char table[0];
109}; 107};
110 108
111struct edid_info edid_info; 109struct edid_info edid_info;
110EXPORT_SYMBOL_GPL(edid_info);
112struct e820map e820; 111struct e820map e820;
113 112
114extern int root_mountflags; 113extern int root_mountflags;
@@ -473,80 +472,6 @@ contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
473} 472}
474#endif 473#endif
475 474
476/* Use inline assembly to define this because the nops are defined
477 as inline assembly strings in the include files and we cannot
478 get them easily into strings. */
479asm("\t.data\nk8nops: "
480 K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
481 K8_NOP7 K8_NOP8);
482
483extern unsigned char k8nops[];
484static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
485 NULL,
486 k8nops,
487 k8nops + 1,
488 k8nops + 1 + 2,
489 k8nops + 1 + 2 + 3,
490 k8nops + 1 + 2 + 3 + 4,
491 k8nops + 1 + 2 + 3 + 4 + 5,
492 k8nops + 1 + 2 + 3 + 4 + 5 + 6,
493 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
494};
495
496extern char __vsyscall_0;
497
498/* Replace instructions with better alternatives for this CPU type.
499
500 This runs before SMP is initialized to avoid SMP problems with
501 self modifying code. This implies that assymetric systems where
502 APs have less capabilities than the boot processor are not handled.
503 In this case boot with "noreplacement". */
504void apply_alternatives(void *start, void *end)
505{
506 struct alt_instr *a;
507 int diff, i, k;
508 for (a = start; (void *)a < end; a++) {
509 u8 *instr;
510
511 if (!boot_cpu_has(a->cpuid))
512 continue;
513
514 BUG_ON(a->replacementlen > a->instrlen);
515 instr = a->instr;
516 /* vsyscall code is not mapped yet. resolve it manually. */
517 if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END)
518 instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
519 __inline_memcpy(instr, a->replacement, a->replacementlen);
520 diff = a->instrlen - a->replacementlen;
521
522 /* Pad the rest with nops */
523 for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
524 k = diff;
525 if (k > ASM_NOP_MAX)
526 k = ASM_NOP_MAX;
527 __inline_memcpy(instr + i, k8_nops[k], k);
528 }
529 }
530}
531
532static int no_replacement __initdata = 0;
533
534void __init alternative_instructions(void)
535{
536 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
537 if (no_replacement)
538 return;
539 apply_alternatives(__alt_instructions, __alt_instructions_end);
540}
541
542static int __init noreplacement_setup(char *s)
543{
544 no_replacement = 1;
545 return 1;
546}
547
548__setup("noreplacement", noreplacement_setup);
549
550#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE) 475#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
551struct edd edd; 476struct edd edd;
552#ifdef CONFIG_EDD_MODULE 477#ifdef CONFIG_EDD_MODULE
@@ -779,10 +704,6 @@ void __init setup_arch(char **cmdline_p)
779 704
780 e820_setup_gap(); 705 e820_setup_gap();
781 706
782#ifdef CONFIG_GART_IOMMU
783 iommu_hole_init();
784#endif
785
786#ifdef CONFIG_VT 707#ifdef CONFIG_VT
787#if defined(CONFIG_VGA_CONSOLE) 708#if defined(CONFIG_VGA_CONSOLE)
788 conswitchp = &vga_con; 709 conswitchp = &vga_con;
@@ -867,24 +788,32 @@ static int nearby_node(int apicid)
867static void __init amd_detect_cmp(struct cpuinfo_x86 *c) 788static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
868{ 789{
869#ifdef CONFIG_SMP 790#ifdef CONFIG_SMP
870 int cpu = smp_processor_id();
871 unsigned bits; 791 unsigned bits;
872#ifdef CONFIG_NUMA 792#ifdef CONFIG_NUMA
793 int cpu = smp_processor_id();
873 int node = 0; 794 int node = 0;
874 unsigned apicid = hard_smp_processor_id(); 795 unsigned apicid = hard_smp_processor_id();
875#endif 796#endif
797 unsigned ecx = cpuid_ecx(0x80000008);
798
799 c->x86_max_cores = (ecx & 0xff) + 1;
876 800
877 bits = 0; 801 /* CPU telling us the core id bits shift? */
878 while ((1 << bits) < c->x86_max_cores) 802 bits = (ecx >> 12) & 0xF;
879 bits++; 803
804 /* Otherwise recompute */
805 if (bits == 0) {
806 while ((1 << bits) < c->x86_max_cores)
807 bits++;
808 }
880 809
881 /* Low order bits define the core id (index of core in socket) */ 810 /* Low order bits define the core id (index of core in socket) */
882 cpu_core_id[cpu] = phys_proc_id[cpu] & ((1 << bits)-1); 811 c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
883 /* Convert the APIC ID into the socket ID */ 812 /* Convert the APIC ID into the socket ID */
884 phys_proc_id[cpu] = phys_pkg_id(bits); 813 c->phys_proc_id = phys_pkg_id(bits);
885 814
886#ifdef CONFIG_NUMA 815#ifdef CONFIG_NUMA
887 node = phys_proc_id[cpu]; 816 node = c->phys_proc_id;
888 if (apicid_to_node[apicid] != NUMA_NO_NODE) 817 if (apicid_to_node[apicid] != NUMA_NO_NODE)
889 node = apicid_to_node[apicid]; 818 node = apicid_to_node[apicid];
890 if (!node_online(node)) { 819 if (!node_online(node)) {
@@ -897,7 +826,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
897 but in the same order as the HT nodeids. 826 but in the same order as the HT nodeids.
898 If that doesn't result in a usable node fall back to the 827 If that doesn't result in a usable node fall back to the
899 path for the previous case. */ 828 path for the previous case. */
900 int ht_nodeid = apicid - (phys_proc_id[0] << bits); 829 int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits);
901 if (ht_nodeid >= 0 && 830 if (ht_nodeid >= 0 &&
902 apicid_to_node[ht_nodeid] != NUMA_NO_NODE) 831 apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
903 node = apicid_to_node[ht_nodeid]; 832 node = apicid_to_node[ht_nodeid];
@@ -907,15 +836,13 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
907 } 836 }
908 numa_set_node(cpu, node); 837 numa_set_node(cpu, node);
909 838
910 printk(KERN_INFO "CPU %d/%x(%d) -> Node %d -> Core %d\n", 839 printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
911 cpu, apicid, c->x86_max_cores, node, cpu_core_id[cpu]);
912#endif 840#endif
913#endif 841#endif
914} 842}
915 843
916static int __init init_amd(struct cpuinfo_x86 *c) 844static void __init init_amd(struct cpuinfo_x86 *c)
917{ 845{
918 int r;
919 unsigned level; 846 unsigned level;
920 847
921#ifdef CONFIG_SMP 848#ifdef CONFIG_SMP
@@ -948,8 +875,8 @@ static int __init init_amd(struct cpuinfo_x86 *c)
948 if (c->x86 >= 6) 875 if (c->x86 >= 6)
949 set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability); 876 set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);
950 877
951 r = get_model_name(c); 878 level = get_model_name(c);
952 if (!r) { 879 if (!level) {
953 switch (c->x86) { 880 switch (c->x86) {
954 case 15: 881 case 15:
955 /* Should distinguish Models here, but this is only 882 /* Should distinguish Models here, but this is only
@@ -964,13 +891,12 @@ static int __init init_amd(struct cpuinfo_x86 *c)
964 if (c->x86_power & (1<<8)) 891 if (c->x86_power & (1<<8))
965 set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability); 892 set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
966 893
967 if (c->extended_cpuid_level >= 0x80000008) { 894 /* Multi core CPU? */
968 c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1; 895 if (c->extended_cpuid_level >= 0x80000008)
969
970 amd_detect_cmp(c); 896 amd_detect_cmp(c);
971 }
972 897
973 return r; 898 /* Fix cpuid4 emulation for more */
899 num_cache_leaves = 3;
974} 900}
975 901
976static void __cpuinit detect_ht(struct cpuinfo_x86 *c) 902static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
@@ -978,13 +904,14 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
978#ifdef CONFIG_SMP 904#ifdef CONFIG_SMP
979 u32 eax, ebx, ecx, edx; 905 u32 eax, ebx, ecx, edx;
980 int index_msb, core_bits; 906 int index_msb, core_bits;
981 int cpu = smp_processor_id();
982 907
983 cpuid(1, &eax, &ebx, &ecx, &edx); 908 cpuid(1, &eax, &ebx, &ecx, &edx);
984 909
985 910
986 if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY)) 911 if (!cpu_has(c, X86_FEATURE_HT))
987 return; 912 return;
913 if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
914 goto out;
988 915
989 smp_num_siblings = (ebx & 0xff0000) >> 16; 916 smp_num_siblings = (ebx & 0xff0000) >> 16;
990 917
@@ -999,10 +926,7 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
999 } 926 }
1000 927
1001 index_msb = get_count_order(smp_num_siblings); 928 index_msb = get_count_order(smp_num_siblings);
1002 phys_proc_id[cpu] = phys_pkg_id(index_msb); 929 c->phys_proc_id = phys_pkg_id(index_msb);
1003
1004 printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
1005 phys_proc_id[cpu]);
1006 930
1007 smp_num_siblings = smp_num_siblings / c->x86_max_cores; 931 smp_num_siblings = smp_num_siblings / c->x86_max_cores;
1008 932
@@ -1010,13 +934,15 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
1010 934
1011 core_bits = get_count_order(c->x86_max_cores); 935 core_bits = get_count_order(c->x86_max_cores);
1012 936
1013 cpu_core_id[cpu] = phys_pkg_id(index_msb) & 937 c->cpu_core_id = phys_pkg_id(index_msb) &
1014 ((1 << core_bits) - 1); 938 ((1 << core_bits) - 1);
1015
1016 if (c->x86_max_cores > 1)
1017 printk(KERN_INFO "CPU: Processor Core ID: %d\n",
1018 cpu_core_id[cpu]);
1019 } 939 }
940out:
941 if ((c->x86_max_cores * smp_num_siblings) > 1) {
942 printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
943 printk(KERN_INFO "CPU: Processor Core ID: %d\n", c->cpu_core_id);
944 }
945
1020#endif 946#endif
1021} 947}
1022 948
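
The generic detect_ht() path above performs a similar decomposition from CPUID leaf 1, where EBX[23:16] is the logical processor count per package: get_count_order() rounds a count up to a power-of-two bit width, the package ID is taken above all sibling bits, and the core ID is re-derived after dividing the sibling count by x86_max_cores. A compact stand-alone sketch of that arithmetic (the sibling, core and APIC-ID numbers are examples only):

#include <stdio.h>

/* same contract as the kernel's get_count_order(): smallest n with 2^n >= count */
static int get_count_order(unsigned int count)
{
    int order = 0;

    while ((1u << order) < count)
        order++;
    return order;
}

int main(void)
{
    unsigned int siblings = 4, max_cores = 2;   /* e.g. 2 cores x 2 HT threads */
    unsigned int apicid = 0x5;                  /* hypothetical initial APIC ID */
    int index_msb = get_count_order(siblings);
    unsigned int pkg = apicid >> index_msb;     /* phys_proc_id */
    int core_bits;

    siblings /= max_cores;                      /* now: threads per core */
    index_msb = get_count_order(siblings);
    core_bits = get_count_order(max_cores);

    printf("pkg=%u core=%u threads/core=%u\n", pkg,
           (apicid >> index_msb) & ((1u << core_bits) - 1), siblings);
    return 0;
}
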
@@ -1025,15 +951,12 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
1025 */ 951 */
1026static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) 952static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
1027{ 953{
1028 unsigned int eax; 954 unsigned int eax, t;
1029 955
1030 if (c->cpuid_level < 4) 956 if (c->cpuid_level < 4)
1031 return 1; 957 return 1;
1032 958
1033 __asm__("cpuid" 959 cpuid_count(4, 0, &eax, &t, &t, &t);
1034 : "=a" (eax)
1035 : "0" (4), "c" (0)
1036 : "bx", "dx");
1037 960
1038 if (eax & 0x1f) 961 if (eax & 0x1f)
1039 return ((eax >> 26) + 1); 962 return ((eax >> 26) + 1);
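
intel_num_cpu_cores() above drops the hand-written cpuid asm in favour of the cpuid_count() helper, which selects sub-leaf 0 of the deterministic cache parameters leaf 4; the maximum core count per physical package is then EAX[31:26] + 1 whenever the leaf is populated. A user-space analogue using the compiler's <cpuid.h> (only meaningful on Intel hardware that implements leaf 4):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    if (__get_cpuid(0, &eax, &ebx, &ecx, &edx) == 0 || eax < 4) {
        puts("CPUID leaf 4 not supported, assuming 1 core");
        return 0;
    }
    __cpuid_count(4, 0, eax, ebx, ecx, edx);    /* leaf 4, sub-leaf 0 */
    printf("cores per package: %u\n",
           (eax & 0x1f) ? ((eax >> 26) + 1) : 1);
    return 0;
}
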
@@ -1046,16 +969,17 @@ static void srat_detect_node(void)
1046#ifdef CONFIG_NUMA 969#ifdef CONFIG_NUMA
1047 unsigned node; 970 unsigned node;
1048 int cpu = smp_processor_id(); 971 int cpu = smp_processor_id();
972 int apicid = hard_smp_processor_id();
1049 973
1050 /* Don't do the funky fallback heuristics the AMD version employs 974 /* Don't do the funky fallback heuristics the AMD version employs
1051 for now. */ 975 for now. */
1052 node = apicid_to_node[hard_smp_processor_id()]; 976 node = apicid_to_node[apicid];
1053 if (node == NUMA_NO_NODE) 977 if (node == NUMA_NO_NODE)
1054 node = first_node(node_online_map); 978 node = first_node(node_online_map);
1055 numa_set_node(cpu, node); 979 numa_set_node(cpu, node);
1056 980
1057 if (acpi_numa > 0) 981 if (acpi_numa > 0)
1058 printk(KERN_INFO "CPU %d -> Node %d\n", cpu, node); 982 printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
1059#endif 983#endif
1060} 984}
1061 985
@@ -1065,6 +989,13 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
1065 unsigned n; 989 unsigned n;
1066 990
1067 init_intel_cacheinfo(c); 991 init_intel_cacheinfo(c);
992 if (c->cpuid_level > 9 ) {
993 unsigned eax = cpuid_eax(10);
994 /* Check for version and the number of counters */
995 if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
996 set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
997 }
998
1068 n = c->extended_cpuid_level; 999 n = c->extended_cpuid_level;
1069 if (n >= 0x80000008) { 1000 if (n >= 0x80000008) {
1070 unsigned eax = cpuid_eax(0x80000008); 1001 unsigned eax = cpuid_eax(0x80000008);
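
The new leaf-10 probe in init_intel() above detects architectural performance monitoring: EAX[7:0] is the perfmon version and EAX[15:8] the number of general-purpose counters per logical CPU, and the feature bit is only set when the version is non-zero and more than one counter is reported. A user-space peek at the same leaf (Intel-only, needs <cpuid.h>):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    if (__get_cpuid(10, &eax, &ebx, &ecx, &edx) == 0 || (eax & 0xff) == 0) {
        puts("no architectural perfmon");
        return 0;
    }
    printf("perfmon version %u, %u general-purpose counters\n",
           eax & 0xff, (eax >> 8) & 0xff);
    return 0;
}
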
@@ -1156,7 +1087,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
1156 } 1087 }
1157 1088
1158#ifdef CONFIG_SMP 1089#ifdef CONFIG_SMP
1159 phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff; 1090 c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
1160#endif 1091#endif
1161} 1092}
1162 1093
@@ -1283,7 +1214,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1283 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1214 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1284 NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL, 1215 NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
1285 NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL, 1216 NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
1286 NULL, "fxsr_opt", "rdtscp", NULL, NULL, "lm", "3dnowext", "3dnow", 1217 NULL, "fxsr_opt", NULL, "rdtscp", NULL, "lm", "3dnowext", "3dnow",
1287 1218
1288 /* Transmeta-defined */ 1219 /* Transmeta-defined */
1289 "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL, 1220 "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
@@ -1294,7 +1225,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1294 /* Other (Linux-defined) */ 1225 /* Other (Linux-defined) */
1295 "cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL, 1226 "cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
1296 "constant_tsc", NULL, NULL, 1227 "constant_tsc", NULL, NULL,
1297 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1228 "up", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1298 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1229 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1299 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1230 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1300 1231
@@ -1364,9 +1295,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1364#ifdef CONFIG_SMP 1295#ifdef CONFIG_SMP
1365 if (smp_num_siblings * c->x86_max_cores > 1) { 1296 if (smp_num_siblings * c->x86_max_cores > 1) {
1366 int cpu = c - cpu_data; 1297 int cpu = c - cpu_data;
1367 seq_printf(m, "physical id\t: %d\n", phys_proc_id[cpu]); 1298 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
1368 seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu])); 1299 seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
1369 seq_printf(m, "core id\t\t: %d\n", cpu_core_id[cpu]); 1300 seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
1370 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); 1301 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
1371 } 1302 }
1372#endif 1303#endif
@@ -1440,7 +1371,7 @@ struct seq_operations cpuinfo_op = {
1440 .show = show_cpuinfo, 1371 .show = show_cpuinfo,
1441}; 1372};
1442 1373
1443#ifdef CONFIG_INPUT_PCSPKR 1374#if defined(CONFIG_INPUT_PCSPKR) || defined(CONFIG_INPUT_PCSPKR_MODULE)
1444#include <linux/platform_device.h> 1375#include <linux/platform_device.h>
1445static __init int add_pcspkr(void) 1376static __init int add_pcspkr(void)
1446{ 1377{
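
One small but easy-to-miss fix in the show_cpuinfo() flag table above moves "rdtscp" from bit 26 to bit 27 of the AMD extended-feature word, which is where the RDTSCP capability actually sits in CPUID 0x80000001 EDX ("lm" is bit 29, "3dnow" bit 31), so /proc/cpuinfo now reports it against the correct bit. A quick user-space check of that bit (needs <cpuid.h>):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    if (__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx) == 0)
        return 1;                               /* extended leaf not available */
    printf("rdtscp: %s, lm: %s\n",
           (edx & (1u << 27)) ? "yes" : "no",   /* bit 27, as in the fixed table */
           (edx & (1u << 29)) ? "yes" : "no");
    return 0;
}
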
diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c
index 8a691fa6d393..f5934cb4a2b6 100644
--- a/arch/x86_64/kernel/setup64.c
+++ b/arch/x86_64/kernel/setup64.c
@@ -3,7 +3,6 @@
3 * Copyright (C) 1995 Linus Torvalds 3 * Copyright (C) 1995 Linus Torvalds
4 * Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen. 4 * Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen.
5 * See setup.c for older changelog. 5 * See setup.c for older changelog.
6 * $Id: setup64.c,v 1.12 2002/03/21 10:09:17 ak Exp $
7 */ 6 */
8#include <linux/config.h> 7#include <linux/config.h>
9#include <linux/init.h> 8#include <linux/init.h>
@@ -31,6 +30,7 @@ char x86_boot_params[BOOT_PARAM_SIZE] __initdata = {0,};
31cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; 30cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
32 31
33struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly; 32struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly;
33EXPORT_SYMBOL(_cpu_pda);
34struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned; 34struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned;
35 35
36struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; 36struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
@@ -38,6 +38,7 @@ struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
38char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned"))); 38char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
39 39
40unsigned long __supported_pte_mask __read_mostly = ~0UL; 40unsigned long __supported_pte_mask __read_mostly = ~0UL;
41EXPORT_SYMBOL(__supported_pte_mask);
41static int do_not_nx __cpuinitdata = 0; 42static int do_not_nx __cpuinitdata = 0;
42 43
43/* noexec=on|off 44/* noexec=on|off
diff --git a/arch/x86_64/kernel/signal.c b/arch/x86_64/kernel/signal.c
index e5f5ce7909a3..28161170fb0a 100644
--- a/arch/x86_64/kernel/signal.c
+++ b/arch/x86_64/kernel/signal.c
@@ -7,8 +7,6 @@
7 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson 7 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
8 * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes 8 * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes
9 * 2000-2002 x86-64 support by Andi Kleen 9 * 2000-2002 x86-64 support by Andi Kleen
10 *
11 * $Id: signal.c,v 1.18 2001/10/17 22:30:37 ak Exp $
12 */ 10 */
13 11
14#include <linux/sched.h> 12#include <linux/sched.h>
@@ -239,7 +237,6 @@ get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size)
239 rsp = regs->rsp - 128; 237 rsp = regs->rsp - 128;
240 238
241 /* This is the X/Open sanctioned signal stack switching. */ 239 /* This is the X/Open sanctioned signal stack switching. */
242 /* RED-PEN: redzone on that stack? */
243 if (ka->sa.sa_flags & SA_ONSTACK) { 240 if (ka->sa.sa_flags & SA_ONSTACK) {
244 if (sas_ss_flags(rsp) == 0) 241 if (sas_ss_flags(rsp) == 0)
245 rsp = current->sas_ss_sp + current->sas_ss_size; 242 rsp = current->sas_ss_sp + current->sas_ss_size;
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index 4a6628b14d99..5a1c0a3bf872 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -135,10 +135,10 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
135 135
136 cpu = smp_processor_id(); 136 cpu = smp_processor_id();
137 /* 137 /*
138 * orig_rax contains the interrupt vector - 256. 138 * orig_rax contains the negated interrupt vector.
139 * Use that to determine where the sender put the data. 139 * Use that to determine where the sender put the data.
140 */ 140 */
141 sender = regs->orig_rax + 256 - INVALIDATE_TLB_VECTOR_START; 141 sender = ~regs->orig_rax - INVALIDATE_TLB_VECTOR_START;
142 f = &per_cpu(flush_state, sender); 142 f = &per_cpu(flush_state, sender);
143 143
144 if (!cpu_isset(cpu, f->flush_cpumask)) 144 if (!cpu_isset(cpu, f->flush_cpumask))
@@ -224,6 +224,7 @@ void flush_tlb_current_task(void)
224 flush_tlb_others(cpu_mask, mm, FLUSH_ALL); 224 flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
225 preempt_enable(); 225 preempt_enable();
226} 226}
227EXPORT_SYMBOL(flush_tlb_current_task);
227 228
228void flush_tlb_mm (struct mm_struct * mm) 229void flush_tlb_mm (struct mm_struct * mm)
229{ 230{
@@ -244,6 +245,7 @@ void flush_tlb_mm (struct mm_struct * mm)
244 245
245 preempt_enable(); 246 preempt_enable();
246} 247}
248EXPORT_SYMBOL(flush_tlb_mm);
247 249
248void flush_tlb_page(struct vm_area_struct * vma, unsigned long va) 250void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
249{ 251{
@@ -266,6 +268,7 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
266 268
267 preempt_enable(); 269 preempt_enable();
268} 270}
271EXPORT_SYMBOL(flush_tlb_page);
269 272
270static void do_flush_tlb_all(void* info) 273static void do_flush_tlb_all(void* info)
271{ 274{
@@ -443,6 +446,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
443 spin_unlock(&call_lock); 446 spin_unlock(&call_lock);
444 return 0; 447 return 0;
445} 448}
449EXPORT_SYMBOL(smp_call_function);
446 450
447void smp_stop_cpu(void) 451void smp_stop_cpu(void)
448{ 452{
@@ -460,7 +464,7 @@ static void smp_really_stop_cpu(void *dummy)
460{ 464{
461 smp_stop_cpu(); 465 smp_stop_cpu();
462 for (;;) 466 for (;;)
463 asm("hlt"); 467 halt();
464} 468}
465 469
466void smp_send_stop(void) 470void smp_send_stop(void)
@@ -470,7 +474,7 @@ void smp_send_stop(void)
470 return; 474 return;
471 /* Don't deadlock on the call lock in panic */ 475 /* Don't deadlock on the call lock in panic */
472 if (!spin_trylock(&call_lock)) { 476 if (!spin_trylock(&call_lock)) {
473 /* ignore locking because we have paniced anyways */ 477 /* ignore locking because we have panicked anyways */
474 nolock = 1; 478 nolock = 1;
475 } 479 }
476 __smp_call_function(smp_really_stop_cpu, NULL, 0, 0); 480 __smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
@@ -520,13 +524,13 @@ asmlinkage void smp_call_function_interrupt(void)
520 524
521int safe_smp_processor_id(void) 525int safe_smp_processor_id(void)
522{ 526{
523 int apicid, i; 527 unsigned apicid, i;
524 528
525 if (disable_apic) 529 if (disable_apic)
526 return 0; 530 return 0;
527 531
528 apicid = hard_smp_processor_id(); 532 apicid = hard_smp_processor_id();
529 if (x86_cpu_to_apicid[apicid] == apicid) 533 if (apicid < NR_CPUS && x86_cpu_to_apicid[apicid] == apicid)
530 return apicid; 534 return apicid;
531 535
532 for (i = 0; i < NR_CPUS; ++i) { 536 for (i = 0; i < NR_CPUS; ++i) {
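
The smp_invalidate_interrupt() hunk above follows a change in how the interrupt stubs record the vector: orig_rax now holds the bitwise complement of the vector rather than vector - 256, so the handler recovers it with ~regs->orig_rax before subtracting INVALIDATE_TLB_VECTOR_START. A tiny demonstration of the encoding (the vector number is just an example):

#include <stdio.h>

int main(void)
{
    long vector = 0xf0;             /* example IPI vector number */
    long orig_rax = ~vector;        /* what the entry stub stores now */

    /* the old scheme stored vector - 256 and recovered it with + 256 */
    printf("stored %ld, recovered %ld\n", orig_rax, ~orig_rax);
    return 0;
}
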
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 71a7222cf9ce..540c0ccbcccc 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -63,13 +63,11 @@
63 63
64/* Number of siblings per CPU package */ 64/* Number of siblings per CPU package */
65int smp_num_siblings = 1; 65int smp_num_siblings = 1;
66/* Package ID of each logical CPU */ 66EXPORT_SYMBOL(smp_num_siblings);
67u8 phys_proc_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
68/* core ID of each logical CPU */
69u8 cpu_core_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
70 67
71/* Last level cache ID of each logical CPU */ 68/* Last level cache ID of each logical CPU */
72u8 cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID}; 69u8 cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
70EXPORT_SYMBOL(cpu_llc_id);
73 71
74/* Bitmask of currently online CPUs */ 72/* Bitmask of currently online CPUs */
75cpumask_t cpu_online_map __read_mostly; 73cpumask_t cpu_online_map __read_mostly;
@@ -82,18 +80,21 @@ EXPORT_SYMBOL(cpu_online_map);
82 */ 80 */
83cpumask_t cpu_callin_map; 81cpumask_t cpu_callin_map;
84cpumask_t cpu_callout_map; 82cpumask_t cpu_callout_map;
83EXPORT_SYMBOL(cpu_callout_map);
85 84
86cpumask_t cpu_possible_map; 85cpumask_t cpu_possible_map;
87EXPORT_SYMBOL(cpu_possible_map); 86EXPORT_SYMBOL(cpu_possible_map);
88 87
89/* Per CPU bogomips and other parameters */ 88/* Per CPU bogomips and other parameters */
90struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned; 89struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
90EXPORT_SYMBOL(cpu_data);
91 91
92/* Set when the idlers are all forked */ 92/* Set when the idlers are all forked */
93int smp_threads_ready; 93int smp_threads_ready;
94 94
95/* representing HT siblings of each logical CPU */ 95/* representing HT siblings of each logical CPU */
96cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly; 96cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
97EXPORT_SYMBOL(cpu_sibling_map);
97 98
98/* representing HT and core siblings of each logical CPU */ 99/* representing HT and core siblings of each logical CPU */
99cpumask_t cpu_core_map[NR_CPUS] __read_mostly; 100cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
@@ -454,10 +455,12 @@ cpumask_t cpu_coregroup_map(int cpu)
454 struct cpuinfo_x86 *c = cpu_data + cpu; 455 struct cpuinfo_x86 *c = cpu_data + cpu;
455 /* 456 /*
456 * For perf, we return last level cache shared map. 457 * For perf, we return last level cache shared map.
457 * TBD: when power saving sched policy is added, we will return 458 * And for power savings, we return cpu_core_map
458 * cpu_core_map when power saving policy is enabled
459 */ 459 */
460 return c->llc_shared_map; 460 if (sched_mc_power_savings || sched_smt_power_savings)
461 return cpu_core_map[cpu];
462 else
463 return c->llc_shared_map;
461} 464}
462 465
463/* representing cpus for which sibling maps can be computed */ 466/* representing cpus for which sibling maps can be computed */
@@ -472,8 +475,8 @@ static inline void set_cpu_sibling_map(int cpu)
472 475
473 if (smp_num_siblings > 1) { 476 if (smp_num_siblings > 1) {
474 for_each_cpu_mask(i, cpu_sibling_setup_map) { 477 for_each_cpu_mask(i, cpu_sibling_setup_map) {
475 if (phys_proc_id[cpu] == phys_proc_id[i] && 478 if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
476 cpu_core_id[cpu] == cpu_core_id[i]) { 479 c[cpu].cpu_core_id == c[i].cpu_core_id) {
477 cpu_set(i, cpu_sibling_map[cpu]); 480 cpu_set(i, cpu_sibling_map[cpu]);
478 cpu_set(cpu, cpu_sibling_map[i]); 481 cpu_set(cpu, cpu_sibling_map[i]);
479 cpu_set(i, cpu_core_map[cpu]); 482 cpu_set(i, cpu_core_map[cpu]);
@@ -500,7 +503,7 @@ static inline void set_cpu_sibling_map(int cpu)
500 cpu_set(i, c[cpu].llc_shared_map); 503 cpu_set(i, c[cpu].llc_shared_map);
501 cpu_set(cpu, c[i].llc_shared_map); 504 cpu_set(cpu, c[i].llc_shared_map);
502 } 505 }
503 if (phys_proc_id[cpu] == phys_proc_id[i]) { 506 if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
504 cpu_set(i, cpu_core_map[cpu]); 507 cpu_set(i, cpu_core_map[cpu]);
505 cpu_set(cpu, cpu_core_map[i]); 508 cpu_set(cpu, cpu_core_map[i]);
506 /* 509 /*
@@ -797,6 +800,8 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
797 } 800 }
798 801
799 802
803 alternatives_smp_switch(1);
804
800 c_idle.idle = get_idle_for_cpu(cpu); 805 c_idle.idle = get_idle_for_cpu(cpu);
801 806
802 if (c_idle.idle) { 807 if (c_idle.idle) {
@@ -1199,8 +1204,8 @@ static void remove_siblinginfo(int cpu)
1199 cpu_clear(cpu, cpu_sibling_map[sibling]); 1204 cpu_clear(cpu, cpu_sibling_map[sibling]);
1200 cpus_clear(cpu_sibling_map[cpu]); 1205 cpus_clear(cpu_sibling_map[cpu]);
1201 cpus_clear(cpu_core_map[cpu]); 1206 cpus_clear(cpu_core_map[cpu]);
1202 phys_proc_id[cpu] = BAD_APICID; 1207 c[cpu].phys_proc_id = 0;
1203 cpu_core_id[cpu] = BAD_APICID; 1208 c[cpu].cpu_core_id = 0;
1204 cpu_clear(cpu, cpu_sibling_setup_map); 1209 cpu_clear(cpu, cpu_sibling_setup_map);
1205} 1210}
1206 1211
@@ -1259,6 +1264,8 @@ void __cpu_die(unsigned int cpu)
1259 /* They ack this in play_dead by setting CPU_DEAD */ 1264 /* They ack this in play_dead by setting CPU_DEAD */
1260 if (per_cpu(cpu_state, cpu) == CPU_DEAD) { 1265 if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
1261 printk ("CPU %d is now offline\n", cpu); 1266 printk ("CPU %d is now offline\n", cpu);
1267 if (1 == num_online_cpus())
1268 alternatives_smp_switch(0);
1262 return; 1269 return;
1263 } 1270 }
1264 msleep(100); 1271 msleep(100);
diff --git a/arch/x86_64/kernel/tce.c b/arch/x86_64/kernel/tce.c
new file mode 100644
index 000000000000..8d4c67f61b8e
--- /dev/null
+++ b/arch/x86_64/kernel/tce.c
@@ -0,0 +1,202 @@
1/*
2 * Derived from arch/powerpc/platforms/pseries/iommu.c
3 *
4 * Copyright (C) 2006 Jon Mason <jdmason@us.ibm.com>, IBM Corporation
5 * Copyright (C) 2006 Muli Ben-Yehuda <muli@il.ibm.com>, IBM Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/config.h>
23#include <linux/types.h>
24#include <linux/slab.h>
25#include <linux/mm.h>
26#include <linux/spinlock.h>
27#include <linux/string.h>
28#include <linux/pci.h>
29#include <linux/dma-mapping.h>
30#include <linux/bootmem.h>
31#include <asm/tce.h>
32#include <asm/calgary.h>
33#include <asm/proto.h>
34
35/* flush a tce at 'tceaddr' to main memory */
36static inline void flush_tce(void* tceaddr)
37{
38 /* a single tce can't cross a cache line */
39 if (cpu_has_clflush)
40 asm volatile("clflush (%0)" :: "r" (tceaddr));
41 else
42 asm volatile("wbinvd":::"memory");
43}
44
45void tce_build(struct iommu_table *tbl, unsigned long index,
46 unsigned int npages, unsigned long uaddr, int direction)
47{
48 u64* tp;
49 u64 t;
50 u64 rpn;
51
52 t = (1 << TCE_READ_SHIFT);
53 if (direction != DMA_TO_DEVICE)
54 t |= (1 << TCE_WRITE_SHIFT);
55
56 tp = ((u64*)tbl->it_base) + index;
57
58 while (npages--) {
59 rpn = (virt_to_bus((void*)uaddr)) >> PAGE_SHIFT;
60 t &= ~TCE_RPN_MASK;
61 t |= (rpn << TCE_RPN_SHIFT);
62
63 *tp = cpu_to_be64(t);
64 flush_tce(tp);
65
66 uaddr += PAGE_SIZE;
67 tp++;
68 }
69}
70
71void tce_free(struct iommu_table *tbl, long index, unsigned int npages)
72{
73 u64* tp;
74
75 tp = ((u64*)tbl->it_base) + index;
76
77 while (npages--) {
78 *tp = cpu_to_be64(0);
79 flush_tce(tp);
80 tp++;
81 }
82}
83
84static inline unsigned int table_size_to_number_of_entries(unsigned char size)
85{
86 /*
87 * size is the order of the table, 0-7
88 * smallest table is 8K entries, so shift result by 13 to
89 * multiply by 8K
90 */
91 return (1 << size) << 13;
92}
93
94static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl)
95{
96 unsigned int bitmapsz;
97 unsigned int tce_table_index;
98 unsigned long bmppages;
99 int ret;
100
101 tbl->it_busno = dev->bus->number;
102
103 /* set the tce table size - measured in entries */
104 tbl->it_size = table_size_to_number_of_entries(specified_table_size);
105
106 tce_table_index = bus_to_phb(tbl->it_busno);
107 tbl->it_base = (unsigned long)tce_table_kva[tce_table_index];
108 if (!tbl->it_base) {
109 printk(KERN_ERR "Calgary: iommu_table_setparms: "
110 "no table allocated?!\n");
111 ret = -ENOMEM;
112 goto done;
113 }
114
115 /*
116 * number of bytes needed for the bitmap size in number of
117 * entries; we need one bit per entry
118 */
119 bitmapsz = tbl->it_size / BITS_PER_BYTE;
120 bmppages = __get_free_pages(GFP_KERNEL, get_order(bitmapsz));
121 if (!bmppages) {
122 printk(KERN_ERR "Calgary: cannot allocate bitmap\n");
123 ret = -ENOMEM;
124 goto done;
125 }
126
127 tbl->it_map = (unsigned long*)bmppages;
128
129 memset(tbl->it_map, 0, bitmapsz);
130
131 tbl->it_hint = 0;
132
133 spin_lock_init(&tbl->it_lock);
134
135 return 0;
136
137done:
138 return ret;
139}
140
141int build_tce_table(struct pci_dev *dev, void __iomem *bbar)
142{
143 struct iommu_table *tbl;
144 int ret;
145
146 if (dev->sysdata) {
147 printk(KERN_ERR "Calgary: dev %p has sysdata %p\n",
148 dev, dev->sysdata);
149 BUG();
150 }
151
152 tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL);
153 if (!tbl) {
154 printk(KERN_ERR "Calgary: error allocating iommu_table\n");
155 ret = -ENOMEM;
156 goto done;
157 }
158
159 ret = tce_table_setparms(dev, tbl);
160 if (ret)
161 goto free_tbl;
162
163 tce_free(tbl, 0, tbl->it_size);
164
165 tbl->bbar = bbar;
166
167 /*
168 * NUMA is already using the bus's sysdata pointer, so we use
169 * the bus's pci_dev's sysdata instead.
170 */
171 dev->sysdata = tbl;
172
173 return 0;
174
175free_tbl:
176 kfree(tbl);
177done:
178 return ret;
179}
180
181void* alloc_tce_table(void)
182{
183 unsigned int size;
184
185 size = table_size_to_number_of_entries(specified_table_size);
186 size *= TCE_ENTRY_SIZE;
187
188 return __alloc_bootmem_low(size, size, 0);
189}
190
191void free_tce_table(void *tbl)
192{
193 unsigned int size;
194
195 if (!tbl)
196 return;
197
198 size = table_size_to_number_of_entries(specified_table_size);
199 size *= TCE_ENTRY_SIZE;
200
201 free_bootmem(__pa(tbl), size);
202}
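
In the new tce.c above, table_size_to_number_of_entries() turns the table order (0 to 7) into (1 << order) * 8192 entries; alloc_tce_table() multiplies that by TCE_ENTRY_SIZE, and tce_table_setparms() allocates one bitmap bit per entry. A worked sketch of the resulting sizes; TCE_ENTRY_SIZE = 8 bytes is an assumption inferred from the u64 accesses in tce_build(), not a value shown in this patch:

#include <stdio.h>

#define TCE_ENTRY_SIZE 8    /* assumed: one u64 per TCE, as tce_build() suggests */
#define BITS_PER_BYTE  8

/* same math as table_size_to_number_of_entries(): smallest table is 8K entries */
static unsigned int entries_for_order(unsigned int order)
{
    return (1u << order) << 13;
}

int main(void)
{
    unsigned int order;

    for (order = 0; order <= 7; order++) {
        unsigned int entries = entries_for_order(order);

        printf("order %u: %7u entries, table %5u KiB, bitmap %6u bytes\n",
               order, entries, entries * TCE_ENTRY_SIZE / 1024,
               entries / BITS_PER_BYTE);
    }
    return 0;
}
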
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 7392570f975d..ebbee6f59ff5 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -8,7 +8,7 @@
8 * Copyright (c) 1995 Markus Kuhn 8 * Copyright (c) 1995 Markus Kuhn
9 * Copyright (c) 1996 Ingo Molnar 9 * Copyright (c) 1996 Ingo Molnar
10 * Copyright (c) 1998 Andrea Arcangeli 10 * Copyright (c) 1998 Andrea Arcangeli
11 * Copyright (c) 2002 Vojtech Pavlik 11 * Copyright (c) 2002,2006 Vojtech Pavlik
12 * Copyright (c) 2003 Andi Kleen 12 * Copyright (c) 2003 Andi Kleen
13 * RTC support code taken from arch/i386/kernel/timers/time_hpet.c 13 * RTC support code taken from arch/i386/kernel/timers/time_hpet.c
14 */ 14 */
@@ -51,14 +51,21 @@ extern int using_apic_timer;
51static char *time_init_gtod(void); 51static char *time_init_gtod(void);
52 52
53DEFINE_SPINLOCK(rtc_lock); 53DEFINE_SPINLOCK(rtc_lock);
54EXPORT_SYMBOL(rtc_lock);
54DEFINE_SPINLOCK(i8253_lock); 55DEFINE_SPINLOCK(i8253_lock);
55 56
56int nohpet __initdata = 0; 57int nohpet __initdata = 0;
57static int notsc __initdata = 0; 58static int notsc __initdata = 0;
58 59
59#undef HPET_HACK_ENABLE_DANGEROUS 60#define USEC_PER_TICK (USEC_PER_SEC / HZ)
61#define NSEC_PER_TICK (NSEC_PER_SEC / HZ)
62#define FSEC_PER_TICK (FSEC_PER_SEC / HZ)
63
64#define NS_SCALE 10 /* 2^10, carefully chosen */
65#define US_SCALE 32 /* 2^32, arbitrarily chosen */
60 66
61unsigned int cpu_khz; /* TSC clocks / usec, not used here */ 67unsigned int cpu_khz; /* TSC clocks / usec, not used here */
68EXPORT_SYMBOL(cpu_khz);
62static unsigned long hpet_period; /* fsecs / HPET clock */ 69static unsigned long hpet_period; /* fsecs / HPET clock */
63unsigned long hpet_tick; /* HPET clocks / interrupt */ 70unsigned long hpet_tick; /* HPET clocks / interrupt */
64int hpet_use_timer; /* Use counter of hpet for time keeping, otherwise PIT */ 71int hpet_use_timer; /* Use counter of hpet for time keeping, otherwise PIT */
@@ -90,7 +97,7 @@ static inline unsigned int do_gettimeoffset_tsc(void)
90 t = get_cycles_sync(); 97 t = get_cycles_sync();
91 if (t < vxtime.last_tsc) 98 if (t < vxtime.last_tsc)
92 t = vxtime.last_tsc; /* hack */ 99 t = vxtime.last_tsc; /* hack */
93 x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> 32; 100 x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> US_SCALE;
94 return x; 101 return x;
95} 102}
96 103
@@ -98,7 +105,7 @@ static inline unsigned int do_gettimeoffset_hpet(void)
98{ 105{
99 /* cap counter read to one tick to avoid inconsistencies */ 106 /* cap counter read to one tick to avoid inconsistencies */
100 unsigned long counter = hpet_readl(HPET_COUNTER) - vxtime.last; 107 unsigned long counter = hpet_readl(HPET_COUNTER) - vxtime.last;
101 return (min(counter,hpet_tick) * vxtime.quot) >> 32; 108 return (min(counter,hpet_tick) * vxtime.quot) >> US_SCALE;
102} 109}
103 110
104unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc; 111unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc;
@@ -118,7 +125,7 @@ void do_gettimeofday(struct timeval *tv)
118 seq = read_seqbegin(&xtime_lock); 125 seq = read_seqbegin(&xtime_lock);
119 126
120 sec = xtime.tv_sec; 127 sec = xtime.tv_sec;
121 usec = xtime.tv_nsec / 1000; 128 usec = xtime.tv_nsec / NSEC_PER_USEC;
122 129
123 /* i386 does some correction here to keep the clock 130 /* i386 does some correction here to keep the clock
124 monotonous even when ntpd is fixing drift. 131 monotonous even when ntpd is fixing drift.
@@ -129,14 +136,14 @@ void do_gettimeofday(struct timeval *tv)
129 in arch/x86_64/kernel/vsyscall.c and export all needed 136 in arch/x86_64/kernel/vsyscall.c and export all needed
130 variables in vmlinux.lds. -AK */ 137 variables in vmlinux.lds. -AK */
131 138
132 t = (jiffies - wall_jiffies) * (1000000L / HZ) + 139 t = (jiffies - wall_jiffies) * USEC_PER_TICK +
133 do_gettimeoffset(); 140 do_gettimeoffset();
134 usec += t; 141 usec += t;
135 142
136 } while (read_seqretry(&xtime_lock, seq)); 143 } while (read_seqretry(&xtime_lock, seq));
137 144
138 tv->tv_sec = sec + usec / 1000000; 145 tv->tv_sec = sec + usec / USEC_PER_SEC;
139 tv->tv_usec = usec % 1000000; 146 tv->tv_usec = usec % USEC_PER_SEC;
140} 147}
141 148
142EXPORT_SYMBOL(do_gettimeofday); 149EXPORT_SYMBOL(do_gettimeofday);
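
The do_gettimeofday() and do_settimeofday() hunks above replace the open-coded 1000000/HZ and NSEC_PER_SEC/HZ expressions with the USEC_PER_TICK and NSEC_PER_TICK macros defined earlier in the file, and the accumulated microseconds are folded back into whole seconds at the end. A small stand-alone rendering of that last step (HZ = 250 and the offsets are example values, not taken from a real run):

#include <stdio.h>

#define HZ            250                   /* example configuration only */
#define USEC_PER_SEC  1000000L
#define USEC_PER_TICK (USEC_PER_SEC / HZ)

int main(void)
{
    long sec = 1000, usec = 999900;         /* xtime seconds + nsec / 1000 */
    long pending_ticks = 2;                 /* jiffies - wall_jiffies */

    usec += pending_ticks * USEC_PER_TICK + 37;   /* 37: stand-in gettimeoffset */

    printf("tv_sec=%ld tv_usec=%ld\n",
           sec + usec / USEC_PER_SEC,       /* overflow rolls into the seconds */
           usec % USEC_PER_SEC);
    return 0;
}
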
@@ -157,8 +164,8 @@ int do_settimeofday(struct timespec *tv)
157 164
158 write_seqlock_irq(&xtime_lock); 165 write_seqlock_irq(&xtime_lock);
159 166
160 nsec -= do_gettimeoffset() * 1000 + 167 nsec -= do_gettimeoffset() * NSEC_PER_USEC +
161 (jiffies - wall_jiffies) * (NSEC_PER_SEC/HZ); 168 (jiffies - wall_jiffies) * NSEC_PER_TICK;
162 169
163 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); 170 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
164 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); 171 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
@@ -288,7 +295,7 @@ unsigned long long monotonic_clock(void)
288 this_offset = hpet_readl(HPET_COUNTER); 295 this_offset = hpet_readl(HPET_COUNTER);
289 } while (read_seqretry(&xtime_lock, seq)); 296 } while (read_seqretry(&xtime_lock, seq));
290 offset = (this_offset - last_offset); 297 offset = (this_offset - last_offset);
291 offset *= (NSEC_PER_SEC/HZ) / hpet_tick; 298 offset *= NSEC_PER_TICK / hpet_tick;
292 } else { 299 } else {
293 do { 300 do {
294 seq = read_seqbegin(&xtime_lock); 301 seq = read_seqbegin(&xtime_lock);
@@ -297,7 +304,8 @@ unsigned long long monotonic_clock(void)
297 base = monotonic_base; 304 base = monotonic_base;
298 } while (read_seqretry(&xtime_lock, seq)); 305 } while (read_seqretry(&xtime_lock, seq));
299 this_offset = get_cycles_sync(); 306 this_offset = get_cycles_sync();
300 offset = (this_offset - last_offset)*1000 / cpu_khz; 307 /* FIXME: 1000 or 1000000? */
308 offset = (this_offset - last_offset)*1000 / cpu_khz;
301 } 309 }
302 return base + offset; 310 return base + offset;
303} 311}
@@ -382,7 +390,7 @@ void main_timer_handler(struct pt_regs *regs)
382 } 390 }
383 391
384 monotonic_base += 392 monotonic_base +=
385 (offset - vxtime.last)*(NSEC_PER_SEC/HZ) / hpet_tick; 393 (offset - vxtime.last) * NSEC_PER_TICK / hpet_tick;
386 394
387 vxtime.last = offset; 395 vxtime.last = offset;
388#ifdef CONFIG_X86_PM_TIMER 396#ifdef CONFIG_X86_PM_TIMER
@@ -391,24 +399,25 @@ void main_timer_handler(struct pt_regs *regs)
391#endif 399#endif
392 } else { 400 } else {
393 offset = (((tsc - vxtime.last_tsc) * 401 offset = (((tsc - vxtime.last_tsc) *
394 vxtime.tsc_quot) >> 32) - (USEC_PER_SEC / HZ); 402 vxtime.tsc_quot) >> US_SCALE) - USEC_PER_TICK;
395 403
396 if (offset < 0) 404 if (offset < 0)
397 offset = 0; 405 offset = 0;
398 406
399 if (offset > (USEC_PER_SEC / HZ)) { 407 if (offset > USEC_PER_TICK) {
400 lost = offset / (USEC_PER_SEC / HZ); 408 lost = offset / USEC_PER_TICK;
401 offset %= (USEC_PER_SEC / HZ); 409 offset %= USEC_PER_TICK;
402 } 410 }
403 411
404 monotonic_base += (tsc - vxtime.last_tsc)*1000000/cpu_khz ; 412 /* FIXME: 1000 or 1000000? */
413 monotonic_base += (tsc - vxtime.last_tsc) * 1000000 / cpu_khz;
405 414
406 vxtime.last_tsc = tsc - vxtime.quot * delay / vxtime.tsc_quot; 415 vxtime.last_tsc = tsc - vxtime.quot * delay / vxtime.tsc_quot;
407 416
408 if ((((tsc - vxtime.last_tsc) * 417 if ((((tsc - vxtime.last_tsc) *
409 vxtime.tsc_quot) >> 32) < offset) 418 vxtime.tsc_quot) >> US_SCALE) < offset)
410 vxtime.last_tsc = tsc - 419 vxtime.last_tsc = tsc -
411 (((long) offset << 32) / vxtime.tsc_quot) - 1; 420 (((long) offset << US_SCALE) / vxtime.tsc_quot) - 1;
412 } 421 }
413 422
414 if (lost > 0) { 423 if (lost > 0) {
@@ -468,16 +477,15 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
468} 477}
469 478
470static unsigned int cyc2ns_scale __read_mostly; 479static unsigned int cyc2ns_scale __read_mostly;
471#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
472 480
473static inline void set_cyc2ns_scale(unsigned long cpu_khz) 481static inline void set_cyc2ns_scale(unsigned long cpu_khz)
474{ 482{
475 cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz; 483 cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / cpu_khz;
476} 484}
477 485
478static inline unsigned long long cycles_2_ns(unsigned long long cyc) 486static inline unsigned long long cycles_2_ns(unsigned long long cyc)
479{ 487{
480 return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR; 488 return (cyc * cyc2ns_scale) >> NS_SCALE;
481} 489}
482 490
483unsigned long long sched_clock(void) 491unsigned long long sched_clock(void)
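
set_cyc2ns_scale() and cycles_2_ns() above keep the nanoseconds-per-cycle ratio as a 2^10 fixed-point factor: NS_SCALE = 10 simply replaces the old CYC2NS_SCALE_FACTOR with the same value, the factor is (NSEC_PER_MSEC << 10) / cpu_khz, and a cycle count converts with one multiply and one shift. A worked example for a hypothetical 2 GHz CPU:

#include <stdio.h>

#define NS_SCALE      10            /* 2^10 fixed-point shift, as above */
#define NSEC_PER_MSEC 1000000UL

int main(void)
{
    unsigned long cpu_khz = 2000000;            /* hypothetical 2 GHz part */
    unsigned int cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / cpu_khz;
    unsigned long long cyc = 6000000000ULL;     /* three seconds' worth of cycles */

    /* 2 GHz -> 0.5 ns per cycle -> scale = 512 */
    printf("scale=%u, %llu cycles = %llu ns\n",
           cyc2ns_scale, cyc, (cyc * cyc2ns_scale) >> NS_SCALE);
    return 0;
}
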
@@ -490,7 +498,7 @@ unsigned long long sched_clock(void)
490 Disadvantage is a small drift between CPUs in some configurations, 498 Disadvantage is a small drift between CPUs in some configurations,
491 but that should be tolerable. */ 499 but that should be tolerable. */
492 if (__vxtime.mode == VXTIME_HPET) 500 if (__vxtime.mode == VXTIME_HPET)
493 return (hpet_readl(HPET_COUNTER) * vxtime.quot) >> 32; 501 return (hpet_readl(HPET_COUNTER) * vxtime.quot) >> US_SCALE;
494#endif 502#endif
495 503
496 /* Could do CPU core sync here. Opteron can execute rdtsc speculatively, 504 /* Could do CPU core sync here. Opteron can execute rdtsc speculatively,
@@ -633,7 +641,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
633 641
634 cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new); 642 cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
635 if (!(freq->flags & CPUFREQ_CONST_LOOPS)) 643 if (!(freq->flags & CPUFREQ_CONST_LOOPS))
636 vxtime.tsc_quot = (1000L << 32) / cpu_khz; 644 vxtime.tsc_quot = (USEC_PER_MSEC << US_SCALE) / cpu_khz;
637 } 645 }
638 646
639 set_cyc2ns_scale(cpu_khz_ref); 647 set_cyc2ns_scale(cpu_khz_ref);
@@ -789,8 +797,8 @@ static int hpet_timer_stop_set_go(unsigned long tick)
789 if (hpet_use_timer) { 797 if (hpet_use_timer) {
790 hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL | 798 hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
791 HPET_TN_32BIT, HPET_T0_CFG); 799 HPET_TN_32BIT, HPET_T0_CFG);
792 hpet_writel(hpet_tick, HPET_T0_CMP); 800 hpet_writel(hpet_tick, HPET_T0_CMP); /* next interrupt */
793 hpet_writel(hpet_tick, HPET_T0_CMP); /* AK: why twice? */ 801 hpet_writel(hpet_tick, HPET_T0_CMP); /* period */
794 cfg |= HPET_CFG_LEGACY; 802 cfg |= HPET_CFG_LEGACY;
795 } 803 }
796/* 804/*
@@ -825,8 +833,7 @@ static int hpet_init(void)
825 if (hpet_period < 100000 || hpet_period > 100000000) 833 if (hpet_period < 100000 || hpet_period > 100000000)
826 return -1; 834 return -1;
827 835
828 hpet_tick = (1000000000L * (USEC_PER_SEC / HZ) + hpet_period / 2) / 836 hpet_tick = (FSEC_PER_TICK + hpet_period / 2) / hpet_period;
829 hpet_period;
830 837
831 hpet_use_timer = (id & HPET_ID_LEGSUP); 838 hpet_use_timer = (id & HPET_ID_LEGSUP);
832 839
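
hpet_init() above now derives hpet_tick, the number of HPET clocks per timer tick, from the FSEC_PER_TICK macro instead of spelling out 10^15/HZ, with the + hpet_period / 2 term giving round-to-nearest division; hpet_period is the counter period in femtoseconds as reported by the hardware. A worked example for a typical ~14.318 MHz HPET with HZ = 250 (both values are illustrative, not read from this patch):

#include <stdio.h>

#define HZ            250                       /* example configuration only */
#define FSEC_PER_SEC  1000000000000000ULL       /* 10^15 femtoseconds */
#define FSEC_PER_TICK (FSEC_PER_SEC / HZ)

int main(void)
{
    /* ~14.318 MHz HPET: period reported by the hardware, in fs per clock */
    unsigned long long hpet_period = 69841279;
    unsigned long long hpet_tick =
        (FSEC_PER_TICK + hpet_period / 2) / hpet_period;    /* round to nearest */

    printf("hpet_tick = %llu HPET clocks per %d ms timer tick\n",
           hpet_tick, 1000 / HZ);
    return 0;
}
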
@@ -890,18 +897,6 @@ void __init time_init(void)
890 char *timename; 897 char *timename;
891 char *gtod; 898 char *gtod;
892 899
893#ifdef HPET_HACK_ENABLE_DANGEROUS
894 if (!vxtime.hpet_address) {
895 printk(KERN_WARNING "time.c: WARNING: Enabling HPET base "
896 "manually!\n");
897 outl(0x800038a0, 0xcf8);
898 outl(0xff000001, 0xcfc);
899 outl(0x800038a0, 0xcf8);
900 vxtime.hpet_address = inl(0xcfc) & 0xfffffffe;
901 printk(KERN_WARNING "time.c: WARNING: Enabled HPET "
902 "at %#lx.\n", vxtime.hpet_address);
903 }
904#endif
905 if (nohpet) 900 if (nohpet)
906 vxtime.hpet_address = 0; 901 vxtime.hpet_address = 0;
907 902
@@ -912,7 +907,7 @@ void __init time_init(void)
912 -xtime.tv_sec, -xtime.tv_nsec); 907 -xtime.tv_sec, -xtime.tv_nsec);
913 908
914 if (!hpet_init()) 909 if (!hpet_init())
915 vxtime_hz = (1000000000000000L + hpet_period / 2) / hpet_period; 910 vxtime_hz = (FSEC_PER_SEC + hpet_period / 2) / hpet_period;
916 else 911 else
917 vxtime.hpet_address = 0; 912 vxtime.hpet_address = 0;
918 913
@@ -941,8 +936,8 @@ void __init time_init(void)
941 vxtime_hz / 1000000, vxtime_hz % 1000000, timename, gtod); 936 vxtime_hz / 1000000, vxtime_hz % 1000000, timename, gtod);
942 printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n", 937 printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
943 cpu_khz / 1000, cpu_khz % 1000); 938 cpu_khz / 1000, cpu_khz % 1000);
944 vxtime.quot = (1000000L << 32) / vxtime_hz; 939 vxtime.quot = (USEC_PER_SEC << US_SCALE) / vxtime_hz;
945 vxtime.tsc_quot = (1000L << 32) / cpu_khz; 940 vxtime.tsc_quot = (USEC_PER_MSEC << US_SCALE) / cpu_khz;
946 vxtime.last_tsc = get_cycles_sync(); 941 vxtime.last_tsc = get_cycles_sync();
947 setup_irq(0, &irq0); 942 setup_irq(0, &irq0);
948 943
@@ -956,10 +951,10 @@ void __init time_init(void)
956__cpuinit int unsynchronized_tsc(void) 951__cpuinit int unsynchronized_tsc(void)
957{ 952{
958#ifdef CONFIG_SMP 953#ifdef CONFIG_SMP
959 if (oem_force_hpet_timer()) 954 if (apic_is_clustered_box())
960 return 1; 955 return 1;
961 /* Intel systems are normally all synchronized. Exceptions 956 /* Intel systems are normally all synchronized. Exceptions
962 are handled in the OEM check above. */ 957 are handled in the check above. */
963 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) 958 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
964 return 0; 959 return 0;
965#endif 960#endif
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index cea335e8746c..3d11a2fe45b7 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -6,8 +6,6 @@
6 * 6 *
7 * Pentium III FXSR, SSE support 7 * Pentium III FXSR, SSE support
8 * Gareth Hughes <gareth@valinux.com>, May 2000 8 * Gareth Hughes <gareth@valinux.com>, May 2000
9 *
10 * $Id: traps.c,v 1.36 2002/03/24 11:09:10 ak Exp $
11 */ 9 */
12 10
13/* 11/*
@@ -31,6 +29,7 @@
31#include <linux/nmi.h> 29#include <linux/nmi.h>
32#include <linux/kprobes.h> 30#include <linux/kprobes.h>
33#include <linux/kexec.h> 31#include <linux/kexec.h>
32#include <linux/unwind.h>
34 33
35#include <asm/system.h> 34#include <asm/system.h>
36#include <asm/uaccess.h> 35#include <asm/uaccess.h>
@@ -41,7 +40,7 @@
41#include <asm/i387.h> 40#include <asm/i387.h>
42#include <asm/kdebug.h> 41#include <asm/kdebug.h>
43#include <asm/processor.h> 42#include <asm/processor.h>
44 43#include <asm/unwind.h>
45#include <asm/smp.h> 44#include <asm/smp.h>
46#include <asm/pgalloc.h> 45#include <asm/pgalloc.h>
47#include <asm/pda.h> 46#include <asm/pda.h>
@@ -71,6 +70,7 @@ asmlinkage void machine_check(void);
71asmlinkage void spurious_interrupt_bug(void); 70asmlinkage void spurious_interrupt_bug(void);
72 71
73ATOMIC_NOTIFIER_HEAD(die_chain); 72ATOMIC_NOTIFIER_HEAD(die_chain);
73EXPORT_SYMBOL(die_chain);
74 74
75int register_die_notifier(struct notifier_block *nb) 75int register_die_notifier(struct notifier_block *nb)
76{ 76{
@@ -107,7 +107,8 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
107 preempt_enable_no_resched(); 107 preempt_enable_no_resched();
108} 108}
109 109
110static int kstack_depth_to_print = 10; 110static int kstack_depth_to_print = 12;
111static int call_trace = 1;
111 112
112#ifdef CONFIG_KALLSYMS 113#ifdef CONFIG_KALLSYMS
113#include <linux/kallsyms.h> 114#include <linux/kallsyms.h>
@@ -191,6 +192,25 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
191 return NULL; 192 return NULL;
192} 193}
193 194
195static int show_trace_unwind(struct unwind_frame_info *info, void *context)
196{
197 int i = 11, n = 0;
198
199 while (unwind(info) == 0 && UNW_PC(info)) {
200 ++n;
201 if (i > 50) {
202 printk("\n ");
203 i = 7;
204 } else
205 i += printk(" ");
206 i += printk_address(UNW_PC(info));
207 if (arch_unw_user_mode(info))
208 break;
209 }
210 printk("\n");
211 return n;
212}
213
194/* 214/*
195 * x86-64 can have up to three kernel stacks: 215 * x86-64 can have up to three kernel stacks:
196 * process stack 216 * process stack
@@ -198,15 +218,39 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
198 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack 218 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
199 */ 219 */
200 220
201void show_trace(unsigned long *stack) 221void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * stack)
202{ 222{
203 const unsigned cpu = safe_smp_processor_id(); 223 const unsigned cpu = safe_smp_processor_id();
204 unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr; 224 unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
205 int i; 225 int i = 11;
206 unsigned used = 0; 226 unsigned used = 0;
207 227
208 printk("\nCall Trace:"); 228 printk("\nCall Trace:");
209 229
230 if (!tsk)
231 tsk = current;
232
233 if (call_trace >= 0) {
234 int unw_ret = 0;
235 struct unwind_frame_info info;
236
237 if (regs) {
238 if (unwind_init_frame_info(&info, tsk, regs) == 0)
239 unw_ret = show_trace_unwind(&info, NULL);
240 } else if (tsk == current)
241 unw_ret = unwind_init_running(&info, show_trace_unwind, NULL);
242 else {
243 if (unwind_init_blocked(&info, tsk) == 0)
244 unw_ret = show_trace_unwind(&info, NULL);
245 }
246 if (unw_ret > 0) {
247 if (call_trace > 0)
248 return;
249 printk("Legacy call trace:");
250 i = 18;
251 }
252 }
253
210#define HANDLE_STACK(cond) \ 254#define HANDLE_STACK(cond) \
211 do while (cond) { \ 255 do while (cond) { \
212 unsigned long addr = *stack++; \ 256 unsigned long addr = *stack++; \
@@ -229,7 +273,7 @@ void show_trace(unsigned long *stack)
229 } \ 273 } \
230 } while (0) 274 } while (0)
231 275
232 for(i = 11; ; ) { 276 for(; ; ) {
233 const char *id; 277 const char *id;
234 unsigned long *estack_end; 278 unsigned long *estack_end;
235 estack_end = in_exception_stack(cpu, (unsigned long)stack, 279 estack_end = in_exception_stack(cpu, (unsigned long)stack,
@@ -264,7 +308,7 @@ void show_trace(unsigned long *stack)
264 printk("\n"); 308 printk("\n");
265} 309}
266 310
267void show_stack(struct task_struct *tsk, unsigned long * rsp) 311static void _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long * rsp)
268{ 312{
269 unsigned long *stack; 313 unsigned long *stack;
270 int i; 314 int i;
@@ -298,7 +342,12 @@ void show_stack(struct task_struct *tsk, unsigned long * rsp)
298 printk("%016lx ", *stack++); 342 printk("%016lx ", *stack++);
299 touch_nmi_watchdog(); 343 touch_nmi_watchdog();
300 } 344 }
301 show_trace((unsigned long *)rsp); 345 show_trace(tsk, regs, rsp);
346}
347
348void show_stack(struct task_struct *tsk, unsigned long * rsp)
349{
350 _show_stack(tsk, NULL, rsp);
302} 351}
303 352
304/* 353/*
@@ -307,7 +356,7 @@ void show_stack(struct task_struct *tsk, unsigned long * rsp)
307void dump_stack(void) 356void dump_stack(void)
308{ 357{
309 unsigned long dummy; 358 unsigned long dummy;
310 show_trace(&dummy); 359 show_trace(NULL, NULL, &dummy);
311} 360}
312 361
313EXPORT_SYMBOL(dump_stack); 362EXPORT_SYMBOL(dump_stack);
@@ -334,7 +383,7 @@ void show_registers(struct pt_regs *regs)
334 if (in_kernel) { 383 if (in_kernel) {
335 384
336 printk("Stack: "); 385 printk("Stack: ");
337 show_stack(NULL, (unsigned long*)rsp); 386 _show_stack(NULL, regs, (unsigned long*)rsp);
338 387
339 printk("\nCode: "); 388 printk("\nCode: ");
340 if (regs->rip < PAGE_OFFSET) 389 if (regs->rip < PAGE_OFFSET)
@@ -383,6 +432,7 @@ void out_of_line_bug(void)
383{ 432{
384 BUG(); 433 BUG();
385} 434}
435EXPORT_SYMBOL(out_of_line_bug);
386#endif 436#endif
387 437
388static DEFINE_SPINLOCK(die_lock); 438static DEFINE_SPINLOCK(die_lock);
@@ -1012,3 +1062,14 @@ static int __init kstack_setup(char *s)
1012} 1062}
1013__setup("kstack=", kstack_setup); 1063__setup("kstack=", kstack_setup);
1014 1064
1065static int __init call_trace_setup(char *s)
1066{
1067 if (strcmp(s, "old") == 0)
1068 call_trace = -1;
1069 else if (strcmp(s, "both") == 0)
1070 call_trace = 0;
1071 else if (strcmp(s, "new") == 0)
1072 call_trace = 1;
1073 return 1;
1074}
1075__setup("call_trace=", call_trace_setup);
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index b81f473c4a19..1c6a5f322919 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -45,6 +45,15 @@ SECTIONS
45 45
46 RODATA 46 RODATA
47 47
48#ifdef CONFIG_STACK_UNWIND
49 . = ALIGN(8);
50 .eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) {
51 __start_unwind = .;
52 *(.eh_frame)
53 __end_unwind = .;
54 }
55#endif
56
48 /* Data */ 57 /* Data */
49 .data : AT(ADDR(.data) - LOAD_OFFSET) { 58 .data : AT(ADDR(.data) - LOAD_OFFSET) {
50 *(.data) 59 *(.data)
@@ -131,6 +140,26 @@ SECTIONS
131 *(.data.page_aligned) 140 *(.data.page_aligned)
132 } 141 }
133 142
143 /* might get freed after init */
144 . = ALIGN(4096);
145 __smp_alt_begin = .;
146 __smp_alt_instructions = .;
147 .smp_altinstructions : AT(ADDR(.smp_altinstructions) - LOAD_OFFSET) {
148 *(.smp_altinstructions)
149 }
150 __smp_alt_instructions_end = .;
151 . = ALIGN(8);
152 __smp_locks = .;
153 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
154 *(.smp_locks)
155 }
156 __smp_locks_end = .;
157 .smp_altinstr_replacement : AT(ADDR(.smp_altinstr_replacement) - LOAD_OFFSET) {
158 *(.smp_altinstr_replacement)
159 }
160 . = ALIGN(4096);
161 __smp_alt_end = .;
162
134 . = ALIGN(4096); /* Init code and data */ 163 . = ALIGN(4096); /* Init code and data */
135 __init_begin = .; 164 __init_begin = .;
136 .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { 165 .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
diff --git a/arch/x86_64/kernel/vsyscall.c b/arch/x86_64/kernel/vsyscall.c
index 9468fb20b0bc..f603037df162 100644
--- a/arch/x86_64/kernel/vsyscall.c
+++ b/arch/x86_64/kernel/vsyscall.c
@@ -107,7 +107,7 @@ static __always_inline long time_syscall(long *t)
107 107
108int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz) 108int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
109{ 109{
110 if (unlikely(!__sysctl_vsyscall)) 110 if (!__sysctl_vsyscall)
111 return gettimeofday(tv,tz); 111 return gettimeofday(tv,tz);
112 if (tv) 112 if (tv)
113 do_vgettimeofday(tv); 113 do_vgettimeofday(tv);
@@ -120,7 +120,7 @@ int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
120 * unlikely */ 120 * unlikely */
121time_t __vsyscall(1) vtime(time_t *t) 121time_t __vsyscall(1) vtime(time_t *t)
122{ 122{
123 if (unlikely(!__sysctl_vsyscall)) 123 if (!__sysctl_vsyscall)
124 return time_syscall(t); 124 return time_syscall(t);
125 else if (t) 125 else if (t)
126 *t = __xtime.tv_sec; 126 *t = __xtime.tv_sec;
diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c
index 1def21c9f7cd..370952c4ff22 100644
--- a/arch/x86_64/kernel/x8664_ksyms.c
+++ b/arch/x86_64/kernel/x8664_ksyms.c
@@ -1,66 +1,21 @@
1/* Exports for assembly files.
2 All C exports should go in the respective C files. */
3
1#include <linux/config.h> 4#include <linux/config.h>
2#include <linux/module.h> 5#include <linux/module.h>
3#include <linux/smp.h> 6#include <linux/smp.h>
4#include <linux/user.h>
5#include <linux/sched.h>
6#include <linux/in6.h>
7#include <linux/interrupt.h>
8#include <linux/smp_lock.h>
9#include <linux/pm.h>
10#include <linux/pci.h>
11#include <linux/apm_bios.h>
12#include <linux/kernel.h>
13#include <linux/string.h>
14#include <linux/syscalls.h>
15#include <linux/tty.h>
16 7
17#include <asm/semaphore.h> 8#include <asm/semaphore.h>
18#include <asm/processor.h> 9#include <asm/processor.h>
19#include <asm/i387.h>
20#include <asm/uaccess.h> 10#include <asm/uaccess.h>
21#include <asm/checksum.h>
22#include <asm/io.h>
23#include <asm/delay.h>
24#include <asm/irq.h>
25#include <asm/mmx.h>
26#include <asm/desc.h>
27#include <asm/pgtable.h> 11#include <asm/pgtable.h>
28#include <asm/pgalloc.h>
29#include <asm/nmi.h>
30#include <asm/kdebug.h>
31#include <asm/unistd.h>
32#include <asm/tlbflush.h>
33#include <asm/kdebug.h>
34
35extern spinlock_t rtc_lock;
36 12
37#ifdef CONFIG_SMP
38extern void __write_lock_failed(rwlock_t *rw);
39extern void __read_lock_failed(rwlock_t *rw);
40#endif
41
42/* platform dependent support */
43EXPORT_SYMBOL(boot_cpu_data);
44//EXPORT_SYMBOL(dump_fpu);
45EXPORT_SYMBOL(__ioremap);
46EXPORT_SYMBOL(ioremap_nocache);
47EXPORT_SYMBOL(iounmap);
48EXPORT_SYMBOL(kernel_thread); 13EXPORT_SYMBOL(kernel_thread);
49EXPORT_SYMBOL(pm_idle);
50EXPORT_SYMBOL(pm_power_off);
51 14
52EXPORT_SYMBOL(__down_failed); 15EXPORT_SYMBOL(__down_failed);
53EXPORT_SYMBOL(__down_failed_interruptible); 16EXPORT_SYMBOL(__down_failed_interruptible);
54EXPORT_SYMBOL(__down_failed_trylock); 17EXPORT_SYMBOL(__down_failed_trylock);
55EXPORT_SYMBOL(__up_wakeup); 18EXPORT_SYMBOL(__up_wakeup);
56/* Networking helper routines. */
57EXPORT_SYMBOL(csum_partial_copy_nocheck);
58EXPORT_SYMBOL(ip_compute_csum);
59/* Delay loops */
60EXPORT_SYMBOL(__udelay);
61EXPORT_SYMBOL(__ndelay);
62EXPORT_SYMBOL(__delay);
63EXPORT_SYMBOL(__const_udelay);
64 19
65EXPORT_SYMBOL(__get_user_1); 20EXPORT_SYMBOL(__get_user_1);
66EXPORT_SYMBOL(__get_user_2); 21EXPORT_SYMBOL(__get_user_2);
@@ -71,42 +26,20 @@ EXPORT_SYMBOL(__put_user_2);
71EXPORT_SYMBOL(__put_user_4); 26EXPORT_SYMBOL(__put_user_4);
72EXPORT_SYMBOL(__put_user_8); 27EXPORT_SYMBOL(__put_user_8);
73 28
74EXPORT_SYMBOL(strncpy_from_user);
75EXPORT_SYMBOL(__strncpy_from_user);
76EXPORT_SYMBOL(clear_user);
77EXPORT_SYMBOL(__clear_user);
78EXPORT_SYMBOL(copy_user_generic); 29EXPORT_SYMBOL(copy_user_generic);
79EXPORT_SYMBOL(copy_from_user); 30EXPORT_SYMBOL(copy_from_user);
80EXPORT_SYMBOL(copy_to_user); 31EXPORT_SYMBOL(copy_to_user);
81EXPORT_SYMBOL(copy_in_user);
82EXPORT_SYMBOL(strnlen_user);
83
84#ifdef CONFIG_PCI
85EXPORT_SYMBOL(pci_mem_start);
86#endif
87 32
88EXPORT_SYMBOL(copy_page); 33EXPORT_SYMBOL(copy_page);
89EXPORT_SYMBOL(clear_page); 34EXPORT_SYMBOL(clear_page);
90 35
91EXPORT_SYMBOL(_cpu_pda);
92#ifdef CONFIG_SMP 36#ifdef CONFIG_SMP
93EXPORT_SYMBOL(cpu_data); 37extern void FASTCALL( __write_lock_failed(rwlock_t *rw));
38extern void FASTCALL( __read_lock_failed(rwlock_t *rw));
94EXPORT_SYMBOL(__write_lock_failed); 39EXPORT_SYMBOL(__write_lock_failed);
95EXPORT_SYMBOL(__read_lock_failed); 40EXPORT_SYMBOL(__read_lock_failed);
96
97EXPORT_SYMBOL(smp_call_function);
98EXPORT_SYMBOL(cpu_callout_map);
99#endif
100
101#ifdef CONFIG_VT
102EXPORT_SYMBOL(screen_info);
103#endif 41#endif
104 42
105EXPORT_SYMBOL(rtc_lock);
106
107EXPORT_SYMBOL_GPL(set_nmi_callback);
108EXPORT_SYMBOL_GPL(unset_nmi_callback);
109
110/* Export string functions. We normally rely on gcc builtin for most of these, 43/* Export string functions. We normally rely on gcc builtin for most of these,
111 but gcc sometimes decides not to inline them. */ 44 but gcc sometimes decides not to inline them. */
112#undef memcpy 45#undef memcpy
@@ -114,51 +47,14 @@ EXPORT_SYMBOL_GPL(unset_nmi_callback);
114#undef memmove 47#undef memmove
115 48
116extern void * memset(void *,int,__kernel_size_t); 49extern void * memset(void *,int,__kernel_size_t);
117extern size_t strlen(const char *);
118extern void * memmove(void * dest,const void *src,size_t count);
119extern void * memcpy(void *,const void *,__kernel_size_t); 50extern void * memcpy(void *,const void *,__kernel_size_t);
120extern void * __memcpy(void *,const void *,__kernel_size_t); 51extern void * __memcpy(void *,const void *,__kernel_size_t);
121 52
122EXPORT_SYMBOL(memset); 53EXPORT_SYMBOL(memset);
123EXPORT_SYMBOL(memmove);
124EXPORT_SYMBOL(memcpy); 54EXPORT_SYMBOL(memcpy);
125EXPORT_SYMBOL(__memcpy); 55EXPORT_SYMBOL(__memcpy);
126 56
127#ifdef CONFIG_RWSEM_XCHGADD_ALGORITHM
128/* prototypes are wrong, these are assembly with custom calling functions */
129extern void rwsem_down_read_failed_thunk(void);
130extern void rwsem_wake_thunk(void);
131extern void rwsem_downgrade_thunk(void);
132extern void rwsem_down_write_failed_thunk(void);
133EXPORT_SYMBOL(rwsem_down_read_failed_thunk);
134EXPORT_SYMBOL(rwsem_wake_thunk);
135EXPORT_SYMBOL(rwsem_downgrade_thunk);
136EXPORT_SYMBOL(rwsem_down_write_failed_thunk);
137#endif
138
139EXPORT_SYMBOL(empty_zero_page); 57EXPORT_SYMBOL(empty_zero_page);
140
141EXPORT_SYMBOL(die_chain);
142
143#ifdef CONFIG_SMP
144EXPORT_SYMBOL(cpu_sibling_map);
145EXPORT_SYMBOL(smp_num_siblings);
146#endif
147
148#ifdef CONFIG_BUG
149EXPORT_SYMBOL(out_of_line_bug);
150#endif
151
152EXPORT_SYMBOL(init_level4_pgt); 58EXPORT_SYMBOL(init_level4_pgt);
153
154extern unsigned long __supported_pte_mask;
155EXPORT_SYMBOL(__supported_pte_mask);
156
157#ifdef CONFIG_SMP
158EXPORT_SYMBOL(flush_tlb_page);
159#endif
160
161EXPORT_SYMBOL(cpu_khz);
162
163EXPORT_SYMBOL(load_gs_index); 59EXPORT_SYMBOL(load_gs_index);
164 60
diff --git a/arch/x86_64/lib/csum-partial.c b/arch/x86_64/lib/csum-partial.c
index 5384e227cdf6..c493735218da 100644
--- a/arch/x86_64/lib/csum-partial.c
+++ b/arch/x86_64/lib/csum-partial.c
@@ -147,4 +147,5 @@ unsigned short ip_compute_csum(unsigned char * buff, int len)
147{ 147{
148 return csum_fold(csum_partial(buff,len,0)); 148 return csum_fold(csum_partial(buff,len,0));
149} 149}
150EXPORT_SYMBOL(ip_compute_csum);
150 151
diff --git a/arch/x86_64/lib/csum-wrappers.c b/arch/x86_64/lib/csum-wrappers.c
index 94323f20816e..b1320ec58428 100644
--- a/arch/x86_64/lib/csum-wrappers.c
+++ b/arch/x86_64/lib/csum-wrappers.c
@@ -109,6 +109,7 @@ csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst, int len,
109{ 109{
110 return csum_partial_copy_generic(src,dst,len,sum,NULL,NULL); 110 return csum_partial_copy_generic(src,dst,len,sum,NULL,NULL);
111} 111}
112EXPORT_SYMBOL(csum_partial_copy_nocheck);
112 113
113unsigned short csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr, 114unsigned short csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
114 __u32 len, unsigned short proto, unsigned int sum) 115 __u32 len, unsigned short proto, unsigned int sum)
diff --git a/arch/x86_64/lib/delay.c b/arch/x86_64/lib/delay.c
index 03c460cbdd1c..b6cd3cca2f45 100644
--- a/arch/x86_64/lib/delay.c
+++ b/arch/x86_64/lib/delay.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <linux/config.h> 11#include <linux/config.h>
12#include <linux/module.h>
12#include <linux/sched.h> 13#include <linux/sched.h>
13#include <linux/delay.h> 14#include <linux/delay.h>
14#include <asm/delay.h> 15#include <asm/delay.h>
@@ -36,18 +37,22 @@ void __delay(unsigned long loops)
36 } 37 }
37 while((now-bclock) < loops); 38 while((now-bclock) < loops);
38} 39}
40EXPORT_SYMBOL(__delay);
39 41
40inline void __const_udelay(unsigned long xloops) 42inline void __const_udelay(unsigned long xloops)
41{ 43{
42 __delay((xloops * HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32); 44 __delay((xloops * HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32);
43} 45}
46EXPORT_SYMBOL(__const_udelay);
44 47
45void __udelay(unsigned long usecs) 48void __udelay(unsigned long usecs)
46{ 49{
47 __const_udelay(usecs * 0x000010c6); /* 2**32 / 1000000 */ 50 __const_udelay(usecs * 0x000010c6); /* 2**32 / 1000000 */
48} 51}
52EXPORT_SYMBOL(__udelay);
49 53
50void __ndelay(unsigned long nsecs) 54void __ndelay(unsigned long nsecs)
51{ 55{
52 __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */ 56 __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
53} 57}
58EXPORT_SYMBOL(__ndelay);
diff --git a/arch/x86_64/lib/memmove.c b/arch/x86_64/lib/memmove.c
index e93d5255fdc9..751ebae8ec42 100644
--- a/arch/x86_64/lib/memmove.c
+++ b/arch/x86_64/lib/memmove.c
@@ -3,12 +3,13 @@
3 */ 3 */
4#define _STRING_C 4#define _STRING_C
5#include <linux/string.h> 5#include <linux/string.h>
6#include <linux/module.h>
6 7
7#undef memmove 8#undef memmove
8void *memmove(void * dest,const void *src,size_t count) 9void *memmove(void * dest,const void *src,size_t count)
9{ 10{
10 if (dest < src) { 11 if (dest < src) {
11 __inline_memcpy(dest,src,count); 12 return memcpy(dest,src,count);
12 } else { 13 } else {
13 char *p = (char *) dest + count; 14 char *p = (char *) dest + count;
14 char *s = (char *) src + count; 15 char *s = (char *) src + count;
@@ -17,3 +18,4 @@ void *memmove(void * dest,const void *src,size_t count)
17 } 18 }
18 return dest; 19 return dest;
19} 20}
21EXPORT_SYMBOL(memmove);
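
The memmove() change above calls memcpy() for the dest < src case and keeps the byte-by-byte backward walk otherwise: when the ranges overlap, copying in the wrong direction overwrites source bytes before they have been read. A self-contained illustration of why the direction matters (this is an independent demo, not the kernel routine):

#include <stdio.h>
#include <string.h>

/* forward, byte-at-a-time copy: fine when dest < src, wrong when dest overlaps above */
static void copy_forward(char *dest, const char *src, size_t n)
{
    size_t i;

    for (i = 0; i < n; i++)
        dest[i] = src[i];
}

int main(void)
{
    char a[] = "abcdef";
    char b[] = "abcdef";

    copy_forward(a + 2, a, 4);      /* dest above src: clobbers, yields "ababab" */
    memmove(b + 2, b, 4);           /* backward copy preserves data: "ababcd" */
    printf("forward: %s, memmove: %s\n", a, b);
    return 0;
}
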
diff --git a/arch/x86_64/lib/usercopy.c b/arch/x86_64/lib/usercopy.c
index 9bc2c295818e..893d43f838cc 100644
--- a/arch/x86_64/lib/usercopy.c
+++ b/arch/x86_64/lib/usercopy.c
@@ -5,6 +5,7 @@
  * Copyright 1997 Linus Torvalds
  * Copyright 2002 Andi Kleen <ak@suse.de>
  */
+#include <linux/module.h>
 #include <asm/uaccess.h>
 
 /*
@@ -47,15 +48,17 @@ __strncpy_from_user(char *dst, const char __user *src, long count)
 	__do_strncpy_from_user(dst, src, count, res);
 	return res;
 }
+EXPORT_SYMBOL(__strncpy_from_user);
 
 long
 strncpy_from_user(char *dst, const char __user *src, long count)
 {
 	long res = -EFAULT;
 	if (access_ok(VERIFY_READ, src, 1))
-		__do_strncpy_from_user(dst, src, count, res);
+		return __strncpy_from_user(dst, src, count);
 	return res;
 }
+EXPORT_SYMBOL(strncpy_from_user);
 
 /*
  * Zero Userspace
@@ -94,7 +97,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
 		  [zero] "r" (0UL), [eight] "r" (8UL));
 	return size;
 }
-
+EXPORT_SYMBOL(__clear_user);
 
 unsigned long clear_user(void __user *to, unsigned long n)
 {
@@ -102,6 +105,7 @@ unsigned long clear_user(void __user *to, unsigned long n)
 		return __clear_user(to, n);
 	return n;
 }
+EXPORT_SYMBOL(clear_user);
 
 /*
  * Return the size of a string (including the ending 0)
@@ -125,6 +129,7 @@ long __strnlen_user(const char __user *s, long n)
 		s++;
 	}
 }
+EXPORT_SYMBOL(__strnlen_user);
 
 long strnlen_user(const char __user *s, long n)
 {
@@ -132,6 +137,7 @@ long strnlen_user(const char __user *s, long n)
 		return 0;
 	return __strnlen_user(s, n);
 }
+EXPORT_SYMBOL(strnlen_user);
 
 long strlen_user(const char __user *s)
 {
@@ -147,6 +153,7 @@ long strlen_user(const char __user *s)
 		s++;
 	}
 }
+EXPORT_SYMBOL(strlen_user);
 
 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
 {
@@ -155,3 +162,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from, unsigned le
 	}
 	return len;
 }
+EXPORT_SYMBOL(copy_in_user);
+
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 55250593d8c9..08dc696f54ee 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -41,6 +41,41 @@
 #define PF_RSVD (1<<3)
 #define PF_INSTR (1<<4)
 
+#ifdef CONFIG_KPROBES
+ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
+
+/* Hook to register for page fault notifications */
+int register_page_fault_notifier(struct notifier_block *nb)
+{
+	vmalloc_sync_all();
+	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
+}
+
+int unregister_page_fault_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
+}
+
+static inline int notify_page_fault(enum die_val val, const char *str,
+			struct pt_regs *regs, long err, int trap, int sig)
+{
+	struct die_args args = {
+		.regs = regs,
+		.str = str,
+		.err = err,
+		.trapnr = trap,
+		.signr = sig
+	};
+	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+}
+#else
+static inline int notify_page_fault(enum die_val val, const char *str,
+			struct pt_regs *regs, long err, int trap, int sig)
+{
+	return NOTIFY_DONE;
+}
+#endif
+
 void bust_spinlocks(int yes)
 {
 	int loglevel_save = console_loglevel;
@@ -160,7 +195,7 @@ void dump_pagetable(unsigned long address)
 	printk("PGD %lx ", pgd_val(*pgd));
 	if (!pgd_present(*pgd)) goto ret;
 
-	pud = __pud_offset_k((pud_t *)pgd_page(*pgd), address);
+	pud = pud_offset(pgd, address);
 	if (bad_address(pud)) goto bad;
 	printk("PUD %lx ", pud_val(*pud));
 	if (!pud_present(*pud)) goto ret;
@@ -348,7 +383,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 		if (vmalloc_fault(address) >= 0)
 			return;
 	}
-	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+	if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
 					SIGSEGV) == NOTIFY_STOP)
 		return;
 	/*
@@ -358,7 +393,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 		goto bad_area_nosemaphore;
 	}
 
-	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+	if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
 					SIGSEGV) == NOTIFY_STOP)
 		return;
 
@@ -410,8 +445,10 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 		if (!(vma->vm_flags & VM_GROWSDOWN))
 			goto bad_area;
 		if (error_code & 4) {
-			// XXX: align red zone size with ABI
-			if (address + 128 < regs->rsp)
+			/* Allow userspace just enough access below the stack pointer
+			 * to let the 'enter' instruction work.
+			 */
+			if (address + 65536 + 32 * sizeof(unsigned long) < regs->rsp)
 				goto bad_area;
 		}
 		if (expand_stack(vma, address))
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 4ba34e95d835..95bd232ff0cf 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -23,6 +23,7 @@
 #include <linux/bootmem.h>
 #include <linux/proc_fs.h>
 #include <linux/pci.h>
+#include <linux/poison.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/memory_hotplug.h>
@@ -41,8 +42,6 @@
 #include <asm/proto.h>
 #include <asm/smp.h>
 #include <asm/sections.h>
-#include <asm/dma-mapping.h>
-#include <asm/swiotlb.h>
 
 #ifndef Dprintk
 #define Dprintk(x...)
@@ -90,8 +89,6 @@ void show_mem(void)
 	printk(KERN_INFO "%lu pages swap cached\n",cached);
 }
 
-/* References to section boundaries */
-
 int after_bootmem;
 
 static __init void *spp_getpage(void)
@@ -261,9 +258,10 @@ phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
 	for (i = 0; i < PTRS_PER_PMD; pmd++, i++, address += PMD_SIZE) {
 		unsigned long entry;
 
-		if (address > end) {
-			for (; i < PTRS_PER_PMD; i++, pmd++)
-				set_pmd(pmd, __pmd(0));
+		if (address >= end) {
+			if (!after_bootmem)
+				for (; i < PTRS_PER_PMD; i++, pmd++)
+					set_pmd(pmd, __pmd(0));
 			break;
 		}
 		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
@@ -341,7 +339,8 @@ static void __init find_early_table_space(unsigned long end)
 	table_end = table_start;
 
 	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
-		end, table_start << PAGE_SHIFT, table_end << PAGE_SHIFT);
+		end, table_start << PAGE_SHIFT,
+		(table_start << PAGE_SHIFT) + tables);
 }
 
 /* Setup the direct mapping of the physical memory at PAGE_OFFSET.
@@ -372,7 +371,7 @@ void __meminit init_memory_mapping(unsigned long start, unsigned long end)
 		pud_t *pud;
 
 		if (after_bootmem)
-			pud = pud_offset_k(pgd, start & PGDIR_MASK);
+			pud = pud_offset(pgd, start & PGDIR_MASK);
 		else
 			pud = alloc_low_page(&map, &pud_phys);
 
@@ -508,8 +507,6 @@ void __init clear_kernel_mapping(unsigned long address, unsigned long size)
 /*
  * Memory hotplug specific functions
  */
-#if defined(CONFIG_ACPI_HOTPLUG_MEMORY) || defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)
-
 void online_page(struct page *page)
 {
 	ClearPageReserved(page);
@@ -519,31 +516,17 @@ void online_page(struct page *page)
 	num_physpages++;
 }
 
-#ifndef CONFIG_MEMORY_HOTPLUG
+#ifdef CONFIG_MEMORY_HOTPLUG
 /*
- * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
- * just online the pages.
+ * XXX: memory_add_physaddr_to_nid() is to find node id from physical address
+ *	via probe interface of sysfs. If acpi notifies hot-add event, then it
+ *	can tell node id by searching dsdt. But, probe interface doesn't have
+ *	node id. So, return 0 as node id at this time.
  */
-int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
+#ifdef CONFIG_NUMA
+int memory_add_physaddr_to_nid(u64 start)
 {
-	int err = -EIO;
-	unsigned long pfn;
-	unsigned long total = 0, mem = 0;
-	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
-		if (pfn_valid(pfn)) {
-			online_page(pfn_to_page(pfn));
-			err = 0;
-			mem++;
-		}
-		total++;
-	}
-	if (!err) {
-		z->spanned_pages += total;
-		z->present_pages += mem;
-		z->zone_pgdat->node_spanned_pages += total;
-		z->zone_pgdat->node_present_pages += mem;
-	}
-	return err;
+	return 0;
 }
 #endif
 
@@ -551,9 +534,9 @@ int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
  * Memory is added always to NORMAL zone. This means you will never get
  * additional DMA/DMA32 memory.
  */
-int add_memory(u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size)
 {
-	struct pglist_data *pgdat = NODE_DATA(0);
+	struct pglist_data *pgdat = NODE_DATA(nid);
 	struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
@@ -570,7 +553,7 @@ error:
 	printk("%s: Problem encountered in __add_pages!\n", __func__);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(add_memory);
+EXPORT_SYMBOL_GPL(arch_add_memory);
 
 int remove_memory(u64 start, u64 size)
 {
@@ -578,7 +561,33 @@ int remove_memory(u64 start, u64 size)
 }
 EXPORT_SYMBOL_GPL(remove_memory);
 
-#endif
+#else /* CONFIG_MEMORY_HOTPLUG */
+/*
+ * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
+ * just online the pages.
+ */
+int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
+{
+	int err = -EIO;
+	unsigned long pfn;
+	unsigned long total = 0, mem = 0;
+	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
+		if (pfn_valid(pfn)) {
+			online_page(pfn_to_page(pfn));
+			err = 0;
+			mem++;
+		}
+		total++;
+	}
+	if (!err) {
+		z->spanned_pages += total;
+		z->present_pages += mem;
+		z->zone_pgdat->node_spanned_pages += total;
+		z->zone_pgdat->node_present_pages += mem;
+	}
+	return err;
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
 
 static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
 			 kcore_vsyscall;
@@ -587,10 +596,7 @@ void __init mem_init(void)
 {
 	long codesize, reservedpages, datasize, initsize;
 
-#ifdef CONFIG_SWIOTLB
-	pci_swiotlb_init();
-#endif
-	no_iommu_init();
+	pci_iommu_alloc();
 
 	/* How many end-of-memory variables you have, grandma! */
 	max_low_pfn = end_pfn;
@@ -644,20 +650,31 @@ void __init mem_init(void)
 #endif
 }
 
-void free_initmem(void)
+void free_init_pages(char *what, unsigned long begin, unsigned long end)
 {
 	unsigned long addr;
 
-	addr = (unsigned long)(&__init_begin);
-	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+	if (begin >= end)
+		return;
+
+	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+	for (addr = begin; addr < end; addr += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(addr));
 		init_page_count(virt_to_page(addr));
-		memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
+		memset((void *)(addr & ~(PAGE_SIZE-1)),
+			POISON_FREE_INITMEM, PAGE_SIZE);
 		free_page(addr);
 		totalram_pages++;
 	}
-	memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
-	printk ("Freeing unused kernel memory: %luk freed\n", (__init_end - __init_begin) >> 10);
+}
+
+void free_initmem(void)
+{
+	memset(__initdata_begin, POISON_FREE_INITDATA,
+		__initdata_end - __initdata_begin);
+	free_init_pages("unused kernel memory",
+			(unsigned long)(&__init_begin),
+			(unsigned long)(&__init_end));
 }
 
 #ifdef CONFIG_DEBUG_RODATA
@@ -686,15 +703,7 @@ void mark_rodata_ro(void)
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-	if (start >= end)
-		return;
-	printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
-	for (; start < end; start += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(start));
-		init_page_count(virt_to_page(start));
-		free_page(start);
-		totalram_pages++;
-	}
+	free_init_pages("initrd memory", start, end);
 }
 #endif
 
diff --git a/arch/x86_64/mm/ioremap.c b/arch/x86_64/mm/ioremap.c
index ae207064201e..45d7d823c3b8 100644
--- a/arch/x86_64/mm/ioremap.c
+++ b/arch/x86_64/mm/ioremap.c
@@ -11,6 +11,7 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/module.h>
 #include <asm/io.h>
 #include <asm/pgalloc.h>
 #include <asm/fixmap.h>
@@ -219,6 +220,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 	}
 	return (__force void __iomem *) (offset + (char *)addr);
 }
+EXPORT_SYMBOL(__ioremap);
 
 /**
  * ioremap_nocache - map bus memory into CPU space
@@ -246,6 +248,7 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
 {
 	return __ioremap(phys_addr, size, _PAGE_PCD);
 }
+EXPORT_SYMBOL(ioremap_nocache);
 
 /**
  * iounmap - Free a IO remapping
@@ -291,3 +294,5 @@ void iounmap(volatile void __iomem *addr)
 	BUG_ON(p != o || o == NULL);
 	kfree(p);
 }
+EXPORT_SYMBOL(iounmap);
+
diff --git a/arch/x86_64/pci/k8-bus.c b/arch/x86_64/pci/k8-bus.c
index 3acf60ded2a0..b50a7c7c47f8 100644
--- a/arch/x86_64/pci/k8-bus.c
+++ b/arch/x86_64/pci/k8-bus.c
@@ -2,6 +2,7 @@
 #include <linux/pci.h>
 #include <asm/mpspec.h>
 #include <linux/cpumask.h>
+#include <asm/k8.h>
 
 /*
  * This discovers the pcibus <-> node mapping on AMD K8.
@@ -18,7 +19,6 @@
 #define NR_LDT_BUS_NUMBER_REGISTERS 3
 #define SECONDARY_LDT_BUS_NUMBER(dword) ((dword >> 8) & 0xFF)
 #define SUBORDINATE_LDT_BUS_NUMBER(dword) ((dword >> 16) & 0xFF)
-#define PCI_DEVICE_ID_K8HTCONFIG 0x1100
 
 /**
  * fill_mp_bus_to_cpumask()
@@ -28,8 +28,7 @@
 __init static int
 fill_mp_bus_to_cpumask(void)
 {
-	struct pci_dev *nb_dev = NULL;
-	int i, j;
+	int i, j, k;
 	u32 ldtbus, nid;
 	static int lbnr[3] = {
 		LDT_BUS_NUMBER_REGISTER_0,
@@ -37,8 +36,9 @@ fill_mp_bus_to_cpumask(void)
 		LDT_BUS_NUMBER_REGISTER_2
 	};
 
-	while ((nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
-			PCI_DEVICE_ID_K8HTCONFIG, nb_dev))) {
+	cache_k8_northbridges();
+	for (k = 0; k < num_k8_northbridges; k++) {
+		struct pci_dev *nb_dev = k8_northbridges[k];
 		pci_read_config_dword(nb_dev, NODE_ID_REGISTER, &nid);
 
 		for (i = 0; i < NR_LDT_BUS_NUMBER_REGISTERS; i++) {
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index 98fac8489aed..3a3a4c66ef87 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -71,7 +71,7 @@ archprepare: $(archinc)/.platform
 # Update machine cpu and platform symlinks if something which affects
 # them changed.
 
-$(archinc)/.platform: $(wildcard include/config/arch/*.h) include/config/MARKER
+$(archinc)/.platform: $(wildcard include/config/arch/*.h) include/config/auto.conf
 	@echo '  SYMLINK $(archinc)/xtensa/config -> $(archinc)/xtensa/config-$(CPU)'
 	$(Q)mkdir -p $(archinc)
 	$(Q)mkdir -p $(archinc)/xtensa
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index 937d81f62f43..fe14909f45e0 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -29,7 +29,7 @@
 
 extern volatile unsigned long wall_jiffies;
 
-spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(rtc_lock);
 EXPORT_SYMBOL(rtc_lock);
 
 
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 225d64d73f04..27e409089a7b 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -461,7 +461,7 @@ void show_code(unsigned int *pc)
 	}
 }
 
-spinlock_t die_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(die_lock);
 
 void die(const char * str, struct pt_regs * regs, long err)
 {