Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                            |   5
-rw-r--r--  arch/x86/Makefile                           |  29
-rw-r--r--  arch/x86/boot/Makefile                      |  30
-rwxr-xr-x  arch/x86/include/asm/cpu_debug.h            |  16
-rw-r--r--  arch/x86/include/asm/percpu.h               |   8
-rw-r--r--  arch/x86/include/asm/processor.h            |   5
-rw-r--r--  arch/x86/include/asm/xen/hypercall.h        |   2
-rw-r--r--  arch/x86/kernel/check.c                     |   2
-rw-r--r--  arch/x86/kernel/cpu/addon_cpuid_features.c  |   2
-rw-r--r--  arch/x86/kernel/cpu/amd.c                   |   2
-rw-r--r--  arch/x86/kernel/cpu/centaur.c               |   2
-rw-r--r--  arch/x86/kernel/cpu/centaur_64.c            |   2
-rw-r--r--  arch/x86/kernel/cpu/common.c                | 390
-rw-r--r--  arch/x86/kernel/cpu/cpu.h                   |  11
-rwxr-xr-x  arch/x86/kernel/cpu/cpu_debug.c             | 118
-rw-r--r--  arch/x86/kernel/cpu/cyrix.c                 |  16
-rw-r--r--  arch/x86/kernel/cpu/intel.c                 |   7
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c       |   8
-rw-r--r--  arch/x86/kernel/cpu/transmeta.c             |   2
-rw-r--r--  arch/x86/kernel/cpu/umc.c                   |   2
-rw-r--r--  arch/x86/kernel/e820.c                      |  53
-rw-r--r--  arch/x86/kernel/early_printk.c              |  20
-rw-r--r--  arch/x86/kernel/entry_32.S                  |  18
-rw-r--r--  arch/x86/kernel/entry_64.S                  |   4
-rw-r--r--  arch/x86/kernel/irq.c                       |  33
-rw-r--r--  arch/x86/kernel/mmconf-fam10h_64.c          |   2
-rw-r--r--  arch/x86/kernel/mpparse.c                   |   6
-rw-r--r--  arch/x86/kernel/setup_percpu.c              |  63
-rw-r--r--  arch/x86/lib/memcpy_64.S                    | 143
-rw-r--r--  arch/x86/mm/highmem_32.c                    |  20
-rw-r--r--  arch/x86/mm/init.c                          |   6
-rw-r--r--  arch/x86/mm/iomap_32.c                      |  18
-rw-r--r--  arch/x86/mm/ioremap.c                       |  36
-rw-r--r--  arch/x86/mm/pat.c                           |   5
-rw-r--r--  arch/x86/pci/common.c                       |   4
-rw-r--r--  arch/x86/pci/fixup.c                        |   4
36 files changed, 607 insertions(+), 487 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 7fcf8518268..34bc3a89228 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -169,6 +169,9 @@ config GENERIC_HARDIRQS
 	bool
 	default y
 
+config GENERIC_HARDIRQS_NO__DO_IRQ
+	def_bool y
+
 config GENERIC_IRQ_PROBE
 	bool
 	default y
@@ -1129,7 +1132,7 @@ config NUMA_EMU
 
 config NODES_SHIFT
 	int "Maximum NUMA Nodes (as a power of 2)" if !MAXSMP
-	range 1 9 if X86_64
+	range 1 9
 	default "9" if MAXSMP
 	default "6" if X86_64
 	default "4" if X86_NUMAQ
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 1836191839e..f05d8c91d9e 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -153,34 +153,23 @@ endif
 
 boot := arch/x86/boot
 
-PHONY += zImage bzImage compressed zlilo bzlilo \
-	zdisk bzdisk fdimage fdimage144 fdimage288 isoimage install
+BOOT_TARGETS = bzlilo bzdisk fdimage fdimage144 fdimage288 isoimage install
+
+PHONY += bzImage $(BOOT_TARGETS)
 
 # Default kernel to build
 all: bzImage
 
 # KBUILD_IMAGE specify target image being built
-                    KBUILD_IMAGE := $(boot)/bzImage
-zImage zlilo zdisk: KBUILD_IMAGE := $(boot)/zImage
+KBUILD_IMAGE := $(boot)/bzImage
 
-zImage bzImage: vmlinux
+bzImage: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) $(KBUILD_IMAGE)
 	$(Q)mkdir -p $(objtree)/arch/$(UTS_MACHINE)/boot
 	$(Q)ln -fsn ../../x86/boot/bzImage $(objtree)/arch/$(UTS_MACHINE)/boot/$@
 
-compressed: zImage
-
-zlilo bzlilo: vmlinux
-	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) zlilo
-
-zdisk bzdisk: vmlinux
-	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) zdisk
-
-fdimage fdimage144 fdimage288 isoimage: vmlinux
-	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) $@
-
-install:
-	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install
+$(BOOT_TARGETS): vmlinux
+	$(Q)$(MAKE) $(build)=$(boot) $@
 
 PHONY += vdso_install
 vdso_install:
@@ -205,7 +194,3 @@ define archhelp
   echo  '  FDARGS="..."  arguments for the booted kernel'
   echo  '  FDINITRD=file initrd for the booted kernel'
 endef
-
-CLEAN_FILES += arch/x86/boot/fdimage \
-	       arch/x86/boot/image.iso \
-	       arch/x86/boot/mtools.conf
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 57a29fecf6b..fb737ce5888 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -23,6 +23,7 @@ ROOT_DEV := CURRENT
 SVGA_MODE	:= -DSVGA_MODE=NORMAL_VGA
 
 targets		:= vmlinux.bin setup.bin setup.elf bzImage
+targets		+= fdimage fdimage144 fdimage288 image.iso mtools.conf
 subdir-		:= compressed
 
 setup-y		+= a20.o cmdline.o copy.o cpu.o cpucheck.o edd.o
@@ -109,9 +110,11 @@ $(obj)/setup.bin: $(obj)/setup.elf FORCE
 $(obj)/compressed/vmlinux: FORCE
 	$(Q)$(MAKE) $(build)=$(obj)/compressed $@
 
-# Set this if you want to pass append arguments to the zdisk/fdimage/isoimage kernel
+# Set this if you want to pass append arguments to the
+# bzdisk/fdimage/isoimage kernel
 FDARGS =
-# Set this if you want an initrd included with the zdisk/fdimage/isoimage kernel
+# Set this if you want an initrd included with the
+# bzdisk/fdimage/isoimage kernel
 FDINITRD =
 
 image_cmdline = default linux $(FDARGS) $(if $(FDINITRD),initrd=initrd.img,)
@@ -120,7 +123,7 @@ $(obj)/mtools.conf: $(src)/mtools.conf.in
 	sed -e 's|@OBJ@|$(obj)|g' < $< > $@
 
 # This requires write access to /dev/fd0
-zdisk: $(BOOTIMAGE) $(obj)/mtools.conf
+bzdisk: $(obj)/bzImage $(obj)/mtools.conf
 	MTOOLSRC=$(obj)/mtools.conf mformat a: ; sync
 	syslinux /dev/fd0 ; sync
 	echo '$(image_cmdline)' | \
@@ -128,10 +131,10 @@ zdisk: $(BOOTIMAGE) $(obj)/mtools.conf
 	if [ -f '$(FDINITRD)' ] ; then \
 		MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' a:initrd.img ; \
 	fi
-	MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) a:linux ; sync
+	MTOOLSRC=$(obj)/mtools.conf mcopy $(obj)/bzImage a:linux ; sync
 
 # These require being root or having syslinux 2.02 or higher installed
-fdimage fdimage144: $(BOOTIMAGE) $(obj)/mtools.conf
+fdimage fdimage144: $(obj)/bzImage $(obj)/mtools.conf
 	dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=1440
 	MTOOLSRC=$(obj)/mtools.conf mformat v: ; sync
 	syslinux $(obj)/fdimage ; sync
@@ -140,9 +143,9 @@ fdimage fdimage144: $(BOOTIMAGE) $(obj)/mtools.conf
 	if [ -f '$(FDINITRD)' ] ; then \
 		MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' v:initrd.img ; \
 	fi
-	MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) v:linux ; sync
+	MTOOLSRC=$(obj)/mtools.conf mcopy $(obj)/bzImage v:linux ; sync
 
-fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf
+fdimage288: $(obj)/bzImage $(obj)/mtools.conf
 	dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=2880
 	MTOOLSRC=$(obj)/mtools.conf mformat w: ; sync
 	syslinux $(obj)/fdimage ; sync
@@ -151,9 +154,9 @@ fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf
 	if [ -f '$(FDINITRD)' ] ; then \
 		MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' w:initrd.img ; \
 	fi
-	MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) w:linux ; sync
+	MTOOLSRC=$(obj)/mtools.conf mcopy $(obj)/bzImage w:linux ; sync
 
-isoimage: $(BOOTIMAGE)
+isoimage: $(obj)/bzImage
 	-rm -rf $(obj)/isoimage
 	mkdir $(obj)/isoimage
 	for i in lib lib64 share end ; do \
@@ -163,7 +166,7 @@ isoimage: $(BOOTIMAGE)
 		fi ; \
 		if [ $$i = end ] ; then exit 1 ; fi ; \
 	done
-	cp $(BOOTIMAGE) $(obj)/isoimage/linux
+	cp $(obj)/bzImage $(obj)/isoimage/linux
 	echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg
 	if [ -f '$(FDINITRD)' ] ; then \
 		cp '$(FDINITRD)' $(obj)/isoimage/initrd.img ; \
@@ -174,12 +177,13 @@ isoimage: $(BOOTIMAGE)
 	isohybrid $(obj)/image.iso 2>/dev/null || true
 	rm -rf $(obj)/isoimage
 
-zlilo: $(BOOTIMAGE)
+bzlilo: $(obj)/bzImage
 	if [ -f $(INSTALL_PATH)/vmlinuz ]; then mv $(INSTALL_PATH)/vmlinuz $(INSTALL_PATH)/vmlinuz.old; fi
 	if [ -f $(INSTALL_PATH)/System.map ]; then mv $(INSTALL_PATH)/System.map $(INSTALL_PATH)/System.old; fi
-	cat $(BOOTIMAGE) > $(INSTALL_PATH)/vmlinuz
+	cat $(obj)/bzImage > $(INSTALL_PATH)/vmlinuz
 	cp System.map $(INSTALL_PATH)/
 	if [ -x /sbin/lilo ]; then /sbin/lilo; else /etc/lilo/install; fi
 
 install:
-	sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)"
+	sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(obj)/bzImage \
+		System.map "$(INSTALL_PATH)"
diff --git a/arch/x86/include/asm/cpu_debug.h b/arch/x86/include/asm/cpu_debug.h
index d24d64fcee0..56f1635e461 100755
--- a/arch/x86/include/asm/cpu_debug.h
+++ b/arch/x86/include/asm/cpu_debug.h
@@ -171,16 +171,22 @@ struct cpu_private {
 struct cpu_debug_base {
 	char		*name;		/* Register name	*/
 	unsigned	flag;		/* Register flag	*/
+	unsigned	write;		/* Register write flag	*/
 };
 
-struct cpu_cpuX_base {
-	struct dentry	*dentry;	/* Register dentry	*/
-	int		init;		/* Register index file	*/
-};
-
+/*
+ * Currently it looks similar to cpu_debug_base but once we add more files
+ * cpu_file_base will go in different direction
+ */
 struct cpu_file_base {
 	char		*name;		/* Register file name	*/
 	unsigned	flag;		/* Register file flag	*/
+	unsigned	write;		/* Register write flag	*/
+};
+
+struct cpu_cpuX_base {
+	struct dentry	*dentry;	/* Register dentry	*/
+	int		init;		/* Register index file	*/
 };
 
 struct cpu_debug_range {
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 8f1d2fbec1d..aee103b26d0 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -43,14 +43,6 @@
 #else /* ...!ASSEMBLY */
 
 #include <linux/stringify.h>
-#include <asm/sections.h>
-
-#define __addr_to_pcpu_ptr(addr)					\
-	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr	\
-		 + (unsigned long)__per_cpu_start)
-#define __pcpu_ptr_to_addr(ptr)						\
-	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr	\
-		 - (unsigned long)__per_cpu_start)
 
 #ifdef CONFIG_SMP
 #define __percpu_arg(x)		"%%"__stringify(__percpu_seg)":%P" #x
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 76139506c3e..ae85a8d66a3 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -75,9 +75,9 @@ struct cpuinfo_x86 {
 #else
 	/* Number of 4K pages in DTLB/ITLB combined(in pages): */
 	int			x86_tlbsize;
+#endif
 	__u8			x86_virt_bits;
 	__u8			x86_phys_bits;
-#endif
 	/* CPUID returned core id bits: */
 	__u8			x86_coreid_bits;
 	/* Max extended CPUID function supported: */
@@ -391,6 +391,9 @@ DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
 DECLARE_INIT_PER_CPU(irq_stack_union);
 
 DECLARE_PER_CPU(char *, irq_stack_ptr);
+DECLARE_PER_CPU(unsigned int, irq_count);
+extern unsigned long kernel_eflags;
+extern asmlinkage void ignore_sysret(void);
 #else	/* X86_64 */
 #ifdef CONFIG_CC_STACKPROTECTOR
 DECLARE_PER_CPU(unsigned long, stack_canary);
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 5e79ca69432..9c371e4a9fa 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -296,6 +296,8 @@ HYPERVISOR_get_debugreg(int reg)
 static inline int
 HYPERVISOR_update_descriptor(u64 ma, u64 desc)
 {
+	if (sizeof(u64) == sizeof(long))
+		return _hypercall2(int, update_descriptor, ma, desc);
 	return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
 }
 
diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c
index 2ac0ab71412..b617b1164f1 100644
--- a/arch/x86/kernel/check.c
+++ b/arch/x86/kernel/check.c
@@ -83,7 +83,7 @@ void __init setup_bios_corruption_check(void)
 		u64 size;
 		addr = find_e820_area_size(addr, &size, PAGE_SIZE);
 
-		if (addr == 0)
+		if (!(addr + 1))
 			break;
 
 		if ((addr + size) > corruption_check_size)
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 6882a735d9c..8220ae69849 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -29,7 +29,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
 	u32 regs[4];
 	const struct cpuid_bit *cb;
 
-	static const struct cpuid_bit cpuid_bits[] = {
+	static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
 		{ X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 },
 		{ 0, 0, 0, 0 }
 	};
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index f47df59016c..7e4a459daa6 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -502,7 +502,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int
 }
 #endif
 
-static struct cpu_dev amd_cpu_dev __cpuinitdata = {
+static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
 	.c_vendor	= "AMD",
 	.c_ident	= { "AuthenticAMD" },
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 89bfdd9cacc..983e0830f0d 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -468,7 +468,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 	return size;
 }
 
-static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
+static const struct cpu_dev __cpuinitconst centaur_cpu_dev = {
 	.c_vendor	= "Centaur",
 	.c_ident	= { "CentaurHauls" },
 	.c_early_init	= early_init_centaur,
diff --git a/arch/x86/kernel/cpu/centaur_64.c b/arch/x86/kernel/cpu/centaur_64.c
index a1625f5a1e7..51b09c48c9c 100644
--- a/arch/x86/kernel/cpu/centaur_64.c
+++ b/arch/x86/kernel/cpu/centaur_64.c
@@ -25,7 +25,7 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
 }
 
-static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
+static const struct cpu_dev centaur_cpu_dev __cpuinitconst = {
 	.c_vendor	= "Centaur",
 	.c_ident	= { "CentaurHauls" },
 	.c_early_init	= early_init_centaur,
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index f8869978bbb..e2962cc1e27 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1,52 +1,52 @@
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
 #include <linux/bootmem.h>
+#include <linux/linkage.h>
 #include <linux/bitops.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/kgdb.h>
-#include <linux/topology.h>
+#include <linux/percpu.h>
+#include <linux/string.h>
 #include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/kgdb.h>
 #include <linux/smp.h>
-#include <linux/percpu.h>
-#include <asm/i387.h>
-#include <asm/msr.h>
-#include <asm/io.h>
-#include <asm/linkage.h>
+#include <linux/io.h>
+
+#include <asm/stackprotector.h>
 #include <asm/mmu_context.h>
+#include <asm/hypervisor.h>
+#include <asm/processor.h>
+#include <asm/sections.h>
+#include <asm/topology.h>
+#include <asm/cpumask.h>
+#include <asm/pgtable.h>
+#include <asm/atomic.h>
+#include <asm/proto.h>
+#include <asm/setup.h>
+#include <asm/apic.h>
+#include <asm/desc.h>
+#include <asm/i387.h>
 #include <asm/mtrr.h>
+#include <asm/numa.h>
+#include <asm/asm.h>
+#include <asm/cpu.h>
 #include <asm/mce.h>
+#include <asm/msr.h>
 #include <asm/pat.h>
-#include <asm/asm.h>
-#include <asm/numa.h>
 #include <asm/smp.h>
-#include <asm/cpu.h>
-#include <asm/cpumask.h>
-#include <asm/apic.h>
 
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/uv/uv.h>
 #endif
 
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/desc.h>
-#include <asm/atomic.h>
-#include <asm/proto.h>
-#include <asm/sections.h>
-#include <asm/setup.h>
-#include <asm/hypervisor.h>
-#include <asm/stackprotector.h>
-
 #include "cpu.h"
 
 #ifdef CONFIG_X86_64
 
 /* all of these masks are initialized in setup_cpu_local_masks() */
-cpumask_var_t cpu_callin_mask;
-cpumask_var_t cpu_callout_mask;
 cpumask_var_t cpu_initialized_mask;
+cpumask_var_t cpu_callout_mask;
+cpumask_var_t cpu_callin_mask;
 
 /* representing cpus for which sibling maps can be computed */
 cpumask_var_t cpu_sibling_setup_mask;
@@ -62,15 +62,15 @@ void __init setup_cpu_local_masks(void)
 
 #else /* CONFIG_X86_32 */
 
-cpumask_t cpu_callin_map;
+cpumask_t cpu_sibling_setup_map;
 cpumask_t cpu_callout_map;
 cpumask_t cpu_initialized;
-cpumask_t cpu_sibling_setup_map;
+cpumask_t cpu_callin_map;
 
 #endif /* CONFIG_X86_32 */
 
 
-static struct cpu_dev *this_cpu __cpuinitdata;
+static const struct cpu_dev *this_cpu __cpuinitdata;
 
 DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 #ifdef CONFIG_X86_64
@@ -79,48 +79,48 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 	 * IRET will check the segment types  kkeil 2000/10/28
 	 * Also sysret mandates a special GDT layout
 	 *
-	 * The TLS descriptors are currently at a different place compared to i386.
+	 * TLS descriptors are currently at a different place compared to i386.
 	 * Hopefully nobody expects them at a fixed place (Wine?)
 	 */
 	[GDT_ENTRY_KERNEL32_CS]		= { { { 0x0000ffff, 0x00cf9b00 } } },
 	[GDT_ENTRY_KERNEL_CS]		= { { { 0x0000ffff, 0x00af9b00 } } },
 	[GDT_ENTRY_KERNEL_DS]		= { { { 0x0000ffff, 0x00cf9300 } } },
 	[GDT_ENTRY_DEFAULT_USER32_CS]	= { { { 0x0000ffff, 0x00cffb00 } } },
 	[GDT_ENTRY_DEFAULT_USER_DS]	= { { { 0x0000ffff, 0x00cff300 } } },
 	[GDT_ENTRY_DEFAULT_USER_CS]	= { { { 0x0000ffff, 0x00affb00 } } },
 #else
 	[GDT_ENTRY_KERNEL_CS]		= { { { 0x0000ffff, 0x00cf9a00 } } },
 	[GDT_ENTRY_KERNEL_DS]		= { { { 0x0000ffff, 0x00cf9200 } } },
 	[GDT_ENTRY_DEFAULT_USER_CS]	= { { { 0x0000ffff, 0x00cffa00 } } },
 	[GDT_ENTRY_DEFAULT_USER_DS]	= { { { 0x0000ffff, 0x00cff200 } } },
 	/*
 	 * Segments used for calling PnP BIOS have byte granularity.
 	 * They code segments and data segments have fixed 64k limits,
 	 * the transfer segment sizes are set at run time.
 	 */
 	/* 32-bit code */
 	[GDT_ENTRY_PNPBIOS_CS32]	= { { { 0x0000ffff, 0x00409a00 } } },
 	/* 16-bit code */
 	[GDT_ENTRY_PNPBIOS_CS16]	= { { { 0x0000ffff, 0x00009a00 } } },
 	/* 16-bit data */
 	[GDT_ENTRY_PNPBIOS_DS]		= { { { 0x0000ffff, 0x00009200 } } },
 	/* 16-bit data */
 	[GDT_ENTRY_PNPBIOS_TS1]		= { { { 0x00000000, 0x00009200 } } },
 	/* 16-bit data */
 	[GDT_ENTRY_PNPBIOS_TS2]		= { { { 0x00000000, 0x00009200 } } },
 	/*
 	 * The APM segments have byte granularity and their bases
 	 * are set at run time.  All have 64k limits.
 	 */
 	/* 32-bit code */
 	[GDT_ENTRY_APMBIOS_BASE]	= { { { 0x0000ffff, 0x00409a00 } } },
 	/* 16-bit code */
 	[GDT_ENTRY_APMBIOS_BASE+1]	= { { { 0x0000ffff, 0x00009a00 } } },
 	/* data */
 	[GDT_ENTRY_APMBIOS_BASE+2]	= { { { 0x0000ffff, 0x00409200 } } },
 
 	[GDT_ENTRY_ESPFIX_SS]		= { { { 0x00000000, 0x00c09200 } } },
 	[GDT_ENTRY_PERCPU]		= { { { 0x0000ffff, 0x00cf9200 } } },
 	GDT_STACK_CANARY_INIT
 #endif
 } };
@@ -164,16 +164,17 @@ static inline int flag_is_changeable_p(u32 flag)
 	 * the CPUID. Add "volatile" to not allow gcc to
 	 * optimize the subsequent calls to this function.
 	 */
-	asm volatile ("pushfl\n\t"
-		      "pushfl\n\t"
-		      "popl %0\n\t"
-		      "movl %0,%1\n\t"
-		      "xorl %2,%0\n\t"
-		      "pushl %0\n\t"
-		      "popfl\n\t"
-		      "pushfl\n\t"
-		      "popl %0\n\t"
-		      "popfl\n\t"
+	asm volatile ("pushfl		\n\t"
+		      "pushfl		\n\t"
+		      "popl %0		\n\t"
+		      "movl %0, %1	\n\t"
+		      "xorl %2, %0	\n\t"
+		      "pushl %0		\n\t"
+		      "popfl		\n\t"
+		      "pushfl		\n\t"
+		      "popl %0		\n\t"
+		      "popfl		\n\t"
+
 		      : "=&r" (f1), "=&r" (f2)
 		      : "ir" (flag));
 
@@ -188,18 +189,22 @@ static int __cpuinit have_cpuid_p(void)
 
 static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 {
-	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
-		/* Disable processor serial number */
-		unsigned long lo, hi;
-		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-		lo |= 0x200000;
-		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-		printk(KERN_NOTICE "CPU serial number disabled.\n");
-		clear_cpu_cap(c, X86_FEATURE_PN);
-
-		/* Disabling the serial number may affect the cpuid level */
-		c->cpuid_level = cpuid_eax(0);
-	}
+	unsigned long lo, hi;
+
+	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
+		return;
+
+	/* Disable processor serial number: */
+
+	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+	lo |= 0x200000;
+	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+
+	printk(KERN_NOTICE "CPU serial number disabled.\n");
+	clear_cpu_cap(c, X86_FEATURE_PN);
+
+	/* Disabling the serial number may affect the cpuid level */
+	c->cpuid_level = cpuid_eax(0);
 }
 
 static int __init x86_serial_nr_setup(char *s)
@@ -232,6 +237,7 @@ struct cpuid_dependent_feature {
 	u32 feature;
 	u32 level;
 };
+
 static const struct cpuid_dependent_feature __cpuinitconst
 cpuid_dependent_features[] = {
 	{ X86_FEATURE_MWAIT,		0x00000005 },
@@ -243,7 +249,11 @@ cpuid_dependent_features[] = {
 static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
 {
 	const struct cpuid_dependent_feature *df;
+
 	for (df = cpuid_dependent_features; df->feature; df++) {
+
+		if (!cpu_has(c, df->feature))
+			continue;
 		/*
 		 * Note: cpuid_level is set to -1 if unavailable, but
 		 * extended_extended_level is set to 0 if unavailable
@@ -251,32 +261,32 @@ static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
 		 * when signed; hence the weird messing around with
 		 * signs here...
 		 */
-		if (cpu_has(c, df->feature) &&
-		    ((s32)df->level < 0 ?
+		if (!((s32)df->level < 0 ?
 		     (u32)df->level > (u32)c->extended_cpuid_level :
-		     (s32)df->level > (s32)c->cpuid_level)) {
-			clear_cpu_cap(c, df->feature);
-			if (warn)
-				printk(KERN_WARNING
-				       "CPU: CPU feature %s disabled "
-				       "due to lack of CPUID level 0x%x\n",
-				       x86_cap_flags[df->feature],
-				       df->level);
-		}
+		     (s32)df->level > (s32)c->cpuid_level))
+			continue;
+
+		clear_cpu_cap(c, df->feature);
+		if (!warn)
+			continue;
+
+		printk(KERN_WARNING
+		       "CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
+		       x86_cap_flags[df->feature], df->level);
 	}
 }
 
 /*
  * Naming convention should be: <Name> [(<Codename>)]
  * This table only is used unless init_<vendor>() below doesn't set it;
- * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
- *
+ * in particular, if CPUID levels 0x80000002..4 are supported, this
+ * isn't used
  */
 
 /* Look up CPU names by table lookup. */
-static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
+static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
 {
-	struct cpu_model_info *info;
+	const struct cpu_model_info *info;
 
 	if (c->x86_model >= 16)
 		return NULL;	/* Range check */
@@ -307,8 +317,10 @@ void load_percpu_segment(int cpu)
 	load_stack_canary_segment();
 }
 
-/* Current gdt points %fs at the "master" per-cpu area: after this,
- * it's on the real one. */
+/*
+ * Current gdt points %fs at the "master" per-cpu area: after this,
+ * it's on the real one.
+ */
 void switch_to_new_gdt(int cpu)
 {
 	struct desc_ptr gdt_descr;
@@ -321,7 +333,7 @@ void switch_to_new_gdt(int cpu)
 	load_percpu_segment(cpu);
 }
 
-static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
+static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};
 
 static void __cpuinit default_init(struct cpuinfo_x86 *c)
 {
@@ -340,7 +352,7 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c)
 #endif
 }
 
-static struct cpu_dev __cpuinitdata default_cpu = {
+static const struct cpu_dev __cpuinitconst default_cpu = {
 	.c_init		= default_init,
 	.c_vendor	= "Unknown",
 	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
@@ -354,22 +366,24 @@ static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
 	if (c->extended_cpuid_level < 0x80000004)
 		return;
 
-	v = (unsigned int *) c->x86_model_id;
+	v = (unsigned int *)c->x86_model_id;
 	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
 	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
 	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
 	c->x86_model_id[48] = 0;
 
-	/* Intel chips right-justify this string for some dumb reason;
-	   undo that brain damage */
+	/*
+	 * Intel chips right-justify this string for some dumb reason;
+	 * undo that brain damage:
+	 */
 	p = q = &c->x86_model_id[0];
 	while (*p == ' ')
 		p++;
 	if (p != q) {
 		while (*p)
 			*q++ = *p++;
 		while (q <= &c->x86_model_id[48])
 			*q++ = '\0';	/* Zero-pad the rest */
 	}
 }
 
@@ -438,27 +452,30 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 
 	if (smp_num_siblings == 1) {
 		printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
-	} else if (smp_num_siblings > 1) {
+		goto out;
+	}
 
-		if (smp_num_siblings > nr_cpu_ids) {
-			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
-			       smp_num_siblings);
-			smp_num_siblings = 1;
-			return;
-		}
+	if (smp_num_siblings <= 1)
+		goto out;
+
+	if (smp_num_siblings > nr_cpu_ids) {
+		pr_warning("CPU: Unsupported number of siblings %d",
+			   smp_num_siblings);
+		smp_num_siblings = 1;
+		return;
+	}
 
 	index_msb = get_count_order(smp_num_siblings);
 	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
 
 	smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 
 	index_msb = get_count_order(smp_num_siblings);
 
 	core_bits = get_count_order(c->x86_max_cores);
 
 	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
 				       ((1 << core_bits) - 1);
-	}
 
 out:
 	if ((c->x86_max_cores * smp_num_siblings) > 1) {
@@ -473,8 +490,8 @@ out:
 static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 {
 	char *v = c->x86_vendor_id;
-	int i;
 	static int printed;
+	int i;
 
 	for (i = 0; i < X86_VENDOR_NUM; i++) {
 		if (!cpu_devs[i])
@@ -483,6 +500,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
 		    (cpu_devs[i]->c_ident[1] &&
 		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
+
 			this_cpu = cpu_devs[i];
 			c->x86_vendor = this_cpu->c_x86_vendor;
 			return;
@@ -491,7 +509,9 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 
 	if (!printed) {
 		printed++;
-		printk(KERN_ERR "CPU: vendor_id '%s' unknown, using generic init.\n", v);
+		printk(KERN_ERR
+		       "CPU: vendor_id '%s' unknown, using generic init.\n", v);
+
 		printk(KERN_ERR "CPU: Your system may be unstable.\n");
 	}
 
@@ -511,14 +531,17 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
 	/* Intel-defined flags: level 0x00000001 */
 	if (c->cpuid_level >= 0x00000001) {
 		u32 junk, tfms, cap0, misc;
+
 		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
 		c->x86 = (tfms >> 8) & 0xf;
 		c->x86_model = (tfms >> 4) & 0xf;
 		c->x86_mask = tfms & 0xf;
+
 		if (c->x86 == 0xf)
 			c->x86 += (tfms >> 20) & 0xff;
 		if (c->x86 >= 0x6)
 			c->x86_model += ((tfms >> 16) & 0xf) << 4;
+
 		if (cap0 & (1<<19)) {
 			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
 			c->x86_cache_alignment = c->x86_clflush_size;
@@ -534,6 +557,7 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 	/* Intel-defined flags: level 0x00000001 */
 	if (c->cpuid_level >= 0x00000001) {
 		u32 capability, excap;
+
 		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
 		c->x86_capability[0] = capability;
 		c->x86_capability[4] = excap;
@@ -542,6 +566,7 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 	/* AMD-defined flags: level 0x80000001 */
 	xlvl = cpuid_eax(0x80000000);
 	c->extended_cpuid_level = xlvl;
+
 	if ((xlvl & 0xffff0000) == 0x80000000) {
 		if (xlvl >= 0x80000001) {
 			c->x86_capability[1] = cpuid_edx(0x80000001);
@@ -549,13 +574,15 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 		}
 	}
 
-#ifdef CONFIG_X86_64
 	if (c->extended_cpuid_level >= 0x80000008) {
 		u32 eax = cpuid_eax(0x80000008);
 
 		c->x86_virt_bits = (eax >> 8) & 0xff;
 		c->x86_phys_bits = eax & 0xff;
 	}
+#ifdef CONFIG_X86_32
+	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
+		c->x86_phys_bits = 36;
 #endif
 
 	if (c->extended_cpuid_level >= 0x80000007)
@@ -602,8 +629,12 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_64
 	c->x86_clflush_size = 64;
+	c->x86_phys_bits = 36;
+	c->x86_virt_bits = 48;
 #else
 	c->x86_clflush_size = 32;
+	c->x86_phys_bits = 32;
+	c->x86_virt_bits = 32;
 #endif
 	c->x86_cache_alignment = c->x86_clflush_size;
 
@@ -634,12 +665,12 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 
 void __init early_cpu_init(void)
 {
-	struct cpu_dev **cdev;
+	const struct cpu_dev *const *cdev;
 	int count = 0;
 
-	printk("KERNEL supported cpus:\n");
+	printk(KERN_INFO "KERNEL supported cpus:\n");
 	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
-		struct cpu_dev *cpudev = *cdev;
+		const struct cpu_dev *cpudev = *cdev;
 		unsigned int j;
 
 		if (count >= X86_VENDOR_NUM)
@@ -650,7 +681,7 @@ void __init early_cpu_init(void)
 		for (j = 0; j < 2; j++) {
 			if (!cpudev->c_ident[j])
 				continue;
-			printk("  %s %s\n", cpudev->c_vendor,
+			printk(KERN_INFO "  %s %s\n", cpudev->c_vendor,
 				cpudev->c_ident[j]);
 		}
 	}
@@ -726,9 +757,13 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	c->x86_coreid_bits = 0;
 #ifdef CONFIG_X86_64
 	c->x86_clflush_size = 64;
+	c->x86_phys_bits = 36;
+	c->x86_virt_bits = 48;
 #else
 	c->cpuid_level = -1;	/* CPUID not detected */
 	c->x86_clflush_size = 32;
+	c->x86_phys_bits = 32;
+	c->x86_virt_bits = 32;
 #endif
 	c->x86_cache_alignment = c->x86_clflush_size;
 	memset(&c->x86_capability, 0, sizeof c->x86_capability);
@@ -759,8 +794,8 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	squash_the_stupid_serial_number(c);
 
 	/*
-	 * The vendor-specific functions might have changed features. Now
-	 * we do "generic changes."
+	 * The vendor-specific functions might have changed features.
+	 * Now we do "generic changes."
 	 */
 
 	/* Filter out anything that depends on CPUID levels we don't have */
@@ -768,7 +803,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 
 	/* If the model name is still unset, do table lookup. */
 	if (!c->x86_model_id[0]) {
-		char *p;
+		const char *p;
 		p = table_lookup_model(c);
 		if (p)
 			strcpy(c->x86_model_id, p);
@@ -843,11 +878,11 @@ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
 }
 
 struct msr_range {
 	unsigned	min;
 	unsigned	max;
 };
 
-static struct msr_range msr_range_array[] __cpuinitdata = {
+static const struct msr_range msr_range_array[] __cpuinitconst = {
 	{ 0x00000000, 0x00000418},
 	{ 0xc0000000, 0xc000040b},
 	{ 0xc0010000, 0xc0010142},
@@ -856,14 +891,15 @@ static struct msr_range msr_range_array[] __cpuinitdata = {
 
 static void __cpuinit print_cpu_msr(void)
 {
+	unsigned index_min, index_max;
 	unsigned index;
 	u64 val;
 	int i;
-	unsigned index_min, index_max;
 
 	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
 		index_min = msr_range_array[i].min;
 		index_max = msr_range_array[i].max;
+
 		for (index = index_min; index < index_max; index++) {
 			if (rdmsrl_amd_safe(index, &val))
 				continue;
@@ -873,6 +909,7 @@ static void __cpuinit print_cpu_msr(void)
 }
 
 static int show_msr __cpuinitdata;
+
 static __init int setup_show_msr(char *arg)
 {
 	int num;
@@ -894,12 +931,14 @@ __setup("noclflush", setup_noclflush);
 
 void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 {
-	char *vendor = NULL;
+	const char *vendor = NULL;
 
-	if (c->x86_vendor < X86_VENDOR_NUM)
+	if (c->x86_vendor < X86_VENDOR_NUM) {
 		vendor = this_cpu->c_vendor;
-	else if (c->cpuid_level >= 0)
-		vendor = c->x86_vendor_id;
+	} else {
+		if (c->cpuid_level >= 0)
+			vendor = c->x86_vendor_id;
+	}
 
 	if (vendor && !strstr(c->x86_model_id, vendor))
 		printk(KERN_CONT "%s ", vendor);
@@ -926,10 +965,12 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 static __init int setup_disablecpuid(char *arg)
 {
 	int bit;
+
 	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
 		setup_clear_cpu_cap(bit);
 	else
 		return 0;
+
 	return 1;
 }
 __setup("clearcpuid=", setup_disablecpuid);
@@ -939,6 +980,7 @@ struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
 
 DEFINE_PER_CPU_FIRST(union irq_stack_union,
 		     irq_stack_union) __aligned(PAGE_SIZE);
+
 DEFINE_PER_CPU(char *, irq_stack_ptr) =
 	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
 
@@ -948,12 +990,21 @@ EXPORT_PER_CPU_SYMBOL(kernel_stack);
 
 DEFINE_PER_CPU(unsigned int, irq_count) = -1;
 
+/*
+ * Special IST stacks which the CPU switches to when it calls
+ * an IST-marked descriptor entry. Up to 7 stacks (hardware
+ * limit), all of them are 4K, except the debug stack which
+ * is 8K.
+ */
+static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
+	  [0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
+	  [DEBUG_STACK - 1]			= DEBUG_STKSZ
+};
+
 static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
 	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ])
 	__aligned(PAGE_SIZE);
 
-extern asmlinkage void ignore_sysret(void);
-
 /* May not be marked __init: used by software suspend */
 void syscall_init(void)
 {
@@ -983,7 +1034,7 @@ unsigned long kernel_eflags;
  */
 DEFINE_PER_CPU(struct orig_ist, orig_ist);
 
-#else	/* x86_64 */
+#else	/* CONFIG_X86_64 */
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 DEFINE_PER_CPU(unsigned long, stack_canary);
@@ -995,9 +1046,26 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 	memset(regs, 0, sizeof(struct pt_regs));
 	regs->fs = __KERNEL_PERCPU;
 	regs->gs = __KERNEL_STACK_CANARY;
+
 	return regs;
 }
-#endif	/* x86_64 */
+#endif	/* CONFIG_X86_64 */
+
+/*
+ * Clear all 6 debug registers:
+ */
+static void clear_all_debug_regs(void)
+{
+	int i;
+
+	for (i = 0; i < 8; i++) {
+		/* Ignore db4, db5 */
+		if ((i == 4) || (i == 5))
+			continue;
+
+		set_debugreg(0, i);
+	}
+}
 
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
@@ -1007,15 +1075,20 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
  * A lot of state is already set up in PDA init for 64 bit
  */
 #ifdef CONFIG_X86_64
+
 void __cpuinit cpu_init(void)
 {
-	int cpu = stack_smp_processor_id();
-	struct tss_struct *t = &per_cpu(init_tss, cpu);
-	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
-	unsigned long v;
+	struct orig_ist *orig_ist;
 	struct task_struct *me;
+	struct tss_struct *t;
+	unsigned long v;
+	int cpu;
 	int i;
 
+	cpu = stack_smp_processor_id();
+	t = &per_cpu(init_tss, cpu);
+	orig_ist = &per_cpu(orig_ist, cpu);
+
 #ifdef CONFIG_NUMA
 	if (cpu != 0 && percpu_read(node_number) == 0 &&
 	    cpu_to_node(cpu) != NUMA_NO_NODE)
@@ -1056,19 +1129,17 @@ void __cpuinit cpu_init(void)
 	 * set up and load the per-CPU TSS
 	 */
 	if (!orig_ist->ist[0]) {
-		static const unsigned int sizes[N_EXCEPTION_STACKS] = {
-		  [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
-		  [DEBUG_STACK - 1] = DEBUG_STKSZ
-		};
 		char *estacks = per_cpu(exception_stacks, cpu);
+
 		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
-			estacks += sizes[v];
+			estacks += exception_stack_sizes[v];
 			orig_ist->ist[v] = t->x86_tss.ist[v] =
 					(unsigned long)estacks;
 		}
 	}
 
 	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+
 	/*
 	 * <= is required because the CPU will access up to
 	 * 8 bits beyond the end of the IO permission bitmap.
@@ -1097,17 +1168,7 @@ void __cpuinit cpu_init(void)
 		arch_kgdb_ops.correct_hw_break();
 	else
 #endif
-	{
-		/*
-		 * Clear all 6 debug registers:
-		 */
-		set_debugreg(0UL, 0);
-		set_debugreg(0UL, 1);
-		set_debugreg(0UL, 2);
-		set_debugreg(0UL, 3);
-		set_debugreg(0UL, 6);
-		set_debugreg(0UL, 7);
-	}
+		clear_all_debug_regs();
 
 	fpu_init();
 
@@ -1128,7 +1189,8 @@ void __cpuinit cpu_init(void)
 
 	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
 		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
-		for (;;) local_irq_enable();
+		for (;;)
+			local_irq_enable();
 	}
 
 	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
@@ -1157,13 +1219,7 @@ void __cpuinit cpu_init(void)
 	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
 #endif
 
-	/* Clear all 6 debug registers: */
-	set_debugreg(0, 0);
-	set_debugreg(0, 1);
-	set_debugreg(0, 2);
-	set_debugreg(0, 3);
-	set_debugreg(0, 6);
-	set_debugreg(0, 7);
+	clear_all_debug_regs();
 
 	/*
 	 * Force FPU initialization:
@@ -1183,6 +1239,4 @@ void __cpuinit cpu_init(void)
 
 	xsave_init();
 }
-
-
 #endif
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index de4094a3921..9469ecb5aeb 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -5,15 +5,15 @@
 struct cpu_model_info {
 	int		vendor;
 	int		family;
-	char		*model_names[16];
+	const char	*model_names[16];
 };
 
 /* attempt to consolidate cpu attributes */
 struct cpu_dev {
-	char	* c_vendor;
+	const char	* c_vendor;
 
 	/* some have two possibilities for cpuid string */
-	char	* c_ident[2];
+	const char	* c_ident[2];
 
 	struct		cpu_model_info c_models[4];
 
@@ -25,11 +25,12 @@ struct cpu_dev {
 };
 
 #define cpu_dev_register(cpu_devX) \
-	static struct cpu_dev *__cpu_dev_##cpu_devX __used \
+	static const struct cpu_dev *const __cpu_dev_##cpu_devX __used \
 	__attribute__((__section__(".x86_cpu_dev.init"))) = \
 	&cpu_devX;
 
-extern struct cpu_dev *__x86_cpu_dev_start[], *__x86_cpu_dev_end[];
+extern const struct cpu_dev *const __x86_cpu_dev_start[],
+			    *const __x86_cpu_dev_end[];
 
 extern void display_cacheinfo(struct cpuinfo_x86 *c);
 
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
index 9abbcbd933c..21c0cf8ced1 100755
--- a/arch/x86/kernel/cpu/cpu_debug.c
+++ b/arch/x86/kernel/cpu/cpu_debug.c
@@ -11,6 +11,7 @@
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
 #include <linux/kprobes.h>
+#include <linux/uaccess.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
@@ -40,41 +41,41 @@ static DEFINE_MUTEX(cpu_debug_lock);
40static struct dentry *cpu_debugfs_dir; 41static struct dentry *cpu_debugfs_dir;
41 42
42static struct cpu_debug_base cpu_base[] = { 43static struct cpu_debug_base cpu_base[] = {
43 { "mc", CPU_MC }, /* Machine Check */ 44 { "mc", CPU_MC, 0 },
44 { "monitor", CPU_MONITOR }, /* Monitor */ 45 { "monitor", CPU_MONITOR, 0 },
45 { "time", CPU_TIME }, /* Time */ 46 { "time", CPU_TIME, 0 },
46 { "pmc", CPU_PMC }, /* Performance Monitor */ 47 { "pmc", CPU_PMC, 1 },
47 { "platform", CPU_PLATFORM }, /* Platform */ 48 { "platform", CPU_PLATFORM, 0 },
48 { "apic", CPU_APIC }, /* APIC */ 49 { "apic", CPU_APIC, 0 },
49 { "poweron", CPU_POWERON }, /* Power-on */ 50 { "poweron", CPU_POWERON, 0 },
50 { "control", CPU_CONTROL }, /* Control */ 51 { "control", CPU_CONTROL, 0 },
51 { "features", CPU_FEATURES }, /* Features control */ 52 { "features", CPU_FEATURES, 0 },
52 { "lastbranch", CPU_LBRANCH }, /* Last Branch */ 53 { "lastbranch", CPU_LBRANCH, 0 },
53 { "bios", CPU_BIOS }, /* BIOS */ 54 { "bios", CPU_BIOS, 0 },
54 { "freq", CPU_FREQ }, /* Frequency */ 55 { "freq", CPU_FREQ, 0 },
55 { "mtrr", CPU_MTRR }, /* MTRR */ 56 { "mtrr", CPU_MTRR, 0 },
56 { "perf", CPU_PERF }, /* Performance */ 57 { "perf", CPU_PERF, 0 },
57 { "cache", CPU_CACHE }, /* Cache */ 58 { "cache", CPU_CACHE, 0 },
58 { "sysenter", CPU_SYSENTER }, /* Sysenter */ 59 { "sysenter", CPU_SYSENTER, 0 },
59 { "therm", CPU_THERM }, /* Thermal */ 60 { "therm", CPU_THERM, 0 },
60 { "misc", CPU_MISC }, /* Miscellaneous */ 61 { "misc", CPU_MISC, 0 },
61 { "debug", CPU_DEBUG }, /* Debug */ 62 { "debug", CPU_DEBUG, 0 },
62 { "pat", CPU_PAT }, /* PAT */ 63 { "pat", CPU_PAT, 0 },
63 { "vmx", CPU_VMX }, /* VMX */ 64 { "vmx", CPU_VMX, 0 },
64 { "call", CPU_CALL }, /* System Call */ 65 { "call", CPU_CALL, 0 },
65 { "base", CPU_BASE }, /* BASE Address */ 66 { "base", CPU_BASE, 0 },
66 { "smm", CPU_SMM }, /* System mgmt mode */ 67 { "smm", CPU_SMM, 0 },
67 { "svm", CPU_SVM }, /*Secure Virtial Machine*/ 68 { "svm", CPU_SVM, 0 },
68 { "osvm", CPU_OSVM }, /* OS-Visible Workaround*/ 69 { "osvm", CPU_OSVM, 0 },
69 { "tss", CPU_TSS }, /* Task Stack Segment */ 70 { "tss", CPU_TSS, 0 },
70 { "cr", CPU_CR }, /* Control Registers */ 71 { "cr", CPU_CR, 0 },
71 { "dt", CPU_DT }, /* Descriptor Table */ 72 { "dt", CPU_DT, 0 },
72 { "registers", CPU_REG_ALL }, /* Select all Registers */ 73 { "registers", CPU_REG_ALL, 0 },
73}; 74};
74 75
75static struct cpu_file_base cpu_file[] = { 76static struct cpu_file_base cpu_file[] = {
76 { "index", CPU_REG_ALL }, /* index */ 77 { "index", CPU_REG_ALL, 0 },
77 { "value", CPU_REG_ALL }, /* value */ 78 { "value", CPU_REG_ALL, 1 },
78}; 79};
79 80
80/* Intel Registers Range */ 81/* Intel Registers Range */
@@ -608,9 +609,62 @@ static int cpu_seq_open(struct inode *inode, struct file *file)
608 return err; 609 return err;
609} 610}
610 611
612static int write_msr(struct cpu_private *priv, u64 val)
613{
614 u32 low, high;
615
616 high = (val >> 32) & 0xffffffff;
617 low = val & 0xffffffff;
618
619 if (!wrmsr_safe_on_cpu(priv->cpu, priv->reg, low, high))
620 return 0;
621
622 return -EPERM;
623}
624
625static int write_cpu_register(struct cpu_private *priv, const char *buf)
626{
627 int ret = -EPERM;
628 u64 val;
629
630 ret = strict_strtoull(buf, 0, &val);
631 if (ret < 0)
632 return ret;
633
634 /* Supporting only MSRs */
635 if (priv->type < CPU_TSS_BIT)
636 return write_msr(priv, val);
637
638 return ret;
639}
640
641static ssize_t cpu_write(struct file *file, const char __user *ubuf,
642 size_t count, loff_t *off)
643{
644 struct seq_file *seq = file->private_data;
645 struct cpu_private *priv = seq->private;
646 char buf[19];
647
648 if ((priv == NULL) || (count >= sizeof(buf)))
649 return -EINVAL;
650
651 if (copy_from_user(&buf, ubuf, count))
652 return -EFAULT;
653
654 buf[count] = 0;
655
656 if ((cpu_base[priv->type].write) && (cpu_file[priv->file].write))
657 if (!write_cpu_register(priv, buf))
658 return count;
659
660 return -EACCES;
661}
662
611static const struct file_operations cpu_fops = { 663static const struct file_operations cpu_fops = {
664 .owner = THIS_MODULE,
612 .open = cpu_seq_open, 665 .open = cpu_seq_open,
613 .read = seq_read, 666 .read = seq_read,
667 .write = cpu_write,
614 .llseek = seq_lseek, 668 .llseek = seq_lseek,
615 .release = seq_release, 669 .release = seq_release,
616}; 670};
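
The new write path above parses a u64 from the user buffer and, for MSR-type registers, splits it into the EDX:EAX halves that WRMSR expects before handing it to wrmsr_safe_on_cpu(). A minimal userspace sketch of that split (the MSR call itself is kernel-only; the value below is arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Model of the split in write_msr(): WRMSR takes the value in
 * EDX:EAX, so the 64-bit input is cut into two 32-bit halves. */
int main(void)
{
    uint64_t val  = 0x123456789abcdef0ULL;  /* arbitrary test value */
    uint32_t low  = (uint32_t)(val & 0xffffffff);
    uint32_t high = (uint32_t)(val >> 32);

    /* Reassembling shows the split is lossless. */
    uint64_t back = ((uint64_t)high << 32) | low;
    printf("low=%#x high=%#x lossless=%d\n", low, high, back == val);
    return 0;
}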
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index ffd0f5ed071..593171e967e 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -61,23 +61,23 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
61 */ 61 */
62static unsigned char Cx86_dir0_msb __cpuinitdata = 0; 62static unsigned char Cx86_dir0_msb __cpuinitdata = 0;
63 63
64static char Cx86_model[][9] __cpuinitdata = { 64static const char __cpuinitconst Cx86_model[][9] = {
65 "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ", 65 "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
66 "M II ", "Unknown" 66 "M II ", "Unknown"
67}; 67};
68static char Cx486_name[][5] __cpuinitdata = { 68static const char __cpuinitconst Cx486_name[][5] = {
69 "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx", 69 "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
70 "SRx2", "DRx2" 70 "SRx2", "DRx2"
71}; 71};
72static char Cx486S_name[][4] __cpuinitdata = { 72static const char __cpuinitconst Cx486S_name[][4] = {
73 "S", "S2", "Se", "S2e" 73 "S", "S2", "Se", "S2e"
74}; 74};
75static char Cx486D_name[][4] __cpuinitdata = { 75static const char __cpuinitconst Cx486D_name[][4] = {
76 "DX", "DX2", "?", "?", "?", "DX4" 76 "DX", "DX2", "?", "?", "?", "DX4"
77}; 77};
78static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock"; 78static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock";
79static char cyrix_model_mult1[] __cpuinitdata = "12??43"; 79static const char __cpuinitconst cyrix_model_mult1[] = "12??43";
80static char cyrix_model_mult2[] __cpuinitdata = "12233445"; 80static const char __cpuinitconst cyrix_model_mult2[] = "12233445";
81 81
82/* 82/*
83 * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old 83 * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
@@ -435,7 +435,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
435 } 435 }
436} 436}
437 437
438static struct cpu_dev cyrix_cpu_dev __cpuinitdata = { 438static const struct cpu_dev __cpuinitconst cyrix_cpu_dev = {
439 .c_vendor = "Cyrix", 439 .c_vendor = "Cyrix",
440 .c_ident = { "CyrixInstead" }, 440 .c_ident = { "CyrixInstead" },
441 .c_early_init = early_init_cyrix, 441 .c_early_init = early_init_cyrix,
@@ -446,7 +446,7 @@ static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
446 446
447cpu_dev_register(cyrix_cpu_dev); 447cpu_dev_register(cyrix_cpu_dev);
448 448
449static struct cpu_dev nsc_cpu_dev __cpuinitdata = { 449static const struct cpu_dev __cpuinitconst nsc_cpu_dev = {
450 .c_vendor = "NSC", 450 .c_vendor = "NSC",
451 .c_ident = { "Geode by NSC" }, 451 .c_ident = { "Geode by NSC" },
452 .c_init = init_nsc, 452 .c_init = init_nsc,
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 191117f1ad5..b09d4eb52bb 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -54,6 +54,11 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
54 c->x86_cache_alignment = 128; 54 c->x86_cache_alignment = 128;
55#endif 55#endif
56 56
57 /* CPUID workaround for 0F33/0F34 CPU */
58 if (c->x86 == 0xF && c->x86_model == 0x3
59 && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
60 c->x86_phys_bits = 36;
61
57 /* 62 /*
58 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate 63 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
59 * with P/T states and does not stop in deep C-states 64 * with P/T states and does not stop in deep C-states
@@ -410,7 +415,7 @@ static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned i
410} 415}
411#endif 416#endif
412 417
413static struct cpu_dev intel_cpu_dev __cpuinitdata = { 418static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
414 .c_vendor = "Intel", 419 .c_vendor = "Intel",
415 .c_ident = { "GenuineIntel" }, 420 .c_ident = { "GenuineIntel" },
416#ifdef CONFIG_X86_32 421#ifdef CONFIG_X86_32
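
The CPUID workaround added above pins the reported physical address width for the affected 0F33/0F34 steppings. A standalone sketch of the same family/model/stepping test; the struct below is a stand-in for the few cpuinfo_x86 fields the check touches, and the misreport rationale is inferred from the patch comment:

#include <stdio.h>

/* Stand-in for the cpuinfo_x86 fields used by the check. */
struct cpuinfo { unsigned x86, x86_model, x86_mask, x86_phys_bits; };

/* Family 0xF, model 3, stepping 3 or 4 parts get their physical
 * address width pinned to 36 bits. */
static void fixup_phys_bits(struct cpuinfo *c)
{
    if (c->x86 == 0xF && c->x86_model == 0x3 &&
        (c->x86_mask == 0x3 || c->x86_mask == 0x4))
        c->x86_phys_bits = 36;
}

int main(void)
{
    struct cpuinfo c = { 0xF, 0x3, 0x4, 40 };

    fixup_phys_bits(&c);
    printf("phys_bits=%u\n", c.x86_phys_bits);
    return 0;
}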
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 7293508d8f5..c471eb1a389 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -32,7 +32,7 @@ struct _cache_table
32}; 32};
33 33
34/* all the cache descriptor types we care about (no TLB or trace cache entries) */ 34/* all the cache descriptor types we care about (no TLB or trace cache entries) */
35static struct _cache_table cache_table[] __cpuinitdata = 35static const struct _cache_table __cpuinitconst cache_table[] =
36{ 36{
37 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ 37 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
38 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ 38 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
@@ -206,15 +206,15 @@ union l3_cache {
206 unsigned val; 206 unsigned val;
207}; 207};
208 208
209static unsigned short assocs[] __cpuinitdata = { 209static const unsigned short __cpuinitconst assocs[] = {
210 [1] = 1, [2] = 2, [4] = 4, [6] = 8, 210 [1] = 1, [2] = 2, [4] = 4, [6] = 8,
211 [8] = 16, [0xa] = 32, [0xb] = 48, 211 [8] = 16, [0xa] = 32, [0xb] = 48,
212 [0xc] = 64, 212 [0xc] = 64,
213 [0xf] = 0xffff // ?? 213 [0xf] = 0xffff // ??
214}; 214};
215 215
216static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 }; 216static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
217static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 }; 217static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
218 218
219static void __cpuinit 219static void __cpuinit
220amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, 220amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c
index 52b3fefbd5a..bb62b3e5caa 100644
--- a/arch/x86/kernel/cpu/transmeta.c
+++ b/arch/x86/kernel/cpu/transmeta.c
@@ -98,7 +98,7 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
98#endif 98#endif
99} 99}
100 100
101static struct cpu_dev transmeta_cpu_dev __cpuinitdata = { 101static const struct cpu_dev __cpuinitconst transmeta_cpu_dev = {
102 .c_vendor = "Transmeta", 102 .c_vendor = "Transmeta",
103 .c_ident = { "GenuineTMx86", "TransmetaCPU" }, 103 .c_ident = { "GenuineTMx86", "TransmetaCPU" },
104 .c_early_init = early_init_transmeta, 104 .c_early_init = early_init_transmeta,
diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c
index e777f79e096..fd2c37bf7ac 100644
--- a/arch/x86/kernel/cpu/umc.c
+++ b/arch/x86/kernel/cpu/umc.c
@@ -8,7 +8,7 @@
8 * so no special init takes place. 8 * so no special init takes place.
9 */ 9 */
10 10
11static struct cpu_dev umc_cpu_dev __cpuinitdata = { 11static const struct cpu_dev __cpuinitconst umc_cpu_dev = {
12 .c_vendor = "UMC", 12 .c_vendor = "UMC",
13 .c_ident = { "UMC UMC UMC" }, 13 .c_ident = { "UMC UMC UMC" },
14 .c_models = { 14 .c_models = {
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 508bec1cee2..95b81c18b6b 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -110,19 +110,25 @@ int __init e820_all_mapped(u64 start, u64 end, unsigned type)
110/* 110/*
111 * Add a memory region to the kernel e820 map. 111 * Add a memory region to the kernel e820 map.
112 */ 112 */
113void __init e820_add_region(u64 start, u64 size, int type) 113static void __init __e820_add_region(struct e820map *e820x, u64 start, u64 size,
114 int type)
114{ 115{
115 int x = e820.nr_map; 116 int x = e820x->nr_map;
116 117
117 if (x == ARRAY_SIZE(e820.map)) { 118 if (x == ARRAY_SIZE(e820x->map)) {
118 printk(KERN_ERR "Ooops! Too many entries in the memory map!\n"); 119 printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
119 return; 120 return;
120 } 121 }
121 122
122 e820.map[x].addr = start; 123 e820x->map[x].addr = start;
123 e820.map[x].size = size; 124 e820x->map[x].size = size;
124 e820.map[x].type = type; 125 e820x->map[x].type = type;
125 e820.nr_map++; 126 e820x->nr_map++;
127}
128
129void __init e820_add_region(u64 start, u64 size, int type)
130{
131 __e820_add_region(&e820, start, size, type);
126} 132}
127 133
128void __init e820_print_map(char *who) 134void __init e820_print_map(char *who)
@@ -417,11 +423,11 @@ static int __init append_e820_map(struct e820entry *biosmap, int nr_map)
417 return __append_e820_map(biosmap, nr_map); 423 return __append_e820_map(biosmap, nr_map);
418} 424}
419 425
420static u64 __init e820_update_range_map(struct e820map *e820x, u64 start, 426static u64 __init __e820_update_range(struct e820map *e820x, u64 start,
421 u64 size, unsigned old_type, 427 u64 size, unsigned old_type,
422 unsigned new_type) 428 unsigned new_type)
423{ 429{
424 int i; 430 unsigned int i;
425 u64 real_updated_size = 0; 431 u64 real_updated_size = 0;
426 432
427 BUG_ON(old_type == new_type); 433 BUG_ON(old_type == new_type);
@@ -429,7 +435,7 @@ static u64 __init e820_update_range_map(struct e820map *e820x, u64 start,
429 if (size > (ULLONG_MAX - start)) 435 if (size > (ULLONG_MAX - start))
430 size = ULLONG_MAX - start; 436 size = ULLONG_MAX - start;
431 437
432 for (i = 0; i < e820.nr_map; i++) { 438 for (i = 0; i < e820x->nr_map; i++) {
433 struct e820entry *ei = &e820x->map[i]; 439 struct e820entry *ei = &e820x->map[i];
434 u64 final_start, final_end; 440 u64 final_start, final_end;
435 if (ei->type != old_type) 441 if (ei->type != old_type)
@@ -446,10 +452,16 @@ static u64 __init e820_update_range_map(struct e820map *e820x, u64 start,
446 final_end = min(start + size, ei->addr + ei->size); 452 final_end = min(start + size, ei->addr + ei->size);
447 if (final_start >= final_end) 453 if (final_start >= final_end)
448 continue; 454 continue;
449 e820_add_region(final_start, final_end - final_start, 455
450 new_type); 456 __e820_add_region(e820x, final_start, final_end - final_start,
457 new_type);
458
451 real_updated_size += final_end - final_start; 459 real_updated_size += final_end - final_start;
452 460
461 /*
 462 * the leftover range may be the head or the tail piece, so
 463 * update the size first.
464 */
453 ei->size -= final_end - final_start; 465 ei->size -= final_end - final_start;
454 if (ei->addr < final_start) 466 if (ei->addr < final_start)
455 continue; 467 continue;
@@ -461,13 +473,13 @@ static u64 __init e820_update_range_map(struct e820map *e820x, u64 start,
461u64 __init e820_update_range(u64 start, u64 size, unsigned old_type, 473u64 __init e820_update_range(u64 start, u64 size, unsigned old_type,
462 unsigned new_type) 474 unsigned new_type)
463{ 475{
464 return e820_update_range_map(&e820, start, size, old_type, new_type); 476 return __e820_update_range(&e820, start, size, old_type, new_type);
465} 477}
466 478
467static u64 __init e820_update_range_saved(u64 start, u64 size, 479static u64 __init e820_update_range_saved(u64 start, u64 size,
468 unsigned old_type, unsigned new_type) 480 unsigned old_type, unsigned new_type)
469{ 481{
470 return e820_update_range_map(&e820_saved, start, size, old_type, 482 return __e820_update_range(&e820_saved, start, size, old_type,
471 new_type); 483 new_type);
472} 484}
473 485
@@ -1020,8 +1032,8 @@ u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align)
1020 continue; 1032 continue;
1021 return addr; 1033 return addr;
1022 } 1034 }
1023 return -1UL;
1024 1035
1036 return -1ULL;
1025} 1037}
1026 1038
1027/* 1039/*
@@ -1034,13 +1046,22 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
1034 u64 start; 1046 u64 start;
1035 1047
1036 start = startt; 1048 start = startt;
1037 while (size < sizet) 1049 while (size < sizet && (start + 1))
1038 start = find_e820_area_size(start, &size, align); 1050 start = find_e820_area_size(start, &size, align);
1039 1051
1040 if (size < sizet) 1052 if (size < sizet)
1041 return 0; 1053 return 0;
1042 1054
1055#ifdef CONFIG_X86_32
1056 if (start >= MAXMEM)
1057 return 0;
1058 if (start + size > MAXMEM)
1059 size = MAXMEM - start;
1060#endif
1061
1043 addr = round_down(start + size - sizet, align); 1062 addr = round_down(start + size - sizet, align);
1063 if (addr < start)
1064 return 0;
1044 e820_update_range(addr, sizet, E820_RAM, E820_RESERVED); 1065 e820_update_range(addr, sizet, E820_RAM, E820_RESERVED);
1045 e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED); 1066 e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED);
1046 printk(KERN_INFO "update e820 for early_reserve_e820\n"); 1067 printk(KERN_INFO "update e820 for early_reserve_e820\n");
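
The reworked __e820_update_range() above clips each old-type entry against the requested range, re-adds the overlap with the new type, and shrinks the entry down to the leftover head or tail. A userspace sketch of that per-entry clipping, keeping the same "update the size first" ordering; the map handling is simplified to a single entry, and a middle split is not handled, just as in the kernel loop:

#include <stdint.h>
#include <stdio.h>

struct region { uint64_t addr, size; int type; };

/* Clip one entry against [start, start+size): the overlap is retyped,
 * the entry keeps the leftover head or tail piece. */
static uint64_t retype_overlap(struct region *ei, uint64_t start,
                               uint64_t size, int new_type,
                               struct region *out)
{
    uint64_t final_start = start > ei->addr ? start : ei->addr;
    uint64_t end1 = start + size, end2 = ei->addr + ei->size;
    uint64_t final_end = end1 < end2 ? end1 : end2;

    if (final_start >= final_end)
        return 0;

    out->addr = final_start;
    out->size = final_end - final_start;
    out->type = new_type;

    ei->size -= out->size;          /* update the size first ... */
    if (ei->addr >= final_start)
        ei->addr = final_end;       /* ... the leftover is the tail */
    return out->size;
}

int main(void)
{
    struct region ram = { 0x1000, 0x4000, 1 }, reserved;
    uint64_t n = retype_overlap(&ram, 0x2000, 0x8000, 2, &reserved);

    printf("retyped %#llx bytes, entry now %#llx+%#llx\n",
           (unsigned long long)n, (unsigned long long)ram.addr,
           (unsigned long long)ram.size);
    return 0;
}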
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 639ad98238a..335f049d110 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -250,7 +250,7 @@ static int dbgp_wait_until_complete(void)
250 return (ctrl & DBGP_ERROR) ? -DBGP_ERRCODE(ctrl) : DBGP_LEN(ctrl); 250 return (ctrl & DBGP_ERROR) ? -DBGP_ERRCODE(ctrl) : DBGP_LEN(ctrl);
251} 251}
252 252
253static void dbgp_mdelay(int ms) 253static void __init dbgp_mdelay(int ms)
254{ 254{
255 int i; 255 int i;
256 256
@@ -311,7 +311,7 @@ static void dbgp_set_data(const void *buf, int size)
311 writel(hi, &ehci_debug->data47); 311 writel(hi, &ehci_debug->data47);
312} 312}
313 313
314static void dbgp_get_data(void *buf, int size) 314static void __init dbgp_get_data(void *buf, int size)
315{ 315{
316 unsigned char *bytes = buf; 316 unsigned char *bytes = buf;
317 u32 lo, hi; 317 u32 lo, hi;
@@ -355,7 +355,7 @@ static int dbgp_bulk_write(unsigned devnum, unsigned endpoint,
355 return ret; 355 return ret;
356} 356}
357 357
358static int dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data, 358static int __init dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
359 int size) 359 int size)
360{ 360{
361 u32 pids, addr, ctrl; 361 u32 pids, addr, ctrl;
@@ -386,8 +386,8 @@ static int dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
386 return ret; 386 return ret;
387} 387}
388 388
389static int dbgp_control_msg(unsigned devnum, int requesttype, int request, 389static int __init dbgp_control_msg(unsigned devnum, int requesttype,
390 int value, int index, void *data, int size) 390 int request, int value, int index, void *data, int size)
391{ 391{
392 u32 pids, addr, ctrl; 392 u32 pids, addr, ctrl;
393 struct usb_ctrlrequest req; 393 struct usb_ctrlrequest req;
@@ -489,7 +489,7 @@ static u32 __init find_dbgp(int ehci_num, u32 *rbus, u32 *rslot, u32 *rfunc)
489 return 0; 489 return 0;
490} 490}
491 491
492static int ehci_reset_port(int port) 492static int __init ehci_reset_port(int port)
493{ 493{
494 u32 portsc; 494 u32 portsc;
495 u32 delay_time, delay; 495 u32 delay_time, delay;
@@ -532,7 +532,7 @@ static int ehci_reset_port(int port)
532 return -EBUSY; 532 return -EBUSY;
533} 533}
534 534
535static int ehci_wait_for_port(int port) 535static int __init ehci_wait_for_port(int port)
536{ 536{
537 u32 status; 537 u32 status;
538 int ret, reps; 538 int ret, reps;
@@ -557,13 +557,13 @@ static inline void dbgp_printk(const char *fmt, ...) { }
557 557
558typedef void (*set_debug_port_t)(int port); 558typedef void (*set_debug_port_t)(int port);
559 559
560static void default_set_debug_port(int port) 560static void __init default_set_debug_port(int port)
561{ 561{
562} 562}
563 563
564static set_debug_port_t set_debug_port = default_set_debug_port; 564static set_debug_port_t __initdata set_debug_port = default_set_debug_port;
565 565
566static void nvidia_set_debug_port(int port) 566static void __init nvidia_set_debug_port(int port)
567{ 567{
568 u32 dword; 568 u32 dword;
569 dword = read_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func, 569 dword = read_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func,
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 899e8938e79..c929add475c 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -442,8 +442,7 @@ sysenter_past_esp:
442 442
443 GET_THREAD_INFO(%ebp) 443 GET_THREAD_INFO(%ebp)
444 444
445 /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */ 445 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
446 testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
447 jnz sysenter_audit 446 jnz sysenter_audit
448sysenter_do_call: 447sysenter_do_call:
449 cmpl $(nr_syscalls), %eax 448 cmpl $(nr_syscalls), %eax
@@ -454,7 +453,7 @@ sysenter_do_call:
454 DISABLE_INTERRUPTS(CLBR_ANY) 453 DISABLE_INTERRUPTS(CLBR_ANY)
455 TRACE_IRQS_OFF 454 TRACE_IRQS_OFF
456 movl TI_flags(%ebp), %ecx 455 movl TI_flags(%ebp), %ecx
457 testw $_TIF_ALLWORK_MASK, %cx 456 testl $_TIF_ALLWORK_MASK, %ecx
458 jne sysexit_audit 457 jne sysexit_audit
459sysenter_exit: 458sysenter_exit:
460/* if something modifies registers it must also disable sysexit */ 459/* if something modifies registers it must also disable sysexit */
@@ -468,7 +467,7 @@ sysenter_exit:
468 467
469#ifdef CONFIG_AUDITSYSCALL 468#ifdef CONFIG_AUDITSYSCALL
470sysenter_audit: 469sysenter_audit:
471 testw $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp) 470 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
472 jnz syscall_trace_entry 471 jnz syscall_trace_entry
473 addl $4,%esp 472 addl $4,%esp
474 CFI_ADJUST_CFA_OFFSET -4 473 CFI_ADJUST_CFA_OFFSET -4
@@ -485,7 +484,7 @@ sysenter_audit:
485 jmp sysenter_do_call 484 jmp sysenter_do_call
486 485
487sysexit_audit: 486sysexit_audit:
488 testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx 487 testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
489 jne syscall_exit_work 488 jne syscall_exit_work
490 TRACE_IRQS_ON 489 TRACE_IRQS_ON
491 ENABLE_INTERRUPTS(CLBR_ANY) 490 ENABLE_INTERRUPTS(CLBR_ANY)
@@ -498,7 +497,7 @@ sysexit_audit:
498 DISABLE_INTERRUPTS(CLBR_ANY) 497 DISABLE_INTERRUPTS(CLBR_ANY)
499 TRACE_IRQS_OFF 498 TRACE_IRQS_OFF
500 movl TI_flags(%ebp), %ecx 499 movl TI_flags(%ebp), %ecx
501 testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx 500 testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
502 jne syscall_exit_work 501 jne syscall_exit_work
503 movl PT_EAX(%esp),%eax /* reload syscall return value */ 502 movl PT_EAX(%esp),%eax /* reload syscall return value */
504 jmp sysenter_exit 503 jmp sysenter_exit
@@ -523,8 +522,7 @@ ENTRY(system_call)
523 SAVE_ALL 522 SAVE_ALL
524 GET_THREAD_INFO(%ebp) 523 GET_THREAD_INFO(%ebp)
525 # system call tracing in operation / emulation 524 # system call tracing in operation / emulation
526 /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */ 525 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
527 testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
528 jnz syscall_trace_entry 526 jnz syscall_trace_entry
529 cmpl $(nr_syscalls), %eax 527 cmpl $(nr_syscalls), %eax
530 jae syscall_badsys 528 jae syscall_badsys
@@ -538,7 +536,7 @@ syscall_exit:
538 # between sampling and the iret 536 # between sampling and the iret
539 TRACE_IRQS_OFF 537 TRACE_IRQS_OFF
540 movl TI_flags(%ebp), %ecx 538 movl TI_flags(%ebp), %ecx
541 testw $_TIF_ALLWORK_MASK, %cx # current->work 539 testl $_TIF_ALLWORK_MASK, %ecx # current->work
542 jne syscall_exit_work 540 jne syscall_exit_work
543 541
544restore_all: 542restore_all:
@@ -673,7 +671,7 @@ END(syscall_trace_entry)
673 # perform syscall exit tracing 671 # perform syscall exit tracing
674 ALIGN 672 ALIGN
675syscall_exit_work: 673syscall_exit_work:
676 testb $_TIF_WORK_SYSCALL_EXIT, %cl 674 testl $_TIF_WORK_SYSCALL_EXIT, %ecx
677 jz work_pending 675 jz work_pending
678 TRACE_IRQS_ON 676 TRACE_IRQS_ON
679 ENABLE_INTERRUPTS(CLBR_ANY) # could let syscall_trace_leave() call 677 ENABLE_INTERRUPTS(CLBR_ANY) # could let syscall_trace_leave() call
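
The testw-to-testl conversions above widen the flag tests from 16 to 32 bits and drop the stale _TIF_SECCOMP note. A toy illustration, with a made-up bit position rather than a real TIF_* value, of why a 16-bit test cannot observe a work flag at bit 16 or above while a 32-bit test can:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: bit 17 is not a real TIF_* assignment. */
#define TIF_HIGH_WORK (1u << 17)

int main(void)
{
    uint32_t flags = TIF_HIGH_WORK;

    /* A 16-bit test (testw on %cx) truncates mask and flags alike. */
    printf("testw sees: %#x\n",
           (uint32_t)((uint16_t)flags & (uint16_t)TIF_HIGH_WORK));
    /* A 32-bit test (testl on %ecx) sees the full word. */
    printf("testl sees: %#x\n", flags & TIF_HIGH_WORK);
    return 0;
}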
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 7ba4621c0df..a331ec38af9 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -368,6 +368,7 @@ ENTRY(save_rest)
368END(save_rest) 368END(save_rest)
369 369
370/* save complete stack frame */ 370/* save complete stack frame */
371 .pushsection .kprobes.text, "ax"
371ENTRY(save_paranoid) 372ENTRY(save_paranoid)
372 XCPT_FRAME 1 RDI+8 373 XCPT_FRAME 1 RDI+8
373 cld 374 cld
@@ -396,6 +397,7 @@ ENTRY(save_paranoid)
3961: ret 3971: ret
397 CFI_ENDPROC 398 CFI_ENDPROC
398END(save_paranoid) 399END(save_paranoid)
400 .popsection
399 401
400/* 402/*
401 * A newly forked process directly context switches into this address. 403 * A newly forked process directly context switches into this address.
@@ -416,7 +418,6 @@ ENTRY(ret_from_fork)
416 418
417 GET_THREAD_INFO(%rcx) 419 GET_THREAD_INFO(%rcx)
418 420
419 CFI_REMEMBER_STATE
420 RESTORE_REST 421 RESTORE_REST
421 422
422 testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread? 423 testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
@@ -428,7 +429,6 @@ ENTRY(ret_from_fork)
428 RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET 429 RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
429 jmp ret_from_sys_call # go to the SYSRET fastpath 430 jmp ret_from_sys_call # go to the SYSRET fastpath
430 431
431 CFI_RESTORE_STATE
432 CFI_ENDPROC 432 CFI_ENDPROC
433END(ret_from_fork) 433END(ret_from_fork)
434 434
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index b864341dcc4..b8ac3b6cf77 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -45,16 +45,16 @@ void ack_bad_irq(unsigned int irq)
45/* 45/*
46 * /proc/interrupts printing: 46 * /proc/interrupts printing:
47 */ 47 */
48static int show_other_interrupts(struct seq_file *p) 48static int show_other_interrupts(struct seq_file *p, int prec)
49{ 49{
50 int j; 50 int j;
51 51
52 seq_printf(p, "NMI: "); 52 seq_printf(p, "%*s: ", prec, "NMI");
53 for_each_online_cpu(j) 53 for_each_online_cpu(j)
54 seq_printf(p, "%10u ", irq_stats(j)->__nmi_count); 54 seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
55 seq_printf(p, " Non-maskable interrupts\n"); 55 seq_printf(p, " Non-maskable interrupts\n");
56#ifdef CONFIG_X86_LOCAL_APIC 56#ifdef CONFIG_X86_LOCAL_APIC
57 seq_printf(p, "LOC: "); 57 seq_printf(p, "%*s: ", prec, "LOC");
58 for_each_online_cpu(j) 58 for_each_online_cpu(j)
59 seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs); 59 seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
60 seq_printf(p, " Local timer interrupts\n"); 60 seq_printf(p, " Local timer interrupts\n");
@@ -66,40 +66,40 @@ static int show_other_interrupts(struct seq_file *p)
66 seq_printf(p, " Platform interrupts\n"); 66 seq_printf(p, " Platform interrupts\n");
67 } 67 }
68#ifdef CONFIG_SMP 68#ifdef CONFIG_SMP
69 seq_printf(p, "RES: "); 69 seq_printf(p, "%*s: ", prec, "RES");
70 for_each_online_cpu(j) 70 for_each_online_cpu(j)
71 seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count); 71 seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
72 seq_printf(p, " Rescheduling interrupts\n"); 72 seq_printf(p, " Rescheduling interrupts\n");
73 seq_printf(p, "CAL: "); 73 seq_printf(p, "%*s: ", prec, "CAL");
74 for_each_online_cpu(j) 74 for_each_online_cpu(j)
75 seq_printf(p, "%10u ", irq_stats(j)->irq_call_count); 75 seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
76 seq_printf(p, " Function call interrupts\n"); 76 seq_printf(p, " Function call interrupts\n");
77 seq_printf(p, "TLB: "); 77 seq_printf(p, "%*s: ", prec, "TLB");
78 for_each_online_cpu(j) 78 for_each_online_cpu(j)
79 seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count); 79 seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
80 seq_printf(p, " TLB shootdowns\n"); 80 seq_printf(p, " TLB shootdowns\n");
81#endif 81#endif
82#ifdef CONFIG_X86_MCE 82#ifdef CONFIG_X86_MCE
83 seq_printf(p, "TRM: "); 83 seq_printf(p, "%*s: ", prec, "TRM");
84 for_each_online_cpu(j) 84 for_each_online_cpu(j)
85 seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count); 85 seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
86 seq_printf(p, " Thermal event interrupts\n"); 86 seq_printf(p, " Thermal event interrupts\n");
87# ifdef CONFIG_X86_64 87# ifdef CONFIG_X86_64
88 seq_printf(p, "THR: "); 88 seq_printf(p, "%*s: ", prec, "THR");
89 for_each_online_cpu(j) 89 for_each_online_cpu(j)
90 seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count); 90 seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
91 seq_printf(p, " Threshold APIC interrupts\n"); 91 seq_printf(p, " Threshold APIC interrupts\n");
92# endif 92# endif
93#endif 93#endif
94#ifdef CONFIG_X86_LOCAL_APIC 94#ifdef CONFIG_X86_LOCAL_APIC
95 seq_printf(p, "SPU: "); 95 seq_printf(p, "%*s: ", prec, "SPU");
96 for_each_online_cpu(j) 96 for_each_online_cpu(j)
97 seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count); 97 seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
98 seq_printf(p, " Spurious interrupts\n"); 98 seq_printf(p, " Spurious interrupts\n");
99#endif 99#endif
100 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); 100 seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
101#if defined(CONFIG_X86_IO_APIC) 101#if defined(CONFIG_X86_IO_APIC)
102 seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count)); 102 seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
103#endif 103#endif
104 return 0; 104 return 0;
105} 105}
@@ -107,19 +107,22 @@ static int show_other_interrupts(struct seq_file *p)
107int show_interrupts(struct seq_file *p, void *v) 107int show_interrupts(struct seq_file *p, void *v)
108{ 108{
109 unsigned long flags, any_count = 0; 109 unsigned long flags, any_count = 0;
110 int i = *(loff_t *) v, j; 110 int i = *(loff_t *) v, j, prec;
111 struct irqaction *action; 111 struct irqaction *action;
112 struct irq_desc *desc; 112 struct irq_desc *desc;
113 113
114 if (i > nr_irqs) 114 if (i > nr_irqs)
115 return 0; 115 return 0;
116 116
117 for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
118 j *= 10;
119
117 if (i == nr_irqs) 120 if (i == nr_irqs)
118 return show_other_interrupts(p); 121 return show_other_interrupts(p, prec);
119 122
120 /* print header */ 123 /* print header */
121 if (i == 0) { 124 if (i == 0) {
122 seq_printf(p, " "); 125 seq_printf(p, "%*s", prec + 8, "");
123 for_each_online_cpu(j) 126 for_each_online_cpu(j)
124 seq_printf(p, "CPU%-8d", j); 127 seq_printf(p, "CPU%-8d", j);
125 seq_putc(p, '\n'); 128 seq_putc(p, '\n');
@@ -140,7 +143,7 @@ int show_interrupts(struct seq_file *p, void *v)
140 if (!action && !any_count) 143 if (!action && !any_count)
141 goto out; 144 goto out;
142 145
143 seq_printf(p, "%3d: ", i); 146 seq_printf(p, "%*d: ", prec, i);
144#ifndef CONFIG_SMP 147#ifndef CONFIG_SMP
145 seq_printf(p, "%10u ", kstat_irqs(i)); 148 seq_printf(p, "%10u ", kstat_irqs(i));
146#else 149#else
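
The prec loop added above sizes the label column from nr_irqs so the numbered rows and the named rows (NMI, LOC, RES, ...) stay aligned however many IRQs exist: the width starts at 3 digits and grows by one per extra decimal digit, capped at 10. The same computation, extracted into a standalone program:

#include <stdio.h>

/* Field-width computation from show_interrupts(). */
static int irq_label_width(int nr_irqs)
{
    int prec, j;

    for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
        j *= 10;
    return prec;
}

int main(void)
{
    int samples[] = { 16, 256, 4096, 100000 };
    unsigned i;

    for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("nr_irqs=%6d -> width %d\n", samples[i],
               irq_label_width(samples[i]));
    return 0;
}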
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index 666e43df51f..712d15fdc41 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -226,7 +226,7 @@ static int __devinit set_check_enable_amd_mmconf(const struct dmi_system_id *d)
226 return 0; 226 return 0;
227} 227}
228 228
229static struct dmi_system_id __devinitdata mmconf_dmi_table[] = { 229static const struct dmi_system_id __cpuinitconst mmconf_dmi_table[] = {
230 { 230 {
231 .callback = set_check_enable_amd_mmconf, 231 .callback = set_check_enable_amd_mmconf,
232 .ident = "Sun Microsystems Machine", 232 .ident = "Sun Microsystems Machine",
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index e8192401da4..47673e02ae5 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -890,12 +890,12 @@ static int __init replace_intsrc_all(struct mpc_table *mpc,
890#ifdef CONFIG_X86_IO_APIC 890#ifdef CONFIG_X86_IO_APIC
891 struct mpc_intsrc *m = (struct mpc_intsrc *)mpt; 891 struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
892 892
893 printk(KERN_INFO "OLD "); 893 apic_printk(APIC_VERBOSE, "OLD ");
894 print_MP_intsrc_info(m); 894 print_MP_intsrc_info(m);
895 i = get_MP_intsrc_index(m); 895 i = get_MP_intsrc_index(m);
896 if (i > 0) { 896 if (i > 0) {
897 assign_to_mpc_intsrc(&mp_irqs[i], m); 897 assign_to_mpc_intsrc(&mp_irqs[i], m);
898 printk(KERN_INFO "NEW "); 898 apic_printk(APIC_VERBOSE, "NEW ");
899 print_mp_irq_info(&mp_irqs[i]); 899 print_mp_irq_info(&mp_irqs[i]);
900 } else if (!i) { 900 } else if (!i) {
901 /* legacy, do nothing */ 901 /* legacy, do nothing */
@@ -943,7 +943,7 @@ static int __init replace_intsrc_all(struct mpc_table *mpc,
943 continue; 943 continue;
944 944
945 if (nr_m_spare > 0) { 945 if (nr_m_spare > 0) {
946 printk(KERN_INFO "*NEW* found "); 946 apic_printk(APIC_VERBOSE, "*NEW* found\n");
947 nr_m_spare--; 947 nr_m_spare--;
948 assign_to_mpc_intsrc(&mp_irqs[i], m_spare[nr_m_spare]); 948 assign_to_mpc_intsrc(&mp_irqs[i], m_spare[nr_m_spare]);
949 m_spare[nr_m_spare] = NULL; 949 m_spare[nr_m_spare] = NULL;
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index efa615f2bf4..400331b50a5 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -233,8 +233,8 @@ proceed:
233 "%zu bytes\n", vm.addr, static_size); 233 "%zu bytes\n", vm.addr, static_size);
234 234
235 ret = pcpu_setup_first_chunk(pcpur_get_page, static_size, 235 ret = pcpu_setup_first_chunk(pcpur_get_page, static_size,
236 PERCPU_FIRST_CHUNK_RESERVE, 236 PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
237 PMD_SIZE, dyn_size, vm.addr, NULL); 237 PMD_SIZE, vm.addr, NULL);
238 goto out_free_ar; 238 goto out_free_ar;
239 239
240enomem: 240enomem:
@@ -257,31 +257,13 @@ static ssize_t __init setup_pcpu_remap(size_t static_size)
257 * Embedding allocator 257 * Embedding allocator
258 * 258 *
259 * The first chunk is sized to just contain the static area plus 259 * The first chunk is sized to just contain the static area plus
260 * module and dynamic reserves, and allocated as a contiguous area 260 * module and dynamic reserves and embedded into linear physical
261 * using bootmem allocator and used as-is without being mapped into 261 * mapping so that it can use PMD mapping without additional TLB
262 * vmalloc area. This enables the first chunk to piggy back on the 262 * pressure.
263 * linear physical PMD mapping and doesn't add any additional pressure
264 * to TLB. Note that if the needed size is smaller than the minimum
265 * unit size, the leftover is returned to the bootmem allocator.
266 */ 263 */
267static void *pcpue_ptr __initdata;
268static size_t pcpue_size __initdata;
269static size_t pcpue_unit_size __initdata;
270
271static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
272{
273 size_t off = (size_t)pageno << PAGE_SHIFT;
274
275 if (off >= pcpue_size)
276 return NULL;
277
278 return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size + off);
279}
280
281static ssize_t __init setup_pcpu_embed(size_t static_size) 264static ssize_t __init setup_pcpu_embed(size_t static_size)
282{ 265{
283 unsigned int cpu; 266 size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
284 size_t dyn_size;
285 267
286 /* 268 /*
287 * If large page isn't supported, there's no benefit in doing 269 * If large page isn't supported, there's no benefit in doing
@@ -291,33 +273,8 @@ static ssize_t __init setup_pcpu_embed(size_t static_size)
291 if (!cpu_has_pse || pcpu_need_numa()) 273 if (!cpu_has_pse || pcpu_need_numa())
292 return -EINVAL; 274 return -EINVAL;
293 275
294 /* allocate and copy */ 276 return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
295 pcpue_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE + 277 reserve - PERCPU_FIRST_CHUNK_RESERVE, -1);
296 PERCPU_DYNAMIC_RESERVE);
297 pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);
298 dyn_size = pcpue_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
299
300 pcpue_ptr = pcpu_alloc_bootmem(0, num_possible_cpus() * pcpue_unit_size,
301 PAGE_SIZE);
302 if (!pcpue_ptr)
303 return -ENOMEM;
304
305 for_each_possible_cpu(cpu) {
306 void *ptr = pcpue_ptr + cpu * pcpue_unit_size;
307
308 free_bootmem(__pa(ptr + pcpue_size),
309 pcpue_unit_size - pcpue_size);
310 memcpy(ptr, __per_cpu_load, static_size);
311 }
312
313 /* we're ready, commit */
314 pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
315 pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size);
316
317 return pcpu_setup_first_chunk(pcpue_get_page, static_size,
318 PERCPU_FIRST_CHUNK_RESERVE,
319 pcpue_unit_size, dyn_size,
320 pcpue_ptr, NULL);
321} 278}
322 279
323/* 280/*
@@ -375,8 +332,8 @@ static ssize_t __init setup_pcpu_4k(size_t static_size)
375 pcpu4k_nr_static_pages, static_size); 332 pcpu4k_nr_static_pages, static_size);
376 333
377 ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, 334 ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
378 PERCPU_FIRST_CHUNK_RESERVE, -1, -1, NULL, 335 PERCPU_FIRST_CHUNK_RESERVE, -1,
379 pcpu4k_populate_pte); 336 -1, NULL, pcpu4k_populate_pte);
380 goto out_free_ar; 337 goto out_free_ar;
381 338
382enomem: 339enomem:
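
With the open-coded embed allocator removed, setup_pcpu_embed() now just splits the module+dynamic reserve between the reserved region and the dynamic size it hands to pcpu_embed_first_chunk(). A sketch of that split with illustrative sizes; the real PERCPU_* constants are architecture and config dependent:

#include <stdio.h>

/* Illustrative sizes only; the kernel's values depend on the config. */
#define PERCPU_MODULE_RESERVE       (8  << 10)
#define PERCPU_DYNAMIC_RESERVE      (20 << 10)
#define PERCPU_FIRST_CHUNK_RESERVE  PERCPU_MODULE_RESERVE

int main(void)
{
    size_t reserve  = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
    size_t dyn_size = reserve - PERCPU_FIRST_CHUNK_RESERVE;

    printf("reserved=%zu dyn_size=%zu\n",
           (size_t)PERCPU_FIRST_CHUNK_RESERVE, dyn_size);
    return 0;
}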
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index c22981fa2f3..ad5441ed1b5 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -1,30 +1,38 @@
1/* Copyright 2002 Andi Kleen */ 1/* Copyright 2002 Andi Kleen */
2 2
3#include <linux/linkage.h> 3#include <linux/linkage.h>
4#include <asm/dwarf2.h> 4
5#include <asm/cpufeature.h> 5#include <asm/cpufeature.h>
6#include <asm/dwarf2.h>
6 7
7/* 8/*
8 * memcpy - Copy a memory block. 9 * memcpy - Copy a memory block.
9 * 10 *
10 * Input: 11 * Input:
11 * rdi destination 12 * rdi destination
12 * rsi source 13 * rsi source
13 * rdx count 14 * rdx count
14 * 15 *
15 * Output: 16 * Output:
16 * rax original destination 17 * rax original destination
17 */ 18 */
18 19
20/*
21 * memcpy_c() - fast string ops (REP MOVSQ) based variant.
22 *
23 * Calls to this get patched into the kernel image via the
24 * alternative instructions framework:
25 */
19 ALIGN 26 ALIGN
20memcpy_c: 27memcpy_c:
21 CFI_STARTPROC 28 CFI_STARTPROC
22 movq %rdi,%rax 29 movq %rdi, %rax
23 movl %edx,%ecx 30
24 shrl $3,%ecx 31 movl %edx, %ecx
25 andl $7,%edx 32 shrl $3, %ecx
33 andl $7, %edx
26 rep movsq 34 rep movsq
27 movl %edx,%ecx 35 movl %edx, %ecx
28 rep movsb 36 rep movsb
29 ret 37 ret
30 CFI_ENDPROC 38 CFI_ENDPROC
@@ -33,99 +41,110 @@ ENDPROC(memcpy_c)
33ENTRY(__memcpy) 41ENTRY(__memcpy)
34ENTRY(memcpy) 42ENTRY(memcpy)
35 CFI_STARTPROC 43 CFI_STARTPROC
36 pushq %rbx
37 CFI_ADJUST_CFA_OFFSET 8
38 CFI_REL_OFFSET rbx, 0
39 movq %rdi,%rax
40 44
41 movl %edx,%ecx 45 /*
42 shrl $6,%ecx 46 * Put the number of full 64-byte blocks into %ecx.
47 * Tail portion is handled at the end:
48 */
49 movq %rdi, %rax
50 movl %edx, %ecx
51 shrl $6, %ecx
43 jz .Lhandle_tail 52 jz .Lhandle_tail
44 53
45 .p2align 4 54 .p2align 4
46.Lloop_64: 55.Lloop_64:
56 /*
 57 * We decrement the loop index here - and the zero flag is
 58 * checked at the end of the loop (the instructions in between do
 59 * not change the zero flag):
60 */
47 decl %ecx 61 decl %ecx
48 62
49 movq (%rsi),%r11 63 /*
50 movq 8(%rsi),%r8 64 * Move in blocks of 4x16 bytes:
65 */
66 movq 0*8(%rsi), %r11
67 movq 1*8(%rsi), %r8
68 movq %r11, 0*8(%rdi)
69 movq %r8, 1*8(%rdi)
51 70
52 movq %r11,(%rdi) 71 movq 2*8(%rsi), %r9
53 movq %r8,1*8(%rdi) 72 movq 3*8(%rsi), %r10
73 movq %r9, 2*8(%rdi)
74 movq %r10, 3*8(%rdi)
54 75
55 movq 2*8(%rsi),%r9 76 movq 4*8(%rsi), %r11
56 movq 3*8(%rsi),%r10 77 movq 5*8(%rsi), %r8
78 movq %r11, 4*8(%rdi)
79 movq %r8, 5*8(%rdi)
57 80
58 movq %r9,2*8(%rdi) 81 movq 6*8(%rsi), %r9
59 movq %r10,3*8(%rdi) 82 movq 7*8(%rsi), %r10
83 movq %r9, 6*8(%rdi)
84 movq %r10, 7*8(%rdi)
60 85
61 movq 4*8(%rsi),%r11 86 leaq 64(%rsi), %rsi
62 movq 5*8(%rsi),%r8 87 leaq 64(%rdi), %rdi
63 88
64 movq %r11,4*8(%rdi)
65 movq %r8,5*8(%rdi)
66
67 movq 6*8(%rsi),%r9
68 movq 7*8(%rsi),%r10
69
70 movq %r9,6*8(%rdi)
71 movq %r10,7*8(%rdi)
72
73 leaq 64(%rsi),%rsi
74 leaq 64(%rdi),%rdi
75 jnz .Lloop_64 89 jnz .Lloop_64
76 90
77.Lhandle_tail: 91.Lhandle_tail:
78 movl %edx,%ecx 92 movl %edx, %ecx
79 andl $63,%ecx 93 andl $63, %ecx
80 shrl $3,%ecx 94 shrl $3, %ecx
81 jz .Lhandle_7 95 jz .Lhandle_7
96
82 .p2align 4 97 .p2align 4
83.Lloop_8: 98.Lloop_8:
84 decl %ecx 99 decl %ecx
85 movq (%rsi),%r8 100 movq (%rsi), %r8
86 movq %r8,(%rdi) 101 movq %r8, (%rdi)
87 leaq 8(%rdi),%rdi 102 leaq 8(%rdi), %rdi
88 leaq 8(%rsi),%rsi 103 leaq 8(%rsi), %rsi
89 jnz .Lloop_8 104 jnz .Lloop_8
90 105
91.Lhandle_7: 106.Lhandle_7:
92 movl %edx,%ecx 107 movl %edx, %ecx
93 andl $7,%ecx 108 andl $7, %ecx
94 jz .Lende 109 jz .Lend
110
95 .p2align 4 111 .p2align 4
96.Lloop_1: 112.Lloop_1:
97 movb (%rsi),%r8b 113 movb (%rsi), %r8b
98 movb %r8b,(%rdi) 114 movb %r8b, (%rdi)
99 incq %rdi 115 incq %rdi
100 incq %rsi 116 incq %rsi
101 decl %ecx 117 decl %ecx
102 jnz .Lloop_1 118 jnz .Lloop_1
103 119
104.Lende: 120.Lend:
105 popq %rbx
106 CFI_ADJUST_CFA_OFFSET -8
107 CFI_RESTORE rbx
108 ret 121 ret
109.Lfinal:
110 CFI_ENDPROC 122 CFI_ENDPROC
111ENDPROC(memcpy) 123ENDPROC(memcpy)
112ENDPROC(__memcpy) 124ENDPROC(__memcpy)
113 125
114 /* Some CPUs run faster using the string copy instructions. 126 /*
115 It is also a lot simpler. Use this when possible */ 127 * Some CPUs run faster using the string copy instructions.
128 * It is also a lot simpler. Use this when possible:
129 */
116 130
117 .section .altinstr_replacement,"ax" 131 .section .altinstr_replacement, "ax"
1181: .byte 0xeb /* jmp <disp8> */ 1321: .byte 0xeb /* jmp <disp8> */
119 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */ 133 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
1202: 1342:
121 .previous 135 .previous
122 .section .altinstructions,"a" 136
137 .section .altinstructions, "a"
123 .align 8 138 .align 8
124 .quad memcpy 139 .quad memcpy
125 .quad 1b 140 .quad 1b
126 .byte X86_FEATURE_REP_GOOD 141 .byte X86_FEATURE_REP_GOOD
127 /* Replace only beginning, memcpy is used to apply alternatives, so it 142
128 * is silly to overwrite itself with nops - reboot is only outcome... */ 143 /*
144 * Replace only beginning, memcpy is used to apply alternatives,
145 * so it is silly to overwrite itself with nops - reboot is the
146 * only outcome...
147 */
129 .byte 2b - 1b 148 .byte 2b - 1b
130 .byte 2b - 1b 149 .byte 2b - 1b
131 .previous 150 .previous
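
For reference, a C rendition of the copy strategy the restyled assembly uses: 64-byte blocks (the four pairs of 16-byte movq moves), then 8-byte words, then single bytes. A sketch for illustration, not the kernel routine itself:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Mirrors the tiering in memcpy_64.S: n>>6 full blocks, then the
 * 8-byte words of the tail, then the final n&7 bytes. */
static void *memcpy_sketch(void *dst, const void *src, size_t n)
{
    unsigned char *d = dst;
    const unsigned char *s = src;
    size_t i;

    for (i = n >> 6; i; i--, s += 64, d += 64)
        memcpy(d, s, 64);       /* the asm does 8 movq load/store pairs */
    for (i = (n & 63) >> 3; i; i--, s += 8, d += 8)
        memcpy(d, s, 8);        /* .Lloop_8: one movq per iteration */
    for (i = 0; i < (n & 7); i++)
        d[i] = s[i];            /* .Lloop_1: byte tail */
    return dst;
}

int main(void)
{
    char a[200], b[200];
    int i;

    for (i = 0; i < 200; i++)
        a[i] = (char)i;
    memcpy_sketch(b, a, sizeof(a));
    printf("ok=%d\n", memcmp(a, b, sizeof(a)) == 0);
    return 0;
}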
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index f256e73542d..522db5e3d0b 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -121,24 +121,8 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
121 pagefault_enable(); 121 pagefault_enable();
122} 122}
123 123
124void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) 124/*
125{ 125 * This is the same as kmap_atomic() but can map memory that doesn't
126 enum fixed_addresses idx;
127 unsigned long vaddr;
128
129 pagefault_disable();
130
131 debug_kmap_atomic_prot(type);
132
133 idx = type + KM_TYPE_NR * smp_processor_id();
134 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
135 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
136 arch_flush_lazy_mmu_mode();
137
138 return (void*) vaddr;
139}
140
141/* This is the same as kmap_atomic() but can map memory that doesn't
142 * have a struct page associated with it. 126 * have a struct page associated with it.
143 */ 127 */
144void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) 128void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 15219e0d124..fd3da1dda1c 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -94,9 +94,9 @@ struct map_range {
94#define NR_RANGE_MR 5 94#define NR_RANGE_MR 5
95#endif 95#endif
96 96
97static int save_mr(struct map_range *mr, int nr_range, 97static int __meminit save_mr(struct map_range *mr, int nr_range,
98 unsigned long start_pfn, unsigned long end_pfn, 98 unsigned long start_pfn, unsigned long end_pfn,
99 unsigned long page_size_mask) 99 unsigned long page_size_mask)
100{ 100{
101 if (start_pfn < end_pfn) { 101 if (start_pfn < end_pfn) {
102 if (nr_range >= NR_RANGE_MR) 102 if (nr_range >= NR_RANGE_MR)
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 592984e5496..6e60ba698ce 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -32,7 +32,23 @@ int is_io_mapping_possible(resource_size_t base, unsigned long size)
32} 32}
33EXPORT_SYMBOL_GPL(is_io_mapping_possible); 33EXPORT_SYMBOL_GPL(is_io_mapping_possible);
34 34
35/* Map 'pfn' using fixed map 'type' and protections 'prot' 35void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
36{
37 enum fixed_addresses idx;
38 unsigned long vaddr;
39
40 pagefault_disable();
41
42 idx = type + KM_TYPE_NR * smp_processor_id();
43 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
44 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
45 arch_flush_lazy_mmu_mode();
46
47 return (void *)vaddr;
48}
49
50/*
51 * Map 'pfn' using fixed map 'type' and protections 'prot'
36 */ 52 */
37void * 53void *
38iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) 54iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
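
kmap_atomic_prot_pfn(), now hosted here, picks its fixmap slot as type + KM_TYPE_NR * cpu, so each CPU owns a disjoint block of slots and atomic mappings never collide across CPUs. The indexing on its own, with an illustrative KM_TYPE_NR (the real value is the size of the km_type enum):

#include <stdio.h>

/* Illustrative slot count only. */
#define KM_TYPE_NR 20

/* Each CPU owns KM_TYPE_NR consecutive fixmap slots. */
static int kmap_slot(int type, int cpu)
{
    return type + KM_TYPE_NR * cpu;
}

int main(void)
{
    printf("cpu0/type3 -> slot %d\n", kmap_slot(3, 0));
    printf("cpu2/type3 -> slot %d\n", kmap_slot(3, 2));
    return 0;
}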
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index aca924a30ee..55e127f71ed 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -22,13 +22,17 @@
22#include <asm/pgalloc.h> 22#include <asm/pgalloc.h>
23#include <asm/pat.h> 23#include <asm/pat.h>
24 24
25#ifdef CONFIG_X86_64 25static inline int phys_addr_valid(resource_size_t addr)
26
27static inline int phys_addr_valid(unsigned long addr)
28{ 26{
29 return addr < (1UL << boot_cpu_data.x86_phys_bits); 27#ifdef CONFIG_PHYS_ADDR_T_64BIT
28 return !(addr >> boot_cpu_data.x86_phys_bits);
29#else
30 return 1;
31#endif
30} 32}
31 33
34#ifdef CONFIG_X86_64
35
32unsigned long __phys_addr(unsigned long x) 36unsigned long __phys_addr(unsigned long x)
33{ 37{
34 if (x >= __START_KERNEL_map) { 38 if (x >= __START_KERNEL_map) {
@@ -65,11 +69,6 @@ EXPORT_SYMBOL(__virt_addr_valid);
65 69
66#else 70#else
67 71
68static inline int phys_addr_valid(unsigned long addr)
69{
70 return 1;
71}
72
73#ifdef CONFIG_DEBUG_VIRTUAL 72#ifdef CONFIG_DEBUG_VIRTUAL
74unsigned long __phys_addr(unsigned long x) 73unsigned long __phys_addr(unsigned long x)
75{ 74{
@@ -488,7 +487,12 @@ static int __init early_ioremap_debug_setup(char *str)
488early_param("early_ioremap_debug", early_ioremap_debug_setup); 487early_param("early_ioremap_debug", early_ioremap_debug_setup);
489 488
490static __initdata int after_paging_init; 489static __initdata int after_paging_init;
491static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss; 490#define __FIXADDR_TOP (-PAGE_SIZE)
491static pte_t bm_pte[(__fix_to_virt(FIX_DBGP_BASE)
492 ^ __fix_to_virt(FIX_BTMAP_BEGIN)) >> PMD_SHIFT
493 ? PAGE_SIZE / sizeof(pte_t) : 0] __page_aligned_bss;
494#undef __FIXADDR_TOP
495static __initdata pte_t *bm_ptep;
492 496
493static inline pmd_t * __init early_ioremap_pmd(unsigned long addr) 497static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
494{ 498{
@@ -503,6 +507,8 @@ static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
503 507
504static inline pte_t * __init early_ioremap_pte(unsigned long addr) 508static inline pte_t * __init early_ioremap_pte(unsigned long addr)
505{ 509{
510 if (!sizeof(bm_pte))
511 return &bm_ptep[pte_index(addr)];
506 return &bm_pte[pte_index(addr)]; 512 return &bm_pte[pte_index(addr)];
507} 513}
508 514
@@ -520,8 +526,14 @@ void __init early_ioremap_init(void)
520 slot_virt[i] = fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i); 526 slot_virt[i] = fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
521 527
522 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); 528 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
523 memset(bm_pte, 0, sizeof(bm_pte)); 529 if (sizeof(bm_pte)) {
524 pmd_populate_kernel(&init_mm, pmd, bm_pte); 530 memset(bm_pte, 0, sizeof(bm_pte));
531 pmd_populate_kernel(&init_mm, pmd, bm_pte);
532 } else {
533 bm_ptep = pte_offset_kernel(pmd, 0);
534 if (early_ioremap_debug)
535 printk(KERN_INFO "bm_ptep=%p\n", bm_ptep);
536 }
525 537
526 /* 538 /*
527 * The boot-ioremap range spans multiple pmds, for which 539 * The boot-ioremap range spans multiple pmds, for which
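
The bm_pte declaration above uses a zero-length array (a GNU C extension) as a compile-time switch: when FIX_DBGP_BASE and FIX_BTMAP_BEGIN fall in the same PMD, the XOR of their addresses has no bits at or above PMD_SHIFT, the array collapses to zero entries, and early_ioremap_init() falls back to the bm_ptep pointer into the existing page table. A standalone model of that selection with made-up addresses; only the mechanism is the point:

#include <stdio.h>

/* Made-up constants; requires GNU C for the zero-length array,
 * as the kernel code does. */
#define PMD_SHIFT 21
#define ADDR_A 0xfe000000UL
#define ADDR_B 0xfe1ff000UL     /* same 2MB PMD as ADDR_A */

static char table[((ADDR_A ^ ADDR_B) >> PMD_SHIFT) ? 4096 : 0];

int main(void)
{
    if (sizeof(table))
        printf("different PMDs: private table, %zu bytes\n",
               sizeof(table));
    else
        printf("same PMD: reuse the existing page table\n");
    return 0;
}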
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 2ed37158012..640339ee4fb 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -677,10 +677,11 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
677 is_ram = pat_pagerange_is_ram(paddr, paddr + size); 677 is_ram = pat_pagerange_is_ram(paddr, paddr + size);
678 678
679 /* 679 /*
680 * reserve_pfn_range() doesn't support RAM pages. 680 * reserve_pfn_range() doesn't support RAM pages. Maintain the current
681 * behavior with RAM pages by returning success.
681 */ 682 */
682 if (is_ram != 0) 683 if (is_ram != 0)
683 return -EINVAL; 684 return 0;
684 685
685 ret = reserve_memtype(paddr, paddr + size, want_flags, &flags); 686 ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
686 if (ret) 687 if (ret)
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 82d22fc601a..8c362b96b64 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -90,7 +90,7 @@ static int __devinit can_skip_ioresource_align(const struct dmi_system_id *d)
90 return 0; 90 return 0;
91} 91}
92 92
93static struct dmi_system_id can_skip_pciprobe_dmi_table[] __devinitdata = { 93static const struct dmi_system_id can_skip_pciprobe_dmi_table[] __devinitconst = {
94/* 94/*
95 * Systems where PCI IO resource ISA alignment can be skipped 95 * Systems where PCI IO resource ISA alignment can be skipped
96 * when the ISA enable bit in the bridge control is not set 96 * when the ISA enable bit in the bridge control is not set
@@ -183,7 +183,7 @@ static int __devinit assign_all_busses(const struct dmi_system_id *d)
183} 183}
184#endif 184#endif
185 185
186static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = { 186static const struct dmi_system_id __devinitconst pciprobe_dmi_table[] = {
187#ifdef __i386__ 187#ifdef __i386__
188/* 188/*
189 * Laptops which need pci=assign-busses to see Cardbus cards 189 * Laptops which need pci=assign-busses to see Cardbus cards
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 7d388d5cf54..9c49919e4d1 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -356,7 +356,7 @@ static void __devinit pci_fixup_video(struct pci_dev *pdev)
356DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_video); 356DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_video);
357 357
358 358
359static struct dmi_system_id __devinitdata msi_k8t_dmi_table[] = { 359static const struct dmi_system_id __devinitconst msi_k8t_dmi_table[] = {
360 { 360 {
361 .ident = "MSI-K8T-Neo2Fir", 361 .ident = "MSI-K8T-Neo2Fir",
362 .matches = { 362 .matches = {
@@ -413,7 +413,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
413 */ 413 */
414static u16 toshiba_line_size; 414static u16 toshiba_line_size;
415 415
416static struct dmi_system_id __devinitdata toshiba_ohci1394_dmi_table[] = { 416static const struct dmi_system_id __devinitconst toshiba_ohci1394_dmi_table[] = {
417 { 417 {
418 .ident = "Toshiba PS5 based laptop", 418 .ident = "Toshiba PS5 based laptop",
419 .matches = { 419 .matches = {