Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                            |  11
-rwxr-xr-x  arch/x86/include/asm/cpu_debug.h            | 199
-rw-r--r--  arch/x86/include/asm/desc.h                 |   3
-rw-r--r--  arch/x86/include/asm/highmem.h              |   1
-rw-r--r--  arch/x86/include/asm/linkage.h              |  13
-rw-r--r--  arch/x86/include/asm/percpu.h               |   8
-rw-r--r--  arch/x86/include/asm/processor.h            |   5
-rw-r--r--  arch/x86/include/asm/xen/hypercall.h        |   2
-rw-r--r--  arch/x86/kernel/check.c                     |   2
-rw-r--r--  arch/x86/kernel/cpu/Makefile                |   2
-rw-r--r--  arch/x86/kernel/cpu/addon_cpuid_features.c  |   2
-rw-r--r--  arch/x86/kernel/cpu/amd.c                   |   2
-rw-r--r--  arch/x86/kernel/cpu/centaur.c               |   2
-rw-r--r--  arch/x86/kernel/cpu/centaur_64.c            |   2
-rw-r--r--  arch/x86/kernel/cpu/common.c                | 396
-rw-r--r--  arch/x86/kernel/cpu/cpu.h                   |  11
-rwxr-xr-x  arch/x86/kernel/cpu/cpu_debug.c             | 839
-rw-r--r--  arch/x86/kernel/cpu/cyrix.c                 |  16
-rw-r--r--  arch/x86/kernel/cpu/intel.c                 |   7
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c       |   8
-rw-r--r--  arch/x86/kernel/cpu/transmeta.c             |   2
-rw-r--r--  arch/x86/kernel/cpu/umc.c                   |   2
-rw-r--r--  arch/x86/kernel/e820.c                      |  53
-rw-r--r--  arch/x86/kernel/early_printk.c              |  20
-rw-r--r--  arch/x86/kernel/entry_32.S                  |  18
-rw-r--r--  arch/x86/kernel/entry_64.S                  |   4
-rw-r--r--  arch/x86/kernel/irq.c                       |  33
-rw-r--r--  arch/x86/kernel/mmconf-fam10h_64.c          |   2
-rw-r--r--  arch/x86/kernel/mpparse.c                   |   6
-rw-r--r--  arch/x86/kernel/ptrace.c                    |   3
-rw-r--r--  arch/x86/kernel/quirks.c                    |   3
-rw-r--r--  arch/x86/kernel/setup_percpu.c              |  63
-rw-r--r--  arch/x86/lib/memcpy_64.S                    | 143
-rw-r--r--  arch/x86/mm/highmem_32.c                    |  15
-rw-r--r--  arch/x86/mm/init.c                          |   6
-rw-r--r--  arch/x86/mm/iomap_32.c                      |  27
-rw-r--r--  arch/x86/mm/ioremap.c                       |  36
-rw-r--r--  arch/x86/mm/kmmio.c                         |   2
-rw-r--r--  arch/x86/mm/pageattr.c                      |  11
-rw-r--r--  arch/x86/mm/pat.c                           |   5
-rw-r--r--  arch/x86/pci/common.c                       |   4
-rw-r--r--  arch/x86/pci/fixup.c                        |   4
42 files changed, 1557 insertions(+), 436 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 87717f3687d2..34bc3a89228b 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -169,6 +169,9 @@ config GENERIC_HARDIRQS
 	bool
 	default y
 
+config GENERIC_HARDIRQS_NO__DO_IRQ
+	def_bool y
+
 config GENERIC_IRQ_PROBE
 	bool
 	default y
@@ -931,6 +934,12 @@ config X86_CPUID
 	  with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
 	  /dev/cpu/31/cpuid.
 
+config X86_CPU_DEBUG
+	tristate "/sys/kernel/debug/x86/cpu/* - CPU Debug support"
+	---help---
+	  If you select this option, this will provide various x86 CPUs
+	  information through debugfs.
+
 choice
 	prompt "High Memory Support"
 	default HIGHMEM4G if !X86_NUMAQ
@@ -1123,7 +1132,7 @@ config NUMA_EMU
 
 config NODES_SHIFT
 	int "Maximum NUMA Nodes (as a power of 2)" if !MAXSMP
-	range 1 9 if X86_64
+	range 1 9
 	default "9" if MAXSMP
 	default "6" if X86_64
 	default "4" if X86_NUMAQ
diff --git a/arch/x86/include/asm/cpu_debug.h b/arch/x86/include/asm/cpu_debug.h
new file mode 100755
index 000000000000..56f1635e4617
--- /dev/null
+++ b/arch/x86/include/asm/cpu_debug.h
@@ -0,0 +1,199 @@
+#ifndef _ASM_X86_CPU_DEBUG_H
+#define _ASM_X86_CPU_DEBUG_H
+
+/*
+ * CPU x86 architecture debug
+ *
+ * Copyright(C) 2009 Jaswinder Singh Rajput
+ */
+
+/* Register flags */
+enum cpu_debug_bit {
+/* Model Specific Registers (MSRs) */
+	CPU_MC_BIT,			/* Machine Check	*/
+	CPU_MONITOR_BIT,		/* Monitor		*/
+	CPU_TIME_BIT,			/* Time			*/
+	CPU_PMC_BIT,			/* Performance Monitor	*/
+	CPU_PLATFORM_BIT,		/* Platform		*/
+	CPU_APIC_BIT,			/* APIC			*/
+	CPU_POWERON_BIT,		/* Power-on		*/
+	CPU_CONTROL_BIT,		/* Control		*/
+	CPU_FEATURES_BIT,		/* Features control	*/
+	CPU_LBRANCH_BIT,		/* Last Branch		*/
+	CPU_BIOS_BIT,			/* BIOS			*/
+	CPU_FREQ_BIT,			/* Frequency		*/
+	CPU_MTTR_BIT,			/* MTRR			*/
+	CPU_PERF_BIT,			/* Performance		*/
+	CPU_CACHE_BIT,			/* Cache		*/
+	CPU_SYSENTER_BIT,		/* Sysenter		*/
+	CPU_THERM_BIT,			/* Thermal		*/
+	CPU_MISC_BIT,			/* Miscellaneous	*/
+	CPU_DEBUG_BIT,			/* Debug		*/
+	CPU_PAT_BIT,			/* PAT			*/
+	CPU_VMX_BIT,			/* VMX			*/
+	CPU_CALL_BIT,			/* System Call		*/
+	CPU_BASE_BIT,			/* BASE Address		*/
+	CPU_SMM_BIT,			/* System mgmt mode	*/
+	CPU_SVM_BIT,			/* Secure Virtual Machine */
+	CPU_OSVM_BIT,			/* OS-Visible Workaround */
+/* Standard Registers */
+	CPU_TSS_BIT,			/* Task Stack Segment	*/
+	CPU_CR_BIT,			/* Control Registers	*/
+	CPU_DT_BIT,			/* Descriptor Table	*/
+/* End of Registers flags */
+	CPU_REG_ALL_BIT,		/* Select all Registers	*/
+};
+
+#define	CPU_REG_ALL		(~0)	/* Select all Registers	*/
+
+#define	CPU_MC			(1 << CPU_MC_BIT)
+#define	CPU_MONITOR		(1 << CPU_MONITOR_BIT)
+#define	CPU_TIME		(1 << CPU_TIME_BIT)
+#define	CPU_PMC			(1 << CPU_PMC_BIT)
+#define	CPU_PLATFORM		(1 << CPU_PLATFORM_BIT)
+#define	CPU_APIC		(1 << CPU_APIC_BIT)
+#define	CPU_POWERON		(1 << CPU_POWERON_BIT)
+#define	CPU_CONTROL		(1 << CPU_CONTROL_BIT)
+#define	CPU_FEATURES		(1 << CPU_FEATURES_BIT)
+#define	CPU_LBRANCH		(1 << CPU_LBRANCH_BIT)
+#define	CPU_BIOS		(1 << CPU_BIOS_BIT)
+#define	CPU_FREQ		(1 << CPU_FREQ_BIT)
+#define	CPU_MTRR		(1 << CPU_MTTR_BIT)
+#define	CPU_PERF		(1 << CPU_PERF_BIT)
+#define	CPU_CACHE		(1 << CPU_CACHE_BIT)
+#define	CPU_SYSENTER		(1 << CPU_SYSENTER_BIT)
+#define	CPU_THERM		(1 << CPU_THERM_BIT)
+#define	CPU_MISC		(1 << CPU_MISC_BIT)
+#define	CPU_DEBUG		(1 << CPU_DEBUG_BIT)
+#define	CPU_PAT			(1 << CPU_PAT_BIT)
+#define	CPU_VMX			(1 << CPU_VMX_BIT)
+#define	CPU_CALL		(1 << CPU_CALL_BIT)
+#define	CPU_BASE		(1 << CPU_BASE_BIT)
+#define	CPU_SMM			(1 << CPU_SMM_BIT)
+#define	CPU_SVM			(1 << CPU_SVM_BIT)
+#define	CPU_OSVM		(1 << CPU_OSVM_BIT)
+#define	CPU_TSS			(1 << CPU_TSS_BIT)
+#define	CPU_CR			(1 << CPU_CR_BIT)
+#define	CPU_DT			(1 << CPU_DT_BIT)
+
+/* Register file flags */
+enum cpu_file_bit {
+	CPU_INDEX_BIT,			/* index		*/
+	CPU_VALUE_BIT,			/* value		*/
+};
+
+#define	CPU_FILE_VALUE		(1 << CPU_VALUE_BIT)
+
+/*
+ * DisplayFamily_DisplayModel	Processor Families/Processor Number Series
+ * --------------------------	------------------------------------------
+ * 05_01, 05_02, 05_04		Pentium, Pentium with MMX
+ *
+ * 06_01			Pentium Pro
+ * 06_03, 06_05			Pentium II Xeon, Pentium II
+ * 06_07, 06_08, 06_0A, 06_0B	Pentium III Xeon, Pentium III
+ *
+ * 06_09, 06_0D			Pentium M
+ *
+ * 06_0E			Core Duo, Core Solo
+ *
+ * 06_0F			Xeon 3000, 3200, 5100, 5300, 7300 series,
+ *				Core 2 Quad, Core 2 Extreme, Core 2 Duo,
+ *				Pentium dual-core
+ * 06_17			Xeon 5200, 5400 series, Core 2 Quad Q9650
+ *
+ * 06_1C			Atom
+ *
+ * 0F_00, 0F_01, 0F_02		Xeon, Xeon MP, Pentium 4
+ * 0F_03, 0F_04			Xeon, Xeon MP, Pentium 4, Pentium D
+ *
+ * 0F_06			Xeon 7100, 5000 Series, Xeon MP,
+ *				Pentium 4, Pentium D
+ */
+
+/* Register processors bits */
+enum cpu_processor_bit {
+	CPU_NONE,
+/* Intel */
+	CPU_INTEL_PENTIUM_BIT,
+	CPU_INTEL_P6_BIT,
+	CPU_INTEL_PENTIUM_M_BIT,
+	CPU_INTEL_CORE_BIT,
+	CPU_INTEL_CORE2_BIT,
+	CPU_INTEL_ATOM_BIT,
+	CPU_INTEL_XEON_P4_BIT,
+	CPU_INTEL_XEON_MP_BIT,
+};
+
+#define	CPU_ALL			(~0)	/* Select all CPUs	*/
+
+#define	CPU_INTEL_PENTIUM	(1 << CPU_INTEL_PENTIUM_BIT)
+#define	CPU_INTEL_P6		(1 << CPU_INTEL_P6_BIT)
+#define	CPU_INTEL_PENTIUM_M	(1 << CPU_INTEL_PENTIUM_M_BIT)
+#define	CPU_INTEL_CORE		(1 << CPU_INTEL_CORE_BIT)
+#define	CPU_INTEL_CORE2		(1 << CPU_INTEL_CORE2_BIT)
+#define	CPU_INTEL_ATOM		(1 << CPU_INTEL_ATOM_BIT)
+#define	CPU_INTEL_XEON_P4	(1 << CPU_INTEL_XEON_P4_BIT)
+#define	CPU_INTEL_XEON_MP	(1 << CPU_INTEL_XEON_MP_BIT)
+
+#define	CPU_INTEL_PX		(CPU_INTEL_P6 | CPU_INTEL_PENTIUM_M)
+#define	CPU_INTEL_COREX		(CPU_INTEL_CORE | CPU_INTEL_CORE2)
+#define	CPU_INTEL_XEON		(CPU_INTEL_XEON_P4 | CPU_INTEL_XEON_MP)
+#define	CPU_CO_AT		(CPU_INTEL_CORE | CPU_INTEL_ATOM)
+#define	CPU_C2_AT		(CPU_INTEL_CORE2 | CPU_INTEL_ATOM)
+#define	CPU_CX_AT		(CPU_INTEL_COREX | CPU_INTEL_ATOM)
+#define	CPU_CX_XE		(CPU_INTEL_COREX | CPU_INTEL_XEON)
+#define	CPU_P6_XE		(CPU_INTEL_P6 | CPU_INTEL_XEON)
+#define	CPU_PM_CO_AT		(CPU_INTEL_PENTIUM_M | CPU_CO_AT)
+#define	CPU_C2_AT_XE		(CPU_C2_AT | CPU_INTEL_XEON)
+#define	CPU_CX_AT_XE		(CPU_CX_AT | CPU_INTEL_XEON)
+#define	CPU_P6_CX_AT		(CPU_INTEL_P6 | CPU_CX_AT)
+#define	CPU_P6_CX_XE		(CPU_P6_XE | CPU_INTEL_COREX)
+#define	CPU_P6_CX_AT_XE		(CPU_INTEL_P6 | CPU_CX_AT_XE)
+#define	CPU_PM_CX_AT_XE		(CPU_INTEL_PENTIUM_M | CPU_CX_AT_XE)
+#define	CPU_PM_CX_AT		(CPU_INTEL_PENTIUM_M | CPU_CX_AT)
+#define	CPU_PM_CX_XE		(CPU_INTEL_PENTIUM_M | CPU_CX_XE)
+#define	CPU_PX_CX_AT		(CPU_INTEL_PX | CPU_CX_AT)
+#define	CPU_PX_CX_AT_XE		(CPU_INTEL_PX | CPU_CX_AT_XE)
+
+/* Select all Intel CPUs */
+#define	CPU_INTEL_ALL		(CPU_INTEL_PENTIUM | CPU_PX_CX_AT_XE)
+
+#define MAX_CPU_FILES		512
+
+struct cpu_private {
+	unsigned		cpu;
+	unsigned		type;
+	unsigned		reg;
+	unsigned		file;
+};
+
+struct cpu_debug_base {
+	char			*name;		/* Register name	*/
+	unsigned		flag;		/* Register flag	*/
+	unsigned		write;		/* Register write flag	*/
+};
+
+/*
+ * Currently it looks similar to cpu_debug_base but once we add more files
+ * cpu_file_base will go in different direction
+ */
+struct cpu_file_base {
+	char			*name;		/* Register file name	*/
+	unsigned		flag;		/* Register file flag	*/
+	unsigned		write;		/* Register write flag	*/
+};
+
+struct cpu_cpuX_base {
+	struct dentry		*dentry;	/* Register dentry	*/
+	int			init;		/* Register index file	*/
+};
+
+struct cpu_debug_range {
+	unsigned		min;		/* Register range min	*/
+	unsigned		max;		/* Register range max	*/
+	unsigned		flag;		/* Supported flags	*/
+	unsigned		model;		/* Supported models	*/
+};
+
+#endif /* _ASM_X86_CPU_DEBUG_H */
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index dc27705f5443..5623c50d67b2 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -91,7 +91,6 @@ static inline int desc_empty(const void *ptr)
 #define store_gdt(dtr)				native_store_gdt(dtr)
 #define store_idt(dtr)				native_store_idt(dtr)
 #define store_tr(tr)				(tr = native_store_tr())
-#define store_ldt(ldt)				asm("sldt %0":"=m" (ldt))
 
 #define load_TLS(t, cpu)			native_load_tls(t, cpu)
 #define set_ldt					native_set_ldt
@@ -112,6 +111,8 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
 }
 #endif	/* CONFIG_PARAVIRT */
 
+#define store_ldt(ldt) asm("sldt %0" : "=m"(ldt))
+
 static inline void native_write_idt_entry(gate_desc *idt, int entry,
 					  const gate_desc *gate)
 {
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index bf9276bea660..014c2b85ae45 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -63,6 +63,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
 void *kmap_atomic(struct page *page, enum km_type type);
 void kunmap_atomic(void *kvaddr, enum km_type type);
 void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
+void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
 struct page *kmap_atomic_to_page(void *ptr);
 
 #ifndef CONFIG_PARAVIRT
diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
index a0d70b46c27c..12d55e773eb6 100644
--- a/arch/x86/include/asm/linkage.h
+++ b/arch/x86/include/asm/linkage.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_LINKAGE_H
 #define _ASM_X86_LINKAGE_H
 
+#include <linux/stringify.h>
+
 #undef notrace
 #define notrace __attribute__((no_instrument_function))
 
@@ -53,14 +55,9 @@
 	.globl name; \
 	name:
 
-#ifdef CONFIG_X86_64
-#define __ALIGN .p2align 4,,15
-#define __ALIGN_STR ".p2align 4,,15"
-#endif
-
-#ifdef CONFIG_X86_ALIGNMENT_16
-#define __ALIGN .align 16,0x90
-#define __ALIGN_STR ".align 16,0x90"
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_ALIGNMENT_16)
+#define __ALIGN		.p2align 4, 0x90
+#define __ALIGN_STR	__stringify(__ALIGN)
 #endif
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 8f1d2fbec1d4..aee103b26d01 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -43,14 +43,6 @@
 #else /* ...!ASSEMBLY */
 
 #include <linux/stringify.h>
-#include <asm/sections.h>
-
-#define __addr_to_pcpu_ptr(addr)					\
-	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr	\
-	+ (unsigned long)__per_cpu_start)
-#define __pcpu_ptr_to_addr(ptr)						\
-	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr	\
-	- (unsigned long)__per_cpu_start)
 
 #ifdef CONFIG_SMP
 #define __percpu_arg(x)		"%%"__stringify(__percpu_seg)":%P" #x
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 76139506c3e4..ae85a8d66a30 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -75,9 +75,9 @@ struct cpuinfo_x86 {
 #else
 	/* Number of 4K pages in DTLB/ITLB combined(in pages): */
 	int			x86_tlbsize;
+#endif
 	__u8			x86_virt_bits;
 	__u8			x86_phys_bits;
-#endif
 	/* CPUID returned core id bits: */
 	__u8			x86_coreid_bits;
 	/* Max extended CPUID function supported: */
@@ -391,6 +391,9 @@ DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
 DECLARE_INIT_PER_CPU(irq_stack_union);
 
 DECLARE_PER_CPU(char *, irq_stack_ptr);
+DECLARE_PER_CPU(unsigned int, irq_count);
+extern unsigned long kernel_eflags;
+extern asmlinkage void ignore_sysret(void);
 #else	/* X86_64 */
 #ifdef CONFIG_CC_STACKPROTECTOR
 DECLARE_PER_CPU(unsigned long, stack_canary);
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 5e79ca694326..9c371e4a9fa6 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -296,6 +296,8 @@ HYPERVISOR_get_debugreg(int reg)
 static inline int
 HYPERVISOR_update_descriptor(u64 ma, u64 desc)
 {
+	if (sizeof(u64) == sizeof(long))
+		return _hypercall2(int, update_descriptor, ma, desc);
 	return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
 }
 
diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c
index 2ac0ab71412a..b617b1164f1e 100644
--- a/arch/x86/kernel/check.c
+++ b/arch/x86/kernel/check.c
@@ -83,7 +83,7 @@ void __init setup_bios_corruption_check(void)
 		u64 size;
 		addr = find_e820_area_size(addr, &size, PAGE_SIZE);
 
-		if (addr == 0)
+		if (!(addr + 1))
 			break;
 
 		if ((addr + size) > corruption_check_size)
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 82db7f45e2de..d4356f8b7522 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -14,6 +14,8 @@ obj-y += vmware.o hypervisor.o
 obj-$(CONFIG_X86_32)		+= bugs.o cmpxchg.o
 obj-$(CONFIG_X86_64)		+= bugs_64.o
 
+obj-$(CONFIG_X86_CPU_DEBUG)		+= cpu_debug.o
+
 obj-$(CONFIG_CPU_SUP_INTEL)		+= intel.o
 obj-$(CONFIG_CPU_SUP_AMD)		+= amd.o
 obj-$(CONFIG_CPU_SUP_CYRIX_32)		+= cyrix.o
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 6882a735d9c0..8220ae69849d 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -29,7 +29,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
 	u32 regs[4];
 	const struct cpuid_bit *cb;
 
-	static const struct cpuid_bit cpuid_bits[] = {
+	static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
 		{ X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 },
 		{ 0, 0, 0, 0 }
 	};
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index f47df59016c5..7e4a459daa64 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -502,7 +502,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int
 }
 #endif
 
-static struct cpu_dev amd_cpu_dev __cpuinitdata = {
+static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
 	.c_vendor	= "AMD",
 	.c_ident	= { "AuthenticAMD" },
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 89bfdd9cacc6..983e0830f0da 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -468,7 +468,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 	return size;
 }
 
-static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
+static const struct cpu_dev __cpuinitconst centaur_cpu_dev = {
 	.c_vendor	= "Centaur",
 	.c_ident	= { "CentaurHauls" },
 	.c_early_init	= early_init_centaur,
diff --git a/arch/x86/kernel/cpu/centaur_64.c b/arch/x86/kernel/cpu/centaur_64.c
index a1625f5a1e78..51b09c48c9c7 100644
--- a/arch/x86/kernel/cpu/centaur_64.c
+++ b/arch/x86/kernel/cpu/centaur_64.c
@@ -25,7 +25,7 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
 }
 
-static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
+static const struct cpu_dev centaur_cpu_dev __cpuinitconst = {
 	.c_vendor	= "Centaur",
 	.c_ident	= { "CentaurHauls" },
 	.c_early_init	= early_init_centaur,
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 826d5c876278..e2962cc1e27b 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1,52 +1,52 @@
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
 #include <linux/bootmem.h>
+#include <linux/linkage.h>
 #include <linux/bitops.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/kgdb.h>
-#include <linux/topology.h>
+#include <linux/percpu.h>
+#include <linux/string.h>
 #include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/kgdb.h>
 #include <linux/smp.h>
-#include <linux/percpu.h>
-#include <asm/i387.h>
-#include <asm/msr.h>
-#include <asm/io.h>
-#include <asm/linkage.h>
+#include <linux/io.h>
+
+#include <asm/stackprotector.h>
 #include <asm/mmu_context.h>
+#include <asm/hypervisor.h>
+#include <asm/processor.h>
+#include <asm/sections.h>
+#include <asm/topology.h>
+#include <asm/cpumask.h>
+#include <asm/pgtable.h>
+#include <asm/atomic.h>
+#include <asm/proto.h>
+#include <asm/setup.h>
+#include <asm/apic.h>
+#include <asm/desc.h>
+#include <asm/i387.h>
 #include <asm/mtrr.h>
+#include <asm/numa.h>
+#include <asm/asm.h>
+#include <asm/cpu.h>
 #include <asm/mce.h>
+#include <asm/msr.h>
 #include <asm/pat.h>
-#include <asm/asm.h>
-#include <asm/numa.h>
 #include <asm/smp.h>
-#include <asm/cpu.h>
-#include <asm/cpumask.h>
-#include <asm/apic.h>
 
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/uv/uv.h>
 #endif
 
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/desc.h>
-#include <asm/atomic.h>
-#include <asm/proto.h>
-#include <asm/sections.h>
-#include <asm/setup.h>
-#include <asm/hypervisor.h>
-#include <asm/stackprotector.h>
-
 #include "cpu.h"
 
 #ifdef CONFIG_X86_64
 
 /* all of these masks are initialized in setup_cpu_local_masks() */
-cpumask_var_t cpu_callin_mask;
-cpumask_var_t cpu_callout_mask;
 cpumask_var_t cpu_initialized_mask;
+cpumask_var_t cpu_callout_mask;
+cpumask_var_t cpu_callin_mask;
 
 /* representing cpus for which sibling maps can be computed */
 cpumask_var_t cpu_sibling_setup_mask;
@@ -62,15 +62,15 @@ void __init setup_cpu_local_masks(void)
 
 #else /* CONFIG_X86_32 */
 
-cpumask_t cpu_callin_map;
+cpumask_t cpu_sibling_setup_map;
 cpumask_t cpu_callout_map;
 cpumask_t cpu_initialized;
-cpumask_t cpu_sibling_setup_map;
+cpumask_t cpu_callin_map;
 
 #endif /* CONFIG_X86_32 */
 
 
-static struct cpu_dev *this_cpu __cpuinitdata;
+static const struct cpu_dev *this_cpu __cpuinitdata;
 
 DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 #ifdef CONFIG_X86_64
@@ -79,48 +79,48 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 	 * IRET will check the segment types  kkeil 2000/10/28
 	 * Also sysret mandates a special GDT layout
 	 *
-	 * The TLS descriptors are currently at a different place compared to i386.
+	 * TLS descriptors are currently at a different place compared to i386.
 	 * Hopefully nobody expects them at a fixed place (Wine?)
 	 */
 	[GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
 	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
 	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
 	[GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
 	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
 	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
 #else
 	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
 	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
 	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
 	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
 	/*
 	 * Segments used for calling PnP BIOS have byte granularity.
 	 * They code segments and data segments have fixed 64k limits,
 	 * the transfer segment sizes are set at run time.
 	 */
 	/* 32-bit code */
 	[GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
 	/* 16-bit code */
 	[GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
 	/* 16-bit data */
 	[GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
 	/* 16-bit data */
 	[GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
 	/* 16-bit data */
 	[GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
 	/*
 	 * The APM segments have byte granularity and their bases
 	 * are set at run time.  All have 64k limits.
 	 */
 	/* 32-bit code */
 	[GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
 	/* 16-bit code */
 	[GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
 	/* data */
 	[GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
 
 	[GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
 	[GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
 	GDT_STACK_CANARY_INIT
 #endif
 } };
@@ -164,16 +164,17 @@ static inline int flag_is_changeable_p(u32 flag)
 	 * the CPUID. Add "volatile" to not allow gcc to
 	 * optimize the subsequent calls to this function.
 	 */
-	asm volatile ("pushfl\n\t"
-		      "pushfl\n\t"
-		      "popl %0\n\t"
-		      "movl %0,%1\n\t"
-		      "xorl %2,%0\n\t"
-		      "pushl %0\n\t"
-		      "popfl\n\t"
-		      "pushfl\n\t"
-		      "popl %0\n\t"
-		      "popfl\n\t"
+	asm volatile ("pushfl		\n\t"
+		      "pushfl		\n\t"
+		      "popl %0		\n\t"
+		      "movl %0, %1	\n\t"
+		      "xorl %2, %0	\n\t"
+		      "pushl %0		\n\t"
+		      "popfl		\n\t"
+		      "pushfl		\n\t"
+		      "popl %0		\n\t"
+		      "popfl		\n\t"
+
 		      : "=&r" (f1), "=&r" (f2)
 		      : "ir" (flag));
 
@@ -188,18 +189,22 @@ static int __cpuinit have_cpuid_p(void)
 
 static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 {
-	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
-		/* Disable processor serial number */
-		unsigned long lo, hi;
-		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-		lo |= 0x200000;
-		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-		printk(KERN_NOTICE "CPU serial number disabled.\n");
-		clear_cpu_cap(c, X86_FEATURE_PN);
-
-		/* Disabling the serial number may affect the cpuid level */
-		c->cpuid_level = cpuid_eax(0);
-	}
+	unsigned long lo, hi;
+
+	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
+		return;
+
+	/* Disable processor serial number: */
+
+	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+	lo |= 0x200000;
+	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+
+	printk(KERN_NOTICE "CPU serial number disabled.\n");
+	clear_cpu_cap(c, X86_FEATURE_PN);
+
+	/* Disabling the serial number may affect the cpuid level */
+	c->cpuid_level = cpuid_eax(0);
 }
 
 static int __init x86_serial_nr_setup(char *s)
@@ -232,6 +237,7 @@ struct cpuid_dependent_feature {
 	u32 feature;
 	u32 level;
 };
+
 static const struct cpuid_dependent_feature __cpuinitconst
 cpuid_dependent_features[] = {
 	{ X86_FEATURE_MWAIT,		0x00000005 },
@@ -243,7 +249,11 @@ cpuid_dependent_features[] = {
 static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
 {
 	const struct cpuid_dependent_feature *df;
+
 	for (df = cpuid_dependent_features; df->feature; df++) {
+
+		if (!cpu_has(c, df->feature))
+			continue;
 		/*
 		 * Note: cpuid_level is set to -1 if unavailable, but
 		 * extended_extended_level is set to 0 if unavailable
@@ -251,32 +261,32 @@ static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
 		 * when signed; hence the weird messing around with
 		 * signs here...
 		 */
-		if (cpu_has(c, df->feature) &&
-		    ((s32)df->level < 0 ?
+		if (!((s32)df->level < 0 ?
 		     (u32)df->level > (u32)c->extended_cpuid_level :
-		     (s32)df->level > (s32)c->cpuid_level)) {
-			clear_cpu_cap(c, df->feature);
-			if (warn)
-				printk(KERN_WARNING
-				       "CPU: CPU feature %s disabled "
-				       "due to lack of CPUID level 0x%x\n",
-				       x86_cap_flags[df->feature],
-				       df->level);
-		}
+		     (s32)df->level > (s32)c->cpuid_level))
+			continue;
+
+		clear_cpu_cap(c, df->feature);
+		if (!warn)
+			continue;
+
+		printk(KERN_WARNING
+		       "CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
+		       x86_cap_flags[df->feature], df->level);
 	}
 }
 
 /*
  * Naming convention should be: <Name> [(<Codename>)]
  * This table only is used unless init_<vendor>() below doesn't set it;
- * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
- *
+ * in particular, if CPUID levels 0x80000002..4 are supported, this
+ * isn't used
+ *
  */
 
 /* Look up CPU names by table lookup. */
-static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
+static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
 {
-	struct cpu_model_info *info;
+	const struct cpu_model_info *info;
 
 	if (c->x86_model >= 16)
 		return NULL;	/* Range check */
@@ -307,8 +317,10 @@ void load_percpu_segment(int cpu)
 	load_stack_canary_segment();
 }
 
-/* Current gdt points %fs at the "master" per-cpu area: after this,
- * it's on the real one. */
+/*
+ * Current gdt points %fs at the "master" per-cpu area: after this,
+ * it's on the real one.
+ */
 void switch_to_new_gdt(int cpu)
 {
 	struct desc_ptr gdt_descr;
@@ -321,7 +333,7 @@ void switch_to_new_gdt(int cpu)
 	load_percpu_segment(cpu);
 }
 
-static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
+static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};
 
 static void __cpuinit default_init(struct cpuinfo_x86 *c)
 {
@@ -340,7 +352,7 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c)
 #endif
 }
 
-static struct cpu_dev __cpuinitdata default_cpu = {
+static const struct cpu_dev __cpuinitconst default_cpu = {
 	.c_init		= default_init,
 	.c_vendor	= "Unknown",
 	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
@@ -354,22 +366,24 @@ static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
 	if (c->extended_cpuid_level < 0x80000004)
 		return;
 
-	v = (unsigned int *) c->x86_model_id;
+	v = (unsigned int *)c->x86_model_id;
 	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
 	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
 	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
 	c->x86_model_id[48] = 0;
 
-	/* Intel chips right-justify this string for some dumb reason;
-	   undo that brain damage */
+	/*
+	 * Intel chips right-justify this string for some dumb reason;
+	 * undo that brain damage:
+	 */
 	p = q = &c->x86_model_id[0];
 	while (*p == ' ')
 		p++;
 	if (p != q) {
 		while (*p)
 			*q++ = *p++;
 		while (q <= &c->x86_model_id[48])
 			*q++ = '\0';	/* Zero-pad the rest */
 	}
 }
 
@@ -438,27 +452,30 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 
 	if (smp_num_siblings == 1) {
 		printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
-	} else if (smp_num_siblings > 1) {
+		goto out;
+	}
 
-		if (smp_num_siblings > nr_cpu_ids) {
-			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
-			       smp_num_siblings);
-			smp_num_siblings = 1;
-			return;
-		}
+	if (smp_num_siblings <= 1)
+		goto out;
+
+	if (smp_num_siblings > nr_cpu_ids) {
+		pr_warning("CPU: Unsupported number of siblings %d",
+			   smp_num_siblings);
+		smp_num_siblings = 1;
+		return;
+	}
 
 	index_msb = get_count_order(smp_num_siblings);
 	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
 
 	smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 
 	index_msb = get_count_order(smp_num_siblings);
 
 	core_bits = get_count_order(c->x86_max_cores);
 
 	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
 				       ((1 << core_bits) - 1);
-	}
 
 out:
 	if ((c->x86_max_cores * smp_num_siblings) > 1) {
@@ -473,8 +490,8 @@ out:
 static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 {
 	char *v = c->x86_vendor_id;
-	int i;
 	static int printed;
+	int i;
 
 	for (i = 0; i < X86_VENDOR_NUM; i++) {
 		if (!cpu_devs[i])
@@ -483,6 +500,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
 		    (cpu_devs[i]->c_ident[1] &&
 		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
+
 			this_cpu = cpu_devs[i];
 			c->x86_vendor = this_cpu->c_x86_vendor;
 			return;
@@ -491,7 +509,9 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 
 	if (!printed) {
 		printed++;
-		printk(KERN_ERR "CPU: vendor_id '%s' unknown, using generic init.\n", v);
+		printk(KERN_ERR
+		       "CPU: vendor_id '%s' unknown, using generic init.\n", v);
+
 		printk(KERN_ERR "CPU: Your system may be unstable.\n");
 	}
 
@@ -511,14 +531,17 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
 	/* Intel-defined flags: level 0x00000001 */
 	if (c->cpuid_level >= 0x00000001) {
 		u32 junk, tfms, cap0, misc;
+
 		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
 		c->x86 = (tfms >> 8) & 0xf;
 		c->x86_model = (tfms >> 4) & 0xf;
 		c->x86_mask = tfms & 0xf;
+
 		if (c->x86 == 0xf)
 			c->x86 += (tfms >> 20) & 0xff;
 		if (c->x86 >= 0x6)
 			c->x86_model += ((tfms >> 16) & 0xf) << 4;
+
 		if (cap0 & (1<<19)) {
 			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
 			c->x86_cache_alignment = c->x86_clflush_size;
@@ -534,6 +557,7 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 	/* Intel-defined flags: level 0x00000001 */
 	if (c->cpuid_level >= 0x00000001) {
 		u32 capability, excap;
+
 		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
 		c->x86_capability[0] = capability;
 		c->x86_capability[4] = excap;
@@ -542,6 +566,7 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 	/* AMD-defined flags: level 0x80000001 */
 	xlvl = cpuid_eax(0x80000000);
 	c->extended_cpuid_level = xlvl;
+
 	if ((xlvl & 0xffff0000) == 0x80000000) {
 		if (xlvl >= 0x80000001) {
 			c->x86_capability[1] = cpuid_edx(0x80000001);
@@ -549,13 +574,15 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 		}
 	}
 
-#ifdef CONFIG_X86_64
 	if (c->extended_cpuid_level >= 0x80000008) {
 		u32 eax = cpuid_eax(0x80000008);
 
 		c->x86_virt_bits = (eax >> 8) & 0xff;
 		c->x86_phys_bits = eax & 0xff;
 	}
+#ifdef CONFIG_X86_32
+	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
+		c->x86_phys_bits = 36;
 #endif
 
 	if (c->extended_cpuid_level >= 0x80000007)
@@ -602,8 +629,12 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_64
 	c->x86_clflush_size = 64;
+	c->x86_phys_bits = 36;
+	c->x86_virt_bits = 48;
 #else
 	c->x86_clflush_size = 32;
+	c->x86_phys_bits = 32;
+	c->x86_virt_bits = 32;
 #endif
 	c->x86_cache_alignment = c->x86_clflush_size;
 
@@ -634,12 +665,12 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 
 void __init early_cpu_init(void)
 {
-	struct cpu_dev **cdev;
+	const struct cpu_dev *const *cdev;
 	int count = 0;
 
-	printk("KERNEL supported cpus:\n");
+	printk(KERN_INFO "KERNEL supported cpus:\n");
 	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
-		struct cpu_dev *cpudev = *cdev;
+		const struct cpu_dev *cpudev = *cdev;
 		unsigned int j;
 
 		if (count >= X86_VENDOR_NUM)
@@ -650,7 +681,7 @@ void __init early_cpu_init(void)
 		for (j = 0; j < 2; j++) {
 			if (!cpudev->c_ident[j])
 				continue;
-			printk("  %s %s\n", cpudev->c_vendor,
+			printk(KERN_INFO "  %s %s\n", cpudev->c_vendor,
 				cpudev->c_ident[j]);
 		}
 	}
@@ -726,9 +757,13 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	c->x86_coreid_bits = 0;
 #ifdef CONFIG_X86_64
 	c->x86_clflush_size = 64;
+	c->x86_phys_bits = 36;
+	c->x86_virt_bits = 48;
 #else
 	c->cpuid_level = -1;	/* CPUID not detected */
 	c->x86_clflush_size = 32;
+	c->x86_phys_bits = 32;
+	c->x86_virt_bits = 32;
 #endif
 	c->x86_cache_alignment = c->x86_clflush_size;
 	memset(&c->x86_capability, 0, sizeof c->x86_capability);
@@ -759,8 +794,8 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	squash_the_stupid_serial_number(c);
 
 	/*
-	 * The vendor-specific functions might have changed features. Now
-	 * we do "generic changes."
+	 * The vendor-specific functions might have changed features.
+	 * Now we do "generic changes."
 	 */
 
 	/* Filter out anything that depends on CPUID levels we don't have */
@@ -768,7 +803,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 
 	/* If the model name is still unset, do table lookup. */
 	if (!c->x86_model_id[0]) {
-		char *p;
+		const char *p;
 		p = table_lookup_model(c);
 		if (p)
 			strcpy(c->x86_model_id, p);
@@ -843,11 +878,11 @@ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
 }
 
 struct msr_range {
-	unsigned min;
-	unsigned max;
+	unsigned	min;
+	unsigned	max;
 };
 
-static struct msr_range msr_range_array[] __cpuinitdata = {
+static const struct msr_range msr_range_array[] __cpuinitconst = {
 	{ 0x00000000, 0x00000418},
 	{ 0xc0000000, 0xc000040b},
 	{ 0xc0010000, 0xc0010142},
@@ -856,14 +891,15 @@ static struct msr_range msr_range_array[] __cpuinitdata = {
 
 static void __cpuinit print_cpu_msr(void)
 {
+	unsigned index_min, index_max;
 	unsigned index;
 	u64 val;
 	int i;
-	unsigned index_min, index_max;
 
 	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
 		index_min = msr_range_array[i].min;
 		index_max = msr_range_array[i].max;
+
 		for (index = index_min; index < index_max; index++) {
 			if (rdmsrl_amd_safe(index, &val))
 				continue;
@@ -873,6 +909,7 @@ static void __cpuinit print_cpu_msr(void)
 }
 
 static int show_msr __cpuinitdata;
+
 static __init int setup_show_msr(char *arg)
 {
 	int num;
@@ -894,12 +931,14 @@ __setup("noclflush", setup_noclflush);
 
 void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 {
-	char *vendor = NULL;
+	const char *vendor = NULL;
 
-	if (c->x86_vendor < X86_VENDOR_NUM)
+	if (c->x86_vendor < X86_VENDOR_NUM) {
 		vendor = this_cpu->c_vendor;
-	else if (c->cpuid_level >= 0)
-		vendor = c->x86_vendor_id;
+	} else {
+		if (c->cpuid_level >= 0)
+			vendor = c->x86_vendor_id;
+	}
 
 	if (vendor && !strstr(c->x86_model_id, vendor))
 		printk(KERN_CONT "%s ", vendor);
@@ -926,10 +965,12 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 static __init int setup_disablecpuid(char *arg)
 {
 	int bit;
+
 	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
 		setup_clear_cpu_cap(bit);
 	else
 		return 0;
+
 	return 1;
 }
 __setup("clearcpuid=", setup_disablecpuid);
@@ -939,6 +980,7 @@ struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
 
 DEFINE_PER_CPU_FIRST(union irq_stack_union,
 		     irq_stack_union) __aligned(PAGE_SIZE);
+
 DEFINE_PER_CPU(char *, irq_stack_ptr) =
 	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
 
@@ -948,12 +990,21 @@ EXPORT_PER_CPU_SYMBOL(kernel_stack);
 
 DEFINE_PER_CPU(unsigned int, irq_count) = -1;
 
+/*
+ * Special IST stacks which the CPU switches to when it calls
+ * an IST-marked descriptor entry. Up to 7 stacks (hardware
+ * limit), all of them are 4K, except the debug stack which
+ * is 8K.
+ */
+static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
+	  [0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
+	  [DEBUG_STACK - 1]			= DEBUG_STKSZ
+};
+
 static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
 	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ])
 	__aligned(PAGE_SIZE);
 
-extern asmlinkage void ignore_sysret(void);
-
 /* May not be marked __init: used by software suspend */
 void syscall_init(void)
 {
@@ -983,7 +1034,7 @@ unsigned long kernel_eflags;
  */
 DEFINE_PER_CPU(struct orig_ist, orig_ist);
 
-#else /* x86_64 */
+#else /* CONFIG_X86_64 */
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 DEFINE_PER_CPU(unsigned long, stack_canary);
@@ -995,9 +1046,26 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 	memset(regs, 0, sizeof(struct pt_regs));
 	regs->fs = __KERNEL_PERCPU;
 	regs->gs = __KERNEL_STACK_CANARY;
+
 	return regs;
 }
-#endif /* x86_64 */
+#endif /* CONFIG_X86_64 */
+
+/*
+ * Clear all 6 debug registers:
+ */
+static void clear_all_debug_regs(void)
+{
+	int i;
+
+	for (i = 0; i < 8; i++) {
+		/* Ignore db4, db5 */
+		if ((i == 4) || (i == 5))
+			continue;
+
+		set_debugreg(0, i);
+	}
+}
 
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
@@ -1007,15 +1075,20 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
  * A lot of state is already set up in PDA init for 64 bit
  */
 #ifdef CONFIG_X86_64
+
 void __cpuinit cpu_init(void)
 {
-	int cpu = stack_smp_processor_id();
-	struct tss_struct *t = &per_cpu(init_tss, cpu);
-	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
-	unsigned long v;
+	struct orig_ist *orig_ist;
 	struct task_struct *me;
+	struct tss_struct *t;
+	unsigned long v;
+	int cpu;
 	int i;
 
+	cpu = stack_smp_processor_id();
+	t = &per_cpu(init_tss, cpu);
+	orig_ist = &per_cpu(orig_ist, cpu);
+
 #ifdef CONFIG_NUMA
 	if (cpu != 0 && percpu_read(node_number) == 0 &&
 	    cpu_to_node(cpu) != NUMA_NO_NODE)
@@ -1056,19 +1129,17 @@ void __cpuinit cpu_init(void)
 	 * set up and load the per-CPU TSS
 	 */
 	if (!orig_ist->ist[0]) {
-		static const unsigned int sizes[N_EXCEPTION_STACKS] = {
-			[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
-			[DEBUG_STACK - 1] = DEBUG_STKSZ
-		};
 		char *estacks = per_cpu(exception_stacks, cpu);
+
 		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
-			estacks += sizes[v];
+			estacks += exception_stack_sizes[v];
 			orig_ist->ist[v] = t->x86_tss.ist[v] =
 					(unsigned long)estacks;
 		}
 	}
 
 	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+
 	/*
 	 * <= is required because the CPU will access up to
 	 * 8 bits beyond the end of the IO permission bitmap.
@@ -1078,8 +1149,7 @@ void __cpuinit cpu_init(void)
 
 	atomic_inc(&init_mm.mm_count);
 	me->active_mm = &init_mm;
-	if (me->mm)
-		BUG();
+	BUG_ON(me->mm);
 	enter_lazy_tlb(&init_mm, me);
 
 	load_sp0(t, &current->thread);
@@ -1098,17 +1168,7 @@ void __cpuinit cpu_init(void)
 		arch_kgdb_ops.correct_hw_break();
 	else
 #endif
-	{
-		/*
-		 * Clear all 6 debug registers:
-		 */
-		set_debugreg(0UL, 0);
-		set_debugreg(0UL, 1);
-		set_debugreg(0UL, 2);
-		set_debugreg(0UL, 3);
-		set_debugreg(0UL, 6);
-		set_debugreg(0UL, 7);
-	}
+		clear_all_debug_regs();
 
 	fpu_init();
 
@@ -1129,7 +1189,8 @@ void __cpuinit cpu_init(void)
 
 	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
 		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
-		for (;;) local_irq_enable();
+		for (;;)
+			local_irq_enable();
 	}
 
 	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
@@ -1145,8 +1206,7 @@ void __cpuinit cpu_init(void)
 	 */
 	atomic_inc(&init_mm.mm_count);
 	curr->active_mm = &init_mm;
-	if (curr->mm)
-		BUG();
+	BUG_ON(curr->mm);
 	enter_lazy_tlb(&init_mm, curr);
 
 	load_sp0(t, thread);
@@ -1159,13 +1219,7 @@ void __cpuinit cpu_init(void)
 	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
 #endif
 
-	/* Clear all 6 debug registers: */
-	set_debugreg(0, 0);
-	set_debugreg(0, 1);
-	set_debugreg(0, 2);
-	set_debugreg(0, 3);
-	set_debugreg(0, 6);
-	set_debugreg(0, 7);
+	clear_all_debug_regs();
 
 	/*
 	 * Force FPU initialization:
@@ -1185,6 +1239,4 @@ void __cpuinit cpu_init(void)
 
 	xsave_init();
 }
-
-
 #endif
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index de4094a39210..9469ecb5aeb8 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -5,15 +5,15 @@
 struct cpu_model_info {
 	int		vendor;
 	int		family;
-	char		*model_names[16];
+	const char	*model_names[16];
 };
 
 /* attempt to consolidate cpu attributes */
 struct cpu_dev {
-	char		*c_vendor;
+	const char	*c_vendor;
 
 	/* some have two possibilities for cpuid string */
-	char		*c_ident[2];
+	const char	*c_ident[2];
 
 	struct cpu_model_info c_models[4];
 
@@ -25,11 +25,12 @@ struct cpu_dev {
25}; 25};
26 26
27#define cpu_dev_register(cpu_devX) \ 27#define cpu_dev_register(cpu_devX) \
28 static struct cpu_dev *__cpu_dev_##cpu_devX __used \ 28 static const struct cpu_dev *const __cpu_dev_##cpu_devX __used \
29 __attribute__((__section__(".x86_cpu_dev.init"))) = \ 29 __attribute__((__section__(".x86_cpu_dev.init"))) = \
30 &cpu_devX; 30 &cpu_devX;
31 31
32extern struct cpu_dev *__x86_cpu_dev_start[], *__x86_cpu_dev_end[]; 32extern const struct cpu_dev *const __x86_cpu_dev_start[],
33 *const __x86_cpu_dev_end[];
33 34
34extern void display_cacheinfo(struct cpuinfo_x86 *c); 35extern void display_cacheinfo(struct cpuinfo_x86 *c);
35 36
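The constification above also affects how the vendor table is walked: cpu_dev_register() drops a const pointer into the .x86_cpu_dev.init section, and the linker-provided __x86_cpu_dev_start/__x86_cpu_dev_end symbols bracket that array. A minimal sketch of the consumer side (find_cpu_vendor() is a hypothetical name; the real lookup in common.c also checks c_ident[1] for vendors with two CPUID strings):

	static const struct cpu_dev *find_cpu_vendor(const char *ident)
	{
		const struct cpu_dev *const *cdev;

		for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
			const struct cpu_dev *cpudev = *cdev;

			if (cpudev->c_ident[0] && !strcmp(ident, cpudev->c_ident[0]))
				return cpudev;
		}

		return NULL;
	}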
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
new file mode 100755
index 000000000000..21c0cf8ced18
--- /dev/null
+++ b/arch/x86/kernel/cpu/cpu_debug.c
@@ -0,0 +1,839 @@
1/*
2 * CPU x86 architecture debug code
3 *
4 * Copyright(C) 2009 Jaswinder Singh Rajput
5 *
6 * For licensing details see kernel-base/COPYING
7 */
8
9#include <linux/interrupt.h>
10#include <linux/compiler.h>
11#include <linux/seq_file.h>
12#include <linux/debugfs.h>
13#include <linux/kprobes.h>
14#include <linux/uaccess.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/percpu.h>
18#include <linux/signal.h>
19#include <linux/errno.h>
20#include <linux/sched.h>
21#include <linux/types.h>
22#include <linux/init.h>
23#include <linux/slab.h>
24#include <linux/smp.h>
25
26#include <asm/cpu_debug.h>
27#include <asm/paravirt.h>
28#include <asm/system.h>
29#include <asm/traps.h>
30#include <asm/apic.h>
31#include <asm/desc.h>
32
33static DEFINE_PER_CPU(struct cpu_cpuX_base, cpu_arr[CPU_REG_ALL_BIT]);
34static DEFINE_PER_CPU(struct cpu_private *, priv_arr[MAX_CPU_FILES]);
35static DEFINE_PER_CPU(unsigned, cpu_modelflag);
36static DEFINE_PER_CPU(int, cpu_priv_count);
37static DEFINE_PER_CPU(unsigned, cpu_model);
38
39static DEFINE_MUTEX(cpu_debug_lock);
40
41static struct dentry *cpu_debugfs_dir;
42
43static struct cpu_debug_base cpu_base[] = {
44 { "mc", CPU_MC, 0 },
45 { "monitor", CPU_MONITOR, 0 },
46 { "time", CPU_TIME, 0 },
47 { "pmc", CPU_PMC, 1 },
48 { "platform", CPU_PLATFORM, 0 },
49 { "apic", CPU_APIC, 0 },
50 { "poweron", CPU_POWERON, 0 },
51 { "control", CPU_CONTROL, 0 },
52 { "features", CPU_FEATURES, 0 },
53 { "lastbranch", CPU_LBRANCH, 0 },
54 { "bios", CPU_BIOS, 0 },
55 { "freq", CPU_FREQ, 0 },
56 { "mtrr", CPU_MTRR, 0 },
57 { "perf", CPU_PERF, 0 },
58 { "cache", CPU_CACHE, 0 },
59 { "sysenter", CPU_SYSENTER, 0 },
60 { "therm", CPU_THERM, 0 },
61 { "misc", CPU_MISC, 0 },
62 { "debug", CPU_DEBUG, 0 },
63 { "pat", CPU_PAT, 0 },
64 { "vmx", CPU_VMX, 0 },
65 { "call", CPU_CALL, 0 },
66 { "base", CPU_BASE, 0 },
67 { "smm", CPU_SMM, 0 },
68 { "svm", CPU_SVM, 0 },
69 { "osvm", CPU_OSVM, 0 },
70 { "tss", CPU_TSS, 0 },
71 { "cr", CPU_CR, 0 },
72 { "dt", CPU_DT, 0 },
73 { "registers", CPU_REG_ALL, 0 },
74};
75
76static struct cpu_file_base cpu_file[] = {
77 { "index", CPU_REG_ALL, 0 },
78 { "value", CPU_REG_ALL, 1 },
79};
80
81/* Intel Registers Range */
82static struct cpu_debug_range cpu_intel_range[] = {
83 { 0x00000000, 0x00000001, CPU_MC, CPU_INTEL_ALL },
84 { 0x00000006, 0x00000007, CPU_MONITOR, CPU_CX_AT_XE },
85 { 0x00000010, 0x00000010, CPU_TIME, CPU_INTEL_ALL },
86 { 0x00000011, 0x00000013, CPU_PMC, CPU_INTEL_PENTIUM },
87 { 0x00000017, 0x00000017, CPU_PLATFORM, CPU_PX_CX_AT_XE },
88 { 0x0000001B, 0x0000001B, CPU_APIC, CPU_P6_CX_AT_XE },
89
90 { 0x0000002A, 0x0000002A, CPU_POWERON, CPU_PX_CX_AT_XE },
91 { 0x0000002B, 0x0000002B, CPU_POWERON, CPU_INTEL_XEON },
92 { 0x0000002C, 0x0000002C, CPU_FREQ, CPU_INTEL_XEON },
93 { 0x0000003A, 0x0000003A, CPU_CONTROL, CPU_CX_AT_XE },
94
95 { 0x00000040, 0x00000043, CPU_LBRANCH, CPU_PM_CX_AT_XE },
96 { 0x00000044, 0x00000047, CPU_LBRANCH, CPU_PM_CO_AT },
97 { 0x00000060, 0x00000063, CPU_LBRANCH, CPU_C2_AT },
98 { 0x00000064, 0x00000067, CPU_LBRANCH, CPU_INTEL_ATOM },
99
100 { 0x00000079, 0x00000079, CPU_BIOS, CPU_P6_CX_AT_XE },
101 { 0x00000088, 0x0000008A, CPU_CACHE, CPU_INTEL_P6 },
102 { 0x0000008B, 0x0000008B, CPU_BIOS, CPU_P6_CX_AT_XE },
103 { 0x0000009B, 0x0000009B, CPU_MONITOR, CPU_INTEL_XEON },
104
105 { 0x000000C1, 0x000000C2, CPU_PMC, CPU_P6_CX_AT },
106 { 0x000000CD, 0x000000CD, CPU_FREQ, CPU_CX_AT },
107 { 0x000000E7, 0x000000E8, CPU_PERF, CPU_CX_AT },
108 { 0x000000FE, 0x000000FE, CPU_MTRR, CPU_P6_CX_XE },
109
110 { 0x00000116, 0x00000116, CPU_CACHE, CPU_INTEL_P6 },
111 { 0x00000118, 0x00000118, CPU_CACHE, CPU_INTEL_P6 },
112 { 0x00000119, 0x00000119, CPU_CACHE, CPU_INTEL_PX },
113 { 0x0000011A, 0x0000011B, CPU_CACHE, CPU_INTEL_P6 },
114 { 0x0000011E, 0x0000011E, CPU_CACHE, CPU_PX_CX_AT },
115
116 { 0x00000174, 0x00000176, CPU_SYSENTER, CPU_P6_CX_AT_XE },
117 { 0x00000179, 0x0000017A, CPU_MC, CPU_PX_CX_AT_XE },
118 { 0x0000017B, 0x0000017B, CPU_MC, CPU_P6_XE },
119 { 0x00000186, 0x00000187, CPU_PMC, CPU_P6_CX_AT },
120 { 0x00000198, 0x00000199, CPU_PERF, CPU_PM_CX_AT_XE },
121 { 0x0000019A, 0x0000019A, CPU_TIME, CPU_PM_CX_AT_XE },
122 { 0x0000019B, 0x0000019D, CPU_THERM, CPU_PM_CX_AT_XE },
123 { 0x000001A0, 0x000001A0, CPU_MISC, CPU_PM_CX_AT_XE },
124
125 { 0x000001C9, 0x000001C9, CPU_LBRANCH, CPU_PM_CX_AT },
126 { 0x000001D7, 0x000001D8, CPU_LBRANCH, CPU_INTEL_XEON },
127 { 0x000001D9, 0x000001D9, CPU_DEBUG, CPU_CX_AT_XE },
128 { 0x000001DA, 0x000001DA, CPU_LBRANCH, CPU_INTEL_XEON },
129 { 0x000001DB, 0x000001DB, CPU_LBRANCH, CPU_P6_XE },
130 { 0x000001DC, 0x000001DC, CPU_LBRANCH, CPU_INTEL_P6 },
131 { 0x000001DD, 0x000001DE, CPU_LBRANCH, CPU_PX_CX_AT_XE },
132 { 0x000001E0, 0x000001E0, CPU_LBRANCH, CPU_INTEL_P6 },
133
134 { 0x00000200, 0x0000020F, CPU_MTRR, CPU_P6_CX_XE },
135 { 0x00000250, 0x00000250, CPU_MTRR, CPU_P6_CX_XE },
136 { 0x00000258, 0x00000259, CPU_MTRR, CPU_P6_CX_XE },
137 { 0x00000268, 0x0000026F, CPU_MTRR, CPU_P6_CX_XE },
138 { 0x00000277, 0x00000277, CPU_PAT, CPU_C2_AT_XE },
139 { 0x000002FF, 0x000002FF, CPU_MTRR, CPU_P6_CX_XE },
140
141 { 0x00000300, 0x00000308, CPU_PMC, CPU_INTEL_XEON },
142 { 0x00000309, 0x0000030B, CPU_PMC, CPU_C2_AT_XE },
143 { 0x0000030C, 0x00000311, CPU_PMC, CPU_INTEL_XEON },
144 { 0x00000345, 0x00000345, CPU_PMC, CPU_C2_AT },
145 { 0x00000360, 0x00000371, CPU_PMC, CPU_INTEL_XEON },
146 { 0x0000038D, 0x00000390, CPU_PMC, CPU_C2_AT },
147 { 0x000003A0, 0x000003BE, CPU_PMC, CPU_INTEL_XEON },
148 { 0x000003C0, 0x000003CD, CPU_PMC, CPU_INTEL_XEON },
149 { 0x000003E0, 0x000003E1, CPU_PMC, CPU_INTEL_XEON },
150 { 0x000003F0, 0x000003F0, CPU_PMC, CPU_INTEL_XEON },
151 { 0x000003F1, 0x000003F1, CPU_PMC, CPU_C2_AT_XE },
152 { 0x000003F2, 0x000003F2, CPU_PMC, CPU_INTEL_XEON },
153
154 { 0x00000400, 0x00000402, CPU_MC, CPU_PM_CX_AT_XE },
155 { 0x00000403, 0x00000403, CPU_MC, CPU_INTEL_XEON },
156 { 0x00000404, 0x00000406, CPU_MC, CPU_PM_CX_AT_XE },
157 { 0x00000407, 0x00000407, CPU_MC, CPU_INTEL_XEON },
158 { 0x00000408, 0x0000040A, CPU_MC, CPU_PM_CX_AT_XE },
159 { 0x0000040B, 0x0000040B, CPU_MC, CPU_INTEL_XEON },
160 { 0x0000040C, 0x0000040E, CPU_MC, CPU_PM_CX_XE },
161 { 0x0000040F, 0x0000040F, CPU_MC, CPU_INTEL_XEON },
162 { 0x00000410, 0x00000412, CPU_MC, CPU_PM_CX_AT_XE },
163 { 0x00000413, 0x00000417, CPU_MC, CPU_CX_AT_XE },
164 { 0x00000480, 0x0000048B, CPU_VMX, CPU_CX_AT_XE },
165
166 { 0x00000600, 0x00000600, CPU_DEBUG, CPU_PM_CX_AT_XE },
167 { 0x00000680, 0x0000068F, CPU_LBRANCH, CPU_INTEL_XEON },
168 { 0x000006C0, 0x000006CF, CPU_LBRANCH, CPU_INTEL_XEON },
169
170 { 0x000107CC, 0x000107D3, CPU_PMC, CPU_INTEL_XEON_MP },
171
172 { 0xC0000080, 0xC0000080, CPU_FEATURES, CPU_INTEL_XEON },
173 { 0xC0000081, 0xC0000082, CPU_CALL, CPU_INTEL_XEON },
174 { 0xC0000084, 0xC0000084, CPU_CALL, CPU_INTEL_XEON },
175 { 0xC0000100, 0xC0000102, CPU_BASE, CPU_INTEL_XEON },
176};
177
178/* AMD Registers Range */
179static struct cpu_debug_range cpu_amd_range[] = {
180 { 0x00000010, 0x00000010, CPU_TIME, CPU_ALL, },
181 { 0x0000001B, 0x0000001B, CPU_APIC, CPU_ALL, },
182 { 0x000000FE, 0x000000FE, CPU_MTRR, CPU_ALL, },
183
184 { 0x00000174, 0x00000176, CPU_SYSENTER, CPU_ALL, },
185 { 0x00000179, 0x0000017A, CPU_MC, CPU_ALL, },
186 { 0x0000017B, 0x0000017B, CPU_MC, CPU_ALL, },
187 { 0x000001D9, 0x000001D9, CPU_DEBUG, CPU_ALL, },
188 { 0x000001DB, 0x000001DE, CPU_LBRANCH, CPU_ALL, },
189
190 { 0x00000200, 0x0000020F, CPU_MTRR, CPU_ALL, },
191 { 0x00000250, 0x00000250, CPU_MTRR, CPU_ALL, },
192 { 0x00000258, 0x00000259, CPU_MTRR, CPU_ALL, },
193 { 0x00000268, 0x0000026F, CPU_MTRR, CPU_ALL, },
194 { 0x00000277, 0x00000277, CPU_PAT, CPU_ALL, },
195 { 0x000002FF, 0x000002FF, CPU_MTRR, CPU_ALL, },
196
197 { 0x00000400, 0x00000417, CPU_MC, CPU_ALL, },
198
199 { 0xC0000080, 0xC0000080, CPU_FEATURES, CPU_ALL, },
200 { 0xC0000081, 0xC0000084, CPU_CALL, CPU_ALL, },
201 { 0xC0000100, 0xC0000102, CPU_BASE, CPU_ALL, },
202 { 0xC0000103, 0xC0000103, CPU_TIME, CPU_ALL, },
203
204 { 0xC0000408, 0xC000040A, CPU_MC, CPU_ALL, },
205
206 { 0xc0010000, 0xc0010007, CPU_PMC, CPU_ALL, },
207 { 0xc0010010, 0xc0010010, CPU_MTRR, CPU_ALL, },
208 { 0xc0010016, 0xc001001A, CPU_MTRR, CPU_ALL, },
209 { 0xc001001D, 0xc001001D, CPU_MTRR, CPU_ALL, },
210 { 0xc0010030, 0xc0010035, CPU_BIOS, CPU_ALL, },
211 { 0xc0010056, 0xc0010056, CPU_SMM, CPU_ALL, },
212 { 0xc0010061, 0xc0010063, CPU_SMM, CPU_ALL, },
213 { 0xc0010074, 0xc0010074, CPU_MC, CPU_ALL, },
214 { 0xc0010111, 0xc0010113, CPU_SMM, CPU_ALL, },
215 { 0xc0010114, 0xc0010118, CPU_SVM, CPU_ALL, },
216 { 0xc0010119, 0xc001011A, CPU_SMM, CPU_ALL, },
217 { 0xc0010140, 0xc0010141, CPU_OSVM, CPU_ALL, },
218 { 0xc0010156, 0xc0010156, CPU_SMM, CPU_ALL, },
219};
220
221
222static int get_cpu_modelflag(unsigned cpu)
223{
224 int flag;
225
226 switch (per_cpu(cpu_model, cpu)) {
227 /* Intel */
228 case 0x0501:
229 case 0x0502:
230 case 0x0504:
231 flag = CPU_INTEL_PENTIUM;
232 break;
233 case 0x0601:
234 case 0x0603:
235 case 0x0605:
236 case 0x0607:
237 case 0x0608:
238 case 0x060A:
239 case 0x060B:
240 flag = CPU_INTEL_P6;
241 break;
242 case 0x0609:
243 case 0x060D:
244 flag = CPU_INTEL_PENTIUM_M;
245 break;
246 case 0x060E:
247 flag = CPU_INTEL_CORE;
248 break;
249 case 0x060F:
250 case 0x0617:
251 flag = CPU_INTEL_CORE2;
252 break;
253 case 0x061C:
254 flag = CPU_INTEL_ATOM;
255 break;
256 case 0x0F00:
257 case 0x0F01:
258 case 0x0F02:
259 case 0x0F03:
260 case 0x0F04:
261 flag = CPU_INTEL_XEON_P4;
262 break;
263 case 0x0F06:
264 flag = CPU_INTEL_XEON_MP;
265 break;
266 default:
267 flag = CPU_NONE;
268 break;
269 }
270
271 return flag;
272}
273
274static int get_cpu_range_count(unsigned cpu)
275{
276 int index;
277
278 switch (per_cpu(cpu_model, cpu) >> 16) {
279 case X86_VENDOR_INTEL:
280 index = ARRAY_SIZE(cpu_intel_range);
281 break;
282 case X86_VENDOR_AMD:
283 index = ARRAY_SIZE(cpu_amd_range);
284 break;
285 default:
286 index = 0;
287 break;
288 }
289
290 return index;
291}
292
293static int is_typeflag_valid(unsigned cpu, unsigned flag)
294{
295 unsigned vendor, modelflag;
296 int i, index;
297
298	/* Standard registers should always be valid */
299 if (flag >= CPU_TSS)
300 return 1;
301
302 modelflag = per_cpu(cpu_modelflag, cpu);
303 vendor = per_cpu(cpu_model, cpu) >> 16;
304 index = get_cpu_range_count(cpu);
305
306 for (i = 0; i < index; i++) {
307 switch (vendor) {
308 case X86_VENDOR_INTEL:
309 if ((cpu_intel_range[i].model & modelflag) &&
310 (cpu_intel_range[i].flag & flag))
311 return 1;
312 break;
313 case X86_VENDOR_AMD:
314 if (cpu_amd_range[i].flag & flag)
315 return 1;
316 break;
317 }
318 }
319
320 /* Invalid */
321 return 0;
322}
323
324static unsigned get_cpu_range(unsigned cpu, unsigned *min, unsigned *max,
325 int index, unsigned flag)
326{
327 unsigned modelflag;
328
329 modelflag = per_cpu(cpu_modelflag, cpu);
330 *max = 0;
331 switch (per_cpu(cpu_model, cpu) >> 16) {
332 case X86_VENDOR_INTEL:
333 if ((cpu_intel_range[index].model & modelflag) &&
334 (cpu_intel_range[index].flag & flag)) {
335 *min = cpu_intel_range[index].min;
336 *max = cpu_intel_range[index].max;
337 }
338 break;
339 case X86_VENDOR_AMD:
340 if (cpu_amd_range[index].flag & flag) {
341 *min = cpu_amd_range[index].min;
342 *max = cpu_amd_range[index].max;
343 }
344 break;
345 }
346
347 return *max;
348}
349
350/* This function can also be called with seq = NULL for printk */
351static void print_cpu_data(struct seq_file *seq, unsigned type,
352 u32 low, u32 high)
353{
354 struct cpu_private *priv;
355 u64 val = high;
356
357 if (seq) {
358 priv = seq->private;
359 if (priv->file) {
360 val = (val << 32) | low;
361 seq_printf(seq, "0x%llx\n", val);
362 } else
363 seq_printf(seq, " %08x: %08x_%08x\n",
364 type, high, low);
365 } else
366 printk(KERN_INFO " %08x: %08x_%08x\n", type, high, low);
367}
368
369/* This function can also be called with seq = NULL for printk */
370static void print_msr(struct seq_file *seq, unsigned cpu, unsigned flag)
371{
372 unsigned msr, msr_min, msr_max;
373 struct cpu_private *priv;
374 u32 low, high;
375 int i, range;
376
377 if (seq) {
378 priv = seq->private;
379 if (priv->file) {
380 if (!rdmsr_safe_on_cpu(priv->cpu, priv->reg,
381 &low, &high))
382 print_cpu_data(seq, priv->reg, low, high);
383 return;
384 }
385 }
386
387 range = get_cpu_range_count(cpu);
388
389 for (i = 0; i < range; i++) {
390 if (!get_cpu_range(cpu, &msr_min, &msr_max, i, flag))
391 continue;
392
393 for (msr = msr_min; msr <= msr_max; msr++) {
394 if (rdmsr_safe_on_cpu(cpu, msr, &low, &high))
395 continue;
396 print_cpu_data(seq, msr, low, high);
397 }
398 }
399}
400
401static void print_tss(void *arg)
402{
403 struct pt_regs *regs = task_pt_regs(current);
404 struct seq_file *seq = arg;
405 unsigned int seg;
406
407 seq_printf(seq, " RAX\t: %016lx\n", regs->ax);
408 seq_printf(seq, " RBX\t: %016lx\n", regs->bx);
409 seq_printf(seq, " RCX\t: %016lx\n", regs->cx);
410 seq_printf(seq, " RDX\t: %016lx\n", regs->dx);
411
412 seq_printf(seq, " RSI\t: %016lx\n", regs->si);
413 seq_printf(seq, " RDI\t: %016lx\n", regs->di);
414 seq_printf(seq, " RBP\t: %016lx\n", regs->bp);
415 seq_printf(seq, " ESP\t: %016lx\n", regs->sp);
416
417#ifdef CONFIG_X86_64
418 seq_printf(seq, " R08\t: %016lx\n", regs->r8);
419 seq_printf(seq, " R09\t: %016lx\n", regs->r9);
420 seq_printf(seq, " R10\t: %016lx\n", regs->r10);
421 seq_printf(seq, " R11\t: %016lx\n", regs->r11);
422 seq_printf(seq, " R12\t: %016lx\n", regs->r12);
423 seq_printf(seq, " R13\t: %016lx\n", regs->r13);
424 seq_printf(seq, " R14\t: %016lx\n", regs->r14);
425 seq_printf(seq, " R15\t: %016lx\n", regs->r15);
426#endif
427
428 asm("movl %%cs,%0" : "=r" (seg));
429 seq_printf(seq, " CS\t: %04x\n", seg);
430 asm("movl %%ds,%0" : "=r" (seg));
431 seq_printf(seq, " DS\t: %04x\n", seg);
432 seq_printf(seq, " SS\t: %04lx\n", regs->ss & 0xffff);
433 asm("movl %%es,%0" : "=r" (seg));
434 seq_printf(seq, " ES\t: %04x\n", seg);
435 asm("movl %%fs,%0" : "=r" (seg));
436 seq_printf(seq, " FS\t: %04x\n", seg);
437 asm("movl %%gs,%0" : "=r" (seg));
438 seq_printf(seq, " GS\t: %04x\n", seg);
439
440 seq_printf(seq, " EFLAGS\t: %016lx\n", regs->flags);
441
442 seq_printf(seq, " EIP\t: %016lx\n", regs->ip);
443}
444
445static void print_cr(void *arg)
446{
447 struct seq_file *seq = arg;
448
449 seq_printf(seq, " cr0\t: %016lx\n", read_cr0());
450 seq_printf(seq, " cr2\t: %016lx\n", read_cr2());
451 seq_printf(seq, " cr3\t: %016lx\n", read_cr3());
452 seq_printf(seq, " cr4\t: %016lx\n", read_cr4_safe());
453#ifdef CONFIG_X86_64
454 seq_printf(seq, " cr8\t: %016lx\n", read_cr8());
455#endif
456}
457
458static void print_desc_ptr(char *str, struct seq_file *seq, struct desc_ptr dt)
459{
460 seq_printf(seq, " %s\t: %016llx\n", str, (u64)(dt.address | dt.size));
461}
462
463static void print_dt(void *seq)
464{
465 struct desc_ptr dt;
466 unsigned long ldt;
467
468 /* IDT */
469 store_idt((struct desc_ptr *)&dt);
470 print_desc_ptr("IDT", seq, dt);
471
472 /* GDT */
473 store_gdt((struct desc_ptr *)&dt);
474 print_desc_ptr("GDT", seq, dt);
475
476 /* LDT */
477 store_ldt(ldt);
478 seq_printf(seq, " LDT\t: %016lx\n", ldt);
479
480 /* TR */
481 store_tr(ldt);
482 seq_printf(seq, " TR\t: %016lx\n", ldt);
483}
484
485static void print_dr(void *arg)
486{
487 struct seq_file *seq = arg;
488 unsigned long dr;
489 int i;
490
491 for (i = 0; i < 8; i++) {
492 /* Ignore db4, db5 */
493 if ((i == 4) || (i == 5))
494 continue;
495 get_debugreg(dr, i);
496 seq_printf(seq, " dr%d\t: %016lx\n", i, dr);
497 }
498
499 seq_printf(seq, "\n MSR\t:\n");
500}
501
502static void print_apic(void *arg)
503{
504 struct seq_file *seq = arg;
505
506#ifdef CONFIG_X86_LOCAL_APIC
507 seq_printf(seq, " LAPIC\t:\n");
508 seq_printf(seq, " ID\t\t: %08x\n", apic_read(APIC_ID) >> 24);
509 seq_printf(seq, " LVR\t\t: %08x\n", apic_read(APIC_LVR));
510 seq_printf(seq, " TASKPRI\t: %08x\n", apic_read(APIC_TASKPRI));
511 seq_printf(seq, " ARBPRI\t\t: %08x\n", apic_read(APIC_ARBPRI));
512 seq_printf(seq, " PROCPRI\t: %08x\n", apic_read(APIC_PROCPRI));
513 seq_printf(seq, " LDR\t\t: %08x\n", apic_read(APIC_LDR));
514 seq_printf(seq, " DFR\t\t: %08x\n", apic_read(APIC_DFR));
515 seq_printf(seq, " SPIV\t\t: %08x\n", apic_read(APIC_SPIV));
516 seq_printf(seq, " ISR\t\t: %08x\n", apic_read(APIC_ISR));
517 seq_printf(seq, " ESR\t\t: %08x\n", apic_read(APIC_ESR));
518 seq_printf(seq, " ICR\t\t: %08x\n", apic_read(APIC_ICR));
519 seq_printf(seq, " ICR2\t\t: %08x\n", apic_read(APIC_ICR2));
520 seq_printf(seq, " LVTT\t\t: %08x\n", apic_read(APIC_LVTT));
521 seq_printf(seq, " LVTTHMR\t: %08x\n", apic_read(APIC_LVTTHMR));
522 seq_printf(seq, " LVTPC\t\t: %08x\n", apic_read(APIC_LVTPC));
523 seq_printf(seq, " LVT0\t\t: %08x\n", apic_read(APIC_LVT0));
524 seq_printf(seq, " LVT1\t\t: %08x\n", apic_read(APIC_LVT1));
525 seq_printf(seq, " LVTERR\t\t: %08x\n", apic_read(APIC_LVTERR));
526 seq_printf(seq, " TMICT\t\t: %08x\n", apic_read(APIC_TMICT));
527 seq_printf(seq, " TMCCT\t\t: %08x\n", apic_read(APIC_TMCCT));
528 seq_printf(seq, " TDCR\t\t: %08x\n", apic_read(APIC_TDCR));
529#endif /* CONFIG_X86_LOCAL_APIC */
530
531 seq_printf(seq, "\n MSR\t:\n");
532}
533
534static int cpu_seq_show(struct seq_file *seq, void *v)
535{
536 struct cpu_private *priv = seq->private;
537
538 if (priv == NULL)
539 return -EINVAL;
540
541 switch (cpu_base[priv->type].flag) {
542 case CPU_TSS:
543 smp_call_function_single(priv->cpu, print_tss, seq, 1);
544 break;
545 case CPU_CR:
546 smp_call_function_single(priv->cpu, print_cr, seq, 1);
547 break;
548 case CPU_DT:
549 smp_call_function_single(priv->cpu, print_dt, seq, 1);
550 break;
551 case CPU_DEBUG:
552 if (priv->file == CPU_INDEX_BIT)
553 smp_call_function_single(priv->cpu, print_dr, seq, 1);
554 print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
555 break;
556 case CPU_APIC:
557 if (priv->file == CPU_INDEX_BIT)
558 smp_call_function_single(priv->cpu, print_apic, seq, 1);
559 print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
560 break;
561
562 default:
563 print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
564 break;
565 }
566 seq_printf(seq, "\n");
567
568 return 0;
569}
570
571static void *cpu_seq_start(struct seq_file *seq, loff_t *pos)
572{
573 if (*pos == 0) /* One time is enough ;-) */
574 return seq;
575
576 return NULL;
577}
578
579static void *cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
580{
581 (*pos)++;
582
583 return cpu_seq_start(seq, pos);
584}
585
586static void cpu_seq_stop(struct seq_file *seq, void *v)
587{
588}
589
590static const struct seq_operations cpu_seq_ops = {
591 .start = cpu_seq_start,
592 .next = cpu_seq_next,
593 .stop = cpu_seq_stop,
594 .show = cpu_seq_show,
595};
596
597static int cpu_seq_open(struct inode *inode, struct file *file)
598{
599 struct cpu_private *priv = inode->i_private;
600 struct seq_file *seq;
601 int err;
602
603 err = seq_open(file, &cpu_seq_ops);
604 if (!err) {
605 seq = file->private_data;
606 seq->private = priv;
607 }
608
609 return err;
610}
611
612static int write_msr(struct cpu_private *priv, u64 val)
613{
614 u32 low, high;
615
616 high = (val >> 32) & 0xffffffff;
617 low = val & 0xffffffff;
618
619 if (!wrmsr_safe_on_cpu(priv->cpu, priv->reg, low, high))
620 return 0;
621
622 return -EPERM;
623}
624
625static int write_cpu_register(struct cpu_private *priv, const char *buf)
626{
627 int ret = -EPERM;
628 u64 val;
629
630 ret = strict_strtoull(buf, 0, &val);
631 if (ret < 0)
632 return ret;
633
634 /* Supporting only MSRs */
635 if (priv->type < CPU_TSS_BIT)
636 return write_msr(priv, val);
637
638 return ret;
639}
640
641static ssize_t cpu_write(struct file *file, const char __user *ubuf,
642 size_t count, loff_t *off)
643{
644 struct seq_file *seq = file->private_data;
645 struct cpu_private *priv = seq->private;
646 char buf[19];
647
648 if ((priv == NULL) || (count >= sizeof(buf)))
649 return -EINVAL;
650
651 if (copy_from_user(&buf, ubuf, count))
652 return -EFAULT;
653
654 buf[count] = 0;
655
656 if ((cpu_base[priv->type].write) && (cpu_file[priv->file].write))
657 if (!write_cpu_register(priv, buf))
658 return count;
659
660 return -EACCES;
661}
662
663static const struct file_operations cpu_fops = {
664 .owner = THIS_MODULE,
665 .open = cpu_seq_open,
666 .read = seq_read,
667 .write = cpu_write,
668 .llseek = seq_lseek,
669 .release = seq_release,
670};
671
672static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
673 unsigned file, struct dentry *dentry)
674{
675 struct cpu_private *priv = NULL;
676
677	/* Already initialized */
678 if (file == CPU_INDEX_BIT)
679 if (per_cpu(cpu_arr[type].init, cpu))
680 return 0;
681
682 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
683 if (priv == NULL)
684 return -ENOMEM;
685
686 priv->cpu = cpu;
687 priv->type = type;
688 priv->reg = reg;
689 priv->file = file;
690 mutex_lock(&cpu_debug_lock);
691 per_cpu(priv_arr[type], cpu) = priv;
692 per_cpu(cpu_priv_count, cpu)++;
693 mutex_unlock(&cpu_debug_lock);
694
695 if (file)
696 debugfs_create_file(cpu_file[file].name, S_IRUGO,
697 dentry, (void *)priv, &cpu_fops);
698 else {
699 debugfs_create_file(cpu_base[type].name, S_IRUGO,
700 per_cpu(cpu_arr[type].dentry, cpu),
701 (void *)priv, &cpu_fops);
702 mutex_lock(&cpu_debug_lock);
703 per_cpu(cpu_arr[type].init, cpu) = 1;
704 mutex_unlock(&cpu_debug_lock);
705 }
706
707 return 0;
708}
709
710static int cpu_init_regfiles(unsigned cpu, unsigned int type, unsigned reg,
711 struct dentry *dentry)
712{
713 unsigned file;
714 int err = 0;
715
716 for (file = 0; file < ARRAY_SIZE(cpu_file); file++) {
717 err = cpu_create_file(cpu, type, reg, file, dentry);
718 if (err)
719 return err;
720 }
721
722 return err;
723}
724
725static int cpu_init_msr(unsigned cpu, unsigned type, struct dentry *dentry)
726{
727 struct dentry *cpu_dentry = NULL;
728 unsigned reg, reg_min, reg_max;
729 int i, range, err = 0;
730 char reg_dir[12];
731 u32 low, high;
732
733 range = get_cpu_range_count(cpu);
734
735 for (i = 0; i < range; i++) {
736 if (!get_cpu_range(cpu, &reg_min, &reg_max, i,
737 cpu_base[type].flag))
738 continue;
739
740 for (reg = reg_min; reg <= reg_max; reg++) {
741 if (rdmsr_safe_on_cpu(cpu, reg, &low, &high))
742 continue;
743
744 sprintf(reg_dir, "0x%x", reg);
745 cpu_dentry = debugfs_create_dir(reg_dir, dentry);
746 err = cpu_init_regfiles(cpu, type, reg, cpu_dentry);
747 if (err)
748 return err;
749 }
750 }
751
752 return err;
753}
754
755static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
756{
757 struct dentry *cpu_dentry = NULL;
758 unsigned type;
759 int err = 0;
760
761 for (type = 0; type < ARRAY_SIZE(cpu_base) - 1; type++) {
762 if (!is_typeflag_valid(cpu, cpu_base[type].flag))
763 continue;
764 cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
765 per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;
766
767 if (type < CPU_TSS_BIT)
768 err = cpu_init_msr(cpu, type, cpu_dentry);
769 else
770 err = cpu_create_file(cpu, type, 0, CPU_INDEX_BIT,
771 cpu_dentry);
772 if (err)
773 return err;
774 }
775
776 return err;
777}
778
779static int cpu_init_cpu(void)
780{
781 struct dentry *cpu_dentry = NULL;
782 struct cpuinfo_x86 *cpui;
783 char cpu_dir[12];
784 unsigned cpu;
785 int err = 0;
786
787 for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
788 cpui = &cpu_data(cpu);
789 if (!cpu_has(cpui, X86_FEATURE_MSR))
790 continue;
791 per_cpu(cpu_model, cpu) = ((cpui->x86_vendor << 16) |
792 (cpui->x86 << 8) |
793 (cpui->x86_model));
794 per_cpu(cpu_modelflag, cpu) = get_cpu_modelflag(cpu);
795
796 sprintf(cpu_dir, "cpu%d", cpu);
797 cpu_dentry = debugfs_create_dir(cpu_dir, cpu_debugfs_dir);
798 err = cpu_init_allreg(cpu, cpu_dentry);
799
800 pr_info("cpu%d(%d) debug files %d\n",
801 cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
802 if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
803 pr_err("Register files count %d exceeds limit %d\n",
804 per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
805 per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
806 err = -ENFILE;
807 }
808 if (err)
809 return err;
810 }
811
812 return err;
813}
814
815static int __init cpu_debug_init(void)
816{
817 cpu_debugfs_dir = debugfs_create_dir("cpu", arch_debugfs_dir);
818
819 return cpu_init_cpu();
820}
821
822static void __exit cpu_debug_exit(void)
823{
824 int i, cpu;
825
826 if (cpu_debugfs_dir)
827 debugfs_remove_recursive(cpu_debugfs_dir);
828
829 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
830 for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
831 kfree(per_cpu(priv_arr[i], cpu));
832}
833
834module_init(cpu_debug_init);
835module_exit(cpu_debug_exit);
836
837MODULE_AUTHOR("Jaswinder Singh Rajput");
838MODULE_DESCRIPTION("CPU Debug module");
839MODULE_LICENSE("GPL");
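cpu_init_cpu() above packs vendor, family and model into a single per-cpu word ((vendor << 16) | (family << 8) | model), which get_cpu_modelflag() then matches whole; that works for the Intel cases because X86_VENDOR_INTEL is 0. A standalone illustration of the encoding, using made-up values for a family-6/model-0xF part (CPU_INTEL_CORE2 in the switch above):

	#include <stdio.h>

	int main(void)
	{
		unsigned vendor = 0 /* X86_VENDOR_INTEL */, family = 0x6, model = 0xF;
		unsigned cpu_model = (vendor << 16) | (family << 8) | model;

		/* get_cpu_range_count() keys off the top half ... */
		printf("vendor       : %u\n", cpu_model >> 16);
		/* ... and get_cpu_modelflag() matches 0x060F below it */
		printf("family/model : 0x%04x\n", cpu_model & 0xffff);

		return 0;
	}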
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index ffd0f5ed071a..593171e967ef 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -61,23 +61,23 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
61 */ 61 */
62static unsigned char Cx86_dir0_msb __cpuinitdata = 0; 62static unsigned char Cx86_dir0_msb __cpuinitdata = 0;
63 63
64static char Cx86_model[][9] __cpuinitdata = { 64static const char __cpuinitconst Cx86_model[][9] = {
65 "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ", 65 "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
66 "M II ", "Unknown" 66 "M II ", "Unknown"
67}; 67};
68static char Cx486_name[][5] __cpuinitdata = { 68static const char __cpuinitconst Cx486_name[][5] = {
69 "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx", 69 "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
70 "SRx2", "DRx2" 70 "SRx2", "DRx2"
71}; 71};
72static char Cx486S_name[][4] __cpuinitdata = { 72static const char __cpuinitconst Cx486S_name[][4] = {
73 "S", "S2", "Se", "S2e" 73 "S", "S2", "Se", "S2e"
74}; 74};
75static char Cx486D_name[][4] __cpuinitdata = { 75static const char __cpuinitconst Cx486D_name[][4] = {
76 "DX", "DX2", "?", "?", "?", "DX4" 76 "DX", "DX2", "?", "?", "?", "DX4"
77}; 77};
78static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock"; 78static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock";
79static char cyrix_model_mult1[] __cpuinitdata = "12??43"; 79static const char __cpuinitconst cyrix_model_mult1[] = "12??43";
80static char cyrix_model_mult2[] __cpuinitdata = "12233445"; 80static const char __cpuinitconst cyrix_model_mult2[] = "12233445";
81 81
82/* 82/*
83 * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old 83 * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
@@ -435,7 +435,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
435 } 435 }
436} 436}
437 437
438static struct cpu_dev cyrix_cpu_dev __cpuinitdata = { 438static const struct cpu_dev __cpuinitconst cyrix_cpu_dev = {
439 .c_vendor = "Cyrix", 439 .c_vendor = "Cyrix",
440 .c_ident = { "CyrixInstead" }, 440 .c_ident = { "CyrixInstead" },
441 .c_early_init = early_init_cyrix, 441 .c_early_init = early_init_cyrix,
@@ -446,7 +446,7 @@ static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
446 446
447cpu_dev_register(cyrix_cpu_dev); 447cpu_dev_register(cyrix_cpu_dev);
448 448
449static struct cpu_dev nsc_cpu_dev __cpuinitdata = { 449static const struct cpu_dev __cpuinitconst nsc_cpu_dev = {
450 .c_vendor = "NSC", 450 .c_vendor = "NSC",
451 .c_ident = { "Geode by NSC" }, 451 .c_ident = { "Geode by NSC" },
452 .c_init = init_nsc, 452 .c_init = init_nsc,
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 191117f1ad51..b09d4eb52bb9 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -54,6 +54,11 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
54 c->x86_cache_alignment = 128; 54 c->x86_cache_alignment = 128;
55#endif 55#endif
56 56
57 /* CPUID workaround for 0F33/0F34 CPU */
58 if (c->x86 == 0xF && c->x86_model == 0x3
59 && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
60 c->x86_phys_bits = 36;
61
57 /* 62 /*
58 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate 63 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
59 * with P/T states and does not stop in deep C-states 64 * with P/T states and does not stop in deep C-states
@@ -410,7 +415,7 @@ static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned i
410} 415}
411#endif 416#endif
412 417
413static struct cpu_dev intel_cpu_dev __cpuinitdata = { 418static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
414 .c_vendor = "Intel", 419 .c_vendor = "Intel",
415 .c_ident = { "GenuineIntel" }, 420 .c_ident = { "GenuineIntel" },
416#ifdef CONFIG_X86_32 421#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 7293508d8f5c..c471eb1a389c 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -32,7 +32,7 @@ struct _cache_table
32}; 32};
33 33
34/* all the cache descriptor types we care about (no TLB or trace cache entries) */ 34/* all the cache descriptor types we care about (no TLB or trace cache entries) */
35static struct _cache_table cache_table[] __cpuinitdata = 35static const struct _cache_table __cpuinitconst cache_table[] =
36{ 36{
37 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ 37 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
38 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ 38 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
@@ -206,15 +206,15 @@ union l3_cache {
206 unsigned val; 206 unsigned val;
207}; 207};
208 208
209static unsigned short assocs[] __cpuinitdata = { 209static const unsigned short __cpuinitconst assocs[] = {
210 [1] = 1, [2] = 2, [4] = 4, [6] = 8, 210 [1] = 1, [2] = 2, [4] = 4, [6] = 8,
211 [8] = 16, [0xa] = 32, [0xb] = 48, 211 [8] = 16, [0xa] = 32, [0xb] = 48,
212 [0xc] = 64, 212 [0xc] = 64,
213 [0xf] = 0xffff // ?? 213 [0xf] = 0xffff // ??
214}; 214};
215 215
216static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 }; 216static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
217static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 }; 217static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
218 218
219static void __cpuinit 219static void __cpuinit
220amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, 220amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c
index 52b3fefbd5af..bb62b3e5caad 100644
--- a/arch/x86/kernel/cpu/transmeta.c
+++ b/arch/x86/kernel/cpu/transmeta.c
@@ -98,7 +98,7 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
98#endif 98#endif
99} 99}
100 100
101static struct cpu_dev transmeta_cpu_dev __cpuinitdata = { 101static const struct cpu_dev __cpuinitconst transmeta_cpu_dev = {
102 .c_vendor = "Transmeta", 102 .c_vendor = "Transmeta",
103 .c_ident = { "GenuineTMx86", "TransmetaCPU" }, 103 .c_ident = { "GenuineTMx86", "TransmetaCPU" },
104 .c_early_init = early_init_transmeta, 104 .c_early_init = early_init_transmeta,
diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c
index e777f79e0960..fd2c37bf7acb 100644
--- a/arch/x86/kernel/cpu/umc.c
+++ b/arch/x86/kernel/cpu/umc.c
@@ -8,7 +8,7 @@
8 * so no special init takes place. 8 * so no special init takes place.
9 */ 9 */
10 10
11static struct cpu_dev umc_cpu_dev __cpuinitdata = { 11static const struct cpu_dev __cpuinitconst umc_cpu_dev = {
12 .c_vendor = "UMC", 12 .c_vendor = "UMC",
13 .c_ident = { "UMC UMC UMC" }, 13 .c_ident = { "UMC UMC UMC" },
14 .c_models = { 14 .c_models = {
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 508bec1cee27..95b81c18b6bc 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -110,19 +110,25 @@ int __init e820_all_mapped(u64 start, u64 end, unsigned type)
110/* 110/*
111 * Add a memory region to the kernel e820 map. 111 * Add a memory region to the kernel e820 map.
112 */ 112 */
113void __init e820_add_region(u64 start, u64 size, int type) 113static void __init __e820_add_region(struct e820map *e820x, u64 start, u64 size,
114 int type)
114{ 115{
115 int x = e820.nr_map; 116 int x = e820x->nr_map;
116 117
117 if (x == ARRAY_SIZE(e820.map)) { 118 if (x == ARRAY_SIZE(e820x->map)) {
118 printk(KERN_ERR "Ooops! Too many entries in the memory map!\n"); 119 printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
119 return; 120 return;
120 } 121 }
121 122
122 e820.map[x].addr = start; 123 e820x->map[x].addr = start;
123 e820.map[x].size = size; 124 e820x->map[x].size = size;
124 e820.map[x].type = type; 125 e820x->map[x].type = type;
125 e820.nr_map++; 126 e820x->nr_map++;
127}
128
129void __init e820_add_region(u64 start, u64 size, int type)
130{
131 __e820_add_region(&e820, start, size, type);
126} 132}
127 133
128void __init e820_print_map(char *who) 134void __init e820_print_map(char *who)
@@ -417,11 +423,11 @@ static int __init append_e820_map(struct e820entry *biosmap, int nr_map)
417 return __append_e820_map(biosmap, nr_map); 423 return __append_e820_map(biosmap, nr_map);
418} 424}
419 425
420static u64 __init e820_update_range_map(struct e820map *e820x, u64 start, 426static u64 __init __e820_update_range(struct e820map *e820x, u64 start,
421 u64 size, unsigned old_type, 427 u64 size, unsigned old_type,
422 unsigned new_type) 428 unsigned new_type)
423{ 429{
424 int i; 430 unsigned int i;
425 u64 real_updated_size = 0; 431 u64 real_updated_size = 0;
426 432
427 BUG_ON(old_type == new_type); 433 BUG_ON(old_type == new_type);
@@ -429,7 +435,7 @@ static u64 __init e820_update_range_map(struct e820map *e820x, u64 start,
429 if (size > (ULLONG_MAX - start)) 435 if (size > (ULLONG_MAX - start))
430 size = ULLONG_MAX - start; 436 size = ULLONG_MAX - start;
431 437
432 for (i = 0; i < e820.nr_map; i++) { 438 for (i = 0; i < e820x->nr_map; i++) {
433 struct e820entry *ei = &e820x->map[i]; 439 struct e820entry *ei = &e820x->map[i];
434 u64 final_start, final_end; 440 u64 final_start, final_end;
435 if (ei->type != old_type) 441 if (ei->type != old_type)
@@ -446,10 +452,16 @@ static u64 __init e820_update_range_map(struct e820map *e820x, u64 start,
446 final_end = min(start + size, ei->addr + ei->size); 452 final_end = min(start + size, ei->addr + ei->size);
447 if (final_start >= final_end) 453 if (final_start >= final_end)
448 continue; 454 continue;
449 e820_add_region(final_start, final_end - final_start, 455
450 new_type); 456 __e820_add_region(e820x, final_start, final_end - final_start,
457 new_type);
458
451 real_updated_size += final_end - final_start; 459 real_updated_size += final_end - final_start;
452 460
461 /*
462 * left range could be head or tail, so need to update
463 * size at first.
464 */
453 ei->size -= final_end - final_start; 465 ei->size -= final_end - final_start;
454 if (ei->addr < final_start) 466 if (ei->addr < final_start)
455 continue; 467 continue;
@@ -461,13 +473,13 @@ static u64 __init e820_update_range_map(struct e820map *e820x, u64 start,
461u64 __init e820_update_range(u64 start, u64 size, unsigned old_type, 473u64 __init e820_update_range(u64 start, u64 size, unsigned old_type,
462 unsigned new_type) 474 unsigned new_type)
463{ 475{
464 return e820_update_range_map(&e820, start, size, old_type, new_type); 476 return __e820_update_range(&e820, start, size, old_type, new_type);
465} 477}
466 478
467static u64 __init e820_update_range_saved(u64 start, u64 size, 479static u64 __init e820_update_range_saved(u64 start, u64 size,
468 unsigned old_type, unsigned new_type) 480 unsigned old_type, unsigned new_type)
469{ 481{
470 return e820_update_range_map(&e820_saved, start, size, old_type, 482 return __e820_update_range(&e820_saved, start, size, old_type,
471 new_type); 483 new_type);
472} 484}
473 485
@@ -1020,8 +1032,8 @@ u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align)
1020 continue; 1032 continue;
1021 return addr; 1033 return addr;
1022 } 1034 }
1023 return -1UL;
1024 1035
1036 return -1ULL;
1025} 1037}
1026 1038
1027/* 1039/*
@@ -1034,13 +1046,22 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
1034 u64 start; 1046 u64 start;
1035 1047
1036 start = startt; 1048 start = startt;
1037 while (size < sizet) 1049 while (size < sizet && (start + 1))
1038 start = find_e820_area_size(start, &size, align); 1050 start = find_e820_area_size(start, &size, align);
1039 1051
1040 if (size < sizet) 1052 if (size < sizet)
1041 return 0; 1053 return 0;
1042 1054
1055#ifdef CONFIG_X86_32
1056 if (start >= MAXMEM)
1057 return 0;
1058 if (start + size > MAXMEM)
1059 size = MAXMEM - start;
1060#endif
1061
1043 addr = round_down(start + size - sizet, align); 1062 addr = round_down(start + size - sizet, align);
1063 if (addr < start)
1064 return 0;
1044 e820_update_range(addr, sizet, E820_RAM, E820_RESERVED); 1065 e820_update_range(addr, sizet, E820_RAM, E820_RESERVED);
1045 e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED); 1066 e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED);
1046 printk(KERN_INFO "update e820 for early_reserve_e820\n"); 1067 printk(KERN_INFO "update e820 for early_reserve_e820\n");
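__e820_update_range() clips each request against every entry of the old type: the intersection is re-added with the new type, and whatever survives of the entry may sit before (head) or after (tail) the converted span, which is why its size is adjusted before its address. The same clipping arithmetic as a self-contained sketch, with hypothetical addresses:

	#include <stdio.h>

	typedef unsigned long long u64;

	int main(void)
	{
		/* entry [0x1000, 0x5000), update request [0x3000, 0x9000) */
		u64 ei_addr = 0x1000, ei_size = 0x4000;
		u64 start = 0x3000, size = 0x6000;

		u64 final_start = start > ei_addr ? start : ei_addr;
		u64 req_end = start + size, ei_end = ei_addr + ei_size;
		u64 final_end = req_end < ei_end ? req_end : ei_end;

		/* [0x3000, 0x5000) converts; [0x1000, 0x3000) remains as the head */
		printf("converted: [%#llx, %#llx)\n", final_start, final_end);

		return 0;
	}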
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 639ad98238a2..335f049d110f 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -250,7 +250,7 @@ static int dbgp_wait_until_complete(void)
250 return (ctrl & DBGP_ERROR) ? -DBGP_ERRCODE(ctrl) : DBGP_LEN(ctrl); 250 return (ctrl & DBGP_ERROR) ? -DBGP_ERRCODE(ctrl) : DBGP_LEN(ctrl);
251} 251}
252 252
253static void dbgp_mdelay(int ms) 253static void __init dbgp_mdelay(int ms)
254{ 254{
255 int i; 255 int i;
256 256
@@ -311,7 +311,7 @@ static void dbgp_set_data(const void *buf, int size)
311 writel(hi, &ehci_debug->data47); 311 writel(hi, &ehci_debug->data47);
312} 312}
313 313
314static void dbgp_get_data(void *buf, int size) 314static void __init dbgp_get_data(void *buf, int size)
315{ 315{
316 unsigned char *bytes = buf; 316 unsigned char *bytes = buf;
317 u32 lo, hi; 317 u32 lo, hi;
@@ -355,7 +355,7 @@ static int dbgp_bulk_write(unsigned devnum, unsigned endpoint,
355 return ret; 355 return ret;
356} 356}
357 357
358static int dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data, 358static int __init dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
359 int size) 359 int size)
360{ 360{
361 u32 pids, addr, ctrl; 361 u32 pids, addr, ctrl;
@@ -386,8 +386,8 @@ static int dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
386 return ret; 386 return ret;
387} 387}
388 388
389static int dbgp_control_msg(unsigned devnum, int requesttype, int request, 389static int __init dbgp_control_msg(unsigned devnum, int requesttype,
390 int value, int index, void *data, int size) 390 int request, int value, int index, void *data, int size)
391{ 391{
392 u32 pids, addr, ctrl; 392 u32 pids, addr, ctrl;
393 struct usb_ctrlrequest req; 393 struct usb_ctrlrequest req;
@@ -489,7 +489,7 @@ static u32 __init find_dbgp(int ehci_num, u32 *rbus, u32 *rslot, u32 *rfunc)
489 return 0; 489 return 0;
490} 490}
491 491
492static int ehci_reset_port(int port) 492static int __init ehci_reset_port(int port)
493{ 493{
494 u32 portsc; 494 u32 portsc;
495 u32 delay_time, delay; 495 u32 delay_time, delay;
@@ -532,7 +532,7 @@ static int ehci_reset_port(int port)
532 return -EBUSY; 532 return -EBUSY;
533} 533}
534 534
535static int ehci_wait_for_port(int port) 535static int __init ehci_wait_for_port(int port)
536{ 536{
537 u32 status; 537 u32 status;
538 int ret, reps; 538 int ret, reps;
@@ -557,13 +557,13 @@ static inline void dbgp_printk(const char *fmt, ...) { }
557 557
558typedef void (*set_debug_port_t)(int port); 558typedef void (*set_debug_port_t)(int port);
559 559
560static void default_set_debug_port(int port) 560static void __init default_set_debug_port(int port)
561{ 561{
562} 562}
563 563
564static set_debug_port_t set_debug_port = default_set_debug_port; 564static set_debug_port_t __initdata set_debug_port = default_set_debug_port;
565 565
566static void nvidia_set_debug_port(int port) 566static void __init nvidia_set_debug_port(int port)
567{ 567{
568 u32 dword; 568 u32 dword;
569 dword = read_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func, 569 dword = read_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func,
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 899e8938e79f..c929add475c9 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -442,8 +442,7 @@ sysenter_past_esp:
442 442
443 GET_THREAD_INFO(%ebp) 443 GET_THREAD_INFO(%ebp)
444 444
445 /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */ 445 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
446 testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
447 jnz sysenter_audit 446 jnz sysenter_audit
448sysenter_do_call: 447sysenter_do_call:
449 cmpl $(nr_syscalls), %eax 448 cmpl $(nr_syscalls), %eax
@@ -454,7 +453,7 @@ sysenter_do_call:
454 DISABLE_INTERRUPTS(CLBR_ANY) 453 DISABLE_INTERRUPTS(CLBR_ANY)
455 TRACE_IRQS_OFF 454 TRACE_IRQS_OFF
456 movl TI_flags(%ebp), %ecx 455 movl TI_flags(%ebp), %ecx
457 testw $_TIF_ALLWORK_MASK, %cx 456 testl $_TIF_ALLWORK_MASK, %ecx
458 jne sysexit_audit 457 jne sysexit_audit
459sysenter_exit: 458sysenter_exit:
460/* if something modifies registers it must also disable sysexit */ 459/* if something modifies registers it must also disable sysexit */
@@ -468,7 +467,7 @@ sysenter_exit:
468 467
469#ifdef CONFIG_AUDITSYSCALL 468#ifdef CONFIG_AUDITSYSCALL
470sysenter_audit: 469sysenter_audit:
471 testw $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp) 470 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
472 jnz syscall_trace_entry 471 jnz syscall_trace_entry
473 addl $4,%esp 472 addl $4,%esp
474 CFI_ADJUST_CFA_OFFSET -4 473 CFI_ADJUST_CFA_OFFSET -4
@@ -485,7 +484,7 @@ sysenter_audit:
485 jmp sysenter_do_call 484 jmp sysenter_do_call
486 485
487sysexit_audit: 486sysexit_audit:
488 testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx 487 testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
489 jne syscall_exit_work 488 jne syscall_exit_work
490 TRACE_IRQS_ON 489 TRACE_IRQS_ON
491 ENABLE_INTERRUPTS(CLBR_ANY) 490 ENABLE_INTERRUPTS(CLBR_ANY)
@@ -498,7 +497,7 @@ sysexit_audit:
498 DISABLE_INTERRUPTS(CLBR_ANY) 497 DISABLE_INTERRUPTS(CLBR_ANY)
499 TRACE_IRQS_OFF 498 TRACE_IRQS_OFF
500 movl TI_flags(%ebp), %ecx 499 movl TI_flags(%ebp), %ecx
501 testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx 500 testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
502 jne syscall_exit_work 501 jne syscall_exit_work
503 movl PT_EAX(%esp),%eax /* reload syscall return value */ 502 movl PT_EAX(%esp),%eax /* reload syscall return value */
504 jmp sysenter_exit 503 jmp sysenter_exit
@@ -523,8 +522,7 @@ ENTRY(system_call)
523 SAVE_ALL 522 SAVE_ALL
524 GET_THREAD_INFO(%ebp) 523 GET_THREAD_INFO(%ebp)
525 # system call tracing in operation / emulation 524 # system call tracing in operation / emulation
526 /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */ 525 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
527 testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
528 jnz syscall_trace_entry 526 jnz syscall_trace_entry
529 cmpl $(nr_syscalls), %eax 527 cmpl $(nr_syscalls), %eax
530 jae syscall_badsys 528 jae syscall_badsys
@@ -538,7 +536,7 @@ syscall_exit:
538 # between sampling and the iret 536 # between sampling and the iret
539 TRACE_IRQS_OFF 537 TRACE_IRQS_OFF
540 movl TI_flags(%ebp), %ecx 538 movl TI_flags(%ebp), %ecx
541 testw $_TIF_ALLWORK_MASK, %cx # current->work 539 testl $_TIF_ALLWORK_MASK, %ecx # current->work
542 jne syscall_exit_work 540 jne syscall_exit_work
543 541
544restore_all: 542restore_all:
@@ -673,7 +671,7 @@ END(syscall_trace_entry)
673 # perform syscall exit tracing 671 # perform syscall exit tracing
674 ALIGN 672 ALIGN
675syscall_exit_work: 673syscall_exit_work:
676 testb $_TIF_WORK_SYSCALL_EXIT, %cl 674 testl $_TIF_WORK_SYSCALL_EXIT, %ecx
677 jz work_pending 675 jz work_pending
678 TRACE_IRQS_ON 676 TRACE_IRQS_ON
679 ENABLE_INTERRUPTS(CLBR_ANY) # could let syscall_trace_leave() call 677 ENABLE_INTERRUPTS(CLBR_ANY) # could let syscall_trace_leave() call
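The testw to testl conversions above are not cosmetic: a 16-bit test inspects only bits 0-15 of TI_flags, so any work flag assigned a bit above 15 would be silently missed, and the 66h operand-size prefix that the testw encoding requires can also stall instruction decode. A toy C illustration of the width difference, using a made-up flag bit:

	#include <stdio.h>

	#define TIF_EXAMPLE_HIGH	(1u << 16)	/* hypothetical flag above bit 15 */

	int main(void)
	{
		unsigned int ti_flags = TIF_EXAMPLE_HIGH;

		/* testw-style: only the low 16 bits participate */
		printf("16-bit test sees %#x\n", ti_flags & 0xffffu);
		/* testl-style: the whole register participates */
		printf("32-bit test sees %#x\n", ti_flags);

		return 0;
	}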
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 7ba4621c0dfa..a331ec38af9e 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -368,6 +368,7 @@ ENTRY(save_rest)
368END(save_rest) 368END(save_rest)
369 369
370/* save complete stack frame */ 370/* save complete stack frame */
371 .pushsection .kprobes.text, "ax"
371ENTRY(save_paranoid) 372ENTRY(save_paranoid)
372 XCPT_FRAME 1 RDI+8 373 XCPT_FRAME 1 RDI+8
373 cld 374 cld
@@ -396,6 +397,7 @@ ENTRY(save_paranoid)
3961: ret 3971: ret
397 CFI_ENDPROC 398 CFI_ENDPROC
398END(save_paranoid) 399END(save_paranoid)
400 .popsection
399 401
400/* 402/*
401 * A newly forked process directly context switches into this address. 403 * A newly forked process directly context switches into this address.
@@ -416,7 +418,6 @@ ENTRY(ret_from_fork)
416 418
417 GET_THREAD_INFO(%rcx) 419 GET_THREAD_INFO(%rcx)
418 420
419 CFI_REMEMBER_STATE
420 RESTORE_REST 421 RESTORE_REST
421 422
422 testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread? 423 testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
@@ -428,7 +429,6 @@ ENTRY(ret_from_fork)
428 RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET 429 RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
429 jmp ret_from_sys_call # go to the SYSRET fastpath 430 jmp ret_from_sys_call # go to the SYSRET fastpath
430 431
431 CFI_RESTORE_STATE
432 CFI_ENDPROC 432 CFI_ENDPROC
433END(ret_from_fork) 433END(ret_from_fork)
434 434
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index b864341dcc45..b8ac3b6cf776 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -45,16 +45,16 @@ void ack_bad_irq(unsigned int irq)
45/* 45/*
46 * /proc/interrupts printing: 46 * /proc/interrupts printing:
47 */ 47 */
48static int show_other_interrupts(struct seq_file *p) 48static int show_other_interrupts(struct seq_file *p, int prec)
49{ 49{
50 int j; 50 int j;
51 51
52 seq_printf(p, "NMI: "); 52 seq_printf(p, "%*s: ", prec, "NMI");
53 for_each_online_cpu(j) 53 for_each_online_cpu(j)
54 seq_printf(p, "%10u ", irq_stats(j)->__nmi_count); 54 seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
55 seq_printf(p, " Non-maskable interrupts\n"); 55 seq_printf(p, " Non-maskable interrupts\n");
56#ifdef CONFIG_X86_LOCAL_APIC 56#ifdef CONFIG_X86_LOCAL_APIC
57 seq_printf(p, "LOC: "); 57 seq_printf(p, "%*s: ", prec, "LOC");
58 for_each_online_cpu(j) 58 for_each_online_cpu(j)
59 seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs); 59 seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
60 seq_printf(p, " Local timer interrupts\n"); 60 seq_printf(p, " Local timer interrupts\n");
@@ -66,40 +66,40 @@ static int show_other_interrupts(struct seq_file *p)
66 seq_printf(p, " Platform interrupts\n"); 66 seq_printf(p, " Platform interrupts\n");
67 } 67 }
68#ifdef CONFIG_SMP 68#ifdef CONFIG_SMP
69 seq_printf(p, "RES: "); 69 seq_printf(p, "%*s: ", prec, "RES");
70 for_each_online_cpu(j) 70 for_each_online_cpu(j)
71 seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count); 71 seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
72 seq_printf(p, " Rescheduling interrupts\n"); 72 seq_printf(p, " Rescheduling interrupts\n");
73 seq_printf(p, "CAL: "); 73 seq_printf(p, "%*s: ", prec, "CAL");
74 for_each_online_cpu(j) 74 for_each_online_cpu(j)
75 seq_printf(p, "%10u ", irq_stats(j)->irq_call_count); 75 seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
76 seq_printf(p, " Function call interrupts\n"); 76 seq_printf(p, " Function call interrupts\n");
77 seq_printf(p, "TLB: "); 77 seq_printf(p, "%*s: ", prec, "TLB");
78 for_each_online_cpu(j) 78 for_each_online_cpu(j)
79 seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count); 79 seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
80 seq_printf(p, " TLB shootdowns\n"); 80 seq_printf(p, " TLB shootdowns\n");
81#endif 81#endif
82#ifdef CONFIG_X86_MCE 82#ifdef CONFIG_X86_MCE
83 seq_printf(p, "TRM: "); 83 seq_printf(p, "%*s: ", prec, "TRM");
84 for_each_online_cpu(j) 84 for_each_online_cpu(j)
85 seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count); 85 seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
86 seq_printf(p, " Thermal event interrupts\n"); 86 seq_printf(p, " Thermal event interrupts\n");
87# ifdef CONFIG_X86_64 87# ifdef CONFIG_X86_64
88 seq_printf(p, "THR: "); 88 seq_printf(p, "%*s: ", prec, "THR");
89 for_each_online_cpu(j) 89 for_each_online_cpu(j)
90 seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count); 90 seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
91 seq_printf(p, " Threshold APIC interrupts\n"); 91 seq_printf(p, " Threshold APIC interrupts\n");
92# endif 92# endif
93#endif 93#endif
94#ifdef CONFIG_X86_LOCAL_APIC 94#ifdef CONFIG_X86_LOCAL_APIC
95 seq_printf(p, "SPU: "); 95 seq_printf(p, "%*s: ", prec, "SPU");
96 for_each_online_cpu(j) 96 for_each_online_cpu(j)
97 seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count); 97 seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
98 seq_printf(p, " Spurious interrupts\n"); 98 seq_printf(p, " Spurious interrupts\n");
99#endif 99#endif
100 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); 100 seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
101#if defined(CONFIG_X86_IO_APIC) 101#if defined(CONFIG_X86_IO_APIC)
102 seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count)); 102 seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
103#endif 103#endif
104 return 0; 104 return 0;
105} 105}
@@ -107,19 +107,22 @@ static int show_other_interrupts(struct seq_file *p)
107int show_interrupts(struct seq_file *p, void *v) 107int show_interrupts(struct seq_file *p, void *v)
108{ 108{
109 unsigned long flags, any_count = 0; 109 unsigned long flags, any_count = 0;
110 int i = *(loff_t *) v, j; 110 int i = *(loff_t *) v, j, prec;
111 struct irqaction *action; 111 struct irqaction *action;
112 struct irq_desc *desc; 112 struct irq_desc *desc;
113 113
114 if (i > nr_irqs) 114 if (i > nr_irqs)
115 return 0; 115 return 0;
116 116
117 for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
118 j *= 10;
119
117 if (i == nr_irqs) 120 if (i == nr_irqs)
118 return show_other_interrupts(p); 121 return show_other_interrupts(p, prec);
119 122
120 /* print header */ 123 /* print header */
121 if (i == 0) { 124 if (i == 0) {
122 seq_printf(p, " "); 125 seq_printf(p, "%*s", prec + 8, "");
123 for_each_online_cpu(j) 126 for_each_online_cpu(j)
124 seq_printf(p, "CPU%-8d", j); 127 seq_printf(p, "CPU%-8d", j);
125 seq_putc(p, '\n'); 128 seq_putc(p, '\n');
@@ -140,7 +143,7 @@ int show_interrupts(struct seq_file *p, void *v)
140 if (!action && !any_count) 143 if (!action && !any_count)
141 goto out; 144 goto out;
142 145
143 seq_printf(p, "%3d: ", i); 146 seq_printf(p, "%*d: ", prec, i);
144#ifndef CONFIG_SMP 147#ifndef CONFIG_SMP
145 seq_printf(p, "%10u ", kstat_irqs(i)); 148 seq_printf(p, "%10u ", kstat_irqs(i));
146#else 149#else
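The new prec loop sizes the /proc/interrupts label column dynamically: it starts at 3 digits and widens by one for each extra decimal order of magnitude in nr_irqs, capping at 10. The same computation extracted into plain C, with sample values:

	#include <stdio.h>

	static int irq_prec(int nr_irqs)
	{
		int prec, j;

		for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
			j *= 10;

		return prec;
	}

	int main(void)
	{
		/* prints 3, 4 and 5 respectively */
		printf("%d %d %d\n", irq_prec(256), irq_prec(1000), irq_prec(65536));
		return 0;
	}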
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index 666e43df51f9..712d15fdc416 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -226,7 +226,7 @@ static int __devinit set_check_enable_amd_mmconf(const struct dmi_system_id *d)
226 return 0; 226 return 0;
227} 227}
228 228
229static struct dmi_system_id __devinitdata mmconf_dmi_table[] = { 229static const struct dmi_system_id __cpuinitconst mmconf_dmi_table[] = {
230 { 230 {
231 .callback = set_check_enable_amd_mmconf, 231 .callback = set_check_enable_amd_mmconf,
232 .ident = "Sun Microsystems Machine", 232 .ident = "Sun Microsystems Machine",
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index e8192401da47..47673e02ae58 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -890,12 +890,12 @@ static int __init replace_intsrc_all(struct mpc_table *mpc,
890#ifdef CONFIG_X86_IO_APIC 890#ifdef CONFIG_X86_IO_APIC
891 struct mpc_intsrc *m = (struct mpc_intsrc *)mpt; 891 struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
892 892
893 printk(KERN_INFO "OLD "); 893 apic_printk(APIC_VERBOSE, "OLD ");
894 print_MP_intsrc_info(m); 894 print_MP_intsrc_info(m);
895 i = get_MP_intsrc_index(m); 895 i = get_MP_intsrc_index(m);
896 if (i > 0) { 896 if (i > 0) {
897 assign_to_mpc_intsrc(&mp_irqs[i], m); 897 assign_to_mpc_intsrc(&mp_irqs[i], m);
898 printk(KERN_INFO "NEW "); 898 apic_printk(APIC_VERBOSE, "NEW ");
899 print_mp_irq_info(&mp_irqs[i]); 899 print_mp_irq_info(&mp_irqs[i]);
900 } else if (!i) { 900 } else if (!i) {
901 /* legacy, do nothing */ 901 /* legacy, do nothing */
@@ -943,7 +943,7 @@ static int __init replace_intsrc_all(struct mpc_table *mpc,
943 continue; 943 continue;
944 944
945 if (nr_m_spare > 0) { 945 if (nr_m_spare > 0) {
946 printk(KERN_INFO "*NEW* found "); 946 apic_printk(APIC_VERBOSE, "*NEW* found\n");
947 nr_m_spare--; 947 nr_m_spare--;
948 assign_to_mpc_intsrc(&mp_irqs[i], m_spare[nr_m_spare]); 948 assign_to_mpc_intsrc(&mp_irqs[i], m_spare[nr_m_spare]);
949 m_spare[nr_m_spare] = NULL; 949 m_spare[nr_m_spare] = NULL;
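apic_printk() gates these messages on the apic= boot parameter, so the OLD/NEW intsrc dumps only appear with apic=verbose instead of on every boot. The macro is roughly the following (paraphrased from asm/apic.h, not a verbatim copy):

	#define apic_printk(v, s, a...)			\
	do {						\
		if ((v) <= apic_verbosity)		\
			printk(s, ##a);			\
	} while (0)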
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 3d9672e59c16..19378715f415 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -685,9 +685,8 @@ static int ptrace_bts_config(struct task_struct *child,
685 if (!cfg.signal) 685 if (!cfg.signal)
686 return -EINVAL; 686 return -EINVAL;
687 687
688 return -EOPNOTSUPP;
689
690 child->thread.bts_ovfl_signal = cfg.signal; 688 child->thread.bts_ovfl_signal = cfg.signal;
689 return -EOPNOTSUPP;
691 } 690 }
692 691
693 if ((cfg.flags & PTRACE_BTS_O_ALLOC) && 692 if ((cfg.flags & PTRACE_BTS_O_ALLOC) &&
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 309949e9e1c1..6a5a2970f4c5 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -74,8 +74,7 @@ static void ich_force_hpet_resume(void)
74 if (!force_hpet_address) 74 if (!force_hpet_address)
75 return; 75 return;
76 76
77 if (rcba_base == NULL) 77 BUG_ON(rcba_base == NULL);
78 BUG();
79 78
80 /* read the Function Disable register, dword mode only */ 79 /* read the Function Disable register, dword mode only */
81 val = readl(rcba_base + 0x3404); 80 val = readl(rcba_base + 0x3404);
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index efa615f2bf43..400331b50a53 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -233,8 +233,8 @@ proceed:
233 "%zu bytes\n", vm.addr, static_size); 233 "%zu bytes\n", vm.addr, static_size);
234 234
235 ret = pcpu_setup_first_chunk(pcpur_get_page, static_size, 235 ret = pcpu_setup_first_chunk(pcpur_get_page, static_size,
236 PERCPU_FIRST_CHUNK_RESERVE, 236 PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
237 PMD_SIZE, dyn_size, vm.addr, NULL); 237 PMD_SIZE, vm.addr, NULL);
238 goto out_free_ar; 238 goto out_free_ar;
239 239
240enomem: 240enomem:
@@ -257,31 +257,13 @@ static ssize_t __init setup_pcpu_remap(size_t static_size)
257 * Embedding allocator 257 * Embedding allocator
258 * 258 *
259 * The first chunk is sized to just contain the static area plus 259 * The first chunk is sized to just contain the static area plus
260 * module and dynamic reserves, and allocated as a contiguous area 260 * module and dynamic reserves and embedded into linear physical
261 * using bootmem allocator and used as-is without being mapped into 261 * mapping so that it can use PMD mapping without additional TLB
262 * vmalloc area. This enables the first chunk to piggy back on the 262 * pressure.
263 * linear physical PMD mapping and doesn't add any additional pressure
264 * to TLB. Note that if the needed size is smaller than the minimum
265 * unit size, the leftover is returned to the bootmem allocator.
266 */ 263 */
267static void *pcpue_ptr __initdata;
268static size_t pcpue_size __initdata;
269static size_t pcpue_unit_size __initdata;
270
271static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
272{
273 size_t off = (size_t)pageno << PAGE_SHIFT;
274
275 if (off >= pcpue_size)
276 return NULL;
277
278 return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size + off);
279}
280
281static ssize_t __init setup_pcpu_embed(size_t static_size) 264static ssize_t __init setup_pcpu_embed(size_t static_size)
282{ 265{
283 unsigned int cpu; 266 size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
284 size_t dyn_size;
285 267
286 /* 268 /*
287 * If large page isn't supported, there's no benefit in doing 269 * If large page isn't supported, there's no benefit in doing
@@ -291,33 +273,8 @@ static ssize_t __init setup_pcpu_embed(size_t static_size)
291 if (!cpu_has_pse || pcpu_need_numa()) 273 if (!cpu_has_pse || pcpu_need_numa())
292 return -EINVAL; 274 return -EINVAL;
293 275
294 /* allocate and copy */ 276 return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
295 pcpue_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE + 277 reserve - PERCPU_FIRST_CHUNK_RESERVE, -1);
296 PERCPU_DYNAMIC_RESERVE);
297 pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);
298 dyn_size = pcpue_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
299
300 pcpue_ptr = pcpu_alloc_bootmem(0, num_possible_cpus() * pcpue_unit_size,
301 PAGE_SIZE);
302 if (!pcpue_ptr)
303 return -ENOMEM;
304
305 for_each_possible_cpu(cpu) {
306 void *ptr = pcpue_ptr + cpu * pcpue_unit_size;
307
308 free_bootmem(__pa(ptr + pcpue_size),
309 pcpue_unit_size - pcpue_size);
310 memcpy(ptr, __per_cpu_load, static_size);
311 }
312
313 /* we're ready, commit */
314 pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
315 pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size);
316
317 return pcpu_setup_first_chunk(pcpue_get_page, static_size,
318 PERCPU_FIRST_CHUNK_RESERVE,
319 pcpue_unit_size, dyn_size,
320 pcpue_ptr, NULL);
321} 278}
322 279
323/* 280/*
@@ -375,8 +332,8 @@ static ssize_t __init setup_pcpu_4k(size_t static_size)
375 pcpu4k_nr_static_pages, static_size); 332 pcpu4k_nr_static_pages, static_size);
376 333
377 ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, 334 ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
378 PERCPU_FIRST_CHUNK_RESERVE, -1, -1, NULL, 335 PERCPU_FIRST_CHUNK_RESERVE, -1,
379 pcpu4k_populate_pte); 336 -1, NULL, pcpu4k_populate_pte);
380 goto out_free_ar; 337 goto out_free_ar;
381 338
382enomem: 339enomem:
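The setup_percpu.c hunks above delete the open-coded embedding allocator in favor of the generic pcpu_embed_first_chunk() helper, and swap the dyn_size and unit-size argument positions of pcpu_setup_first_chunk() at the remaining call sites. For reference, a sketch of the sizing logic the removed code performed, using the names from the deleted version; the generic helper's internals may differ:

        size_t reserve   = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
        size_t size      = PFN_ALIGN(static_size + reserve);    /* first chunk, page-aligned */
        size_t unit_size = max_t(size_t, size, PCPU_MIN_UNIT_SIZE);
        size_t dyn_size  = size - static_size - PERCPU_FIRST_CHUNK_RESERVE;

        /* One unit per possible CPU, allocated contiguously from bootmem so
         * the chunk stays inside the PMD-mapped linear mapping; the leftover
         * of each unit beyond 'size' was handed back to bootmem. */
        ptr = pcpu_alloc_bootmem(0, num_possible_cpus() * unit_size, PAGE_SIZE);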
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index c22981fa2f3a..ad5441ed1b57 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -1,30 +1,38 @@
1/* Copyright 2002 Andi Kleen */ 1/* Copyright 2002 Andi Kleen */
2 2
3#include <linux/linkage.h> 3#include <linux/linkage.h>
4#include <asm/dwarf2.h> 4
5#include <asm/cpufeature.h> 5#include <asm/cpufeature.h>
6#include <asm/dwarf2.h>
6 7
7/* 8/*
8 * memcpy - Copy a memory block. 9 * memcpy - Copy a memory block.
9 * 10 *
10 * Input: 11 * Input:
11 * rdi destination 12 * rdi destination
12 * rsi source 13 * rsi source
13 * rdx count 14 * rdx count
14 * 15 *
15 * Output: 16 * Output:
16 * rax original destination 17 * rax original destination
17 */ 18 */
18 19
20/*
21 * memcpy_c() - fast string ops (REP MOVSQ) based variant.
22 *
23 * Calls to this get patched into the kernel image via the
24 * alternative instructions framework:
25 */
19 ALIGN 26 ALIGN
20memcpy_c: 27memcpy_c:
21 CFI_STARTPROC 28 CFI_STARTPROC
22 movq %rdi,%rax 29 movq %rdi, %rax
23 movl %edx,%ecx 30
24 shrl $3,%ecx 31 movl %edx, %ecx
25 andl $7,%edx 32 shrl $3, %ecx
33 andl $7, %edx
26 rep movsq 34 rep movsq
27 movl %edx,%ecx 35 movl %edx, %ecx
28 rep movsb 36 rep movsb
29 ret 37 ret
30 CFI_ENDPROC 38 CFI_ENDPROC
@@ -33,99 +41,110 @@ ENDPROC(memcpy_c)
33ENTRY(__memcpy) 41ENTRY(__memcpy)
34ENTRY(memcpy) 42ENTRY(memcpy)
35 CFI_STARTPROC 43 CFI_STARTPROC
36 pushq %rbx
37 CFI_ADJUST_CFA_OFFSET 8
38 CFI_REL_OFFSET rbx, 0
39 movq %rdi,%rax
40 44
41 movl %edx,%ecx 45 /*
42 shrl $6,%ecx 46 * Put the number of full 64-byte blocks into %ecx.
47 * Tail portion is handled at the end:
48 */
49 movq %rdi, %rax
50 movl %edx, %ecx
51 shrl $6, %ecx
43 jz .Lhandle_tail 52 jz .Lhandle_tail
44 53
45 .p2align 4 54 .p2align 4
46.Lloop_64: 55.Lloop_64:
56 /*
 57 * We decrement the loop index here - and the zero flag is
 58 * checked at the end of the loop (instructions in between do
59 * not change the zero flag):
60 */
47 decl %ecx 61 decl %ecx
48 62
49 movq (%rsi),%r11 63 /*
50 movq 8(%rsi),%r8 64 * Move in blocks of 4x16 bytes:
65 */
66 movq 0*8(%rsi), %r11
67 movq 1*8(%rsi), %r8
68 movq %r11, 0*8(%rdi)
69 movq %r8, 1*8(%rdi)
51 70
52 movq %r11,(%rdi) 71 movq 2*8(%rsi), %r9
53 movq %r8,1*8(%rdi) 72 movq 3*8(%rsi), %r10
73 movq %r9, 2*8(%rdi)
74 movq %r10, 3*8(%rdi)
54 75
55 movq 2*8(%rsi),%r9 76 movq 4*8(%rsi), %r11
56 movq 3*8(%rsi),%r10 77 movq 5*8(%rsi), %r8
78 movq %r11, 4*8(%rdi)
79 movq %r8, 5*8(%rdi)
57 80
58 movq %r9,2*8(%rdi) 81 movq 6*8(%rsi), %r9
59 movq %r10,3*8(%rdi) 82 movq 7*8(%rsi), %r10
83 movq %r9, 6*8(%rdi)
84 movq %r10, 7*8(%rdi)
60 85
61 movq 4*8(%rsi),%r11 86 leaq 64(%rsi), %rsi
62 movq 5*8(%rsi),%r8 87 leaq 64(%rdi), %rdi
63 88
64 movq %r11,4*8(%rdi)
65 movq %r8,5*8(%rdi)
66
67 movq 6*8(%rsi),%r9
68 movq 7*8(%rsi),%r10
69
70 movq %r9,6*8(%rdi)
71 movq %r10,7*8(%rdi)
72
73 leaq 64(%rsi),%rsi
74 leaq 64(%rdi),%rdi
75 jnz .Lloop_64 89 jnz .Lloop_64
76 90
77.Lhandle_tail: 91.Lhandle_tail:
78 movl %edx,%ecx 92 movl %edx, %ecx
79 andl $63,%ecx 93 andl $63, %ecx
80 shrl $3,%ecx 94 shrl $3, %ecx
81 jz .Lhandle_7 95 jz .Lhandle_7
96
82 .p2align 4 97 .p2align 4
83.Lloop_8: 98.Lloop_8:
84 decl %ecx 99 decl %ecx
85 movq (%rsi),%r8 100 movq (%rsi), %r8
86 movq %r8,(%rdi) 101 movq %r8, (%rdi)
87 leaq 8(%rdi),%rdi 102 leaq 8(%rdi), %rdi
88 leaq 8(%rsi),%rsi 103 leaq 8(%rsi), %rsi
89 jnz .Lloop_8 104 jnz .Lloop_8
90 105
91.Lhandle_7: 106.Lhandle_7:
92 movl %edx,%ecx 107 movl %edx, %ecx
93 andl $7,%ecx 108 andl $7, %ecx
94 jz .Lende 109 jz .Lend
110
95 .p2align 4 111 .p2align 4
96.Lloop_1: 112.Lloop_1:
97 movb (%rsi),%r8b 113 movb (%rsi), %r8b
98 movb %r8b,(%rdi) 114 movb %r8b, (%rdi)
99 incq %rdi 115 incq %rdi
100 incq %rsi 116 incq %rsi
101 decl %ecx 117 decl %ecx
102 jnz .Lloop_1 118 jnz .Lloop_1
103 119
104.Lende: 120.Lend:
105 popq %rbx
106 CFI_ADJUST_CFA_OFFSET -8
107 CFI_RESTORE rbx
108 ret 121 ret
109.Lfinal:
110 CFI_ENDPROC 122 CFI_ENDPROC
111ENDPROC(memcpy) 123ENDPROC(memcpy)
112ENDPROC(__memcpy) 124ENDPROC(__memcpy)
113 125
114 /* Some CPUs run faster using the string copy instructions. 126 /*
115 It is also a lot simpler. Use this when possible */ 127 * Some CPUs run faster using the string copy instructions.
128 * It is also a lot simpler. Use this when possible:
129 */
116 130
117 .section .altinstr_replacement,"ax" 131 .section .altinstr_replacement, "ax"
1181: .byte 0xeb /* jmp <disp8> */ 1321: .byte 0xeb /* jmp <disp8> */
119 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */ 133 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
1202: 1342:
121 .previous 135 .previous
122 .section .altinstructions,"a" 136
137 .section .altinstructions, "a"
123 .align 8 138 .align 8
124 .quad memcpy 139 .quad memcpy
125 .quad 1b 140 .quad 1b
126 .byte X86_FEATURE_REP_GOOD 141 .byte X86_FEATURE_REP_GOOD
127 /* Replace only beginning, memcpy is used to apply alternatives, so it 142
128 * is silly to overwrite itself with nops - reboot is only outcome... */ 143 /*
144 * Replace only beginning, memcpy is used to apply alternatives,
145 * so it is silly to overwrite itself with nops - reboot is the
146 * only outcome...
147 */
129 .byte 2b - 1b 148 .byte 2b - 1b
130 .byte 2b - 1b 149 .byte 2b - 1b
131 .previous 150 .previous
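The memcpy_64.S rewrite above is a cleanup: the routine no longer clobbers %rbx (so the push/pop and its CFI annotations disappear), operands gain spaces, .Lende becomes .Lend, and new comments describe the alternatives patching that swaps in the REP MOVSQ variant on X86_FEATURE_REP_GOOD CPUs. The copy structure itself is unchanged. A rough C model of that structure, for illustration only; the casts assume x86's tolerance of unaligned 8-byte accesses, which is what the movq-based assembly relies on:

        #include <stddef.h>
        #include <stdint.h>

        static void *memcpy_model(void *dst, const void *src, size_t len)
        {
                unsigned char *d = dst;
                const unsigned char *s = src;
                size_t n;
                int i;

                for (n = len >> 6; n; n--) {            /* .Lloop_64: 64-byte blocks */
                        for (i = 0; i < 8; i++)         /* eight movq load/store pairs */
                                *(uint64_t *)(d + 8 * i) =
                                        *(const uint64_t *)(s + 8 * i);
                        d += 64;
                        s += 64;
                }
                for (n = (len & 63) >> 3; n; n--) {     /* .Lloop_8: 8-byte words */
                        *(uint64_t *)d = *(const uint64_t *)s;
                        d += 8;
                        s += 8;
                }
                for (n = len & 7; n; n--)               /* .Lloop_1: trailing bytes */
                        *d++ = *s++;

                return dst;                             /* original destination, as %rax */
        }

In the assembly the eight moves are interleaved as 4x16-byte groups of two loads followed by two stores, which the C model flattens into one inner loop.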
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index d11745334a67..522db5e3d0bf 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -121,22 +121,13 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
121 pagefault_enable(); 121 pagefault_enable();
122} 122}
123 123
124/* This is the same as kmap_atomic() but can map memory that doesn't 124/*
125 * This is the same as kmap_atomic() but can map memory that doesn't
125 * have a struct page associated with it. 126 * have a struct page associated with it.
126 */ 127 */
127void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) 128void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
128{ 129{
129 enum fixed_addresses idx; 130 return kmap_atomic_prot_pfn(pfn, type, kmap_prot);
130 unsigned long vaddr;
131
132 pagefault_disable();
133
134 idx = type + KM_TYPE_NR*smp_processor_id();
135 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
136 set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
137 arch_flush_lazy_mmu_mode();
138
139 return (void*) vaddr;
140} 131}
141EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */ 132EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
142 133
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 15219e0d1243..fd3da1dda1c9 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -94,9 +94,9 @@ struct map_range {
94#define NR_RANGE_MR 5 94#define NR_RANGE_MR 5
95#endif 95#endif
96 96
97static int save_mr(struct map_range *mr, int nr_range, 97static int __meminit save_mr(struct map_range *mr, int nr_range,
98 unsigned long start_pfn, unsigned long end_pfn, 98 unsigned long start_pfn, unsigned long end_pfn,
99 unsigned long page_size_mask) 99 unsigned long page_size_mask)
100{ 100{
101 if (start_pfn < end_pfn) { 101 if (start_pfn < end_pfn) {
102 if (nr_range >= NR_RANGE_MR) 102 if (nr_range >= NR_RANGE_MR)
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 04102d42ff42..6e60ba698cee 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -18,6 +18,7 @@
18 18
19#include <asm/iomap.h> 19#include <asm/iomap.h>
20#include <asm/pat.h> 20#include <asm/pat.h>
21#include <asm/highmem.h>
21#include <linux/module.h> 22#include <linux/module.h>
22 23
23int is_io_mapping_possible(resource_size_t base, unsigned long size) 24int is_io_mapping_possible(resource_size_t base, unsigned long size)
@@ -31,16 +32,27 @@ int is_io_mapping_possible(resource_size_t base, unsigned long size)
31} 32}
32EXPORT_SYMBOL_GPL(is_io_mapping_possible); 33EXPORT_SYMBOL_GPL(is_io_mapping_possible);
33 34
34/* Map 'pfn' using fixed map 'type' and protections 'prot' 35void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
35 */
36void *
37iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
38{ 36{
39 enum fixed_addresses idx; 37 enum fixed_addresses idx;
40 unsigned long vaddr; 38 unsigned long vaddr;
41 39
42 pagefault_disable(); 40 pagefault_disable();
43 41
42 idx = type + KM_TYPE_NR * smp_processor_id();
43 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
44 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
45 arch_flush_lazy_mmu_mode();
46
47 return (void *)vaddr;
48}
49
50/*
51 * Map 'pfn' using fixed map 'type' and protections 'prot'
52 */
53void *
54iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
55{
44 /* 56 /*
45 * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS. 57 * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
46 * PAGE_KERNEL_WC maps to PWT, which translates to uncached if the 58 * PAGE_KERNEL_WC maps to PWT, which translates to uncached if the
@@ -50,12 +62,7 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
50 if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC)) 62 if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
51 prot = PAGE_KERNEL_UC_MINUS; 63 prot = PAGE_KERNEL_UC_MINUS;
52 64
53 idx = type + KM_TYPE_NR*smp_processor_id(); 65 return kmap_atomic_prot_pfn(pfn, type, prot);
54 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
55 set_pte(kmap_pte-idx, pfn_pte(pfn, prot));
56 arch_flush_lazy_mmu_mode();
57
58 return (void*) vaddr;
59} 66}
60EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn); 67EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
61 68
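Taken together, the highmem_32.c and iomap_32.c hunks hoist the common fixmap-mapping sequence into a shared kmap_atomic_prot_pfn(): kmap_atomic_pfn() now calls it with kmap_prot, and iomap_atomic_prot_pfn() with a caller-supplied (and, without PAT, possibly demoted) protection. A hedged usage sketch from a hypothetical driver; 'pfn' stands in for a real page frame, and the KM_USER0 slot plus the iounmap_atomic() pairing follow the iomap API of this kernel generation:

        void *vaddr;

        /* write-combining mapping of a device page; on non-PAT systems
         * iomap_atomic_prot_pfn() transparently demotes this to UC- */
        vaddr = iomap_atomic_prot_pfn(pfn, KM_USER0, PAGE_KERNEL_WC);

        /* ... access the mapping; pagefaults are disabled, no sleeping ... */

        iounmap_atomic(vaddr, KM_USER0);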
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index aca924a30ee6..55e127f71ed9 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -22,13 +22,17 @@
22#include <asm/pgalloc.h> 22#include <asm/pgalloc.h>
23#include <asm/pat.h> 23#include <asm/pat.h>
24 24
25#ifdef CONFIG_X86_64 25static inline int phys_addr_valid(resource_size_t addr)
26
27static inline int phys_addr_valid(unsigned long addr)
28{ 26{
29 return addr < (1UL << boot_cpu_data.x86_phys_bits); 27#ifdef CONFIG_PHYS_ADDR_T_64BIT
28 return !(addr >> boot_cpu_data.x86_phys_bits);
29#else
30 return 1;
31#endif
30} 32}
31 33
34#ifdef CONFIG_X86_64
35
32unsigned long __phys_addr(unsigned long x) 36unsigned long __phys_addr(unsigned long x)
33{ 37{
34 if (x >= __START_KERNEL_map) { 38 if (x >= __START_KERNEL_map) {
@@ -65,11 +69,6 @@ EXPORT_SYMBOL(__virt_addr_valid);
65 69
66#else 70#else
67 71
68static inline int phys_addr_valid(unsigned long addr)
69{
70 return 1;
71}
72
73#ifdef CONFIG_DEBUG_VIRTUAL 72#ifdef CONFIG_DEBUG_VIRTUAL
74unsigned long __phys_addr(unsigned long x) 73unsigned long __phys_addr(unsigned long x)
75{ 74{
@@ -488,7 +487,12 @@ static int __init early_ioremap_debug_setup(char *str)
488early_param("early_ioremap_debug", early_ioremap_debug_setup); 487early_param("early_ioremap_debug", early_ioremap_debug_setup);
489 488
490static __initdata int after_paging_init; 489static __initdata int after_paging_init;
491static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss; 490#define __FIXADDR_TOP (-PAGE_SIZE)
491static pte_t bm_pte[(__fix_to_virt(FIX_DBGP_BASE)
492 ^ __fix_to_virt(FIX_BTMAP_BEGIN)) >> PMD_SHIFT
493 ? PAGE_SIZE / sizeof(pte_t) : 0] __page_aligned_bss;
494#undef __FIXADDR_TOP
495static __initdata pte_t *bm_ptep;
492 496
493static inline pmd_t * __init early_ioremap_pmd(unsigned long addr) 497static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
494{ 498{
@@ -503,6 +507,8 @@ static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
503 507
504static inline pte_t * __init early_ioremap_pte(unsigned long addr) 508static inline pte_t * __init early_ioremap_pte(unsigned long addr)
505{ 509{
510 if (!sizeof(bm_pte))
511 return &bm_ptep[pte_index(addr)];
506 return &bm_pte[pte_index(addr)]; 512 return &bm_pte[pte_index(addr)];
507} 513}
508 514
@@ -520,8 +526,14 @@ void __init early_ioremap_init(void)
520 slot_virt[i] = fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i); 526 slot_virt[i] = fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
521 527
522 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); 528 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
523 memset(bm_pte, 0, sizeof(bm_pte)); 529 if (sizeof(bm_pte)) {
524 pmd_populate_kernel(&init_mm, pmd, bm_pte); 530 memset(bm_pte, 0, sizeof(bm_pte));
531 pmd_populate_kernel(&init_mm, pmd, bm_pte);
532 } else {
533 bm_ptep = pte_offset_kernel(pmd, 0);
534 if (early_ioremap_debug)
535 printk(KERN_INFO "bm_ptep=%p\n", bm_ptep);
536 }
525 537
526 /* 538 /*
527 * The boot-ioremap range spans multiple pmds, for which 539 * The boot-ioremap range spans multiple pmds, for which
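Two things happen in the ioremap.c hunks above: phys_addr_valid() becomes common to 32-bit and 64-bit and takes the full resource_size_t, and bm_pte is sized at compile time so that it vanishes when the fixmap layout lets the boot-time ioremap share an already-populated PMD page (the new bm_ptep path). The shift-based range check matters on 32-bit PAE, where resource_size_t is 64 bits wide but unsigned long is only 32; both forms below are taken from the diff:

        /* old form: 1UL << 36 is undefined with a 32-bit unsigned long,
         * so this check could not be shared outside CONFIG_X86_64 */
        return addr < (1UL << boot_cpu_data.x86_phys_bits);

        /* new form: any bit set at or above x86_phys_bits survives the
         * shift, so the result is non-zero exactly for out-of-range
         * addresses - no wide constant needed */
        return !(addr >> boot_cpu_data.x86_phys_bits);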
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 6a518dd08a36..4f115e00486b 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -310,7 +310,7 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
310 struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx); 310 struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);
311 311
312 if (!ctx->active) { 312 if (!ctx->active) {
313 pr_warning("kmmio: spurious debug trap on CPU %d.\n", 313 pr_debug("kmmio: spurious debug trap on CPU %d.\n",
314 smp_processor_id()); 314 smp_processor_id());
315 goto out; 315 goto out;
316 } 316 }
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 8253bc97587e..9c4294986af7 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -522,6 +522,17 @@ static int split_large_page(pte_t *kpte, unsigned long address)
522 * primary protection behavior: 522 * primary protection behavior:
523 */ 523 */
524 __set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE))); 524 __set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));
525
526 /*
527 * Intel Atom errata AAH41 workaround.
528 *
529 * The real fix should be in hw or in a microcode update, but
530 * we also probabilistically try to reduce the window of having
531 * a large TLB mixed with 4K TLBs while instruction fetches are
532 * going on.
533 */
534 __flush_tlb_all();
535
525 base = NULL; 536 base = NULL;
526 537
527out_unlock: 538out_unlock:
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 2ed37158012d..640339ee4fb2 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -677,10 +677,11 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
677 is_ram = pat_pagerange_is_ram(paddr, paddr + size); 677 is_ram = pat_pagerange_is_ram(paddr, paddr + size);
678 678
679 /* 679 /*
680 * reserve_pfn_range() doesn't support RAM pages. 680 * reserve_pfn_range() doesn't support RAM pages. Maintain the current
681 * behavior with RAM pages by returning success.
681 */ 682 */
682 if (is_ram != 0) 683 if (is_ram != 0)
683 return -EINVAL; 684 return 0;
684 685
685 ret = reserve_memtype(paddr, paddr + size, want_flags, &flags); 686 ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
686 if (ret) 687 if (ret)
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 82d22fc601ae..8c362b96b644 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -90,7 +90,7 @@ static int __devinit can_skip_ioresource_align(const struct dmi_system_id *d)
90 return 0; 90 return 0;
91} 91}
92 92
93static struct dmi_system_id can_skip_pciprobe_dmi_table[] __devinitdata = { 93static const struct dmi_system_id can_skip_pciprobe_dmi_table[] __devinitconst = {
94/* 94/*
95 * Systems where PCI IO resource ISA alignment can be skipped 95 * Systems where PCI IO resource ISA alignment can be skipped
96 * when the ISA enable bit in the bridge control is not set 96 * when the ISA enable bit in the bridge control is not set
@@ -183,7 +183,7 @@ static int __devinit assign_all_busses(const struct dmi_system_id *d)
183} 183}
184#endif 184#endif
185 185
186static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = { 186static const struct dmi_system_id __devinitconst pciprobe_dmi_table[] = {
187#ifdef __i386__ 187#ifdef __i386__
188/* 188/*
189 * Laptops which need pci=assign-busses to see Cardbus cards 189 * Laptops which need pci=assign-busses to see Cardbus cards
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 7d388d5cf548..9c49919e4d1c 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -356,7 +356,7 @@ static void __devinit pci_fixup_video(struct pci_dev *pdev)
356DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_video); 356DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_video);
357 357
358 358
359static struct dmi_system_id __devinitdata msi_k8t_dmi_table[] = { 359static const struct dmi_system_id __devinitconst msi_k8t_dmi_table[] = {
360 { 360 {
361 .ident = "MSI-K8T-Neo2Fir", 361 .ident = "MSI-K8T-Neo2Fir",
362 .matches = { 362 .matches = {
@@ -413,7 +413,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
413 */ 413 */
414static u16 toshiba_line_size; 414static u16 toshiba_line_size;
415 415
416static struct dmi_system_id __devinitdata toshiba_ohci1394_dmi_table[] = { 416static const struct dmi_system_id __devinitconst toshiba_ohci1394_dmi_table[] = {
417 { 417 {
418 .ident = "Toshiba PS5 based laptop", 418 .ident = "Toshiba PS5 based laptop",
419 .matches = { 419 .matches = {