author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-17 16:13:16 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-17 16:13:16 -0400
commit     d20ead9e86881bc7ae84e385f47b5196b7d93aac (patch)
tree       ed27dd5db5f8447e4b3f541f0ec38219085d2f32 /arch
parent     c56ec7639288f3e5d6371b0c48d37da93642fc93 (diff)
parent     88e4d250234fc9e64d6ce51df95efdcf8334fd95 (diff)
Merge ssh://master.kernel.org/pub/scm/linux/kernel/git/tglx/linux-2.6-x86
* ssh://master.kernel.org/pub/scm/linux/kernel/git/tglx/linux-2.6-x86: (114 commits)
x86: delete vsyscall files during make clean
kbuild: fix typo SRCARCH in find_sources
x86: fix kernel rebuild due to vsyscall fallout
.gitignore update for x86 arch
x86: unify include/asm/debugreg_32/64.h
x86: unify include/asm/unwind_32/64.h
x86: unify include/asm/types_32/64.h
x86: unify include/asm/tlb_32/64.h
x86: unify include/asm/siginfo_32/64.h
x86: unify include/asm/bug_32/64.h
x86: unify include/asm/mman_32/64.h
x86: unify include/asm/agp_32/64.h
x86: unify include/asm/kdebug_32/64.h
x86: unify include/asm/ioctls_32/64.h
x86: unify include/asm/floppy_32/64.h
x86: apply missing DMA/OOM prevention to floppy_32.h
x86: unify include/asm/cache_32/64.h
x86: unify include/asm/dmi_32/64.h
x86: unify include/asm/delay_32/64.h
...
Diffstat (limited to 'arch')
86 files changed, 541 insertions, 355 deletions
diff --git a/arch/i386/.gitignore b/arch/i386/.gitignore
new file mode 100644
index 000000000000..36ef4c374d25
--- /dev/null
+++ b/arch/i386/.gitignore
@@ -0,0 +1 @@ | |||
boot | |||
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 7a95c58947e4..b84d5050e92e 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -146,6 +146,7 @@ config X86_ELAN | |||
146 | 146 | ||
147 | config X86_VOYAGER | 147 | config X86_VOYAGER |
148 | bool "Voyager (NCR)" | 148 | bool "Voyager (NCR)" |
149 | select SMP if !BROKEN | ||
149 | help | 150 | help |
150 | Voyager is an MCA-based 32-way capable SMP architecture proprietary | 151 | Voyager is an MCA-based 32-way capable SMP architecture proprietary |
151 | to NCR Corp. Machine classes 345x/35xx/4100/51xx are Voyager-based. | 152 | to NCR Corp. Machine classes 345x/35xx/4100/51xx are Voyager-based. |
diff --git a/arch/i386/Kconfig.cpu b/arch/i386/Kconfig.cpu
index 11a24d54f27b..0e2adadf5905 100644
--- a/arch/i386/Kconfig.cpu
+++ b/arch/i386/Kconfig.cpu
@@ -109,16 +109,42 @@ config MCORE2 | |||
109 | help | 109 | help |
110 | Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and 53xx) | 110 | Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and 53xx) |
111 | CPUs. You can distinguish newer from older Xeons by the CPU family | 111 | CPUs. You can distinguish newer from older Xeons by the CPU family |
112 | in /proc/cpuinfo. Newer ones have 6. | 112 | in /proc/cpuinfo. Newer ones have 6 and older ones 15 (not a typo) |
113 | 113 | ||
114 | config MPENTIUM4 | 114 | config MPENTIUM4 |
115 | bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon" | 115 | bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon" |
116 | help | 116 | help |
117 | Select this for Intel Pentium 4 chips. This includes the | 117 | Select this for Intel Pentium 4 chips. This includes the |
118 | Pentium 4, P4-based Celeron and Xeon, and Pentium-4 M | 118 | Pentium 4, Pentium D, P4-based Celeron and Xeon, and |
119 | (not Pentium M) chips. This option enables compile flags | 119 | Pentium-4 M (not Pentium M) chips. This option enables compile |
120 | optimized for the chip, uses the correct cache shift, and | 120 | flags optimized for the chip, uses the correct cache line size, and |
121 | applies any applicable Pentium III optimizations. | 121 | applies any applicable optimizations. |
122 | |||
123 | CPUIDs: F[0-6][1-A] (in /proc/cpuinfo show = cpu family : 15 ) | ||
124 | |||
125 | Select this for: | ||
126 | Pentiums (Pentium 4, Pentium D, Celeron, Celeron D) corename: | ||
127 | -Willamette | ||
128 | -Northwood | ||
129 | -Mobile Pentium 4 | ||
130 | -Mobile Pentium 4 M | ||
131 | -Extreme Edition (Gallatin) | ||
132 | -Prescott | ||
133 | -Prescott 2M | ||
134 | -Cedar Mill | ||
135 | -Presler | ||
136 | -Smithfiled | ||
137 | Xeons (Intel Xeon, Xeon MP, Xeon LV, Xeon MV) corename: | ||
138 | -Foster | ||
139 | -Prestonia | ||
140 | -Gallatin | ||
141 | -Nocona | ||
142 | -Irwindale | ||
143 | -Cranford | ||
144 | -Potomac | ||
145 | -Paxville | ||
146 | -Dempsey | ||
147 | |||
122 | 148 | ||
123 | config MK6 | 149 | config MK6 |
124 | bool "K6/K6-II/K6-III" | 150 | bool "K6/K6-II/K6-III" |
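Editorial aside, not part of the patch: the expanded MPENTIUM4 help text above tells users to tell NetBurst-era parts (cpu family 15) apart from P6/Core-based ones (cpu family 6) by reading /proc/cpuinfo. A minimal userspace sketch of that check, which simply parses the first "cpu family" line:

```c
/*
 * Illustrative helper (editorial sketch, not from this merge): report the
 * CPU family from /proc/cpuinfo, as suggested by the Kconfig help above.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/cpuinfo", "r");
	char line[256];
	int family = -1;

	if (!f) {
		perror("fopen /proc/cpuinfo");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* matches "cpu family\t: 6" style lines */
		if (sscanf(line, "cpu family : %d", &family) == 1)
			break;
	}
	fclose(f);

	if (family < 0) {
		fprintf(stderr, "no \"cpu family\" line found\n");
		return 1;
	}
	printf("cpu family: %d (15 = NetBurst/Pentium 4 era, 6 = P6/Core based)\n",
	       family);
	return 0;
}
```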
diff --git a/arch/x86/ia32/Makefile b/arch/x86/ia32/Makefile
index cdae36435e21..e2edda255a84 100644
--- a/arch/x86/ia32/Makefile
+++ b/arch/x86/ia32/Makefile
@@ -18,18 +18,35 @@ $(obj)/syscall32_syscall.o: \ | |||
18 | $(foreach F,sysenter syscall,$(obj)/vsyscall-$F.so) | 18 | $(foreach F,sysenter syscall,$(obj)/vsyscall-$F.so) |
19 | 19 | ||
20 | # Teach kbuild about targets | 20 | # Teach kbuild about targets |
21 | targets := $(foreach F,sysenter syscall,vsyscall-$F.o vsyscall-$F.so) | 21 | targets := $(foreach F,$(addprefix vsyscall-,sysenter syscall),\ |
22 | $F.o $F.so $F.so.dbg) | ||
22 | 23 | ||
23 | # The DSO images are built using a special linker script | 24 | # The DSO images are built using a special linker script |
24 | quiet_cmd_syscall = SYSCALL $@ | 25 | quiet_cmd_syscall = SYSCALL $@ |
25 | cmd_syscall = $(CC) -m32 -nostdlib -shared -s \ | 26 | cmd_syscall = $(CC) -m32 -nostdlib -shared \ |
26 | $(call ld-option, -Wl$(comma)--hash-style=sysv) \ | 27 | $(call ld-option, -Wl$(comma)--hash-style=sysv) \ |
27 | -Wl,-soname=linux-gate.so.1 -o $@ \ | 28 | -Wl,-soname=linux-gate.so.1 -o $@ \ |
28 | -Wl,-T,$(filter-out FORCE,$^) | 29 | -Wl,-T,$(filter-out FORCE,$^) |
29 | 30 | ||
30 | $(obj)/vsyscall-sysenter.so $(obj)/vsyscall-syscall.so: \ | 31 | $(obj)/%.so: OBJCOPYFLAGS := -S |
31 | $(obj)/vsyscall-%.so: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE | 32 | $(obj)/%.so: $(obj)/%.so.dbg FORCE |
33 | $(call if_changed,objcopy) | ||
34 | |||
35 | $(obj)/vsyscall-sysenter.so.dbg $(obj)/vsyscall-syscall.so.dbg: \ | ||
36 | $(obj)/vsyscall-%.so.dbg: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE | ||
32 | $(call if_changed,syscall) | 37 | $(call if_changed,syscall) |
33 | 38 | ||
34 | AFLAGS_vsyscall-sysenter.o = -m32 -Wa,-32 | 39 | AFLAGS_vsyscall-sysenter.o = -m32 -Wa,-32 |
35 | AFLAGS_vsyscall-syscall.o = -m32 -Wa,-32 | 40 | AFLAGS_vsyscall-syscall.o = -m32 -Wa,-32 |
41 | |||
42 | vdsos := vdso32-sysenter.so vdso32-syscall.so | ||
43 | |||
44 | quiet_cmd_vdso_install = INSTALL $@ | ||
45 | cmd_vdso_install = cp $(@:vdso32-%.so=$(obj)/vsyscall-%.so.dbg) \ | ||
46 | $(MODLIB)/vdso/$@ | ||
47 | |||
48 | $(vdsos): | ||
49 | @mkdir -p $(MODLIB)/vdso | ||
50 | $(call cmd,vdso_install) | ||
51 | |||
52 | vdso_install: $(vdsos) | ||
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index 7cf1c29bf90e..f82e1a94fcb7 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -420,6 +420,8 @@ beyond_if: | |||
420 | (regs)->eflags = 0x200; | 420 | (regs)->eflags = 0x200; |
421 | (regs)->cs = __USER32_CS; | 421 | (regs)->cs = __USER32_CS; |
422 | (regs)->ss = __USER32_DS; | 422 | (regs)->ss = __USER32_DS; |
423 | regs->r8 = regs->r9 = regs->r10 = regs->r11 = | ||
424 | regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0; | ||
423 | set_fs(USER_DS); | 425 | set_fs(USER_DS); |
424 | if (unlikely(current->ptrace & PT_PTRACED)) { | 426 | if (unlikely(current->ptrace & PT_PTRACED)) { |
425 | if (current->ptrace & PT_TRACE_EXEC) | 427 | if (current->ptrace & PT_TRACE_EXEC) |
diff --git a/arch/x86/ia32/ia32_binfmt.c b/arch/x86/ia32/ia32_binfmt.c
index d3c53e8b05c0..118b9f9ff499 100644
--- a/arch/x86/ia32/ia32_binfmt.c
+++ b/arch/x86/ia32/ia32_binfmt.c
@@ -112,11 +112,8 @@ struct elf_prpsinfo | |||
112 | char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ | 112 | char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ |
113 | }; | 113 | }; |
114 | 114 | ||
115 | #define __STR(x) #x | ||
116 | #define STR(x) __STR(x) | ||
117 | |||
118 | #define _GET_SEG(x) \ | 115 | #define _GET_SEG(x) \ |
119 | ({ __u32 seg; asm("movl %%" STR(x) ",%0" : "=r"(seg)); seg; }) | 116 | ({ __u32 seg; asm("movl %%" __stringify(x) ",%0" : "=r"(seg)); seg; }) |
120 | 117 | ||
121 | /* Assumes current==process to be dumped */ | 118 | /* Assumes current==process to be dumped */ |
122 | #define ELF_CORE_COPY_REGS(pr_reg, regs) \ | 119 | #define ELF_CORE_COPY_REGS(pr_reg, regs) \ |
diff --git a/arch/x86/ia32/ptrace32.c b/arch/x86/ia32/ptrace32.c
index 4a233ad6269c..f52770ef0ee3 100644
--- a/arch/x86/ia32/ptrace32.c
+++ b/arch/x86/ia32/ptrace32.c
@@ -228,6 +228,8 @@ static long ptrace32_siginfo(unsigned request, u32 pid, u32 addr, u32 data) | |||
228 | return ret; | 228 | return ret; |
229 | } | 229 | } |
230 | 230 | ||
231 | #define COMPAT_GDT_ENTRY_TLS_MIN 6 | ||
232 | |||
231 | asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data) | 233 | asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data) |
232 | { | 234 | { |
233 | struct task_struct *child; | 235 | struct task_struct *child; |
@@ -246,8 +248,6 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data) | |||
246 | case PTRACE_SYSCALL: | 248 | case PTRACE_SYSCALL: |
247 | case PTRACE_OLDSETOPTIONS: | 249 | case PTRACE_OLDSETOPTIONS: |
248 | case PTRACE_SETOPTIONS: | 250 | case PTRACE_SETOPTIONS: |
249 | case PTRACE_SET_THREAD_AREA: | ||
250 | case PTRACE_GET_THREAD_AREA: | ||
251 | return sys_ptrace(request, pid, addr, data); | 251 | return sys_ptrace(request, pid, addr, data); |
252 | 252 | ||
253 | default: | 253 | default: |
@@ -271,6 +271,12 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data) | |||
271 | case PTRACE_SETSIGINFO: | 271 | case PTRACE_SETSIGINFO: |
272 | case PTRACE_GETSIGINFO: | 272 | case PTRACE_GETSIGINFO: |
273 | return ptrace32_siginfo(request, pid, addr, data); | 273 | return ptrace32_siginfo(request, pid, addr, data); |
274 | |||
275 | case PTRACE_SET_THREAD_AREA: | ||
276 | case PTRACE_GET_THREAD_AREA: | ||
277 | return sys_ptrace(request, pid, | ||
278 | addr + GDT_ENTRY_TLS_MIN - COMPAT_GDT_ENTRY_TLS_MIN, | ||
279 | data); | ||
274 | } | 280 | } |
275 | 281 | ||
276 | child = ptrace_get_task_struct(pid); | 282 | child = ptrace_get_task_struct(pid); |
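Editorial note on the ptrace32.c hunks above, not part of the patch: PTRACE_GET/SET_THREAD_AREA is no longer forwarded verbatim; the TLS GDT index supplied by 32-bit userspace (which starts at entry 6 on i386, hence COMPAT_GDT_ENTRY_TLS_MIN) is first rebased onto the 64-bit kernel's own TLS slots before calling sys_ptrace(). A standalone sketch of that index translation, assuming the x86-64 value GDT_ENTRY_TLS_MIN = 12 purely for illustration:

```c
#include <stdio.h>

/* First TLS GDT slot as seen by i386 (compat) userspace. */
#define COMPAT_GDT_ENTRY_TLS_MIN 6
/* First TLS GDT slot in the 64-bit kernel's layout -- 12 assumed here. */
#define GDT_ENTRY_TLS_MIN 12

/*
 * Rebase a compat TLS entry number onto the 64-bit GDT layout, mirroring
 * the "addr + GDT_ENTRY_TLS_MIN - COMPAT_GDT_ENTRY_TLS_MIN" adjustment
 * made in sys32_ptrace() in the hunk above.
 */
static unsigned long tls_index_32_to_64(unsigned long addr)
{
	return addr + GDT_ENTRY_TLS_MIN - COMPAT_GDT_ENTRY_TLS_MIN;
}

int main(void)
{
	unsigned long idx;

	for (idx = 6; idx <= 8; idx++)	/* the three i386 TLS slots */
		printf("compat GDT index %lu -> 64-bit GDT index %lu\n",
		       idx, tls_index_32_to_64(idx));
	return 0;
}
```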
diff --git a/arch/x86/kernel/.gitignore b/arch/x86/kernel/.gitignore
index 40836ad9079c..4ea38a39aed4 100644
--- a/arch/x86/kernel/.gitignore
+++ b/arch/x86/kernel/.gitignore
@@ -1 +1,2 @@ | |||
1 | vsyscall.lds | 1 | vsyscall.lds |
2 | vsyscall_32.lds | ||
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 45855c97923e..38573340b143 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -3,3 +3,7 @@ include ${srctree}/arch/x86/kernel/Makefile_32 | |||
3 | else | 3 | else |
4 | include ${srctree}/arch/x86/kernel/Makefile_64 | 4 | include ${srctree}/arch/x86/kernel/Makefile_64 |
5 | endif | 5 | endif |
6 | |||
7 | # Workaround to delete .lds files with make clean | ||
8 | # The problem is that we do not enter Makefile_32 with make clean. | ||
9 | clean-files := vsyscall*.lds vsyscall*.so | ||
diff --git a/arch/x86/kernel/Makefile_32 b/arch/x86/kernel/Makefile_32
index 7ff02063b858..a3fa11f8f460 100644
--- a/arch/x86/kernel/Makefile_32
+++ b/arch/x86/kernel/Makefile_32
@@ -51,7 +51,7 @@ obj-$(CONFIG_SCx200) += scx200_32.o | |||
51 | # We must build both images before we can assemble it. | 51 | # We must build both images before we can assemble it. |
52 | # Note: kbuild does not track this dependency due to usage of .incbin | 52 | # Note: kbuild does not track this dependency due to usage of .incbin |
53 | $(obj)/vsyscall_32.o: $(obj)/vsyscall-int80_32.so $(obj)/vsyscall-sysenter_32.so | 53 | $(obj)/vsyscall_32.o: $(obj)/vsyscall-int80_32.so $(obj)/vsyscall-sysenter_32.so |
54 | targets += $(foreach F,int80 sysenter,vsyscall-$F.o vsyscall-$F.so) | 54 | targets += $(foreach F,int80 sysenter,vsyscall-$F_32.o vsyscall-$F_32.so) |
55 | targets += vsyscall-note_32.o vsyscall_32.lds | 55 | targets += vsyscall-note_32.o vsyscall_32.lds |
56 | 56 | ||
57 | # The DSO images are built using a special linker script. | 57 | # The DSO images are built using a special linker script. |
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 42421437ded3..3bd2688bd443 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -63,11 +63,11 @@ __setup("noreplace-paravirt", setup_noreplace_paravirt); | |||
63 | /* Use inline assembly to define this because the nops are defined | 63 | /* Use inline assembly to define this because the nops are defined |
64 | as inline assembly strings in the include files and we cannot | 64 | as inline assembly strings in the include files and we cannot |
65 | get them easily into strings. */ | 65 | get them easily into strings. */ |
66 | asm("\t.data\nintelnops: " | 66 | asm("\t.section .rodata, \"a\"\nintelnops: " |
67 | GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6 | 67 | GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6 |
68 | GENERIC_NOP7 GENERIC_NOP8); | 68 | GENERIC_NOP7 GENERIC_NOP8); |
69 | extern unsigned char intelnops[]; | 69 | extern const unsigned char intelnops[]; |
70 | static unsigned char *intel_nops[ASM_NOP_MAX+1] = { | 70 | static const unsigned char *const intel_nops[ASM_NOP_MAX+1] = { |
71 | NULL, | 71 | NULL, |
72 | intelnops, | 72 | intelnops, |
73 | intelnops + 1, | 73 | intelnops + 1, |
@@ -81,11 +81,11 @@ static unsigned char *intel_nops[ASM_NOP_MAX+1] = { | |||
81 | #endif | 81 | #endif |
82 | 82 | ||
83 | #ifdef K8_NOP1 | 83 | #ifdef K8_NOP1 |
84 | asm("\t.data\nk8nops: " | 84 | asm("\t.section .rodata, \"a\"\nk8nops: " |
85 | K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6 | 85 | K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6 |
86 | K8_NOP7 K8_NOP8); | 86 | K8_NOP7 K8_NOP8); |
87 | extern unsigned char k8nops[]; | 87 | extern const unsigned char k8nops[]; |
88 | static unsigned char *k8_nops[ASM_NOP_MAX+1] = { | 88 | static const unsigned char *const k8_nops[ASM_NOP_MAX+1] = { |
89 | NULL, | 89 | NULL, |
90 | k8nops, | 90 | k8nops, |
91 | k8nops + 1, | 91 | k8nops + 1, |
@@ -99,11 +99,11 @@ static unsigned char *k8_nops[ASM_NOP_MAX+1] = { | |||
99 | #endif | 99 | #endif |
100 | 100 | ||
101 | #ifdef K7_NOP1 | 101 | #ifdef K7_NOP1 |
102 | asm("\t.data\nk7nops: " | 102 | asm("\t.section .rodata, \"a\"\nk7nops: " |
103 | K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6 | 103 | K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6 |
104 | K7_NOP7 K7_NOP8); | 104 | K7_NOP7 K7_NOP8); |
105 | extern unsigned char k7nops[]; | 105 | extern const unsigned char k7nops[]; |
106 | static unsigned char *k7_nops[ASM_NOP_MAX+1] = { | 106 | static const unsigned char *const k7_nops[ASM_NOP_MAX+1] = { |
107 | NULL, | 107 | NULL, |
108 | k7nops, | 108 | k7nops, |
109 | k7nops + 1, | 109 | k7nops + 1, |
@@ -116,28 +116,49 @@ static unsigned char *k7_nops[ASM_NOP_MAX+1] = { | |||
116 | }; | 116 | }; |
117 | #endif | 117 | #endif |
118 | 118 | ||
119 | #ifdef P6_NOP1 | ||
120 | asm("\t.section .rodata, \"a\"\np6nops: " | ||
121 | P6_NOP1 P6_NOP2 P6_NOP3 P6_NOP4 P6_NOP5 P6_NOP6 | ||
122 | P6_NOP7 P6_NOP8); | ||
123 | extern const unsigned char p6nops[]; | ||
124 | static const unsigned char *const p6_nops[ASM_NOP_MAX+1] = { | ||
125 | NULL, | ||
126 | p6nops, | ||
127 | p6nops + 1, | ||
128 | p6nops + 1 + 2, | ||
129 | p6nops + 1 + 2 + 3, | ||
130 | p6nops + 1 + 2 + 3 + 4, | ||
131 | p6nops + 1 + 2 + 3 + 4 + 5, | ||
132 | p6nops + 1 + 2 + 3 + 4 + 5 + 6, | ||
133 | p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, | ||
134 | }; | ||
135 | #endif | ||
136 | |||
119 | #ifdef CONFIG_X86_64 | 137 | #ifdef CONFIG_X86_64 |
120 | 138 | ||
121 | extern char __vsyscall_0; | 139 | extern char __vsyscall_0; |
122 | static inline unsigned char** find_nop_table(void) | 140 | static inline const unsigned char*const * find_nop_table(void) |
123 | { | 141 | { |
124 | return k8_nops; | 142 | return boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || |
143 | boot_cpu_data.x86 < 6 ? k8_nops : p6_nops; | ||
125 | } | 144 | } |
126 | 145 | ||
127 | #else /* CONFIG_X86_64 */ | 146 | #else /* CONFIG_X86_64 */ |
128 | 147 | ||
129 | static struct nop { | 148 | static const struct nop { |
130 | int cpuid; | 149 | int cpuid; |
131 | unsigned char **noptable; | 150 | const unsigned char *const *noptable; |
132 | } noptypes[] = { | 151 | } noptypes[] = { |
133 | { X86_FEATURE_K8, k8_nops }, | 152 | { X86_FEATURE_K8, k8_nops }, |
134 | { X86_FEATURE_K7, k7_nops }, | 153 | { X86_FEATURE_K7, k7_nops }, |
154 | { X86_FEATURE_P4, p6_nops }, | ||
155 | { X86_FEATURE_P3, p6_nops }, | ||
135 | { -1, NULL } | 156 | { -1, NULL } |
136 | }; | 157 | }; |
137 | 158 | ||
138 | static unsigned char** find_nop_table(void) | 159 | static const unsigned char*const * find_nop_table(void) |
139 | { | 160 | { |
140 | unsigned char **noptable = intel_nops; | 161 | const unsigned char *const *noptable = intel_nops; |
141 | int i; | 162 | int i; |
142 | 163 | ||
143 | for (i = 0; noptypes[i].cpuid >= 0; i++) { | 164 | for (i = 0; noptypes[i].cpuid >= 0; i++) { |
@@ -154,7 +175,7 @@ static unsigned char** find_nop_table(void) | |||
154 | /* Use this to add nops to a buffer, then text_poke the whole buffer. */ | 175 | /* Use this to add nops to a buffer, then text_poke the whole buffer. */ |
155 | static void add_nops(void *insns, unsigned int len) | 176 | static void add_nops(void *insns, unsigned int len) |
156 | { | 177 | { |
157 | unsigned char **noptable = find_nop_table(); | 178 | const unsigned char *const *noptable = find_nop_table(); |
158 | 179 | ||
159 | while (len > 0) { | 180 | while (len > 0) { |
160 | unsigned int noplen = len; | 181 | unsigned int noplen = len; |
@@ -415,9 +436,6 @@ void __init alternative_instructions(void) | |||
415 | alternatives_smp_unlock(__smp_locks, __smp_locks_end, | 436 | alternatives_smp_unlock(__smp_locks, __smp_locks_end, |
416 | _text, _etext); | 437 | _text, _etext); |
417 | } | 438 | } |
418 | free_init_pages("SMP alternatives", | ||
419 | (unsigned long)__smp_locks, | ||
420 | (unsigned long)__smp_locks_end); | ||
421 | } else { | 439 | } else { |
422 | alternatives_smp_module_add(NULL, "core kernel", | 440 | alternatives_smp_module_add(NULL, "core kernel", |
423 | __smp_locks, __smp_locks_end, | 441 | __smp_locks, __smp_locks_end, |
@@ -428,6 +446,11 @@ void __init alternative_instructions(void) | |||
428 | apply_paravirt(__parainstructions, __parainstructions_end); | 446 | apply_paravirt(__parainstructions, __parainstructions_end); |
429 | local_irq_restore(flags); | 447 | local_irq_restore(flags); |
430 | 448 | ||
449 | if (smp_alt_once) | ||
450 | free_init_pages("SMP alternatives", | ||
451 | (unsigned long)__smp_locks, | ||
452 | (unsigned long)__smp_locks_end); | ||
453 | |||
431 | restart_nmi(); | 454 | restart_nmi(); |
432 | #ifdef CONFIG_X86_MCE | 455 | #ifdef CONFIG_X86_MCE |
433 | restart_mce(); | 456 | restart_mce(); |
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index 3d67ae18d762..793341fffc81 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -1277,6 +1277,7 @@ void smp_spurious_interrupt(struct pt_regs *regs) | |||
1277 | /* see sw-dev-man vol 3, chapter 7.4.13.5 */ | 1277 | /* see sw-dev-man vol 3, chapter 7.4.13.5 */ |
1278 | printk(KERN_INFO "spurious APIC interrupt on CPU#%d, " | 1278 | printk(KERN_INFO "spurious APIC interrupt on CPU#%d, " |
1279 | "should never happen.\n", smp_processor_id()); | 1279 | "should never happen.\n", smp_processor_id()); |
1280 | __get_cpu_var(irq_stat).irq_spurious_count++; | ||
1280 | irq_exit(); | 1281 | irq_exit(); |
1281 | } | 1282 | } |
1282 | 1283 | ||
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index 09b82093bc75..f47bc493dba9 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -974,15 +974,12 @@ void __init setup_boot_APIC_clock (void) | |||
974 | */ | 974 | */ |
975 | void __cpuinit check_boot_apic_timer_broadcast(void) | 975 | void __cpuinit check_boot_apic_timer_broadcast(void) |
976 | { | 976 | { |
977 | struct clock_event_device *levt = &per_cpu(lapic_events, boot_cpu_id); | ||
978 | |||
979 | if (!disable_apic_timer || | 977 | if (!disable_apic_timer || |
980 | (lapic_clockevent.features & CLOCK_EVT_FEAT_DUMMY)) | 978 | (lapic_clockevent.features & CLOCK_EVT_FEAT_DUMMY)) |
981 | return; | 979 | return; |
982 | 980 | ||
983 | printk(KERN_INFO "AMD C1E detected late. Force timer broadcast.\n"); | 981 | printk(KERN_INFO "AMD C1E detected late. Force timer broadcast.\n"); |
984 | lapic_clockevent.features |= CLOCK_EVT_FEAT_DUMMY; | 982 | lapic_clockevent.features |= CLOCK_EVT_FEAT_DUMMY; |
985 | levt->features |= CLOCK_EVT_FEAT_DUMMY; | ||
986 | 983 | ||
987 | local_irq_enable(); | 984 | local_irq_enable(); |
988 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE, &boot_cpu_id); | 985 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE, &boot_cpu_id); |
@@ -1143,6 +1140,7 @@ asmlinkage void smp_spurious_interrupt(void) | |||
1143 | if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f))) | 1140 | if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f))) |
1144 | ack_APIC_irq(); | 1141 | ack_APIC_irq(); |
1145 | 1142 | ||
1143 | add_pda(irq_spurious_count, 1); | ||
1146 | irq_exit(); | 1144 | irq_exit(); |
1147 | } | 1145 | } |
1148 | 1146 | ||
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index dcf6bbb1c7c0..5f8af875f457 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -4,6 +4,7 @@ | |||
4 | #include <asm/io.h> | 4 | #include <asm/io.h> |
5 | #include <asm/processor.h> | 5 | #include <asm/processor.h> |
6 | #include <asm/apic.h> | 6 | #include <asm/apic.h> |
7 | #include <asm/mach_apic.h> | ||
7 | 8 | ||
8 | #include "cpu.h" | 9 | #include "cpu.h" |
9 | 10 | ||
@@ -45,13 +46,17 @@ static __cpuinit int amd_apic_timer_broken(void) | |||
45 | case CPUID_XFAM_10H: | 46 | case CPUID_XFAM_10H: |
46 | case CPUID_XFAM_11H: | 47 | case CPUID_XFAM_11H: |
47 | rdmsr(MSR_K8_ENABLE_C1E, lo, hi); | 48 | rdmsr(MSR_K8_ENABLE_C1E, lo, hi); |
48 | if (lo & ENABLE_C1E_MASK) | 49 | if (lo & ENABLE_C1E_MASK) { |
50 | if (smp_processor_id() != boot_cpu_physical_apicid) | ||
51 | printk(KERN_INFO "AMD C1E detected late. " | ||
52 | " Force timer broadcast.\n"); | ||
49 | return 1; | 53 | return 1; |
50 | break; | 54 | } |
51 | default: | 55 | break; |
52 | /* err on the side of caution */ | 56 | default: |
57 | /* err on the side of caution */ | ||
53 | return 1; | 58 | return 1; |
54 | } | 59 | } |
55 | return 0; | 60 | return 0; |
56 | } | 61 | } |
57 | #endif | 62 | #endif |
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
index 7decd6a50ffa..f3686a5f2308 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
@@ -565,7 +565,7 @@ static unsigned int powernow_get(unsigned int cpu) | |||
565 | } | 565 | } |
566 | 566 | ||
567 | 567 | ||
568 | static int __init acer_cpufreq_pst(struct dmi_system_id *d) | 568 | static int __init acer_cpufreq_pst(const struct dmi_system_id *d) |
569 | { | 569 | { |
570 | printk(KERN_WARNING "%s laptop with broken PST tables in BIOS detected.\n", d->ident); | 570 | printk(KERN_WARNING "%s laptop with broken PST tables in BIOS detected.\n", d->ident); |
571 | printk(KERN_WARNING "You need to downgrade to 3A21 (09/09/2002), or try a newer BIOS than 3A71 (01/20/2003)\n"); | 571 | printk(KERN_WARNING "You need to downgrade to 3A21 (09/09/2002), or try a newer BIOS than 3A71 (01/20/2003)\n"); |
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index dc4e08147b1f..cc8c501b9f39 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
9 | 9 | ||
10 | #include <asm/processor.h> | 10 | #include <asm/processor.h> |
11 | #include <asm/pgtable.h> | ||
11 | #include <asm/msr.h> | 12 | #include <asm/msr.h> |
12 | #include <asm/uaccess.h> | 13 | #include <asm/uaccess.h> |
13 | 14 | ||
@@ -19,8 +20,6 @@ | |||
19 | #include <mach_apic.h> | 20 | #include <mach_apic.h> |
20 | #endif | 21 | #endif |
21 | 22 | ||
22 | extern int trap_init_f00f_bug(void); | ||
23 | |||
24 | #ifdef CONFIG_X86_INTEL_USERCOPY | 23 | #ifdef CONFIG_X86_INTEL_USERCOPY |
25 | /* | 24 | /* |
26 | * Alignment at which movsl is preferred for bulk memory copies. | 25 | * Alignment at which movsl is preferred for bulk memory copies. |
@@ -95,6 +94,20 @@ static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c) | |||
95 | return 1; | 94 | return 1; |
96 | } | 95 | } |
97 | 96 | ||
97 | #ifdef CONFIG_X86_F00F_BUG | ||
98 | static void __cpuinit trap_init_f00f_bug(void) | ||
99 | { | ||
100 | __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO); | ||
101 | |||
102 | /* | ||
103 | * Update the IDT descriptor and reload the IDT so that | ||
104 | * it uses the read-only mapped virtual address. | ||
105 | */ | ||
106 | idt_descr.address = fix_to_virt(FIX_F00F_IDT); | ||
107 | load_idt(&idt_descr); | ||
108 | } | ||
109 | #endif | ||
110 | |||
98 | static void __cpuinit init_intel(struct cpuinfo_x86 *c) | 111 | static void __cpuinit init_intel(struct cpuinfo_x86 *c) |
99 | { | 112 | { |
100 | unsigned int l2 = 0; | 113 | unsigned int l2 = 0; |
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index db6c25aa5776..1826395ebeeb 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -170,15 +170,15 @@ union l3_cache { | |||
170 | unsigned val; | 170 | unsigned val; |
171 | }; | 171 | }; |
172 | 172 | ||
173 | static const unsigned short assocs[] = { | 173 | static unsigned short assocs[] __cpuinitdata = { |
174 | [1] = 1, [2] = 2, [4] = 4, [6] = 8, | 174 | [1] = 1, [2] = 2, [4] = 4, [6] = 8, |
175 | [8] = 16, [0xa] = 32, [0xb] = 48, | 175 | [8] = 16, [0xa] = 32, [0xb] = 48, |
176 | [0xc] = 64, | 176 | [0xc] = 64, |
177 | [0xf] = 0xffff // ?? | 177 | [0xf] = 0xffff // ?? |
178 | }; | 178 | }; |
179 | 179 | ||
180 | static const unsigned char levels[] = { 1, 1, 2, 3 }; | 180 | static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 }; |
181 | static const unsigned char types[] = { 1, 2, 3, 3 }; | 181 | static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 }; |
182 | 182 | ||
183 | static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | 183 | static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, |
184 | union _cpuid4_leaf_ebx *ebx, | 184 | union _cpuid4_leaf_ebx *ebx, |
@@ -493,8 +493,8 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) | |||
493 | } | 493 | } |
494 | } | 494 | } |
495 | #else | 495 | #else |
496 | static void __init cache_shared_cpu_map_setup(unsigned int cpu, int index) {} | 496 | static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {} |
497 | static void __init cache_remove_shared_cpu_map(unsigned int cpu, int index) {} | 497 | static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {} |
498 | #endif | 498 | #endif |
499 | 499 | ||
500 | static void free_cache_attributes(unsigned int cpu) | 500 | static void free_cache_attributes(unsigned int cpu) |
@@ -794,8 +794,9 @@ static int __cpuinit cache_sysfs_init(void) | |||
794 | register_hotcpu_notifier(&cacheinfo_cpu_notifier); | 794 | register_hotcpu_notifier(&cacheinfo_cpu_notifier); |
795 | 795 | ||
796 | for_each_online_cpu(i) { | 796 | for_each_online_cpu(i) { |
797 | cacheinfo_cpu_callback(&cacheinfo_cpu_notifier, CPU_ONLINE, | 797 | struct sys_device *sys_dev = get_cpu_sysdev((unsigned int)i); |
798 | (void *)(long)i); | 798 | |
799 | cache_add_dev(sys_dev); | ||
799 | } | 800 | } |
800 | 801 | ||
801 | return 0; | 802 | return 0; |
diff --git a/arch/x86/kernel/cpu/mcheck/p4.c b/arch/x86/kernel/cpu/mcheck/p4.c
index 1509edfb2313..be4dabfee1f5 100644
--- a/arch/x86/kernel/cpu/mcheck/p4.c
+++ b/arch/x86/kernel/cpu/mcheck/p4.c
@@ -61,6 +61,7 @@ fastcall void smp_thermal_interrupt(struct pt_regs *regs) | |||
61 | { | 61 | { |
62 | irq_enter(); | 62 | irq_enter(); |
63 | vendor_thermal_interrupt(regs); | 63 | vendor_thermal_interrupt(regs); |
64 | __get_cpu_var(irq_stat).irq_thermal_count++; | ||
64 | irq_exit(); | 65 | irq_exit(); |
65 | } | 66 | } |
66 | 67 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 1203dc5ab87a..494d320d909b 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -152,7 +152,7 @@ static __cpuinit int thermal_throttle_cpu_callback(struct notifier_block *nfb, | |||
152 | return NOTIFY_OK; | 152 | return NOTIFY_OK; |
153 | } | 153 | } |
154 | 154 | ||
155 | static struct notifier_block thermal_throttle_cpu_notifier = | 155 | static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata = |
156 | { | 156 | { |
157 | .notifier_call = thermal_throttle_cpu_callback, | 157 | .notifier_call = thermal_throttle_cpu_callback, |
158 | }; | 158 | }; |
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index c48b6fea5ab4..5e4be30ff903 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -738,13 +738,7 @@ void mtrr_ap_init(void) | |||
738 | */ | 738 | */ |
739 | void mtrr_save_state(void) | 739 | void mtrr_save_state(void) |
740 | { | 740 | { |
741 | int cpu = get_cpu(); | 741 | smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1); |
742 | |||
743 | if (cpu == 0) | ||
744 | mtrr_save_fixed_ranges(NULL); | ||
745 | else | ||
746 | smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1); | ||
747 | put_cpu(); | ||
748 | } | 742 | } |
749 | 743 | ||
750 | static int __init mtrr_init_finialize(void) | 744 | static int __init mtrr_init_finialize(void) |
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 93fecd4b03de..54cdbf1a40f1 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -34,7 +34,7 @@ struct wd_ops { | |||
34 | u64 checkbit; | 34 | u64 checkbit; |
35 | }; | 35 | }; |
36 | 36 | ||
37 | static struct wd_ops *wd_ops; | 37 | static const struct wd_ops *wd_ops; |
38 | 38 | ||
39 | /* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's | 39 | /* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's |
40 | * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now) | 40 | * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now) |
@@ -317,7 +317,7 @@ static void single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) | |||
317 | write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz); | 317 | write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz); |
318 | } | 318 | } |
319 | 319 | ||
320 | static struct wd_ops k7_wd_ops = { | 320 | static const struct wd_ops k7_wd_ops = { |
321 | .reserve = single_msr_reserve, | 321 | .reserve = single_msr_reserve, |
322 | .unreserve = single_msr_unreserve, | 322 | .unreserve = single_msr_unreserve, |
323 | .setup = setup_k7_watchdog, | 323 | .setup = setup_k7_watchdog, |
@@ -380,7 +380,7 @@ static void p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) | |||
380 | write_watchdog_counter32(wd->perfctr_msr, NULL,nmi_hz); | 380 | write_watchdog_counter32(wd->perfctr_msr, NULL,nmi_hz); |
381 | } | 381 | } |
382 | 382 | ||
383 | static struct wd_ops p6_wd_ops = { | 383 | static const struct wd_ops p6_wd_ops = { |
384 | .reserve = single_msr_reserve, | 384 | .reserve = single_msr_reserve, |
385 | .unreserve = single_msr_unreserve, | 385 | .unreserve = single_msr_unreserve, |
386 | .setup = setup_p6_watchdog, | 386 | .setup = setup_p6_watchdog, |
@@ -532,7 +532,7 @@ static void p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) | |||
532 | write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz); | 532 | write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz); |
533 | } | 533 | } |
534 | 534 | ||
535 | static struct wd_ops p4_wd_ops = { | 535 | static const struct wd_ops p4_wd_ops = { |
536 | .reserve = p4_reserve, | 536 | .reserve = p4_reserve, |
537 | .unreserve = p4_unreserve, | 537 | .unreserve = p4_unreserve, |
538 | .setup = setup_p4_watchdog, | 538 | .setup = setup_p4_watchdog, |
@@ -550,6 +550,8 @@ static struct wd_ops p4_wd_ops = { | |||
550 | #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL | 550 | #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL |
551 | #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK | 551 | #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK |
552 | 552 | ||
553 | static struct wd_ops intel_arch_wd_ops; | ||
554 | |||
553 | static int setup_intel_arch_watchdog(unsigned nmi_hz) | 555 | static int setup_intel_arch_watchdog(unsigned nmi_hz) |
554 | { | 556 | { |
555 | unsigned int ebx; | 557 | unsigned int ebx; |
@@ -591,11 +593,11 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz) | |||
591 | wd->perfctr_msr = perfctr_msr; | 593 | wd->perfctr_msr = perfctr_msr; |
592 | wd->evntsel_msr = evntsel_msr; | 594 | wd->evntsel_msr = evntsel_msr; |
593 | wd->cccr_msr = 0; //unused | 595 | wd->cccr_msr = 0; //unused |
594 | wd_ops->checkbit = 1ULL << (eax.split.bit_width - 1); | 596 | intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1); |
595 | return 1; | 597 | return 1; |
596 | } | 598 | } |
597 | 599 | ||
598 | static struct wd_ops intel_arch_wd_ops = { | 600 | static struct wd_ops intel_arch_wd_ops __read_mostly = { |
599 | .reserve = single_msr_reserve, | 601 | .reserve = single_msr_reserve, |
600 | .unreserve = single_msr_unreserve, | 602 | .unreserve = single_msr_unreserve, |
601 | .setup = setup_intel_arch_watchdog, | 603 | .setup = setup_intel_arch_watchdog, |
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index f4548c93ccf5..70dcf912d9fb 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -43,8 +43,6 @@ | |||
43 | 43 | ||
44 | static struct class *cpuid_class; | 44 | static struct class *cpuid_class; |
45 | 45 | ||
46 | #ifdef CONFIG_SMP | ||
47 | |||
48 | struct cpuid_command { | 46 | struct cpuid_command { |
49 | u32 reg; | 47 | u32 reg; |
50 | u32 *data; | 48 | u32 *data; |
@@ -62,25 +60,11 @@ static inline void do_cpuid(int cpu, u32 reg, u32 * data) | |||
62 | { | 60 | { |
63 | struct cpuid_command cmd; | 61 | struct cpuid_command cmd; |
64 | 62 | ||
65 | preempt_disable(); | 63 | cmd.reg = reg; |
66 | if (cpu == smp_processor_id()) { | 64 | cmd.data = data; |
67 | cpuid(reg, &data[0], &data[1], &data[2], &data[3]); | ||
68 | } else { | ||
69 | cmd.reg = reg; | ||
70 | cmd.data = data; | ||
71 | 65 | ||
72 | smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1, 1); | 66 | smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1, 1); |
73 | } | ||
74 | preempt_enable(); | ||
75 | } | 67 | } |
76 | #else /* ! CONFIG_SMP */ | ||
77 | |||
78 | static inline void do_cpuid(int cpu, u32 reg, u32 * data) | ||
79 | { | ||
80 | cpuid(reg, &data[0], &data[1], &data[2], &data[3]); | ||
81 | } | ||
82 | |||
83 | #endif /* ! CONFIG_SMP */ | ||
84 | 68 | ||
85 | static loff_t cpuid_seek(struct file *file, loff_t offset, int orig) | 69 | static loff_t cpuid_seek(struct file *file, loff_t offset, int orig) |
86 | { | 70 | { |
@@ -150,7 +134,7 @@ static const struct file_operations cpuid_fops = { | |||
150 | .open = cpuid_open, | 134 | .open = cpuid_open, |
151 | }; | 135 | }; |
152 | 136 | ||
153 | static int cpuid_device_create(int i) | 137 | static int __cpuinit cpuid_device_create(int i) |
154 | { | 138 | { |
155 | int err = 0; | 139 | int err = 0; |
156 | struct device *dev; | 140 | struct device *dev; |
@@ -161,7 +145,9 @@ static int cpuid_device_create(int i) | |||
161 | return err; | 145 | return err; |
162 | } | 146 | } |
163 | 147 | ||
164 | static int cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) | 148 | static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb, |
149 | unsigned long action, | ||
150 | void *hcpu) | ||
165 | { | 151 | { |
166 | unsigned int cpu = (unsigned long)hcpu; | 152 | unsigned int cpu = (unsigned long)hcpu; |
167 | 153 | ||
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index f1cacd4897f7..3a058bb16409 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -988,7 +988,7 @@ child_rip: | |||
988 | movq %rsi, %rdi | 988 | movq %rsi, %rdi |
989 | call *%rax | 989 | call *%rax |
990 | # exit | 990 | # exit |
991 | xorl %edi, %edi | 991 | mov %eax, %edi |
992 | call do_exit | 992 | call do_exit |
993 | CFI_ENDPROC | 993 | CFI_ENDPROC |
994 | ENDPROC(child_rip) | 994 | ENDPROC(child_rip) |
diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c
index 47496a40e84f..4ae03e3e8294 100644
--- a/arch/x86/kernel/genapic_64.c
+++ b/arch/x86/kernel/genapic_64.c
@@ -29,8 +29,6 @@ u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly | |||
29 | = { [0 ... NR_CPUS-1] = BAD_APICID }; | 29 | = { [0 ... NR_CPUS-1] = BAD_APICID }; |
30 | EXPORT_SYMBOL(x86_cpu_to_apicid); | 30 | EXPORT_SYMBOL(x86_cpu_to_apicid); |
31 | 31 | ||
32 | u8 x86_cpu_to_log_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID }; | ||
33 | |||
34 | struct genapic __read_mostly *genapic = &apic_flat; | 32 | struct genapic __read_mostly *genapic = &apic_flat; |
35 | 33 | ||
36 | /* | 34 | /* |
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c
index ecb01eefdd27..91c7526768ee 100644
--- a/arch/x86/kernel/genapic_flat_64.c
+++ b/arch/x86/kernel/genapic_flat_64.c
@@ -52,7 +52,6 @@ static void flat_init_apic_ldr(void) | |||
52 | 52 | ||
53 | num = smp_processor_id(); | 53 | num = smp_processor_id(); |
54 | id = 1UL << num; | 54 | id = 1UL << num; |
55 | x86_cpu_to_log_apicid[num] = id; | ||
56 | apic_write(APIC_DFR, APIC_DFR_FLAT); | 55 | apic_write(APIC_DFR, APIC_DFR_FLAT); |
57 | val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; | 56 | val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; |
58 | val |= SET_APIC_LOGICAL_ID(id); | 57 | val |= SET_APIC_LOGICAL_ID(id); |
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 9150ca9b5f80..39677965e161 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -51,6 +51,15 @@ | |||
51 | */ | 51 | */ |
52 | LOW_PAGES = 1<<(32-PAGE_SHIFT_asm) | 52 | LOW_PAGES = 1<<(32-PAGE_SHIFT_asm) |
53 | 53 | ||
54 | /* | ||
55 | * To preserve the DMA pool in PAGEALLOC kernels, we'll allocate | ||
56 | * pagetables from above the 16MB DMA limit, so we'll have to set | ||
57 | * up pagetables 16MB more (worst-case): | ||
58 | */ | ||
59 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
60 | LOW_PAGES = LOW_PAGES + 0x1000000 | ||
61 | #endif | ||
62 | |||
54 | #if PTRS_PER_PMD > 1 | 63 | #if PTRS_PER_PMD > 1 |
55 | PAGE_TABLE_SIZE = (LOW_PAGES / PTRS_PER_PMD) + PTRS_PER_PGD | 64 | PAGE_TABLE_SIZE = (LOW_PAGES / PTRS_PER_PMD) + PTRS_PER_PGD |
56 | #else | 65 | #else |
@@ -443,6 +452,7 @@ early_page_fault: | |||
443 | early_fault: | 452 | early_fault: |
444 | cld | 453 | cld |
445 | #ifdef CONFIG_PRINTK | 454 | #ifdef CONFIG_PRINTK |
455 | pusha | ||
446 | movl $(__KERNEL_DS),%eax | 456 | movl $(__KERNEL_DS),%eax |
447 | movl %eax,%ds | 457 | movl %eax,%ds |
448 | movl %eax,%es | 458 | movl %eax,%es |
@@ -534,8 +544,15 @@ int_msg: | |||
534 | .asciz "Unknown interrupt or fault at EIP %p %p %p\n" | 544 | .asciz "Unknown interrupt or fault at EIP %p %p %p\n" |
535 | 545 | ||
536 | fault_msg: | 546 | fault_msg: |
537 | .ascii "Int %d: CR2 %p err %p EIP %p CS %p flags %p\n" | 547 | .ascii \ |
538 | .asciz "Stack: %p %p %p %p %p %p %p %p\n" | 548 | /* fault info: */ "BUG: Int %d: CR2 %p\n" \ |
549 | /* pusha regs: */ " EDI %p ESI %p EBP %p ESP %p\n" \ | ||
550 | " EBX %p EDX %p ECX %p EAX %p\n" \ | ||
551 | /* fault frame: */ " err %p EIP %p CS %p flg %p\n" \ | ||
552 | \ | ||
553 | "Stack: %p %p %p %p %p %p %p %p\n" \ | ||
554 | " %p %p %p %p %p %p %p %p\n" \ | ||
555 | " %p %p %p %p %p %p %p %p\n" | ||
539 | 556 | ||
540 | #include "../../x86/xen/xen-head.S" | 557 | #include "../../x86/xen/xen-head.S" |
541 | 558 | ||
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
index e3d4b73bfdb0..edd39ccf139e 100644
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ b/arch/x86/kernel/i386_ksyms_32.c
@@ -1,4 +1,5 @@ | |||
1 | #include <linux/module.h> | 1 | #include <linux/module.h> |
2 | #include <asm/semaphore.h> | ||
2 | #include <asm/checksum.h> | 3 | #include <asm/checksum.h> |
3 | #include <asm/desc.h> | 4 | #include <asm/desc.h> |
4 | 5 | ||
diff --git a/arch/x86/kernel/i8259_32.c b/arch/x86/kernel/i8259_32.c
index 679bb33acbf1..d34a10cc13a7 100644
--- a/arch/x86/kernel/i8259_32.c
+++ b/arch/x86/kernel/i8259_32.c
@@ -349,7 +349,11 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id) | |||
349 | * New motherboards sometimes make IRQ 13 be a PCI interrupt, | 349 | * New motherboards sometimes make IRQ 13 be a PCI interrupt, |
350 | * so allow interrupt sharing. | 350 | * so allow interrupt sharing. |
351 | */ | 351 | */ |
352 | static struct irqaction fpu_irq = { math_error_irq, 0, CPU_MASK_NONE, "fpu", NULL, NULL }; | 352 | static struct irqaction fpu_irq = { |
353 | .handler = math_error_irq, | ||
354 | .mask = CPU_MASK_NONE, | ||
355 | .name = "fpu", | ||
356 | }; | ||
353 | 357 | ||
354 | void __init init_ISA_irqs (void) | 358 | void __init init_ISA_irqs (void) |
355 | { | 359 | { |
diff --git a/arch/x86/kernel/i8259_64.c b/arch/x86/kernel/i8259_64.c
index eb72976cc13c..3f27ea0b9816 100644
--- a/arch/x86/kernel/i8259_64.c
+++ b/arch/x86/kernel/i8259_64.c
@@ -395,7 +395,11 @@ device_initcall(i8259A_init_sysfs); | |||
395 | * IRQ2 is cascade interrupt to second interrupt controller | 395 | * IRQ2 is cascade interrupt to second interrupt controller |
396 | */ | 396 | */ |
397 | 397 | ||
398 | static struct irqaction irq2 = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL}; | 398 | static struct irqaction irq2 = { |
399 | .handler = no_action, | ||
400 | .mask = CPU_MASK_NONE, | ||
401 | .name = "cascade", | ||
402 | }; | ||
399 | DEFINE_PER_CPU(vector_irq_t, vector_irq) = { | 403 | DEFINE_PER_CPU(vector_irq_t, vector_irq) = { |
400 | [0 ... IRQ0_VECTOR - 1] = -1, | 404 | [0 ... IRQ0_VECTOR - 1] = -1, |
401 | [IRQ0_VECTOR] = 0, | 405 | [IRQ0_VECTOR] = 0, |
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index 4ee1e5ee9b57..5f10c7189534 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -1296,6 +1296,11 @@ static void __init setup_IO_APIC_irqs(void) | |||
1296 | continue; | 1296 | continue; |
1297 | } | 1297 | } |
1298 | 1298 | ||
1299 | if (!first_notcon) { | ||
1300 | apic_printk(APIC_VERBOSE, " not connected.\n"); | ||
1301 | first_notcon = 1; | ||
1302 | } | ||
1303 | |||
1299 | entry.trigger = irq_trigger(idx); | 1304 | entry.trigger = irq_trigger(idx); |
1300 | entry.polarity = irq_polarity(idx); | 1305 | entry.polarity = irq_polarity(idx); |
1301 | 1306 | ||
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 966fa1062491..1c2c7bf6a9d3 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -875,6 +875,10 @@ static void __init setup_IO_APIC_irqs(void) | |||
875 | apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mpc_apicid, pin); | 875 | apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mpc_apicid, pin); |
876 | continue; | 876 | continue; |
877 | } | 877 | } |
878 | if (!first_notcon) { | ||
879 | apic_printk(APIC_VERBOSE, " not connected.\n"); | ||
880 | first_notcon = 1; | ||
881 | } | ||
878 | 882 | ||
879 | irq = pin_2_irq(idx, apic, pin); | 883 | irq = pin_2_irq(idx, apic, pin); |
880 | add_pin_to_irq(irq, apic, pin); | 884 | add_pin_to_irq(irq, apic, pin); |
@@ -885,7 +889,7 @@ static void __init setup_IO_APIC_irqs(void) | |||
885 | } | 889 | } |
886 | 890 | ||
887 | if (!first_notcon) | 891 | if (!first_notcon) |
888 | apic_printk(APIC_VERBOSE," not connected.\n"); | 892 | apic_printk(APIC_VERBOSE, " not connected.\n"); |
889 | } | 893 | } |
890 | 894 | ||
891 | /* | 895 | /* |
@@ -1845,7 +1849,7 @@ static struct sysdev_class ioapic_sysdev_class = { | |||
1845 | static int __init ioapic_init_sysfs(void) | 1849 | static int __init ioapic_init_sysfs(void) |
1846 | { | 1850 | { |
1847 | struct sys_device * dev; | 1851 | struct sys_device * dev; |
1848 | int i, size, error = 0; | 1852 | int i, size, error; |
1849 | 1853 | ||
1850 | error = sysdev_class_register(&ioapic_sysdev_class); | 1854 | error = sysdev_class_register(&ioapic_sysdev_class); |
1851 | if (error) | 1855 | if (error) |
@@ -1854,12 +1858,11 @@ static int __init ioapic_init_sysfs(void) | |||
1854 | for (i = 0; i < nr_ioapics; i++ ) { | 1858 | for (i = 0; i < nr_ioapics; i++ ) { |
1855 | size = sizeof(struct sys_device) + nr_ioapic_registers[i] | 1859 | size = sizeof(struct sys_device) + nr_ioapic_registers[i] |
1856 | * sizeof(struct IO_APIC_route_entry); | 1860 | * sizeof(struct IO_APIC_route_entry); |
1857 | mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL); | 1861 | mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL); |
1858 | if (!mp_ioapic_data[i]) { | 1862 | if (!mp_ioapic_data[i]) { |
1859 | printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i); | 1863 | printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i); |
1860 | continue; | 1864 | continue; |
1861 | } | 1865 | } |
1862 | memset(mp_ioapic_data[i], 0, size); | ||
1863 | dev = &mp_ioapic_data[i]->dev; | 1866 | dev = &mp_ioapic_data[i]->dev; |
1864 | dev->id = i; | 1867 | dev->id = i; |
1865 | dev->cls = &ioapic_sysdev_class; | 1868 | dev->cls = &ioapic_sysdev_class; |
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index e173b763f148..d3fde94f7345 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -255,9 +255,17 @@ int show_interrupts(struct seq_file *p, void *v) | |||
255 | } | 255 | } |
256 | 256 | ||
257 | if (i < NR_IRQS) { | 257 | if (i < NR_IRQS) { |
258 | unsigned any_count = 0; | ||
259 | |||
258 | spin_lock_irqsave(&irq_desc[i].lock, flags); | 260 | spin_lock_irqsave(&irq_desc[i].lock, flags); |
261 | #ifndef CONFIG_SMP | ||
262 | any_count = kstat_irqs(i); | ||
263 | #else | ||
264 | for_each_online_cpu(j) | ||
265 | any_count |= kstat_cpu(j).irqs[i]; | ||
266 | #endif | ||
259 | action = irq_desc[i].action; | 267 | action = irq_desc[i].action; |
260 | if (!action) | 268 | if (!action && !any_count) |
261 | goto skip; | 269 | goto skip; |
262 | seq_printf(p, "%3d: ",i); | 270 | seq_printf(p, "%3d: ",i); |
263 | #ifndef CONFIG_SMP | 271 | #ifndef CONFIG_SMP |
@@ -268,10 +276,12 @@ int show_interrupts(struct seq_file *p, void *v) | |||
268 | #endif | 276 | #endif |
269 | seq_printf(p, " %8s", irq_desc[i].chip->name); | 277 | seq_printf(p, " %8s", irq_desc[i].chip->name); |
270 | seq_printf(p, "-%-8s", irq_desc[i].name); | 278 | seq_printf(p, "-%-8s", irq_desc[i].name); |
271 | seq_printf(p, " %s", action->name); | ||
272 | 279 | ||
273 | for (action=action->next; action; action = action->next) | 280 | if (action) { |
274 | seq_printf(p, ", %s", action->name); | 281 | seq_printf(p, " %s", action->name); |
282 | while ((action = action->next) != NULL) | ||
283 | seq_printf(p, ", %s", action->name); | ||
284 | } | ||
275 | 285 | ||
276 | seq_putc(p, '\n'); | 286 | seq_putc(p, '\n'); |
277 | skip: | 287 | skip: |
@@ -280,14 +290,41 @@ skip: | |||
280 | seq_printf(p, "NMI: "); | 290 | seq_printf(p, "NMI: "); |
281 | for_each_online_cpu(j) | 291 | for_each_online_cpu(j) |
282 | seq_printf(p, "%10u ", nmi_count(j)); | 292 | seq_printf(p, "%10u ", nmi_count(j)); |
283 | seq_putc(p, '\n'); | 293 | seq_printf(p, " Non-maskable interrupts\n"); |
284 | #ifdef CONFIG_X86_LOCAL_APIC | 294 | #ifdef CONFIG_X86_LOCAL_APIC |
285 | seq_printf(p, "LOC: "); | 295 | seq_printf(p, "LOC: "); |
286 | for_each_online_cpu(j) | 296 | for_each_online_cpu(j) |
287 | seq_printf(p, "%10u ", | 297 | seq_printf(p, "%10u ", |
288 | per_cpu(irq_stat,j).apic_timer_irqs); | 298 | per_cpu(irq_stat,j).apic_timer_irqs); |
289 | seq_putc(p, '\n'); | 299 | seq_printf(p, " Local timer interrupts\n"); |
290 | #endif | 300 | #endif |
301 | #ifdef CONFIG_SMP | ||
302 | seq_printf(p, "RES: "); | ||
303 | for_each_online_cpu(j) | ||
304 | seq_printf(p, "%10u ", | ||
305 | per_cpu(irq_stat,j).irq_resched_count); | ||
306 | seq_printf(p, " Rescheduling interrupts\n"); | ||
307 | seq_printf(p, "CAL: "); | ||
308 | for_each_online_cpu(j) | ||
309 | seq_printf(p, "%10u ", | ||
310 | per_cpu(irq_stat,j).irq_call_count); | ||
311 | seq_printf(p, " function call interrupts\n"); | ||
312 | seq_printf(p, "TLB: "); | ||
313 | for_each_online_cpu(j) | ||
314 | seq_printf(p, "%10u ", | ||
315 | per_cpu(irq_stat,j).irq_tlb_count); | ||
316 | seq_printf(p, " TLB shootdowns\n"); | ||
317 | #endif | ||
318 | seq_printf(p, "TRM: "); | ||
319 | for_each_online_cpu(j) | ||
320 | seq_printf(p, "%10u ", | ||
321 | per_cpu(irq_stat,j).irq_thermal_count); | ||
322 | seq_printf(p, " Thermal event interrupts\n"); | ||
323 | seq_printf(p, "SPU: "); | ||
324 | for_each_online_cpu(j) | ||
325 | seq_printf(p, "%10u ", | ||
326 | per_cpu(irq_stat,j).irq_spurious_count); | ||
327 | seq_printf(p, " Spurious interrupts\n"); | ||
291 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); | 328 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); |
292 | #if defined(CONFIG_X86_IO_APIC) | 329 | #if defined(CONFIG_X86_IO_APIC) |
293 | seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count)); | 330 | seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count)); |
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 865669efc540..6b5c730d67b9 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -62,9 +62,17 @@ int show_interrupts(struct seq_file *p, void *v) | |||
62 | } | 62 | } |
63 | 63 | ||
64 | if (i < NR_IRQS) { | 64 | if (i < NR_IRQS) { |
65 | unsigned any_count = 0; | ||
66 | |||
65 | spin_lock_irqsave(&irq_desc[i].lock, flags); | 67 | spin_lock_irqsave(&irq_desc[i].lock, flags); |
68 | #ifndef CONFIG_SMP | ||
69 | any_count = kstat_irqs(i); | ||
70 | #else | ||
71 | for_each_online_cpu(j) | ||
72 | any_count |= kstat_cpu(j).irqs[i]; | ||
73 | #endif | ||
66 | action = irq_desc[i].action; | 74 | action = irq_desc[i].action; |
67 | if (!action) | 75 | if (!action && !any_count) |
68 | goto skip; | 76 | goto skip; |
69 | seq_printf(p, "%3d: ",i); | 77 | seq_printf(p, "%3d: ",i); |
70 | #ifndef CONFIG_SMP | 78 | #ifndef CONFIG_SMP |
@@ -76,9 +84,11 @@ int show_interrupts(struct seq_file *p, void *v) | |||
76 | seq_printf(p, " %8s", irq_desc[i].chip->name); | 84 | seq_printf(p, " %8s", irq_desc[i].chip->name); |
77 | seq_printf(p, "-%-8s", irq_desc[i].name); | 85 | seq_printf(p, "-%-8s", irq_desc[i].name); |
78 | 86 | ||
79 | seq_printf(p, " %s", action->name); | 87 | if (action) { |
80 | for (action=action->next; action; action = action->next) | 88 | seq_printf(p, " %s", action->name); |
81 | seq_printf(p, ", %s", action->name); | 89 | while ((action = action->next) != NULL) |
90 | seq_printf(p, ", %s", action->name); | ||
91 | } | ||
82 | seq_putc(p, '\n'); | 92 | seq_putc(p, '\n'); |
83 | skip: | 93 | skip: |
84 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); | 94 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); |
@@ -86,11 +96,37 @@ skip: | |||
86 | seq_printf(p, "NMI: "); | 96 | seq_printf(p, "NMI: "); |
87 | for_each_online_cpu(j) | 97 | for_each_online_cpu(j) |
88 | seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count); | 98 | seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count); |
89 | seq_putc(p, '\n'); | 99 | seq_printf(p, " Non-maskable interrupts\n"); |
90 | seq_printf(p, "LOC: "); | 100 | seq_printf(p, "LOC: "); |
91 | for_each_online_cpu(j) | 101 | for_each_online_cpu(j) |
92 | seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs); | 102 | seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs); |
93 | seq_putc(p, '\n'); | 103 | seq_printf(p, " Local timer interrupts\n"); |
104 | #ifdef CONFIG_SMP | ||
105 | seq_printf(p, "RES: "); | ||
106 | for_each_online_cpu(j) | ||
107 | seq_printf(p, "%10u ", cpu_pda(j)->irq_resched_count); | ||
108 | seq_printf(p, " Rescheduling interrupts\n"); | ||
109 | seq_printf(p, "CAL: "); | ||
110 | for_each_online_cpu(j) | ||
111 | seq_printf(p, "%10u ", cpu_pda(j)->irq_call_count); | ||
112 | seq_printf(p, " function call interrupts\n"); | ||
113 | seq_printf(p, "TLB: "); | ||
114 | for_each_online_cpu(j) | ||
115 | seq_printf(p, "%10u ", cpu_pda(j)->irq_tlb_count); | ||
116 | seq_printf(p, " TLB shootdowns\n"); | ||
117 | #endif | ||
118 | seq_printf(p, "TRM: "); | ||
119 | for_each_online_cpu(j) | ||
120 | seq_printf(p, "%10u ", cpu_pda(j)->irq_thermal_count); | ||
121 | seq_printf(p, " Thermal event interrupts\n"); | ||
122 | seq_printf(p, "THR: "); | ||
123 | for_each_online_cpu(j) | ||
124 | seq_printf(p, "%10u ", cpu_pda(j)->irq_threshold_count); | ||
125 | seq_printf(p, " Threshold APIC interrupts\n"); | ||
126 | seq_printf(p, "SPU: "); | ||
127 | for_each_online_cpu(j) | ||
128 | seq_printf(p, "%10u ", cpu_pda(j)->irq_spurious_count); | ||
129 | seq_printf(p, " Spurious interrupts\n"); | ||
94 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); | 130 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); |
95 | } | 131 | } |
96 | return 0; | 132 | return 0; |
diff --git a/arch/x86/kernel/ldt_32.c b/arch/x86/kernel/ldt_32.c
index a8b18421863a..9ff90a27c45f 100644
--- a/arch/x86/kernel/ldt_32.c
+++ b/arch/x86/kernel/ldt_32.c
@@ -92,13 +92,13 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) | |||
92 | struct mm_struct * old_mm; | 92 | struct mm_struct * old_mm; |
93 | int retval = 0; | 93 | int retval = 0; |
94 | 94 | ||
95 | init_MUTEX(&mm->context.sem); | 95 | mutex_init(&mm->context.lock); |
96 | mm->context.size = 0; | 96 | mm->context.size = 0; |
97 | old_mm = current->mm; | 97 | old_mm = current->mm; |
98 | if (old_mm && old_mm->context.size > 0) { | 98 | if (old_mm && old_mm->context.size > 0) { |
99 | down(&old_mm->context.sem); | 99 | mutex_lock(&old_mm->context.lock); |
100 | retval = copy_ldt(&mm->context, &old_mm->context); | 100 | retval = copy_ldt(&mm->context, &old_mm->context); |
101 | up(&old_mm->context.sem); | 101 | mutex_unlock(&old_mm->context.lock); |
102 | } | 102 | } |
103 | return retval; | 103 | return retval; |
104 | } | 104 | } |
@@ -130,7 +130,7 @@ static int read_ldt(void __user * ptr, unsigned long bytecount) | |||
130 | if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES) | 130 | if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES) |
131 | bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES; | 131 | bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES; |
132 | 132 | ||
133 | down(&mm->context.sem); | 133 | mutex_lock(&mm->context.lock); |
134 | size = mm->context.size*LDT_ENTRY_SIZE; | 134 | size = mm->context.size*LDT_ENTRY_SIZE; |
135 | if (size > bytecount) | 135 | if (size > bytecount) |
136 | size = bytecount; | 136 | size = bytecount; |
@@ -138,7 +138,7 @@ static int read_ldt(void __user * ptr, unsigned long bytecount) | |||
138 | err = 0; | 138 | err = 0; |
139 | if (copy_to_user(ptr, mm->context.ldt, size)) | 139 | if (copy_to_user(ptr, mm->context.ldt, size)) |
140 | err = -EFAULT; | 140 | err = -EFAULT; |
141 | up(&mm->context.sem); | 141 | mutex_unlock(&mm->context.lock); |
142 | if (err < 0) | 142 | if (err < 0) |
143 | goto error_return; | 143 | goto error_return; |
144 | if (size != bytecount) { | 144 | if (size != bytecount) { |
@@ -194,7 +194,7 @@ static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode) | |||
194 | goto out; | 194 | goto out; |
195 | } | 195 | } |
196 | 196 | ||
197 | down(&mm->context.sem); | 197 | mutex_lock(&mm->context.lock); |
198 | if (ldt_info.entry_number >= mm->context.size) { | 198 | if (ldt_info.entry_number >= mm->context.size) { |
199 | error = alloc_ldt(¤t->mm->context, ldt_info.entry_number+1, 1); | 199 | error = alloc_ldt(¤t->mm->context, ldt_info.entry_number+1, 1); |
200 | if (error < 0) | 200 | if (error < 0) |
@@ -221,7 +221,7 @@ install: | |||
221 | error = 0; | 221 | error = 0; |
222 | 222 | ||
223 | out_unlock: | 223 | out_unlock: |
224 | up(&mm->context.sem); | 224 | mutex_unlock(&mm->context.lock); |
225 | out: | 225 | out: |
226 | return error; | 226 | return error; |
227 | } | 227 | } |
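The locking change above swaps an init_MUTEX()-style semaphore, which was only ever used as a binary sleeping lock, for the dedicated mutex API, picking up lockdep and mutex debugging for free. A minimal sketch of the pattern under that assumption; ldt_context and its fields are illustrative stand-ins for mm->context, not the kernel's actual types:

        #include <linux/mutex.h>

        struct ldt_context {
                struct mutex lock;      /* was: struct semaphore sem */
                int size;
        };

        static void ldt_context_init(struct ldt_context *ctx)
        {
                mutex_init(&ctx->lock);         /* was: init_MUTEX(&ctx->sem) */
                ctx->size = 0;
        }

        static void ldt_context_resize(struct ldt_context *ctx, int newsize)
        {
                mutex_lock(&ctx->lock);         /* was: down(&ctx->sem) */
                ctx->size = newsize;
                mutex_unlock(&ctx->lock);       /* was: up(&ctx->sem) */
        }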
diff --git a/arch/x86/kernel/ldt_64.c b/arch/x86/kernel/ldt_64.c index 3796523d616a..60e57abb8e90 100644 --- a/arch/x86/kernel/ldt_64.c +++ b/arch/x86/kernel/ldt_64.c | |||
@@ -96,13 +96,13 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) | |||
96 | struct mm_struct * old_mm; | 96 | struct mm_struct * old_mm; |
97 | int retval = 0; | 97 | int retval = 0; |
98 | 98 | ||
99 | init_MUTEX(&mm->context.sem); | 99 | mutex_init(&mm->context.lock); |
100 | mm->context.size = 0; | 100 | mm->context.size = 0; |
101 | old_mm = current->mm; | 101 | old_mm = current->mm; |
102 | if (old_mm && old_mm->context.size > 0) { | 102 | if (old_mm && old_mm->context.size > 0) { |
103 | down(&old_mm->context.sem); | 103 | mutex_lock(&old_mm->context.lock); |
104 | retval = copy_ldt(&mm->context, &old_mm->context); | 104 | retval = copy_ldt(&mm->context, &old_mm->context); |
105 | up(&old_mm->context.sem); | 105 | mutex_unlock(&old_mm->context.lock); |
106 | } | 106 | } |
107 | return retval; | 107 | return retval; |
108 | } | 108 | } |
@@ -133,7 +133,7 @@ static int read_ldt(void __user * ptr, unsigned long bytecount) | |||
133 | if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES) | 133 | if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES) |
134 | bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES; | 134 | bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES; |
135 | 135 | ||
136 | down(&mm->context.sem); | 136 | mutex_lock(&mm->context.lock); |
137 | size = mm->context.size*LDT_ENTRY_SIZE; | 137 | size = mm->context.size*LDT_ENTRY_SIZE; |
138 | if (size > bytecount) | 138 | if (size > bytecount) |
139 | size = bytecount; | 139 | size = bytecount; |
@@ -141,7 +141,7 @@ static int read_ldt(void __user * ptr, unsigned long bytecount) | |||
141 | err = 0; | 141 | err = 0; |
142 | if (copy_to_user(ptr, mm->context.ldt, size)) | 142 | if (copy_to_user(ptr, mm->context.ldt, size)) |
143 | err = -EFAULT; | 143 | err = -EFAULT; |
144 | up(&mm->context.sem); | 144 | mutex_unlock(&mm->context.lock); |
145 | if (err < 0) | 145 | if (err < 0) |
146 | goto error_return; | 146 | goto error_return; |
147 | if (size != bytecount) { | 147 | if (size != bytecount) { |
@@ -193,7 +193,7 @@ static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode) | |||
193 | goto out; | 193 | goto out; |
194 | } | 194 | } |
195 | 195 | ||
196 | down(&mm->context.sem); | 196 | mutex_lock(&mm->context.lock); |
197 | if (ldt_info.entry_number >= (unsigned)mm->context.size) { | 197 | if (ldt_info.entry_number >= (unsigned)mm->context.size) { |
198 | error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1); | 198 | error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1); |
199 | if (error < 0) | 199 | if (error < 0) |
@@ -223,7 +223,7 @@ install: | |||
223 | error = 0; | 223 | error = 0; |
224 | 224 | ||
225 | out_unlock: | 225 | out_unlock: |
226 | up(&mm->context.sem); | 226 | mutex_unlock(&mm->context.lock); |
227 | out: | 227 | out: |
228 | return error; | 228 | return error; |
229 | } | 229 | } |
diff --git a/arch/x86/kernel/mce_64.c b/arch/x86/kernel/mce_64.c index 97d2b757d6bd..8ca8f8648969 100644 --- a/arch/x86/kernel/mce_64.c +++ b/arch/x86/kernel/mce_64.c | |||
@@ -695,8 +695,6 @@ static int __init mcheck_disable(char *str) | |||
695 | mce=nobootlog Don't log MCEs from before booting. */ | 695 | mce=nobootlog Don't log MCEs from before booting. */ |
696 | static int __init mcheck_enable(char *str) | 696 | static int __init mcheck_enable(char *str) |
697 | { | 697 | { |
698 | if (*str == '=') | ||
699 | str++; | ||
700 | if (!strcmp(str, "off")) | 698 | if (!strcmp(str, "off")) |
701 | mce_dont_init = 1; | 699 | mce_dont_init = 1; |
702 | else if (!strcmp(str, "bootlog") || !strcmp(str,"nobootlog")) | 700 | else if (!strcmp(str, "bootlog") || !strcmp(str,"nobootlog")) |
@@ -709,7 +707,7 @@ static int __init mcheck_enable(char *str) | |||
709 | } | 707 | } |
710 | 708 | ||
711 | __setup("nomce", mcheck_disable); | 709 | __setup("nomce", mcheck_disable); |
712 | __setup("mce", mcheck_enable); | 710 | __setup("mce=", mcheck_enable); |
713 | 711 | ||
714 | /* | 712 | /* |
715 | * Sysfs support | 713 | * Sysfs support |
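The __setup() change above works because a trailing '=' in the setup string makes the early parameter parser strip the "mce=" prefix itself, so the handler only ever sees the value and the manual '=' skipping becomes dead code. An illustrative sketch with a hypothetical "foo=" boot parameter (foo_setup and foo_mode are made-up names):

        #include <linux/init.h>
        #include <linux/string.h>

        static int foo_mode __initdata;

        static int __init foo_setup(char *str)
        {
                /* str is the text after "foo=", e.g. "on" for foo=on */
                if (!strcmp(str, "off"))
                        foo_mode = 0;
                else if (!strcmp(str, "on"))
                        foo_mode = 1;
                return 1;
        }
        __setup("foo=", foo_setup);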
diff --git a/arch/x86/kernel/mce_amd_64.c b/arch/x86/kernel/mce_amd_64.c index 805b62b1e0df..0d2afd96aca4 100644 --- a/arch/x86/kernel/mce_amd_64.c +++ b/arch/x86/kernel/mce_amd_64.c | |||
@@ -237,6 +237,7 @@ asmlinkage void mce_threshold_interrupt(void) | |||
237 | } | 237 | } |
238 | } | 238 | } |
239 | out: | 239 | out: |
240 | add_pda(irq_threshold_count, 1); | ||
240 | irq_exit(); | 241 | irq_exit(); |
241 | } | 242 | } |
242 | 243 | ||
diff --git a/arch/x86/kernel/mce_intel_64.c b/arch/x86/kernel/mce_intel_64.c index 6551505d8a2c..c17eaf5dd6dd 100644 --- a/arch/x86/kernel/mce_intel_64.c +++ b/arch/x86/kernel/mce_intel_64.c | |||
@@ -26,6 +26,7 @@ asmlinkage void smp_thermal_interrupt(void) | |||
26 | if (therm_throt_process(msr_val & 1)) | 26 | if (therm_throt_process(msr_val & 1)) |
27 | mce_log_therm_throt_event(smp_processor_id(), msr_val); | 27 | mce_log_therm_throt_event(smp_processor_id(), msr_val); |
28 | 28 | ||
29 | add_pda(irq_thermal_count, 1); | ||
29 | irq_exit(); | 30 | irq_exit(); |
30 | } | 31 | } |
31 | 32 | ||
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index c044de310b69..df85c9c13601 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c | |||
@@ -133,7 +133,7 @@ static const struct file_operations msr_fops = { | |||
133 | .open = msr_open, | 133 | .open = msr_open, |
134 | }; | 134 | }; |
135 | 135 | ||
136 | static int msr_device_create(int i) | 136 | static int __cpuinit msr_device_create(int i) |
137 | { | 137 | { |
138 | int err = 0; | 138 | int err = 0; |
139 | struct device *dev; | 139 | struct device *dev; |
@@ -144,7 +144,7 @@ static int msr_device_create(int i) | |||
144 | return err; | 144 | return err; |
145 | } | 145 | } |
146 | 146 | ||
147 | static int msr_class_cpu_callback(struct notifier_block *nfb, | 147 | static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb, |
148 | unsigned long action, void *hcpu) | 148 | unsigned long action, void *hcpu) |
149 | { | 149 | { |
150 | unsigned int cpu = (unsigned long)hcpu; | 150 | unsigned int cpu = (unsigned long)hcpu; |
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index a50b787b3bfa..5098f58063a5 100644 --- a/arch/x86/kernel/pci-calgary_64.c +++ b/arch/x86/kernel/pci-calgary_64.c | |||
@@ -222,10 +222,10 @@ static inline unsigned int num_dma_pages(unsigned long dma, unsigned int dmalen) | |||
222 | return npages; | 222 | return npages; |
223 | } | 223 | } |
224 | 224 | ||
225 | static inline int translate_phb(struct pci_dev* dev) | 225 | static inline int translation_enabled(struct iommu_table *tbl) |
226 | { | 226 | { |
227 | int disabled = bus_info[dev->bus->number].translation_disabled; | 227 | /* only PHBs with translation enabled have an IOMMU table */ |
228 | return !disabled; | 228 | return (tbl != NULL); |
229 | } | 229 | } |
230 | 230 | ||
231 | static void iommu_range_reserve(struct iommu_table *tbl, | 231 | static void iommu_range_reserve(struct iommu_table *tbl, |
@@ -388,7 +388,7 @@ static void calgary_unmap_sg(struct device *dev, | |||
388 | struct scatterlist *s; | 388 | struct scatterlist *s; |
389 | int i; | 389 | int i; |
390 | 390 | ||
391 | if (!translate_phb(to_pci_dev(dev))) | 391 | if (!translation_enabled(tbl)) |
392 | return; | 392 | return; |
393 | 393 | ||
394 | for_each_sg(sglist, s, nelems, i) { | 394 | for_each_sg(sglist, s, nelems, i) { |
@@ -428,7 +428,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg, | |||
428 | unsigned long entry; | 428 | unsigned long entry; |
429 | int i; | 429 | int i; |
430 | 430 | ||
431 | if (!translate_phb(to_pci_dev(dev))) | 431 | if (!translation_enabled(tbl)) |
432 | return calgary_nontranslate_map_sg(dev, sg, nelems, direction); | 432 | return calgary_nontranslate_map_sg(dev, sg, nelems, direction); |
433 | 433 | ||
434 | for_each_sg(sg, s, nelems, i) { | 434 | for_each_sg(sg, s, nelems, i) { |
@@ -474,7 +474,7 @@ static dma_addr_t calgary_map_single(struct device *dev, void *vaddr, | |||
474 | uaddr = (unsigned long)vaddr; | 474 | uaddr = (unsigned long)vaddr; |
475 | npages = num_dma_pages(uaddr, size); | 475 | npages = num_dma_pages(uaddr, size); |
476 | 476 | ||
477 | if (translate_phb(to_pci_dev(dev))) | 477 | if (translation_enabled(tbl)) |
478 | dma_handle = iommu_alloc(tbl, vaddr, npages, direction); | 478 | dma_handle = iommu_alloc(tbl, vaddr, npages, direction); |
479 | else | 479 | else |
480 | dma_handle = virt_to_bus(vaddr); | 480 | dma_handle = virt_to_bus(vaddr); |
@@ -488,7 +488,7 @@ static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle, | |||
488 | struct iommu_table *tbl = find_iommu_table(dev); | 488 | struct iommu_table *tbl = find_iommu_table(dev); |
489 | unsigned int npages; | 489 | unsigned int npages; |
490 | 490 | ||
491 | if (!translate_phb(to_pci_dev(dev))) | 491 | if (!translation_enabled(tbl)) |
492 | return; | 492 | return; |
493 | 493 | ||
494 | npages = num_dma_pages(dma_handle, size); | 494 | npages = num_dma_pages(dma_handle, size); |
@@ -513,7 +513,7 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size, | |||
513 | goto error; | 513 | goto error; |
514 | memset(ret, 0, size); | 514 | memset(ret, 0, size); |
515 | 515 | ||
516 | if (translate_phb(to_pci_dev(dev))) { | 516 | if (translation_enabled(tbl)) { |
517 | /* set up tces to cover the allocated range */ | 517 | /* set up tces to cover the allocated range */ |
518 | mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL); | 518 | mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL); |
519 | if (mapping == bad_dma_address) | 519 | if (mapping == bad_dma_address) |
@@ -1194,7 +1194,7 @@ static int __init calgary_init(void) | |||
1194 | { | 1194 | { |
1195 | int ret; | 1195 | int ret; |
1196 | struct pci_dev *dev = NULL; | 1196 | struct pci_dev *dev = NULL; |
1197 | void *tce_space; | 1197 | struct calgary_bus_info *info; |
1198 | 1198 | ||
1199 | ret = calgary_locate_bbars(); | 1199 | ret = calgary_locate_bbars(); |
1200 | if (ret) | 1200 | if (ret) |
@@ -1206,12 +1206,14 @@ static int __init calgary_init(void) | |||
1206 | break; | 1206 | break; |
1207 | if (!is_cal_pci_dev(dev->device)) | 1207 | if (!is_cal_pci_dev(dev->device)) |
1208 | continue; | 1208 | continue; |
1209 | if (!translate_phb(dev)) { | 1209 | |
1210 | info = &bus_info[dev->bus->number]; | ||
1211 | if (info->translation_disabled) { | ||
1210 | calgary_init_one_nontraslated(dev); | 1212 | calgary_init_one_nontraslated(dev); |
1211 | continue; | 1213 | continue; |
1212 | } | 1214 | } |
1213 | tce_space = bus_info[dev->bus->number].tce_space; | 1215 | |
1214 | if (!tce_space && !translate_empty_slots) | 1216 | if (!info->tce_space && !translate_empty_slots) |
1215 | continue; | 1217 | continue; |
1216 | 1218 | ||
1217 | ret = calgary_init_one(dev); | 1219 | ret = calgary_init_one(dev); |
@@ -1229,11 +1231,13 @@ error: | |||
1229 | break; | 1231 | break; |
1230 | if (!is_cal_pci_dev(dev->device)) | 1232 | if (!is_cal_pci_dev(dev->device)) |
1231 | continue; | 1233 | continue; |
1232 | if (!translate_phb(dev)) { | 1234 | |
1235 | info = &bus_info[dev->bus->number]; | ||
1236 | if (info->translation_disabled) { | ||
1233 | pci_dev_put(dev); | 1237 | pci_dev_put(dev); |
1234 | continue; | 1238 | continue; |
1235 | } | 1239 | } |
1236 | if (!bus_info[dev->bus->number].tce_space && !translate_empty_slots) | 1240 | if (!info->tce_space && !translate_empty_slots) |
1237 | continue; | 1241 | continue; |
1238 | 1242 | ||
1239 | calgary_disable_translation(dev); | 1243 | calgary_disable_translation(dev); |
@@ -1546,7 +1550,7 @@ static void __init calgary_fixup_one_tce_space(struct pci_dev *dev) | |||
1546 | static int __init calgary_fixup_tce_spaces(void) | 1550 | static int __init calgary_fixup_tce_spaces(void) |
1547 | { | 1551 | { |
1548 | struct pci_dev *dev = NULL; | 1552 | struct pci_dev *dev = NULL; |
1549 | void *tce_space; | 1553 | struct calgary_bus_info *info; |
1550 | 1554 | ||
1551 | if (no_iommu || swiotlb || !calgary_detected) | 1555 | if (no_iommu || swiotlb || !calgary_detected) |
1552 | return -ENODEV; | 1556 | return -ENODEV; |
@@ -1559,11 +1563,12 @@ static int __init calgary_fixup_tce_spaces(void) | |||
1559 | break; | 1563 | break; |
1560 | if (!is_cal_pci_dev(dev->device)) | 1564 | if (!is_cal_pci_dev(dev->device)) |
1561 | continue; | 1565 | continue; |
1562 | if (!translate_phb(dev)) | 1566 | |
1567 | info = &bus_info[dev->bus->number]; | ||
1568 | if (info->translation_disabled) | ||
1563 | continue; | 1569 | continue; |
1564 | 1570 | ||
1565 | tce_space = bus_info[dev->bus->number].tce_space; | 1571 | if (!info->tce_space) |
1566 | if (!tce_space) | ||
1567 | continue; | 1572 | continue; |
1568 | 1573 | ||
1569 | calgary_fixup_one_tce_space(dev); | 1574 | calgary_fixup_one_tce_space(dev); |
diff --git a/arch/x86/kernel/pci-dma_32.c b/arch/x86/kernel/pci-dma_32.c index 0aae2f3847a5..51330321a5d3 100644 --- a/arch/x86/kernel/pci-dma_32.c +++ b/arch/x86/kernel/pci-dma_32.c | |||
@@ -12,7 +12,6 @@ | |||
12 | #include <linux/string.h> | 12 | #include <linux/string.h> |
13 | #include <linux/pci.h> | 13 | #include <linux/pci.h> |
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/pci.h> | ||
16 | #include <asm/io.h> | 15 | #include <asm/io.h> |
17 | 16 | ||
18 | struct dma_coherent_mem { | 17 | struct dma_coherent_mem { |
diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c index 9576a2eb375e..b2b42bdb0a15 100644 --- a/arch/x86/kernel/pci-dma_64.c +++ b/arch/x86/kernel/pci-dma_64.c | |||
@@ -51,11 +51,9 @@ dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order) | |||
51 | { | 51 | { |
52 | struct page *page; | 52 | struct page *page; |
53 | int node; | 53 | int node; |
54 | #ifdef CONFIG_PCI | 54 | |
55 | if (dev->bus == &pci_bus_type) | 55 | node = dev_to_node(dev); |
56 | node = pcibus_to_node(to_pci_dev(dev)->bus); | 56 | if (node == -1) |
57 | else | ||
58 | #endif | ||
59 | node = numa_node_id(); | 57 | node = numa_node_id(); |
60 | 58 | ||
61 | if (node < first_node(node_online_map)) | 59 | if (node < first_node(node_online_map)) |
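dma_alloc_pages() above now asks the generic device model for the NUMA node via dev_to_node() instead of special-casing PCI devices behind #ifdef CONFIG_PCI; a return of -1 still means "no affinity" and falls back to the local node. A sketch of the resulting allocation pattern (the helper name is made up):

        #include <linux/device.h>
        #include <linux/gfp.h>
        #include <linux/topology.h>

        static struct page *alloc_pages_near_dev(struct device *dev, gfp_t gfp,
                                                 unsigned int order)
        {
                int node = dev_to_node(dev);    /* works for PCI and non-PCI devices */

                if (node == -1)                 /* no affinity recorded for this device */
                        node = numa_node_id();

                return alloc_pages_node(node, gfp, order);
        }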
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index cfcc84e6c350..5cdfab65e93f 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * See Documentation/DMA-mapping.txt for the interface specification. | 8 | * See Documentation/DMA-mapping.txt for the interface specification. |
9 | * | 9 | * |
10 | * Copyright 2002 Andi Kleen, SuSE Labs. | 10 | * Copyright 2002 Andi Kleen, SuSE Labs. |
11 | * Subject to the GNU General Public License v2 only. | ||
11 | */ | 12 | */ |
12 | 13 | ||
13 | #include <linux/types.h> | 14 | #include <linux/types.h> |
@@ -375,7 +376,8 @@ static inline int dma_map_cont(struct scatterlist *start, int nelems, | |||
375 | * DMA map all entries in a scatterlist. | 376 | * DMA map all entries in a scatterlist. |
376 | * Merge chunks that have page aligned sizes into a continuous mapping. | 377 | * Merge chunks that have page aligned sizes into a continuous mapping. |
377 | */ | 378 | */ |
378 | int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) | 379 | static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, |
380 | int dir) | ||
379 | { | 381 | { |
380 | int i; | 382 | int i; |
381 | int out; | 383 | int out; |
diff --git a/arch/x86/kernel/ptrace_32.c b/arch/x86/kernel/ptrace_32.c index 8622b9cd3e38..99102ec5fade 100644 --- a/arch/x86/kernel/ptrace_32.c +++ b/arch/x86/kernel/ptrace_32.c | |||
@@ -165,7 +165,7 @@ static unsigned long convert_eip_to_linear(struct task_struct *child, struct pt_ | |||
165 | 165 | ||
166 | seg &= ~7UL; | 166 | seg &= ~7UL; |
167 | 167 | ||
168 | down(&child->mm->context.sem); | 168 | mutex_lock(&child->mm->context.lock); |
169 | if (unlikely((seg >> 3) >= child->mm->context.size)) | 169 | if (unlikely((seg >> 3) >= child->mm->context.size)) |
170 | addr = -1L; /* bogus selector, access would fault */ | 170 | addr = -1L; /* bogus selector, access would fault */ |
171 | else { | 171 | else { |
@@ -179,7 +179,7 @@ static unsigned long convert_eip_to_linear(struct task_struct *child, struct pt_ | |||
179 | addr &= 0xffff; | 179 | addr &= 0xffff; |
180 | addr += base; | 180 | addr += base; |
181 | } | 181 | } |
182 | up(&child->mm->context.sem); | 182 | mutex_unlock(&child->mm->context.lock); |
183 | } | 183 | } |
184 | return addr; | 184 | return addr; |
185 | } | 185 | } |
diff --git a/arch/x86/kernel/ptrace_64.c b/arch/x86/kernel/ptrace_64.c index 86321ee6da93..607085f3f08a 100644 --- a/arch/x86/kernel/ptrace_64.c +++ b/arch/x86/kernel/ptrace_64.c | |||
@@ -103,7 +103,7 @@ unsigned long convert_rip_to_linear(struct task_struct *child, struct pt_regs *r | |||
103 | 103 | ||
104 | seg &= ~7UL; | 104 | seg &= ~7UL; |
105 | 105 | ||
106 | down(&child->mm->context.sem); | 106 | mutex_lock(&child->mm->context.lock); |
107 | if (unlikely((seg >> 3) >= child->mm->context.size)) | 107 | if (unlikely((seg >> 3) >= child->mm->context.size)) |
108 | addr = -1L; /* bogus selector, access would fault */ | 108 | addr = -1L; /* bogus selector, access would fault */ |
109 | else { | 109 | else { |
@@ -117,7 +117,7 @@ unsigned long convert_rip_to_linear(struct task_struct *child, struct pt_regs *r | |||
117 | addr &= 0xffff; | 117 | addr &= 0xffff; |
118 | addr += base; | 118 | addr += base; |
119 | } | 119 | } |
120 | up(&child->mm->context.sem); | 120 | mutex_unlock(&child->mm->context.lock); |
121 | } | 121 | } |
122 | 122 | ||
123 | return addr; | 123 | return addr; |
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c index 8159bf0be17a..5a19f0cc5b67 100644 --- a/arch/x86/kernel/setup_64.c +++ b/arch/x86/kernel/setup_64.c | |||
@@ -604,7 +604,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
604 | level = cpuid_eax(1); | 604 | level = cpuid_eax(1); |
605 | if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)) | 605 | if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)) |
606 | set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability); | 606 | set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability); |
607 | if (c->x86 == 0x10) | 607 | if (c->x86 == 0x10 || c->x86 == 0x11) |
608 | set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability); | 608 | set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability); |
609 | 609 | ||
610 | /* Enable workaround for FXSAVE leak */ | 610 | /* Enable workaround for FXSAVE leak */ |
@@ -968,7 +968,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
968 | * applications want to get the raw CPUID data, they should access | 968 | * applications want to get the raw CPUID data, they should access |
969 | * /dev/cpu/<cpu_nr>/cpuid instead. | 969 | * /dev/cpu/<cpu_nr>/cpuid instead. |
970 | */ | 970 | */ |
971 | static char *x86_cap_flags[] = { | 971 | static const char *const x86_cap_flags[] = { |
972 | /* Intel-defined */ | 972 | /* Intel-defined */ |
973 | "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", | 973 | "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", |
974 | "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov", | 974 | "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov", |
@@ -1022,7 +1022,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
1022 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 1022 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, |
1023 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 1023 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, |
1024 | }; | 1024 | }; |
1025 | static char *x86_power_flags[] = { | 1025 | static const char *const x86_power_flags[] = { |
1026 | "ts", /* temperature sensor */ | 1026 | "ts", /* temperature sensor */ |
1027 | "fid", /* frequency id control */ | 1027 | "fid", /* frequency id control */ |
1028 | "vid", /* voltage id control */ | 1028 | "vid", /* voltage id control */ |
diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c index d01d51fcce2a..0d79df3c5631 100644 --- a/arch/x86/kernel/signal_32.c +++ b/arch/x86/kernel/signal_32.c | |||
@@ -385,7 +385,6 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
385 | regs->edx = (unsigned long) 0; | 385 | regs->edx = (unsigned long) 0; |
386 | regs->ecx = (unsigned long) 0; | 386 | regs->ecx = (unsigned long) 0; |
387 | 387 | ||
388 | set_fs(USER_DS); | ||
389 | regs->xds = __USER_DS; | 388 | regs->xds = __USER_DS; |
390 | regs->xes = __USER_DS; | 389 | regs->xes = __USER_DS; |
391 | regs->xss = __USER_DS; | 390 | regs->xss = __USER_DS; |
@@ -479,7 +478,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
479 | regs->edx = (unsigned long) &frame->info; | 478 | regs->edx = (unsigned long) &frame->info; |
480 | regs->ecx = (unsigned long) &frame->uc; | 479 | regs->ecx = (unsigned long) &frame->uc; |
481 | 480 | ||
482 | set_fs(USER_DS); | ||
483 | regs->xds = __USER_DS; | 481 | regs->xds = __USER_DS; |
484 | regs->xes = __USER_DS; | 482 | regs->xes = __USER_DS; |
485 | regs->xss = __USER_DS; | 483 | regs->xss = __USER_DS; |
diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c index 2d35d8502029..791d9f8036ae 100644 --- a/arch/x86/kernel/smp_32.c +++ b/arch/x86/kernel/smp_32.c | |||
@@ -342,6 +342,7 @@ fastcall void smp_invalidate_interrupt(struct pt_regs *regs) | |||
342 | smp_mb__after_clear_bit(); | 342 | smp_mb__after_clear_bit(); |
343 | out: | 343 | out: |
344 | put_cpu_no_resched(); | 344 | put_cpu_no_resched(); |
345 | __get_cpu_var(irq_stat).irq_tlb_count++; | ||
345 | } | 346 | } |
346 | 347 | ||
347 | void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, | 348 | void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, |
@@ -640,6 +641,7 @@ static void native_smp_send_stop(void) | |||
640 | fastcall void smp_reschedule_interrupt(struct pt_regs *regs) | 641 | fastcall void smp_reschedule_interrupt(struct pt_regs *regs) |
641 | { | 642 | { |
642 | ack_APIC_irq(); | 643 | ack_APIC_irq(); |
644 | __get_cpu_var(irq_stat).irq_resched_count++; | ||
643 | } | 645 | } |
644 | 646 | ||
645 | fastcall void smp_call_function_interrupt(struct pt_regs *regs) | 647 | fastcall void smp_call_function_interrupt(struct pt_regs *regs) |
@@ -660,6 +662,7 @@ fastcall void smp_call_function_interrupt(struct pt_regs *regs) | |||
660 | */ | 662 | */ |
661 | irq_enter(); | 663 | irq_enter(); |
662 | (*func)(info); | 664 | (*func)(info); |
665 | __get_cpu_var(irq_stat).irq_call_count++; | ||
663 | irq_exit(); | 666 | irq_exit(); |
664 | 667 | ||
665 | if (wait) { | 668 | if (wait) { |
@@ -705,3 +708,10 @@ struct smp_ops smp_ops = { | |||
705 | .smp_send_reschedule = native_smp_send_reschedule, | 708 | .smp_send_reschedule = native_smp_send_reschedule, |
706 | .smp_call_function_mask = native_smp_call_function_mask, | 709 | .smp_call_function_mask = native_smp_call_function_mask, |
707 | }; | 710 | }; |
711 | |||
712 | int smp_call_function_mask(cpumask_t mask, void (*func) (void *info), | ||
713 | void *info, int wait) | ||
714 | { | ||
715 | return smp_ops.smp_call_function_mask(mask, func, info, wait); | ||
716 | } | ||
717 | EXPORT_SYMBOL(smp_call_function_mask); | ||
diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c index df4a82812adb..5c2964727d19 100644 --- a/arch/x86/kernel/smp_64.c +++ b/arch/x86/kernel/smp_64.c | |||
@@ -163,6 +163,7 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs) | |||
163 | out: | 163 | out: |
164 | ack_APIC_irq(); | 164 | ack_APIC_irq(); |
165 | cpu_clear(cpu, f->flush_cpumask); | 165 | cpu_clear(cpu, f->flush_cpumask); |
166 | add_pda(irq_tlb_count, 1); | ||
166 | } | 167 | } |
167 | 168 | ||
168 | static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, | 169 | static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, |
@@ -493,6 +494,7 @@ void smp_send_stop(void) | |||
493 | asmlinkage void smp_reschedule_interrupt(void) | 494 | asmlinkage void smp_reschedule_interrupt(void) |
494 | { | 495 | { |
495 | ack_APIC_irq(); | 496 | ack_APIC_irq(); |
497 | add_pda(irq_resched_count, 1); | ||
496 | } | 498 | } |
497 | 499 | ||
498 | asmlinkage void smp_call_function_interrupt(void) | 500 | asmlinkage void smp_call_function_interrupt(void) |
@@ -514,6 +516,7 @@ asmlinkage void smp_call_function_interrupt(void) | |||
514 | exit_idle(); | 516 | exit_idle(); |
515 | irq_enter(); | 517 | irq_enter(); |
516 | (*func)(info); | 518 | (*func)(info); |
519 | add_pda(irq_call_count, 1); | ||
517 | irq_exit(); | 520 | irq_exit(); |
518 | if (wait) { | 521 | if (wait) { |
519 | mb(); | 522 | mb(); |
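The add_pda() and __get_cpu_var(irq_stat) increments added in smp_32.c and smp_64.c feed the per-CPU RES/CAL/TLB/TRM/THR columns that the /proc/interrupts code at the top of this diff prints. A sketch of the two counting idioms, assuming the counter fields introduced by this series exist on both builds:

        #include <linux/percpu.h>
        #include <linux/hardirq.h>
        #ifdef CONFIG_X86_64
        #include <asm/pda.h>
        #endif

        static void count_reschedule_ipi(void)
        {
        #ifdef CONFIG_X86_64
                add_pda(irq_resched_count, 1);                  /* per-CPU PDA field */
        #else
                __get_cpu_var(irq_stat).irq_resched_count++;    /* irq_cpustat_t field */
        #endif
        }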
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 31fc08bd15ef..be3faac04719 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c | |||
@@ -102,8 +102,8 @@ u8 apicid_2_node[MAX_APICID]; | |||
102 | * Trampoline 80x86 program as an array. | 102 | * Trampoline 80x86 program as an array. |
103 | */ | 103 | */ |
104 | 104 | ||
105 | extern unsigned char trampoline_data []; | 105 | extern const unsigned char trampoline_data []; |
106 | extern unsigned char trampoline_end []; | 106 | extern const unsigned char trampoline_end []; |
107 | static unsigned char *trampoline_base; | 107 | static unsigned char *trampoline_base; |
108 | static int trampoline_exec; | 108 | static int trampoline_exec; |
109 | 109 | ||
@@ -118,7 +118,7 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 }; | |||
118 | * has made sure it's suitably aligned. | 118 | * has made sure it's suitably aligned. |
119 | */ | 119 | */ |
120 | 120 | ||
121 | static unsigned long __devinit setup_trampoline(void) | 121 | static unsigned long __cpuinit setup_trampoline(void) |
122 | { | 122 | { |
123 | memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data); | 123 | memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data); |
124 | return virt_to_phys(trampoline_base); | 124 | return virt_to_phys(trampoline_base); |
@@ -1021,6 +1021,12 @@ static void __init smp_boot_cpus(unsigned int max_cpus) | |||
1021 | if (!max_cpus) { | 1021 | if (!max_cpus) { |
1022 | smp_found_config = 0; | 1022 | smp_found_config = 0; |
1023 | printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n"); | 1023 | printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n"); |
1024 | |||
1025 | if (nmi_watchdog == NMI_LOCAL_APIC) { | ||
1026 | printk(KERN_INFO "activating minimal APIC for NMI watchdog use.\n"); | ||
1027 | connect_bsp_APIC(); | ||
1028 | setup_local_APIC(); | ||
1029 | } | ||
1024 | smpboot_clear_io_apic_irqs(); | 1030 | smpboot_clear_io_apic_irqs(); |
1025 | phys_cpu_present_map = physid_mask_of_physid(0); | 1031 | phys_cpu_present_map = physid_mask_of_physid(0); |
1026 | cpu_set(0, per_cpu(cpu_sibling_map, 0)); | 1032 | cpu_set(0, per_cpu(cpu_sibling_map, 0)); |
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index 0faa0a0af272..e351ac4ab5b1 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c | |||
@@ -102,8 +102,8 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map); | |||
102 | * Trampoline 80x86 program as an array. | 102 | * Trampoline 80x86 program as an array. |
103 | */ | 103 | */ |
104 | 104 | ||
105 | extern unsigned char trampoline_data[]; | 105 | extern const unsigned char trampoline_data[]; |
106 | extern unsigned char trampoline_end[]; | 106 | extern const unsigned char trampoline_end[]; |
107 | 107 | ||
108 | /* State of each CPU */ | 108 | /* State of each CPU */ |
109 | DEFINE_PER_CPU(int, cpu_state) = { 0 }; | 109 | DEFINE_PER_CPU(int, cpu_state) = { 0 }; |
@@ -695,7 +695,6 @@ do_rest: | |||
695 | cpu_clear(cpu, cpu_present_map); | 695 | cpu_clear(cpu, cpu_present_map); |
696 | cpu_clear(cpu, cpu_possible_map); | 696 | cpu_clear(cpu, cpu_possible_map); |
697 | x86_cpu_to_apicid[cpu] = BAD_APICID; | 697 | x86_cpu_to_apicid[cpu] = BAD_APICID; |
698 | x86_cpu_to_log_apicid[cpu] = BAD_APICID; | ||
699 | return -EIO; | 698 | return -EIO; |
700 | } | 699 | } |
701 | 700 | ||
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index 413e527cdeb9..6fa6cf036c70 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c | |||
@@ -33,7 +33,7 @@ static void save_stack_address(void *data, unsigned long addr) | |||
33 | trace->entries[trace->nr_entries++] = addr; | 33 | trace->entries[trace->nr_entries++] = addr; |
34 | } | 34 | } |
35 | 35 | ||
36 | static struct stacktrace_ops save_stack_ops = { | 36 | static const struct stacktrace_ops save_stack_ops = { |
37 | .warning = save_stack_warning, | 37 | .warning = save_stack_warning, |
38 | .warning_symbol = save_stack_warning_symbol, | 38 | .warning_symbol = save_stack_warning_symbol, |
39 | .stack = save_stack_stack, | 39 | .stack = save_stack_stack, |
diff --git a/arch/x86/kernel/tce_64.c b/arch/x86/kernel/tce_64.c index e3f2569b2c44..9e540fee7009 100644 --- a/arch/x86/kernel/tce_64.c +++ b/arch/x86/kernel/tce_64.c | |||
@@ -40,9 +40,9 @@ static inline void flush_tce(void* tceaddr) | |||
40 | { | 40 | { |
41 | /* a single tce can't cross a cache line */ | 41 | /* a single tce can't cross a cache line */ |
42 | if (cpu_has_clflush) | 42 | if (cpu_has_clflush) |
43 | asm volatile("clflush (%0)" :: "r" (tceaddr)); | 43 | clflush(tceaddr); |
44 | else | 44 | else |
45 | asm volatile("wbinvd":::"memory"); | 45 | wbinvd(); |
46 | } | 46 | } |
47 | 47 | ||
48 | void tce_build(struct iommu_table *tbl, unsigned long index, | 48 | void tce_build(struct iommu_table *tbl, unsigned long index, |
diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c index c25f23eb397c..8caa0b777466 100644 --- a/arch/x86/kernel/topology.c +++ b/arch/x86/kernel/topology.c | |||
@@ -44,15 +44,15 @@ int arch_register_cpu(int num) | |||
44 | * Also certain PCI quirks require not to enable hotplug control | 44 | * Also certain PCI quirks require not to enable hotplug control |
45 | * for all CPU's. | 45 | * for all CPU's. |
46 | */ | 46 | */ |
47 | if (num && enable_cpu_hotplug) | 47 | #ifdef CONFIG_HOTPLUG_CPU |
48 | if (num) | ||
48 | cpu_devices[num].cpu.hotpluggable = 1; | 49 | cpu_devices[num].cpu.hotpluggable = 1; |
50 | #endif | ||
49 | 51 | ||
50 | return register_cpu(&cpu_devices[num].cpu, num); | 52 | return register_cpu(&cpu_devices[num].cpu, num); |
51 | } | 53 | } |
52 | 54 | ||
53 | #ifdef CONFIG_HOTPLUG_CPU | 55 | #ifdef CONFIG_HOTPLUG_CPU |
54 | int enable_cpu_hotplug = 1; | ||
55 | |||
56 | void arch_unregister_cpu(int num) { | 56 | void arch_unregister_cpu(int num) { |
57 | return unregister_cpu(&cpu_devices[num].cpu); | 57 | return unregister_cpu(&cpu_devices[num].cpu); |
58 | } | 58 | } |
diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S index f62815f8d06a..9bcc1c6aca3d 100644 --- a/arch/x86/kernel/trampoline_32.S +++ b/arch/x86/kernel/trampoline_32.S | |||
@@ -36,11 +36,11 @@ | |||
36 | #include <asm/segment.h> | 36 | #include <asm/segment.h> |
37 | #include <asm/page.h> | 37 | #include <asm/page.h> |
38 | 38 | ||
39 | .data | ||
40 | |||
41 | /* We can free up trampoline after bootup if cpu hotplug is not supported. */ | 39 | /* We can free up trampoline after bootup if cpu hotplug is not supported. */ |
42 | #ifndef CONFIG_HOTPLUG_CPU | 40 | #ifndef CONFIG_HOTPLUG_CPU |
43 | .section ".init.data","aw",@progbits | 41 | .section ".init.data","aw",@progbits |
42 | #else | ||
43 | .section .rodata,"a",@progbits | ||
44 | #endif | 44 | #endif |
45 | 45 | ||
46 | .code16 | 46 | .code16 |
diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S index 607983b0d27b..e30b67c6a9f5 100644 --- a/arch/x86/kernel/trampoline_64.S +++ b/arch/x86/kernel/trampoline_64.S | |||
@@ -33,7 +33,12 @@ | |||
33 | #include <asm/msr.h> | 33 | #include <asm/msr.h> |
34 | #include <asm/segment.h> | 34 | #include <asm/segment.h> |
35 | 35 | ||
36 | .data | 36 | /* We can free up trampoline after bootup if cpu hotplug is not supported. */ |
37 | #ifndef CONFIG_HOTPLUG_CPU | ||
38 | .section .init.data, "aw", @progbits | ||
39 | #else | ||
40 | .section .rodata, "a", @progbits | ||
41 | #endif | ||
37 | 42 | ||
38 | .code16 | 43 | .code16 |
39 | 44 | ||
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c index 05c27ecaf2a7..b132d3957dfc 100644 --- a/arch/x86/kernel/traps_32.c +++ b/arch/x86/kernel/traps_32.c | |||
@@ -112,7 +112,7 @@ struct stack_frame { | |||
112 | 112 | ||
113 | static inline unsigned long print_context_stack(struct thread_info *tinfo, | 113 | static inline unsigned long print_context_stack(struct thread_info *tinfo, |
114 | unsigned long *stack, unsigned long ebp, | 114 | unsigned long *stack, unsigned long ebp, |
115 | struct stacktrace_ops *ops, void *data) | 115 | const struct stacktrace_ops *ops, void *data) |
116 | { | 116 | { |
117 | #ifdef CONFIG_FRAME_POINTER | 117 | #ifdef CONFIG_FRAME_POINTER |
118 | struct stack_frame *frame = (struct stack_frame *)ebp; | 118 | struct stack_frame *frame = (struct stack_frame *)ebp; |
@@ -149,7 +149,7 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo, | |||
149 | 149 | ||
150 | void dump_trace(struct task_struct *task, struct pt_regs *regs, | 150 | void dump_trace(struct task_struct *task, struct pt_regs *regs, |
151 | unsigned long *stack, | 151 | unsigned long *stack, |
152 | struct stacktrace_ops *ops, void *data) | 152 | const struct stacktrace_ops *ops, void *data) |
153 | { | 153 | { |
154 | unsigned long ebp = 0; | 154 | unsigned long ebp = 0; |
155 | 155 | ||
@@ -221,7 +221,7 @@ static void print_trace_address(void *data, unsigned long addr) | |||
221 | touch_nmi_watchdog(); | 221 | touch_nmi_watchdog(); |
222 | } | 222 | } |
223 | 223 | ||
224 | static struct stacktrace_ops print_trace_ops = { | 224 | static const struct stacktrace_ops print_trace_ops = { |
225 | .warning = print_trace_warning, | 225 | .warning = print_trace_warning, |
226 | .warning_symbol = print_trace_warning_symbol, | 226 | .warning_symbol = print_trace_warning_symbol, |
227 | .stack = print_trace_stack, | 227 | .stack = print_trace_stack, |
@@ -398,31 +398,24 @@ void die(const char * str, struct pt_regs * regs, long err) | |||
398 | local_save_flags(flags); | 398 | local_save_flags(flags); |
399 | 399 | ||
400 | if (++die.lock_owner_depth < 3) { | 400 | if (++die.lock_owner_depth < 3) { |
401 | int nl = 0; | ||
402 | unsigned long esp; | 401 | unsigned long esp; |
403 | unsigned short ss; | 402 | unsigned short ss; |
404 | 403 | ||
405 | report_bug(regs->eip, regs); | 404 | report_bug(regs->eip, regs); |
406 | 405 | ||
407 | printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter); | 406 | printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, |
407 | ++die_counter); | ||
408 | #ifdef CONFIG_PREEMPT | 408 | #ifdef CONFIG_PREEMPT |
409 | printk(KERN_EMERG "PREEMPT "); | 409 | printk("PREEMPT "); |
410 | nl = 1; | ||
411 | #endif | 410 | #endif |
412 | #ifdef CONFIG_SMP | 411 | #ifdef CONFIG_SMP |
413 | if (!nl) | ||
414 | printk(KERN_EMERG); | ||
415 | printk("SMP "); | 412 | printk("SMP "); |
416 | nl = 1; | ||
417 | #endif | 413 | #endif |
418 | #ifdef CONFIG_DEBUG_PAGEALLOC | 414 | #ifdef CONFIG_DEBUG_PAGEALLOC |
419 | if (!nl) | ||
420 | printk(KERN_EMERG); | ||
421 | printk("DEBUG_PAGEALLOC"); | 415 | printk("DEBUG_PAGEALLOC"); |
422 | nl = 1; | ||
423 | #endif | 416 | #endif |
424 | if (nl) | 417 | printk("\n"); |
425 | printk("\n"); | 418 | |
426 | if (notify_die(DIE_OOPS, str, regs, err, | 419 | if (notify_die(DIE_OOPS, str, regs, err, |
427 | current->thread.trap_no, SIGSEGV) != | 420 | current->thread.trap_no, SIGSEGV) != |
428 | NOTIFY_STOP) { | 421 | NOTIFY_STOP) { |
@@ -1112,20 +1105,6 @@ asmlinkage void math_emulate(long arg) | |||
1112 | 1105 | ||
1113 | #endif /* CONFIG_MATH_EMULATION */ | 1106 | #endif /* CONFIG_MATH_EMULATION */ |
1114 | 1107 | ||
1115 | #ifdef CONFIG_X86_F00F_BUG | ||
1116 | void __init trap_init_f00f_bug(void) | ||
1117 | { | ||
1118 | __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO); | ||
1119 | |||
1120 | /* | ||
1121 | * Update the IDT descriptor and reload the IDT so that | ||
1122 | * it uses the read-only mapped virtual address. | ||
1123 | */ | ||
1124 | idt_descr.address = fix_to_virt(FIX_F00F_IDT); | ||
1125 | load_idt(&idt_descr); | ||
1126 | } | ||
1127 | #endif | ||
1128 | |||
1129 | /* | 1108 | /* |
1130 | * This needs to use 'idt_table' rather than 'idt', and | 1109 | * This needs to use 'idt_table' rather than 'idt', and |
1131 | * thus use the _nonmapped_ version of the IDT, as the | 1110 | * thus use the _nonmapped_ version of the IDT, as the |
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c index bc7116acf8ff..b4a9b3db1994 100644 --- a/arch/x86/kernel/traps_64.c +++ b/arch/x86/kernel/traps_64.c | |||
@@ -215,7 +215,7 @@ static inline int valid_stack_ptr(struct thread_info *tinfo, void *p) | |||
215 | 215 | ||
216 | void dump_trace(struct task_struct *tsk, struct pt_regs *regs, | 216 | void dump_trace(struct task_struct *tsk, struct pt_regs *regs, |
217 | unsigned long *stack, | 217 | unsigned long *stack, |
218 | struct stacktrace_ops *ops, void *data) | 218 | const struct stacktrace_ops *ops, void *data) |
219 | { | 219 | { |
220 | const unsigned cpu = get_cpu(); | 220 | const unsigned cpu = get_cpu(); |
221 | unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr; | 221 | unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr; |
@@ -336,7 +336,7 @@ static void print_trace_address(void *data, unsigned long addr) | |||
336 | printk_address(addr); | 336 | printk_address(addr); |
337 | } | 337 | } |
338 | 338 | ||
339 | static struct stacktrace_ops print_trace_ops = { | 339 | static const struct stacktrace_ops print_trace_ops = { |
340 | .warning = print_trace_warning, | 340 | .warning = print_trace_warning, |
341 | .warning_symbol = print_trace_warning_symbol, | 341 | .warning_symbol = print_trace_warning_symbol, |
342 | .stack = print_trace_stack, | 342 | .stack = print_trace_stack, |
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c index b85ad754f70e..e87a3939ed40 100644 --- a/arch/x86/kernel/tsc_32.c +++ b/arch/x86/kernel/tsc_32.c | |||
@@ -349,10 +349,10 @@ __cpuinit int unsynchronized_tsc(void) | |||
349 | 349 | ||
350 | static void __init check_geode_tsc_reliable(void) | 350 | static void __init check_geode_tsc_reliable(void) |
351 | { | 351 | { |
352 | unsigned long val; | 352 | unsigned long res_low, res_high; |
353 | 353 | ||
354 | rdmsrl(MSR_GEODE_BUSCONT_CONF0, val); | 354 | rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high); |
355 | if ((val & RTSC_SUSP)) | 355 | if (res_low & RTSC_SUSP) |
356 | clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY; | 356 | clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY; |
357 | } | 357 | } |
358 | #else | 358 | #else |
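check_geode_tsc_reliable() above switches from rdmsrl() to rdmsr_safe() so that a missing MSR_GEODE_BUSCONT_CONF0 traps cleanly instead of faulting on parts that lack the register. A sketch of that probing pattern; the msr/bit parameters and the helper name are illustrative:

        #include <asm/msr.h>

        /* Returns 1 if the MSR exists and the given bit is set, 0 otherwise. */
        static int msr_bit_set(u32 msr, int bit)
        {
                unsigned long lo, hi;

                if (rdmsr_safe(msr, &lo, &hi))
                        return 0;       /* RDMSR faulted: MSR not implemented */

                return (lo >> bit) & 1;
        }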
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index 93847d848157..8a67e282cb5e 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c | |||
@@ -78,7 +78,6 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock) | |||
78 | vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; | 78 | vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; |
79 | vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; | 79 | vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; |
80 | vsyscall_gtod_data.sys_tz = sys_tz; | 80 | vsyscall_gtod_data.sys_tz = sys_tz; |
81 | vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; | ||
82 | vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic; | 81 | vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic; |
83 | write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); | 82 | write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); |
84 | } | 83 | } |
@@ -289,7 +288,7 @@ static void __cpuinit vsyscall_set_cpu(int cpu) | |||
289 | unsigned long *d; | 288 | unsigned long *d; |
290 | unsigned long node = 0; | 289 | unsigned long node = 0; |
291 | #ifdef CONFIG_NUMA | 290 | #ifdef CONFIG_NUMA |
292 | node = cpu_to_node[cpu]; | 291 | node = cpu_to_node(cpu); |
293 | #endif | 292 | #endif |
294 | if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP)) | 293 | if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP)) |
295 | write_rdtscp_aux((node << 12) | cpu); | 294 | write_rdtscp_aux((node << 12) | cpu); |
diff --git a/arch/x86/lib/bitstr_64.c b/arch/x86/lib/bitstr_64.c index 24676609a6ac..7445caf1b5de 100644 --- a/arch/x86/lib/bitstr_64.c +++ b/arch/x86/lib/bitstr_64.c | |||
@@ -14,7 +14,7 @@ find_next_zero_string(unsigned long *bitmap, long start, long nbits, int len) | |||
14 | 14 | ||
15 | /* could test bitsliced, but it's hardly worth it */ | 15 | /* could test bitsliced, but it's hardly worth it */ |
16 | end = n+len; | 16 | end = n+len; |
17 | if (end >= nbits) | 17 | if (end > nbits) |
18 | return -1; | 18 | return -1; |
19 | for (i = n+1; i < end; i++) { | 19 | for (i = n+1; i < end; i++) { |
20 | if (test_bit(i, bitmap)) { | 20 | if (test_bit(i, bitmap)) { |
diff --git a/arch/x86/lib/msr-on-cpu.c b/arch/x86/lib/msr-on-cpu.c index 7767962f25d3..57d043fa893e 100644 --- a/arch/x86/lib/msr-on-cpu.c +++ b/arch/x86/lib/msr-on-cpu.c | |||
@@ -26,27 +26,18 @@ static void __rdmsr_safe_on_cpu(void *info) | |||
26 | static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe) | 26 | static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe) |
27 | { | 27 | { |
28 | int err = 0; | 28 | int err = 0; |
29 | preempt_disable(); | 29 | struct msr_info rv; |
30 | if (smp_processor_id() == cpu) | 30 | |
31 | if (safe) | 31 | rv.msr_no = msr_no; |
32 | err = rdmsr_safe(msr_no, l, h); | 32 | if (safe) { |
33 | else | 33 | smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 0, 1); |
34 | rdmsr(msr_no, *l, *h); | 34 | err = rv.err; |
35 | else { | 35 | } else { |
36 | struct msr_info rv; | 36 | smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 0, 1); |
37 | |||
38 | rv.msr_no = msr_no; | ||
39 | if (safe) { | ||
40 | smp_call_function_single(cpu, __rdmsr_safe_on_cpu, | ||
41 | &rv, 0, 1); | ||
42 | err = rv.err; | ||
43 | } else { | ||
44 | smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 0, 1); | ||
45 | } | ||
46 | *l = rv.l; | ||
47 | *h = rv.h; | ||
48 | } | 37 | } |
49 | preempt_enable(); | 38 | *l = rv.l; |
39 | *h = rv.h; | ||
40 | |||
50 | return err; | 41 | return err; |
51 | } | 42 | } |
52 | 43 | ||
@@ -67,27 +58,18 @@ static void __wrmsr_safe_on_cpu(void *info) | |||
67 | static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe) | 58 | static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe) |
68 | { | 59 | { |
69 | int err = 0; | 60 | int err = 0; |
70 | preempt_disable(); | 61 | struct msr_info rv; |
71 | if (smp_processor_id() == cpu) | 62 | |
72 | if (safe) | 63 | rv.msr_no = msr_no; |
73 | err = wrmsr_safe(msr_no, l, h); | 64 | rv.l = l; |
74 | else | 65 | rv.h = h; |
75 | wrmsr(msr_no, l, h); | 66 | if (safe) { |
76 | else { | 67 | smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 0, 1); |
77 | struct msr_info rv; | 68 | err = rv.err; |
78 | 69 | } else { | |
79 | rv.msr_no = msr_no; | 70 | smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 0, 1); |
80 | rv.l = l; | ||
81 | rv.h = h; | ||
82 | if (safe) { | ||
83 | smp_call_function_single(cpu, __wrmsr_safe_on_cpu, | ||
84 | &rv, 0, 1); | ||
85 | err = rv.err; | ||
86 | } else { | ||
87 | smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 0, 1); | ||
88 | } | ||
89 | } | 71 | } |
90 | preempt_enable(); | 72 | |
91 | return err; | 73 | return err; |
92 | } | 74 | } |
93 | 75 | ||
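The rewrite above drops the open-coded "already on the right CPU" fast path: smp_call_function_single() in this tree runs the function locally when cpu == smp_processor_id(), so the preempt_disable()/smp_processor_id() dance is redundant. Typical usage of the resulting helpers looks like this sketch, assuming the rdmsr_safe_on_cpu() wrapper exported by this file (the helper name is made up):

        #include <asm/msr.h>

        static int read_msr_as_u64(unsigned int cpu, u32 msr, u64 *val)
        {
                u32 lo, hi;
                int err;

                err = rdmsr_safe_on_cpu(cpu, msr, &lo, &hi);    /* runs on 'cpu' */
                if (err)
                        return err;

                *val = ((u64)hi << 32) | lo;
                return 0;
        }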
diff --git a/arch/x86/lib/rwlock_64.S b/arch/x86/lib/rwlock_64.S index 0cde1f807314..05ea55f71405 100644 --- a/arch/x86/lib/rwlock_64.S +++ b/arch/x86/lib/rwlock_64.S | |||
@@ -2,7 +2,7 @@ | |||
2 | 2 | ||
3 | #include <linux/linkage.h> | 3 | #include <linux/linkage.h> |
4 | #include <asm/rwlock.h> | 4 | #include <asm/rwlock.h> |
5 | #include <asm/alternative-asm.i> | 5 | #include <asm/alternative-asm.h> |
6 | #include <asm/dwarf2.h> | 6 | #include <asm/dwarf2.h> |
7 | 7 | ||
8 | /* rdi: pointer to rwlock_t */ | 8 | /* rdi: pointer to rwlock_t */ |
diff --git a/arch/x86/lib/semaphore_32.S b/arch/x86/lib/semaphore_32.S index c01eb39c0b43..444fba400983 100644 --- a/arch/x86/lib/semaphore_32.S +++ b/arch/x86/lib/semaphore_32.S | |||
@@ -15,8 +15,8 @@ | |||
15 | 15 | ||
16 | #include <linux/linkage.h> | 16 | #include <linux/linkage.h> |
17 | #include <asm/rwlock.h> | 17 | #include <asm/rwlock.h> |
18 | #include <asm/alternative-asm.i> | 18 | #include <asm/alternative-asm.h> |
19 | #include <asm/frame.i> | 19 | #include <asm/frame.h> |
20 | #include <asm/dwarf2.h> | 20 | #include <asm/dwarf2.h> |
21 | 21 | ||
22 | /* | 22 | /* |
diff --git a/arch/x86/lib/string_32.c b/arch/x86/lib/string_32.c index 2c773fefa3dd..c2c0504a3071 100644 --- a/arch/x86/lib/string_32.c +++ b/arch/x86/lib/string_32.c | |||
@@ -160,26 +160,6 @@ char *strchr(const char * s, int c) | |||
160 | EXPORT_SYMBOL(strchr); | 160 | EXPORT_SYMBOL(strchr); |
161 | #endif | 161 | #endif |
162 | 162 | ||
163 | #ifdef __HAVE_ARCH_STRRCHR | ||
164 | char *strrchr(const char * s, int c) | ||
165 | { | ||
166 | int d0, d1; | ||
167 | char * res; | ||
168 | asm volatile( "movb %%al,%%ah\n" | ||
169 | "1:\tlodsb\n\t" | ||
170 | "cmpb %%ah,%%al\n\t" | ||
171 | "jne 2f\n\t" | ||
172 | "leal -1(%%esi),%0\n" | ||
173 | "2:\ttestb %%al,%%al\n\t" | ||
174 | "jne 1b" | ||
175 | :"=g" (res), "=&S" (d0), "=&a" (d1) | ||
176 | :"0" (0),"1" (s),"2" (c) | ||
177 | :"memory"); | ||
178 | return res; | ||
179 | } | ||
180 | EXPORT_SYMBOL(strrchr); | ||
181 | #endif | ||
182 | |||
183 | #ifdef __HAVE_ARCH_STRLEN | 163 | #ifdef __HAVE_ARCH_STRLEN |
184 | size_t strlen(const char * s) | 164 | size_t strlen(const char * s) |
185 | { | 165 | { |
diff --git a/arch/x86/mach-default/setup.c b/arch/x86/mach-default/setup.c index 1bd82983986d..3f08010f3517 100644 --- a/arch/x86/mach-default/setup.c +++ b/arch/x86/mach-default/setup.c | |||
@@ -35,7 +35,11 @@ void __init pre_intr_init_hook(void) | |||
35 | /* | 35 | /* |
36 | * IRQ2 is cascade interrupt to second interrupt controller | 36 | * IRQ2 is cascade interrupt to second interrupt controller |
37 | */ | 37 | */ |
38 | static struct irqaction irq2 = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL}; | 38 | static struct irqaction irq2 = { |
39 | .handler = no_action, | ||
40 | .mask = CPU_MASK_NONE, | ||
41 | .name = "cascade", | ||
42 | }; | ||
39 | 43 | ||
40 | /** | 44 | /** |
41 | * intr_init_hook - post gate setup interrupt initialisation | 45 | * intr_init_hook - post gate setup interrupt initialisation |
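Converting irq2 to designated initializers, as above, names each field explicitly, so reordering or inserting members in struct irqaction can no longer silently shift positional values into the wrong slots. A generic C illustration of the same idiom (sample_action and its fields are invented for the example):

        struct sample_action {
                void (*handler)(void);
                unsigned long flags;
                const char *name;
        };

        static void noop_handler(void) { }

        static struct sample_action cascade_action = {
                .handler = noop_handler,
                .flags   = 0,
                .name    = "cascade",
        };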
diff --git a/arch/x86/mach-es7000/es7000plat.c b/arch/x86/mach-es7000/es7000plat.c index ab99072d3f9a..f5d6f7d8b86e 100644 --- a/arch/x86/mach-es7000/es7000plat.c +++ b/arch/x86/mach-es7000/es7000plat.c | |||
@@ -46,11 +46,11 @@ | |||
46 | * ES7000 Globals | 46 | * ES7000 Globals |
47 | */ | 47 | */ |
48 | 48 | ||
49 | volatile unsigned long *psai = NULL; | 49 | static volatile unsigned long *psai = NULL; |
50 | struct mip_reg *mip_reg; | 50 | static struct mip_reg *mip_reg; |
51 | struct mip_reg *host_reg; | 51 | static struct mip_reg *host_reg; |
52 | int mip_port; | 52 | static int mip_port; |
53 | unsigned long mip_addr, host_addr; | 53 | static unsigned long mip_addr, host_addr; |
54 | 54 | ||
55 | /* | 55 | /* |
56 | * GSI override for ES7000 platforms. | 56 | * GSI override for ES7000 platforms. |
@@ -288,28 +288,8 @@ es7000_start_cpu(int cpu, unsigned long eip) | |||
288 | 288 | ||
289 | } | 289 | } |
290 | 290 | ||
291 | int | ||
292 | es7000_stop_cpu(int cpu) | ||
293 | { | ||
294 | int startup; | ||
295 | |||
296 | if (psai == NULL) | ||
297 | return -1; | ||
298 | |||
299 | startup= (0x1000000 | cpu); | ||
300 | |||
301 | while ((*psai & 0xff00ffff) != startup) | ||
302 | ; | ||
303 | |||
304 | startup = (*psai & 0xff0000) >> 16; | ||
305 | *psai &= 0xffffff; | ||
306 | |||
307 | return 0; | ||
308 | |||
309 | } | ||
310 | |||
311 | void __init | 291 | void __init |
312 | es7000_sw_apic() | 292 | es7000_sw_apic(void) |
313 | { | 293 | { |
314 | if (es7000_plat) { | 294 | if (es7000_plat) { |
315 | int mip_status; | 295 | int mip_status; |
diff --git a/arch/x86/mach-generic/probe.c b/arch/x86/mach-generic/probe.c index 74f3da634423..4121d1551800 100644 --- a/arch/x86/mach-generic/probe.c +++ b/arch/x86/mach-generic/probe.c | |||
@@ -22,7 +22,7 @@ extern struct genapic apic_default; | |||
22 | 22 | ||
23 | struct genapic *genapic = &apic_default; | 23 | struct genapic *genapic = &apic_default; |
24 | 24 | ||
25 | struct genapic *apic_probe[] __initdata = { | 25 | static struct genapic *apic_probe[] __initdata = { |
26 | &apic_summit, | 26 | &apic_summit, |
27 | &apic_bigsmp, | 27 | &apic_bigsmp, |
28 | &apic_es7000, | 28 | &apic_es7000, |
diff --git a/arch/x86/mach-voyager/setup.c b/arch/x86/mach-voyager/setup.c index a0ab4002abcd..3bef977cb29b 100644 --- a/arch/x86/mach-voyager/setup.c +++ b/arch/x86/mach-voyager/setup.c | |||
@@ -18,7 +18,11 @@ void __init pre_intr_init_hook(void) | |||
18 | /* | 18 | /* |
19 | * IRQ2 is cascade interrupt to second interrupt controller | 19 | * IRQ2 is cascade interrupt to second interrupt controller |
20 | */ | 20 | */ |
21 | static struct irqaction irq2 = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL}; | 21 | static struct irqaction irq2 = { |
22 | .handler = no_action, | ||
23 | .mask = CPU_MASK_NONE, | ||
24 | .name = "cascade", | ||
25 | }; | ||
22 | 26 | ||
23 | void __init intr_init_hook(void) | 27 | void __init intr_init_hook(void) |
24 | { | 28 | { |
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index b87f8548e75a..e4928aa6bdfb 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c | |||
@@ -442,8 +442,8 @@ static __u32 __init | |||
442 | setup_trampoline(void) | 442 | setup_trampoline(void) |
443 | { | 443 | { |
444 | /* these two are global symbols in trampoline.S */ | 444 | /* these two are global symbols in trampoline.S */ |
445 | extern __u8 trampoline_end[]; | 445 | extern const __u8 trampoline_end[]; |
446 | extern __u8 trampoline_data[]; | 446 | extern const __u8 trampoline_data[]; |
447 | 447 | ||
448 | memcpy((__u8 *)trampoline_base, trampoline_data, | 448 | memcpy((__u8 *)trampoline_base, trampoline_data, |
449 | trampoline_end - trampoline_data); | 449 | trampoline_end - trampoline_data); |
@@ -1037,6 +1037,7 @@ smp_call_function_interrupt(void) | |||
1037 | */ | 1037 | */ |
1038 | irq_enter(); | 1038 | irq_enter(); |
1039 | (*func)(info); | 1039 | (*func)(info); |
1040 | __get_cpu_var(irq_stat).irq_call_count++; | ||
1040 | irq_exit(); | 1041 | irq_exit(); |
1041 | if (wait) { | 1042 | if (wait) { |
1042 | mb(); | 1043 | mb(); |
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c index b1e45457d4ef..13893772cc48 100644 --- a/arch/x86/mm/discontig_32.c +++ b/arch/x86/mm/discontig_32.c | |||
@@ -103,14 +103,14 @@ extern unsigned long highend_pfn, highstart_pfn; | |||
103 | 103 | ||
104 | #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE) | 104 | #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE) |
105 | 105 | ||
106 | unsigned long node_remap_start_pfn[MAX_NUMNODES]; | 106 | static unsigned long node_remap_start_pfn[MAX_NUMNODES]; |
107 | unsigned long node_remap_size[MAX_NUMNODES]; | 107 | unsigned long node_remap_size[MAX_NUMNODES]; |
108 | unsigned long node_remap_offset[MAX_NUMNODES]; | 108 | static unsigned long node_remap_offset[MAX_NUMNODES]; |
109 | void *node_remap_start_vaddr[MAX_NUMNODES]; | 109 | static void *node_remap_start_vaddr[MAX_NUMNODES]; |
110 | void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); | 110 | void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); |
111 | 111 | ||
112 | void *node_remap_end_vaddr[MAX_NUMNODES]; | 112 | static void *node_remap_end_vaddr[MAX_NUMNODES]; |
113 | void *node_remap_alloc_vaddr[MAX_NUMNODES]; | 113 | static void *node_remap_alloc_vaddr[MAX_NUMNODES]; |
114 | static unsigned long kva_start_pfn; | 114 | static unsigned long kva_start_pfn; |
115 | static unsigned long kva_pages; | 115 | static unsigned long kva_pages; |
116 | /* | 116 | /* |
diff --git a/arch/x86/mm/fault_32.c b/arch/x86/mm/fault_32.c index c686ae20fd6b..6555c3d14371 100644 --- a/arch/x86/mm/fault_32.c +++ b/arch/x86/mm/fault_32.c | |||
@@ -105,7 +105,7 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs, | |||
105 | LDT and other horrors are only used in user space. */ | 105 | LDT and other horrors are only used in user space. */ |
106 | if (seg & (1<<2)) { | 106 | if (seg & (1<<2)) { |
107 | /* Must lock the LDT while reading it. */ | 107 | /* Must lock the LDT while reading it. */ |
108 | down(&current->mm->context.sem); | 108 | mutex_lock(&current->mm->context.lock); |
109 | desc = current->mm->context.ldt; | 109 | desc = current->mm->context.ldt; |
110 | desc = (void *)desc + (seg & ~7); | 110 | desc = (void *)desc + (seg & ~7); |
111 | } else { | 111 | } else { |
@@ -118,7 +118,7 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs, | |||
118 | base = get_desc_base((unsigned long *)desc); | 118 | base = get_desc_base((unsigned long *)desc); |
119 | 119 | ||
120 | if (seg & (1<<2)) { | 120 | if (seg & (1<<2)) { |
121 | up(&current->mm->context.sem); | 121 | mutex_unlock(&current->mm->context.lock); |
122 | } else | 122 | } else |
123 | put_cpu(); | 123 | put_cpu(); |
124 | 124 | ||
@@ -539,23 +539,22 @@ no_context: | |||
539 | printk(KERN_ALERT "BUG: unable to handle kernel paging" | 539 | printk(KERN_ALERT "BUG: unable to handle kernel paging" |
540 | " request"); | 540 | " request"); |
541 | printk(" at virtual address %08lx\n",address); | 541 | printk(" at virtual address %08lx\n",address); |
542 | printk(KERN_ALERT " printing eip:\n"); | 542 | printk(KERN_ALERT "printing eip: %08lx ", regs->eip); |
543 | printk("%08lx\n", regs->eip); | ||
544 | 543 | ||
545 | page = read_cr3(); | 544 | page = read_cr3(); |
546 | page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT]; | 545 | page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT]; |
547 | #ifdef CONFIG_X86_PAE | 546 | #ifdef CONFIG_X86_PAE |
548 | printk(KERN_ALERT "*pdpt = %016Lx\n", page); | 547 | printk("*pdpt = %016Lx ", page); |
549 | if ((page >> PAGE_SHIFT) < max_low_pfn | 548 | if ((page >> PAGE_SHIFT) < max_low_pfn |
550 | && page & _PAGE_PRESENT) { | 549 | && page & _PAGE_PRESENT) { |
551 | page &= PAGE_MASK; | 550 | page &= PAGE_MASK; |
552 | page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT) | 551 | page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT) |
553 | & (PTRS_PER_PMD - 1)]; | 552 | & (PTRS_PER_PMD - 1)]; |
554 | printk(KERN_ALERT "*pde = %016Lx\n", page); | 553 | printk(KERN_ALERT "*pde = %016Lx ", page); |
555 | page &= ~_PAGE_NX; | 554 | page &= ~_PAGE_NX; |
556 | } | 555 | } |
557 | #else | 556 | #else |
558 | printk(KERN_ALERT "*pde = %08lx\n", page); | 557 | printk("*pde = %08lx ", page); |
559 | #endif | 558 | #endif |
560 | 559 | ||
561 | /* | 560 | /* |
@@ -569,8 +568,10 @@ no_context: | |||
569 | page &= PAGE_MASK; | 568 | page &= PAGE_MASK; |
570 | page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT) | 569 | page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT) |
571 | & (PTRS_PER_PTE - 1)]; | 570 | & (PTRS_PER_PTE - 1)]; |
572 | printk(KERN_ALERT "*pte = %0*Lx\n", sizeof(page)*2, (u64)page); | 571 | printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page); |
573 | } | 572 | } |
573 | |||
574 | printk("\n"); | ||
574 | } | 575 | } |
575 | 576 | ||
576 | tsk->thread.cr2 = address; | 577 | tsk->thread.cr2 = address; |
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 33d367a3432e..c7d19471261d 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c | |||
@@ -85,13 +85,20 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd) | |||
85 | static pte_t * __init one_page_table_init(pmd_t *pmd) | 85 | static pte_t * __init one_page_table_init(pmd_t *pmd) |
86 | { | 86 | { |
87 | if (!(pmd_val(*pmd) & _PAGE_PRESENT)) { | 87 | if (!(pmd_val(*pmd) & _PAGE_PRESENT)) { |
88 | pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); | 88 | pte_t *page_table = NULL; |
89 | |||
90 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
91 | page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE); | ||
92 | #endif | ||
93 | if (!page_table) | ||
94 | page_table = | ||
95 | (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE); | ||
89 | 96 | ||
90 | paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT); | 97 | paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT); |
91 | set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); | 98 | set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); |
92 | BUG_ON(page_table != pte_offset_kernel(pmd, 0)); | 99 | BUG_ON(page_table != pte_offset_kernel(pmd, 0)); |
93 | } | 100 | } |
94 | 101 | ||
95 | return pte_offset_kernel(pmd, 0); | 102 | return pte_offset_kernel(pmd, 0); |
96 | } | 103 | } |
97 | 104 | ||
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 6da235522269..5eec5e56d07f 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c | |||
@@ -166,7 +166,7 @@ early_node_mem(int nodeid, unsigned long start, unsigned long end, | |||
166 | return __va(mem); | 166 | return __va(mem); |
167 | ptr = __alloc_bootmem_nopanic(size, | 167 | ptr = __alloc_bootmem_nopanic(size, |
168 | SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)); | 168 | SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)); |
169 | if (ptr == 0) { | 169 | if (ptr == NULL) { |
170 | printk(KERN_ERR "Cannot find %lu bytes in node %d\n", | 170 | printk(KERN_ERR "Cannot find %lu bytes in node %d\n", |
171 | size, nodeid); | 171 | size, nodeid); |
172 | return NULL; | 172 | return NULL; |
@@ -261,7 +261,7 @@ void __init numa_init_array(void) | |||
261 | We round robin the existing nodes. */ | 261 | We round robin the existing nodes. */ |
262 | rr = first_node(node_online_map); | 262 | rr = first_node(node_online_map); |
263 | for (i = 0; i < NR_CPUS; i++) { | 263 | for (i = 0; i < NR_CPUS; i++) { |
264 | if (cpu_to_node[i] != NUMA_NO_NODE) | 264 | if (cpu_to_node(i) != NUMA_NO_NODE) |
265 | continue; | 265 | continue; |
266 | numa_set_node(i, rr); | 266 | numa_set_node(i, rr); |
267 | rr = next_node(rr, node_online_map); | 267 | rr = next_node(rr, node_online_map); |
@@ -543,7 +543,7 @@ __cpuinit void numa_add_cpu(int cpu) | |||
543 | void __cpuinit numa_set_node(int cpu, int node) | 543 | void __cpuinit numa_set_node(int cpu, int node) |
544 | { | 544 | { |
545 | cpu_pda(cpu)->nodenumber = node; | 545 | cpu_pda(cpu)->nodenumber = node; |
546 | cpu_to_node[cpu] = node; | 546 | cpu_to_node(cpu) = node; |
547 | } | 547 | } |
548 | 548 | ||
549 | unsigned long __init numa_free_all_bootmem(void) | 549 | unsigned long __init numa_free_all_bootmem(void) |
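Both numa_64.c hunks switch from indexing a cpu_to_node[] array directly to going through the cpu_to_node() accessor. Since numa_set_node() still assigns through it, the accessor has to expand to an lvalue; something along these lines, with the backing array name invented for the example (the real accessor lives in the x86 topology headers):

    /* Illustration only, not the real topology header. */
    extern int __cpu_to_node_map[NR_CPUS];
    #define cpu_to_node(cpu)   (__cpu_to_node_map[(cpu)])

    /* Readers and writers then share one interface: */
    if (cpu_to_node(i) == NUMA_NO_NODE)
            numa_set_node(i, rr);
    cpu_to_node(cpu) = node;        /* inside numa_set_node() */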
diff --git a/arch/x86/mm/pageattr_32.c b/arch/x86/mm/pageattr_32.c index 4241a74d16c8..260073c07600 100644 --- a/arch/x86/mm/pageattr_32.c +++ b/arch/x86/mm/pageattr_32.c | |||
@@ -70,10 +70,10 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot, | |||
70 | 70 | ||
71 | static void cache_flush_page(struct page *p) | 71 | static void cache_flush_page(struct page *p) |
72 | { | 72 | { |
73 | unsigned long adr = (unsigned long)page_address(p); | 73 | void *adr = page_address(p); |
74 | int i; | 74 | int i; |
75 | for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) | 75 | for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) |
76 | asm volatile("clflush (%0)" :: "r" (adr + i)); | 76 | clflush(adr+i); |
77 | } | 77 | } |
78 | 78 | ||
79 | static void flush_kernel_map(void *arg) | 79 | static void flush_kernel_map(void *arg) |
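The open-coded inline assembly is replaced by a clflush() helper, here and in the pageattr_64.c hunk below. Such a helper typically wraps the instruction roughly as follows (sketch, not the exact header definition):

    /* Flush the cache line containing *p.  The "+m" constraint makes the
     * pointed-to memory an input/output operand, so the compiler cannot
     * reorder or cache accesses to it across the flush. */
    static inline void clflush(volatile void *p)
    {
            asm volatile("clflush %0" : "+m" (*(volatile char *)p));
    }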
diff --git a/arch/x86/mm/pageattr_64.c b/arch/x86/mm/pageattr_64.c index 10b9809ce821..8a4f65bf956e 100644 --- a/arch/x86/mm/pageattr_64.c +++ b/arch/x86/mm/pageattr_64.c | |||
@@ -65,7 +65,7 @@ static void cache_flush_page(void *adr) | |||
65 | { | 65 | { |
66 | int i; | 66 | int i; |
67 | for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) | 67 | for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) |
68 | asm volatile("clflush (%0)" :: "r" (adr + i)); | 68 | clflush(adr+i); |
69 | } | 69 | } |
70 | 70 | ||
71 | static void flush_kernel_map(void *arg) | 71 | static void flush_kernel_map(void *arg) |
@@ -148,6 +148,7 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot, | |||
148 | split = split_large_page(address, prot, ref_prot2); | 148 | split = split_large_page(address, prot, ref_prot2); |
149 | if (!split) | 149 | if (!split) |
150 | return -ENOMEM; | 150 | return -ENOMEM; |
151 | pgprot_val(ref_prot2) &= ~_PAGE_NX; | ||
151 | set_pte(kpte, mk_pte(split, ref_prot2)); | 152 | set_pte(kpte, mk_pte(split, ref_prot2)); |
152 | kpte_page = split; | 153 | kpte_page = split; |
153 | } | 154 | } |
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c index ef1f6cd3ea66..be61a1d845a4 100644 --- a/arch/x86/mm/pgtable_32.c +++ b/arch/x86/mm/pgtable_32.c | |||
@@ -6,6 +6,7 @@ | |||
6 | #include <linux/kernel.h> | 6 | #include <linux/kernel.h> |
7 | #include <linux/errno.h> | 7 | #include <linux/errno.h> |
8 | #include <linux/mm.h> | 8 | #include <linux/mm.h> |
9 | #include <linux/nmi.h> | ||
9 | #include <linux/swap.h> | 10 | #include <linux/swap.h> |
10 | #include <linux/smp.h> | 11 | #include <linux/smp.h> |
11 | #include <linux/highmem.h> | 12 | #include <linux/highmem.h> |
@@ -39,6 +40,8 @@ void show_mem(void) | |||
39 | for_each_online_pgdat(pgdat) { | 40 | for_each_online_pgdat(pgdat) { |
40 | pgdat_resize_lock(pgdat, &flags); | 41 | pgdat_resize_lock(pgdat, &flags); |
41 | for (i = 0; i < pgdat->node_spanned_pages; ++i) { | 42 | for (i = 0; i < pgdat->node_spanned_pages; ++i) { |
43 | if (unlikely(i % MAX_ORDER_NR_PAGES == 0)) | ||
44 | touch_nmi_watchdog(); | ||
42 | page = pgdat_page_nr(pgdat, i); | 45 | page = pgdat_page_nr(pgdat, i); |
43 | total++; | 46 | total++; |
44 | if (PageHighMem(page)) | 47 | if (PageHighMem(page)) |
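Walking every page of a large node in show_mem() can take long enough for the NMI watchdog to fire, so the hunk above touches it once per MAX_ORDER_NR_PAGES iterations. The same pattern applies to any long kernel loop (sketch; nr_items stands in for whatever bounds the loop):

    #include <linux/nmi.h>

    for (i = 0; i < nr_items; i++) {
            if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
                    touch_nmi_watchdog();   /* cheap, taken rarely */
            /* ... per-item work ... */
    }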
@@ -97,8 +100,7 @@ static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags) | |||
97 | } | 100 | } |
98 | pte = pte_offset_kernel(pmd, vaddr); | 101 | pte = pte_offset_kernel(pmd, vaddr); |
99 | if (pgprot_val(flags)) | 102 | if (pgprot_val(flags)) |
100 | /* <pfn,flags> stored as-is, to permit clearing entries */ | 103 | set_pte_present(&init_mm, vaddr, pte, pfn_pte(pfn, flags)); |
101 | set_pte(pte, pfn_pte(pfn, flags)); | ||
102 | else | 104 | else |
103 | pte_clear(&init_mm, vaddr, pte); | 105 | pte_clear(&init_mm, vaddr, pte); |
104 | 106 | ||
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index acdf03e19146..56089ccc3949 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c | |||
@@ -431,9 +431,9 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end) | |||
431 | setup_node_bootmem(i, nodes[i].start, nodes[i].end); | 431 | setup_node_bootmem(i, nodes[i].start, nodes[i].end); |
432 | 432 | ||
433 | for (i = 0; i < NR_CPUS; i++) { | 433 | for (i = 0; i < NR_CPUS; i++) { |
434 | if (cpu_to_node[i] == NUMA_NO_NODE) | 434 | if (cpu_to_node(i) == NUMA_NO_NODE) |
435 | continue; | 435 | continue; |
436 | if (!node_isset(cpu_to_node[i], node_possible_map)) | 436 | if (!node_isset(cpu_to_node(i), node_possible_map)) |
437 | numa_set_node(i, NUMA_NO_NODE); | 437 | numa_set_node(i, NUMA_NO_NODE); |
438 | } | 438 | } |
439 | numa_init_array(); | 439 | numa_init_array(); |
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 11b7a51566a8..2d0eeac7251f 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c | |||
@@ -269,7 +269,6 @@ static void nmi_cpu_shutdown(void * dummy) | |||
269 | apic_write(APIC_LVTPC, saved_lvtpc[cpu]); | 269 | apic_write(APIC_LVTPC, saved_lvtpc[cpu]); |
270 | apic_write(APIC_LVTERR, v); | 270 | apic_write(APIC_LVTERR, v); |
271 | nmi_restore_registers(msrs); | 271 | nmi_restore_registers(msrs); |
272 | model->shutdown(msrs); | ||
273 | } | 272 | } |
274 | 273 | ||
275 | 274 | ||
@@ -278,6 +277,7 @@ static void nmi_shutdown(void) | |||
278 | nmi_enabled = 0; | 277 | nmi_enabled = 0; |
279 | on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1); | 278 | on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1); |
280 | unregister_die_notifier(&profile_exceptions_nb); | 279 | unregister_die_notifier(&profile_exceptions_nb); |
280 | model->shutdown(cpu_msrs); | ||
281 | free_msrs(); | 281 | free_msrs(); |
282 | } | 282 | } |
283 | 283 | ||
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 2d71bbc411d2..f4386990b150 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c | |||
@@ -289,6 +289,22 @@ static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = { | |||
289 | DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL685c G1"), | 289 | DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL685c G1"), |
290 | }, | 290 | }, |
291 | }, | 291 | }, |
292 | { | ||
293 | .callback = set_bf_sort, | ||
294 | .ident = "HP ProLiant DL385 G2", | ||
295 | .matches = { | ||
296 | DMI_MATCH(DMI_SYS_VENDOR, "HP"), | ||
297 | DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL385 G2"), | ||
298 | }, | ||
299 | }, | ||
300 | { | ||
301 | .callback = set_bf_sort, | ||
302 | .ident = "HP ProLiant DL585 G2", | ||
303 | .matches = { | ||
304 | DMI_MATCH(DMI_SYS_VENDOR, "HP"), | ||
305 | DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL585 G2"), | ||
306 | }, | ||
307 | }, | ||
292 | #ifdef __i386__ | 308 | #ifdef __i386__ |
293 | { | 309 | { |
294 | .callback = assign_all_busses, | 310 | .callback = assign_all_busses, |
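The two new entries extend pciprobe_dmi_table so the breadth-first bus sort quirk also triggers on these ProLiant models. The table is consumed by the generic DMI matcher, which runs an entry's .callback only when every DMI_MATCH in it succeeds; a hedged usage sketch (the printk is illustrative):

    /* dmi_check_system() returns the number of matching entries and has
     * already invoked each match's .callback by the time it returns. */
    if (dmi_check_system(pciprobe_dmi_table))
            printk(KERN_DEBUG "PCI: applied DMI bus-sort quirk\n");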
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile index dcd6bb9e0bb3..7a2ba4583939 100644 --- a/arch/x86/vdso/Makefile +++ b/arch/x86/vdso/Makefile | |||
@@ -13,7 +13,7 @@ vobjs := $(foreach F,$(vobjs-y),$(obj)/$F) | |||
13 | 13 | ||
14 | $(obj)/vdso.o: $(obj)/vdso.so | 14 | $(obj)/vdso.o: $(obj)/vdso.so |
15 | 15 | ||
16 | targets += vdso.so vdso.lds $(vobjs-y) vdso-syms.o | 16 | targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y) vdso-syms.o |
17 | 17 | ||
18 | # The DSO images are built using a special linker script. | 18 | # The DSO images are built using a special linker script. |
19 | quiet_cmd_syscall = SYSCALL $@ | 19 | quiet_cmd_syscall = SYSCALL $@ |
@@ -26,12 +26,19 @@ vdso-flags = -fPIC -shared -Wl,-soname=linux-vdso.so.1 \ | |||
26 | $(call ld-option, -Wl$(comma)--hash-style=sysv) \ | 26 | $(call ld-option, -Wl$(comma)--hash-style=sysv) \ |
27 | -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 | 27 | -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 |
28 | SYSCFLAGS_vdso.so = $(vdso-flags) | 28 | SYSCFLAGS_vdso.so = $(vdso-flags) |
29 | SYSCFLAGS_vdso.so.dbg = $(vdso-flags) | ||
29 | 30 | ||
30 | $(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so | 31 | $(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so |
31 | 32 | ||
32 | $(obj)/vdso.so: $(src)/vdso.lds $(vobjs) FORCE | 33 | $(obj)/vdso.so: $(src)/vdso.lds $(vobjs) FORCE |
34 | |||
35 | $(obj)/vdso.so.dbg: $(src)/vdso.lds $(vobjs) FORCE | ||
33 | $(call if_changed,syscall) | 36 | $(call if_changed,syscall) |
34 | 37 | ||
38 | $(obj)/%.so: OBJCOPYFLAGS := -S | ||
39 | $(obj)/%.so: $(obj)/%.so.dbg FORCE | ||
40 | $(call if_changed,objcopy) | ||
41 | |||
35 | CFL := $(PROFILING) -mcmodel=small -fPIC -g0 -O2 -fasynchronous-unwind-tables -m64 | 42 | CFL := $(PROFILING) -mcmodel=small -fPIC -g0 -O2 -fasynchronous-unwind-tables -m64 |
36 | 43 | ||
37 | $(obj)/vclock_gettime.o: KBUILD_CFLAGS = $(CFL) | 44 | $(obj)/vclock_gettime.o: KBUILD_CFLAGS = $(CFL) |
@@ -47,3 +54,11 @@ $(obj)/built-in.o: ld_flags += -R $(obj)/vdso-syms.o | |||
47 | SYSCFLAGS_vdso-syms.o = -r -d | 54 | SYSCFLAGS_vdso-syms.o = -r -d |
48 | $(obj)/vdso-syms.o: $(src)/vdso.lds $(vobjs) FORCE | 55 | $(obj)/vdso-syms.o: $(src)/vdso.lds $(vobjs) FORCE |
49 | $(call if_changed,syscall) | 56 | $(call if_changed,syscall) |
57 | |||
58 | quiet_cmd_vdso_install = INSTALL $@ | ||
59 | cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@ | ||
60 | vdso.so: | ||
61 | @mkdir -p $(MODLIB)/vdso | ||
62 | $(call cmd,vdso_install) | ||
63 | |||
64 | vdso_install: vdso.so | ||
diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S index b9a60e665d08..667d3245d972 100644 --- a/arch/x86/vdso/vdso.lds.S +++ b/arch/x86/vdso/vdso.lds.S | |||
@@ -26,13 +26,16 @@ SECTIONS | |||
26 | is insufficient, ld -shared will barf. Just increase it here. */ | 26 | is insufficient, ld -shared will barf. Just increase it here. */ |
27 | . = VDSO_PRELINK + VDSO_TEXT_OFFSET; | 27 | . = VDSO_PRELINK + VDSO_TEXT_OFFSET; |
28 | 28 | ||
29 | .text : { *(.text) } :text | 29 | .text : { *(.text*) } :text |
30 | .text.ptr : { *(.text.ptr) } :text | 30 | .rodata : { *(.rodata*) } :text |
31 | . = VDSO_PRELINK + 0x900; | 31 | .data : { |
32 | .data : { *(.data) } :text | 32 | *(.data*) |
33 | .bss : { *(.bss) } :text | 33 | *(.sdata*) |
34 | *(.bss*) | ||
35 | *(.dynbss*) | ||
36 | } :text | ||
34 | 37 | ||
35 | .altinstructions : { *(.altinstructions) } :text | 38 | .altinstructions : { *(.altinstructions) } :text |
36 | .altinstr_replacement : { *(.altinstr_replacement) } :text | 39 | .altinstr_replacement : { *(.altinstr_replacement) } :text |
37 | 40 | ||
38 | .note : { *(.note.*) } :text :note | 41 | .note : { *(.note.*) } :text :note |
@@ -42,7 +45,6 @@ SECTIONS | |||
42 | .useless : { | 45 | .useless : { |
43 | *(.got.plt) *(.got) | 46 | *(.got.plt) *(.got) |
44 | *(.gnu.linkonce.d.*) | 47 | *(.gnu.linkonce.d.*) |
45 | *(.dynbss) | ||
46 | *(.gnu.linkonce.b.*) | 48 | *(.gnu.linkonce.b.*) |
47 | } :text | 49 | } :text |
48 | } | 50 | } |
diff --git a/arch/x86/vdso/vvar.c b/arch/x86/vdso/vvar.c index 6fc22219a472..1b7e703684f9 100644 --- a/arch/x86/vdso/vvar.c +++ b/arch/x86/vdso/vvar.c | |||
@@ -8,5 +8,5 @@ | |||
8 | #include <asm/timex.h> | 8 | #include <asm/timex.h> |
9 | #include <asm/vgtod.h> | 9 | #include <asm/vgtod.h> |
10 | 10 | ||
11 | #define VEXTERN(x) typeof (__ ## x) *vdso_ ## x = (void *)VMAGIC; | 11 | #define VEXTERN(x) typeof (__ ## x) *const vdso_ ## x = (void *)VMAGIC; |
12 | #include "vextern.h" | 12 | #include "vextern.h" |
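Making the VEXTERN pointers const matters once the vdso is stripped and re-laid-out by the new linker script: a const-qualified, statically initialized pointer no longer has to live in writable data. For one variable pulled in from vextern.h the macro now expands roughly as below (vgetcpu is just an example name):

    /* Before: a writable pointer, i.e. a .data object inside the DSO. */
    typeof(__vgetcpu) *vdso_vgetcpu = (void *)VMAGIC;

    /* After: the pointer itself is const, so it can be kept with read-only
     * data, which the vdso.lds.S change above places in the text segment. */
    typeof(__vgetcpu) *const vdso_vgetcpu = (void *)VMAGIC;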
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index d53bf9d8a72d..c1b131bcdcbe 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c | |||
@@ -356,6 +356,7 @@ static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id) | |||
356 | */ | 356 | */ |
357 | irq_enter(); | 357 | irq_enter(); |
358 | (*func)(info); | 358 | (*func)(info); |
359 | __get_cpu_var(irq_stat).irq_call_count++; | ||
359 | irq_exit(); | 360 | irq_exit(); |
360 | 361 | ||
361 | if (wait) { | 362 | if (wait) { |
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig index d681be88ae5d..43fafe9e9c08 100644 --- a/arch/x86_64/Kconfig +++ b/arch/x86_64/Kconfig | |||
@@ -175,14 +175,12 @@ config MK8 | |||
175 | config MPSC | 175 | config MPSC |
176 | bool "Intel P4 / older Netburst based Xeon" | 176 | bool "Intel P4 / older Netburst based Xeon" |
177 | help | 177 | help |
178 | Optimize for Intel Pentium 4 and older Nocona/Dempsey Xeon CPUs | 178 | Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey |
179 | with Intel Extended Memory 64 Technology(EM64T). For details see | 179 | Xeon CPUs with Intel 64bit which is compatible with x86-64. |
180 | <http://www.intel.com/technology/64bitextensions/>. | ||
181 | Note that the latest Xeons (Xeon 51xx and 53xx) are not based on the | 180 | Note that the latest Xeons (Xeon 51xx and 53xx) are not based on the |
182 | Netburst core and shouldn't use this option. You can distinguish them | 181 | Netburst core and shouldn't use this option. You can distinguish them |
183 | using the cpu family field | 182 | using the cpu family field |
184 | in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one | 183 | in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one. |
185 | (this rule only applies to systems that support EM64T) | ||
186 | 184 | ||
187 | config MCORE2 | 185 | config MCORE2 |
188 | bool "Intel Core2 / newer Xeon" | 186 | bool "Intel Core2 / newer Xeon" |
@@ -190,8 +188,7 @@ config MCORE2 | |||
190 | Optimize for Intel Core2 and newer Xeons (51xx) | 188 | Optimize for Intel Core2 and newer Xeons (51xx) |
191 | You can distinguish the newer Xeons from the older ones using | 189 | You can distinguish the newer Xeons from the older ones using |
192 | the cpu family field in /proc/cpuinfo. 15 is an older Xeon | 190 | the cpu family field in /proc/cpuinfo. 15 is an older Xeon |
193 | (use CONFIG_MPSC then), 6 is a newer one. This rule only | 191 | (use CONFIG_MPSC then), 6 is a newer one. |
194 | applies to CPUs that support EM64T. | ||
195 | 192 | ||
196 | config GENERIC_CPU | 193 | config GENERIC_CPU |
197 | bool "Generic-x86-64" | 194 | bool "Generic-x86-64" |
@@ -476,8 +473,9 @@ config HPET_TIMER | |||
476 | <http://www.intel.com/hardwaredesign/hpetspec.htm>. | 473 | <http://www.intel.com/hardwaredesign/hpetspec.htm>. |
477 | 474 | ||
478 | config HPET_EMULATE_RTC | 475 | config HPET_EMULATE_RTC |
479 | bool "Provide RTC interrupt" | 476 | bool |
480 | depends on HPET_TIMER && RTC=y | 477 | depends on HPET_TIMER && RTC=y |
478 | default y | ||
481 | 479 | ||
482 | # Mark as embedded because too many people got it wrong. | 480 | # Mark as embedded because too many people got it wrong. |
483 | # The code disables itself when not needed. | 481 | # The code disables itself when not needed. |
diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile index 9daa32d1d2a1..03e1ede27b85 100644 --- a/arch/x86_64/Makefile +++ b/arch/x86_64/Makefile | |||
@@ -110,9 +110,15 @@ bzdisk: vmlinux | |||
110 | fdimage fdimage144 fdimage288 isoimage: vmlinux | 110 | fdimage fdimage144 fdimage288 isoimage: vmlinux |
111 | $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@ | 111 | $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@ |
112 | 112 | ||
113 | install: | 113 | install: vdso_install |
114 | $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@ | 114 | $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@ |
115 | 115 | ||
116 | vdso_install: | ||
117 | ifeq ($(CONFIG_IA32_EMULATION),y) | ||
118 | $(Q)$(MAKE) $(build)=arch/x86/ia32 $@ | ||
119 | endif | ||
120 | $(Q)$(MAKE) $(build)=arch/x86/vdso $@ | ||
121 | |||
116 | archclean: | 122 | archclean: |
117 | $(Q)rm -rf $(objtree)/arch/x86_64/boot | 123 | $(Q)rm -rf $(objtree)/arch/x86_64/boot |
118 | $(Q)$(MAKE) $(clean)=$(boot) | 124 | $(Q)$(MAKE) $(clean)=$(boot) |