Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig | 1
-rw-r--r--  arch/x86/boot/compressed/Makefile | 6
-rw-r--r--  arch/x86/boot/compressed/misc.c | 4
-rw-r--r--  arch/x86/crypto/Makefile | 2
-rw-r--r--  arch/x86/crypto/crct10dif-pcl-asm_64.S | 643
-rw-r--r--  arch/x86/crypto/crct10dif-pclmul_glue.c | 151
-rw-r--r--  arch/x86/ia32/ia32_aout.c | 2
-rw-r--r--  arch/x86/include/asm/cpu.h | 2
-rw-r--r--  arch/x86/include/asm/emergency-restart.h | 12
-rw-r--r--  arch/x86/include/asm/io.h | 7
-rw-r--r--  arch/x86/include/asm/mc146818rtc.h | 4
-rw-r--r--  arch/x86/include/asm/mce.h | 7
-rw-r--r--  arch/x86/include/asm/microcode.h | 4
-rw-r--r--  arch/x86/include/asm/microcode_amd.h | 4
-rw-r--r--  arch/x86/include/asm/microcode_intel.h | 4
-rw-r--r--  arch/x86/include/asm/mmconfig.h | 4
-rw-r--r--  arch/x86/include/asm/mpspec.h | 2
-rw-r--r--  arch/x86/include/asm/mrst-vrtc.h | 4
-rw-r--r--  arch/x86/include/asm/mtrr.h | 10
-rw-r--r--  arch/x86/include/asm/numa.h | 6
-rw-r--r--  arch/x86/include/asm/processor.h | 2
-rw-r--r--  arch/x86/include/asm/prom.h | 2
-rw-r--r--  arch/x86/include/asm/smp.h | 2
-rw-r--r--  arch/x86/include/asm/x86_init.h | 6
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 6
-rw-r--r--  arch/x86/kernel/acpi/sleep.c | 18
-rw-r--r--  arch/x86/kernel/apic/apic.c | 30
-rw-r--r--  arch/x86/kernel/apic/apic_numachip.c | 2
-rw-r--r--  arch/x86/kernel/apic/es7000_32.c | 2
-rw-r--r--  arch/x86/kernel/apic/numaq_32.c | 2
-rw-r--r--  arch/x86/kernel/apic/x2apic_cluster.c | 2
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c | 16
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 33
-rw-r--r--  arch/x86/kernel/cpu/centaur.c | 26
-rw-r--r--  arch/x86/kernel/cpu/common.c | 64
-rw-r--r--  arch/x86/kernel/cpu/cyrix.c | 40
-rw-r--r--  arch/x86/kernel/cpu/hypervisor.c | 2
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 30
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 55
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 23
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd.c | 14
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c | 72
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c | 71
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd_ibs.c | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd_iommu.c | 4
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd_uncore.c | 31
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.c | 20
-rw-r--r--  arch/x86/kernel/cpu/rdrand.c | 2
-rw-r--r--  arch/x86/kernel/cpu/scattered.c | 4
-rw-r--r--  arch/x86/kernel/cpu/topology.c | 2
-rw-r--r--  arch/x86/kernel/cpu/transmeta.c | 6
-rw-r--r--  arch/x86/kernel/cpu/umc.c | 2
-rw-r--r--  arch/x86/kernel/cpu/vmware.c | 2
-rw-r--r--  arch/x86/kernel/cpuid.c | 7
-rw-r--r--  arch/x86/kernel/devicetree.c | 6
-rw-r--r--  arch/x86/kernel/head_32.S | 1
-rw-r--r--  arch/x86/kernel/head_64.S | 15
-rw-r--r--  arch/x86/kernel/hw_breakpoint.c | 3
-rw-r--r--  arch/x86/kernel/i387.c | 10
-rw-r--r--  arch/x86/kernel/irq_32.c | 2
-rw-r--r--  arch/x86/kernel/kvm.c | 10
-rw-r--r--  arch/x86/kernel/kvmclock.c | 11
-rw-r--r--  arch/x86/kernel/microcode_amd_early.c | 8
-rw-r--r--  arch/x86/kernel/microcode_core.c | 2
-rw-r--r--  arch/x86/kernel/microcode_core_early.c | 6
-rw-r--r--  arch/x86/kernel/microcode_intel_early.c | 26
-rw-r--r--  arch/x86/kernel/mmconf-fam10h_64.c | 12
-rw-r--r--  arch/x86/kernel/msr.c | 6
-rw-r--r--  arch/x86/kernel/nmi.c | 7
-rw-r--r--  arch/x86/kernel/process.c | 2
-rw-r--r--  arch/x86/kernel/ptrace.c | 204
-rw-r--r--  arch/x86/kernel/reboot.c | 117
-rw-r--r--  arch/x86/kernel/rtc.c | 17
-rw-r--r--  arch/x86/kernel/setup.c | 2
-rw-r--r--  arch/x86/kernel/smp.c | 29
-rw-r--r--  arch/x86/kernel/smpboot.c | 28
-rw-r--r--  arch/x86/kernel/tboot.c | 6
-rw-r--r--  arch/x86/kernel/tracepoint.c | 6
-rw-r--r--  arch/x86/kernel/traps.c | 12
-rw-r--r--  arch/x86/kernel/tsc.c | 4
-rw-r--r--  arch/x86/kernel/tsc_sync.c | 18
-rw-r--r--  arch/x86/kernel/vsyscall_64.c | 6
-rw-r--r--  arch/x86/kernel/x86_init.c | 4
-rw-r--r--  arch/x86/kernel/xsave.c | 4
-rw-r--r--  arch/x86/kvm/mmu.c | 7
-rw-r--r--  arch/x86/kvm/vmx.c | 11
-rw-r--r--  arch/x86/lguest/boot.c | 4
-rw-r--r--  arch/x86/mm/mmap.c | 2
-rw-r--r--  arch/x86/mm/mmio-mod.c | 4
-rw-r--r--  arch/x86/mm/numa.c | 12
-rw-r--r--  arch/x86/mm/numa_emulation.c | 12
-rw-r--r--  arch/x86/mm/pgtable.c | 4
-rw-r--r--  arch/x86/mm/setup_nx.c | 4
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 61
-rw-r--r--  arch/x86/pci/amd_bus.c | 8
-rw-r--r--  arch/x86/platform/ce4100/ce4100.c | 3
-rw-r--r--  arch/x86/platform/efi/efi.c | 17
-rw-r--r--  arch/x86/platform/mrst/mrst.c | 4
-rw-r--r--  arch/x86/platform/mrst/vrtc.c | 11
-rw-r--r--  arch/x86/um/signal.c | 1
-rw-r--r--  arch/x86/xen/enlighten.c | 6
-rw-r--r--  arch/x86/xen/setup.c | 6
-rw-r--r--  arch/x86/xen/smp.c | 12
-rw-r--r--  arch/x86/xen/spinlock.c | 2
-rw-r--r--  arch/x86/xen/time.c | 58
-rw-r--r--  arch/x86/xen/xen-ops.h | 2
107 files changed, 789 insertions, 1529 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 265c672a2f40..b32ebf92b0ce 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -65,6 +65,7 @@ config X86
 	select HAVE_KERNEL_LZMA
 	select HAVE_KERNEL_XZ
 	select HAVE_KERNEL_LZO
+	select HAVE_KERNEL_LZ4
 	select HAVE_HW_BREAKPOINT
 	select HAVE_MIXED_BREAKPOINTS_REGS
 	select PERF_EVENTS
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 5ef205c5f37b..dcd90df10ab4 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -4,7 +4,8 @@
 # create a compressed vmlinux image from the original vmlinux
 #
 
-targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo
+targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
+	vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4
 
 KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
 KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
@@ -63,12 +64,15 @@ $(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE
 	$(call if_changed,xzkern)
 $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE
 	$(call if_changed,lzo)
+$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) FORCE
+	$(call if_changed,lz4)
 
 suffix-$(CONFIG_KERNEL_GZIP) := gz
 suffix-$(CONFIG_KERNEL_BZIP2) := bz2
 suffix-$(CONFIG_KERNEL_LZMA) := lzma
 suffix-$(CONFIG_KERNEL_XZ) := xz
 suffix-$(CONFIG_KERNEL_LZO) := lzo
+suffix-$(CONFIG_KERNEL_LZ4) := lz4
 
 quiet_cmd_mkpiggy = MKPIGGY $@
       cmd_mkpiggy = $(obj)/mkpiggy $< > $@ || ( rm -f $@ ; false )
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 7cb56c6ca351..0319c88290a5 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -145,6 +145,10 @@ static int lines, cols;
 #include "../../../../lib/decompress_unlzo.c"
 #endif
 
+#ifdef CONFIG_KERNEL_LZ4
+#include "../../../../lib/decompress_unlz4.c"
+#endif
+
 static void scroll(void)
 {
 	int i;
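
Taken together, the Kconfig, boot/compressed/Makefile and misc.c hunks above are the whole of the LZ4 wiring: the new option selects a vmlinux.bin.lz4 payload, and the boot stub pulls in the matching decompressor. Each lib/decompress_un*.c, when built into the preboot stub, compiles down to one common decompress() entry point, so decompress_kernel() stays format-agnostic. A sketch of that contract follows; the exact integer types are an assumption abridged from the 3.x-era lib/ sources:

	/* One of these is provided by whichever decompress_un*.c is included. */
	static int decompress(unsigned char *buf, int in_len,
			      int (*fill)(void *, unsigned int),
			      int (*flush)(void *, unsigned int),
			      unsigned char *output, int *posp,
			      void (*error)(char *x));

Because exactly one CONFIG_KERNEL_* option can be set, exactly one decompressor is ever compiled in, and the suffix-y logic above picks the matching compressed payload for mkpiggy.
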
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 7d6ba9db1be9..6c63c358a7e6 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -27,7 +27,6 @@ obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
 obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o
 obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o
 obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o
-obj-$(CONFIG_CRYPTO_CRCT10DIF_PCLMUL) += crct10dif-pclmul.o
 
 # These modules require assembler to support AVX.
 ifeq ($(avx_supported),yes)
@@ -82,4 +81,3 @@ crc32c-intel-$(CONFIG_64BIT) += crc32c-pcl-intel-asm_64.o
 crc32-pclmul-y := crc32-pclmul_asm.o crc32-pclmul_glue.o
 sha256-ssse3-y := sha256-ssse3-asm.o sha256-avx-asm.o sha256-avx2-asm.o sha256_ssse3_glue.o
 sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o
-crct10dif-pclmul-y := crct10dif-pcl-asm_64.o crct10dif-pclmul_glue.o
diff --git a/arch/x86/crypto/crct10dif-pcl-asm_64.S b/arch/x86/crypto/crct10dif-pcl-asm_64.S
deleted file mode 100644
index 35e97569d05f..000000000000
--- a/arch/x86/crypto/crct10dif-pcl-asm_64.S
+++ /dev/null
@@ -1,643 +0,0 @@
-########################################################################
-# Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
-#
-# Copyright (c) 2013, Intel Corporation
-#
-# Authors:
-# Erdinc Ozturk <erdinc.ozturk@intel.com>
-# Vinodh Gopal <vinodh.gopal@intel.com>
-# James Guilford <james.guilford@intel.com>
-# Tim Chen <tim.c.chen@linux.intel.com>
-#
-# This software is available to you under a choice of one of two
-# licenses. You may choose to be licensed under the terms of the GNU
-# General Public License (GPL) Version 2, available from the file
-# COPYING in the main directory of this source tree, or the
-# OpenIB.org BSD license below:
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the
-# distribution.
-#
-# * Neither the name of the Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-#
-# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY
-# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
-# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-########################################################################
-# Function API:
-# UINT16 crc_t10dif_pcl(
-# UINT16 init_crc, //initial CRC value, 16 bits
-# const unsigned char *buf, //buffer pointer to calculate CRC on
-# UINT64 len //buffer length in bytes (64-bit data)
-# );
-#
-# Reference paper titled "Fast CRC Computation for Generic
-# Polynomials Using PCLMULQDQ Instruction"
-# URL: http://www.intel.com/content/dam/www/public/us/en/documents
-# /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
-#
-#
-
-#include <linux/linkage.h>
-
-.text
-
-#define arg1 %rdi
-#define arg2 %rsi
-#define arg3 %rdx
-
-#define arg1_low32 %edi
-
-ENTRY(crc_t10dif_pcl)
-.align 16
-
-	# adjust the 16-bit initial_crc value, scale it to 32 bits
-	shl $16, arg1_low32
-
-	# Allocate Stack Space
-	mov %rsp, %rcx
-	sub $16*2, %rsp
-	# align stack to 16 byte boundary
-	and $~(0x10 - 1), %rsp
-
-	# check if smaller than 256
-	cmp $256, arg3
-
-	# for sizes less than 128, we can't fold 64B at a time...
-	jl _less_than_128
-
-
-	# load the initial crc value
-	movd arg1_low32, %xmm10 # initial crc
-
-	# crc value does not need to be byte-reflected, but it needs
-	# to be moved to the high part of the register.
-	# because data will be byte-reflected and will align with
-	# initial crc at correct place.
-	pslldq $12, %xmm10
-
-	movdqa SHUF_MASK(%rip), %xmm11
-	# receive the initial 64B data, xor the initial crc value
-	movdqu 16*0(arg2), %xmm0
-	movdqu 16*1(arg2), %xmm1
-	movdqu 16*2(arg2), %xmm2
-	movdqu 16*3(arg2), %xmm3
-	movdqu 16*4(arg2), %xmm4
-	movdqu 16*5(arg2), %xmm5
-	movdqu 16*6(arg2), %xmm6
-	movdqu 16*7(arg2), %xmm7
-
-	pshufb %xmm11, %xmm0
-	# XOR the initial_crc value
-	pxor %xmm10, %xmm0
-	pshufb %xmm11, %xmm1
-	pshufb %xmm11, %xmm2
-	pshufb %xmm11, %xmm3
-	pshufb %xmm11, %xmm4
-	pshufb %xmm11, %xmm5
-	pshufb %xmm11, %xmm6
-	pshufb %xmm11, %xmm7
-
-	movdqa rk3(%rip), %xmm10 #xmm10 has rk3 and rk4
-	#imm value of pclmulqdq instruction
-	#will determine which constant to use
-
-	#################################################################
-	# we subtract 256 instead of 128 to save one instruction from the loop
-	sub $256, arg3
-
-	# at this section of the code, there is 64*x+y (0<=y<64) bytes of
-	# buffer. The _fold_64_B_loop will fold 64B at a time
-	# until we have 64+y Bytes of buffer
-
-
-	# fold 64B at a time. This section of the code folds 4 xmm
-	# registers in parallel
-_fold_64_B_loop:
-
-	# update the buffer pointer
-	add $128, arg2 # buf += 64#
-
-	movdqu 16*0(arg2), %xmm9
-	movdqu 16*1(arg2), %xmm12
-	pshufb %xmm11, %xmm9
-	pshufb %xmm11, %xmm12
-	movdqa %xmm0, %xmm8
-	movdqa %xmm1, %xmm13
-	pclmulqdq $0x0 , %xmm10, %xmm0
-	pclmulqdq $0x11, %xmm10, %xmm8
-	pclmulqdq $0x0 , %xmm10, %xmm1
-	pclmulqdq $0x11, %xmm10, %xmm13
-	pxor %xmm9 , %xmm0
-	xorps %xmm8 , %xmm0
-	pxor %xmm12, %xmm1
-	xorps %xmm13, %xmm1
-
-	movdqu 16*2(arg2), %xmm9
-	movdqu 16*3(arg2), %xmm12
-	pshufb %xmm11, %xmm9
-	pshufb %xmm11, %xmm12
-	movdqa %xmm2, %xmm8
-	movdqa %xmm3, %xmm13
-	pclmulqdq $0x0, %xmm10, %xmm2
-	pclmulqdq $0x11, %xmm10, %xmm8
-	pclmulqdq $0x0, %xmm10, %xmm3
-	pclmulqdq $0x11, %xmm10, %xmm13
-	pxor %xmm9 , %xmm2
-	xorps %xmm8 , %xmm2
-	pxor %xmm12, %xmm3
-	xorps %xmm13, %xmm3
-
-	movdqu 16*4(arg2), %xmm9
-	movdqu 16*5(arg2), %xmm12
-	pshufb %xmm11, %xmm9
-	pshufb %xmm11, %xmm12
-	movdqa %xmm4, %xmm8
-	movdqa %xmm5, %xmm13
-	pclmulqdq $0x0, %xmm10, %xmm4
-	pclmulqdq $0x11, %xmm10, %xmm8
-	pclmulqdq $0x0, %xmm10, %xmm5
-	pclmulqdq $0x11, %xmm10, %xmm13
-	pxor %xmm9 , %xmm4
-	xorps %xmm8 , %xmm4
-	pxor %xmm12, %xmm5
-	xorps %xmm13, %xmm5
-
-	movdqu 16*6(arg2), %xmm9
-	movdqu 16*7(arg2), %xmm12
-	pshufb %xmm11, %xmm9
-	pshufb %xmm11, %xmm12
-	movdqa %xmm6 , %xmm8
-	movdqa %xmm7 , %xmm13
-	pclmulqdq $0x0 , %xmm10, %xmm6
-	pclmulqdq $0x11, %xmm10, %xmm8
-	pclmulqdq $0x0 , %xmm10, %xmm7
-	pclmulqdq $0x11, %xmm10, %xmm13
-	pxor %xmm9 , %xmm6
-	xorps %xmm8 , %xmm6
-	pxor %xmm12, %xmm7
-	xorps %xmm13, %xmm7
-
-	sub $128, arg3
-
-	# check if there is another 64B in the buffer to be able to fold
-	jge _fold_64_B_loop
-	##################################################################
-
-
-	add $128, arg2
-	# at this point, the buffer pointer is pointing at the last y Bytes
-	# of the buffer the 64B of folded data is in 4 of the xmm
-	# registers: xmm0, xmm1, xmm2, xmm3
-
-
-	# fold the 8 xmm registers to 1 xmm register with different constants
-
-	movdqa rk9(%rip), %xmm10
-	movdqa %xmm0, %xmm8
-	pclmulqdq $0x11, %xmm10, %xmm0
-	pclmulqdq $0x0 , %xmm10, %xmm8
-	pxor %xmm8, %xmm7
-	xorps %xmm0, %xmm7
-
-	movdqa rk11(%rip), %xmm10
-	movdqa %xmm1, %xmm8
-	pclmulqdq $0x11, %xmm10, %xmm1
-	pclmulqdq $0x0 , %xmm10, %xmm8
-	pxor %xmm8, %xmm7
-	xorps %xmm1, %xmm7
-
-	movdqa rk13(%rip), %xmm10
-	movdqa %xmm2, %xmm8
-	pclmulqdq $0x11, %xmm10, %xmm2
-	pclmulqdq $0x0 , %xmm10, %xmm8
-	pxor %xmm8, %xmm7
-	pxor %xmm2, %xmm7
-
-	movdqa rk15(%rip), %xmm10
-	movdqa %xmm3, %xmm8
-	pclmulqdq $0x11, %xmm10, %xmm3
-	pclmulqdq $0x0 , %xmm10, %xmm8
-	pxor %xmm8, %xmm7
-	xorps %xmm3, %xmm7
-
-	movdqa rk17(%rip), %xmm10
-	movdqa %xmm4, %xmm8
-	pclmulqdq $0x11, %xmm10, %xmm4
-	pclmulqdq $0x0 , %xmm10, %xmm8
-	pxor %xmm8, %xmm7
-	pxor %xmm4, %xmm7
-
-	movdqa rk19(%rip), %xmm10
-	movdqa %xmm5, %xmm8
-	pclmulqdq $0x11, %xmm10, %xmm5
-	pclmulqdq $0x0 , %xmm10, %xmm8
-	pxor %xmm8, %xmm7
-	xorps %xmm5, %xmm7
-
-	movdqa rk1(%rip), %xmm10 #xmm10 has rk1 and rk2
-	#imm value of pclmulqdq instruction
-	#will determine which constant to use
-	movdqa %xmm6, %xmm8
-	pclmulqdq $0x11, %xmm10, %xmm6
-	pclmulqdq $0x0 , %xmm10, %xmm8
-	pxor %xmm8, %xmm7
-	pxor %xmm6, %xmm7
-
-
-	# instead of 64, we add 48 to the loop counter to save 1 instruction
-	# from the loop instead of a cmp instruction, we use the negative
-	# flag with the jl instruction
-	add $128-16, arg3
-	jl _final_reduction_for_128
-
-	# now we have 16+y bytes left to reduce. 16 Bytes is in register xmm7
-	# and the rest is in memory. We can fold 16 bytes at a time if y>=16
-	# continue folding 16B at a time
-
-_16B_reduction_loop:
-	movdqa %xmm7, %xmm8
-	pclmulqdq $0x11, %xmm10, %xmm7
-	pclmulqdq $0x0 , %xmm10, %xmm8
-	pxor %xmm8, %xmm7
-	movdqu (arg2), %xmm0
-	pshufb %xmm11, %xmm0
-	pxor %xmm0 , %xmm7
-	add $16, arg2
-	sub $16, arg3
-	# instead of a cmp instruction, we utilize the flags with the
-	# jge instruction equivalent of: cmp arg3, 16-16
-	# check if there is any more 16B in the buffer to be able to fold
-	jge _16B_reduction_loop
-
-	#now we have 16+z bytes left to reduce, where 0<= z < 16.
-	#first, we reduce the data in the xmm7 register
-
-
-_final_reduction_for_128:
-	# check if any more data to fold. If not, compute the CRC of
-	# the final 128 bits
-	add $16, arg3
-	je _128_done
-
-	# here we are getting data that is less than 16 bytes.
-	# since we know that there was data before the pointer, we can
-	# offset the input pointer before the actual point, to receive
-	# exactly 16 bytes. after that the registers need to be adjusted.
-_get_last_two_xmms:
-	movdqa %xmm7, %xmm2
-
-	movdqu -16(arg2, arg3), %xmm1
-	pshufb %xmm11, %xmm1
-
-	# get rid of the extra data that was loaded before
-	# load the shift constant
-	lea pshufb_shf_table+16(%rip), %rax
-	sub arg3, %rax
-	movdqu (%rax), %xmm0
-
-	# shift xmm2 to the left by arg3 bytes
-	pshufb %xmm0, %xmm2
-
-	# shift xmm7 to the right by 16-arg3 bytes
-	pxor mask1(%rip), %xmm0
-	pshufb %xmm0, %xmm7
-	pblendvb %xmm2, %xmm1 #xmm0 is implicit
-
-	# fold 16 Bytes
-	movdqa %xmm1, %xmm2
-	movdqa %xmm7, %xmm8
-	pclmulqdq $0x11, %xmm10, %xmm7
-	pclmulqdq $0x0 , %xmm10, %xmm8
-	pxor %xmm8, %xmm7
-	pxor %xmm2, %xmm7
-
-_128_done:
-	# compute crc of a 128-bit value
-	movdqa rk5(%rip), %xmm10 # rk5 and rk6 in xmm10
-	movdqa %xmm7, %xmm0
-
-	#64b fold
-	pclmulqdq $0x1, %xmm10, %xmm7
-	pslldq $8 , %xmm0
-	pxor %xmm0, %xmm7
-
-	#32b fold
-	movdqa %xmm7, %xmm0
-
-	pand mask2(%rip), %xmm0
-
-	psrldq $12, %xmm7
-	pclmulqdq $0x10, %xmm10, %xmm7
-	pxor %xmm0, %xmm7
-
-	#barrett reduction
-_barrett:
-	movdqa rk7(%rip), %xmm10 # rk7 and rk8 in xmm10
-	movdqa %xmm7, %xmm0
-	pclmulqdq $0x01, %xmm10, %xmm7
-	pslldq $4, %xmm7
-	pclmulqdq $0x11, %xmm10, %xmm7
-
-	pslldq $4, %xmm7
-	pxor %xmm0, %xmm7
-	pextrd $1, %xmm7, %eax
-
-_cleanup:
-	# scale the result back to 16 bits
-	shr $16, %eax
-	mov %rcx, %rsp
-	ret
-
-########################################################################
-
-.align 16
-_less_than_128:
-
-	# check if there is enough buffer to be able to fold 16B at a time
-	cmp $32, arg3
-	jl _less_than_32
-	movdqa SHUF_MASK(%rip), %xmm11
-
-	# now if there is, load the constants
-	movdqa rk1(%rip), %xmm10 # rk1 and rk2 in xmm10
-
-	movd arg1_low32, %xmm0 # get the initial crc value
-	pslldq $12, %xmm0 # align it to its correct place
-	movdqu (arg2), %xmm7 # load the plaintext
-	pshufb %xmm11, %xmm7 # byte-reflect the plaintext
-	pxor %xmm0, %xmm7
-
-
-	# update the buffer pointer
-	add $16, arg2
-
-	# update the counter. subtract 32 instead of 16 to save one
-	# instruction from the loop
-	sub $32, arg3
-
-	jmp _16B_reduction_loop
-
-
-.align 16
-_less_than_32:
-	# mov initial crc to the return value. this is necessary for
-	# zero-length buffers.
-	mov arg1_low32, %eax
-	test arg3, arg3
-	je _cleanup
-
-	movdqa SHUF_MASK(%rip), %xmm11
-
-	movd arg1_low32, %xmm0 # get the initial crc value
-	pslldq $12, %xmm0 # align it to its correct place
-
-	cmp $16, arg3
-	je _exact_16_left
-	jl _less_than_16_left
-
-	movdqu (arg2), %xmm7 # load the plaintext
-	pshufb %xmm11, %xmm7 # byte-reflect the plaintext
-	pxor %xmm0 , %xmm7 # xor the initial crc value
-	add $16, arg2
-	sub $16, arg3
-	movdqa rk1(%rip), %xmm10 # rk1 and rk2 in xmm10
-	jmp _get_last_two_xmms
-
-
-.align 16
-_less_than_16_left:
-	# use stack space to load data less than 16 bytes, zero-out
-	# the 16B in memory first.
-
-	pxor %xmm1, %xmm1
-	mov %rsp, %r11
-	movdqa %xmm1, (%r11)
-
-	cmp $4, arg3
-	jl _only_less_than_4
-
-	# backup the counter value
-	mov arg3, %r9
-	cmp $8, arg3
-	jl _less_than_8_left
-
-	# load 8 Bytes
-	mov (arg2), %rax
-	mov %rax, (%r11)
-	add $8, %r11
-	sub $8, arg3
-	add $8, arg2
-_less_than_8_left:
-
-	cmp $4, arg3
-	jl _less_than_4_left
-
-	# load 4 Bytes
-	mov (arg2), %eax
-	mov %eax, (%r11)
-	add $4, %r11
-	sub $4, arg3
-	add $4, arg2
-_less_than_4_left:
-
-	cmp $2, arg3
-	jl _less_than_2_left
-
-	# load 2 Bytes
-	mov (arg2), %ax
-	mov %ax, (%r11)
-	add $2, %r11
-	sub $2, arg3
-	add $2, arg2
-_less_than_2_left:
-	cmp $1, arg3
-	jl _zero_left
-
-	# load 1 Byte
-	mov (arg2), %al
-	mov %al, (%r11)
-_zero_left:
-	movdqa (%rsp), %xmm7
-	pshufb %xmm11, %xmm7
-	pxor %xmm0 , %xmm7 # xor the initial crc value
-
-	# shl r9, 4
-	lea pshufb_shf_table+16(%rip), %rax
-	sub %r9, %rax
-	movdqu (%rax), %xmm0
-	pxor mask1(%rip), %xmm0
-
-	pshufb %xmm0, %xmm7
-	jmp _128_done
-
-.align 16
-_exact_16_left:
-	movdqu (arg2), %xmm7
-	pshufb %xmm11, %xmm7
-	pxor %xmm0 , %xmm7 # xor the initial crc value
-
-	jmp _128_done
-
-_only_less_than_4:
-	cmp $3, arg3
-	jl _only_less_than_3
-
-	# load 3 Bytes
-	mov (arg2), %al
-	mov %al, (%r11)
-
-	mov 1(arg2), %al
-	mov %al, 1(%r11)
-
-	mov 2(arg2), %al
-	mov %al, 2(%r11)
-
-	movdqa (%rsp), %xmm7
-	pshufb %xmm11, %xmm7
-	pxor %xmm0 , %xmm7 # xor the initial crc value
-
-	psrldq $5, %xmm7
-
-	jmp _barrett
-_only_less_than_3:
-	cmp $2, arg3
-	jl _only_less_than_2
-
-	# load 2 Bytes
-	mov (arg2), %al
-	mov %al, (%r11)
-
-	mov 1(arg2), %al
-	mov %al, 1(%r11)
-
-	movdqa (%rsp), %xmm7
-	pshufb %xmm11, %xmm7
-	pxor %xmm0 , %xmm7 # xor the initial crc value
-
-	psrldq $6, %xmm7
-
-	jmp _barrett
-_only_less_than_2:
-
-	# load 1 Byte
-	mov (arg2), %al
-	mov %al, (%r11)
-
-	movdqa (%rsp), %xmm7
-	pshufb %xmm11, %xmm7
-	pxor %xmm0 , %xmm7 # xor the initial crc value
-
-	psrldq $7, %xmm7
-
-	jmp _barrett
-
-ENDPROC(crc_t10dif_pcl)
-
-.data
-
-# precomputed constants
-# these constants are precomputed from the poly:
-# 0x8bb70000 (0x8bb7 scaled to 32 bits)
-.align 16
-# Q = 0x18BB70000
-# rk1 = 2^(32*3) mod Q << 32
-# rk2 = 2^(32*5) mod Q << 32
-# rk3 = 2^(32*15) mod Q << 32
-# rk4 = 2^(32*17) mod Q << 32
-# rk5 = 2^(32*3) mod Q << 32
-# rk6 = 2^(32*2) mod Q << 32
-# rk7 = floor(2^64/Q)
-# rk8 = Q
-rk1:
-.quad 0x2d56000000000000
-rk2:
-.quad 0x06df000000000000
-rk3:
-.quad 0x9d9d000000000000
-rk4:
-.quad 0x7cf5000000000000
-rk5:
-.quad 0x2d56000000000000
-rk6:
-.quad 0x1368000000000000
-rk7:
-.quad 0x00000001f65a57f8
-rk8:
-.quad 0x000000018bb70000
-
-rk9:
-.quad 0xceae000000000000
-rk10:
-.quad 0xbfd6000000000000
-rk11:
-.quad 0x1e16000000000000
-rk12:
-.quad 0x713c000000000000
-rk13:
-.quad 0xf7f9000000000000
-rk14:
-.quad 0x80a6000000000000
-rk15:
-.quad 0x044c000000000000
-rk16:
-.quad 0xe658000000000000
-rk17:
-.quad 0xad18000000000000
-rk18:
-.quad 0xa497000000000000
-rk19:
-.quad 0x6ee3000000000000
-rk20:
-.quad 0xe7b5000000000000
-
-
-
-mask1:
-.octa 0x80808080808080808080808080808080
-mask2:
-.octa 0x00000000FFFFFFFFFFFFFFFFFFFFFFFF
-
-SHUF_MASK:
-.octa 0x000102030405060708090A0B0C0D0E0F
-
-pshufb_shf_table:
-# use these values for shift constants for the pshufb instruction
-# different alignments result in values as shown:
-# DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1
-# DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-3) / shr2
-# DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-4) / shr3
-# DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4
-# DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5
-# DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6
-# DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9 (16-7) / shr7
-# DDQ 0x07060504030201008f8e8d8c8b8a8988 # shl 8 (16-8) / shr8
-# DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7 (16-9) / shr9
-# DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6 (16-10) / shr10
-# DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5 (16-11) / shr11
-# DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4 (16-12) / shr12
-# DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3 (16-13) / shr13
-# DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2 (16-14) / shr14
-# DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1 (16-15) / shr15
-.octa 0x8f8e8d8c8b8a89888786858483828100
-.octa 0x000e0d0c0b0a09080706050403020100
diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
deleted file mode 100644
index 7845d7fd54c0..000000000000
--- a/arch/x86/crypto/crct10dif-pclmul_glue.c
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Cryptographic API.
- *
- * T10 Data Integrity Field CRC16 Crypto Transform using PCLMULQDQ Instructions
- *
- * Copyright (C) 2013 Intel Corporation
- * Author: Tim Chen <tim.c.chen@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/crc-t10dif.h>
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <asm/i387.h>
-#include <asm/cpufeature.h>
-#include <asm/cpu_device_id.h>
-
-asmlinkage __u16 crc_t10dif_pcl(__u16 crc, const unsigned char *buf,
-				size_t len);
-
-struct chksum_desc_ctx {
-	__u16 crc;
-};
-
-/*
- * Steps through buffer one byte at at time, calculates reflected
- * crc using table.
- */
-
-static int chksum_init(struct shash_desc *desc)
-{
-	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
-	ctx->crc = 0;
-
-	return 0;
-}
-
-static int chksum_update(struct shash_desc *desc, const u8 *data,
-			 unsigned int length)
-{
-	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
-	if (irq_fpu_usable()) {
-		kernel_fpu_begin();
-		ctx->crc = crc_t10dif_pcl(ctx->crc, data, length);
-		kernel_fpu_end();
-	} else
-		ctx->crc = crc_t10dif_generic(ctx->crc, data, length);
-	return 0;
-}
-
-static int chksum_final(struct shash_desc *desc, u8 *out)
-{
-	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
-	*(__u16 *)out = ctx->crc;
-	return 0;
-}
-
-static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
-			  u8 *out)
-{
-	if (irq_fpu_usable()) {
-		kernel_fpu_begin();
-		*(__u16 *)out = crc_t10dif_pcl(*crcp, data, len);
-		kernel_fpu_end();
-	} else
-		*(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
-	return 0;
-}
-
-static int chksum_finup(struct shash_desc *desc, const u8 *data,
-			unsigned int len, u8 *out)
-{
-	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
-	return __chksum_finup(&ctx->crc, data, len, out);
-}
-
-static int chksum_digest(struct shash_desc *desc, const u8 *data,
-			 unsigned int length, u8 *out)
-{
-	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
-	return __chksum_finup(&ctx->crc, data, length, out);
-}
-
-static struct shash_alg alg = {
-	.digestsize = CRC_T10DIF_DIGEST_SIZE,
-	.init = chksum_init,
-	.update = chksum_update,
-	.final = chksum_final,
-	.finup = chksum_finup,
-	.digest = chksum_digest,
-	.descsize = sizeof(struct chksum_desc_ctx),
-	.base = {
-		.cra_name = "crct10dif",
-		.cra_driver_name = "crct10dif-pclmul",
-		.cra_priority = 200,
-		.cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
-		.cra_module = THIS_MODULE,
-	}
-};
-
-static const struct x86_cpu_id crct10dif_cpu_id[] = {
-	X86_FEATURE_MATCH(X86_FEATURE_PCLMULQDQ),
-	{}
-};
-MODULE_DEVICE_TABLE(x86cpu, crct10dif_cpu_id);
-
-static int __init crct10dif_intel_mod_init(void)
-{
-	if (!x86_match_cpu(crct10dif_cpu_id))
-		return -ENODEV;
-
-	return crypto_register_shash(&alg);
-}
-
-static void __exit crct10dif_intel_mod_fini(void)
-{
-	crypto_unregister_shash(&alg);
-}
-
-module_init(crct10dif_intel_mod_init);
-module_exit(crct10dif_intel_mod_fini);
-
-MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
-MODULE_DESCRIPTION("T10 DIF CRC calculation accelerated with PCLMULQDQ.");
-MODULE_LICENSE("GPL");
-
-MODULE_ALIAS("crct10dif");
-MODULE_ALIAS("crct10dif-pclmul");
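
The glue module deleted above followed the standard pattern for SIMD-accelerated hashes: take the PCLMULQDQ fast path only when the FPU is usable in the current context, and fall back to the table-driven code otherwise. Distilled from the removed chksum_update(), with crc, data and length standing in for the ctx fields:

	if (irq_fpu_usable()) {
		kernel_fpu_begin();	/* save FPU state; SSE regs clobberable */
		crc = crc_t10dif_pcl(crc, data, length);
		kernel_fpu_end();	/* restore FPU state */
	} else {
		crc = crc_t10dif_generic(crc, data, length);	/* generic lib fallback */
	}

kernel_fpu_begin()/kernel_fpu_end() bracket the window in which the assembly routine may touch the xmm registers without corrupting user-space FPU state.
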
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index 52ff81cce008..bae3aba95b15 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -308,8 +308,6 @@ static int load_aout_binary(struct linux_binprm *bprm)
 		(current->mm->start_data = N_DATADDR(ex));
 	current->mm->brk = ex.a_bss +
 		(current->mm->start_brk = N_BSSADDR(ex));
-	current->mm->free_area_cache = TASK_UNMAPPED_BASE;
-	current->mm->cached_hole_size = 0;
 
 	retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
 	if (retval < 0) {
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index 5f9a1243190e..d2b12988d2ed 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -28,7 +28,7 @@ struct x86_cpu {
 #ifdef CONFIG_HOTPLUG_CPU
 extern int arch_register_cpu(int num);
 extern void arch_unregister_cpu(int);
-extern void __cpuinit start_cpu0(void);
+extern void start_cpu0(void);
 #ifdef CONFIG_DEBUG_HOTPLUG_CPU0
 extern int _debug_hotplug_cpu(int cpu, int action);
 #endif
diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
index 75ce3f47d204..77a99ac06d00 100644
--- a/arch/x86/include/asm/emergency-restart.h
+++ b/arch/x86/include/asm/emergency-restart.h
@@ -1,18 +1,6 @@
 #ifndef _ASM_X86_EMERGENCY_RESTART_H
 #define _ASM_X86_EMERGENCY_RESTART_H
 
-enum reboot_type {
-	BOOT_TRIPLE = 't',
-	BOOT_KBD = 'k',
-	BOOT_BIOS = 'b',
-	BOOT_ACPI = 'a',
-	BOOT_EFI = 'e',
-	BOOT_CF9 = 'p',
-	BOOT_CF9_COND = 'q',
-};
-
-extern enum reboot_type reboot_type;
-
 extern void machine_emergency_restart(void);
 
 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index d8e8eefbe24c..34f69cb9350a 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -345,4 +345,11 @@ extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
 
 #define IO_SPACE_LIMIT 0xffff
 
+#ifdef CONFIG_MTRR
+extern int __must_check arch_phys_wc_add(unsigned long base,
+					 unsigned long size);
+extern void arch_phys_wc_del(int handle);
+#define arch_phys_wc_add arch_phys_wc_add
+#endif
+
 #endif /* _ASM_X86_IO_H */
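
arch_phys_wc_add()/arch_phys_wc_del() hide MTRR slot management behind an opaque cookie so drivers stop open-coding mtrr_add()/mtrr_del(). Write-combining is only a performance hint, so a failed add is not treated as an error. A hypothetical framebuffer driver (bar_start and bar_len are illustrative names) would do roughly:

	int wc;	/* cookie from arch_phys_wc_add(); negative if WC unavailable */

	wc = arch_phys_wc_add(bar_start, bar_len);
	/* ... map and use the aperture; writes may now combine ... */
	arch_phys_wc_del(wc);	/* intended to be safe even for a failed cookie */

The mtrr.h hunk further down adds phys_wc_to_mtrr_index() as an escape hatch for code that still needs the underlying MTRR slot number.
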
diff --git a/arch/x86/include/asm/mc146818rtc.h b/arch/x86/include/asm/mc146818rtc.h
index d354fb781c57..a55c7efcc4ed 100644
--- a/arch/x86/include/asm/mc146818rtc.h
+++ b/arch/x86/include/asm/mc146818rtc.h
@@ -95,8 +95,8 @@ static inline unsigned char current_lock_cmos_reg(void)
 unsigned char rtc_cmos_read(unsigned char addr);
 void rtc_cmos_write(unsigned char val, unsigned char addr);
 
-extern int mach_set_rtc_mmss(unsigned long nowtime);
-extern unsigned long mach_get_cmos_time(void);
+extern int mach_set_rtc_mmss(const struct timespec *now);
+extern void mach_get_cmos_time(struct timespec *now);
 
 #define RTC_IRQ 8
 
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 6b52980c29c1..29e3093bbd21 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -214,6 +214,13 @@ void mce_log_therm_throt_event(__u64 status);
 /* Interrupt Handler for core thermal thresholds */
 extern int (*platform_thermal_notify)(__u64 msr_val);
 
+/* Interrupt Handler for package thermal thresholds */
+extern int (*platform_thermal_package_notify)(__u64 msr_val);
+
+/* Callback support of rate control, return true, if
+ * callback has rate control */
+extern bool (*platform_thermal_package_rate_control)(void);
+
 #ifdef CONFIG_X86_THERMAL_VECTOR
 extern void mcheck_intel_therm_init(void);
 #else
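
The two new hooks let a package-level thermal driver subscribe to package thermal interrupts and tell the therm_throt code that it does its own rate limiting. A hypothetical consumer, sketching only the registration (my_pkg_notify and my_pkg_rate_control are illustrative names):

	static int my_pkg_notify(__u64 msr_val)
	{
		/* defer real work; keep the thermal interrupt path short */
		return 0;
	}

	static bool my_pkg_rate_control(void)
	{
		return true;	/* we rate-limit notifications ourselves */
	}

	platform_thermal_package_notify = my_pkg_notify;
	platform_thermal_package_rate_control = my_pkg_rate_control;
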
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 6bc3985ee473..f98bd6625318 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -60,11 +60,11 @@ static inline void __exit exit_amd_microcode(void) {}
 #ifdef CONFIG_MICROCODE_EARLY
 #define MAX_UCODE_COUNT 128
 extern void __init load_ucode_bsp(void);
-extern void __cpuinit load_ucode_ap(void);
+extern void load_ucode_ap(void);
 extern int __init save_microcode_in_initrd(void);
 #else
 static inline void __init load_ucode_bsp(void) {}
-static inline void __cpuinit load_ucode_ap(void) {}
+static inline void load_ucode_ap(void) {}
 static inline int __init save_microcode_in_initrd(void)
 {
 	return 0;
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
index c6b043f40271..50e5c58ced23 100644
--- a/arch/x86/include/asm/microcode_amd.h
+++ b/arch/x86/include/asm/microcode_amd.h
@@ -67,11 +67,11 @@ extern enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size)
 extern u8 amd_bsp_mpb[MPB_MAX_SIZE];
 #endif
 extern void __init load_ucode_amd_bsp(void);
-extern void __cpuinit load_ucode_amd_ap(void);
+extern void load_ucode_amd_ap(void);
 extern int __init save_microcode_in_initrd_amd(void);
 #else
 static inline void __init load_ucode_amd_bsp(void) {}
-static inline void __cpuinit load_ucode_amd_ap(void) {}
+static inline void load_ucode_amd_ap(void) {}
 static inline int __init save_microcode_in_initrd_amd(void) { return -EINVAL; }
 #endif
 
diff --git a/arch/x86/include/asm/microcode_intel.h b/arch/x86/include/asm/microcode_intel.h
index 87a085333cbf..9067166409bf 100644
--- a/arch/x86/include/asm/microcode_intel.h
+++ b/arch/x86/include/asm/microcode_intel.h
@@ -65,12 +65,12 @@ update_match_revision(struct microcode_header_intel *mc_header, int rev);
 
 #ifdef CONFIG_MICROCODE_INTEL_EARLY
 extern void __init load_ucode_intel_bsp(void);
-extern void __cpuinit load_ucode_intel_ap(void);
+extern void load_ucode_intel_ap(void);
 extern void show_ucode_info_early(void);
 extern int __init save_microcode_in_initrd_intel(void);
 #else
 static inline __init void load_ucode_intel_bsp(void) {}
-static inline __cpuinit void load_ucode_intel_ap(void) {}
+static inline void load_ucode_intel_ap(void) {}
 static inline void show_ucode_info_early(void) {}
 static inline int __init save_microcode_in_initrd_intel(void) { return -EINVAL; }
 #endif
diff --git a/arch/x86/include/asm/mmconfig.h b/arch/x86/include/asm/mmconfig.h
index 9b119da1d105..04a3fed22cfe 100644
--- a/arch/x86/include/asm/mmconfig.h
+++ b/arch/x86/include/asm/mmconfig.h
@@ -2,8 +2,8 @@
 #define _ASM_X86_MMCONFIG_H
 
 #ifdef CONFIG_PCI_MMCONFIG
-extern void __cpuinit fam10h_check_enable_mmcfg(void);
-extern void __cpuinit check_enable_amd_mmconf_dmi(void);
+extern void fam10h_check_enable_mmcfg(void);
+extern void check_enable_amd_mmconf_dmi(void);
 #else
 static inline void fam10h_check_enable_mmcfg(void) { }
 static inline void check_enable_amd_mmconf_dmi(void) { }
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index 3e2f42a4b872..626cf70082d7 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -94,7 +94,7 @@ static inline void early_reserve_e820_mpc_new(void) { }
 #define default_get_smp_config x86_init_uint_noop
 #endif
 
-void __cpuinit generic_processor_info(int apicid, int version);
+void generic_processor_info(int apicid, int version);
 #ifdef CONFIG_ACPI
 extern void mp_register_ioapic(int id, u32 address, u32 gsi_base);
 extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
diff --git a/arch/x86/include/asm/mrst-vrtc.h b/arch/x86/include/asm/mrst-vrtc.h
index 73668abdbedf..1e69a75412a4 100644
--- a/arch/x86/include/asm/mrst-vrtc.h
+++ b/arch/x86/include/asm/mrst-vrtc.h
@@ -3,7 +3,7 @@
 
 extern unsigned char vrtc_cmos_read(unsigned char reg);
 extern void vrtc_cmos_write(unsigned char val, unsigned char reg);
-extern unsigned long vrtc_get_time(void);
-extern int vrtc_set_mmss(unsigned long nowtime);
+extern void vrtc_get_time(struct timespec *now);
+extern int vrtc_set_mmss(const struct timespec *now);
 
 #endif
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index e235582f9930..f768f6298419 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -26,7 +26,10 @@
 #include <uapi/asm/mtrr.h>
 
 
-/* The following functions are for use by other drivers */
+/*
+ * The following functions are for use by other drivers that cannot use
+ * arch_phys_wc_add and arch_phys_wc_del.
+ */
 # ifdef CONFIG_MTRR
 extern u8 mtrr_type_lookup(u64 addr, u64 end);
 extern void mtrr_save_fixed_ranges(void *);
@@ -45,6 +48,7 @@ extern void mtrr_aps_init(void);
 extern void mtrr_bp_restore(void);
 extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
 extern int amd_special_default_mtrr(void);
+extern int phys_wc_to_mtrr_index(int handle);
 # else
 static inline u8 mtrr_type_lookup(u64 addr, u64 end)
 {
@@ -80,6 +84,10 @@ static inline int mtrr_trim_uncached_memory(unsigned long end_pfn)
 static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
 {
 }
+static inline int phys_wc_to_mtrr_index(int handle)
+{
+	return -1;
+}
 
 #define mtrr_ap_init() do {} while (0)
 #define mtrr_bp_init() do {} while (0)
diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
index 1b99ee5c9f00..4064acae625d 100644
--- a/arch/x86/include/asm/numa.h
+++ b/arch/x86/include/asm/numa.h
@@ -39,7 +39,7 @@ static inline void set_apicid_to_node(int apicid, s16 node)
 	__apicid_to_node[apicid] = node;
 }
 
-extern int __cpuinit numa_cpu_node(int cpu);
+extern int numa_cpu_node(int cpu);
 
 #else /* CONFIG_NUMA */
 static inline void set_apicid_to_node(int apicid, s16 node)
@@ -60,8 +60,8 @@ static inline int numa_cpu_node(int cpu)
 extern void numa_set_node(int cpu, int node);
 extern void numa_clear_node(int cpu);
 extern void __init init_cpu_to_node(void);
-extern void __cpuinit numa_add_cpu(int cpu);
-extern void __cpuinit numa_remove_cpu(int cpu);
+extern void numa_add_cpu(int cpu);
+extern void numa_remove_cpu(int cpu);
 #else /* CONFIG_NUMA */
 static inline void numa_set_node(int cpu, int node) { }
 static inline void numa_clear_node(int cpu) { }
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 29937c4f6ff8..24cf5aefb704 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -164,7 +164,7 @@ extern const struct seq_operations cpuinfo_op;
 #define cache_line_size() (boot_cpu_data.x86_cache_alignment)
 
 extern void cpu_detect(struct cpuinfo_x86 *c);
-extern void __cpuinit fpu_detect(struct cpuinfo_x86 *c);
+extern void fpu_detect(struct cpuinfo_x86 *c);
 
 extern void early_cpu_init(void);
 extern void identify_boot_cpu(void);
diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h
index 60bef663609a..bade6ac3b14f 100644
--- a/arch/x86/include/asm/prom.h
+++ b/arch/x86/include/asm/prom.h
@@ -27,7 +27,7 @@ extern int of_ioapic;
 extern u64 initial_dtb;
 extern void add_dtb(u64 data);
 extern void x86_add_irq_domains(void);
-void __cpuinit x86_of_pci_init(void);
+void x86_of_pci_init(void);
 void x86_dtb_init(void);
 #else
 static inline void add_dtb(u64 data) { }
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index b073aaea747c..4137890e88e3 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -179,7 +179,7 @@ static inline int wbinvd_on_all_cpus(void)
 }
 #endif /* CONFIG_SMP */
 
-extern unsigned disabled_cpus __cpuinitdata;
+extern unsigned disabled_cpus;
 
 #ifdef CONFIG_X86_32_SMP
 /*
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index d8d99222b36a..828a1565ba57 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -142,6 +142,8 @@ struct x86_cpuinit_ops {
 	void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
 };
 
+struct timespec;
+
 /**
  * struct x86_platform_ops - platform specific runtime functions
  * @calibrate_tsc: calibrate TSC
@@ -156,8 +158,8 @@ struct x86_cpuinit_ops {
  */
 struct x86_platform_ops {
 	unsigned long (*calibrate_tsc)(void);
-	unsigned long (*get_wallclock)(void);
-	int (*set_wallclock)(unsigned long nowtime);
+	void (*get_wallclock)(struct timespec *ts);
+	int (*set_wallclock)(const struct timespec *ts);
 	void (*iommu_shutdown)(void);
 	bool (*is_untracked_pat_range)(u64 start, u64 end);
 	void (*nmi_init)(void);
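
Widening get_wallclock/set_wallclock from whole seconds to struct timespec lets persistent-clock sources with sub-second resolution (the kvmclock, Xen and vRTC paths touched in this same diff) stop truncating to the second. A minimal sketch of an implementation under the new prototype, where read_persistent_seconds() is a hypothetical helper:

	static void example_get_wallclock(struct timespec *ts)
	{
		ts->tv_sec = read_persistent_seconds();
		ts->tv_nsec = 0;	/* finer-grained sources can fill this in */
	}
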
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index d81a972dd506..2627a81253ee 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -195,7 +195,7 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
 	return 0;
 }
 
-static void __cpuinit acpi_register_lapic(int id, u8 enabled)
+static void acpi_register_lapic(int id, u8 enabled)
 {
 	unsigned int ver = 0;
 
@@ -607,7 +607,7 @@ void __init acpi_set_irq_model_ioapic(void)
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 #include <acpi/processor.h>
 
-static void __cpuinit acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 {
 #ifdef CONFIG_ACPI_NUMA
 	int nid;
@@ -620,7 +620,7 @@ static void __cpuinit acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 #endif
 }
 
-static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
+static int _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 {
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *obj;
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 2a34aaf3c8f1..33120100ff5e 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -48,9 +48,20 @@ int x86_acpi_suspend_lowlevel(void)
 #ifndef CONFIG_64BIT
 	native_store_gdt((struct desc_ptr *)&header->pmode_gdt);
 
+	/*
+	 * We have to check that we can write back the value, and not
+	 * just read it.  At least on 90 nm Pentium M (Family 6, Model
+	 * 13), reading an invalid MSR is not guaranteed to trap, see
+	 * Erratum X4 in "Intel Pentium M Processor on 90 nm Process
+	 * with 2-MB L2 Cache and Intel® Processor A100 and A110 on 90
+	 * nm process with 512-KB L2 Cache Specification Update".
+	 */
 	if (!rdmsr_safe(MSR_EFER,
 			&header->pmode_efer_low,
-			&header->pmode_efer_high))
+			&header->pmode_efer_high) &&
+	    !wrmsr_safe(MSR_EFER,
+			header->pmode_efer_low,
+			header->pmode_efer_high))
 		header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_EFER);
 #endif /* !CONFIG_64BIT */
 
@@ -61,7 +72,10 @@ int x86_acpi_suspend_lowlevel(void)
 	}
 	if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
 			&header->pmode_misc_en_low,
-			&header->pmode_misc_en_high))
+			&header->pmode_misc_en_high) &&
+	    !wrmsr_safe(MSR_IA32_MISC_ENABLE,
+			header->pmode_misc_en_low,
+			header->pmode_misc_en_high))
 		header->pmode_behavior |=
 			(1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE);
 	header->realmode_flags = acpi_realmode_flags;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 99663b59123a..eca89c53a7f5 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -58,7 +58,7 @@
 
 unsigned int num_processors;
 
-unsigned disabled_cpus __cpuinitdata;
+unsigned disabled_cpus;
 
 /* Processor that is doing the boot up */
 unsigned int boot_cpu_physical_apicid = -1U;
@@ -544,7 +544,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
  * Setup the local APIC timer for this CPU. Copy the initialized values
  * of the boot CPU and register the clock event in the framework.
  */
-static void __cpuinit setup_APIC_timer(void)
+static void setup_APIC_timer(void)
 {
 	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
 
@@ -866,7 +866,7 @@ void __init setup_boot_APIC_clock(void)
 	setup_APIC_timer();
 }
 
-void __cpuinit setup_secondary_APIC_clock(void)
+void setup_secondary_APIC_clock(void)
 {
 	setup_APIC_timer();
 }
@@ -1229,7 +1229,7 @@ void __init init_bsp_APIC(void)
 	apic_write(APIC_LVT1, value);
 }
 
-static void __cpuinit lapic_setup_esr(void)
+static void lapic_setup_esr(void)
 {
 	unsigned int oldvalue, value, maxlvt;
 
@@ -1276,7 +1276,7 @@ static void __cpuinit lapic_setup_esr(void)
  * Used to setup local APIC while initializing BSP or bringin up APs.
  * Always called with preemption disabled.
  */
-void __cpuinit setup_local_APIC(void)
+void setup_local_APIC(void)
 {
 	int cpu = smp_processor_id();
 	unsigned int value, queued;
@@ -1471,7 +1471,7 @@ void __cpuinit setup_local_APIC(void)
 #endif
 }
 
-void __cpuinit end_local_APIC_setup(void)
+void end_local_APIC_setup(void)
 {
 	lapic_setup_esr();
 
@@ -2107,7 +2107,7 @@ void disconnect_bsp_APIC(int virt_wire_setup)
 	apic_write(APIC_LVT1, value);
 }
 
-void __cpuinit generic_processor_info(int apicid, int version)
+void generic_processor_info(int apicid, int version)
 {
 	int cpu, max = nr_cpu_ids;
 	bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
@@ -2377,7 +2377,7 @@ static struct syscore_ops lapic_syscore_ops = {
 	.suspend = lapic_suspend,
 };
 
-static void __cpuinit apic_pm_activate(void)
+static void apic_pm_activate(void)
 {
 	apic_pm_state.active = 1;
 }
@@ -2402,7 +2402,7 @@ static void apic_pm_activate(void) { }
 
 #ifdef CONFIG_X86_64
 
-static int __cpuinit apic_cluster_num(void)
+static int apic_cluster_num(void)
 {
 	int i, clusters, zeros;
 	unsigned id;
@@ -2447,10 +2447,10 @@ static int __cpuinit apic_cluster_num(void)
 	return clusters;
 }
 
-static int __cpuinitdata multi_checked;
-static int __cpuinitdata multi;
+static int multi_checked;
+static int multi;
 
-static int __cpuinit set_multi(const struct dmi_system_id *d)
+static int set_multi(const struct dmi_system_id *d)
 {
 	if (multi)
 		return 0;
@@ -2459,7 +2459,7 @@ static int __cpuinit set_multi(const struct dmi_system_id *d)
 		return 0;
 }
 
-static const __cpuinitconst struct dmi_system_id multi_dmi_table[] = {
+static const struct dmi_system_id multi_dmi_table[] = {
 	{
 		.callback = set_multi,
 		.ident = "IBM System Summit2",
@@ -2471,7 +2471,7 @@ static const __cpuinitconst struct dmi_system_id multi_dmi_table[] = {
 	{}
 };
 
-static void __cpuinit dmi_check_multi(void)
+static void dmi_check_multi(void)
 {
 	if (multi_checked)
 		return;
@@ -2488,7 +2488,7 @@ static void __cpuinit dmi_check_multi(void)
  * multi-chassis.
  * Use DMI to check them
  */
-__cpuinit int apic_is_clustered_box(void)
+int apic_is_clustered_box(void)
 {
 	dmi_check_multi();
 	if (multi)
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index 9a9110918ca7..3e67f9e3d7ef 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -74,7 +74,7 @@ static int numachip_phys_pkg_id(int initial_apic_id, int index_msb)
 	return initial_apic_id >> index_msb;
 }
 
-static int __cpuinit numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip)
+static int numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip)
 {
 	union numachip_csr_g3_ext_irq_gen int_gen;
 
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 0874799a98c6..c55224731b2d 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -130,7 +130,7 @@ int es7000_plat;
130 */ 130 */
131 131
132 132
133static int __cpuinit wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip) 133static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
134{ 134{
135 unsigned long vect = 0, psaival = 0; 135 unsigned long vect = 0, psaival = 0;
136 136
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
index d661ee95cabf..1e42e8f305ee 100644
--- a/arch/x86/kernel/apic/numaq_32.c
+++ b/arch/x86/kernel/apic/numaq_32.c
@@ -105,7 +105,7 @@ static void __init smp_dump_qct(void)
105 } 105 }
106} 106}
107 107
108void __cpuinit numaq_tsc_disable(void) 108void numaq_tsc_disable(void)
109{ 109{
110 if (!found_numaq) 110 if (!found_numaq)
111 return; 111 return;
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index c88baa4ff0e5..140e29db478d 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -148,7 +148,7 @@ static void init_x2apic_ldr(void)
148 /* 148 /*
149 * At CPU state changes, update the x2apic cluster sibling info. 149 * At CPU state changes, update the x2apic cluster sibling info.
150 */ 150 */
151static int __cpuinit 151static int
152update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu) 152update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
153{ 153{
154 unsigned int this_cpu = (unsigned long)hcpu; 154 unsigned int this_cpu = (unsigned long)hcpu;
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 39cc7f7acab3..1191ac1c9d25 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -25,6 +25,7 @@
25#include <linux/kdebug.h> 25#include <linux/kdebug.h>
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/crash_dump.h> 27#include <linux/crash_dump.h>
28#include <linux/reboot.h>
28 29
29#include <asm/uv/uv_mmrs.h> 30#include <asm/uv/uv_mmrs.h>
30#include <asm/uv/uv_hub.h> 31#include <asm/uv/uv_hub.h>
@@ -36,7 +37,6 @@
36#include <asm/ipi.h> 37#include <asm/ipi.h>
37#include <asm/smp.h> 38#include <asm/smp.h>
38#include <asm/x86_init.h> 39#include <asm/x86_init.h>
39#include <asm/emergency-restart.h>
40#include <asm/nmi.h> 40#include <asm/nmi.h>
41 41
42/* BMC sets a bit this MMR non-zero before sending an NMI */ 42/* BMC sets a bit this MMR non-zero before sending an NMI */
@@ -209,7 +209,7 @@ EXPORT_SYMBOL_GPL(uv_possible_blades);
209unsigned long sn_rtc_cycles_per_second; 209unsigned long sn_rtc_cycles_per_second;
210EXPORT_SYMBOL(sn_rtc_cycles_per_second); 210EXPORT_SYMBOL(sn_rtc_cycles_per_second);
211 211
212static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip) 212static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
213{ 213{
214#ifdef CONFIG_SMP 214#ifdef CONFIG_SMP
215 unsigned long val; 215 unsigned long val;
@@ -416,7 +416,7 @@ static struct apic __refdata apic_x2apic_uv_x = {
416 .safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle, 416 .safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle,
417}; 417};
418 418
419static __cpuinit void set_x2apic_extra_bits(int pnode) 419static void set_x2apic_extra_bits(int pnode)
420{ 420{
421 __this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift); 421 __this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift);
422} 422}
@@ -735,7 +735,7 @@ static void uv_heartbeat(unsigned long ignored)
735 mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL); 735 mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL);
736} 736}
737 737
738static void __cpuinit uv_heartbeat_enable(int cpu) 738static void uv_heartbeat_enable(int cpu)
739{ 739{
740 while (!uv_cpu_hub_info(cpu)->scir.enabled) { 740 while (!uv_cpu_hub_info(cpu)->scir.enabled) {
741 struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer; 741 struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer;
@@ -752,7 +752,7 @@ static void __cpuinit uv_heartbeat_enable(int cpu)
752} 752}
753 753
754#ifdef CONFIG_HOTPLUG_CPU 754#ifdef CONFIG_HOTPLUG_CPU
755static void __cpuinit uv_heartbeat_disable(int cpu) 755static void uv_heartbeat_disable(int cpu)
756{ 756{
757 if (uv_cpu_hub_info(cpu)->scir.enabled) { 757 if (uv_cpu_hub_info(cpu)->scir.enabled) {
758 uv_cpu_hub_info(cpu)->scir.enabled = 0; 758 uv_cpu_hub_info(cpu)->scir.enabled = 0;
@@ -764,8 +764,8 @@ static void __cpuinit uv_heartbeat_disable(int cpu)
764/* 764/*
765 * cpu hotplug notifier 765 * cpu hotplug notifier
766 */ 766 */
767static __cpuinit int uv_scir_cpu_notify(struct notifier_block *self, 767static int uv_scir_cpu_notify(struct notifier_block *self, unsigned long action,
768 unsigned long action, void *hcpu) 768 void *hcpu)
769{ 769{
770 long cpu = (long)hcpu; 770 long cpu = (long)hcpu;
771 771
@@ -835,7 +835,7 @@ int uv_set_vga_state(struct pci_dev *pdev, bool decode,
835 * Called on each cpu to initialize the per_cpu UV data area. 835 * Called on each cpu to initialize the per_cpu UV data area.
836 * FIXME: hotplug not supported yet 836 * FIXME: hotplug not supported yet
837 */ 837 */
838void __cpuinit uv_cpu_init(void) 838void uv_cpu_init(void)
839{ 839{
840 /* CPU 0 initilization will be done via uv_system_init. */ 840 /* CPU 0 initilization will be done via uv_system_init. */
841 if (!uv_blade_info) 841 if (!uv_blade_info)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index c587a8757227..f654ecefea5b 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -69,7 +69,7 @@ static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
69extern void vide(void); 69extern void vide(void);
70__asm__(".align 4\nvide: ret"); 70__asm__(".align 4\nvide: ret");
71 71
72static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c) 72static void init_amd_k5(struct cpuinfo_x86 *c)
73{ 73{
74/* 74/*
75 * General Systems BIOSen alias the cpu frequency registers 75 * General Systems BIOSen alias the cpu frequency registers
@@ -87,7 +87,7 @@ static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
87} 87}
88 88
89 89
90static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c) 90static void init_amd_k6(struct cpuinfo_x86 *c)
91{ 91{
92 u32 l, h; 92 u32 l, h;
93 int mbytes = get_num_physpages() >> (20-PAGE_SHIFT); 93 int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);
@@ -179,7 +179,7 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
179 } 179 }
180} 180}
181 181
182static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c) 182static void amd_k7_smp_check(struct cpuinfo_x86 *c)
183{ 183{
184 /* calling is from identify_secondary_cpu() ? */ 184 /* calling is from identify_secondary_cpu() ? */
185 if (!c->cpu_index) 185 if (!c->cpu_index)
@@ -222,7 +222,7 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
222 add_taint(TAINT_UNSAFE_SMP, LOCKDEP_NOW_UNRELIABLE); 222 add_taint(TAINT_UNSAFE_SMP, LOCKDEP_NOW_UNRELIABLE);
223} 223}
224 224
225static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c) 225static void init_amd_k7(struct cpuinfo_x86 *c)
226{ 226{
227 u32 l, h; 227 u32 l, h;
228 228
@@ -267,7 +267,7 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
267 * To workaround broken NUMA config. Read the comment in 267 * To workaround broken NUMA config. Read the comment in
268 * srat_detect_node(). 268 * srat_detect_node().
269 */ 269 */
270static int __cpuinit nearby_node(int apicid) 270static int nearby_node(int apicid)
271{ 271{
272 int i, node; 272 int i, node;
273 273
@@ -292,7 +292,7 @@ static int __cpuinit nearby_node(int apicid)
292 * (2) AMD processors supporting compute units 292 * (2) AMD processors supporting compute units
293 */ 293 */
294#ifdef CONFIG_X86_HT 294#ifdef CONFIG_X86_HT
295static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c) 295static void amd_get_topology(struct cpuinfo_x86 *c)
296{ 296{
297 u32 nodes, cores_per_cu = 1; 297 u32 nodes, cores_per_cu = 1;
298 u8 node_id; 298 u8 node_id;
@@ -342,7 +342,7 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
342 * On a AMD dual core setup the lower bits of the APIC id distingush the cores. 342 * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
343 * Assumes number of cores is a power of two. 343 * Assumes number of cores is a power of two.
344 */ 344 */
345static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c) 345static void amd_detect_cmp(struct cpuinfo_x86 *c)
346{ 346{
347#ifdef CONFIG_X86_HT 347#ifdef CONFIG_X86_HT
348 unsigned bits; 348 unsigned bits;
@@ -369,7 +369,7 @@ u16 amd_get_nb_id(int cpu)
369} 369}
370EXPORT_SYMBOL_GPL(amd_get_nb_id); 370EXPORT_SYMBOL_GPL(amd_get_nb_id);
371 371
372static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) 372static void srat_detect_node(struct cpuinfo_x86 *c)
373{ 373{
374#ifdef CONFIG_NUMA 374#ifdef CONFIG_NUMA
375 int cpu = smp_processor_id(); 375 int cpu = smp_processor_id();
@@ -421,7 +421,7 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
421#endif 421#endif
422} 422}
423 423
424static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c) 424static void early_init_amd_mc(struct cpuinfo_x86 *c)
425{ 425{
426#ifdef CONFIG_X86_HT 426#ifdef CONFIG_X86_HT
427 unsigned bits, ecx; 427 unsigned bits, ecx;
@@ -447,7 +447,7 @@ static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
447#endif 447#endif
448} 448}
449 449
450static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c) 450static void bsp_init_amd(struct cpuinfo_x86 *c)
451{ 451{
452 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { 452 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
453 453
@@ -475,7 +475,7 @@ static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c)
475 } 475 }
476} 476}
477 477
478static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) 478static void early_init_amd(struct cpuinfo_x86 *c)
479{ 479{
480 early_init_amd_mc(c); 480 early_init_amd_mc(c);
481 481
@@ -514,7 +514,7 @@ static const int amd_erratum_383[];
514static const int amd_erratum_400[]; 514static const int amd_erratum_400[];
515static bool cpu_has_amd_erratum(const int *erratum); 515static bool cpu_has_amd_erratum(const int *erratum);
516 516
517static void __cpuinit init_amd(struct cpuinfo_x86 *c) 517static void init_amd(struct cpuinfo_x86 *c)
518{ 518{
519 u32 dummy; 519 u32 dummy;
520 unsigned long long value; 520 unsigned long long value;
@@ -740,8 +740,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
740} 740}
741 741
742#ifdef CONFIG_X86_32 742#ifdef CONFIG_X86_32
743static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, 743static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
744 unsigned int size)
745{ 744{
746 /* AMD errata T13 (order #21922) */ 745 /* AMD errata T13 (order #21922) */
747 if ((c->x86 == 6)) { 746 if ((c->x86 == 6)) {
@@ -757,7 +756,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
757} 756}
758#endif 757#endif
759 758
760static void __cpuinit cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c) 759static void cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
761{ 760{
762 tlb_flushall_shift = 5; 761 tlb_flushall_shift = 5;
763 762
@@ -765,7 +764,7 @@ static void __cpuinit cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
765 tlb_flushall_shift = 4; 764 tlb_flushall_shift = 4;
766} 765}
767 766
768static void __cpuinit cpu_detect_tlb_amd(struct cpuinfo_x86 *c) 767static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
769{ 768{
770 u32 ebx, eax, ecx, edx; 769 u32 ebx, eax, ecx, edx;
771 u16 mask = 0xfff; 770 u16 mask = 0xfff;
@@ -820,7 +819,7 @@ static void __cpuinit cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
820 cpu_set_tlb_flushall_shift(c); 819 cpu_set_tlb_flushall_shift(c);
821} 820}
822 821
823static const struct cpu_dev __cpuinitconst amd_cpu_dev = { 822static const struct cpu_dev amd_cpu_dev = {
824 .c_vendor = "AMD", 823 .c_vendor = "AMD",
825 .c_ident = { "AuthenticAMD" }, 824 .c_ident = { "AuthenticAMD" },
826#ifdef CONFIG_X86_32 825#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 159103c0b1f4..fbf6c3bc2400 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -11,7 +11,7 @@
11 11
12#ifdef CONFIG_X86_OOSTORE 12#ifdef CONFIG_X86_OOSTORE
13 13
14static u32 __cpuinit power2(u32 x) 14static u32 power2(u32 x)
15{ 15{
16 u32 s = 1; 16 u32 s = 1;
17 17
@@ -25,7 +25,7 @@ static u32 __cpuinit power2(u32 x)
25/* 25/*
26 * Set up an actual MCR 26 * Set up an actual MCR
27 */ 27 */
28static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key) 28static void centaur_mcr_insert(int reg, u32 base, u32 size, int key)
29{ 29{
30 u32 lo, hi; 30 u32 lo, hi;
31 31
@@ -42,7 +42,7 @@ static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key)
42 * 42 *
43 * Shortcut: We know you can't put 4Gig of RAM on a winchip 43 * Shortcut: We know you can't put 4Gig of RAM on a winchip
44 */ 44 */
45static u32 __cpuinit ramtop(void) 45static u32 ramtop(void)
46{ 46{
47 u32 clip = 0xFFFFFFFFUL; 47 u32 clip = 0xFFFFFFFFUL;
48 u32 top = 0; 48 u32 top = 0;
@@ -91,7 +91,7 @@ static u32 __cpuinit ramtop(void)
91/* 91/*
92 * Compute a set of MCR's to give maximum coverage 92 * Compute a set of MCR's to give maximum coverage
93 */ 93 */
94static int __cpuinit centaur_mcr_compute(int nr, int key) 94static int centaur_mcr_compute(int nr, int key)
95{ 95{
96 u32 mem = ramtop(); 96 u32 mem = ramtop();
97 u32 root = power2(mem); 97 u32 root = power2(mem);
@@ -157,7 +157,7 @@ static int __cpuinit centaur_mcr_compute(int nr, int key)
157 return ct; 157 return ct;
158} 158}
159 159
160static void __cpuinit centaur_create_optimal_mcr(void) 160static void centaur_create_optimal_mcr(void)
161{ 161{
162 int used; 162 int used;
163 int i; 163 int i;
@@ -181,7 +181,7 @@ static void __cpuinit centaur_create_optimal_mcr(void)
181 wrmsr(MSR_IDT_MCR0+i, 0, 0); 181 wrmsr(MSR_IDT_MCR0+i, 0, 0);
182} 182}
183 183
184static void __cpuinit winchip2_create_optimal_mcr(void) 184static void winchip2_create_optimal_mcr(void)
185{ 185{
186 u32 lo, hi; 186 u32 lo, hi;
187 int used; 187 int used;
@@ -217,7 +217,7 @@ static void __cpuinit winchip2_create_optimal_mcr(void)
217/* 217/*
218 * Handle the MCR key on the Winchip 2. 218 * Handle the MCR key on the Winchip 2.
219 */ 219 */
220static void __cpuinit winchip2_unprotect_mcr(void) 220static void winchip2_unprotect_mcr(void)
221{ 221{
222 u32 lo, hi; 222 u32 lo, hi;
223 u32 key; 223 u32 key;
@@ -229,7 +229,7 @@ static void __cpuinit winchip2_unprotect_mcr(void)
229 wrmsr(MSR_IDT_MCR_CTRL, lo, hi); 229 wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
230} 230}
231 231
232static void __cpuinit winchip2_protect_mcr(void) 232static void winchip2_protect_mcr(void)
233{ 233{
234 u32 lo, hi; 234 u32 lo, hi;
235 235
@@ -247,7 +247,7 @@ static void __cpuinit winchip2_protect_mcr(void)
247#define RNG_ENABLED (1 << 3) 247#define RNG_ENABLED (1 << 3)
248#define RNG_ENABLE (1 << 6) /* MSR_VIA_RNG */ 248#define RNG_ENABLE (1 << 6) /* MSR_VIA_RNG */
249 249
250static void __cpuinit init_c3(struct cpuinfo_x86 *c) 250static void init_c3(struct cpuinfo_x86 *c)
251{ 251{
252 u32 lo, hi; 252 u32 lo, hi;
253 253
@@ -318,7 +318,7 @@ enum {
318 EAMD3D = 1<<20, 318 EAMD3D = 1<<20,
319}; 319};
320 320
321static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c) 321static void early_init_centaur(struct cpuinfo_x86 *c)
322{ 322{
323 switch (c->x86) { 323 switch (c->x86) {
324#ifdef CONFIG_X86_32 324#ifdef CONFIG_X86_32
@@ -337,7 +337,7 @@ static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
337#endif 337#endif
338} 338}
339 339
340static void __cpuinit init_centaur(struct cpuinfo_x86 *c) 340static void init_centaur(struct cpuinfo_x86 *c)
341{ 341{
342#ifdef CONFIG_X86_32 342#ifdef CONFIG_X86_32
343 char *name; 343 char *name;
@@ -468,7 +468,7 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
468#endif 468#endif
469} 469}
470 470
471static unsigned int __cpuinit 471static unsigned int
472centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size) 472centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
473{ 473{
474#ifdef CONFIG_X86_32 474#ifdef CONFIG_X86_32
@@ -488,7 +488,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
488 return size; 488 return size;
489} 489}
490 490
491static const struct cpu_dev __cpuinitconst centaur_cpu_dev = { 491static const struct cpu_dev centaur_cpu_dev = {
492 .c_vendor = "Centaur", 492 .c_vendor = "Centaur",
493 .c_ident = { "CentaurHauls" }, 493 .c_ident = { "CentaurHauls" },
494 .c_early_init = early_init_centaur, 494 .c_early_init = early_init_centaur,
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 548bd039784e..25eb2747b063 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -63,7 +63,7 @@ void __init setup_cpu_local_masks(void)
63 alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask); 63 alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
64} 64}
65 65
66static void __cpuinit default_init(struct cpuinfo_x86 *c) 66static void default_init(struct cpuinfo_x86 *c)
67{ 67{
68#ifdef CONFIG_X86_64 68#ifdef CONFIG_X86_64
69 cpu_detect_cache_sizes(c); 69 cpu_detect_cache_sizes(c);
@@ -80,13 +80,13 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c)
80#endif 80#endif
81} 81}
82 82
83static const struct cpu_dev __cpuinitconst default_cpu = { 83static const struct cpu_dev default_cpu = {
84 .c_init = default_init, 84 .c_init = default_init,
85 .c_vendor = "Unknown", 85 .c_vendor = "Unknown",
86 .c_x86_vendor = X86_VENDOR_UNKNOWN, 86 .c_x86_vendor = X86_VENDOR_UNKNOWN,
87}; 87};
88 88
89static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu; 89static const struct cpu_dev *this_cpu = &default_cpu;
90 90
91DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { 91DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
92#ifdef CONFIG_X86_64 92#ifdef CONFIG_X86_64
@@ -160,8 +160,8 @@ static int __init x86_xsaveopt_setup(char *s)
160__setup("noxsaveopt", x86_xsaveopt_setup); 160__setup("noxsaveopt", x86_xsaveopt_setup);
161 161
162#ifdef CONFIG_X86_32 162#ifdef CONFIG_X86_32
163static int cachesize_override __cpuinitdata = -1; 163static int cachesize_override = -1;
164static int disable_x86_serial_nr __cpuinitdata = 1; 164static int disable_x86_serial_nr = 1;
165 165
166static int __init cachesize_setup(char *str) 166static int __init cachesize_setup(char *str)
167{ 167{
@@ -215,12 +215,12 @@ static inline int flag_is_changeable_p(u32 flag)
215} 215}
216 216
217/* Probe for the CPUID instruction */ 217/* Probe for the CPUID instruction */
218int __cpuinit have_cpuid_p(void) 218int have_cpuid_p(void)
219{ 219{
220 return flag_is_changeable_p(X86_EFLAGS_ID); 220 return flag_is_changeable_p(X86_EFLAGS_ID);
221} 221}
222 222
223static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) 223static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
224{ 224{
225 unsigned long lo, hi; 225 unsigned long lo, hi;
226 226
@@ -298,7 +298,7 @@ struct cpuid_dependent_feature {
298 u32 level; 298 u32 level;
299}; 299};
300 300
301static const struct cpuid_dependent_feature __cpuinitconst 301static const struct cpuid_dependent_feature
302cpuid_dependent_features[] = { 302cpuid_dependent_features[] = {
303 { X86_FEATURE_MWAIT, 0x00000005 }, 303 { X86_FEATURE_MWAIT, 0x00000005 },
304 { X86_FEATURE_DCA, 0x00000009 }, 304 { X86_FEATURE_DCA, 0x00000009 },
@@ -306,7 +306,7 @@ cpuid_dependent_features[] = {
306 { 0, 0 } 306 { 0, 0 }
307}; 307};
308 308
309static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn) 309static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
310{ 310{
311 const struct cpuid_dependent_feature *df; 311 const struct cpuid_dependent_feature *df;
312 312
@@ -344,7 +344,7 @@ static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
344 */ 344 */
345 345
346/* Look up CPU names by table lookup. */ 346/* Look up CPU names by table lookup. */
347static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c) 347static const char *table_lookup_model(struct cpuinfo_x86 *c)
348{ 348{
349 const struct cpu_model_info *info; 349 const struct cpu_model_info *info;
350 350
@@ -364,8 +364,8 @@ static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
364 return NULL; /* Not found */ 364 return NULL; /* Not found */
365} 365}
366 366
367__u32 cpu_caps_cleared[NCAPINTS] __cpuinitdata; 367__u32 cpu_caps_cleared[NCAPINTS];
368__u32 cpu_caps_set[NCAPINTS] __cpuinitdata; 368__u32 cpu_caps_set[NCAPINTS];
369 369
370void load_percpu_segment(int cpu) 370void load_percpu_segment(int cpu)
371{ 371{
@@ -394,9 +394,9 @@ void switch_to_new_gdt(int cpu)
394 load_percpu_segment(cpu); 394 load_percpu_segment(cpu);
395} 395}
396 396
397static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {}; 397static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
398 398
399static void __cpuinit get_model_name(struct cpuinfo_x86 *c) 399static void get_model_name(struct cpuinfo_x86 *c)
400{ 400{
401 unsigned int *v; 401 unsigned int *v;
402 char *p, *q; 402 char *p, *q;
@@ -425,7 +425,7 @@ static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
425 } 425 }
426} 426}
427 427
428void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c) 428void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
429{ 429{
430 unsigned int n, dummy, ebx, ecx, edx, l2size; 430 unsigned int n, dummy, ebx, ecx, edx, l2size;
431 431
@@ -479,7 +479,7 @@ u16 __read_mostly tlb_lld_4m[NR_INFO];
479 */ 479 */
480s8 __read_mostly tlb_flushall_shift = -1; 480s8 __read_mostly tlb_flushall_shift = -1;
481 481
482void __cpuinit cpu_detect_tlb(struct cpuinfo_x86 *c) 482void cpu_detect_tlb(struct cpuinfo_x86 *c)
483{ 483{
484 if (this_cpu->c_detect_tlb) 484 if (this_cpu->c_detect_tlb)
485 this_cpu->c_detect_tlb(c); 485 this_cpu->c_detect_tlb(c);
@@ -493,7 +493,7 @@ void __cpuinit cpu_detect_tlb(struct cpuinfo_x86 *c)
493 tlb_flushall_shift); 493 tlb_flushall_shift);
494} 494}
495 495
496void __cpuinit detect_ht(struct cpuinfo_x86 *c) 496void detect_ht(struct cpuinfo_x86 *c)
497{ 497{
498#ifdef CONFIG_X86_HT 498#ifdef CONFIG_X86_HT
499 u32 eax, ebx, ecx, edx; 499 u32 eax, ebx, ecx, edx;
@@ -544,7 +544,7 @@ out:
544#endif 544#endif
545} 545}
546 546
547static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) 547static void get_cpu_vendor(struct cpuinfo_x86 *c)
548{ 548{
549 char *v = c->x86_vendor_id; 549 char *v = c->x86_vendor_id;
550 int i; 550 int i;
@@ -571,7 +571,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
571 this_cpu = &default_cpu; 571 this_cpu = &default_cpu;
572} 572}
573 573
574void __cpuinit cpu_detect(struct cpuinfo_x86 *c) 574void cpu_detect(struct cpuinfo_x86 *c)
575{ 575{
576 /* Get vendor name */ 576 /* Get vendor name */
577 cpuid(0x00000000, (unsigned int *)&c->cpuid_level, 577 cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
@@ -601,7 +601,7 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
601 } 601 }
602} 602}
603 603
604void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) 604void get_cpu_cap(struct cpuinfo_x86 *c)
605{ 605{
606 u32 tfms, xlvl; 606 u32 tfms, xlvl;
607 u32 ebx; 607 u32 ebx;
@@ -652,7 +652,7 @@ void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
652 init_scattered_cpuid_features(c); 652 init_scattered_cpuid_features(c);
653} 653}
654 654
655static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c) 655static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
656{ 656{
657#ifdef CONFIG_X86_32 657#ifdef CONFIG_X86_32
658 int i; 658 int i;
@@ -769,7 +769,7 @@ void __init early_cpu_init(void)
769 * unless we can find a reliable way to detect all the broken cases. 769 * unless we can find a reliable way to detect all the broken cases.
770 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has(). 770 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
771 */ 771 */
772static void __cpuinit detect_nopl(struct cpuinfo_x86 *c) 772static void detect_nopl(struct cpuinfo_x86 *c)
773{ 773{
774#ifdef CONFIG_X86_32 774#ifdef CONFIG_X86_32
775 clear_cpu_cap(c, X86_FEATURE_NOPL); 775 clear_cpu_cap(c, X86_FEATURE_NOPL);
@@ -778,7 +778,7 @@ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
778#endif 778#endif
779} 779}
780 780
781static void __cpuinit generic_identify(struct cpuinfo_x86 *c) 781static void generic_identify(struct cpuinfo_x86 *c)
782{ 782{
783 c->extended_cpuid_level = 0; 783 c->extended_cpuid_level = 0;
784 784
@@ -815,7 +815,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
815/* 815/*
816 * This does the hard work of actually picking apart the CPU stuff... 816 * This does the hard work of actually picking apart the CPU stuff...
817 */ 817 */
818static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) 818static void identify_cpu(struct cpuinfo_x86 *c)
819{ 819{
820 int i; 820 int i;
821 821
@@ -960,7 +960,7 @@ void __init identify_boot_cpu(void)
960 cpu_detect_tlb(&boot_cpu_data); 960 cpu_detect_tlb(&boot_cpu_data);
961} 961}
962 962
963void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) 963void identify_secondary_cpu(struct cpuinfo_x86 *c)
964{ 964{
965 BUG_ON(c == &boot_cpu_data); 965 BUG_ON(c == &boot_cpu_data);
966 identify_cpu(c); 966 identify_cpu(c);
@@ -975,14 +975,14 @@ struct msr_range {
975 unsigned max; 975 unsigned max;
976}; 976};
977 977
978static const struct msr_range msr_range_array[] __cpuinitconst = { 978static const struct msr_range msr_range_array[] = {
979 { 0x00000000, 0x00000418}, 979 { 0x00000000, 0x00000418},
980 { 0xc0000000, 0xc000040b}, 980 { 0xc0000000, 0xc000040b},
981 { 0xc0010000, 0xc0010142}, 981 { 0xc0010000, 0xc0010142},
982 { 0xc0011000, 0xc001103b}, 982 { 0xc0011000, 0xc001103b},
983}; 983};
984 984
985static void __cpuinit __print_cpu_msr(void) 985static void __print_cpu_msr(void)
986{ 986{
987 unsigned index_min, index_max; 987 unsigned index_min, index_max;
988 unsigned index; 988 unsigned index;
@@ -1001,7 +1001,7 @@ static void __cpuinit __print_cpu_msr(void)
1001 } 1001 }
1002} 1002}
1003 1003
1004static int show_msr __cpuinitdata; 1004static int show_msr;
1005 1005
1006static __init int setup_show_msr(char *arg) 1006static __init int setup_show_msr(char *arg)
1007{ 1007{
@@ -1022,7 +1022,7 @@ static __init int setup_noclflush(char *arg)
1022} 1022}
1023__setup("noclflush", setup_noclflush); 1023__setup("noclflush", setup_noclflush);
1024 1024
1025void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) 1025void print_cpu_info(struct cpuinfo_x86 *c)
1026{ 1026{
1027 const char *vendor = NULL; 1027 const char *vendor = NULL;
1028 1028
@@ -1051,7 +1051,7 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
1051 print_cpu_msr(c); 1051 print_cpu_msr(c);
1052} 1052}
1053 1053
1054void __cpuinit print_cpu_msr(struct cpuinfo_x86 *c) 1054void print_cpu_msr(struct cpuinfo_x86 *c)
1055{ 1055{
1056 if (c->cpu_index < show_msr) 1056 if (c->cpu_index < show_msr)
1057 __print_cpu_msr(); 1057 __print_cpu_msr();
@@ -1216,7 +1216,7 @@ static void dbg_restore_debug_regs(void)
1216 */ 1216 */
1217#ifdef CONFIG_X86_64 1217#ifdef CONFIG_X86_64
1218 1218
1219void __cpuinit cpu_init(void) 1219void cpu_init(void)
1220{ 1220{
1221 struct orig_ist *oist; 1221 struct orig_ist *oist;
1222 struct task_struct *me; 1222 struct task_struct *me;
@@ -1315,7 +1315,7 @@ void __cpuinit cpu_init(void)
1315 1315
1316#else 1316#else
1317 1317
1318void __cpuinit cpu_init(void) 1318void cpu_init(void)
1319{ 1319{
1320 int cpu = smp_processor_id(); 1320 int cpu = smp_processor_id();
1321 struct task_struct *curr = current; 1321 struct task_struct *curr = current;
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 7582f475b163..d0969c75ab54 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -15,7 +15,7 @@
15/* 15/*
16 * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU 16 * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
17 */ 17 */
18static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) 18static void __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
19{ 19{
20 unsigned char ccr2, ccr3; 20 unsigned char ccr2, ccr3;
21 21
@@ -44,7 +44,7 @@ static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
44 } 44 }
45} 45}
46 46
47static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) 47static void do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
48{ 48{
49 unsigned long flags; 49 unsigned long flags;
50 50
@@ -59,25 +59,25 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
59 * Actually since bugs.h doesn't even reference this perhaps someone should 59 * Actually since bugs.h doesn't even reference this perhaps someone should
60 * fix the documentation ??? 60 * fix the documentation ???
61 */ 61 */
62static unsigned char Cx86_dir0_msb __cpuinitdata = 0; 62static unsigned char Cx86_dir0_msb = 0;
63 63
64static const char __cpuinitconst Cx86_model[][9] = { 64static const char Cx86_model[][9] = {
65 "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ", 65 "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
66 "M II ", "Unknown" 66 "M II ", "Unknown"
67}; 67};
68static const char __cpuinitconst Cx486_name[][5] = { 68static const char Cx486_name[][5] = {
69 "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx", 69 "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
70 "SRx2", "DRx2" 70 "SRx2", "DRx2"
71}; 71};
72static const char __cpuinitconst Cx486S_name[][4] = { 72static const char Cx486S_name[][4] = {
73 "S", "S2", "Se", "S2e" 73 "S", "S2", "Se", "S2e"
74}; 74};
75static const char __cpuinitconst Cx486D_name[][4] = { 75static const char Cx486D_name[][4] = {
76 "DX", "DX2", "?", "?", "?", "DX4" 76 "DX", "DX2", "?", "?", "?", "DX4"
77}; 77};
78static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock"; 78static char Cx86_cb[] = "?.5x Core/Bus Clock";
79static const char __cpuinitconst cyrix_model_mult1[] = "12??43"; 79static const char cyrix_model_mult1[] = "12??43";
80static const char __cpuinitconst cyrix_model_mult2[] = "12233445"; 80static const char cyrix_model_mult2[] = "12233445";
81 81
82/* 82/*
83 * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old 83 * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
@@ -87,7 +87,7 @@ static const char __cpuinitconst cyrix_model_mult2[] = "12233445";
87 * FIXME: our newer udelay uses the tsc. We don't need to frob with SLOP 87 * FIXME: our newer udelay uses the tsc. We don't need to frob with SLOP
88 */ 88 */
89 89
90static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c) 90static void check_cx686_slop(struct cpuinfo_x86 *c)
91{ 91{
92 unsigned long flags; 92 unsigned long flags;
93 93
@@ -112,7 +112,7 @@ static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c)
112} 112}
113 113
114 114
115static void __cpuinit set_cx86_reorder(void) 115static void set_cx86_reorder(void)
116{ 116{
117 u8 ccr3; 117 u8 ccr3;
118 118
@@ -127,7 +127,7 @@ static void __cpuinit set_cx86_reorder(void)
127 setCx86(CX86_CCR3, ccr3); 127 setCx86(CX86_CCR3, ccr3);
128} 128}
129 129
130static void __cpuinit set_cx86_memwb(void) 130static void set_cx86_memwb(void)
131{ 131{
132 printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n"); 132 printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
133 133
@@ -143,7 +143,7 @@ static void __cpuinit set_cx86_memwb(void)
143 * Configure later MediaGX and/or Geode processor. 143 * Configure later MediaGX and/or Geode processor.
144 */ 144 */
145 145
146static void __cpuinit geode_configure(void) 146static void geode_configure(void)
147{ 147{
148 unsigned long flags; 148 unsigned long flags;
149 u8 ccr3; 149 u8 ccr3;
@@ -166,7 +166,7 @@ static void __cpuinit geode_configure(void)
166 local_irq_restore(flags); 166 local_irq_restore(flags);
167} 167}
168 168
169static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c) 169static void early_init_cyrix(struct cpuinfo_x86 *c)
170{ 170{
171 unsigned char dir0, dir0_msn, dir1 = 0; 171 unsigned char dir0, dir0_msn, dir1 = 0;
172 172
@@ -185,7 +185,7 @@ static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c)
185 } 185 }
186} 186}
187 187
188static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) 188static void init_cyrix(struct cpuinfo_x86 *c)
189{ 189{
190 unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0; 190 unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;
191 char *buf = c->x86_model_id; 191 char *buf = c->x86_model_id;
@@ -356,7 +356,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
356/* 356/*
357 * Handle National Semiconductor branded processors 357 * Handle National Semiconductor branded processors
358 */ 358 */
359static void __cpuinit init_nsc(struct cpuinfo_x86 *c) 359static void init_nsc(struct cpuinfo_x86 *c)
360{ 360{
361 /* 361 /*
362 * There may be GX1 processors in the wild that are branded 362 * There may be GX1 processors in the wild that are branded
@@ -405,7 +405,7 @@ static inline int test_cyrix_52div(void)
405 return (unsigned char) (test >> 8) == 0x02; 405 return (unsigned char) (test >> 8) == 0x02;
406} 406}
407 407
408static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c) 408static void cyrix_identify(struct cpuinfo_x86 *c)
409{ 409{
410 /* Detect Cyrix with disabled CPUID */ 410 /* Detect Cyrix with disabled CPUID */
411 if (c->x86 == 4 && test_cyrix_52div()) { 411 if (c->x86 == 4 && test_cyrix_52div()) {
@@ -441,7 +441,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
441 } 441 }
442} 442}
443 443
444static const struct cpu_dev __cpuinitconst cyrix_cpu_dev = { 444static const struct cpu_dev cyrix_cpu_dev = {
445 .c_vendor = "Cyrix", 445 .c_vendor = "Cyrix",
446 .c_ident = { "CyrixInstead" }, 446 .c_ident = { "CyrixInstead" },
447 .c_early_init = early_init_cyrix, 447 .c_early_init = early_init_cyrix,
@@ -452,7 +452,7 @@ static const struct cpu_dev __cpuinitconst cyrix_cpu_dev = {
452 452
453cpu_dev_register(cyrix_cpu_dev); 453cpu_dev_register(cyrix_cpu_dev);
454 454
455static const struct cpu_dev __cpuinitconst nsc_cpu_dev = { 455static const struct cpu_dev nsc_cpu_dev = {
456 .c_vendor = "NSC", 456 .c_vendor = "NSC",
457 .c_ident = { "Geode by NSC" }, 457 .c_ident = { "Geode by NSC" },
458 .c_init = init_nsc, 458 .c_init = init_nsc,
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index 1e7e84a02eba..87279212d318 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -60,7 +60,7 @@ detect_hypervisor_vendor(void)
60 } 60 }
61} 61}
62 62
63void __cpuinit init_hypervisor(struct cpuinfo_x86 *c) 63void init_hypervisor(struct cpuinfo_x86 *c)
64{ 64{
65 if (x86_hyper && x86_hyper->set_cpu_features) 65 if (x86_hyper && x86_hyper->set_cpu_features)
66 x86_hyper->set_cpu_features(c); 66 x86_hyper->set_cpu_features(c);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 9b0c441c03f5..ec7299566f79 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -26,7 +26,7 @@
26#include <asm/apic.h> 26#include <asm/apic.h>
27#endif 27#endif
28 28
29static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) 29static void early_init_intel(struct cpuinfo_x86 *c)
30{ 30{
31 u64 misc_enable; 31 u64 misc_enable;
32 32
@@ -163,7 +163,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
163 * This is called before we do cpu ident work 163 * This is called before we do cpu ident work
164 */ 164 */
165 165
166int __cpuinit ppro_with_ram_bug(void) 166int ppro_with_ram_bug(void)
167{ 167{
168 /* Uses data from early_cpu_detect now */ 168 /* Uses data from early_cpu_detect now */
169 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && 169 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
@@ -176,7 +176,7 @@ int __cpuinit ppro_with_ram_bug(void)
176 return 0; 176 return 0;
177} 177}
178 178
179static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c) 179static void intel_smp_check(struct cpuinfo_x86 *c)
180{ 180{
181 /* calling is from identify_secondary_cpu() ? */ 181 /* calling is from identify_secondary_cpu() ? */
182 if (!c->cpu_index) 182 if (!c->cpu_index)
@@ -196,7 +196,7 @@ static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
196 } 196 }
197} 197}
198 198
199static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) 199static void intel_workarounds(struct cpuinfo_x86 *c)
200{ 200{
201 unsigned long lo, hi; 201 unsigned long lo, hi;
202 202
@@ -275,12 +275,12 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
275 intel_smp_check(c); 275 intel_smp_check(c);
276} 276}
277#else 277#else
278static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) 278static void intel_workarounds(struct cpuinfo_x86 *c)
279{ 279{
280} 280}
281#endif 281#endif
282 282
283static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) 283static void srat_detect_node(struct cpuinfo_x86 *c)
284{ 284{
285#ifdef CONFIG_NUMA 285#ifdef CONFIG_NUMA
286 unsigned node; 286 unsigned node;
@@ -300,7 +300,7 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
300/* 300/*
301 * find out the number of processor cores on the die 301 * find out the number of processor cores on the die
302 */ 302 */
303static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) 303static int intel_num_cpu_cores(struct cpuinfo_x86 *c)
304{ 304{
305 unsigned int eax, ebx, ecx, edx; 305 unsigned int eax, ebx, ecx, edx;
306 306
@@ -315,7 +315,7 @@ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
315 return 1; 315 return 1;
316} 316}
317 317
318static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c) 318static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
319{ 319{
320 /* Intel VMX MSR indicated features */ 320 /* Intel VMX MSR indicated features */
321#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW 0x00200000 321#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW 0x00200000
@@ -353,7 +353,7 @@ static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c)
353 } 353 }
354} 354}
355 355
356static void __cpuinit init_intel(struct cpuinfo_x86 *c) 356static void init_intel(struct cpuinfo_x86 *c)
357{ 357{
358 unsigned int l2 = 0; 358 unsigned int l2 = 0;
359 359
@@ -472,7 +472,7 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
472} 472}
473 473
474#ifdef CONFIG_X86_32 474#ifdef CONFIG_X86_32
475static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) 475static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
476{ 476{
477 /* 477 /*
478 * Intel PIII Tualatin. This comes in two flavours. 478 * Intel PIII Tualatin. This comes in two flavours.
@@ -506,7 +506,7 @@ static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned i
506 506
507#define STLB_4K 0x41 507#define STLB_4K 0x41
508 508
509static const struct _tlb_table intel_tlb_table[] __cpuinitconst = { 509static const struct _tlb_table intel_tlb_table[] = {
510 { 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" }, 510 { 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" },
511 { 0x02, TLB_INST_4M, 2, " TLB_INST 4 MByte pages, full associative" }, 511 { 0x02, TLB_INST_4M, 2, " TLB_INST 4 MByte pages, full associative" },
512 { 0x03, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way set associative" }, 512 { 0x03, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way set associative" },
@@ -536,7 +536,7 @@ static const struct _tlb_table intel_tlb_table[] __cpuinitconst = {
536 { 0x00, 0, 0 } 536 { 0x00, 0, 0 }
537}; 537};
538 538
539static void __cpuinit intel_tlb_lookup(const unsigned char desc) 539static void intel_tlb_lookup(const unsigned char desc)
540{ 540{
541 unsigned char k; 541 unsigned char k;
542 if (desc == 0) 542 if (desc == 0)
@@ -605,7 +605,7 @@ static void __cpuinit intel_tlb_lookup(const unsigned char desc)
605 } 605 }
606} 606}
607 607
608static void __cpuinit intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c) 608static void intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
609{ 609{
610 switch ((c->x86 << 8) + c->x86_model) { 610 switch ((c->x86 << 8) + c->x86_model) {
611 case 0x60f: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */ 611 case 0x60f: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
@@ -634,7 +634,7 @@ static void __cpuinit intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
634 } 634 }
635} 635}
636 636
637static void __cpuinit intel_detect_tlb(struct cpuinfo_x86 *c) 637static void intel_detect_tlb(struct cpuinfo_x86 *c)
638{ 638{
639 int i, j, n; 639 int i, j, n;
640 unsigned int regs[4]; 640 unsigned int regs[4];
@@ -661,7 +661,7 @@ static void __cpuinit intel_detect_tlb(struct cpuinfo_x86 *c)
661 intel_tlb_flushall_shift_set(c); 661 intel_tlb_flushall_shift_set(c);
662} 662}
663 663
664static const struct cpu_dev __cpuinitconst intel_cpu_dev = { 664static const struct cpu_dev intel_cpu_dev = {
665 .c_vendor = "Intel", 665 .c_vendor = "Intel",
666 .c_ident = { "GenuineIntel" }, 666 .c_ident = { "GenuineIntel" },
667#ifdef CONFIG_X86_32 667#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 8dc72dda66fe..1414c90feaba 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -37,7 +37,7 @@ struct _cache_table {
37/* All the cache descriptor types we care about (no TLB or 37/* All the cache descriptor types we care about (no TLB or
38 trace cache entries) */ 38 trace cache entries) */
39 39
40static const struct _cache_table __cpuinitconst cache_table[] = 40static const struct _cache_table cache_table[] =
41{ 41{
42 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ 42 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
43 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ 43 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
@@ -203,7 +203,7 @@ union l3_cache {
203 unsigned val; 203 unsigned val;
204}; 204};
205 205
206static const unsigned short __cpuinitconst assocs[] = { 206static const unsigned short assocs[] = {
207 [1] = 1, 207 [1] = 1,
208 [2] = 2, 208 [2] = 2,
209 [4] = 4, 209 [4] = 4,
@@ -217,10 +217,10 @@ static const unsigned short __cpuinitconst assocs[] = {
217 [0xf] = 0xffff /* fully associative - no way to show this currently */ 217 [0xf] = 0xffff /* fully associative - no way to show this currently */
218}; 218};
219 219
220static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 }; 220static const unsigned char levels[] = { 1, 1, 2, 3 };
221static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 }; 221static const unsigned char types[] = { 1, 2, 3, 3 };
222 222
223static void __cpuinit 223static void
224amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, 224amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
225 union _cpuid4_leaf_ebx *ebx, 225 union _cpuid4_leaf_ebx *ebx,
226 union _cpuid4_leaf_ecx *ecx) 226 union _cpuid4_leaf_ecx *ecx)
@@ -302,7 +302,7 @@ struct _cache_attr {
302/* 302/*
303 * L3 cache descriptors 303 * L3 cache descriptors
304 */ 304 */
305static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb) 305static void amd_calc_l3_indices(struct amd_northbridge *nb)
306{ 306{
307 struct amd_l3_cache *l3 = &nb->l3_cache; 307 struct amd_l3_cache *l3 = &nb->l3_cache;
308 unsigned int sc0, sc1, sc2, sc3; 308 unsigned int sc0, sc1, sc2, sc3;
@@ -325,7 +325,7 @@ static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
325 l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1; 325 l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
326} 326}
327 327
328static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index) 328static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
329{ 329{
330 int node; 330 int node;
331 331
@@ -528,8 +528,7 @@ static struct _cache_attr subcaches =
528#endif /* CONFIG_AMD_NB && CONFIG_SYSFS */ 528#endif /* CONFIG_AMD_NB && CONFIG_SYSFS */
529 529
530static int 530static int
531__cpuinit cpuid4_cache_lookup_regs(int index, 531cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
532 struct _cpuid4_info_regs *this_leaf)
533{ 532{
534 union _cpuid4_leaf_eax eax; 533 union _cpuid4_leaf_eax eax;
535 union _cpuid4_leaf_ebx ebx; 534 union _cpuid4_leaf_ebx ebx;
@@ -560,7 +559,7 @@ __cpuinit cpuid4_cache_lookup_regs(int index,
560 return 0; 559 return 0;
561} 560}
562 561
563static int __cpuinit find_num_cache_leaves(struct cpuinfo_x86 *c) 562static int find_num_cache_leaves(struct cpuinfo_x86 *c)
564{ 563{
565 unsigned int eax, ebx, ecx, edx, op; 564 unsigned int eax, ebx, ecx, edx, op;
566 union _cpuid4_leaf_eax cache_eax; 565 union _cpuid4_leaf_eax cache_eax;
@@ -580,7 +579,7 @@ static int __cpuinit find_num_cache_leaves(struct cpuinfo_x86 *c)
580 return i; 579 return i;
581} 580}
582 581
583void __cpuinit init_amd_cacheinfo(struct cpuinfo_x86 *c) 582void init_amd_cacheinfo(struct cpuinfo_x86 *c)
584{ 583{
585 584
586 if (cpu_has_topoext) { 585 if (cpu_has_topoext) {
@@ -593,7 +592,7 @@ void __cpuinit init_amd_cacheinfo(struct cpuinfo_x86 *c)
593 } 592 }
594} 593}
595 594
596unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) 595unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
597{ 596{
598 /* Cache sizes */ 597 /* Cache sizes */
599 unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; 598 unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
@@ -744,7 +743,7 @@ static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
744 743
745#ifdef CONFIG_SMP 744#ifdef CONFIG_SMP
746 745
747static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index) 746static int cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
748{ 747{
749 struct _cpuid4_info *this_leaf; 748 struct _cpuid4_info *this_leaf;
750 int i, sibling; 749 int i, sibling;
@@ -793,7 +792,7 @@ static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
793 return 1; 792 return 1;
794} 793}
795 794
796static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) 795static void cache_shared_cpu_map_setup(unsigned int cpu, int index)
797{ 796{
798 struct _cpuid4_info *this_leaf, *sibling_leaf; 797 struct _cpuid4_info *this_leaf, *sibling_leaf;
799 unsigned long num_threads_sharing; 798 unsigned long num_threads_sharing;
@@ -828,7 +827,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
828 } 827 }
829 } 828 }
830} 829}
831static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) 830static void cache_remove_shared_cpu_map(unsigned int cpu, int index)
832{ 831{
833 struct _cpuid4_info *this_leaf, *sibling_leaf; 832 struct _cpuid4_info *this_leaf, *sibling_leaf;
834 int sibling; 833 int sibling;
@@ -841,16 +840,16 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
841 } 840 }
842} 841}
843#else 842#else
844static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) 843static void cache_shared_cpu_map_setup(unsigned int cpu, int index)
845{ 844{
846} 845}
847 846
848static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) 847static void cache_remove_shared_cpu_map(unsigned int cpu, int index)
849{ 848{
850} 849}
851#endif 850#endif
852 851
853static void __cpuinit free_cache_attributes(unsigned int cpu) 852static void free_cache_attributes(unsigned int cpu)
854{ 853{
855 int i; 854 int i;
856 855
@@ -861,7 +860,7 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
861 per_cpu(ici_cpuid4_info, cpu) = NULL; 860 per_cpu(ici_cpuid4_info, cpu) = NULL;
862} 861}
863 862
864static void __cpuinit get_cpu_leaves(void *_retval) 863static void get_cpu_leaves(void *_retval)
865{ 864{
866 int j, *retval = _retval, cpu = smp_processor_id(); 865 int j, *retval = _retval, cpu = smp_processor_id();
867 866
@@ -881,7 +880,7 @@ static void __cpuinit get_cpu_leaves(void *_retval)
881 } 880 }
882} 881}
883 882
884static int __cpuinit detect_cache_attributes(unsigned int cpu) 883static int detect_cache_attributes(unsigned int cpu)
885{ 884{
886 int retval; 885 int retval;
887 886
@@ -1015,7 +1014,7 @@ static struct attribute *default_attrs[] = {
1015}; 1014};
1016 1015
1017#ifdef CONFIG_AMD_NB 1016#ifdef CONFIG_AMD_NB
1018static struct attribute ** __cpuinit amd_l3_attrs(void) 1017static struct attribute **amd_l3_attrs(void)
1019{ 1018{
1020 static struct attribute **attrs; 1019 static struct attribute **attrs;
1021 int n; 1020 int n;
@@ -1091,7 +1090,7 @@ static struct kobj_type ktype_percpu_entry = {
1091 .sysfs_ops = &sysfs_ops, 1090 .sysfs_ops = &sysfs_ops,
1092}; 1091};
1093 1092
1094static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu) 1093static void cpuid4_cache_sysfs_exit(unsigned int cpu)
1095{ 1094{
1096 kfree(per_cpu(ici_cache_kobject, cpu)); 1095 kfree(per_cpu(ici_cache_kobject, cpu));
1097 kfree(per_cpu(ici_index_kobject, cpu)); 1096 kfree(per_cpu(ici_index_kobject, cpu));
@@ -1100,7 +1099,7 @@ static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
1100 free_cache_attributes(cpu); 1099 free_cache_attributes(cpu);
1101} 1100}
1102 1101
1103static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu) 1102static int cpuid4_cache_sysfs_init(unsigned int cpu)
1104{ 1103{
1105 int err; 1104 int err;
1106 1105
@@ -1132,7 +1131,7 @@ err_out:
1132static DECLARE_BITMAP(cache_dev_map, NR_CPUS); 1131static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
1133 1132
1134/* Add/Remove cache interface for CPU device */ 1133/* Add/Remove cache interface for CPU device */
1135static int __cpuinit cache_add_dev(struct device *dev) 1134static int cache_add_dev(struct device *dev)
1136{ 1135{
1137 unsigned int cpu = dev->id; 1136 unsigned int cpu = dev->id;
1138 unsigned long i, j; 1137 unsigned long i, j;
@@ -1183,7 +1182,7 @@ static int __cpuinit cache_add_dev(struct device *dev)
1183 return 0; 1182 return 0;
1184} 1183}
1185 1184
1186static void __cpuinit cache_remove_dev(struct device *dev) 1185static void cache_remove_dev(struct device *dev)
1187{ 1186{
1188 unsigned int cpu = dev->id; 1187 unsigned int cpu = dev->id;
1189 unsigned long i; 1188 unsigned long i;
@@ -1200,8 +1199,8 @@ static void __cpuinit cache_remove_dev(struct device *dev)
1200 cpuid4_cache_sysfs_exit(cpu); 1199 cpuid4_cache_sysfs_exit(cpu);
1201} 1200}
1202 1201
1203static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb, 1202static int cacheinfo_cpu_callback(struct notifier_block *nfb,
1204 unsigned long action, void *hcpu) 1203 unsigned long action, void *hcpu)
1205{ 1204{
1206 unsigned int cpu = (unsigned long)hcpu; 1205 unsigned int cpu = (unsigned long)hcpu;
1207 struct device *dev; 1206 struct device *dev;
@@ -1220,7 +1219,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
1220 return NOTIFY_OK; 1219 return NOTIFY_OK;
1221} 1220}
1222 1221
1223static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = { 1222static struct notifier_block cacheinfo_cpu_notifier = {
1224 .notifier_call = cacheinfo_cpu_callback, 1223 .notifier_call = cacheinfo_cpu_callback,
1225}; 1224};
1226 1225
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index bf49cdbb010f..87a65c939bcd 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1363,7 +1363,7 @@ int mce_notify_irq(void)
1363} 1363}
1364EXPORT_SYMBOL_GPL(mce_notify_irq); 1364EXPORT_SYMBOL_GPL(mce_notify_irq);
1365 1365
1366static int __cpuinit __mcheck_cpu_mce_banks_init(void) 1366static int __mcheck_cpu_mce_banks_init(void)
1367{ 1367{
1368 int i; 1368 int i;
1369 u8 num_banks = mca_cfg.banks; 1369 u8 num_banks = mca_cfg.banks;
@@ -1384,7 +1384,7 @@ static int __cpuinit __mcheck_cpu_mce_banks_init(void)
1384/* 1384/*
1385 * Initialize Machine Checks for a CPU. 1385 * Initialize Machine Checks for a CPU.
1386 */ 1386 */
1387static int __cpuinit __mcheck_cpu_cap_init(void) 1387static int __mcheck_cpu_cap_init(void)
1388{ 1388{
1389 unsigned b; 1389 unsigned b;
1390 u64 cap; 1390 u64 cap;
@@ -1483,7 +1483,7 @@ static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
1483} 1483}
1484 1484
1485/* Add per CPU specific workarounds here */ 1485/* Add per CPU specific workarounds here */
1486static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) 1486static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
1487{ 1487{
1488 struct mca_config *cfg = &mca_cfg; 1488 struct mca_config *cfg = &mca_cfg;
1489 1489
@@ -1593,7 +1593,7 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
1593 return 0; 1593 return 0;
1594} 1594}
1595 1595
1596static int __cpuinit __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c) 1596static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
1597{ 1597{
1598 if (c->x86 != 5) 1598 if (c->x86 != 5)
1599 return 0; 1599 return 0;
@@ -1664,7 +1664,7 @@ void (*machine_check_vector)(struct pt_regs *, long error_code) =
1664 * Called for each booted CPU to set up machine checks. 1664 * Called for each booted CPU to set up machine checks.
1665 * Must be called with preempt off: 1665 * Must be called with preempt off:
1666 */ 1666 */
1667void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c) 1667void mcheck_cpu_init(struct cpuinfo_x86 *c)
1668{ 1668{
1669 if (mca_cfg.disabled) 1669 if (mca_cfg.disabled)
1670 return; 1670 return;
@@ -2082,7 +2082,6 @@ static struct bus_type mce_subsys = {
2082 2082
2083DEFINE_PER_CPU(struct device *, mce_device); 2083DEFINE_PER_CPU(struct device *, mce_device);
2084 2084
2085__cpuinitdata
2086void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu); 2085void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
2087 2086
2088static inline struct mce_bank *attr_to_bank(struct device_attribute *attr) 2087static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
@@ -2228,7 +2227,7 @@ static void mce_device_release(struct device *dev)
2228} 2227}
2229 2228
2230/* Per cpu device init. All of the cpus still share the same ctrl bank: */ 2229/* Per cpu device init. All of the cpus still share the same ctrl bank: */
2231static __cpuinit int mce_device_create(unsigned int cpu) 2230static int mce_device_create(unsigned int cpu)
2232{ 2231{
2233 struct device *dev; 2232 struct device *dev;
2234 int err; 2233 int err;
@@ -2274,7 +2273,7 @@ error:
2274 return err; 2273 return err;
2275} 2274}
2276 2275
2277static __cpuinit void mce_device_remove(unsigned int cpu) 2276static void mce_device_remove(unsigned int cpu)
2278{ 2277{
2279 struct device *dev = per_cpu(mce_device, cpu); 2278 struct device *dev = per_cpu(mce_device, cpu);
2280 int i; 2279 int i;
@@ -2294,7 +2293,7 @@ static __cpuinit void mce_device_remove(unsigned int cpu)
2294} 2293}
2295 2294
2296/* Make sure there are no machine checks on offlined CPUs. */ 2295/* Make sure there are no machine checks on offlined CPUs. */
2297static void __cpuinit mce_disable_cpu(void *h) 2296static void mce_disable_cpu(void *h)
2298{ 2297{
2299 unsigned long action = *(unsigned long *)h; 2298 unsigned long action = *(unsigned long *)h;
2300 int i; 2299 int i;
@@ -2312,7 +2311,7 @@ static void __cpuinit mce_disable_cpu(void *h)
2312 } 2311 }
2313} 2312}
2314 2313
2315static void __cpuinit mce_reenable_cpu(void *h) 2314static void mce_reenable_cpu(void *h)
2316{ 2315{
2317 unsigned long action = *(unsigned long *)h; 2316 unsigned long action = *(unsigned long *)h;
2318 int i; 2317 int i;
@@ -2331,7 +2330,7 @@ static void __cpuinit mce_reenable_cpu(void *h)
2331} 2330}
2332 2331
2333/* Get notified when a cpu comes on/off. Be hotplug friendly. */ 2332/* Get notified when a cpu comes on/off. Be hotplug friendly. */
2334static int __cpuinit 2333static int
2335mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) 2334mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
2336{ 2335{
2337 unsigned int cpu = (unsigned long)hcpu; 2336 unsigned int cpu = (unsigned long)hcpu;
@@ -2367,7 +2366,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	return NOTIFY_OK;
 }
 
-static struct notifier_block mce_cpu_notifier __cpuinitdata = {
+static struct notifier_block mce_cpu_notifier = {
 	.notifier_call = mce_cpu_callback,
 };
 
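Every hunk in this commit follows the same recipe: with __cpuinit on its way out, CPU-hotplug callbacks and their notifier_block structures must live in ordinary .text/.data, because a CPU can be brought online long after init memory has been freed. For readers unfamiliar with the pre-4.10 notifier API used above, here is a minimal, self-contained sketch of the pattern; my_cpu_callback, my_cpu_nb and my_init are hypothetical names, not part of this patch.

#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/printk.h>

/* Hypothetical example: the hotplug-notifier pattern that the
 * callbacks above implement. */
static int my_cpu_callback(struct notifier_block *nfb,
			   unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	/* Mask off CPU_TASKS_FROZEN so suspend/resume transitions
	 * are handled the same way as ordinary hotplug. */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		pr_info("cpu %u is now online\n", cpu);
		break;
	case CPU_DEAD:
		pr_info("cpu %u has gone away\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_cpu_nb = {
	.notifier_call = my_cpu_callback,
};

static int __init my_init(void)
{
	return register_cpu_notifier(&my_cpu_nb);
}

mce.c registers mce_cpu_notifier the same way from its device-init path, which is why neither the callback nor the notifier_block can live in discardable init memory.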
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 9cb52767999a..603df4f74640 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -458,10 +458,8 @@ static struct kobj_type threshold_ktype = {
 	.default_attrs = default_attrs,
 };
 
-static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
-					       unsigned int bank,
-					       unsigned int block,
-					       u32 address)
+static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
+				     unsigned int block, u32 address)
 {
 	struct threshold_block *b = NULL;
 	u32 low, high;
@@ -543,7 +541,7 @@ out_free:
 	return err;
 }
 
-static __cpuinit int __threshold_add_blocks(struct threshold_bank *b)
+static int __threshold_add_blocks(struct threshold_bank *b)
 {
 	struct list_head *head = &b->blocks->miscj;
 	struct threshold_block *pos = NULL;
@@ -567,7 +565,7 @@ static __cpuinit int __threshold_add_blocks(struct threshold_bank *b)
 	return err;
 }
 
-static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
+static int threshold_create_bank(unsigned int cpu, unsigned int bank)
 {
 	struct device *dev = per_cpu(mce_device, cpu);
 	struct amd_northbridge *nb = NULL;
@@ -632,7 +630,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 }
 
 /* create dir/files for all valid threshold banks */
-static __cpuinit int threshold_create_device(unsigned int cpu)
+static int threshold_create_device(unsigned int cpu)
 {
 	unsigned int bank;
 	struct threshold_bank **bp;
@@ -736,7 +734,7 @@ static void threshold_remove_device(unsigned int cpu)
 }
 
 /* get notified when a cpu comes on/off */
-static void __cpuinit
+static void
 amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu)
 {
 	switch (action) {
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 98f2083832eb..3eec7de76efb 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -55,12 +55,24 @@ struct thermal_state {
 	struct _thermal_state package_power_limit;
 	struct _thermal_state core_thresh0;
 	struct _thermal_state core_thresh1;
+	struct _thermal_state pkg_thresh0;
+	struct _thermal_state pkg_thresh1;
 };
 
 /* Callback to handle core threshold interrupts */
 int (*platform_thermal_notify)(__u64 msr_val);
 EXPORT_SYMBOL(platform_thermal_notify);
 
+/* Callback to handle package threshold interrupts */
+int (*platform_thermal_package_notify)(__u64 msr_val);
+EXPORT_SYMBOL_GPL(platform_thermal_package_notify);
+
+/* Callback support for rate control: return true if the
+ * callback implements its own rate control */
+bool (*platform_thermal_package_rate_control)(void);
+EXPORT_SYMBOL_GPL(platform_thermal_package_rate_control);
+
 static DEFINE_PER_CPU(struct thermal_state, thermal_state);
 
 static atomic_t therm_throt_en = ATOMIC_INIT(0);
@@ -195,19 +207,25 @@ static int therm_throt_process(bool new_event, int event, int level)
 	return 0;
 }
 
-static int thresh_event_valid(int event)
+static int thresh_event_valid(int level, int event)
 {
 	struct _thermal_state *state;
 	unsigned int this_cpu = smp_processor_id();
 	struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);
 	u64 now = get_jiffies_64();
 
-	state = (event == 0) ? &pstate->core_thresh0 : &pstate->core_thresh1;
+	if (level == PACKAGE_LEVEL)
+		state = (event == 0) ? &pstate->pkg_thresh0 :
+				&pstate->pkg_thresh1;
+	else
+		state = (event == 0) ? &pstate->core_thresh0 :
+				&pstate->core_thresh1;
 
 	if (time_before64(now, state->next_check))
 		return 0;
 
 	state->next_check = now + CHECK_INTERVAL;
+
 	return 1;
 }
 
@@ -222,8 +240,7 @@ __setup("int_pln_enable", int_pln_enable_setup);
 
 #ifdef CONFIG_SYSFS
 /* Add/Remove thermal_throttle interface for CPU device: */
-static __cpuinit int thermal_throttle_add_dev(struct device *dev,
-					      unsigned int cpu)
+static int thermal_throttle_add_dev(struct device *dev, unsigned int cpu)
 {
 	int err;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -249,7 +266,7 @@ static __cpuinit int thermal_throttle_add_dev(struct device *dev,
 	return err;
 }
 
-static __cpuinit void thermal_throttle_remove_dev(struct device *dev)
+static void thermal_throttle_remove_dev(struct device *dev)
 {
 	sysfs_remove_group(&dev->kobj, &thermal_attr_group);
 }
@@ -258,7 +275,7 @@ static __cpuinit void thermal_throttle_remove_dev(struct device *dev)
 static DEFINE_MUTEX(therm_cpu_lock);
 
 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
-static __cpuinit int
+static int
 thermal_throttle_cpu_callback(struct notifier_block *nfb,
 			      unsigned long action,
 			      void *hcpu)
@@ -289,7 +306,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
 	return notifier_from_errno(err);
 }
 
-static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
+static struct notifier_block thermal_throttle_cpu_notifier =
 {
 	.notifier_call = thermal_throttle_cpu_callback,
 };
@@ -322,6 +339,39 @@ device_initcall(thermal_throttle_init_device);
 
 #endif /* CONFIG_SYSFS */
 
+static void notify_package_thresholds(__u64 msr_val)
+{
+	bool notify_thres_0 = false;
+	bool notify_thres_1 = false;
+
+	if (!platform_thermal_package_notify)
+		return;
+
+	/* lower threshold check */
+	if (msr_val & THERM_LOG_THRESHOLD0)
+		notify_thres_0 = true;
+	/* higher threshold check */
+	if (msr_val & THERM_LOG_THRESHOLD1)
+		notify_thres_1 = true;
+
+	if (!notify_thres_0 && !notify_thres_1)
+		return;
+
+	if (platform_thermal_package_rate_control &&
+	    platform_thermal_package_rate_control()) {
+		/* Rate control is implemented in callback */
+		platform_thermal_package_notify(msr_val);
+		return;
+	}
+
+	/* lower threshold reached */
+	if (notify_thres_0 && thresh_event_valid(PACKAGE_LEVEL, 0))
+		platform_thermal_package_notify(msr_val);
+	/* higher threshold reached */
+	if (notify_thres_1 && thresh_event_valid(PACKAGE_LEVEL, 1))
+		platform_thermal_package_notify(msr_val);
+}
+
 static void notify_thresholds(__u64 msr_val)
 {
 	/* check whether the interrupt handler is defined;
@@ -331,10 +381,12 @@ static void notify_thresholds(__u64 msr_val)
 		return;
 
 	/* lower threshold reached */
-	if ((msr_val & THERM_LOG_THRESHOLD0) && thresh_event_valid(0))
+	if ((msr_val & THERM_LOG_THRESHOLD0) &&
+	    thresh_event_valid(CORE_LEVEL, 0))
 		platform_thermal_notify(msr_val);
 	/* higher threshold reached */
-	if ((msr_val & THERM_LOG_THRESHOLD1) && thresh_event_valid(1))
+	if ((msr_val & THERM_LOG_THRESHOLD1) &&
+	    thresh_event_valid(CORE_LEVEL, 1))
 		platform_thermal_notify(msr_val);
 }
 
@@ -360,6 +412,8 @@ static void intel_thermal_interrupt(void)
 
 	if (this_cpu_has(X86_FEATURE_PTS)) {
 		rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
+		/* check violations of package thermal thresholds */
+		notify_package_thresholds(msr_val);
 		therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
 					THERMAL_THROTTLING_EVENT,
 					PACKAGE_LEVEL);
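To see how the two new hooks are meant to be consumed: a platform thermal driver assigns the pointers at init and clears them on exit, and if it opts into rate control, notify_package_thresholds() skips the CHECK_INTERVAL throttling in thresh_event_valid(). A hedged sketch follows; the pkg_temp_* names are hypothetical, and the assumption that the extern declarations live in asm/mce.h matches this series' header changes but is not shown in this hunk.

#include <linux/init.h>
#include <asm/mce.h>	/* assumed home of the platform_thermal_* externs */

static int pkg_temp_notify(__u64 msr_val)
{
	/* Runs from the thermal interrupt path: defer real work. */
	return 0;
}

static bool pkg_temp_rate_control(void)
{
	/* Returning true tells notify_package_thresholds() that the
	 * callback does its own throttling, bypassing CHECK_INTERVAL. */
	return true;
}

static int __init pkg_temp_init(void)
{
	platform_thermal_package_notify = pkg_temp_notify;
	platform_thermal_package_rate_control = pkg_temp_rate_control;
	return 0;
}

static void __exit pkg_temp_exit(void)
{
	platform_thermal_package_notify = NULL;
	platform_thermal_package_rate_control = NULL;
}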
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index ca22b73aaa25..f961de9964c7 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -51,9 +51,13 @@
 #include <asm/e820.h>
 #include <asm/mtrr.h>
 #include <asm/msr.h>
+#include <asm/pat.h>
 
 #include "mtrr.h"
 
+/* arch_phys_wc_add returns an MTRR register index plus this offset. */
+#define MTRR_TO_PHYS_WC_OFFSET 1000
+
 u32 num_var_ranges;
 
 unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
@@ -525,6 +529,73 @@ int mtrr_del(int reg, unsigned long base, unsigned long size)
 }
 EXPORT_SYMBOL(mtrr_del);
 
+/**
+ * arch_phys_wc_add - add a WC MTRR and handle errors if PAT is unavailable
+ * @base: Physical base address
+ * @size: Size of region
+ *
+ * If PAT is available, this does nothing.  If PAT is unavailable, it
+ * attempts to add a WC MTRR covering size bytes starting at base and
+ * logs an error if this fails.
+ *
+ * Drivers must store the return value to pass to arch_phys_wc_del(),
+ * but drivers should not try to interpret that return value.
+ */
+int arch_phys_wc_add(unsigned long base, unsigned long size)
+{
+	int ret;
+
+	if (pat_enabled)
+		return 0;  /* Success!  (We don't need to do anything.) */
+
+	ret = mtrr_add(base, size, MTRR_TYPE_WRCOMB, true);
+	if (ret < 0) {
+		pr_warn("Failed to add WC MTRR for [%p-%p]; performance may suffer.\n",
+			(void *)base, (void *)(base + size - 1));
+		return ret;
+	}
+	return ret + MTRR_TO_PHYS_WC_OFFSET;
+}
+EXPORT_SYMBOL(arch_phys_wc_add);
+
+/*
+ * arch_phys_wc_del - undoes arch_phys_wc_add
+ * @handle: Return value from arch_phys_wc_add
+ *
+ * This cleans up after arch_phys_wc_add().
+ *
+ * The API guarantees that arch_phys_wc_del(error code) and
+ * arch_phys_wc_del(0) do nothing.
+ */
+void arch_phys_wc_del(int handle)
+{
+	if (handle >= 1) {
+		WARN_ON(handle < MTRR_TO_PHYS_WC_OFFSET);
+		mtrr_del(handle - MTRR_TO_PHYS_WC_OFFSET, 0, 0);
+	}
+}
+EXPORT_SYMBOL(arch_phys_wc_del);
+
+/*
+ * phys_wc_to_mtrr_index - translates arch_phys_wc_add's return value
+ * @handle: Return value from arch_phys_wc_add
+ *
+ * This will turn the return value from arch_phys_wc_add into an mtrr
+ * index suitable for debugging.
+ *
+ * Note: There is no legitimate use for this function, except possibly
+ * in a printk line.  Alas, there is an illegitimate use in some ancient
+ * drm ioctls.
+ */
+int phys_wc_to_mtrr_index(int handle)
+{
+	if (handle < MTRR_TO_PHYS_WC_OFFSET)
+		return -1;
+	else
+		return handle - MTRR_TO_PHYS_WC_OFFSET;
+}
+EXPORT_SYMBOL_GPL(phys_wc_to_mtrr_index);
+
 /*
  * HACK ALERT!
  * These should be called implicitly, but we can't yet until all the initcall
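Typical driver usage of the new pair looks like the following hedged sketch (the my_fb structure and the fb_base/fb_size parameters are hypothetical): map the aperture write-combined, request MTRR coverage purely as a PAT fallback, and hand the opaque cookie back on teardown.

#include <linux/io.h>

struct my_fb {
	void __iomem *mmio;
	int wc_cookie;
};

static int my_fb_probe_mem(struct my_fb *fb,
			   unsigned long fb_base, unsigned long fb_size)
{
	fb->mmio = ioremap_wc(fb_base, fb_size);
	if (!fb->mmio)
		return -ENOMEM;

	/* No-op when PAT is on; adds a WC MTRR otherwise.  The return
	 * value is an opaque handle, stored but never interpreted. */
	fb->wc_cookie = arch_phys_wc_add(fb_base, fb_size);
	return 0;
}

static void my_fb_remove_mem(struct my_fb *fb)
{
	/* Safe for error codes and 0, per the API contract above. */
	arch_phys_wc_del(fb->wc_cookie);
	iounmap(fb->mmio);
}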
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 9e581c5cf6d0..a7c7305030cc 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1295,7 +1295,7 @@ perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
 struct event_constraint emptyconstraint;
 struct event_constraint unconstrained;
 
-static int __cpuinit
+static int
 x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index 5f0581e713c2..e09f0bfb7b8f 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -851,7 +851,7 @@ static void clear_APIC_ibs(void *dummy)
851 setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1); 851 setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
852} 852}
853 853
854static int __cpuinit 854static int
855perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) 855perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
856{ 856{
857 switch (action & ~CPU_TASKS_FROZEN) { 857 switch (action & ~CPU_TASKS_FROZEN) {
diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
index 0db655ef3918..639d1289b1ba 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
@@ -491,10 +491,8 @@ static struct perf_amd_iommu __perf_iommu = {
 static __init int amd_iommu_pc_init(void)
 {
 	/* Make sure the IOMMU PC resource is available */
-	if (!amd_iommu_pc_supported()) {
-		pr_err("perf: amd_iommu PMU not installed. No support!\n");
+	if (!amd_iommu_pc_supported())
 		return -ENODEV;
-	}
 
 	_init_perf_amd_iommu(&__perf_iommu, "amd_iommu");
 
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
index c0c661adf03e..754291adec33 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
@@ -288,13 +288,13 @@ static struct pmu amd_l2_pmu = {
 	.read = amd_uncore_read,
 };
 
-static struct amd_uncore * __cpuinit amd_uncore_alloc(unsigned int cpu)
+static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
 {
 	return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
 			cpu_to_node(cpu));
 }
 
-static void __cpuinit amd_uncore_cpu_up_prepare(unsigned int cpu)
+static void amd_uncore_cpu_up_prepare(unsigned int cpu)
 {
 	struct amd_uncore *uncore;
 
@@ -322,8 +322,8 @@ static void __cpuinit amd_uncore_cpu_up_prepare(unsigned int cpu)
 }
 
 static struct amd_uncore *
-__cpuinit amd_uncore_find_online_sibling(struct amd_uncore *this,
-					 struct amd_uncore * __percpu *uncores)
+amd_uncore_find_online_sibling(struct amd_uncore *this,
+			       struct amd_uncore * __percpu *uncores)
 {
 	unsigned int cpu;
 	struct amd_uncore *that;
@@ -348,7 +348,7 @@ __cpuinit amd_uncore_find_online_sibling(struct amd_uncore *this,
 	return this;
 }
 
-static void __cpuinit amd_uncore_cpu_starting(unsigned int cpu)
+static void amd_uncore_cpu_starting(unsigned int cpu)
 {
 	unsigned int eax, ebx, ecx, edx;
 	struct amd_uncore *uncore;
@@ -376,8 +376,8 @@ static void __cpuinit amd_uncore_cpu_starting(unsigned int cpu)
 	}
 }
 
-static void __cpuinit uncore_online(unsigned int cpu,
-				    struct amd_uncore * __percpu *uncores)
+static void uncore_online(unsigned int cpu,
+			  struct amd_uncore * __percpu *uncores)
 {
 	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
 
@@ -388,7 +388,7 @@ static void __cpuinit uncore_online(unsigned int cpu,
 	cpumask_set_cpu(cpu, uncore->active_mask);
 }
 
-static void __cpuinit amd_uncore_cpu_online(unsigned int cpu)
+static void amd_uncore_cpu_online(unsigned int cpu)
 {
 	if (amd_uncore_nb)
 		uncore_online(cpu, amd_uncore_nb);
@@ -397,8 +397,8 @@ static void __cpuinit amd_uncore_cpu_online(unsigned int cpu)
 		uncore_online(cpu, amd_uncore_l2);
 }
 
-static void __cpuinit uncore_down_prepare(unsigned int cpu,
-					  struct amd_uncore * __percpu *uncores)
+static void uncore_down_prepare(unsigned int cpu,
+				struct amd_uncore * __percpu *uncores)
 {
 	unsigned int i;
 	struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);
@@ -423,7 +423,7 @@ static void __cpuinit uncore_down_prepare(unsigned int cpu,
 	}
 }
 
-static void __cpuinit amd_uncore_cpu_down_prepare(unsigned int cpu)
+static void amd_uncore_cpu_down_prepare(unsigned int cpu)
 {
 	if (amd_uncore_nb)
 		uncore_down_prepare(cpu, amd_uncore_nb);
@@ -432,8 +432,7 @@ static void __cpuinit amd_uncore_cpu_down_prepare(unsigned int cpu)
 		uncore_down_prepare(cpu, amd_uncore_l2);
 }
 
-static void __cpuinit uncore_dead(unsigned int cpu,
-				  struct amd_uncore * __percpu *uncores)
+static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
 {
 	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
 
@@ -445,7 +444,7 @@ static void __cpuinit uncore_dead(unsigned int cpu,
 	*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
 }
 
-static void __cpuinit amd_uncore_cpu_dead(unsigned int cpu)
+static void amd_uncore_cpu_dead(unsigned int cpu)
 {
 	if (amd_uncore_nb)
 		uncore_dead(cpu, amd_uncore_nb);
@@ -454,7 +453,7 @@ static void __cpuinit amd_uncore_cpu_dead(unsigned int cpu)
 		uncore_dead(cpu, amd_uncore_l2);
 }
 
-static int __cpuinit
+static int
 amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action,
 			void *hcpu)
 {
@@ -489,7 +488,7 @@ amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block amd_uncore_cpu_notifier_block __cpuinitdata = {
+static struct notifier_block amd_uncore_cpu_notifier_block = {
 	.notifier_call = amd_uncore_cpu_notifier,
 	.priority = CPU_PRI_PERF + 1,
 };
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 9dd99751ccf9..cad791dbde95 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -3297,7 +3297,7 @@ static void __init uncore_pci_exit(void)
 /* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */
 static LIST_HEAD(boxes_to_free);
 
-static void __cpuinit uncore_kfree_boxes(void)
+static void uncore_kfree_boxes(void)
 {
 	struct intel_uncore_box *box;
 
@@ -3309,7 +3309,7 @@ static void __cpuinit uncore_kfree_boxes(void)
 	}
 }
 
-static void __cpuinit uncore_cpu_dying(int cpu)
+static void uncore_cpu_dying(int cpu)
 {
 	struct intel_uncore_type *type;
 	struct intel_uncore_pmu *pmu;
@@ -3328,7 +3328,7 @@ static void __cpuinit uncore_cpu_dying(int cpu)
 	}
 }
 
-static int __cpuinit uncore_cpu_starting(int cpu)
+static int uncore_cpu_starting(int cpu)
 {
 	struct intel_uncore_type *type;
 	struct intel_uncore_pmu *pmu;
@@ -3371,7 +3371,7 @@ static int __cpuinit uncore_cpu_starting(int cpu)
 	return 0;
 }
 
-static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id)
+static int uncore_cpu_prepare(int cpu, int phys_id)
 {
 	struct intel_uncore_type *type;
 	struct intel_uncore_pmu *pmu;
@@ -3397,7 +3397,7 @@ static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id)
 	return 0;
 }
 
-static void __cpuinit
+static void
 uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
 {
 	struct intel_uncore_type *type;
@@ -3435,7 +3435,7 @@ uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_c
 	}
 }
 
-static void __cpuinit uncore_event_exit_cpu(int cpu)
+static void uncore_event_exit_cpu(int cpu)
 {
 	int i, phys_id, target;
 
@@ -3463,7 +3463,7 @@ static void __cpuinit uncore_event_exit_cpu(int cpu)
 	uncore_change_context(pci_uncores, cpu, target);
 }
 
-static void __cpuinit uncore_event_init_cpu(int cpu)
+static void uncore_event_init_cpu(int cpu)
 {
 	int i, phys_id;
 
@@ -3479,8 +3479,8 @@ static void __cpuinit uncore_event_init_cpu(int cpu)
 	uncore_change_context(pci_uncores, -1, cpu);
 }
 
-static int
-	__cpuinit uncore_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+static int uncore_cpu_notifier(struct notifier_block *self,
+			       unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;
 
@@ -3520,7 +3520,7 @@ static int
 	return NOTIFY_OK;
 }
 
-static struct notifier_block uncore_cpu_nb __cpuinitdata = {
+static struct notifier_block uncore_cpu_nb = {
 	.notifier_call = uncore_cpu_notifier,
 	/*
 	 * to migrate uncore events, our notifier should be executed
diff --git a/arch/x86/kernel/cpu/rdrand.c b/arch/x86/kernel/cpu/rdrand.c
index feca286c2bb4..88db010845cb 100644
--- a/arch/x86/kernel/cpu/rdrand.c
+++ b/arch/x86/kernel/cpu/rdrand.c
@@ -52,7 +52,7 @@ static inline int rdrand_long(unsigned long *v)
  */
 #define RESEED_LOOP ((512*128)/sizeof(unsigned long))
 
-void __cpuinit x86_init_rdrand(struct cpuinfo_x86 *c)
+void x86_init_rdrand(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_ARCH_RANDOM
 	unsigned long tmp;
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index d92b5dad15dd..f2cc63e9cf08 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -24,13 +24,13 @@ enum cpuid_regs {
 	CR_EBX
 };
 
-void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
+void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
 {
 	u32 max_level;
 	u32 regs[4];
 	const struct cpuid_bit *cb;
 
-	static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
+	static const struct cpuid_bit cpuid_bits[] = {
 		{ X86_FEATURE_DTHERM,	CR_EAX, 0, 0x00000006, 0 },
 		{ X86_FEATURE_IDA,	CR_EAX, 1, 0x00000006, 0 },
 		{ X86_FEATURE_ARAT,	CR_EAX, 2, 0x00000006, 0 },
diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
index 4397e987a1cf..4c60eaf0571c 100644
--- a/arch/x86/kernel/cpu/topology.c
+++ b/arch/x86/kernel/cpu/topology.c
@@ -26,7 +26,7 @@
  * exists, use it for populating initial_apicid and cpu topology
  * detection.
  */
-void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
+void detect_extended_topology(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
 	unsigned int eax, ebx, ecx, edx, sub_index;
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c
index 28000743bbb0..aa0430d69b90 100644
--- a/arch/x86/kernel/cpu/transmeta.c
+++ b/arch/x86/kernel/cpu/transmeta.c
@@ -5,7 +5,7 @@
 #include <asm/msr.h>
 #include "cpu.h"
 
-static void __cpuinit early_init_transmeta(struct cpuinfo_x86 *c)
+static void early_init_transmeta(struct cpuinfo_x86 *c)
 {
 	u32 xlvl;
 
@@ -17,7 +17,7 @@ static void __cpuinit early_init_transmeta(struct cpuinfo_x86 *c)
 	}
 }
 
-static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
+static void init_transmeta(struct cpuinfo_x86 *c)
 {
 	unsigned int cap_mask, uk, max, dummy;
 	unsigned int cms_rev1, cms_rev2;
@@ -98,7 +98,7 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
 #endif
 }
 
-static const struct cpu_dev __cpuinitconst transmeta_cpu_dev = {
+static const struct cpu_dev transmeta_cpu_dev = {
 	.c_vendor	= "Transmeta",
 	.c_ident	= { "GenuineTMx86", "TransmetaCPU" },
 	.c_early_init	= early_init_transmeta,
diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c
index fd2c37bf7acb..202759a14121 100644
--- a/arch/x86/kernel/cpu/umc.c
+++ b/arch/x86/kernel/cpu/umc.c
@@ -8,7 +8,7 @@
  * so no special init takes place.
  */
 
-static const struct cpu_dev __cpuinitconst umc_cpu_dev = {
+static const struct cpu_dev umc_cpu_dev = {
 	.c_vendor	= "UMC",
 	.c_ident	= { "UMC UMC UMC" },
 	.c_models = {
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 03a36321ec54..7076878404ec 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -122,7 +122,7 @@ static bool __init vmware_platform(void)
  * so that the kernel could just trust the hypervisor with providing a
  * reliable virtual TSC that is suitable for timekeeping.
  */
-static void __cpuinit vmware_set_cpu_features(struct cpuinfo_x86 *c)
+static void vmware_set_cpu_features(struct cpuinfo_x86 *c)
 {
 	set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 	set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 1e4dbcfe6d31..7d9481c743f8 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -137,7 +137,7 @@ static const struct file_operations cpuid_fops = {
 	.open = cpuid_open,
 };
 
-static __cpuinit int cpuid_device_create(int cpu)
+static int cpuid_device_create(int cpu)
 {
 	struct device *dev;
 
@@ -151,9 +151,8 @@ static void cpuid_device_destroy(int cpu)
 	device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
 }
 
-static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
-					      unsigned long action,
-					      void *hcpu)
+static int cpuid_class_cpu_callback(struct notifier_block *nfb,
+				    unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
 	int err = 0;
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index b1581527a236..69eb2fa25494 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -133,7 +133,7 @@ static void x86_of_pci_irq_disable(struct pci_dev *dev)
 {
 }
 
-void __cpuinit x86_of_pci_init(void)
+void x86_of_pci_init(void)
 {
 	pcibios_enable_irq = x86_of_pci_irq_enable;
 	pcibios_disable_irq = x86_of_pci_irq_disable;
@@ -364,9 +364,7 @@ static void dt_add_ioapic_domain(unsigned int ioapic_num,
 	 * and assigned so we can keep the 1:1 mapping which the ioapic
 	 * is having.
 	 */
-	ret = irq_domain_associate_many(id, 0, 0, NR_IRQS_LEGACY);
-	if (ret)
-		pr_err("Error mapping legacy IRQs: %d\n", ret);
+	irq_domain_associate_many(id, 0, 0, NR_IRQS_LEGACY);
 
 	if (num > NR_IRQS_LEGACY) {
 		ret = irq_create_strict_mappings(id, NR_IRQS_LEGACY,
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index e65ddc62e113..5dd87a89f011 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -292,7 +292,6 @@ ENDPROC(start_cpu0)
  * If cpu hotplug is not supported then this code can go in init section
  * which will be freed later
  */
-__CPUINIT
 ENTRY(startup_32_smp)
 	cld
 	movl $(__BOOT_DS),%eax
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 5e4d8a8a5c40..e1aabdb314c8 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -512,21 +512,6 @@ ENTRY(phys_base)
 
 #include "../../x86/xen/xen-head.S"
 
-	.section .bss, "aw", @nobits
-	.align L1_CACHE_BYTES
-ENTRY(idt_table)
-	.skip IDT_ENTRIES * 16
-
-	.align L1_CACHE_BYTES
-ENTRY(debug_idt_table)
-	.skip IDT_ENTRIES * 16
-
-#ifdef CONFIG_TRACING
-	.align L1_CACHE_BYTES
-ENTRY(trace_idt_table)
-	.skip IDT_ENTRIES * 16
-#endif
-
 	__PAGE_ALIGNED_BSS
 NEXT_PAGE(empty_zero_page)
 	.skip PAGE_SIZE
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 02f07634d265..f66ff162dce8 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -393,6 +393,9 @@ void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
393 unregister_hw_breakpoint(t->ptrace_bps[i]); 393 unregister_hw_breakpoint(t->ptrace_bps[i]);
394 t->ptrace_bps[i] = NULL; 394 t->ptrace_bps[i] = NULL;
395 } 395 }
396
397 t->debugreg6 = 0;
398 t->ptrace_dr7 = 0;
396} 399}
397 400
398void hw_breakpoint_restore(void) 401void hw_breakpoint_restore(void)
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index b627746f6b1a..202d24f0f7e7 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -108,9 +108,9 @@ EXPORT_SYMBOL(unlazy_fpu);
 unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
 unsigned int xstate_size;
 EXPORT_SYMBOL_GPL(xstate_size);
-static struct i387_fxsave_struct fx_scratch __cpuinitdata;
+static struct i387_fxsave_struct fx_scratch;
 
-static void __cpuinit mxcsr_feature_mask_init(void)
+static void mxcsr_feature_mask_init(void)
 {
 	unsigned long mask = 0;
 
@@ -124,7 +124,7 @@ static void __cpuinit mxcsr_feature_mask_init(void)
 	mxcsr_feature_mask &= mask;
 }
 
-static void __cpuinit init_thread_xstate(void)
+static void init_thread_xstate(void)
 {
 	/*
 	 * Note that xstate_size might be overwriten later during
@@ -153,7 +153,7 @@ static void __cpuinit init_thread_xstate(void)
  * into all processes.
  */
 
-void __cpuinit fpu_init(void)
+void fpu_init(void)
 {
 	unsigned long cr0;
 	unsigned long cr4_mask = 0;
@@ -608,7 +608,7 @@ static int __init no_387(char *s)
 
 __setup("no387", no_387);
 
-void __cpuinit fpu_detect(struct cpuinfo_x86 *c)
+void fpu_detect(struct cpuinfo_x86 *c)
 {
 	unsigned long cr0;
 	u16 fsw, fcw;
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 344faf8d0d62..4186755f1d7c 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -119,7 +119,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
 /*
  * allocate per-cpu stacks for hardirq and for softirq processing
  */
-void __cpuinit irq_ctx_init(int cpu)
+void irq_ctx_init(int cpu)
 {
 	union irq_ctx *irqctx;
 
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index cd6d9a5a42f6..a96d32cc55b8 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -320,7 +320,7 @@ static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
 	apic_write(APIC_EOI, APIC_EOI_ACK);
 }
 
-void __cpuinit kvm_guest_cpu_init(void)
+void kvm_guest_cpu_init(void)
 {
 	if (!kvm_para_available())
 		return;
@@ -421,7 +421,7 @@ static void __init kvm_smp_prepare_boot_cpu(void)
 	native_smp_prepare_boot_cpu();
 }
 
-static void __cpuinit kvm_guest_cpu_online(void *dummy)
+static void kvm_guest_cpu_online(void *dummy)
 {
 	kvm_guest_cpu_init();
 }
@@ -435,8 +435,8 @@ static void kvm_guest_cpu_offline(void *dummy)
 	apf_task_wake_all();
 }
 
-static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
-				    unsigned long action, void *hcpu)
+static int kvm_cpu_notify(struct notifier_block *self, unsigned long action,
+			  void *hcpu)
 {
 	int cpu = (unsigned long)hcpu;
 	switch (action) {
@@ -455,7 +455,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
+static struct notifier_block kvm_cpu_notifier = {
 	.notifier_call = kvm_cpu_notify,
 };
 #endif
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 3dd37ebd591b..1570e0741344 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -48,10 +48,9 @@ static struct pvclock_wall_clock wall_clock;
48 * have elapsed since the hypervisor wrote the data. So we try to account for 48 * have elapsed since the hypervisor wrote the data. So we try to account for
49 * that with system time 49 * that with system time
50 */ 50 */
51static unsigned long kvm_get_wallclock(void) 51static void kvm_get_wallclock(struct timespec *now)
52{ 52{
53 struct pvclock_vcpu_time_info *vcpu_time; 53 struct pvclock_vcpu_time_info *vcpu_time;
54 struct timespec ts;
55 int low, high; 54 int low, high;
56 int cpu; 55 int cpu;
57 56
@@ -64,14 +63,12 @@ static unsigned long kvm_get_wallclock(void)
 	cpu = smp_processor_id();
 
 	vcpu_time = &hv_clock[cpu].pvti;
-	pvclock_read_wallclock(&wall_clock, vcpu_time, &ts);
+	pvclock_read_wallclock(&wall_clock, vcpu_time, now);
 
 	preempt_enable();
-
-	return ts.tv_sec;
 }
 
-static int kvm_set_wallclock(unsigned long now)
+static int kvm_set_wallclock(const struct timespec *now)
 {
 	return -1;
 }
@@ -185,7 +182,7 @@ static void kvm_restore_sched_clock_state(void)
 }
 
 #ifdef CONFIG_X86_LOCAL_APIC
-static void __cpuinit kvm_setup_secondary_clock(void)
+static void kvm_setup_secondary_clock(void)
 {
 	/*
 	 * Now that the first cpu already had this clocksource initialized,
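The kvm_get_wallclock()/kvm_set_wallclock() prototype changes track the conversion of the x86 platform wallclock ops from a seconds-based to a timespec-based interface, which lets implementations report sub-second precision. A hedged sketch of what an implementation of the new contract looks like; read_hw_rtc() is a hypothetical stand-in for a real RTC read, not an existing helper.

#include <linux/time.h>

/* Hypothetical implementation of the timespec-based wallclock ops
 * that kvm_get_wallclock()/kvm_set_wallclock() now conform to. */
static void my_get_wallclock(struct timespec *now)
{
	now->tv_sec  = read_hw_rtc();	/* whole seconds */
	now->tv_nsec = 0;		/* sub-second part, if available */
}

static int my_set_wallclock(const struct timespec *now)
{
	/* Return an error when the clock is read-only, as kvmclock does. */
	return -1;
}

These would be wired up through the x86 platform ops (x86_platform.get_wallclock / .set_wallclock), which is how kvmclock installs its versions.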
diff --git a/arch/x86/kernel/microcode_amd_early.c b/arch/x86/kernel/microcode_amd_early.c
index 1ac6e9aee766..1d14ffee5749 100644
--- a/arch/x86/kernel/microcode_amd_early.c
+++ b/arch/x86/kernel/microcode_amd_early.c
@@ -82,7 +82,7 @@ static struct cpio_data __init find_ucode_in_initrd(void)
82 * load_microcode_amd() to save equivalent cpu table and microcode patches in 82 * load_microcode_amd() to save equivalent cpu table and microcode patches in
83 * kernel heap memory. 83 * kernel heap memory.
84 */ 84 */
85static void __cpuinit apply_ucode_in_initrd(void *ucode, size_t size) 85static void apply_ucode_in_initrd(void *ucode, size_t size)
86{ 86{
87 struct equiv_cpu_entry *eq; 87 struct equiv_cpu_entry *eq;
88 u32 *header; 88 u32 *header;
@@ -206,7 +206,7 @@ u8 amd_bsp_mpb[MPB_MAX_SIZE];
  * save_microcode_in_initrd_amd() BSP's patch is copied to amd_bsp_mpb, which
  * is used upon resume from suspend.
  */
-void __cpuinit load_ucode_amd_ap(void)
+void load_ucode_amd_ap(void)
 {
 	struct microcode_amd *mc;
 	unsigned long *initrd;
@@ -238,7 +238,7 @@ static void __init collect_cpu_sig_on_bsp(void *arg)
 	uci->cpu_sig.sig = cpuid_eax(0x00000001);
 }
 #else
-static void __cpuinit collect_cpu_info_amd_early(struct cpuinfo_x86 *c,
-						 struct ucode_cpu_info *uci)
+static void collect_cpu_info_amd_early(struct cpuinfo_x86 *c,
+				       struct ucode_cpu_info *uci)
 {
 	u32 rev, eax;
@@ -252,7 +252,7 @@ static void __cpuinit collect_cpu_info_amd_early(struct cpuinfo_x86 *c,
 	c->x86 = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
 }
 
-void __cpuinit load_ucode_amd_ap(void)
+void load_ucode_amd_ap(void)
 {
 	unsigned int cpu = smp_processor_id();
 
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 22db92bbdf1a..15c987698b0f 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -468,7 +468,7 @@ static struct syscore_ops mc_syscore_ops = {
468 .resume = mc_bp_resume, 468 .resume = mc_bp_resume,
469}; 469};
470 470
471static __cpuinit int 471static int
472mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) 472mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
473{ 473{
474 unsigned int cpu = (unsigned long)hcpu; 474 unsigned int cpu = (unsigned long)hcpu;
diff --git a/arch/x86/kernel/microcode_core_early.c b/arch/x86/kernel/microcode_core_early.c
index 86119f63db0c..be7f8514f577 100644
--- a/arch/x86/kernel/microcode_core_early.c
+++ b/arch/x86/kernel/microcode_core_early.c
@@ -41,7 +41,7 @@
  *
  * x86_vendor() gets vendor information directly through cpuid.
  */
-static int __cpuinit x86_vendor(void)
+static int x86_vendor(void)
 {
 	u32 eax = 0x00000000;
 	u32 ebx, ecx = 0, edx;
@@ -57,7 +57,7 @@ static int __cpuinit x86_vendor(void)
 	return X86_VENDOR_UNKNOWN;
 }
 
-static int __cpuinit x86_family(void)
+static int x86_family(void)
 {
 	u32 eax = 0x00000001;
 	u32 ebx, ecx = 0, edx;
@@ -96,7 +96,7 @@ void __init load_ucode_bsp(void)
 	}
 }
 
-void __cpuinit load_ucode_ap(void)
+void load_ucode_ap(void)
 {
 	int vendor, x86;
 
diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c
index dabef95506f3..1575deb2e636 100644
--- a/arch/x86/kernel/microcode_intel_early.c
+++ b/arch/x86/kernel/microcode_intel_early.c
@@ -34,7 +34,7 @@ struct mc_saved_data {
 	struct microcode_intel **mc_saved;
 } mc_saved_data;
 
-static enum ucode_state __cpuinit
+static enum ucode_state
 generic_load_microcode_early(struct microcode_intel **mc_saved_p,
 			     unsigned int mc_saved_count,
 			     struct ucode_cpu_info *uci)
@@ -69,7 +69,7 @@ out:
 	return state;
 }
 
-static void __cpuinit
+static void
 microcode_pointer(struct microcode_intel **mc_saved,
 		  unsigned long *mc_saved_in_initrd,
 		  unsigned long initrd_start, int mc_saved_count)
@@ -82,7 +82,7 @@ microcode_pointer(struct microcode_intel **mc_saved,
 }
 
 #ifdef CONFIG_X86_32
-static void __cpuinit
+static void
 microcode_phys(struct microcode_intel **mc_saved_tmp,
 	       struct mc_saved_data *mc_saved_data)
 {
@@ -101,7 +101,7 @@ microcode_phys(struct microcode_intel **mc_saved_tmp,
 }
 #endif
 
-static enum ucode_state __cpuinit
+static enum ucode_state
 load_microcode(struct mc_saved_data *mc_saved_data,
 	       unsigned long *mc_saved_in_initrd,
 	       unsigned long initrd_start,
@@ -375,7 +375,7 @@ do { \
 #define native_wrmsr(msr, low, high)		\
 	native_write_msr(msr, low, high);
 
-static int __cpuinit collect_cpu_info_early(struct ucode_cpu_info *uci)
+static int collect_cpu_info_early(struct ucode_cpu_info *uci)
 {
 	unsigned int val[2];
 	u8 x86, x86_model;
@@ -584,7 +584,7 @@ scan_microcode(unsigned long start, unsigned long end,
 /*
  * Print ucode update info.
  */
-static void __cpuinit
+static void
 print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
 {
 	int cpu = smp_processor_id();
@@ -605,7 +605,7 @@ static int current_mc_date;
 /*
  * Print early updated ucode info after printk works. This is delayed info dump.
  */
-void __cpuinit show_ucode_info_early(void)
+void show_ucode_info_early(void)
 {
 	struct ucode_cpu_info uci;
 
@@ -621,7 +621,7 @@ void __cpuinit show_ucode_info_early(void)
  * mc_saved_data.mc_saved and delay printing microcode info in
  * show_ucode_info_early() until printk() works.
  */
-static void __cpuinit print_ucode(struct ucode_cpu_info *uci)
+static void print_ucode(struct ucode_cpu_info *uci)
 {
 	struct microcode_intel *mc_intel;
 	int *delay_ucode_info_p;
@@ -643,12 +643,12 @@ static void __cpuinit print_ucode(struct ucode_cpu_info *uci)
  * Flush global tlb. We only do this in x86_64 where paging has been enabled
  * already and PGE should be enabled as well.
  */
-static inline void __cpuinit flush_tlb_early(void)
+static inline void flush_tlb_early(void)
 {
 	__native_flush_tlb_global_irq_disabled();
 }
 
-static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci)
+static inline void print_ucode(struct ucode_cpu_info *uci)
 {
 	struct microcode_intel *mc_intel;
 
@@ -660,8 +660,8 @@ static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci)
 }
 #endif
 
-static int __cpuinit apply_microcode_early(struct mc_saved_data *mc_saved_data,
-					   struct ucode_cpu_info *uci)
+static int apply_microcode_early(struct mc_saved_data *mc_saved_data,
+				 struct ucode_cpu_info *uci)
 {
 	struct microcode_intel *mc_intel;
 	unsigned int val[2];
@@ -763,7 +763,7 @@ load_ucode_intel_bsp(void)
 #endif
 }
 
-void __cpuinit load_ucode_intel_ap(void)
+void load_ucode_intel_ap(void)
 {
 	struct mc_saved_data *mc_saved_data_p;
 	struct ucode_cpu_info uci;
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index ac861b8348e2..f4c886d9165c 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -24,14 +24,14 @@ struct pci_hostbridge_probe {
 	u32 device;
 };
 
-static u64 __cpuinitdata fam10h_pci_mmconf_base;
+static u64 fam10h_pci_mmconf_base;
 
-static struct pci_hostbridge_probe pci_probes[] __cpuinitdata = {
+static struct pci_hostbridge_probe pci_probes[] = {
 	{ 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 },
 	{ 0xff, 0, PCI_VENDOR_ID_AMD, 0x1200 },
 };
 
-static int __cpuinit cmp_range(const void *x1, const void *x2)
+static int cmp_range(const void *x1, const void *x2)
 {
 	const struct range *r1 = x1;
 	const struct range *r2 = x2;
@@ -49,7 +49,7 @@ static int __cpuinit cmp_range(const void *x1, const void *x2)
 /* need to avoid (0xfd<<32), (0xfe<<32), and (0xff<<32), ht used space */
 #define FAM10H_PCI_MMCONF_BASE (0xfcULL<<32)
 #define BASE_VALID(b) ((b) + MMCONF_SIZE <= (0xfdULL<<32) || (b) >= (1ULL<<40))
-static void __cpuinit get_fam10h_pci_mmconf_base(void)
+static void get_fam10h_pci_mmconf_base(void)
 {
 	int i;
 	unsigned bus;
@@ -166,7 +166,7 @@ out:
 	fam10h_pci_mmconf_base = base;
 }
 
-void __cpuinit fam10h_check_enable_mmcfg(void)
+void fam10h_check_enable_mmcfg(void)
 {
 	u64 val;
 	u32 address;
@@ -230,7 +230,7 @@ static const struct dmi_system_id __initconst mmconf_dmi_table[] = {
 	{}
 };
 
-/* Called from a __cpuinit function, but only on the BSP. */
+/* Called from a non __init function, but only on the BSP. */
 void __ref check_enable_amd_mmconf_dmi(void)
 {
 	dmi_check_system(mmconf_dmi_table);
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index ce130493b802..88458faea2f8 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -200,7 +200,7 @@ static const struct file_operations msr_fops = {
 	.compat_ioctl = msr_ioctl,
 };
 
-static int __cpuinit msr_device_create(int cpu)
+static int msr_device_create(int cpu)
 {
 	struct device *dev;
 
@@ -214,8 +214,8 @@ static void msr_device_destroy(int cpu)
 	device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
 }
 
-static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
-					    unsigned long action, void *hcpu)
+static int msr_class_cpu_callback(struct notifier_block *nfb,
+				  unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
 	int err = 0;
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 0920212e6159..ba77ebc2c353 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -111,7 +111,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
 	 */
 	list_for_each_entry_rcu(a, &desc->head, list) {
 		u64 before, delta, whole_msecs;
-		int decimal_msecs, thishandled;
+		int remainder_ns, decimal_msecs, thishandled;
 
 		before = local_clock();
 		thishandled = a->handler(type, regs);
@@ -123,8 +123,9 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
 			continue;
 
 		nmi_longest_ns = delta;
-		whole_msecs = do_div(delta, (1000 * 1000));
-		decimal_msecs = do_div(delta, 1000) % 1000;
+		whole_msecs = delta;
+		remainder_ns = do_div(whole_msecs, (1000 * 1000));
+		decimal_msecs = remainder_ns / 1000;
 		printk_ratelimited(KERN_INFO
 			"INFO: NMI handler (%ps) took too long to run: "
 			"%lld.%03d msecs\n", a->handler, whole_msecs,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 81a5f5e8f142..83369e5a1d27 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -398,7 +398,7 @@ static void amd_e400_idle(void)
 		default_idle();
 }
 
-void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
+void select_idle_routine(const struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
 	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 29a8120e6fe8..7461f50d5bb1 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -601,30 +601,48 @@ static unsigned long ptrace_get_dr7(struct perf_event *bp[])
 	return dr7;
 }
 
-static int
-ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
-			 struct task_struct *tsk, int disabled)
+static int ptrace_fill_bp_fields(struct perf_event_attr *attr,
+				 int len, int type, bool disabled)
+{
+	int err, bp_len, bp_type;
+
+	err = arch_bp_generic_fields(len, type, &bp_len, &bp_type);
+	if (!err) {
+		attr->bp_len = bp_len;
+		attr->bp_type = bp_type;
+		attr->disabled = disabled;
+	}
+
+	return err;
+}
+
+static struct perf_event *
+ptrace_register_breakpoint(struct task_struct *tsk, int len, int type,
+			   unsigned long addr, bool disabled)
 {
-	int err;
-	int gen_len, gen_type;
 	struct perf_event_attr attr;
+	int err;
 
-	/*
-	 * We should have at least an inactive breakpoint at this
-	 * slot. It means the user is writing dr7 without having
-	 * written the address register first
-	 */
-	if (!bp)
-		return -EINVAL;
+	ptrace_breakpoint_init(&attr);
+	attr.bp_addr = addr;
 
-	err = arch_bp_generic_fields(len, type, &gen_len, &gen_type);
-	if (err)
-		return err;
+	err = ptrace_fill_bp_fields(&attr, len, type, disabled);
+	if (err)
+		return ERR_PTR(err);
+
+	return register_user_hw_breakpoint(&attr, ptrace_triggered,
+					   NULL, tsk);
+}
 
-	attr = bp->attr;
-	attr.bp_len = gen_len;
-	attr.bp_type = gen_type;
-	attr.disabled = disabled;
+static int ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
+				    int disabled)
+{
+	struct perf_event_attr attr = bp->attr;
+	int err;
+
+	err = ptrace_fill_bp_fields(&attr, len, type, disabled);
+	if (err)
+		return err;
 
 	return modify_user_hw_breakpoint(bp, &attr);
 }
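The new ptrace_register_breakpoint() helper above is a thin wrapper around the generic register_user_hw_breakpoint() API, which any kernel code can use. A hedged, self-contained sketch of that API outside ptrace; my_bp_handler, watched_var and install_watchpoint are hypothetical names.

#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/printk.h>

static unsigned long watched_var;

/* Called by the perf core when the breakpoint fires. */
static void my_bp_handler(struct perf_event *bp,
			  struct perf_sample_data *data,
			  struct pt_regs *regs)
{
	pr_info("write to watched_var from %pS\n", (void *)regs->ip);
}

static struct perf_event *install_watchpoint(struct task_struct *tsk)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);	/* like ptrace_breakpoint_init() */
	attr.bp_addr = (unsigned long)&watched_var;
	attr.bp_len  = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;	/* fire on 4-byte writes */

	/* Returns a perf_event or ERR_PTR(), exactly as used above. */
	return register_user_hw_breakpoint(&attr, my_bp_handler, NULL, tsk);
}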
@@ -634,67 +652,50 @@ ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
  */
 static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data)
 {
-	struct thread_struct *thread = &(tsk->thread);
+	struct thread_struct *thread = &tsk->thread;
 	unsigned long old_dr7;
-	int i, orig_ret = 0, rc = 0;
-	int enabled, second_pass = 0;
-	unsigned len, type;
-	struct perf_event *bp;
-
-	if (ptrace_get_breakpoints(tsk) < 0)
-		return -ESRCH;
+	bool second_pass = false;
+	int i, rc, ret = 0;
 
 	data &= ~DR_CONTROL_RESERVED;
 	old_dr7 = ptrace_get_dr7(thread->ptrace_bps);
+
 restore:
-	/*
-	 * Loop through all the hardware breakpoints, making the
-	 * appropriate changes to each.
-	 */
+	rc = 0;
 	for (i = 0; i < HBP_NUM; i++) {
-		enabled = decode_dr7(data, i, &len, &type);
-		bp = thread->ptrace_bps[i];
-
-		if (!enabled) {
-			if (bp) {
-				/*
-				 * Don't unregister the breakpoints right-away,
-				 * unless all register_user_hw_breakpoint()
-				 * requests have succeeded. This prevents
-				 * any window of opportunity for debug
-				 * register grabbing by other users.
-				 */
-				if (!second_pass)
-					continue;
-
-				rc = ptrace_modify_breakpoint(bp, len, type,
-							      tsk, 1);
-				if (rc)
-					break;
+		unsigned len, type;
+		bool disabled = !decode_dr7(data, i, &len, &type);
+		struct perf_event *bp = thread->ptrace_bps[i];
+
+		if (!bp) {
+			if (disabled)
+				continue;
+
+			bp = ptrace_register_breakpoint(tsk,
+					len, type, 0, disabled);
+			if (IS_ERR(bp)) {
+				rc = PTR_ERR(bp);
+				break;
 			}
+
+			thread->ptrace_bps[i] = bp;
 			continue;
 		}
 
-		rc = ptrace_modify_breakpoint(bp, len, type, tsk, 0);
+		rc = ptrace_modify_breakpoint(bp, len, type, disabled);
 		if (rc)
 			break;
 	}
-	/*
-	 * Make a second pass to free the remaining unused breakpoints
-	 * or to restore the original breakpoints if an error occurred.
-	 */
-	if (!second_pass) {
-		second_pass = 1;
-		if (rc < 0) {
-			orig_ret = rc;
-			data = old_dr7;
-		}
+
+	/* Restore if the first pass failed, second_pass shouldn't fail. */
+	if (rc && !WARN_ON(second_pass)) {
+		ret = rc;
+		data = old_dr7;
+		second_pass = true;
 		goto restore;
 	}
 
-	ptrace_put_breakpoints(tsk);
-
-	return ((orig_ret < 0) ? orig_ret : rc);
+	return ret;
 }
 
 /*
699 700
700/* 701/*
@@ -702,25 +703,17 @@ restore:
702 */ 703 */
703static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n) 704static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
704{ 705{
705 struct thread_struct *thread = &(tsk->thread); 706 struct thread_struct *thread = &tsk->thread;
706 unsigned long val = 0; 707 unsigned long val = 0;
707 708
708 if (n < HBP_NUM) { 709 if (n < HBP_NUM) {
709 struct perf_event *bp; 710 struct perf_event *bp = thread->ptrace_bps[n];
710 711
711 if (ptrace_get_breakpoints(tsk) < 0) 712 if (bp)
712 return -ESRCH;
713
714 bp = thread->ptrace_bps[n];
715 if (!bp)
716 val = 0;
717 else
718 val = bp->hw.info.address; 713 val = bp->hw.info.address;
719
720 ptrace_put_breakpoints(tsk);
721 } else if (n == 6) { 714 } else if (n == 6) {
722 val = thread->debugreg6; 715 val = thread->debugreg6;
723 } else if (n == 7) { 716 } else if (n == 7) {
724 val = thread->ptrace_dr7; 717 val = thread->ptrace_dr7;
725 } 718 }
726 return val; 719 return val;
@@ -729,29 +722,14 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
729static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, 722static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
730 unsigned long addr) 723 unsigned long addr)
731{ 724{
732 struct perf_event *bp;
733 struct thread_struct *t = &tsk->thread; 725 struct thread_struct *t = &tsk->thread;
734 struct perf_event_attr attr; 726 struct perf_event *bp = t->ptrace_bps[nr];
735 int err = 0; 727 int err = 0;
736 728
737 if (ptrace_get_breakpoints(tsk) < 0) 729 if (!bp) {
738 return -ESRCH;
739
740 if (!t->ptrace_bps[nr]) {
741 ptrace_breakpoint_init(&attr);
742 /*
743 * Put stub len and type to register (reserve) an inactive but
744 * correct bp
745 */
746 attr.bp_addr = addr;
747 attr.bp_len = HW_BREAKPOINT_LEN_1;
748 attr.bp_type = HW_BREAKPOINT_W;
749 attr.disabled = 1;
750
751 bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
752 NULL, tsk);
753
754 /* 730 /*
731 * Put stub len and type to create an inactive but correct bp.
732 *
755 * CHECKME: the previous code returned -EIO if the addr wasn't 733 * CHECKME: the previous code returned -EIO if the addr wasn't
756 * a valid task virtual addr. The new one will return -EINVAL in 734 * a valid task virtual addr. The new one will return -EINVAL in
757 * this case. 735 * this case.
@@ -760,22 +738,20 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
760 * writing for the user. And anyway this is the previous 738 * writing for the user. And anyway this is the previous
761 * behaviour. 739 * behaviour.
762 */ 740 */
763 if (IS_ERR(bp)) { 741 bp = ptrace_register_breakpoint(tsk,
742 X86_BREAKPOINT_LEN_1, X86_BREAKPOINT_WRITE,
743 addr, true);
744 if (IS_ERR(bp))
764 err = PTR_ERR(bp); 745 err = PTR_ERR(bp);
765 goto put; 746 else
766 } 747 t->ptrace_bps[nr] = bp;
767
768 t->ptrace_bps[nr] = bp;
769 } else { 748 } else {
770 bp = t->ptrace_bps[nr]; 749 struct perf_event_attr attr = bp->attr;
771 750
772 attr = bp->attr;
773 attr.bp_addr = addr; 751 attr.bp_addr = addr;
774 err = modify_user_hw_breakpoint(bp, &attr); 752 err = modify_user_hw_breakpoint(bp, &attr);
775 } 753 }
776 754
777put:
778 ptrace_put_breakpoints(tsk);
779 return err; 755 return err;
780} 756}
781 757
@@ -785,30 +761,20 @@ put:
785static int ptrace_set_debugreg(struct task_struct *tsk, int n, 761static int ptrace_set_debugreg(struct task_struct *tsk, int n,
786 unsigned long val) 762 unsigned long val)
787{ 763{
788 struct thread_struct *thread = &(tsk->thread); 764 struct thread_struct *thread = &tsk->thread;
789 int rc = 0;
790
791 /* There are no DR4 or DR5 registers */ 765 /* There are no DR4 or DR5 registers */
792 if (n == 4 || n == 5) 766 int rc = -EIO;
793 return -EIO;
794 767
795 if (n == 6) {
796 thread->debugreg6 = val;
797 goto ret_path;
798 }
799 if (n < HBP_NUM) { 768 if (n < HBP_NUM) {
800 rc = ptrace_set_breakpoint_addr(tsk, n, val); 769 rc = ptrace_set_breakpoint_addr(tsk, n, val);
801 if (rc) 770 } else if (n == 6) {
802 return rc; 771 thread->debugreg6 = val;
803 } 772 rc = 0;
804 /* All that's left is DR7 */ 773 } else if (n == 7) {
805 if (n == 7) {
806 rc = ptrace_write_dr7(tsk, val); 774 rc = ptrace_write_dr7(tsk, val);
807 if (!rc) 775 if (!rc)
808 thread->ptrace_dr7 = val; 776 thread->ptrace_dr7 = val;
809 } 777 }
810
811ret_path:
812 return rc; 778 return rc;
813} 779}
814 780
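
A note on the error convention the ptrace rework above leans on: ptrace_register_breakpoint() reports failure through the returned pointer itself, which is why the callers test IS_ERR(bp) instead of threading a separate status variable. A minimal standalone sketch of the ERR_PTR()/IS_ERR()/PTR_ERR() idiom follows; it is an illustration, not the kernel's <linux/err.h>:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* error codes live in the top MAX_ERRNO addresses */
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static int dummy_event;

/* stand-in for ptrace_register_breakpoint(): fail when disabled < 0 */
static void *register_bp(int disabled)
{
	if (disabled < 0)
		return ERR_PTR(-EINVAL);
	return &dummy_event;
}

int main(void)
{
	void *bp = register_bp(-1);

	if (IS_ERR(bp))
		printf("registration failed: %ld\n", PTR_ERR(bp));
	return 0;
}
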
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 76fa1e9a2b39..563ed91e6faa 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -36,22 +36,6 @@ void (*pm_power_off)(void);
 EXPORT_SYMBOL(pm_power_off);
 
 static const struct desc_ptr no_idt = {};
-static int reboot_mode;
-enum reboot_type reboot_type = BOOT_ACPI;
-int reboot_force;
-
-/*
- * This variable is used privately to keep track of whether or not
- * reboot_type is still set to its default value (i.e., reboot= hasn't
- * been set on the command line). This is needed so that we can
- * suppress DMI scanning for reboot quirks. Without it, it's
- * impossible to override a faulty reboot quirk without recompiling.
- */
-static int reboot_default = 1;
-
-#ifdef CONFIG_SMP
-static int reboot_cpu = -1;
-#endif
 
 /*
  * This is set if we need to go through the 'emergency' path.
@@ -64,79 +48,6 @@ static int reboot_emergency;
 bool port_cf9_safe = false;
 
 /*
- * reboot=b[ios] | s[mp] | t[riple] | k[bd] | e[fi] [, [w]arm | [c]old] | p[ci]
- * warm   Don't set the cold reboot flag
- * cold   Set the cold reboot flag
- * bios   Reboot by jumping through the BIOS
- * smp    Reboot by executing reset on BSP or other CPU
- * triple Force a triple fault (init)
- * kbd    Use the keyboard controller. cold reset (default)
- * acpi   Use the RESET_REG in the FADT
- * efi    Use efi reset_system runtime service
- * pci    Use the so-called "PCI reset register", CF9
- * force  Avoid anything that could hang.
- */
-static int __init reboot_setup(char *str)
-{
-	for (;;) {
-		/*
-		 * Having anything passed on the command line via
-		 * reboot= will cause us to disable DMI checking
-		 * below.
-		 */
-		reboot_default = 0;
-
-		switch (*str) {
-		case 'w':
-			reboot_mode = 0x1234;
-			break;
-
-		case 'c':
-			reboot_mode = 0;
-			break;
-
-#ifdef CONFIG_SMP
-		case 's':
-			if (isdigit(*(str+1))) {
-				reboot_cpu = (int) (*(str+1) - '0');
-				if (isdigit(*(str+2)))
-					reboot_cpu = reboot_cpu*10 + (int)(*(str+2) - '0');
-			}
-			/*
-			 * We will leave sorting out the final value
-			 * when we are ready to reboot, since we might not
-			 * have detected BSP APIC ID or smp_num_cpu
-			 */
-			break;
-#endif /* CONFIG_SMP */
-
-		case 'b':
-		case 'a':
-		case 'k':
-		case 't':
-		case 'e':
-		case 'p':
-			reboot_type = *str;
-			break;
-
-		case 'f':
-			reboot_force = 1;
-			break;
-		}
-
-		str = strchr(str, ',');
-		if (str)
-			str++;
-		else
-			break;
-	}
-	return 1;
-}
-
-__setup("reboot=", reboot_setup);
-
-
-/*
  * Reboot options and system auto-detection code provided by
  * Dell Inc. so their systems "just work". :-)
  */
@@ -536,6 +447,7 @@ static void native_machine_emergency_restart(void)
 	int i;
 	int attempt = 0;
 	int orig_reboot_type = reboot_type;
+	unsigned short mode;
 
 	if (reboot_emergency)
 		emergency_vmx_disable_all();
@@ -543,7 +455,8 @@ static void native_machine_emergency_restart(void)
 	tboot_shutdown(TB_SHUTDOWN_REBOOT);
 
 	/* Tell the BIOS if we want cold or warm reboot */
-	*((unsigned short *)__va(0x472)) = reboot_mode;
+	mode = reboot_mode == REBOOT_WARM ? 0x1234 : 0;
+	*((unsigned short *)__va(0x472)) = mode;
 
 	for (;;) {
 		/* Could also try the reset bit in the Hammer NB */
@@ -585,7 +498,7 @@ static void native_machine_emergency_restart(void)
 
 	case BOOT_EFI:
 		if (efi_enabled(EFI_RUNTIME_SERVICES))
-			efi.reset_system(reboot_mode ?
+			efi.reset_system(reboot_mode == REBOOT_WARM ?
 					 EFI_RESET_WARM :
 					 EFI_RESET_COLD,
 					 EFI_SUCCESS, 0, NULL);
@@ -614,26 +527,10 @@ void native_machine_shutdown(void)
 {
 	/* Stop the cpus and apics */
 #ifdef CONFIG_SMP
-
-	/* The boot cpu is always logical cpu 0 */
-	int reboot_cpu_id = 0;
-
-	/* See if there has been given a command line override */
-	if ((reboot_cpu != -1) && (reboot_cpu < nr_cpu_ids) &&
-		cpu_online(reboot_cpu))
-		reboot_cpu_id = reboot_cpu;
-
-	/* Make certain the cpu I'm about to reboot on is online */
-	if (!cpu_online(reboot_cpu_id))
-		reboot_cpu_id = smp_processor_id();
-
-	/* Make certain I only run on the appropriate processor */
-	set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id));
-
 	/*
-	 * O.K Now that I'm on the appropriate processor, stop all of the
-	 * others. Also disable the local irq to not receive the per-cpu
-	 * timer interrupt which may trigger scheduler's load balance.
+	 * Stop all of the others. Also disable the local irq to
+	 * not receive the per-cpu timer interrupt which may trigger
+	 * scheduler's load balance.
 	 */
 	local_irq_disable();
 	stop_other_cpus();
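
For context on the mode word written to __va(0x472) above: the BIOS data area interprets 0x1234 there as a warm-boot request, which skips the POST memory test. A small standalone sketch of the flag computation (the enum is a stand-in for the one in <linux/reboot.h> that this series switches to):

#include <stdio.h>

enum reboot_mode { REBOOT_COLD = 0, REBOOT_WARM };

static unsigned short bios_reset_flag(enum reboot_mode reboot_mode)
{
	/* same expression as the patched kernel code */
	return reboot_mode == REBOOT_WARM ? 0x1234 : 0;
}

int main(void)
{
	printf("warm: 0x%04x cold: 0x%04x\n",
	       bios_reset_flag(REBOOT_WARM), bios_reset_flag(REBOOT_COLD));
	return 0;
}
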
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 198eb201ed3b..0aa29394ed6f 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -38,8 +38,9 @@ EXPORT_SYMBOL(rtc_lock);
  * jump to the next second precisely 500 ms later. Check the Motorola
  * MC146818A or Dallas DS12887 data sheet for details.
  */
-int mach_set_rtc_mmss(unsigned long nowtime)
+int mach_set_rtc_mmss(const struct timespec *now)
 {
+	unsigned long nowtime = now->tv_sec;
 	struct rtc_time tm;
 	int retval = 0;
 
@@ -58,7 +59,7 @@ int mach_set_rtc_mmss(unsigned long nowtime)
 	return retval;
 }
 
-unsigned long mach_get_cmos_time(void)
+void mach_get_cmos_time(struct timespec *now)
 {
 	unsigned int status, year, mon, day, hour, min, sec, century = 0;
 	unsigned long flags;
@@ -107,7 +108,8 @@ unsigned long mach_get_cmos_time(void)
 	} else
 		year += CMOS_YEARS_OFFS;
 
-	return mktime(year, mon, day, hour, min, sec);
+	now->tv_sec = mktime(year, mon, day, hour, min, sec);
+	now->tv_nsec = 0;
 }
 
 /* Routines for accessing the CMOS RAM/RTC. */
@@ -135,18 +137,13 @@ EXPORT_SYMBOL(rtc_cmos_write);
 
 int update_persistent_clock(struct timespec now)
 {
-	return x86_platform.set_wallclock(now.tv_sec);
+	return x86_platform.set_wallclock(&now);
 }
 
 /* not static: needed by APM */
 void read_persistent_clock(struct timespec *ts)
 {
-	unsigned long retval;
-
-	retval = x86_platform.get_wallclock();
-
-	ts->tv_sec = retval;
-	ts->tv_nsec = 0;
+	x86_platform.get_wallclock(ts);
 }
 
 
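
The rtc.c conversion is representative of the whole series: every wallclock hook now takes a struct timespec instead of passing bare seconds, so platforms with sub-second clocks stop truncating. A hypothetical platform hook with the new shape, compiled standalone:

#include <stdio.h>
#include <time.h>

/* a platform with only one-second resolution zeroes tv_nsec */
static void demo_get_wallclock(struct timespec *now)
{
	now->tv_sec = 1372636800;	/* seconds since the epoch */
	now->tv_nsec = 0;
}

int main(void)
{
	struct timespec ts;

	demo_get_wallclock(&ts);
	printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
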
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index e68709da8251..f8ec57815c05 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -170,7 +170,7 @@ static struct resource bss_resource = {
 
 #ifdef CONFIG_X86_32
 /* cpu data as detected by the assembly code in head.S */
-struct cpuinfo_x86 new_cpu_data __cpuinitdata = {
+struct cpuinfo_x86 new_cpu_data = {
 	.wp_works_ok = -1,
 };
 /* common cpu data for all cpus */
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index f4fe0b8879e0..cdaa347dfcad 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -265,23 +265,30 @@ void smp_reschedule_interrupt(struct pt_regs *regs)
 	 */
 }
 
-void smp_trace_reschedule_interrupt(struct pt_regs *regs)
+static inline void smp_entering_irq(void)
 {
 	ack_APIC_irq();
+	irq_enter();
+}
+
+void smp_trace_reschedule_interrupt(struct pt_regs *regs)
+{
+	/*
+	 * Need to call irq_enter() before calling the trace point.
+	 * __smp_reschedule_interrupt() calls irq_enter/exit() too (in
+	 * scheduler_ipi(). This is OK, since those functions are allowed
+	 * to nest.
+	 */
+	smp_entering_irq();
 	trace_reschedule_entry(RESCHEDULE_VECTOR);
 	__smp_reschedule_interrupt();
 	trace_reschedule_exit(RESCHEDULE_VECTOR);
+	exiting_irq();
 	/*
 	 * KVM uses this interrupt to force a cpu out of guest mode
 	 */
 }
 
-static inline void call_function_entering_irq(void)
-{
-	ack_APIC_irq();
-	irq_enter();
-}
-
 static inline void __smp_call_function_interrupt(void)
 {
 	generic_smp_call_function_interrupt();
@@ -290,14 +297,14 @@ static inline void __smp_call_function_interrupt(void)
 
 void smp_call_function_interrupt(struct pt_regs *regs)
 {
-	call_function_entering_irq();
+	smp_entering_irq();
 	__smp_call_function_interrupt();
 	exiting_irq();
 }
 
 void smp_trace_call_function_interrupt(struct pt_regs *regs)
 {
-	call_function_entering_irq();
+	smp_entering_irq();
 	trace_call_function_entry(CALL_FUNCTION_VECTOR);
 	__smp_call_function_interrupt();
 	trace_call_function_exit(CALL_FUNCTION_VECTOR);
@@ -312,14 +319,14 @@ static inline void __smp_call_function_single_interrupt(void)
 
 void smp_call_function_single_interrupt(struct pt_regs *regs)
 {
-	call_function_entering_irq();
+	smp_entering_irq();
 	__smp_call_function_single_interrupt();
 	exiting_irq();
 }
 
 void smp_trace_call_function_single_interrupt(struct pt_regs *regs)
 {
-	call_function_entering_irq();
+	smp_entering_irq();
 	trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR);
 	__smp_call_function_single_interrupt();
 	trace_call_function_single_exit(CALL_FUNCTION_SINGLE_VECTOR);
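
The point of folding call_function_entering_irq() into smp_entering_irq() is that every IPI entry path, traced or not, now brackets its body the same way, with the traced variant adding tracepoints inside the bracket. A toy userspace mock-up of the control flow only (the real helpers ack the APIC and do irq accounting):

#include <stdio.h>

static void entering_irq(void) { puts("ack + irq_enter"); }
static void exiting_irq(void)  { puts("irq_exit"); }
static void do_work(void)      { puts("handler body"); }

static void handler(void)
{
	entering_irq();
	do_work();
	exiting_irq();
}

static void trace_handler(void)
{
	entering_irq();
	puts("trace entry");
	do_work();
	puts("trace exit");
	exiting_irq();
}

int main(void)
{
	handler();
	trace_handler();
	return 0;
}
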
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index bfd348e99369..aecc98a93d1b 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -130,7 +130,7 @@ atomic_t init_deasserted;
  * Report back to the Boot Processor during boot time or to the caller processor
  * during CPU online.
  */
-static void __cpuinit smp_callin(void)
+static void smp_callin(void)
 {
 	int cpuid, phys_id;
 	unsigned long timeout;
@@ -237,7 +237,7 @@ static int enable_start_cpu0;
 /*
  * Activate a secondary processor.
  */
-notrace static void __cpuinit start_secondary(void *unused)
+static void notrace start_secondary(void *unused)
 {
 	/*
 	 * Don't put *anything* before cpu_init(), SMP booting is too
@@ -300,7 +300,7 @@ void __init smp_store_boot_cpu_info(void)
  * The bootstrap kernel entry code has set these up. Save them for
  * a given CPU
  */
-void __cpuinit smp_store_cpu_info(int id)
+void smp_store_cpu_info(int id)
 {
 	struct cpuinfo_x86 *c = &cpu_data(id);
 
@@ -313,7 +313,7 @@ void __cpuinit smp_store_cpu_info(int id)
 	identify_secondary_cpu(c);
 }
 
-static bool __cpuinit
+static bool
 topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
 {
 	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
@@ -330,7 +330,7 @@ do { \
 	cpumask_set_cpu((c2), cpu_##_m##_mask(c1));	\
 } while (0)
 
-static bool __cpuinit match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 {
 	if (cpu_has_topoext) {
 		int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
@@ -348,7 +348,7 @@ static bool __cpuinit match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 	return false;
 }
 
-static bool __cpuinit match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 {
 	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 
@@ -359,7 +359,7 @@ static bool __cpuinit match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 	return false;
 }
 
-static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+static bool match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 {
 	if (c->phys_proc_id == o->phys_proc_id) {
 		if (cpu_has(c, X86_FEATURE_AMD_DCM))
@@ -370,7 +370,7 @@ static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 	return false;
 }
 
-void __cpuinit set_cpu_sibling_map(int cpu)
+void set_cpu_sibling_map(int cpu)
 {
 	bool has_smt = smp_num_siblings > 1;
 	bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
@@ -499,7 +499,7 @@ void __inquire_remote_apic(int apicid)
 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
 * won't ... remember to clear down the APIC, etc later.
 */
-int __cpuinit
+int
 wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip)
 {
 	unsigned long send_status, accept_status = 0;
@@ -533,7 +533,7 @@ wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip)
 	return (send_status | accept_status);
 }
 
-static int __cpuinit
+static int
 wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
 {
 	unsigned long send_status, accept_status = 0;
@@ -649,7 +649,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
 }
 
 /* reduce the number of lines printed when booting a large cpu count system */
-static void __cpuinit announce_cpu(int cpu, int apicid)
+static void announce_cpu(int cpu, int apicid)
 {
 	static int current_node = -1;
 	int node = early_cpu_to_node(cpu);
@@ -691,7 +691,7 @@ static int wakeup_cpu0_nmi(unsigned int cmd, struct pt_regs *regs)
 * We'll change this code in the future to wake up hard offlined CPU0 if
 * real platform and request are available.
 */
-static int __cpuinit
+static int
 wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid,
 			int *cpu0_nmi_registered)
 {
@@ -731,7 +731,7 @@ wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid,
 * Returns zero if CPU booted OK, else error code from
 * ->wakeup_secondary_cpu.
 */
-static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
+static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
 {
 	volatile u32 *trampoline_status =
 		(volatile u32 *) __va(real_mode_header->trampoline_status);
@@ -872,7 +872,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
 	return boot_error;
 }
 
-int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
+int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	int apicid = apic->cpu_present_to_apicid(cpu);
 	unsigned long flags;
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 3ff42d2f046d..addf7b58f4e8 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -320,8 +320,8 @@ static int tboot_wait_for_aps(int num_aps)
 	return !(atomic_read((atomic_t *)&tboot->num_in_wfs) == num_aps);
 }
 
-static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
-			unsigned long action, void *hcpu)
+static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
+			void *hcpu)
 {
 	switch (action) {
 	case CPU_DYING:
@@ -334,7 +334,7 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block tboot_cpu_notifier __cpuinitdata =
+static struct notifier_block tboot_cpu_notifier =
 {
 	.notifier_call = tboot_cpu_callback,
 };
diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
index 4e584a8d6edd..1c113db9ed57 100644
--- a/arch/x86/kernel/tracepoint.c
+++ b/arch/x86/kernel/tracepoint.c
@@ -12,10 +12,8 @@ atomic_t trace_idt_ctr = ATOMIC_INIT(0);
 struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
 				(unsigned long) trace_idt_table };
 
-#ifndef CONFIG_X86_64
-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_data
-					= { { { { 0, 0 } } }, };
-#endif
+/* No need to be aligned, but done to keep all IDTs defined the same way. */
+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
 
 static int trace_irq_vector_refcount;
 static DEFINE_MUTEX(irq_vector_mutex);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index b0865e88d3cc..1b23a1c92746 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -63,19 +63,19 @@
 #include <asm/x86_init.h>
 #include <asm/pgalloc.h>
 #include <asm/proto.h>
+
+/* No need to be aligned, but done to keep all IDTs defined the same way. */
+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
 #else
 #include <asm/processor-flags.h>
 #include <asm/setup.h>
 
 asmlinkage int system_call(void);
-
-/*
- * The IDT has to be page-aligned to simplify the Pentium
- * F0 0F bug workaround.
- */
-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
 #endif
 
+/* Must be page-aligned because the real IDT is used in a fixmap. */
+gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
+
 DECLARE_BITMAP(used_vectors, NR_VECTORS);
 EXPORT_SYMBOL_GPL(used_vectors);
 
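
The IDTs move from __page_aligned_data to __page_aligned_bss because a zero-initialized table need not occupy space in the on-disk image. For reference, the two markers differ only in the target section; definitions as in <linux/linkage.h> of this era, quoted from memory, so treat as approximate:

#define __page_aligned_data	__section(.data..page_aligned) __aligned(PAGE_SIZE)
#define __page_aligned_bss	__section(.bss..page_aligned) __aligned(PAGE_SIZE)
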
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 098b3cfda72e..6ff49247edf8 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -824,7 +824,7 @@ static void __init check_system_tsc_reliable(void)
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
-__cpuinit int unsynchronized_tsc(void)
+int unsynchronized_tsc(void)
 {
 	if (!cpu_has_tsc || tsc_unstable)
 		return 1;
@@ -1020,7 +1020,7 @@ void __init tsc_init(void)
 * been calibrated. This assumes that CONSTANT_TSC applies to all
 * cpus in the socket - this should be a safe assumption.
 */
-unsigned long __cpuinit calibrate_delay_is_known(void)
+unsigned long calibrate_delay_is_known(void)
 {
 	int i, cpu = smp_processor_id();
 
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index fc25e60a5884..adfdf56a3714 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -25,24 +25,24 @@
 * Entry/exit counters that make sure that both CPUs
 * run the measurement code at once:
 */
-static __cpuinitdata atomic_t start_count;
-static __cpuinitdata atomic_t stop_count;
+static atomic_t start_count;
+static atomic_t stop_count;
 
 /*
 * We use a raw spinlock in this exceptional case, because
 * we want to have the fastest, inlined, non-debug version
 * of a critical section, to be able to prove TSC time-warps:
 */
-static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 
-static __cpuinitdata cycles_t last_tsc;
-static __cpuinitdata cycles_t max_warp;
-static __cpuinitdata int nr_warps;
+static cycles_t last_tsc;
+static cycles_t max_warp;
+static int nr_warps;
 
 /*
 * TSC-warp measurement loop running on both CPUs:
 */
-static __cpuinit void check_tsc_warp(unsigned int timeout)
+static void check_tsc_warp(unsigned int timeout)
 {
 	cycles_t start, now, prev, end;
 	int i;
@@ -121,7 +121,7 @@ static inline unsigned int loop_timeout(int cpu)
 * Source CPU calls into this - it waits for the freshly booted
 * target CPU to arrive and then starts the measurement:
 */
-void __cpuinit check_tsc_sync_source(int cpu)
+void check_tsc_sync_source(int cpu)
 {
 	int cpus = 2;
 
@@ -187,7 +187,7 @@ void __cpuinit check_tsc_sync_source(int cpu)
 /*
 * Freshly booted CPUs call into this:
 */
-void __cpuinit check_tsc_sync_target(void)
+void check_tsc_sync_target(void)
 {
 	int cpus = 2;
 
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 9a907a67be8f..1f96f9347ed9 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -331,7 +331,7 @@ sigsegv:
 * Assume __initcall executes before all user space. Hopefully kmod
 * doesn't violate that. We'll find out if it does.
 */
-static void __cpuinit vsyscall_set_cpu(int cpu)
+static void vsyscall_set_cpu(int cpu)
 {
 	unsigned long d;
 	unsigned long node = 0;
@@ -353,13 +353,13 @@ static void __cpuinit vsyscall_set_cpu(int cpu)
 	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
 }
 
-static void __cpuinit cpu_vsyscall_init(void *arg)
+static void cpu_vsyscall_init(void *arg)
 {
 	/* preemption should be already off */
 	vsyscall_set_cpu(raw_smp_processor_id());
 }
 
-static int __cpuinit
+static int
 cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
 {
 	long cpu = (long)arg;
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 45a14dbbddaf..5f24c71accaa 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -25,7 +25,7 @@
 #include <asm/iommu.h>
 #include <asm/mach_traps.h>
 
-void __cpuinit x86_init_noop(void) { }
+void x86_init_noop(void) { }
 void __init x86_init_uint_noop(unsigned int unused) { }
 int __init iommu_init_noop(void) { return 0; }
 void iommu_shutdown_noop(void) { }
@@ -85,7 +85,7 @@ struct x86_init_ops x86_init __initdata = {
 	},
 };
 
-struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
+struct x86_cpuinit_ops x86_cpuinit = {
 	.early_percpu_clock_init	= x86_init_noop,
 	.setup_percpu_clockev		= setup_secondary_APIC_clock,
 };
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index d6c28acdf99c..422fd8223470 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -573,7 +573,7 @@ static void __init xstate_enable_boot_cpu(void)
 * This is somewhat obfuscated due to the lack of powerful enough
 * overrides for the section checks.
 */
-void __cpuinit xsave_init(void)
+void xsave_init(void)
 {
 	static __refdata void (*next_func)(void) = xstate_enable_boot_cpu;
 	void (*this_func)(void);
@@ -594,7 +594,7 @@ static inline void __init eager_fpu_init_bp(void)
 	setup_init_fpu_buf();
 }
 
-void __cpuinit eager_fpu_init(void)
+void eager_fpu_init(void)
 {
 	static __refdata void (*boot_func)(void) = eager_fpu_init_bp;
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 0d094da49541..9e9285ae9b94 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2811,6 +2811,13 @@ exit:
 static bool page_fault_can_be_fast(struct kvm_vcpu *vcpu, u32 error_code)
 {
 	/*
+	 * Do not fix the mmio spte with invalid generation number which
+	 * need to be updated by slow page fault path.
+	 */
+	if (unlikely(error_code & PFERR_RSVD_MASK))
+		return false;
+
+	/*
 	 * #PF can be fast only if the shadow page table is present and it
 	 * is caused by write-protect, that means we just need change the
 	 * W bit of the spte which can be done out of mmu-lock.
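
The new early-out keys off the RSVD bit of the #PF error code: a reserved-bit violation is how a fault against an MMIO spte announces itself, and such faults must take the slow path so a stale-generation spte gets rewritten. For orientation, a runnable sketch using the same bit positions as KVM's PFERR_* definitions:

#include <stdio.h>

#define PFERR_PRESENT_MASK	(1U << 0)
#define PFERR_WRITE_MASK	(1U << 1)
#define PFERR_USER_MASK		(1U << 2)
#define PFERR_RSVD_MASK		(1U << 3)
#define PFERR_FETCH_MASK	(1U << 4)

int main(void)
{
	unsigned int error_code = PFERR_PRESENT_MASK | PFERR_RSVD_MASK;

	/* mirrors the new early-out in page_fault_can_be_fast() */
	if (error_code & PFERR_RSVD_MASK)
		puts("reserved-bit (MMIO) fault: take the slow path");
	return 0;
}
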
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a7e18551c968..064d0be67ecc 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3404,15 +3404,22 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
 	var->limit = vmx_read_guest_seg_limit(vmx, seg);
 	var->selector = vmx_read_guest_seg_selector(vmx, seg);
 	ar = vmx_read_guest_seg_ar(vmx, seg);
+	var->unusable = (ar >> 16) & 1;
 	var->type = ar & 15;
 	var->s = (ar >> 4) & 1;
 	var->dpl = (ar >> 5) & 3;
-	var->present = (ar >> 7) & 1;
+	/*
+	 * Some userspaces do not preserve unusable property. Since usable
+	 * segment has to be present according to VMX spec we can use present
+	 * property to amend userspace bug by making unusable segment always
+	 * nonpresent. vmx_segment_access_rights() already marks nonpresent
+	 * segment as unusable.
+	 */
+	var->present = !var->unusable;
 	var->avl = (ar >> 12) & 1;
 	var->l = (ar >> 13) & 1;
 	var->db = (ar >> 14) & 1;
 	var->g = (ar >> 15) & 1;
-	var->unusable = (ar >> 16) & 1;
 }
 
 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index d482bcaf61c1..6a22c19da663 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -882,9 +882,9 @@ int lguest_setup_irq(unsigned int irq)
 * It would be far better for everyone if the Guest had its own clock, but
 * until then the Host gives us the time on every interrupt.
 */
-static unsigned long lguest_get_wallclock(void)
+static void lguest_get_wallclock(struct timespec *now)
 {
-	return lguest_data.time.tv_sec;
+	*now = lguest_data.time;
 }
 
 /*
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 845df6835f9f..62c29a5bfe26 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -115,10 +115,8 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 	if (mmap_is_legacy()) {
 		mm->mmap_base = mmap_legacy_base();
 		mm->get_unmapped_area = arch_get_unmapped_area;
-		mm->unmap_area = arch_unmap_area;
 	} else {
 		mm->mmap_base = mmap_base();
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-		mm->unmap_area = arch_unmap_area_topdown;
 	}
 }
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index dc0b727742f4..0057a7accfb1 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -410,9 +410,7 @@ out:
 	pr_warning("multiple CPUs still online, may miss events.\n");
 }
 
-/* __ref because leave_uniprocessor calls cpu_up which is __cpuinit,
-   but this whole function is ifdefed CONFIG_HOTPLUG_CPU */
-static void __ref leave_uniprocessor(void)
+static void leave_uniprocessor(void)
 {
 	int cpu;
 	int err;
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index a71c4e207679..8bf93bae1f13 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -60,7 +60,7 @@ s16 __apicid_to_node[MAX_LOCAL_APIC] = {
 	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
 };
 
-int __cpuinit numa_cpu_node(int cpu)
+int numa_cpu_node(int cpu)
 {
 	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
 
@@ -691,12 +691,12 @@ void __init init_cpu_to_node(void)
 #ifndef CONFIG_DEBUG_PER_CPU_MAPS
 
 # ifndef CONFIG_NUMA_EMU
-void __cpuinit numa_add_cpu(int cpu)
+void numa_add_cpu(int cpu)
 {
 	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
 }
 
-void __cpuinit numa_remove_cpu(int cpu)
+void numa_remove_cpu(int cpu)
 {
 	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
 }
@@ -763,17 +763,17 @@ void debug_cpumask_set_cpu(int cpu, int node, bool enable)
 }
 
 # ifndef CONFIG_NUMA_EMU
-static void __cpuinit numa_set_cpumask(int cpu, bool enable)
+static void numa_set_cpumask(int cpu, bool enable)
 {
 	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
 }
 
-void __cpuinit numa_add_cpu(int cpu)
+void numa_add_cpu(int cpu)
 {
 	numa_set_cpumask(cpu, true);
 }
 
-void __cpuinit numa_remove_cpu(int cpu)
+void numa_remove_cpu(int cpu)
 {
 	numa_set_cpumask(cpu, false);
 }
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index dbbbb47260cc..a8f90ce3dedf 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -10,7 +10,7 @@
 
 #include "numa_internal.h"
 
-static int emu_nid_to_phys[MAX_NUMNODES] __cpuinitdata;
+static int emu_nid_to_phys[MAX_NUMNODES];
 static char *emu_cmdline __initdata;
 
 void __init numa_emu_cmdline(char *str)
@@ -444,7 +444,7 @@ no_emu:
 }
 
 #ifndef CONFIG_DEBUG_PER_CPU_MAPS
-void __cpuinit numa_add_cpu(int cpu)
+void numa_add_cpu(int cpu)
 {
 	int physnid, nid;
 
@@ -462,7 +462,7 @@ void __cpuinit numa_add_cpu(int cpu)
 	cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
 }
 
-void __cpuinit numa_remove_cpu(int cpu)
+void numa_remove_cpu(int cpu)
 {
 	int i;
 
@@ -470,7 +470,7 @@ void __cpuinit numa_remove_cpu(int cpu)
 		cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
 }
 #else /* !CONFIG_DEBUG_PER_CPU_MAPS */
-static void __cpuinit numa_set_cpumask(int cpu, bool enable)
+static void numa_set_cpumask(int cpu, bool enable)
 {
 	int nid, physnid;
 
@@ -490,12 +490,12 @@ static void __cpuinit numa_set_cpumask(int cpu, bool enable)
 	}
 }
 
-void __cpuinit numa_add_cpu(int cpu)
+void numa_add_cpu(int cpu)
 {
 	numa_set_cpumask(cpu, true);
 }
 
-void __cpuinit numa_remove_cpu(int cpu)
+void numa_remove_cpu(int cpu)
 {
 	numa_set_cpumask(cpu, false);
 }
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 17fda6a8b3c2..dfa537a03be1 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -240,7 +240,6 @@ static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
 static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
 {
 	pud_t *pud;
-	unsigned long addr;
 	int i;
 
 	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
@@ -248,8 +247,7 @@ static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
 
 	pud = pud_offset(pgd, 0);
 
-	for (addr = i = 0; i < PREALLOCATED_PMDS;
-	     i++, pud++, addr += PUD_SIZE) {
+	for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
 		pmd_t *pmd = pmds[i];
 
 		if (i >= KERNEL_PGD_BOUNDARY)
diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
index 410531d3c292..90555bf60aa4 100644
--- a/arch/x86/mm/setup_nx.c
+++ b/arch/x86/mm/setup_nx.c
@@ -5,7 +5,7 @@
 #include <asm/pgtable.h>
 #include <asm/proto.h>
 
-static int disable_nx __cpuinitdata;
+static int disable_nx;
 
 /*
 * noexec = on|off
@@ -29,7 +29,7 @@ static int __init noexec_setup(char *str)
 }
 early_param("noexec", noexec_setup);
 
-void __cpuinit x86_configure_nx(void)
+void x86_configure_nx(void)
 {
 	if (cpu_has_nx && !disable_nx)
 		__supported_pte_mask |= _PAGE_NX;
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index f66b54086ce5..79c216aa0e2b 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -12,6 +12,7 @@
 #include <linux/netdevice.h>
 #include <linux/filter.h>
 #include <linux/if_vlan.h>
+#include <linux/random.h>
 
 /*
 * Conventions :
@@ -144,6 +145,39 @@ static int pkt_type_offset(void)
 	return -1;
 }
 
+struct bpf_binary_header {
+	unsigned int	pages;
+	/* Note : for security reasons, bpf code will follow a randomly
+	 * sized amount of int3 instructions
+	 */
+	u8		image[];
+};
+
+static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
+						  u8 **image_ptr)
+{
+	unsigned int sz, hole;
+	struct bpf_binary_header *header;
+
+	/* Most of BPF filters are really small,
+	 * but if some of them fill a page, allow at least
+	 * 128 extra bytes to insert a random section of int3
+	 */
+	sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE);
+	header = module_alloc(sz);
+	if (!header)
+		return NULL;
+
+	memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
+
+	header->pages = sz / PAGE_SIZE;
+	hole = sz - (proglen + sizeof(*header));
+
+	/* insert a random number of int3 instructions before BPF code */
+	*image_ptr = &header->image[prandom_u32() % hole];
+	return header;
+}
+
 void bpf_jit_compile(struct sk_filter *fp)
 {
 	u8 temp[64];
@@ -153,6 +187,7 @@ void bpf_jit_compile(struct sk_filter *fp)
 	int t_offset, f_offset;
 	u8 t_op, f_op, seen = 0, pass;
 	u8 *image = NULL;
+	struct bpf_binary_header *header = NULL;
 	u8 *func;
 	int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
 	unsigned int cleanup_addr; /* epilogue code offset */
@@ -693,7 +728,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
 		if (unlikely(proglen + ilen > oldproglen)) {
 			pr_err("bpb_jit_compile fatal error\n");
 			kfree(addrs);
-			module_free(NULL, image);
+			module_free(NULL, header);
 			return;
 		}
 		memcpy(image + proglen, temp, ilen);
@@ -717,10 +752,8 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
 			break;
 		}
 		if (proglen == oldproglen) {
-			image = module_alloc(max_t(unsigned int,
-						   proglen,
-						   sizeof(struct work_struct)));
-			if (!image)
+			header = bpf_alloc_binary(proglen, &image);
+			if (!header)
 				goto out;
 		}
 		oldproglen = proglen;
@@ -730,7 +763,8 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
 		bpf_jit_dump(flen, proglen, pass, image);
 
 	if (image) {
-		bpf_flush_icache(image, image + proglen);
+		bpf_flush_icache(header, image + proglen);
+		set_memory_ro((unsigned long)header, header->pages);
 		fp->bpf_func = (void *)image;
 	}
 out:
@@ -738,20 +772,13 @@ out:
 	return;
 }
 
-static void jit_free_defer(struct work_struct *arg)
-{
-	module_free(NULL, arg);
-}
-
-/* run from softirq, we must use a work_struct to call
- * module_free() from process context
- */
 void bpf_jit_free(struct sk_filter *fp)
 {
 	if (fp->bpf_func != sk_run_filter) {
-		struct work_struct *work = (struct work_struct *)fp->bpf_func;
+		unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
+		struct bpf_binary_header *header = (void *)addr;
 
-		INIT_WORK(work, jit_free_defer);
-		schedule_work(work);
+		set_memory_rw(addr, header->pages);
+		module_free(NULL, header);
 	}
 }
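
The JIT hardening above has two halves: the finished image is made read-only, and its start address is hidden inside an int3-filled hole of random size. A standalone sketch of just the sizing arithmetic from bpf_alloc_binary() (the header size and page size are illustrative constants here):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u
#define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int header = 4;	/* stand-in for sizeof(struct bpf_binary_header) */
	unsigned int proglen = 300;	/* a typical small filter */
	unsigned int sz = round_up(proglen + header + 128, PAGE_SIZE);
	unsigned int hole = sz - (proglen + header);
	unsigned int off = (unsigned int)rand() % hole;

	printf("alloc %u bytes, %u-byte hole, image at +%u\n", sz, hole, off);
	return 0;
}
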
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index e9e6ed5cdf94..a48be98e9ded 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -312,7 +312,7 @@ static int __init early_fill_mp_bus_info(void)
 
 #define ENABLE_CF8_EXT_CFG	(1ULL << 46)
 
-static void __cpuinit enable_pci_io_ecs(void *unused)
+static void enable_pci_io_ecs(void *unused)
 {
 	u64 reg;
 	rdmsrl(MSR_AMD64_NB_CFG, reg);
@@ -322,8 +322,8 @@ static void __cpuinit enable_pci_io_ecs(void *unused)
 	}
 }
 
-static int __cpuinit amd_cpu_notify(struct notifier_block *self,
-				    unsigned long action, void *hcpu)
+static int amd_cpu_notify(struct notifier_block *self, unsigned long action,
+			  void *hcpu)
 {
 	int cpu = (long)hcpu;
 	switch (action) {
@@ -337,7 +337,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata amd_cpu_notifier = {
+static struct notifier_block amd_cpu_notifier = {
 	.notifier_call	= amd_cpu_notify,
 };
 
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c
index f8ab4945892e..643b8b5eee86 100644
--- a/arch/x86/platform/ce4100/ce4100.c
+++ b/arch/x86/platform/ce4100/ce4100.c
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/serial_reg.h>
 #include <linux/serial_8250.h>
+#include <linux/reboot.h>
 
 #include <asm/ce4100.h>
 #include <asm/prom.h>
@@ -134,7 +135,7 @@ static void __init sdv_arch_setup(void)
 }
 
 #ifdef CONFIG_X86_IO_APIC
-static void __cpuinit sdv_pci_init(void)
+static void sdv_pci_init(void)
 {
 	x86_of_pci_init();
 	/* We can't set this earlier, because we need to calibrate the timer */
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index b410b71bdcf7..90f6ed127096 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -274,8 +274,9 @@ static efi_status_t __init phys_efi_get_time(efi_time_t *tm,
 	return status;
 }
 
-int efi_set_rtc_mmss(unsigned long nowtime)
+int efi_set_rtc_mmss(const struct timespec *now)
 {
+	unsigned long nowtime = now->tv_sec;
 	efi_status_t status;
 	efi_time_t eft;
 	efi_time_cap_t cap;
@@ -310,7 +311,7 @@ int efi_set_rtc_mmss(unsigned long nowtime)
 	return 0;
 }
 
-unsigned long efi_get_time(void)
+void efi_get_time(struct timespec *now)
 {
 	efi_status_t status;
 	efi_time_t eft;
@@ -320,8 +321,9 @@ unsigned long efi_get_time(void)
 	if (status != EFI_SUCCESS)
 		pr_err("Oops: efitime: can't read time!\n");
 
-	return mktime(eft.year, eft.month, eft.day, eft.hour,
-		      eft.minute, eft.second);
+	now->tv_sec = mktime(eft.year, eft.month, eft.day, eft.hour,
+			     eft.minute, eft.second);
+	now->tv_nsec = 0;
 }
 
 /*
@@ -929,13 +931,6 @@ void __init efi_enter_virtual_mode(void)
 		va = efi_ioremap(md->phys_addr, size,
 				 md->type, md->attribute);
 
-		if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
-			if (!va)
-				pr_err("ioremap of 0x%llX failed!\n",
-				       (unsigned long long)md->phys_addr);
-			continue;
-		}
-
 		md->virt_addr = (u64) (unsigned long) va;
 
 		if (!va) {
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index a0a0a4389bbd..47fe66fe61f1 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -65,7 +65,7 @@
 * lapic (always-on,ARAT) ------ 150
 */
 
-__cpuinitdata enum mrst_timer_options mrst_timer_options;
+enum mrst_timer_options mrst_timer_options;
 
 static u32 sfi_mtimer_usage[SFI_MTMR_MAX_NUM];
 static struct sfi_timer_table_entry sfi_mtimer_array[SFI_MTMR_MAX_NUM];
@@ -248,7 +248,7 @@ static void __init mrst_time_init(void)
 	apbt_time_init();
 }
 
-static void __cpuinit mrst_arch_setup(void)
+static void mrst_arch_setup(void)
 {
 	if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 0x27)
 		__mrst_cpu_chip = MRST_CPU_CHIP_PENWELL;
diff --git a/arch/x86/platform/mrst/vrtc.c b/arch/x86/platform/mrst/vrtc.c
index d62b0a3b5c14..5e355b134ba4 100644
--- a/arch/x86/platform/mrst/vrtc.c
+++ b/arch/x86/platform/mrst/vrtc.c
@@ -56,7 +56,7 @@ void vrtc_cmos_write(unsigned char val, unsigned char reg)
56} 56}
57EXPORT_SYMBOL_GPL(vrtc_cmos_write); 57EXPORT_SYMBOL_GPL(vrtc_cmos_write);
58 58
59unsigned long vrtc_get_time(void) 59void vrtc_get_time(struct timespec *now)
60{ 60{
61 u8 sec, min, hour, mday, mon; 61 u8 sec, min, hour, mday, mon;
62 unsigned long flags; 62 unsigned long flags;
@@ -82,17 +82,18 @@ unsigned long vrtc_get_time(void)
 	printk(KERN_INFO "vRTC: sec: %d min: %d hour: %d day: %d "
 		"mon: %d year: %d\n", sec, min, hour, mday, mon, year);
 
-	return mktime(year, mon, mday, hour, min, sec);
+	now->tv_sec = mktime(year, mon, mday, hour, min, sec);
+	now->tv_nsec = 0;
 }
 
-int vrtc_set_mmss(unsigned long nowtime)
+int vrtc_set_mmss(const struct timespec *now)
 {
 	unsigned long flags;
 	struct rtc_time tm;
 	int year;
 	int retval = 0;
 
-	rtc_time_to_tm(nowtime, &tm);
+	rtc_time_to_tm(now->tv_sec, &tm);
 	if (!rtc_valid_tm(&tm) && tm.tm_year >= 72) {
 		/*
 		 * tm.year is the number of years since 1900, and the
@@ -110,7 +111,7 @@ int vrtc_set_mmss(unsigned long nowtime)
 	} else {
 		printk(KERN_ERR
 		       "%s: Invalid vRTC value: write of %lx to vRTC failed\n",
-			__FUNCTION__, nowtime);
+			__FUNCTION__, now->tv_sec);
 		retval = -EINVAL;
 	}
 	return retval;
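
On the write side, set_wallclock hooks now take a const struct timespec *, as the vrtc_set_mmss() conversion above shows. A sketch of how such a hook consumes the argument (hypothetical example_set_wallclock; error handling reduced to the essentials):

/* Sketch only: the post-rework set_wallclock contract. */
static int example_set_wallclock(const struct timespec *now)
{
	struct rtc_time tm;

	rtc_time_to_tm(now->tv_sec, &tm);	/* seconds -> broken-down time */
	if (rtc_valid_tm(&tm))			/* returns 0 when tm is sane */
		return -EINVAL;
	/* ... program the RTC registers from tm here ... */
	return 0;
}
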
diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c
index ae7319db18ee..5e04a1c899fa 100644
--- a/arch/x86/um/signal.c
+++ b/arch/x86/um/signal.c
@@ -508,7 +508,6 @@ int setup_signal_stack_si(unsigned long stack_top, int sig,
 {
 	struct rt_sigframe __user *frame;
 	int err = 0;
-	struct task_struct *me = current;
 
 	frame = (struct rt_sigframe __user *)
 		round_down(stack_top - sizeof(struct rt_sigframe), 16);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 2fa02bc50034..193097ef3d7d 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1681,8 +1681,8 @@ static void __init init_hvm_pv_info(void)
 	xen_domain_type = XEN_HVM_DOMAIN;
 }
 
-static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
-					unsigned long action, void *hcpu)
+static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
+			      void *hcpu)
 {
 	int cpu = (long)hcpu;
 	switch (action) {
@@ -1700,7 +1700,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
 		return NOTIFY_OK;
 }
 
-static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
+static struct notifier_block xen_hvm_cpu_notifier = {
 	.notifier_call = xen_hvm_cpu_notify,
 };
 
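
The __cpuinit/__cpuinitdata removals in this and the following files leave plain CPU hotplug notifiers behind. For reference, a stripped-down notifier of the resulting shape (hypothetical names, not part of this patch):

static int example_cpu_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	if (action == CPU_UP_PREPARE)		/* react to an onlining CPU */
		pr_info("CPU%d coming up\n", cpu);
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
	.notifier_call = example_cpu_notify,
};
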
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 94eac5c85cdc..056d11faef21 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -475,7 +475,7 @@ static void __init fiddle_vdso(void)
 #endif
 }
 
-static int __cpuinit register_callback(unsigned type, const void *func)
+static int register_callback(unsigned type, const void *func)
 {
 	struct callback_register callback = {
 		.type = type,
@@ -486,7 +486,7 @@ static int __cpuinit register_callback(unsigned type, const void *func)
 	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
 }
 
-void __cpuinit xen_enable_sysenter(void)
+void xen_enable_sysenter(void)
 {
 	int ret;
 	unsigned sysenter_feature;
@@ -505,7 +505,7 @@ void __cpuinit xen_enable_sysenter(void)
 		setup_clear_cpu_cap(sysenter_feature);
 }
 
-void __cpuinit xen_enable_syscall(void)
+void xen_enable_syscall(void)
 {
 #ifdef CONFIG_X86_64
 	int ret;
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index c1367b29c3b1..ca92754eb846 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -65,7 +65,7 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static void __cpuinit cpu_bringup(void)
+static void cpu_bringup(void)
 {
 	int cpu;
 
@@ -97,7 +97,7 @@ static void __cpuinit cpu_bringup(void)
 	wmb();			/* make sure everything is out */
 }
 
-static void __cpuinit cpu_bringup_and_idle(void)
+static void cpu_bringup_and_idle(void)
 {
 	cpu_bringup();
 	cpu_startup_entry(CPUHP_ONLINE);
@@ -326,7 +326,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 		set_cpu_present(cpu, true);
 }
 
-static int __cpuinit
+static int
 cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 {
 	struct vcpu_guest_context *ctxt;
@@ -397,7 +397,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	return 0;
 }
 
-static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
+static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
 {
 	int rc;
 
@@ -470,7 +470,7 @@ static void xen_cpu_die(unsigned int cpu)
 	xen_teardown_timer(cpu);
 }
 
-static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
+static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
 {
 	play_dead_common();
 	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
@@ -691,7 +691,7 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
 	xen_init_lock_cpu(0);
 }
 
-static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
+static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	int rc;
 	rc = native_cpu_up(cpu, tidle);
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index a40f8508e760..cf3caee356b3 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -361,7 +361,7 @@ static irqreturn_t dummy_handler(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-void __cpuinit xen_init_lock_cpu(int cpu)
+void xen_init_lock_cpu(int cpu)
 {
 	int irq;
 	char *name;
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index a690868be837..ee365895b06b 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -15,6 +15,7 @@
 #include <linux/math64.h>
 #include <linux/gfp.h>
 #include <linux/slab.h>
+#include <linux/pvclock_gtod.h>
 
 #include <asm/pvclock.h>
 #include <asm/xen/hypervisor.h>
@@ -179,34 +180,56 @@ static void xen_read_wallclock(struct timespec *ts)
 	put_cpu_var(xen_vcpu);
 }
 
-static unsigned long xen_get_wallclock(void)
+static void xen_get_wallclock(struct timespec *now)
 {
-	struct timespec ts;
+	xen_read_wallclock(now);
+}
 
-	xen_read_wallclock(&ts);
-	return ts.tv_sec;
+static int xen_set_wallclock(const struct timespec *now)
+{
+	return -1;
 }
 
-static int xen_set_wallclock(unsigned long now)
+static int xen_pvclock_gtod_notify(struct notifier_block *nb,
+				   unsigned long was_set, void *priv)
 {
+	/* Protected by the calling core code serialization */
+	static struct timespec next_sync;
+
 	struct xen_platform_op op;
-	int rc;
+	struct timespec now;
 
-	/* do nothing for domU */
-	if (!xen_initial_domain())
-		return -1;
+	now = __current_kernel_time();
+
+	/*
+	 * We only take the expensive HV call when the clock was set
+	 * or when the 11 minutes RTC synchronization time elapsed.
+	 */
+	if (!was_set && timespec_compare(&now, &next_sync) < 0)
+		return NOTIFY_OK;
 
 	op.cmd = XENPF_settime;
-	op.u.settime.secs = now;
-	op.u.settime.nsecs = 0;
+	op.u.settime.secs = now.tv_sec;
+	op.u.settime.nsecs = now.tv_nsec;
 	op.u.settime.system_time = xen_clocksource_read();
 
-	rc = HYPERVISOR_dom0_op(&op);
-	WARN(rc != 0, "XENPF_settime failed: now=%ld\n", now);
+	(void)HYPERVISOR_dom0_op(&op);
 
-	return rc;
+	/*
+	 * Move the next drift compensation time 11 minutes
+	 * ahead. That's emulating the sync_cmos_clock() update for
+	 * the hardware RTC.
+	 */
+	next_sync = now;
+	next_sync.tv_sec += 11 * 60;
+
+	return NOTIFY_OK;
 }
 
+static struct notifier_block xen_pvclock_gtod_notifier = {
+	.notifier_call = xen_pvclock_gtod_notify,
+};
+
 static struct clocksource xen_clocksource __read_mostly = {
 	.name = "xen",
 	.rating = 400,
@@ -482,6 +505,9 @@ static void __init xen_time_init(void)
 	xen_setup_runstate_info(cpu);
 	xen_setup_timer(cpu);
 	xen_setup_cpu_clockevents();
+
+	if (xen_initial_domain())
+		pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
 }
 
 void __init xen_init_time_ops(void)
@@ -494,7 +520,9 @@ void __init xen_init_time_ops(void)
 
 	x86_platform.calibrate_tsc = xen_tsc_khz;
 	x86_platform.get_wallclock = xen_get_wallclock;
-	x86_platform.set_wallclock = xen_set_wallclock;
+	/* Dom0 uses the native method to set the hardware RTC. */
+	if (!xen_initial_domain())
+		x86_platform.set_wallclock = xen_set_wallclock;
 }
 
 #ifdef CONFIG_XEN_PVHVM
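
The xen/time.c change above replaces the old dom0 set_wallclock path with a pvclock_gtod notifier, throttled to one hypercall per 11 minutes unless the clock was stepped. A sketch of the registration pattern it relies on (hypothetical names; pvclock_gtod_register_notifier is the real API from <linux/pvclock_gtod.h> added in the hunk):

static int example_gtod_notify(struct notifier_block *nb,
			       unsigned long was_set, void *priv)
{
	/* was_set is non-zero when the clock was stepped, per the hunk above */
	return NOTIFY_OK;
}

static struct notifier_block example_gtod_nb = {
	.notifier_call = example_gtod_notify,
};

static void __init example_register(void)
{
	pvclock_gtod_register_notifier(&example_gtod_nb);
}
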
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index a95b41744ad0..86782c5d7e2a 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -73,7 +73,7 @@ static inline void xen_hvm_smp_init(void) {}
 
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
 void __init xen_init_spinlocks(void);
-void __cpuinit xen_init_lock_cpu(int cpu);
+void xen_init_lock_cpu(int cpu);
 void xen_uninit_lock_cpu(int cpu);
 #else
 static inline void xen_init_spinlocks(void)