author | Ingo Molnar <mingo@elte.hu> | 2009-02-22 11:59:49 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-02-22 11:59:49 -0500 |
commit | 2702e0a46c2d28da92b32c9b068ee1291fc0de35 (patch) | |
tree | c1e9868a4b91beeebd5f4e820588bc2ae8371911 /arch/x86 | |
parent | 42bb8cc5e81028e217105299001070d57eb84ad7 (diff) | |
parent | adfafefd104d840ee4461965f22624d77532675b (diff) |
Merge branch 'linus' into timers/hpet
Diffstat (limited to 'arch/x86')
107 files changed, 773 insertions, 536 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 73f7fe8fd4d1..9c39095b33fc 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -1802,6 +1802,17 @@ config DMAR | |||
1802 | and include PCI device scope covered by these DMA | 1802 | and include PCI device scope covered by these DMA |
1803 | remapping devices. | 1803 | remapping devices. |
1804 | 1804 | ||
1805 | config DMAR_DEFAULT_ON | ||
1806 | def_bool n | ||
1807 | prompt "Enable DMA Remapping Devices by default" | ||
1808 | depends on DMAR | ||
1809 | help | ||
1810 | Selecting this option will enable a DMAR device at boot time if | ||
1811 | one is found. If this option is not selected, DMAR support can | ||
1812 | be enabled by passing intel_iommu=on to the kernel. It is | ||
1813 | recommended you say N here while the DMAR code remains | ||
1814 | experimental. | ||
1815 | |||
1805 | config DMAR_GFX_WA | 1816 | config DMAR_GFX_WA |
1806 | def_bool y | 1817 | def_bool y |
1807 | prompt "Support for Graphics workaround" | 1818 | prompt "Support for Graphics workaround" |
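The new DMAR_DEFAULT_ON option only changes the boot-time default; passing intel_iommu=on on the kernel command line enables DMAR either way. Below is a minimal userspace sketch of that default-plus-override pattern — the names are hypothetical, and the real parsing lives in the kernel's Intel IOMMU code:

```c
#include <stdio.h>
#include <string.h>

/* Default comes from the (hypothetical) config symbol. */
#ifdef CONFIG_DMAR_DEFAULT_ON
static int dmar_enabled = 1;
#else
static int dmar_enabled = 0;
#endif

static void parse_boot_option(const char *arg)
{
	if (strcmp(arg, "intel_iommu=on") == 0)
		dmar_enabled = 1;	/* command line overrides the default */
}

int main(void)
{
	parse_boot_option("intel_iommu=on");
	printf("dmar_enabled = %d\n", dmar_enabled);	/* 1 */
	return 0;
}
```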
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index 8078955845ae..c98d52e82966 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu | |||
@@ -167,9 +167,9 @@ config MK7 | |||
167 | config MK8 | 167 | config MK8 |
168 | bool "Opteron/Athlon64/Hammer/K8" | 168 | bool "Opteron/Athlon64/Hammer/K8" |
169 | help | 169 | help |
170 | Select this for an AMD Opteron or Athlon64 Hammer-family processor. Enables | 170 | Select this for an AMD Opteron or Athlon64 Hammer-family processor. |
171 | use of some extended instructions, and passes appropriate optimization | 171 | Enables use of some extended instructions, and passes appropriate |
172 | flags to GCC. | 172 | optimization flags to GCC. |
173 | 173 | ||
174 | config MCRUSOE | 174 | config MCRUSOE |
175 | bool "Crusoe" | 175 | bool "Crusoe" |
@@ -256,9 +256,11 @@ config MPSC | |||
256 | config MCORE2 | 256 | config MCORE2 |
257 | bool "Core 2/newer Xeon" | 257 | bool "Core 2/newer Xeon" |
258 | help | 258 | help |
259 | Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and 53xx) | 259 | |
260 | CPUs. You can distinguish newer from older Xeons by the CPU family | 260 | Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and |
261 | in /proc/cpuinfo. Newer ones have 6 and older ones 15 (not a typo) | 261 | 53xx) CPUs. You can distinguish newer from older Xeons by the CPU |
262 | family in /proc/cpuinfo. Newer ones have 6 and older ones 15 | ||
263 | (not a typo) | ||
262 | 264 | ||
263 | config GENERIC_CPU | 265 | config GENERIC_CPU |
264 | bool "Generic-x86-64" | 266 | bool "Generic-x86-64" |
@@ -320,14 +322,14 @@ config X86_PPRO_FENCE | |||
320 | bool "PentiumPro memory ordering errata workaround" | 322 | bool "PentiumPro memory ordering errata workaround" |
321 | depends on M686 || M586MMX || M586TSC || M586 || M486 || M386 || MGEODEGX1 | 323 | depends on M686 || M586MMX || M586TSC || M586 || M486 || M386 || MGEODEGX1 |
322 | help | 324 | help |
323 | Old PentiumPro multiprocessor systems had errata that could cause memory | 325 | Old PentiumPro multiprocessor systems had errata that could cause |
324 | operations to violate the x86 ordering standard in rare cases. Enabling this | 326 | memory operations to violate the x86 ordering standard in rare cases. |
325 | option will attempt to work around some (but not all) occurrences of | 327 | Enabling this option will attempt to work around some (but not all) |
326 | this problem, at the cost of much heavier spinlock and | 328 | occurrences of this problem, at the cost of much heavier spinlock and |
327 | operations. | 329 | memory barrier operations. |
328 | 330 | ||
329 | If unsure, say n here. Even distro kernels should think twice before enabling | 331 | If unsure, say n here. Even distro kernels should think twice before |
330 | this: there are few systems, and an unlikely bug. | 332 | enabling this: there are few systems, and an unlikely bug. |
331 | 333 | ||
332 | config X86_F00F_BUG | 334 | config X86_F00F_BUG |
333 | def_bool y | 335 | def_bool y |
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index 10d6cc3fd052..e1983fa025d2 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug | |||
@@ -174,28 +174,8 @@ config IOMMU_LEAK | |||
174 | Add a simple leak tracer to the IOMMU code. This is useful when you | 174 | Add a simple leak tracer to the IOMMU code. This is useful when you |
175 | are debugging a buggy device driver that leaks IOMMU mappings. | 175 | are debugging a buggy device driver that leaks IOMMU mappings. |
176 | 176 | ||
177 | config MMIOTRACE | 177 | config HAVE_MMIOTRACE_SUPPORT |
178 | bool "Memory mapped IO tracing" | 178 | def_bool y |
179 | depends on DEBUG_KERNEL && PCI | ||
180 | select TRACING | ||
181 | help | ||
182 | Mmiotrace traces Memory Mapped I/O access and is meant for | ||
183 | debugging and reverse engineering. It is called from the ioremap | ||
184 | implementation and works via page faults. Tracing is disabled by | ||
185 | default and can be enabled at run-time. | ||
186 | |||
187 | See Documentation/tracers/mmiotrace.txt. | ||
188 | If you are not helping to develop drivers, say N. | ||
189 | |||
190 | config MMIOTRACE_TEST | ||
191 | tristate "Test module for mmiotrace" | ||
192 | depends on MMIOTRACE && m | ||
193 | help | ||
194 | This is a dumb module for testing mmiotrace. It is very dangerous | ||
195 | as it will write garbage to IO memory starting at a given address. | ||
196 | However, it should be safe to use on e.g. unused portion of VRAM. | ||
197 | |||
198 | Say N, unless you absolutely know what you are doing. | ||
199 | 179 | ||
200 | # | 180 | # |
201 | # IO delay types: | 181 | # IO delay types: |
diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c index 75115849af33..4a58c8ce3f69 100644 --- a/arch/x86/boot/video-vesa.c +++ b/arch/x86/boot/video-vesa.c | |||
@@ -269,9 +269,8 @@ void vesa_store_edid(void) | |||
269 | we genuinely have to assume all registers are destroyed here. */ | 269 | we genuinely have to assume all registers are destroyed here. */ |
270 | 270 | ||
271 | asm("pushw %%es; movw %2,%%es; "INT10"; popw %%es" | 271 | asm("pushw %%es; movw %2,%%es; "INT10"; popw %%es" |
272 | : "+a" (ax), "+b" (bx) | 272 | : "+a" (ax), "+b" (bx), "+c" (cx), "+D" (di) |
273 | : "c" (cx), "D" (di) | 273 | : : "esi", "edx"); |
274 | : "esi"); | ||
275 | 274 | ||
276 | if (ax != 0x004f) | 275 | if (ax != 0x004f) |
277 | return; /* No EDID */ | 276 | return; /* No EDID */ |
@@ -285,9 +284,9 @@ void vesa_store_edid(void) | |||
285 | dx = 0; /* EDID block number */ | 284 | dx = 0; /* EDID block number */ |
286 | di =(size_t) &boot_params.edid_info; /* (ES:)Pointer to block */ | 285 | di =(size_t) &boot_params.edid_info; /* (ES:)Pointer to block */ |
287 | asm(INT10 | 286 | asm(INT10 |
288 | : "+a" (ax), "+b" (bx), "+d" (dx), "=m" (boot_params.edid_info) | 287 | : "+a" (ax), "+b" (bx), "+d" (dx), "=m" (boot_params.edid_info), |
289 | : "c" (cx), "D" (di) | 288 | "+c" (cx), "+D" (di) |
290 | : "esi"); | 289 | : : "esi"); |
291 | #endif /* CONFIG_FIRMWARE_EDID */ | 290 | #endif /* CONFIG_FIRMWARE_EDID */ |
292 | } | 291 | } |
293 | 292 | ||
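Both fixes promote registers the BIOS call may modify (cx, di, and a clobbered edx in the first case) from plain inputs to in/out operands or clobbers; an input-only constraint lets GCC assume a register still holds its old value after the asm. A minimal userspace sketch of the in/out ("+") constraint, assuming x86 and GCC or Clang:

```c
#include <stdio.h>

int main(void)
{
	unsigned int ax = 5;

	/* "+a" declares eax as both input and output, so the compiler
	 * knows the asm changes it. Listing ax only as an input ("a")
	 * would let the compiler keep using the stale value 5. */
	asm("addl $1, %%eax" : "+a" (ax));

	printf("ax = %u\n", ax);	/* 6 */
	return 0;
}
```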
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index b30a08ed8eb4..edba00d98ac3 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig | |||
@@ -1331,8 +1331,8 @@ CONFIG_I2C_I801=y | |||
1331 | # Miscellaneous I2C Chip support | 1331 | # Miscellaneous I2C Chip support |
1332 | # | 1332 | # |
1333 | # CONFIG_DS1682 is not set | 1333 | # CONFIG_DS1682 is not set |
1334 | # CONFIG_AT24 is not set | 1334 | # CONFIG_EEPROM_AT24 is not set |
1335 | # CONFIG_SENSORS_EEPROM is not set | 1335 | # CONFIG_EEPROM_LEGACY is not set |
1336 | # CONFIG_SENSORS_PCF8574 is not set | 1336 | # CONFIG_SENSORS_PCF8574 is not set |
1337 | # CONFIG_PCF8575 is not set | 1337 | # CONFIG_PCF8575 is not set |
1338 | # CONFIG_SENSORS_PCA9539 is not set | 1338 | # CONFIG_SENSORS_PCA9539 is not set |
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index 0e7dbc0a3e46..322dd2748fc9 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig | |||
@@ -1311,8 +1311,8 @@ CONFIG_I2C_I801=y | |||
1311 | # Miscellaneous I2C Chip support | 1311 | # Miscellaneous I2C Chip support |
1312 | # | 1312 | # |
1313 | # CONFIG_DS1682 is not set | 1313 | # CONFIG_DS1682 is not set |
1314 | # CONFIG_AT24 is not set | 1314 | # CONFIG_EEPROM_AT24 is not set |
1315 | # CONFIG_SENSORS_EEPROM is not set | 1315 | # CONFIG_EEPROM_LEGACY is not set |
1316 | # CONFIG_SENSORS_PCF8574 is not set | 1316 | # CONFIG_SENSORS_PCF8574 is not set |
1317 | # CONFIG_PCF8575 is not set | 1317 | # CONFIG_PCF8575 is not set |
1318 | # CONFIG_SENSORS_PCA9539 is not set | 1318 | # CONFIG_SENSORS_PCA9539 is not set |
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 256b00b61892..5a0d76dc56a4 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S | |||
@@ -418,9 +418,9 @@ ENTRY(ia32_syscall) | |||
418 | orl $TS_COMPAT,TI_status(%r10) | 418 | orl $TS_COMPAT,TI_status(%r10) |
419 | testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) | 419 | testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) |
420 | jnz ia32_tracesys | 420 | jnz ia32_tracesys |
421 | ia32_do_syscall: | ||
422 | cmpl $(IA32_NR_syscalls-1),%eax | 421 | cmpl $(IA32_NR_syscalls-1),%eax |
423 | ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */ | 422 | ja ia32_badsys |
423 | ia32_do_call: | ||
424 | IA32_ARG_FIXUP | 424 | IA32_ARG_FIXUP |
425 | call *ia32_sys_call_table(,%rax,8) # xxx: rip relative | 425 | call *ia32_sys_call_table(,%rax,8) # xxx: rip relative |
426 | ia32_sysret: | 426 | ia32_sysret: |
@@ -435,7 +435,9 @@ ia32_tracesys: | |||
435 | call syscall_trace_enter | 435 | call syscall_trace_enter |
436 | LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ | 436 | LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ |
437 | RESTORE_REST | 437 | RESTORE_REST |
438 | jmp ia32_do_syscall | 438 | cmpl $(IA32_NR_syscalls-1),%eax |
439 | ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */ | ||
440 | jmp ia32_do_call | ||
439 | END(ia32_syscall) | 441 | END(ia32_syscall) |
440 | 442 | ||
441 | ia32_badsys: | 443 | ia32_badsys: |
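The reordering matters because the two entry paths need different out-of-range handling: the fast path has not stored a return value yet, so it must branch to ia32_badsys, while the traced path returns through int_ret_from_sys_call because syscall_trace_enter already set RAX(%rsp). A rough C sketch of the fast-path rule, with a hypothetical table size:

```c
#include <errno.h>
#include <stdio.h>

#define IA32_NR_SYSCALLS 337	/* hypothetical table size */

static long sys_badsys(void)
{
	return -ENOSYS;		/* what the ia32_badsys path reports */
}

static long dispatch(unsigned int nr)
{
	if (nr > IA32_NR_SYSCALLS - 1)
		return sys_badsys();	/* the restored "ja ia32_badsys" */
	/* ... call ia32_sys_call_table[nr] here ... */
	return 0;
}

int main(void)
{
	printf("%ld\n", dispatch(100000));	/* -38 (-ENOSYS) */
	return 0;
}
```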
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild index a9f8a814a1f7..4a8e80cdcfa5 100644 --- a/arch/x86/include/asm/Kbuild +++ b/arch/x86/include/asm/Kbuild | |||
@@ -22,4 +22,3 @@ unifdef-y += unistd_32.h | |||
22 | unifdef-y += unistd_64.h | 22 | unifdef-y += unistd_64.h |
23 | unifdef-y += vm86.h | 23 | unifdef-y += vm86.h |
24 | unifdef-y += vsyscall.h | 24 | unifdef-y += vsyscall.h |
25 | unifdef-y += swab.h | ||
diff --git a/arch/x86/include/asm/a.out-core.h b/arch/x86/include/asm/a.out-core.h index 37822206083e..3c601f8224be 100644 --- a/arch/x86/include/asm/a.out-core.h +++ b/arch/x86/include/asm/a.out-core.h | |||
@@ -23,8 +23,6 @@ | |||
23 | */ | 23 | */ |
24 | static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) | 24 | static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) |
25 | { | 25 | { |
26 | u16 gs; | ||
27 | |||
28 | /* changed the size calculations - should hopefully work better. lbt */ | 26 | /* changed the size calculations - should hopefully work better. lbt */ |
29 | dump->magic = CMAGIC; | 27 | dump->magic = CMAGIC; |
30 | dump->start_code = 0; | 28 | dump->start_code = 0; |
@@ -57,7 +55,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) | |||
57 | dump->regs.ds = (u16)regs->ds; | 55 | dump->regs.ds = (u16)regs->ds; |
58 | dump->regs.es = (u16)regs->es; | 56 | dump->regs.es = (u16)regs->es; |
59 | dump->regs.fs = (u16)regs->fs; | 57 | dump->regs.fs = (u16)regs->fs; |
60 | savesegment(gs, gs); | 58 | savesegment(gs, dump->regs.gs); |
61 | dump->regs.orig_ax = regs->orig_ax; | 59 | dump->regs.orig_ax = regs->orig_ax; |
62 | dump->regs.ip = regs->ip; | 60 | dump->regs.ip = regs->ip; |
63 | dump->regs.cs = (u16)regs->cs; | 61 | dump->regs.cs = (u16)regs->cs; |
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index e02a359d2aa5..02b47a603fc8 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h | |||
@@ -3,6 +3,9 @@ | |||
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Copyright 1992, Linus Torvalds. | 5 | * Copyright 1992, Linus Torvalds. |
6 | * | ||
7 | * Note: inlines with more than a single statement should be marked | ||
8 | * __always_inline to avoid problems with older gcc's inlining heuristics. | ||
6 | */ | 9 | */ |
7 | 10 | ||
8 | #ifndef _LINUX_BITOPS_H | 11 | #ifndef _LINUX_BITOPS_H |
@@ -53,7 +56,8 @@ | |||
53 | * Note that @nr may be almost arbitrarily large; this function is not | 56 | * Note that @nr may be almost arbitrarily large; this function is not |
54 | * restricted to acting on a single-word quantity. | 57 | * restricted to acting on a single-word quantity. |
55 | */ | 58 | */ |
56 | static inline void set_bit(unsigned int nr, volatile unsigned long *addr) | 59 | static __always_inline void |
60 | set_bit(unsigned int nr, volatile unsigned long *addr) | ||
57 | { | 61 | { |
58 | if (IS_IMMEDIATE(nr)) { | 62 | if (IS_IMMEDIATE(nr)) { |
59 | asm volatile(LOCK_PREFIX "orb %1,%0" | 63 | asm volatile(LOCK_PREFIX "orb %1,%0" |
@@ -90,7 +94,8 @@ static inline void __set_bit(int nr, volatile unsigned long *addr) | |||
90 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() | 94 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() |
91 | * in order to ensure changes are visible on other processors. | 95 | * in order to ensure changes are visible on other processors. |
92 | */ | 96 | */ |
93 | static inline void clear_bit(int nr, volatile unsigned long *addr) | 97 | static __always_inline void |
98 | clear_bit(int nr, volatile unsigned long *addr) | ||
94 | { | 99 | { |
95 | if (IS_IMMEDIATE(nr)) { | 100 | if (IS_IMMEDIATE(nr)) { |
96 | asm volatile(LOCK_PREFIX "andb %1,%0" | 101 | asm volatile(LOCK_PREFIX "andb %1,%0" |
@@ -204,7 +209,8 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr) | |||
204 | * | 209 | * |
205 | * This is the same as test_and_set_bit on x86. | 210 | * This is the same as test_and_set_bit on x86. |
206 | */ | 211 | */ |
207 | static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr) | 212 | static __always_inline int |
213 | test_and_set_bit_lock(int nr, volatile unsigned long *addr) | ||
208 | { | 214 | { |
209 | return test_and_set_bit(nr, addr); | 215 | return test_and_set_bit(nr, addr); |
210 | } | 216 | } |
@@ -300,7 +306,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr) | |||
300 | return oldbit; | 306 | return oldbit; |
301 | } | 307 | } |
302 | 308 | ||
303 | static inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr) | 309 | static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr) |
304 | { | 310 | { |
305 | return ((1UL << (nr % BITS_PER_LONG)) & | 311 | return ((1UL << (nr % BITS_PER_LONG)) & |
306 | (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0; | 312 | (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0; |
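__always_inline matters here because the IS_IMMEDIATE() fast path relies on __builtin_constant_p(), which only sees a constant bit number once the body is inlined into the caller, and older gcc sometimes declined to inline multi-statement functions. A userspace sketch of the same split, assuming a little-endian target:

```c
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

static inline __attribute__((__always_inline__)) void
set_bit_demo(unsigned int nr, volatile unsigned long *addr)
{
	if (__builtin_constant_p(nr))
		/* constant path (taken with optimization): touch just the
		 * containing byte, as the kernel's "orb" variant does */
		((volatile unsigned char *)addr)[nr / 8] |= 1u << (nr % 8);
	else
		/* variable path: generic word-sized update */
		addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

int main(void)
{
	unsigned long word = 0;

	set_bit_demo(3, &word);		/* both paths set the same bit */
	printf("%#lx\n", word);		/* 0x8 */
	return 0;
}
```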
diff --git a/arch/x86/include/asm/byteorder.h b/arch/x86/include/asm/byteorder.h index 7c49917e3d9d..b13a7a88f3eb 100644 --- a/arch/x86/include/asm/byteorder.h +++ b/arch/x86/include/asm/byteorder.h | |||
@@ -1,7 +1,6 @@ | |||
1 | #ifndef _ASM_X86_BYTEORDER_H | 1 | #ifndef _ASM_X86_BYTEORDER_H |
2 | #define _ASM_X86_BYTEORDER_H | 2 | #define _ASM_X86_BYTEORDER_H |
3 | 3 | ||
4 | #include <asm/swab.h> | ||
5 | #include <linux/byteorder/little_endian.h> | 4 | #include <linux/byteorder/little_endian.h> |
6 | 5 | ||
7 | #endif /* _ASM_X86_BYTEORDER_H */ | 6 | #endif /* _ASM_X86_BYTEORDER_H */ |
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index ea408dcba513..7301e60dc4a8 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h | |||
@@ -93,6 +93,7 @@ | |||
93 | #define X86_FEATURE_XTOPOLOGY (3*32+22) /* cpu topology enum extensions */ | 93 | #define X86_FEATURE_XTOPOLOGY (3*32+22) /* cpu topology enum extensions */ |
94 | #define X86_FEATURE_TSC_RELIABLE (3*32+23) /* TSC is known to be reliable */ | 94 | #define X86_FEATURE_TSC_RELIABLE (3*32+23) /* TSC is known to be reliable */ |
95 | #define X86_FEATURE_NONSTOP_TSC (3*32+24) /* TSC does not stop in C states */ | 95 | #define X86_FEATURE_NONSTOP_TSC (3*32+24) /* TSC does not stop in C states */ |
96 | #define X86_FEATURE_CLFLUSH_MONITOR (3*32+25) /* "" clflush reqd with monitor */ | ||
96 | 97 | ||
97 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ | 98 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ |
98 | #define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */ | 99 | #define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */ |
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 4035357f5b9d..132a134d12f2 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h | |||
@@ -2,8 +2,8 @@ | |||
2 | #define _ASM_X86_DMA_MAPPING_H | 2 | #define _ASM_X86_DMA_MAPPING_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for | 5 | * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and |
6 | * documentation. | 6 | * Documentation/DMA-API.txt for documentation. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/scatterlist.h> | 9 | #include <linux/scatterlist.h> |
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h index 3d8ceddbd407..00d41ce4c844 100644 --- a/arch/x86/include/asm/e820.h +++ b/arch/x86/include/asm/e820.h | |||
@@ -49,6 +49,7 @@ | |||
49 | #define E820_RESERVED_KERN 128 | 49 | #define E820_RESERVED_KERN 128 |
50 | 50 | ||
51 | #ifndef __ASSEMBLY__ | 51 | #ifndef __ASSEMBLY__ |
52 | #include <linux/types.h> | ||
52 | struct e820entry { | 53 | struct e820entry { |
53 | __u64 addr; /* start of memory segment */ | 54 | __u64 addr; /* start of memory segment */ |
54 | __u64 size; /* size of memory segment */ | 55 | __u64 size; /* size of memory segment */ |
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index 05cfed4485fa..1dbbdf4be9b4 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h | |||
@@ -99,7 +99,6 @@ extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size); | |||
99 | * A boot-time mapping is currently limited to at most 16 pages. | 99 | * A boot-time mapping is currently limited to at most 16 pages. |
100 | */ | 100 | */ |
101 | extern void early_ioremap_init(void); | 101 | extern void early_ioremap_init(void); |
102 | extern void early_ioremap_clear(void); | ||
103 | extern void early_ioremap_reset(void); | 102 | extern void early_ioremap_reset(void); |
104 | extern void __iomem *early_ioremap(unsigned long offset, unsigned long size); | 103 | extern void __iomem *early_ioremap(unsigned long offset, unsigned long size); |
105 | extern void __iomem *early_memremap(unsigned long offset, unsigned long size); | 104 | extern void __iomem *early_memremap(unsigned long offset, unsigned long size); |
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h index b95162af0bf6..886c9402ec45 100644 --- a/arch/x86/include/asm/kvm.h +++ b/arch/x86/include/asm/kvm.h | |||
@@ -6,9 +6,16 @@ | |||
6 | * | 6 | * |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <asm/types.h> | 9 | #include <linux/types.h> |
10 | #include <linux/ioctl.h> | 10 | #include <linux/ioctl.h> |
11 | 11 | ||
12 | /* Select x86 specific features in <linux/kvm.h> */ | ||
13 | #define __KVM_HAVE_PIT | ||
14 | #define __KVM_HAVE_IOAPIC | ||
15 | #define __KVM_HAVE_DEVICE_ASSIGNMENT | ||
16 | #define __KVM_HAVE_MSI | ||
17 | #define __KVM_HAVE_USER_NMI | ||
18 | |||
12 | /* Architectural interrupt line count. */ | 19 | /* Architectural interrupt line count. */ |
13 | #define KVM_NR_INTERRUPTS 256 | 20 | #define KVM_NR_INTERRUPTS 256 |
14 | 21 | ||
diff --git a/arch/x86/include/asm/mach-default/mach_wakecpu.h b/arch/x86/include/asm/mach-default/mach_wakecpu.h index ceb013660146..89897a6a65b9 100644 --- a/arch/x86/include/asm/mach-default/mach_wakecpu.h +++ b/arch/x86/include/asm/mach-default/mach_wakecpu.h | |||
@@ -24,7 +24,13 @@ static inline void restore_NMI_vector(unsigned short *high, unsigned short *low) | |||
24 | { | 24 | { |
25 | } | 25 | } |
26 | 26 | ||
27 | #ifdef CONFIG_SMP | ||
27 | extern void __inquire_remote_apic(int apicid); | 28 | extern void __inquire_remote_apic(int apicid); |
29 | #else /* CONFIG_SMP */ | ||
30 | static inline void __inquire_remote_apic(int apicid) | ||
31 | { | ||
32 | } | ||
33 | #endif /* CONFIG_SMP */ | ||
28 | 34 | ||
29 | static inline void inquire_remote_apic(int apicid) | 35 | static inline void inquire_remote_apic(int apicid) |
30 | { | 36 | { |
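This is the usual stub-under-#ifdef idiom: UP builds get an empty static inline so callers can use __inquire_remote_apic() without #ifdefs of their own, and the compiler drops the call entirely. A compilable sketch of the pattern — the CONFIG_SMP define here just stands in for the real Kconfig symbol:

```c
#include <stdio.h>

#define CONFIG_SMP 1	/* comment this out to build the UP variant */

#ifdef CONFIG_SMP
static void inquire_remote_apic_demo(int apicid)
{
	printf("inquiring APIC %d\n", apicid);
}
#else
/* empty stub: callers compile unchanged, the call optimizes away */
static inline void inquire_remote_apic_demo(int apicid) { (void)apicid; }
#endif

int main(void)
{
	inquire_remote_apic_demo(0);
	return 0;
}
```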
diff --git a/arch/x86/include/asm/math_emu.h b/arch/x86/include/asm/math_emu.h index 5a65b107ad58..031f6266f425 100644 --- a/arch/x86/include/asm/math_emu.h +++ b/arch/x86/include/asm/math_emu.h | |||
@@ -1,31 +1,18 @@ | |||
1 | #ifndef _ASM_X86_MATH_EMU_H | 1 | #ifndef _ASM_X86_MATH_EMU_H |
2 | #define _ASM_X86_MATH_EMU_H | 2 | #define _ASM_X86_MATH_EMU_H |
3 | 3 | ||
4 | #include <asm/ptrace.h> | ||
5 | #include <asm/vm86.h> | ||
6 | |||
4 | /* This structure matches the layout of the data saved to the stack | 7 | /* This structure matches the layout of the data saved to the stack |
5 | following a device-not-present interrupt, part of it saved | 8 | following a device-not-present interrupt, part of it saved |
6 | automatically by the 80386/80486. | 9 | automatically by the 80386/80486. |
7 | */ | 10 | */ |
8 | struct info { | 11 | struct math_emu_info { |
9 | long ___orig_eip; | 12 | long ___orig_eip; |
10 | long ___ebx; | 13 | union { |
11 | long ___ecx; | 14 | struct pt_regs *regs; |
12 | long ___edx; | 15 | struct kernel_vm86_regs *vm86; |
13 | long ___esi; | 16 | }; |
14 | long ___edi; | ||
15 | long ___ebp; | ||
16 | long ___eax; | ||
17 | long ___ds; | ||
18 | long ___es; | ||
19 | long ___fs; | ||
20 | long ___orig_eax; | ||
21 | long ___eip; | ||
22 | long ___cs; | ||
23 | long ___eflags; | ||
24 | long ___esp; | ||
25 | long ___ss; | ||
26 | long ___vm86_es; /* This and the following only in vm86 mode */ | ||
27 | long ___vm86_ds; | ||
28 | long ___vm86_fs; | ||
29 | long ___vm86_gs; | ||
30 | }; | 17 | }; |
31 | #endif /* _ASM_X86_MATH_EMU_H */ | 18 | #endif /* _ASM_X86_MATH_EMU_H */ |
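The replacement struct leans on an anonymous union: the emulator receives one pointer that refers to either a normal pt_regs or a vm86 register block, instead of a hand-copied stack layout. A self-contained sketch of that pattern with stand-in register structs:

```c
#include <stdio.h>

struct pt_regs { long ip; };			/* stand-in */
struct kernel_vm86_regs { struct pt_regs pt; };	/* stand-in */

struct math_emu_info_demo {
	long ___orig_eip;
	union {		/* anonymous union: one slot, two views */
		struct pt_regs *regs;
		struct kernel_vm86_regs *vm86;
	};
};

int main(void)
{
	struct pt_regs r = { .ip = 0x1234 };
	struct math_emu_info_demo info = { .regs = &r };

	printf("ip = %#lx\n", (unsigned long)info.regs->ip);
	return 0;
}
```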
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 1d6e17c2f23a..32c6e17b960b 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h | |||
@@ -3,8 +3,8 @@ | |||
3 | 3 | ||
4 | #ifdef __x86_64__ | 4 | #ifdef __x86_64__ |
5 | 5 | ||
6 | #include <linux/types.h> | ||
6 | #include <asm/ioctls.h> | 7 | #include <asm/ioctls.h> |
7 | #include <asm/types.h> | ||
8 | 8 | ||
9 | /* | 9 | /* |
10 | * Machine Check support for x86 | 10 | * Machine Check support for x86 |
@@ -115,8 +115,6 @@ extern int mce_notify_user(void); | |||
115 | 115 | ||
116 | #endif /* !CONFIG_X86_32 */ | 116 | #endif /* !CONFIG_X86_32 */ |
117 | 117 | ||
118 | |||
119 | |||
120 | #ifdef CONFIG_X86_MCE | 118 | #ifdef CONFIG_X86_MCE |
121 | extern void mcheck_init(struct cpuinfo_x86 *c); | 119 | extern void mcheck_init(struct cpuinfo_x86 *c); |
122 | #else | 120 | #else |
@@ -126,5 +124,4 @@ extern void stop_mce(void); | |||
126 | extern void restart_mce(void); | 124 | extern void restart_mce(void); |
127 | 125 | ||
128 | #endif /* __KERNEL__ */ | 126 | #endif /* __KERNEL__ */ |
129 | |||
130 | #endif /* _ASM_X86_MCE_H */ | 127 | #endif /* _ASM_X86_MCE_H */ |
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h index 07f1af494ca5..105fb90a0635 100644 --- a/arch/x86/include/asm/mmzone_32.h +++ b/arch/x86/include/asm/mmzone_32.h | |||
@@ -32,8 +32,6 @@ static inline void get_memcfg_numa(void) | |||
32 | get_memcfg_numa_flat(); | 32 | get_memcfg_numa_flat(); |
33 | } | 33 | } |
34 | 34 | ||
35 | extern int early_pfn_to_nid(unsigned long pfn); | ||
36 | |||
37 | extern void resume_map_numa_kva(pgd_t *pgd); | 35 | extern void resume_map_numa_kva(pgd_t *pgd); |
38 | 36 | ||
39 | #else /* !CONFIG_NUMA */ | 37 | #else /* !CONFIG_NUMA */ |
diff --git a/arch/x86/include/asm/mmzone_64.h b/arch/x86/include/asm/mmzone_64.h index a5b3817d4b9e..a29f48c2a322 100644 --- a/arch/x86/include/asm/mmzone_64.h +++ b/arch/x86/include/asm/mmzone_64.h | |||
@@ -40,8 +40,6 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr) | |||
40 | #define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \ | 40 | #define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \ |
41 | NODE_DATA(nid)->node_spanned_pages) | 41 | NODE_DATA(nid)->node_spanned_pages) |
42 | 42 | ||
43 | extern int early_pfn_to_nid(unsigned long pfn); | ||
44 | |||
45 | #ifdef CONFIG_NUMA_EMU | 43 | #ifdef CONFIG_NUMA_EMU |
46 | #define FAKE_NODE_MIN_SIZE (64 * 1024 * 1024) | 44 | #define FAKE_NODE_MIN_SIZE (64 * 1024 * 1024) |
47 | #define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL)) | 45 | #define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL)) |
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h index 62d14ce3cd00..bd22f2a3713f 100644 --- a/arch/x86/include/asm/mpspec.h +++ b/arch/x86/include/asm/mpspec.h | |||
@@ -60,6 +60,7 @@ extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, | |||
60 | u32 gsi); | 60 | u32 gsi); |
61 | extern void mp_config_acpi_legacy_irqs(void); | 61 | extern void mp_config_acpi_legacy_irqs(void); |
62 | extern int mp_register_gsi(u32 gsi, int edge_level, int active_high_low); | 62 | extern int mp_register_gsi(u32 gsi, int edge_level, int active_high_low); |
63 | extern int acpi_probe_gsi(void); | ||
63 | #ifdef CONFIG_X86_IO_APIC | 64 | #ifdef CONFIG_X86_IO_APIC |
64 | extern int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin, | 65 | extern int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin, |
65 | u32 gsi, int triggering, int polarity); | 66 | u32 gsi, int triggering, int polarity); |
@@ -71,6 +72,11 @@ mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin, | |||
71 | return 0; | 72 | return 0; |
72 | } | 73 | } |
73 | #endif | 74 | #endif |
75 | #else /* !CONFIG_ACPI: */ | ||
76 | static inline int acpi_probe_gsi(void) | ||
77 | { | ||
78 | return 0; | ||
79 | } | ||
74 | #endif /* CONFIG_ACPI */ | 80 | #endif /* CONFIG_ACPI */ |
75 | 81 | ||
76 | #define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) | 82 | #define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) |
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index cb58643947b9..358acc59ae04 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h | |||
@@ -202,6 +202,35 @@ | |||
202 | #define MSR_IA32_THERM_STATUS 0x0000019c | 202 | #define MSR_IA32_THERM_STATUS 0x0000019c |
203 | #define MSR_IA32_MISC_ENABLE 0x000001a0 | 203 | #define MSR_IA32_MISC_ENABLE 0x000001a0 |
204 | 204 | ||
205 | /* MISC_ENABLE bits: architectural */ | ||
206 | #define MSR_IA32_MISC_ENABLE_FAST_STRING (1ULL << 0) | ||
207 | #define MSR_IA32_MISC_ENABLE_TCC (1ULL << 1) | ||
208 | #define MSR_IA32_MISC_ENABLE_EMON (1ULL << 7) | ||
209 | #define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL (1ULL << 11) | ||
210 | #define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL (1ULL << 12) | ||
211 | #define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP (1ULL << 16) | ||
212 | #define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << 18) | ||
213 | #define MSR_IA32_MISC_ENABLE_LIMIT_CPUID (1ULL << 22) | ||
214 | #define MSR_IA32_MISC_ENABLE_XTPR_DISABLE (1ULL << 23) | ||
215 | #define MSR_IA32_MISC_ENABLE_XD_DISABLE (1ULL << 34) | ||
216 | |||
217 | /* MISC_ENABLE bits: model-specific, meaning may vary from core to core */ | ||
218 | #define MSR_IA32_MISC_ENABLE_X87_COMPAT (1ULL << 2) | ||
219 | #define MSR_IA32_MISC_ENABLE_TM1 (1ULL << 3) | ||
220 | #define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE (1ULL << 4) | ||
221 | #define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE (1ULL << 6) | ||
222 | #define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK (1ULL << 8) | ||
223 | #define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE (1ULL << 9) | ||
224 | #define MSR_IA32_MISC_ENABLE_FERR (1ULL << 10) | ||
225 | #define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX (1ULL << 10) | ||
226 | #define MSR_IA32_MISC_ENABLE_TM2 (1ULL << 13) | ||
227 | #define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE (1ULL << 19) | ||
228 | #define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK (1ULL << 20) | ||
229 | #define MSR_IA32_MISC_ENABLE_L1D_CONTEXT (1ULL << 24) | ||
230 | #define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE (1ULL << 37) | ||
231 | #define MSR_IA32_MISC_ENABLE_TURBO_DISABLE (1ULL << 38) | ||
232 | #define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE (1ULL << 39) | ||
233 | |||
205 | /* Intel Model 6 */ | 234 | /* Intel Model 6 */ |
206 | #define MSR_P6_EVNTSEL0 0x00000186 | 235 | #define MSR_P6_EVNTSEL0 0x00000186 |
207 | #define MSR_P6_EVNTSEL1 0x00000187 | 236 | #define MSR_P6_EVNTSEL1 0x00000187 |
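Having named masks makes MISC_ENABLE handling read as bit tests rather than magic numbers. A sketch of testing bits in a value read elsewhere (in the kernel, via an MSR read); the MSR content below is made up:

```c
#include <stdint.h>
#include <stdio.h>

#define MSR_IA32_MISC_ENABLE_FAST_STRING (1ULL << 0)
#define MSR_IA32_MISC_ENABLE_MWAIT       (1ULL << 18)

int main(void)
{
	uint64_t misc_enable = 0x810089ULL;	/* hypothetical MSR value */

	if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)
		printf("fast string operations enabled\n");
	if (!(misc_enable & MSR_IA32_MISC_ENABLE_MWAIT))
		printf("MONITOR/MWAIT not enabled\n");
	return 0;
}
```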
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h index cb988aab716d..a51ada8467de 100644 --- a/arch/x86/include/asm/mtrr.h +++ b/arch/x86/include/asm/mtrr.h | |||
@@ -23,6 +23,7 @@ | |||
23 | #ifndef _ASM_X86_MTRR_H | 23 | #ifndef _ASM_X86_MTRR_H |
24 | #define _ASM_X86_MTRR_H | 24 | #define _ASM_X86_MTRR_H |
25 | 25 | ||
26 | #include <linux/types.h> | ||
26 | #include <linux/ioctl.h> | 27 | #include <linux/ioctl.h> |
27 | #include <linux/errno.h> | 28 | #include <linux/errno.h> |
28 | 29 | ||
@@ -58,15 +59,15 @@ struct mtrr_gentry { | |||
58 | #endif /* !__i386__ */ | 59 | #endif /* !__i386__ */ |
59 | 60 | ||
60 | struct mtrr_var_range { | 61 | struct mtrr_var_range { |
61 | u32 base_lo; | 62 | __u32 base_lo; |
62 | u32 base_hi; | 63 | __u32 base_hi; |
63 | u32 mask_lo; | 64 | __u32 mask_lo; |
64 | u32 mask_hi; | 65 | __u32 mask_hi; |
65 | }; | 66 | }; |
66 | 67 | ||
67 | /* In the Intel processor's MTRR interface, the MTRR type is always held in | 68 | /* In the Intel processor's MTRR interface, the MTRR type is always held in |
68 | an 8 bit field: */ | 69 | an 8 bit field: */ |
69 | typedef u8 mtrr_type; | 70 | typedef __u8 mtrr_type; |
70 | 71 | ||
71 | #define MTRR_NUM_FIXED_RANGES 88 | 72 | #define MTRR_NUM_FIXED_RANGES 88 |
72 | #define MTRR_MAX_VAR_RANGES 256 | 73 | #define MTRR_MAX_VAR_RANGES 256 |
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h index e9873a2e8695..776579119a00 100644 --- a/arch/x86/include/asm/page.h +++ b/arch/x86/include/asm/page.h | |||
@@ -57,7 +57,6 @@ typedef struct { pgdval_t pgd; } pgd_t; | |||
57 | typedef struct { pgprotval_t pgprot; } pgprot_t; | 57 | typedef struct { pgprotval_t pgprot; } pgprot_t; |
58 | 58 | ||
59 | extern int page_is_ram(unsigned long pagenr); | 59 | extern int page_is_ram(unsigned long pagenr); |
60 | extern int pagerange_is_ram(unsigned long start, unsigned long end); | ||
61 | extern int devmem_is_allowed(unsigned long pagenr); | 60 | extern int devmem_is_allowed(unsigned long pagenr); |
62 | extern void map_devmem(unsigned long pfn, unsigned long size, | 61 | extern void map_devmem(unsigned long pfn, unsigned long size, |
63 | pgprot_t vma_prot); | 62 | pgprot_t vma_prot); |
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index ba3e2ff6aedc..e299287e8e33 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h | |||
@@ -1352,14 +1352,7 @@ static inline void arch_leave_lazy_cpu_mode(void) | |||
1352 | PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave); | 1352 | PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave); |
1353 | } | 1353 | } |
1354 | 1354 | ||
1355 | static inline void arch_flush_lazy_cpu_mode(void) | 1355 | void arch_flush_lazy_cpu_mode(void); |
1356 | { | ||
1357 | if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) { | ||
1358 | arch_leave_lazy_cpu_mode(); | ||
1359 | arch_enter_lazy_cpu_mode(); | ||
1360 | } | ||
1361 | } | ||
1362 | |||
1363 | 1356 | ||
1364 | #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE | 1357 | #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE |
1365 | static inline void arch_enter_lazy_mmu_mode(void) | 1358 | static inline void arch_enter_lazy_mmu_mode(void) |
@@ -1372,13 +1365,7 @@ static inline void arch_leave_lazy_mmu_mode(void) | |||
1372 | PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave); | 1365 | PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave); |
1373 | } | 1366 | } |
1374 | 1367 | ||
1375 | static inline void arch_flush_lazy_mmu_mode(void) | 1368 | void arch_flush_lazy_mmu_mode(void); |
1376 | { | ||
1377 | if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) { | ||
1378 | arch_leave_lazy_mmu_mode(); | ||
1379 | arch_enter_lazy_mmu_mode(); | ||
1380 | } | ||
1381 | } | ||
1382 | 1369 | ||
1383 | static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, | 1370 | static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, |
1384 | unsigned long phys, pgprot_t flags) | 1371 | unsigned long phys, pgprot_t flags) |
@@ -1402,6 +1389,7 @@ static inline int __raw_spin_is_contended(struct raw_spinlock *lock) | |||
1402 | { | 1389 | { |
1403 | return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock); | 1390 | return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock); |
1404 | } | 1391 | } |
1392 | #define __raw_spin_is_contended __raw_spin_is_contended | ||
1405 | 1393 | ||
1406 | static __always_inline void __raw_spin_lock(struct raw_spinlock *lock) | 1394 | static __always_inline void __raw_spin_lock(struct raw_spinlock *lock) |
1407 | { | 1395 | { |
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h index cb7c151a8bff..dd14c54ac718 100644 --- a/arch/x86/include/asm/pgalloc.h +++ b/arch/x86/include/asm/pgalloc.h | |||
@@ -42,6 +42,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) | |||
42 | 42 | ||
43 | static inline void pte_free(struct mm_struct *mm, struct page *pte) | 43 | static inline void pte_free(struct mm_struct *mm, struct page *pte) |
44 | { | 44 | { |
45 | pgtable_page_dtor(pte); | ||
45 | __free_page(pte); | 46 | __free_page(pte); |
46 | } | 47 | } |
47 | 48 | ||
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 83e69f4a37f0..4f5af8447d54 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -302,16 +302,30 @@ static inline pte_t pte_mkspecial(pte_t pte) | |||
302 | 302 | ||
303 | extern pteval_t __supported_pte_mask; | 303 | extern pteval_t __supported_pte_mask; |
304 | 304 | ||
305 | /* | ||
306 | * Mask out unsupported bits in a present pgprot. Non-present pgprots | ||
307 | * can use those bits for other purposes, so leave them be. | ||
308 | */ | ||
309 | static inline pgprotval_t massage_pgprot(pgprot_t pgprot) | ||
310 | { | ||
311 | pgprotval_t protval = pgprot_val(pgprot); | ||
312 | |||
313 | if (protval & _PAGE_PRESENT) | ||
314 | protval &= __supported_pte_mask; | ||
315 | |||
316 | return protval; | ||
317 | } | ||
318 | |||
305 | static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) | 319 | static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) |
306 | { | 320 | { |
307 | return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) | | 321 | return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) | |
308 | pgprot_val(pgprot)) & __supported_pte_mask); | 322 | massage_pgprot(pgprot)); |
309 | } | 323 | } |
310 | 324 | ||
311 | static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) | 325 | static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) |
312 | { | 326 | { |
313 | return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) | | 327 | return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) | |
314 | pgprot_val(pgprot)) & __supported_pte_mask); | 328 | massage_pgprot(pgprot)); |
315 | } | 329 | } |
316 | 330 | ||
317 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | 331 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
@@ -323,7 +337,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | |||
323 | * the newprot (if present): | 337 | * the newprot (if present): |
324 | */ | 338 | */ |
325 | val &= _PAGE_CHG_MASK; | 339 | val &= _PAGE_CHG_MASK; |
326 | val |= pgprot_val(newprot) & (~_PAGE_CHG_MASK) & __supported_pte_mask; | 340 | val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK; |
327 | 341 | ||
328 | return __pte(val); | 342 | return __pte(val); |
329 | } | 343 | } |
@@ -339,7 +353,26 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) | |||
339 | 353 | ||
340 | #define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK) | 354 | #define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK) |
341 | 355 | ||
342 | #define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask) | 356 | #define canon_pgprot(p) __pgprot(massage_pgprot(p)) |
357 | |||
358 | static inline int is_new_memtype_allowed(unsigned long flags, | ||
359 | unsigned long new_flags) | ||
360 | { | ||
361 | /* | ||
362 | * Certain new memtypes are not allowed with certain | ||
363 | * requested memtype: | ||
364 | * - request is uncached, return cannot be write-back | ||
365 | * - request is write-combine, return cannot be write-back | ||
366 | */ | ||
367 | if ((flags == _PAGE_CACHE_UC_MINUS && | ||
368 | new_flags == _PAGE_CACHE_WB) || | ||
369 | (flags == _PAGE_CACHE_WC && | ||
370 | new_flags == _PAGE_CACHE_WB)) { | ||
371 | return 0; | ||
372 | } | ||
373 | |||
374 | return 1; | ||
375 | } | ||
343 | 376 | ||
344 | #ifndef __ASSEMBLY__ | 377 | #ifndef __ASSEMBLY__ |
345 | /* Indicate that x86 has its own track and untrack pfn vma functions */ | 378 | /* Indicate that x86 has its own track and untrack pfn vma functions */ |
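massage_pgprot() centralizes a rule the call sites previously open-coded: only present PTEs should be masked against __supported_pte_mask, because non-present PTEs (swap entries, for example) reuse those bits for other purposes. A userspace sketch with made-up mask values:

```c
#include <stdio.h>

#define _PAGE_PRESENT      0x001UL
#define SUPPORTED_PTE_MASK 0xfffUL	/* hypothetical __supported_pte_mask */

static unsigned long massage_pgprot_demo(unsigned long prot)
{
	if (prot & _PAGE_PRESENT)
		prot &= SUPPORTED_PTE_MASK;	/* present: drop unsupported bits */
	return prot;		/* not present: bits mean something else */
}

int main(void)
{
	printf("%#lx\n", massage_pgprot_demo(0xf001UL));	/* 0x1: masked */
	printf("%#lx\n", massage_pgprot_demo(0xf000UL));	/* 0xf000: untouched */
	return 0;
}
```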
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 091cd8855f2e..3bfd5235a9eb 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -353,7 +353,7 @@ struct i387_soft_struct { | |||
353 | u8 no_update; | 353 | u8 no_update; |
354 | u8 rm; | 354 | u8 rm; |
355 | u8 alimit; | 355 | u8 alimit; |
356 | struct info *info; | 356 | struct math_emu_info *info; |
357 | u32 entry_eip; | 357 | u32 entry_eip; |
358 | }; | 358 | }; |
359 | 359 | ||
diff --git a/arch/x86/include/asm/ptrace-abi.h b/arch/x86/include/asm/ptrace-abi.h index 25f1bb8fc626..8e0f8d199e05 100644 --- a/arch/x86/include/asm/ptrace-abi.h +++ b/arch/x86/include/asm/ptrace-abi.h | |||
@@ -83,7 +83,7 @@ | |||
83 | #ifdef CONFIG_X86_PTRACE_BTS | 83 | #ifdef CONFIG_X86_PTRACE_BTS |
84 | 84 | ||
85 | #ifndef __ASSEMBLY__ | 85 | #ifndef __ASSEMBLY__ |
86 | #include <asm/types.h> | 86 | #include <linux/types.h> |
87 | 87 | ||
88 | /* configuration/status structure used in PTRACE_BTS_CONFIG and | 88 | /* configuration/status structure used in PTRACE_BTS_CONFIG and |
89 | PTRACE_BTS_STATUS commands. | 89 | PTRACE_BTS_STATUS commands. |
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h index 0afcb5e58acc..ec666491aaa4 100644 --- a/arch/x86/include/asm/sigcontext.h +++ b/arch/x86/include/asm/sigcontext.h | |||
@@ -2,7 +2,7 @@ | |||
2 | #define _ASM_X86_SIGCONTEXT_H | 2 | #define _ASM_X86_SIGCONTEXT_H |
3 | 3 | ||
4 | #include <linux/compiler.h> | 4 | #include <linux/compiler.h> |
5 | #include <asm/types.h> | 5 | #include <linux/types.h> |
6 | 6 | ||
7 | #define FP_XSTATE_MAGIC1 0x46505853U | 7 | #define FP_XSTATE_MAGIC1 0x46505853U |
8 | #define FP_XSTATE_MAGIC2 0x46505845U | 8 | #define FP_XSTATE_MAGIC2 0x46505845U |
diff --git a/arch/x86/include/asm/sigcontext32.h b/arch/x86/include/asm/sigcontext32.h index 6126188cf3a9..ad1478c4ae12 100644 --- a/arch/x86/include/asm/sigcontext32.h +++ b/arch/x86/include/asm/sigcontext32.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef _ASM_X86_SIGCONTEXT32_H | 1 | #ifndef _ASM_X86_SIGCONTEXT32_H |
2 | #define _ASM_X86_SIGCONTEXT32_H | 2 | #define _ASM_X86_SIGCONTEXT32_H |
3 | 3 | ||
4 | #include <linux/types.h> | ||
5 | |||
4 | /* signal context for 32bit programs. */ | 6 | /* signal context for 32bit programs. */ |
5 | 7 | ||
6 | #define X86_FXSR_MAGIC 0x0000 | 8 | #define X86_FXSR_MAGIC 0x0000 |
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index d17c91981da2..8247e94ac6b1 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h | |||
@@ -245,6 +245,7 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock) | |||
245 | { | 245 | { |
246 | return __ticket_spin_is_contended(lock); | 246 | return __ticket_spin_is_contended(lock); |
247 | } | 247 | } |
248 | #define __raw_spin_is_contended __raw_spin_is_contended | ||
248 | 249 | ||
249 | static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) | 250 | static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) |
250 | { | 251 | { |
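Defining __raw_spin_is_contended to itself is the standard define-to-self marker: generic headers can then use #ifdef to tell whether the architecture supplied an implementation or a fallback is needed. A compilable sketch of the idiom:

```c
#include <stdio.h>

static inline int raw_spin_is_contended_demo(void)
{
	return 0;	/* arch-provided implementation (trivial here) */
}
/* marker: lets generic code detect the arch version with #ifdef */
#define raw_spin_is_contended_demo raw_spin_is_contended_demo

#ifndef raw_spin_is_contended_demo
#define raw_spin_is_contended_demo() 0	/* generic fallback (skipped here) */
#endif

int main(void)
{
	printf("%d\n", raw_spin_is_contended_demo());
	return 0;
}
```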
diff --git a/arch/x86/include/asm/swab.h b/arch/x86/include/asm/swab.h index 306d4178ffc9..557cd9f00661 100644 --- a/arch/x86/include/asm/swab.h +++ b/arch/x86/include/asm/swab.h | |||
@@ -1,7 +1,7 @@ | |||
1 | #ifndef _ASM_X86_SWAB_H | 1 | #ifndef _ASM_X86_SWAB_H |
2 | #define _ASM_X86_SWAB_H | 2 | #define _ASM_X86_SWAB_H |
3 | 3 | ||
4 | #include <asm/types.h> | 4 | #include <linux/types.h> |
5 | #include <linux/compiler.h> | 5 | #include <linux/compiler.h> |
6 | 6 | ||
7 | static inline __attribute_const__ __u32 __arch_swab32(__u32 val) | 7 | static inline __attribute_const__ __u32 __arch_swab32(__u32 val) |
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h index 9c6797c3e56c..c0b0bda754ee 100644 --- a/arch/x86/include/asm/syscalls.h +++ b/arch/x86/include/asm/syscalls.h | |||
@@ -40,7 +40,7 @@ asmlinkage int sys_sigaction(int, const struct old_sigaction __user *, | |||
40 | struct old_sigaction __user *); | 40 | struct old_sigaction __user *); |
41 | asmlinkage int sys_sigaltstack(unsigned long); | 41 | asmlinkage int sys_sigaltstack(unsigned long); |
42 | asmlinkage unsigned long sys_sigreturn(unsigned long); | 42 | asmlinkage unsigned long sys_sigreturn(unsigned long); |
43 | asmlinkage int sys_rt_sigreturn(struct pt_regs); | 43 | asmlinkage int sys_rt_sigreturn(unsigned long); |
44 | 44 | ||
45 | /* kernel/ioport.c */ | 45 | /* kernel/ioport.c */ |
46 | asmlinkage long sys_iopl(unsigned long); | 46 | asmlinkage long sys_iopl(unsigned long); |
diff --git a/arch/x86/include/asm/timex.h b/arch/x86/include/asm/timex.h index 1287dc1347d6..b5c9d45c981f 100644 --- a/arch/x86/include/asm/timex.h +++ b/arch/x86/include/asm/timex.h | |||
@@ -1,18 +1,13 @@ | |||
1 | /* x86 architecture timex specifications */ | ||
2 | #ifndef _ASM_X86_TIMEX_H | 1 | #ifndef _ASM_X86_TIMEX_H |
3 | #define _ASM_X86_TIMEX_H | 2 | #define _ASM_X86_TIMEX_H |
4 | 3 | ||
5 | #include <asm/processor.h> | 4 | #include <asm/processor.h> |
6 | #include <asm/tsc.h> | 5 | #include <asm/tsc.h> |
7 | 6 | ||
8 | #ifdef CONFIG_X86_ELAN | 7 | /* The PIT ticks at this frequency (in HZ): */ |
9 | # define PIT_TICK_RATE 1189200 /* AMD Elan has different frequency! */ | 8 | #define PIT_TICK_RATE 1193182 |
10 | #elif defined(CONFIG_X86_RDC321X) | 9 | |
11 | # define PIT_TICK_RATE 1041667 /* Underlying HZ for R8610 */ | 10 | #define CLOCK_TICK_RATE PIT_TICK_RATE |
12 | #else | ||
13 | # define PIT_TICK_RATE 1193182 /* Underlying HZ */ | ||
14 | #endif | ||
15 | #define CLOCK_TICK_RATE PIT_TICK_RATE | ||
16 | 11 | ||
17 | #define ARCH_HAS_READ_CURRENT_TIMER | 12 | #define ARCH_HAS_READ_CURRENT_TIMER |
18 | 13 | ||
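With the per-platform rates gone, PIT_TICK_RATE is the single 1193182 Hz constant that timer code derives divisors from; for example, a LATCH-style reload value is the rounded ratio to HZ. A quick worked example:

```c
#include <stdio.h>

#define PIT_TICK_RATE 1193182	/* Hz */
#define HZ 1000			/* hypothetical tick frequency */

int main(void)
{
	/* rounded PIT reload divisor for HZ timer interrupts/second */
	int latch = (PIT_TICK_RATE + HZ / 2) / HZ;

	printf("LATCH = %d\n", latch);	/* 1193 */
	return 0;
}
```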
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index 2ee0a3bceedf..cf3bb053da0b 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h | |||
@@ -41,7 +41,7 @@ dotraplinkage void do_int3(struct pt_regs *, long); | |||
41 | dotraplinkage void do_overflow(struct pt_regs *, long); | 41 | dotraplinkage void do_overflow(struct pt_regs *, long); |
42 | dotraplinkage void do_bounds(struct pt_regs *, long); | 42 | dotraplinkage void do_bounds(struct pt_regs *, long); |
43 | dotraplinkage void do_invalid_op(struct pt_regs *, long); | 43 | dotraplinkage void do_invalid_op(struct pt_regs *, long); |
44 | dotraplinkage void do_device_not_available(struct pt_regs *, long); | 44 | dotraplinkage void do_device_not_available(struct pt_regs); |
45 | dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *, long); | 45 | dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *, long); |
46 | dotraplinkage void do_invalid_TSS(struct pt_regs *, long); | 46 | dotraplinkage void do_invalid_TSS(struct pt_regs *, long); |
47 | dotraplinkage void do_segment_not_present(struct pt_regs *, long); | 47 | dotraplinkage void do_segment_not_present(struct pt_regs *, long); |
@@ -77,7 +77,7 @@ extern int panic_on_unrecovered_nmi; | |||
77 | extern int kstack_depth_to_print; | 77 | extern int kstack_depth_to_print; |
78 | 78 | ||
79 | void math_error(void __user *); | 79 | void math_error(void __user *); |
80 | asmlinkage void math_emulate(long); | 80 | void math_emulate(struct math_emu_info *); |
81 | #ifdef CONFIG_X86_32 | 81 | #ifdef CONFIG_X86_32 |
82 | unsigned long patch_espfix_desc(unsigned long, unsigned long); | 82 | unsigned long patch_espfix_desc(unsigned long, unsigned long); |
83 | #else | 83 | #else |
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index 7ef617ef1df3..4bd990ee43df 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h | |||
@@ -137,7 +137,7 @@ static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot) | |||
137 | pte_t pte; | 137 | pte_t pte; |
138 | 138 | ||
139 | pte.pte = ((phys_addr_t)page_nr << PAGE_SHIFT) | | 139 | pte.pte = ((phys_addr_t)page_nr << PAGE_SHIFT) | |
140 | (pgprot_val(pgprot) & __supported_pte_mask); | 140 | massage_pgprot(pgprot); |
141 | 141 | ||
142 | return pte; | 142 | return pte; |
143 | } | 143 | } |
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index d37593c2f438..7678f10c4568 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
@@ -973,6 +973,29 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) | |||
973 | nr_ioapics++; | 973 | nr_ioapics++; |
974 | } | 974 | } |
975 | 975 | ||
976 | int __init acpi_probe_gsi(void) | ||
977 | { | ||
978 | int idx; | ||
979 | int gsi; | ||
980 | int max_gsi = 0; | ||
981 | |||
982 | if (acpi_disabled) | ||
983 | return 0; | ||
984 | |||
985 | if (!acpi_ioapic) | ||
986 | return 0; | ||
987 | |||
988 | max_gsi = 0; | ||
989 | for (idx = 0; idx < nr_ioapics; idx++) { | ||
990 | gsi = mp_ioapic_routing[idx].gsi_end; | ||
991 | |||
992 | if (gsi > max_gsi) | ||
993 | max_gsi = gsi; | ||
994 | } | ||
995 | |||
996 | return max_gsi + 1; | ||
997 | } | ||
998 | |||
976 | static void assign_to_mp_irq(struct mp_config_intsrc *m, | 999 | static void assign_to_mp_irq(struct mp_config_intsrc *m, |
977 | struct mp_config_intsrc *mp_irq) | 1000 | struct mp_config_intsrc *mp_irq) |
978 | { | 1001 | { |
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index 707c1f6f95fa..a60c1f3bcb87 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c | |||
@@ -156,11 +156,11 @@ static int __init acpi_sleep_setup(char *str) | |||
156 | #ifdef CONFIG_HIBERNATION | 156 | #ifdef CONFIG_HIBERNATION |
157 | if (strncmp(str, "s4_nohwsig", 10) == 0) | 157 | if (strncmp(str, "s4_nohwsig", 10) == 0) |
158 | acpi_no_s4_hw_signature(); | 158 | acpi_no_s4_hw_signature(); |
159 | if (strncmp(str, "s4_nonvs", 8) == 0) | ||
160 | acpi_s4_no_nvs(); | ||
159 | #endif | 161 | #endif |
160 | if (strncmp(str, "old_ordering", 12) == 0) | 162 | if (strncmp(str, "old_ordering", 12) == 0) |
161 | acpi_old_suspend_ordering(); | 163 | acpi_old_suspend_ordering(); |
162 | if (strncmp(str, "s4_nonvs", 8) == 0) | ||
163 | acpi_s4_no_nvs(); | ||
164 | str = strchr(str, ','); | 164 | str = strchr(str, ','); |
165 | if (str != NULL) | 165 | if (str != NULL) |
166 | str += strspn(str, ", \t"); | 166 | str += strspn(str, ", \t"); |
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 566a08466b19..570f36e44e59 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c | |||
@@ -47,6 +47,7 @@ | |||
47 | #include <asm/proto.h> | 47 | #include <asm/proto.h> |
48 | #include <asm/apic.h> | 48 | #include <asm/apic.h> |
49 | #include <asm/i8259.h> | 49 | #include <asm/i8259.h> |
50 | #include <asm/smp.h> | ||
50 | 51 | ||
51 | #include <mach_apic.h> | 52 | #include <mach_apic.h> |
52 | #include <mach_apicdef.h> | 53 | #include <mach_apicdef.h> |
@@ -861,7 +862,7 @@ void clear_local_APIC(void) | |||
861 | } | 862 | } |
862 | 863 | ||
863 | /* lets not touch this if we didn't frob it */ | 864 | /* lets not touch this if we didn't frob it */ |
864 | #if defined(CONFIG_X86_MCE_P4THERMAL) || defined(X86_MCE_INTEL) | 865 | #if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL) |
865 | if (maxlvt >= 5) { | 866 | if (maxlvt >= 5) { |
866 | v = apic_read(APIC_LVTTHMR); | 867 | v = apic_read(APIC_LVTTHMR); |
867 | apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED); | 868 | apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED); |
@@ -894,6 +895,10 @@ void disable_local_APIC(void) | |||
894 | { | 895 | { |
895 | unsigned int value; | 896 | unsigned int value; |
896 | 897 | ||
898 | /* APIC hasn't been mapped yet */ | ||
899 | if (!apic_phys) | ||
900 | return; | ||
901 | |||
897 | clear_local_APIC(); | 902 | clear_local_APIC(); |
898 | 903 | ||
899 | /* | 904 | /* |
@@ -1431,7 +1436,7 @@ static int __init detect_init_APIC(void) | |||
1431 | switch (boot_cpu_data.x86_vendor) { | 1436 | switch (boot_cpu_data.x86_vendor) { |
1432 | case X86_VENDOR_AMD: | 1437 | case X86_VENDOR_AMD: |
1433 | if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) || | 1438 | if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) || |
1434 | (boot_cpu_data.x86 == 15)) | 1439 | (boot_cpu_data.x86 >= 15)) |
1435 | break; | 1440 | break; |
1436 | goto no_apic; | 1441 | goto no_apic; |
1437 | case X86_VENDOR_INTEL: | 1442 | case X86_VENDOR_INTEL: |
@@ -1832,6 +1837,11 @@ void __cpuinit generic_processor_info(int apicid, int version) | |||
1832 | num_processors++; | 1837 | num_processors++; |
1833 | cpu = cpumask_next_zero(-1, cpu_present_mask); | 1838 | cpu = cpumask_next_zero(-1, cpu_present_mask); |
1834 | 1839 | ||
1840 | if (version != apic_version[boot_cpu_physical_apicid]) | ||
1841 | WARN_ONCE(1, | ||
1842 | "ACPI: apic version mismatch, bootcpu: %x cpu %d: %x\n", | ||
1843 | apic_version[boot_cpu_physical_apicid], cpu, version); | ||
1844 | |||
1835 | physid_set(apicid, phys_cpu_present_map); | 1845 | physid_set(apicid, phys_cpu_present_map); |
1836 | if (apicid == boot_cpu_physical_apicid) { | 1846 | if (apicid == boot_cpu_physical_apicid) { |
1837 | /* | 1847 | /* |
diff --git a/arch/x86/kernel/cpu/cpufreq/Kconfig b/arch/x86/kernel/cpu/cpufreq/Kconfig index efae3b22a0ff..65792c2cc462 100644 --- a/arch/x86/kernel/cpu/cpufreq/Kconfig +++ b/arch/x86/kernel/cpu/cpufreq/Kconfig | |||
@@ -245,17 +245,6 @@ config X86_E_POWERSAVER | |||
245 | 245 | ||
246 | comment "shared options" | 246 | comment "shared options" |
247 | 247 | ||
248 | config X86_ACPI_CPUFREQ_PROC_INTF | ||
249 | bool "/proc/acpi/processor/../performance interface (deprecated)" | ||
250 | depends on PROC_FS | ||
251 | depends on X86_ACPI_CPUFREQ || X86_POWERNOW_K7_ACPI || X86_POWERNOW_K8_ACPI | ||
252 | help | ||
253 | This enables the deprecated /proc/acpi/processor/../performance | ||
254 | interface. While it is helpful for debugging, the generic, | ||
255 | cross-architecture cpufreq interfaces should be used. | ||
256 | |||
257 | If in doubt, say N. | ||
258 | |||
259 | config X86_SPEEDSTEP_LIB | 248 | config X86_SPEEDSTEP_LIB |
260 | tristate | 249 | tristate |
261 | default (X86_SPEEDSTEP_ICH || X86_SPEEDSTEP_SMI || X86_P4_CLOCKMOD) | 250 | default (X86_SPEEDSTEP_ICH || X86_SPEEDSTEP_SMI || X86_P4_CLOCKMOD) |
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 06fcd8f9323c..4b1c319d30c3 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | |||
@@ -145,7 +145,7 @@ typedef union { | |||
145 | 145 | ||
146 | struct drv_cmd { | 146 | struct drv_cmd { |
147 | unsigned int type; | 147 | unsigned int type; |
148 | cpumask_var_t mask; | 148 | const struct cpumask *mask; |
149 | drv_addr_union addr; | 149 | drv_addr_union addr; |
150 | u32 val; | 150 | u32 val; |
151 | }; | 151 | }; |
@@ -231,15 +231,9 @@ static u32 get_cur_val(const struct cpumask *mask) | |||
231 | return 0; | 231 | return 0; |
232 | } | 232 | } |
233 | 233 | ||
234 | if (unlikely(!alloc_cpumask_var(&cmd.mask, GFP_KERNEL))) | 234 | cmd.mask = mask; |
235 | return 0; | ||
236 | |||
237 | cpumask_copy(cmd.mask, mask); | ||
238 | |||
239 | drv_read(&cmd); | 235 | drv_read(&cmd); |
240 | 236 | ||
241 | free_cpumask_var(cmd.mask); | ||
242 | |||
243 | dprintk("get_cur_val = %u\n", cmd.val); | 237 | dprintk("get_cur_val = %u\n", cmd.val); |
244 | 238 | ||
245 | return cmd.val; | 239 | return cmd.val; |
@@ -369,7 +363,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu) | |||
369 | return freq; | 363 | return freq; |
370 | } | 364 | } |
371 | 365 | ||
372 | static unsigned int check_freqs(const cpumask_t *mask, unsigned int freq, | 366 | static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq, |
373 | struct acpi_cpufreq_data *data) | 367 | struct acpi_cpufreq_data *data) |
374 | { | 368 | { |
375 | unsigned int cur_freq; | 369 | unsigned int cur_freq; |
@@ -404,9 +398,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, | |||
404 | return -ENODEV; | 398 | return -ENODEV; |
405 | } | 399 | } |
406 | 400 | ||
407 | if (unlikely(!alloc_cpumask_var(&cmd.mask, GFP_KERNEL))) | ||
408 | return -ENOMEM; | ||
409 | |||
410 | perf = data->acpi_data; | 401 | perf = data->acpi_data; |
411 | result = cpufreq_frequency_table_target(policy, | 402 | result = cpufreq_frequency_table_target(policy, |
412 | data->freq_table, | 403 | data->freq_table, |
@@ -451,9 +442,9 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, | |||
451 | 442 | ||
452 | /* cpufreq holds the hotplug lock, so we are safe from here on */ | 443 | /* cpufreq holds the hotplug lock, so we are safe from here on */ |
453 | if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY) | 444 | if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY) |
454 | cpumask_and(cmd.mask, cpu_online_mask, policy->cpus); | 445 | cmd.mask = policy->cpus; |
455 | else | 446 | else |
456 | cpumask_copy(cmd.mask, cpumask_of(policy->cpu)); | 447 | cmd.mask = cpumask_of(policy->cpu); |
457 | 448 | ||
458 | freqs.old = perf->states[perf->state].core_frequency * 1000; | 449 | freqs.old = perf->states[perf->state].core_frequency * 1000; |
459 | freqs.new = data->freq_table[next_state].frequency; | 450 | freqs.new = data->freq_table[next_state].frequency; |
@@ -480,7 +471,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, | |||
480 | perf->state = next_perf_state; | 471 | perf->state = next_perf_state; |
481 | 472 | ||
482 | out: | 473 | out: |
483 | free_cpumask_var(cmd.mask); | ||
484 | return result; | 474 | return result; |
485 | } | 475 | } |
486 | 476 | ||
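The struct drv_cmd change swaps an allocated, copied cpumask for a borrowed const pointer: drv_read() only reads the mask for the duration of the call, so there is nothing to allocate, copy, or free, and the -ENOMEM failure path disappears. A small sketch of the borrow-instead-of-copy shape, with stand-in types:

```c
#include <stdio.h>

struct mask_demo { unsigned long bits; };

struct drv_cmd_demo {
	const struct mask_demo *mask;	/* borrowed from the caller, not owned */
	unsigned int val;
};

static void drv_read_demo(struct drv_cmd_demo *cmd)
{
	/* read-only use of cmd->mask while the caller's mask is alive */
	cmd->val = (unsigned int)cmd->mask->bits;
}

int main(void)
{
	struct mask_demo online = { .bits = 0xf };
	struct drv_cmd_demo cmd = { .mask = &online };

	drv_read_demo(&cmd);	/* no allocation, no copy, no free */
	printf("%#x\n", cmd.val);
	return 0;
}
```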
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 5c28b37dea11..6428aa17b40e 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c | |||
@@ -939,10 +939,25 @@ static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) | |||
939 | free_cpumask_var(data->acpi_data.shared_cpu_map); | 939 | free_cpumask_var(data->acpi_data.shared_cpu_map); |
940 | } | 940 | } |
941 | 941 | ||
942 | static int get_transition_latency(struct powernow_k8_data *data) | ||
943 | { | ||
944 | int max_latency = 0; | ||
945 | int i; | ||
946 | for (i = 0; i < data->acpi_data.state_count; i++) { | ||
947 | int cur_latency = data->acpi_data.states[i].transition_latency | ||
948 | + data->acpi_data.states[i].bus_master_latency; | ||
949 | if (cur_latency > max_latency) | ||
950 | max_latency = cur_latency; | ||
951 | } | ||
952 | /* value in usecs, needs to be in nanoseconds */ | ||
953 | return 1000 * max_latency; | ||
954 | } | ||
955 | |||
942 | #else | 956 | #else |
943 | static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; } | 957 | static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; } |
944 | static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; } | 958 | static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; } |
945 | static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; } | 959 | static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; } |
960 | static int get_transition_latency(struct powernow_k8_data *data) { return 0; } | ||
946 | #endif /* CONFIG_X86_POWERNOW_K8_ACPI */ | 961 | #endif /* CONFIG_X86_POWERNOW_K8_ACPI */ |
947 | 962 | ||
948 | /* Take a frequency, and issue the fid/vid transition command */ | 963 | /* Take a frequency, and issue the fid/vid transition command */ |
@@ -1142,8 +1157,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
1142 | data->cpu = pol->cpu; | 1157 | data->cpu = pol->cpu; |
1143 | data->currpstate = HW_PSTATE_INVALID; | 1158 | data->currpstate = HW_PSTATE_INVALID; |
1144 | 1159 | ||
1145 | rc = powernow_k8_cpu_init_acpi(data); | 1160 | if (powernow_k8_cpu_init_acpi(data)) { |
1146 | if (rc) { | ||
1147 | /* | 1161 | /* |
1148 | * Use the PSB BIOS structure. This is only available on | 1162 | * Use the PSB BIOS structure. This is only available on |
1149 | * an UP version, and is deprecated by AMD. | 1163 | * an UP version, and is deprecated by AMD. |
@@ -1161,19 +1175,28 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
1161 | "ACPI maintainers and complain to your BIOS " | 1175 | "ACPI maintainers and complain to your BIOS " |
1162 | "vendor.\n"); | 1176 | "vendor.\n"); |
1163 | #endif | 1177 | #endif |
1164 | goto err_out; | 1178 | kfree(data); |
1179 | return -ENODEV; | ||
1165 | } | 1180 | } |
1166 | if (pol->cpu != 0) { | 1181 | if (pol->cpu != 0) { |
1167 | printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for " | 1182 | printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for " |
1168 | "CPU other than CPU0. Complain to your BIOS " | 1183 | "CPU other than CPU0. Complain to your BIOS " |
1169 | "vendor.\n"); | 1184 | "vendor.\n"); |
1170 | goto err_out; | 1185 | kfree(data); |
1186 | return -ENODEV; | ||
1171 | } | 1187 | } |
1172 | rc = find_psb_table(data); | 1188 | rc = find_psb_table(data); |
1173 | if (rc) { | 1189 | if (rc) { |
1174 | goto err_out; | 1190 | kfree(data); |
1191 | return -ENODEV; | ||
1175 | } | 1192 | } |
1176 | } | 1193 | /* Take a crude guess here. |
1194 | * That guess was in microseconds, so multiply by 1000 */ ||
1195 | pol->cpuinfo.transition_latency = ( | ||
1196 | ((data->rvo + 8) * data->vstable * VST_UNITS_20US) + | ||
1197 | ((1 << data->irt) * 30)) * 1000; | ||
1198 | } else /* ACPI _PSS objects available */ | ||
1199 | pol->cpuinfo.transition_latency = get_transition_latency(data); | ||
1177 | 1200 | ||
1178 | /* only run on specific CPU from here on */ | 1201 | /* only run on specific CPU from here on */ |
1179 | oldmask = current->cpus_allowed; | 1202 | oldmask = current->cpus_allowed; |
@@ -1204,11 +1227,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
1204 | cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu)); | 1227 | cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu)); |
1205 | data->available_cores = pol->cpus; | 1228 | data->available_cores = pol->cpus; |
1206 | 1229 | ||
1207 | /* Take a crude guess here. | ||
1208 | * That guess was in microseconds, so multiply by 1000 */ ||
1209 | pol->cpuinfo.transition_latency = (((data->rvo + 8) * data->vstable * VST_UNITS_20US) | ||
1210 | + (3 * (1 << data->irt) * 10)) * 1000; | ||
1211 | |||
1212 | if (cpu_family == CPU_HW_PSTATE) | 1230 | if (cpu_family == CPU_HW_PSTATE) |
1213 | pol->cur = find_khz_freq_from_pstate(data->powernow_table, data->currpstate); | 1231 | pol->cur = find_khz_freq_from_pstate(data->powernow_table, data->currpstate); |
1214 | else | 1232 | else |
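get_transition_latency(), added above, walks the ACPI _PSS states, keeps the worst-case transition_latency + bus_master_latency, and converts microseconds to nanoseconds; the old "crude guess" formula survives only for the PSB/BIOS-table path. The same maximum as a standalone sketch (made-up state values):

    #include <stdio.h>

    struct pstate {
            int transition_latency;         /* usecs */
            int bus_master_latency;         /* usecs */
    };

    static int max_transition_latency_ns(const struct pstate *s, int n)
    {
            int i, max = 0;

            for (i = 0; i < n; i++) {
                    int cur = s[i].transition_latency + s[i].bus_master_latency;
                    if (cur > max)
                            max = cur;
            }
            return 1000 * max;              /* usecs -> nsecs */
    }

    int main(void)
    {
            struct pstate states[] = { { 100, 10 }, { 80, 40 } };

            /* prints 120000: state 1 is the slower transition overall */
            printf("%d ns\n", max_transition_latency_ns(states, 2));
            return 0;
    }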
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 8ea6929e974c..24ff26a38ade 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -29,6 +29,19 @@ | |||
29 | 29 | ||
30 | static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | 30 | static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) |
31 | { | 31 | { |
32 | /* Unmask CPUID levels if masked: */ | ||
33 | if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { | ||
34 | u64 misc_enable; | ||
35 | |||
36 | rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); | ||
37 | |||
38 | if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) { | ||
39 | misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID; | ||
40 | wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable); | ||
41 | c->cpuid_level = cpuid_eax(0); | ||
42 | } | ||
43 | } | ||
44 | |||
32 | if ((c->x86 == 0xf && c->x86_model >= 0x03) || | 45 | if ((c->x86 == 0xf && c->x86_model >= 0x03) || |
33 | (c->x86 == 0x6 && c->x86_model >= 0x0e)) | 46 | (c->x86 == 0x6 && c->x86_model >= 0x0e)) |
34 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | 47 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); |
@@ -278,6 +291,9 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) | |||
278 | ds_init_intel(c); | 291 | ds_init_intel(c); |
279 | } | 292 | } |
280 | 293 | ||
294 | if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush) | ||
295 | set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR); | ||
296 | |||
281 | #ifdef CONFIG_X86_64 | 297 | #ifdef CONFIG_X86_64 |
282 | if (c->x86 == 15) | 298 | if (c->x86 == 15) |
283 | c->x86_cache_alignment = c->x86_clflush_size * 2; | 299 | c->x86_cache_alignment = c->x86_clflush_size * 2; |
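The early_init_intel() hunk clears MSR_IA32_MISC_ENABLE_LIMIT_CPUID so that cpuid_eax(0) reports the CPU's real maximum CPUID level on family 6, model >= 0xd parts where the BIOS left the limit bit set. To inspect the same bit from user space, a rough sketch against the msr driver (assumptions: /dev/cpu/0/msr exists with the msr module loaded, IA32_MISC_ENABLE is MSR 0x1a0, and bit 22 is the "limit CPUID maxval" bit; read-only, nothing is written back):

    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            uint64_t misc_enable;
            int fd = open("/dev/cpu/0/msr", O_RDONLY);  /* needs the msr module */

            if (fd < 0 || pread(fd, &misc_enable, 8, 0x1a0) != 8) {
                    perror("msr");
                    return 1;
            }
            close(fd);
            /* bit 22: "limit CPUID maxval" -- the bit the kernel clears above */
            printf("CPUID limiting %s\n",
                   (misc_enable >> 22) & 1 ? "enabled" : "disabled");
            return 0;
    }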
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 48533d77be78..da299eb85fc0 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -36,8 +36,11 @@ static struct _cache_table cache_table[] __cpuinitdata = | |||
36 | { | 36 | { |
37 | { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ | 37 | { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ |
38 | { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ | 38 | { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ |
39 | { 0x09, LVL_1_INST, 32 }, /* 4-way set assoc, 64 byte line size */ | ||
39 | { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */ | 40 | { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */ |
40 | { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */ | 41 | { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */ |
42 | { 0x0d, LVL_1_DATA, 16 }, /* 4-way set assoc, 64 byte line size */ | ||
43 | { 0x21, LVL_2, 256 }, /* 8-way set assoc, 64 byte line size */ | ||
41 | { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */ | 44 | { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */ |
42 | { 0x23, LVL_3, 1024 }, /* 8-way set assoc, sectored cache, 64 byte line size */ | 45 | { 0x23, LVL_3, 1024 }, /* 8-way set assoc, sectored cache, 64 byte line size */ |
43 | { 0x25, LVL_3, 2048 }, /* 8-way set assoc, sectored cache, 64 byte line size */ | 46 | { 0x25, LVL_3, 2048 }, /* 8-way set assoc, sectored cache, 64 byte line size */ |
@@ -85,6 +88,18 @@ static struct _cache_table cache_table[] __cpuinitdata = | |||
85 | { 0x85, LVL_2, 2048 }, /* 8-way set assoc, 32 byte line size */ | 88 | { 0x85, LVL_2, 2048 }, /* 8-way set assoc, 32 byte line size */ |
86 | { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */ | 89 | { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */ |
87 | { 0x87, LVL_2, 1024 }, /* 8-way set assoc, 64 byte line size */ | 90 | { 0x87, LVL_2, 1024 }, /* 8-way set assoc, 64 byte line size */ |
91 | { 0xd0, LVL_3, 512 }, /* 4-way set assoc, 64 byte line size */ | ||
92 | { 0xd1, LVL_3, 1024 }, /* 4-way set assoc, 64 byte line size */ | ||
93 | { 0xd2, LVL_3, 2048 }, /* 4-way set assoc, 64 byte line size */ | ||
94 | { 0xd6, LVL_3, 1024 }, /* 8-way set assoc, 64 byte line size */ | ||
95 | { 0xd7, LVL_3, 2048 }, /* 8-way set assoc, 64 byte line size */ ||
96 | { 0xd8, LVL_3, 4096 }, /* 12-way set assoc, 64 byte line size */ | ||
97 | { 0xdc, LVL_3, 2048 }, /* 12-way set assoc, 64 byte line size */ | ||
98 | { 0xdd, LVL_3, 4096 }, /* 12-way set assoc, 64 byte line size */ | ||
99 | { 0xde, LVL_3, 8192 }, /* 12-way set assoc, 64 byte line size */ | ||
100 | { 0xe2, LVL_3, 2048 }, /* 16-way set assoc, 64 byte line size */ | ||
101 | { 0xe3, LVL_3, 4096 }, /* 16-way set assoc, 64 byte line size */ | ||
102 | { 0xe4, LVL_3, 8192 }, /* 16-way set assoc, 64 byte line size */ | ||
88 | { 0x00, 0, 0} | 103 | { 0x00, 0, 0} |
89 | }; | 104 | }; |
90 | 105 | ||
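These new descriptor rows extend the usual CPUID leaf 2 scheme: each one-byte descriptor the CPU returns is looked up linearly in cache_table[], which ends at the { 0x00, 0, 0 } sentinel visible above. A minimal sketch of that lookup, with a few values copied from the table (user-space C, not the kernel's actual walker):

    #include <stdio.h>

    struct cache_desc { unsigned char desc; int level; int size_kb; };

    static const struct cache_desc table[] = {
            { 0x09, 1, 32 },        /* L1 instruction, from the rows above */
            { 0x21, 2, 256 },       /* L2 */
            { 0xd0, 3, 512 },       /* L3 */
            { 0x00, 0, 0 },         /* sentinel terminates the scan */
    };

    static const struct cache_desc *lookup(unsigned char d)
    {
            int i;

            for (i = 0; table[i].desc != 0; i++)
                    if (table[i].desc == d)
                            return &table[i];
            return NULL;
    }

    int main(void)
    {
            const struct cache_desc *c = lookup(0xd0);

            if (c)
                    printf("L%d cache, %d KB\n", c->level, c->size_kb);
            return 0;
    }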
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c index 1c838032fd37..fe79985ce0f2 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_64.c | |||
@@ -295,11 +295,11 @@ void do_machine_check(struct pt_regs * regs, long error_code) | |||
295 | * If we know that the error was in user space, send a | 295 | * If we know that the error was in user space, send a |
296 | * SIGBUS. Otherwise, panic if tolerance is low. | 296 | * SIGBUS. Otherwise, panic if tolerance is low. |
297 | * | 297 | * |
298 | * do_exit() takes an awful lot of locks and has a slight | 298 | * force_sig() takes an awful lot of locks and has a slight |
299 | * risk of deadlocking. | 299 | * risk of deadlocking. |
300 | */ | 300 | */ |
301 | if (user_space) { | 301 | if (user_space) { |
302 | do_exit(SIGBUS); | 302 | force_sig(SIGBUS, current); |
303 | } else if (panic_on_oops || tolerant < 2) { | 303 | } else if (panic_on_oops || tolerant < 2) { |
304 | mce_panic("Uncorrected machine check", | 304 | mce_panic("Uncorrected machine check", |
305 | &panicm, mcestart); | 305 | &panicm, mcestart); |
@@ -490,7 +490,7 @@ static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c) | |||
490 | 490 | ||
491 | } | 491 | } |
492 | 492 | ||
493 | static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c) | 493 | static void mce_cpu_features(struct cpuinfo_x86 *c) |
494 | { | 494 | { |
495 | switch (c->x86_vendor) { | 495 | switch (c->x86_vendor) { |
496 | case X86_VENDOR_INTEL: | 496 | case X86_VENDOR_INTEL: |
@@ -734,6 +734,7 @@ __setup("mce=", mcheck_enable); | |||
734 | static int mce_resume(struct sys_device *dev) | 734 | static int mce_resume(struct sys_device *dev) |
735 | { | 735 | { |
736 | mce_init(NULL); | 736 | mce_init(NULL); |
737 | mce_cpu_features(¤t_cpu_data); | ||
737 | return 0; | 738 | return 0; |
738 | } | 739 | } |
739 | 740 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c index 8ae8c4ff094d..f2ee0ae29bd6 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c | |||
@@ -121,7 +121,7 @@ static long threshold_restart_bank(void *_tr) | |||
121 | } | 121 | } |
122 | 122 | ||
123 | /* cpu init entry point, called from mce.c with preempt off */ | 123 | /* cpu init entry point, called from mce.c with preempt off */ |
124 | void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c) | 124 | void mce_amd_feature_init(struct cpuinfo_x86 *c) |
125 | { | 125 | { |
126 | unsigned int bank, block; | 126 | unsigned int bank, block; |
127 | unsigned int cpu = smp_processor_id(); | 127 | unsigned int cpu = smp_processor_id(); |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c index 4b48f251fd39..f44c36624360 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c | |||
@@ -30,7 +30,7 @@ asmlinkage void smp_thermal_interrupt(void) | |||
30 | irq_exit(); | 30 | irq_exit(); |
31 | } | 31 | } |
32 | 32 | ||
33 | static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c) | 33 | static void intel_init_thermal(struct cpuinfo_x86 *c) |
34 | { | 34 | { |
35 | u32 l, h; | 35 | u32 l, h; |
36 | int tm2 = 0; | 36 | int tm2 = 0; |
@@ -84,7 +84,7 @@ static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c) | |||
84 | return; | 84 | return; |
85 | } | 85 | } |
86 | 86 | ||
87 | void __cpuinit mce_intel_feature_init(struct cpuinfo_x86 *c) | 87 | void mce_intel_feature_init(struct cpuinfo_x86 *c) |
88 | { | 88 | { |
89 | intel_init_thermal(c); | 89 | intel_init_thermal(c); |
90 | } | 90 | } |
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index b59ddcc88cd8..0c0a455fe95c 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c | |||
@@ -33,11 +33,13 @@ u64 mtrr_tom2; | |||
33 | struct mtrr_state_type mtrr_state = {}; | 33 | struct mtrr_state_type mtrr_state = {}; |
34 | EXPORT_SYMBOL_GPL(mtrr_state); | 34 | EXPORT_SYMBOL_GPL(mtrr_state); |
35 | 35 | ||
36 | #undef MODULE_PARAM_PREFIX | 36 | static int __initdata mtrr_show; |
37 | #define MODULE_PARAM_PREFIX "mtrr." | 37 | static int __init mtrr_debug(char *opt) |
38 | 38 | { | |
39 | static int mtrr_show; | 39 | mtrr_show = 1; |
40 | module_param_named(show, mtrr_show, bool, 0); | 40 | return 0; |
41 | } | ||
42 | early_param("mtrr.show", mtrr_debug); | ||
41 | 43 | ||
42 | /* | 44 | /* |
43 | * Returns the effective MTRR type for the region | 45 | * Returns the effective MTRR type for the region |
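Switching mtrr_show from module_param() to early_param() matters because the MTRR state is dumped during early CPU setup, well before normal parameter parsing (and any module parameter handling) runs; only options picked off the raw command line are usable that early. A toy version of the idea (a real early_param handler receives the option's value string rather than scanning the whole line itself):

    #include <stdio.h>
    #include <string.h>

    static int mtrr_show;

    /* hypothetical stand-in for early_param("mtrr.show", mtrr_debug) */
    static void parse_early(const char *cmdline)
    {
            if (strstr(cmdline, "mtrr.show"))
                    mtrr_show = 1;
    }

    int main(void)
    {
            parse_early("root=/dev/sda1 mtrr.show quiet");
            printf("mtrr_show = %d\n", mtrr_show);  /* 1 */
            return 0;
    }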
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index d259e5d2e054..236a401b8259 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c | |||
@@ -1594,8 +1594,7 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
1594 | 1594 | ||
1595 | /* kvm/qemu doesn't have mtrr set right, don't trim them all */ | 1595 | /* kvm/qemu doesn't have mtrr set right, don't trim them all */ |
1596 | if (!highest_pfn) { | 1596 | if (!highest_pfn) { |
1597 | WARN(!kvm_para_available(), KERN_WARNING | 1597 | printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n"); |
1598 | "WARNING: strange, CPU MTRRs all blank?\n"); | ||
1599 | return 0; | 1598 | return 0; |
1600 | } | 1599 | } |
1601 | 1600 | ||
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c index da91701a2348..169a120587be 100644 --- a/arch/x86/kernel/ds.c +++ b/arch/x86/kernel/ds.c | |||
@@ -15,8 +15,8 @@ | |||
15 | * - buffer allocation (memory accounting) | 15 | * - buffer allocation (memory accounting) |
16 | * | 16 | * |
17 | * | 17 | * |
18 | * Copyright (C) 2007-2008 Intel Corporation. | 18 | * Copyright (C) 2007-2009 Intel Corporation. |
19 | * Markus Metzger <markus.t.metzger@intel.com>, 2007-2008 | 19 | * Markus Metzger <markus.t.metzger@intel.com>, 2007-2009 |
20 | */ | 20 | */ |
21 | 21 | ||
22 | 22 | ||
@@ -890,7 +890,7 @@ int ds_set_pebs_reset(struct pebs_tracer *tracer, u64 value) | |||
890 | } | 890 | } |
891 | 891 | ||
892 | static const struct ds_configuration ds_cfg_netburst = { | 892 | static const struct ds_configuration ds_cfg_netburst = { |
893 | .name = "netburst", | 893 | .name = "Netburst", |
894 | .ctl[dsf_bts] = (1 << 2) | (1 << 3), | 894 | .ctl[dsf_bts] = (1 << 2) | (1 << 3), |
895 | .ctl[dsf_bts_kernel] = (1 << 5), | 895 | .ctl[dsf_bts_kernel] = (1 << 5), |
896 | .ctl[dsf_bts_user] = (1 << 6), | 896 | .ctl[dsf_bts_user] = (1 << 6), |
@@ -904,7 +904,7 @@ static const struct ds_configuration ds_cfg_netburst = { | |||
904 | #endif | 904 | #endif |
905 | }; | 905 | }; |
906 | static const struct ds_configuration ds_cfg_pentium_m = { | 906 | static const struct ds_configuration ds_cfg_pentium_m = { |
907 | .name = "pentium m", | 907 | .name = "Pentium M", |
908 | .ctl[dsf_bts] = (1 << 6) | (1 << 7), | 908 | .ctl[dsf_bts] = (1 << 6) | (1 << 7), |
909 | 909 | ||
910 | .sizeof_field = sizeof(long), | 910 | .sizeof_field = sizeof(long), |
@@ -915,8 +915,8 @@ static const struct ds_configuration ds_cfg_pentium_m = { | |||
915 | .sizeof_rec[ds_pebs] = sizeof(long) * 18, | 915 | .sizeof_rec[ds_pebs] = sizeof(long) * 18, |
916 | #endif | 916 | #endif |
917 | }; | 917 | }; |
918 | static const struct ds_configuration ds_cfg_core2 = { | 918 | static const struct ds_configuration ds_cfg_core2_atom = { |
919 | .name = "core 2", | 919 | .name = "Core 2/Atom", |
920 | .ctl[dsf_bts] = (1 << 6) | (1 << 7), | 920 | .ctl[dsf_bts] = (1 << 6) | (1 << 7), |
921 | .ctl[dsf_bts_kernel] = (1 << 9), | 921 | .ctl[dsf_bts_kernel] = (1 << 9), |
922 | .ctl[dsf_bts_user] = (1 << 10), | 922 | .ctl[dsf_bts_user] = (1 << 10), |
@@ -949,19 +949,22 @@ void __cpuinit ds_init_intel(struct cpuinfo_x86 *c) | |||
949 | switch (c->x86) { | 949 | switch (c->x86) { |
950 | case 0x6: | 950 | case 0x6: |
951 | switch (c->x86_model) { | 951 | switch (c->x86_model) { |
952 | case 0 ... 0xC: | 952 | case 0x9: |
953 | /* sorry, don't know about them */ | 953 | case 0xd: /* Pentium M */ |
954 | break; | ||
955 | case 0xD: | ||
956 | case 0xE: /* Pentium M */ | ||
957 | ds_configure(&ds_cfg_pentium_m); | 954 | ds_configure(&ds_cfg_pentium_m); |
958 | break; | 955 | break; |
959 | default: /* Core2, Atom, ... */ | 956 | case 0xf: |
960 | ds_configure(&ds_cfg_core2); | 957 | case 0x17: /* Core2 */ |
958 | case 0x1c: /* Atom */ | ||
959 | ds_configure(&ds_cfg_core2_atom); | ||
960 | break; | ||
961 | case 0x1a: /* i7 */ | ||
962 | default: | ||
963 | /* sorry, don't know about them */ | ||
961 | break; | 964 | break; |
962 | } | 965 | } |
963 | break; | 966 | break; |
964 | case 0xF: | 967 | case 0xf: |
965 | switch (c->x86_model) { | 968 | switch (c->x86_model) { |
966 | case 0x0: | 969 | case 0x0: |
967 | case 0x1: | 970 | case 0x1: |
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index d6f0490a7391..46469029e9d3 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S | |||
@@ -1203,7 +1203,6 @@ nmi_stack_correct: | |||
1203 | pushl %eax | 1203 | pushl %eax |
1204 | CFI_ADJUST_CFA_OFFSET 4 | 1204 | CFI_ADJUST_CFA_OFFSET 4 |
1205 | SAVE_ALL | 1205 | SAVE_ALL |
1206 | TRACE_IRQS_OFF | ||
1207 | xorl %edx,%edx # zero error code | 1206 | xorl %edx,%edx # zero error code |
1208 | movl %esp,%eax # pt_regs pointer | 1207 | movl %esp,%eax # pt_regs pointer |
1209 | call do_nmi | 1208 | call do_nmi |
@@ -1244,7 +1243,6 @@ nmi_espfix_stack: | |||
1244 | pushl %eax | 1243 | pushl %eax |
1245 | CFI_ADJUST_CFA_OFFSET 4 | 1244 | CFI_ADJUST_CFA_OFFSET 4 |
1246 | SAVE_ALL | 1245 | SAVE_ALL |
1247 | TRACE_IRQS_OFF | ||
1248 | FIXUP_ESPFIX_STACK # %eax == %esp | 1246 | FIXUP_ESPFIX_STACK # %eax == %esp |
1249 | xorl %edx,%edx # zero error code | 1247 | xorl %edx,%edx # zero error code |
1250 | call do_nmi | 1248 | call do_nmi |
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index e28c7a987793..a1346217e43c 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -346,6 +346,7 @@ ENTRY(save_args) | |||
346 | popq_cfi %rax /* move return address... */ | 346 | popq_cfi %rax /* move return address... */ |
347 | mov %gs:pda_irqstackptr,%rsp | 347 | mov %gs:pda_irqstackptr,%rsp |
348 | EMPTY_FRAME 0 | 348 | EMPTY_FRAME 0 |
349 | pushq_cfi %rbp /* backlink for unwinder */ | ||
349 | pushq_cfi %rax /* ... to the new stack */ | 350 | pushq_cfi %rax /* ... to the new stack */ |
350 | /* | 351 | /* |
351 | * We entered an interrupt context - irqs are off: | 352 | * We entered an interrupt context - irqs are off: |
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 1b43086b097a..231bdd3c5b1c 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c | |||
@@ -488,20 +488,21 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | |||
488 | * ignore such a protection. | 488 | * ignore such a protection. |
489 | */ | 489 | */ |
490 | asm volatile( | 490 | asm volatile( |
491 | "1: " _ASM_MOV " (%[parent_old]), %[old]\n" | 491 | "1: " _ASM_MOV " (%[parent]), %[old]\n" |
492 | "2: " _ASM_MOV " %[return_hooker], (%[parent_replaced])\n" | 492 | "2: " _ASM_MOV " %[return_hooker], (%[parent])\n" |
493 | " movl $0, %[faulted]\n" | 493 | " movl $0, %[faulted]\n" |
494 | "3:\n" | ||
494 | 495 | ||
495 | ".section .fixup, \"ax\"\n" | 496 | ".section .fixup, \"ax\"\n" |
496 | "3: movl $1, %[faulted]\n" | 497 | "4: movl $1, %[faulted]\n" |
498 | " jmp 3b\n" | ||
497 | ".previous\n" | 499 | ".previous\n" |
498 | 500 | ||
499 | _ASM_EXTABLE(1b, 3b) | 501 | _ASM_EXTABLE(1b, 4b) |
500 | _ASM_EXTABLE(2b, 3b) | 502 | _ASM_EXTABLE(2b, 4b) |
501 | 503 | ||
502 | : [parent_replaced] "=r" (parent), [old] "=r" (old), | 504 | : [old] "=r" (old), [faulted] "=r" (faulted) |
503 | [faulted] "=r" (faulted) | 505 | : [parent] "r" (parent), [return_hooker] "r" (return_hooker) |
504 | : [parent_old] "0" (parent), [return_hooker] "r" (return_hooker) | ||
505 | : "memory" | 506 | : "memory" |
506 | ); | 507 | ); |
507 | 508 | ||
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index cd759ad90690..a00545fe5cdd 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c | |||
@@ -269,6 +269,8 @@ static void hpet_set_mode(enum clock_event_mode mode, | |||
269 | now = hpet_readl(HPET_COUNTER); | 269 | now = hpet_readl(HPET_COUNTER); |
270 | cmp = now + (unsigned long) delta; | 270 | cmp = now + (unsigned long) delta; |
271 | cfg = hpet_readl(HPET_Tn_CFG(timer)); | 271 | cfg = hpet_readl(HPET_Tn_CFG(timer)); |
272 | /* Make sure we use edge triggered interrupts */ | ||
273 | cfg &= ~HPET_TN_LEVEL; | ||
272 | cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | | 274 | cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | |
273 | HPET_TN_SETVAL | HPET_TN_32BIT; | 275 | HPET_TN_SETVAL | HPET_TN_32BIT; |
274 | hpet_writel(cfg, HPET_Tn_CFG(timer)); | 276 | hpet_writel(cfg, HPET_Tn_CFG(timer)); |
@@ -628,11 +630,12 @@ static int hpet_cpuhp_notify(struct notifier_block *n, | |||
628 | 630 | ||
629 | switch (action & 0xf) { | 631 | switch (action & 0xf) { |
630 | case CPU_ONLINE: | 632 | case CPU_ONLINE: |
631 | INIT_DELAYED_WORK(&work.work, hpet_work); | 633 | INIT_DELAYED_WORK_ON_STACK(&work.work, hpet_work); |
632 | init_completion(&work.complete); | 634 | init_completion(&work.complete); |
633 | /* FIXME: add schedule_work_on() */ | 635 | /* FIXME: add schedule_work_on() */ |
634 | schedule_delayed_work_on(cpu, &work.work, 0); | 636 | schedule_delayed_work_on(cpu, &work.work, 0); |
635 | wait_for_completion(&work.complete); | 637 | wait_for_completion(&work.complete); |
638 | destroy_timer_on_stack(&work.work.timer); | ||
636 | break; | 639 | break; |
637 | case CPU_DEAD: | 640 | case CPU_DEAD: |
638 | if (hdev) { | 641 | if (hdev) { |
@@ -896,7 +899,7 @@ static unsigned long hpet_rtc_flags; | |||
896 | static int hpet_prev_update_sec; | 899 | static int hpet_prev_update_sec; |
897 | static struct rtc_time hpet_alarm_time; | 900 | static struct rtc_time hpet_alarm_time; |
898 | static unsigned long hpet_pie_count; | 901 | static unsigned long hpet_pie_count; |
899 | static unsigned long hpet_t1_cmp; | 902 | static u32 hpet_t1_cmp; |
900 | static unsigned long hpet_default_delta; | 903 | static unsigned long hpet_default_delta; |
901 | static unsigned long hpet_pie_delta; | 904 | static unsigned long hpet_pie_delta; |
902 | static unsigned long hpet_pie_limit; | 905 | static unsigned long hpet_pie_limit; |
@@ -904,6 +907,14 @@ static unsigned long hpet_pie_limit; | |||
904 | static rtc_irq_handler irq_handler; | 907 | static rtc_irq_handler irq_handler; |
905 | 908 | ||
906 | /* | 909 | /* |
910 | * Check that the hpet counter c1 is ahead of c2 ||
911 | */ | ||
912 | static inline int hpet_cnt_ahead(u32 c1, u32 c2) | ||
913 | { | ||
914 | return (s32)(c2 - c1) < 0; | ||
915 | } | ||
916 | |||
917 | /* | ||
907 | * Registers a IRQ handler. | 918 | * Registers a IRQ handler. |
908 | */ | 919 | */ |
909 | int hpet_register_irq_handler(rtc_irq_handler handler) | 920 | int hpet_register_irq_handler(rtc_irq_handler handler) |
@@ -1074,7 +1085,7 @@ static void hpet_rtc_timer_reinit(void) | |||
1074 | hpet_t1_cmp += delta; | 1085 | hpet_t1_cmp += delta; |
1075 | hpet_writel(hpet_t1_cmp, HPET_T1_CMP); | 1086 | hpet_writel(hpet_t1_cmp, HPET_T1_CMP); |
1076 | lost_ints++; | 1087 | lost_ints++; |
1077 | } while ((long)(hpet_readl(HPET_COUNTER) - hpet_t1_cmp) > 0); | 1088 | } while (!hpet_cnt_ahead(hpet_t1_cmp, hpet_readl(HPET_COUNTER))); |
1078 | 1089 | ||
1079 | if (lost_ints) { | 1090 | if (lost_ints) { |
1080 | if (hpet_rtc_flags & RTC_PIE) | 1091 | if (hpet_rtc_flags & RTC_PIE) |
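Two related hpet changes sit in this hunk: hpet_t1_cmp becomes u32 to match the width of the hardware comparator, and the open-coded (long)(counter - cmp) > 0 test becomes hpet_cnt_ahead(), which does the comparison as a signed 32-bit difference so it stays correct when the counter wraps. The trick in isolation:

    #include <stdio.h>
    #include <stdint.h>

    /* wrap-safe: true iff c1 is ahead of c2, modulo 2^32 */
    static int cnt_ahead(uint32_t c1, uint32_t c2)
    {
            return (int32_t)(c2 - c1) < 0;
    }

    int main(void)
    {
            /* across the wrap: 0x00000010 came "after" 0xfffffff0 */
            printf("%d\n", cnt_ahead(0x00000010u, 0xfffffff0u));    /* 1 */
            printf("%d\n", cnt_ahead(0xfffffff0u, 0x00000010u));    /* 0 */
            return 0;
    }

A plain < gets both answers wrong near the wrap point, which is presumably the failure mode the reinit loop was hitting there.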
diff --git a/arch/x86/kernel/i8237.c b/arch/x86/kernel/i8237.c index dbd6c1d1b638..b42ca694dc68 100644 --- a/arch/x86/kernel/i8237.c +++ b/arch/x86/kernel/i8237.c | |||
@@ -28,10 +28,10 @@ static int i8237A_resume(struct sys_device *dev) | |||
28 | 28 | ||
29 | flags = claim_dma_lock(); | 29 | flags = claim_dma_lock(); |
30 | 30 | ||
31 | dma_outb(DMA1_RESET_REG, 0); | 31 | dma_outb(0, DMA1_RESET_REG); |
32 | dma_outb(DMA2_RESET_REG, 0); | 32 | dma_outb(0, DMA2_RESET_REG); |
33 | 33 | ||
34 | for (i = 0;i < 8;i++) { | 34 | for (i = 0; i < 8; i++) { |
35 | set_dma_addr(i, 0x000000); | 35 | set_dma_addr(i, 0x000000); |
36 | /* DMA count is a bit weird so this is not 0 */ | 36 | /* DMA count is a bit weird so this is not 0 */ |
37 | set_dma_count(i, 1); | 37 | set_dma_count(i, 1); |
@@ -51,14 +51,14 @@ static int i8237A_suspend(struct sys_device *dev, pm_message_t state) | |||
51 | } | 51 | } |
52 | 52 | ||
53 | static struct sysdev_class i8237_sysdev_class = { | 53 | static struct sysdev_class i8237_sysdev_class = { |
54 | .name = "i8237", | 54 | .name = "i8237", |
55 | .suspend = i8237A_suspend, | 55 | .suspend = i8237A_suspend, |
56 | .resume = i8237A_resume, | 56 | .resume = i8237A_resume, |
57 | }; | 57 | }; |
58 | 58 | ||
59 | static struct sys_device device_i8237A = { | 59 | static struct sys_device device_i8237A = { |
60 | .id = 0, | 60 | .id = 0, |
61 | .cls = &i8237_sysdev_class, | 61 | .cls = &i8237_sysdev_class, |
62 | }; | 62 | }; |
63 | 63 | ||
64 | static int __init i8237A_init_sysfs(void) | 64 | static int __init i8237A_init_sysfs(void) |
@@ -68,5 +68,4 @@ static int __init i8237A_init_sysfs(void) | |||
68 | error = sysdev_register(&device_i8237A); | 68 | error = sysdev_register(&device_i8237A); |
69 | return error; | 69 | return error; |
70 | } | 70 | } |
71 | |||
72 | device_initcall(i8237A_init_sysfs); | 71 | device_initcall(i8237A_init_sysfs); |
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 1c4a1302536c..bc7ac4da90d7 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c | |||
@@ -2528,14 +2528,15 @@ static void irq_complete_move(struct irq_desc **descp) | |||
2528 | 2528 | ||
2529 | vector = ~get_irq_regs()->orig_ax; | 2529 | vector = ~get_irq_regs()->orig_ax; |
2530 | me = smp_processor_id(); | 2530 | me = smp_processor_id(); |
2531 | |||
2532 | if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) { | ||
2531 | #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC | 2533 | #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC |
2532 | *descp = desc = move_irq_desc(desc, me); | 2534 | *descp = desc = move_irq_desc(desc, me); |
2533 | /* get the new one */ | 2535 | /* get the new one */ |
2534 | cfg = desc->chip_data; | 2536 | cfg = desc->chip_data; |
2535 | #endif | 2537 | #endif |
2536 | |||
2537 | if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) | ||
2538 | send_cleanup_vector(cfg); | 2538 | send_cleanup_vector(cfg); |
2539 | } | ||
2539 | } | 2540 | } |
2540 | #else | 2541 | #else |
2541 | static inline void irq_complete_move(struct irq_desc **descp) {} | 2542 | static inline void irq_complete_move(struct irq_desc **descp) {} |
@@ -3840,14 +3841,24 @@ int __init io_apic_get_redir_entries (int ioapic) | |||
3840 | 3841 | ||
3841 | void __init probe_nr_irqs_gsi(void) | 3842 | void __init probe_nr_irqs_gsi(void) |
3842 | { | 3843 | { |
3843 | int idx; | ||
3844 | int nr = 0; | 3844 | int nr = 0; |
3845 | 3845 | ||
3846 | for (idx = 0; idx < nr_ioapics; idx++) | 3846 | nr = acpi_probe_gsi(); |
3847 | nr += io_apic_get_redir_entries(idx) + 1; | 3847 | if (nr > nr_irqs_gsi) { |
3848 | |||
3849 | if (nr > nr_irqs_gsi) | ||
3850 | nr_irqs_gsi = nr; | 3848 | nr_irqs_gsi = nr; |
3849 | } else { | ||
3850 | /* for acpi=off or acpi is not compiled in */ | ||
3851 | int idx; | ||
3852 | |||
3853 | nr = 0; | ||
3854 | for (idx = 0; idx < nr_ioapics; idx++) | ||
3855 | nr += io_apic_get_redir_entries(idx) + 1; | ||
3856 | |||
3857 | if (nr > nr_irqs_gsi) | ||
3858 | nr_irqs_gsi = nr; | ||
3859 | } | ||
3860 | |||
3861 | printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi); | ||
3851 | } | 3862 | } |
3852 | 3863 | ||
3853 | /* -------------------------------------------------------------------------- | 3864 | /* -------------------------------------------------------------------------- |
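probe_nr_irqs_gsi() now trusts the GSI count that ACPI derived from the MADT when one is available, and only falls back to summing each I/O APIC's redirection-table entries for acpi=off or non-ACPI kernels. The control flow, reduced to a sketch (stand-in probe functions; two 24-entry I/O APICs assumed):

    #include <stdio.h>

    /* hypothetical stand-ins for acpi_probe_gsi() and the IOAPIC query */
    static int acpi_probe_gsi_sketch(void)   { return 0; }  /* acpi=off case */
    static int redir_entries(int ioapic_idx) { return 23; } /* 24 pins each */

    int main(void)
    {
            int nr_ioapics = 2, nr_irqs_gsi = 16;
            int nr = acpi_probe_gsi_sketch();

            if (nr <= nr_irqs_gsi) {
                    /* fallback: count redirection entries directly */
                    int idx;

                    nr = 0;
                    for (idx = 0; idx < nr_ioapics; idx++)
                            nr += redir_entries(idx) + 1;
            }
            if (nr > nr_irqs_gsi)
                    nr_irqs_gsi = nr;
            printf("nr_irqs_gsi: %d\n", nr_irqs_gsi);   /* 48 */
            return 0;
    }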
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c index 1507ad4e674d..10a09c2f1828 100644 --- a/arch/x86/kernel/irqinit_32.c +++ b/arch/x86/kernel/irqinit_32.c | |||
@@ -78,15 +78,6 @@ void __init init_ISA_irqs(void) | |||
78 | } | 78 | } |
79 | } | 79 | } |
80 | 80 | ||
81 | /* | ||
82 | * IRQ2 is cascade interrupt to second interrupt controller | ||
83 | */ | ||
84 | static struct irqaction irq2 = { | ||
85 | .handler = no_action, | ||
86 | .mask = CPU_MASK_NONE, | ||
87 | .name = "cascade", | ||
88 | }; | ||
89 | |||
90 | DEFINE_PER_CPU(vector_irq_t, vector_irq) = { | 81 | DEFINE_PER_CPU(vector_irq_t, vector_irq) = { |
91 | [0 ... IRQ0_VECTOR - 1] = -1, | 82 | [0 ... IRQ0_VECTOR - 1] = -1, |
92 | [IRQ0_VECTOR] = 0, | 83 | [IRQ0_VECTOR] = 0, |
@@ -178,9 +169,6 @@ void __init native_init_IRQ(void) | |||
178 | alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); | 169 | alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); |
179 | #endif | 170 | #endif |
180 | 171 | ||
181 | if (!acpi_ioapic) | ||
182 | setup_irq(2, &irq2); | ||
183 | |||
184 | /* setup after call gates are initialised (usually add in | 172 | /* setup after call gates are initialised (usually add in |
185 | * the architecture specific gates) | 173 | * the architecture specific gates) |
186 | */ | 174 | */ |
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 884d985b8b82..e948b28a5a9a 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c | |||
@@ -446,7 +446,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, | |||
446 | static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs, | 446 | static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs, |
447 | struct kprobe_ctlblk *kcb) | 447 | struct kprobe_ctlblk *kcb) |
448 | { | 448 | { |
449 | #if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM) | 449 | #if !defined(CONFIG_PREEMPT) || defined(CONFIG_FREEZER) |
450 | if (p->ainsn.boostable == 1 && !p->post_handler) { | 450 | if (p->ainsn.boostable == 1 && !p->post_handler) { |
451 | /* Boost up -- we can execute copied instructions directly */ | 451 | /* Boost up -- we can execute copied instructions directly */ |
452 | reset_current_kprobe(); | 452 | reset_current_kprobe(); |
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index c0601c2848a1..a649a4ccad43 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <asm/e820.h> | 27 | #include <asm/e820.h> |
28 | #include <asm/trampoline.h> | 28 | #include <asm/trampoline.h> |
29 | #include <asm/setup.h> | 29 | #include <asm/setup.h> |
30 | #include <asm/smp.h> | ||
30 | 31 | ||
31 | #include <mach_apic.h> | 32 | #include <mach_apic.h> |
32 | #ifdef CONFIG_X86_32 | 33 | #ifdef CONFIG_X86_32 |
diff --git a/arch/x86/kernel/olpc.c b/arch/x86/kernel/olpc.c index 7a13fac63a1f..4006c522adc7 100644 --- a/arch/x86/kernel/olpc.c +++ b/arch/x86/kernel/olpc.c | |||
@@ -203,7 +203,7 @@ static void __init platform_detect(void) | |||
203 | static void __init platform_detect(void) | 203 | static void __init platform_detect(void) |
204 | { | 204 | { |
205 | /* stopgap until OFW support is added to the kernel */ | 205 | /* stopgap until OFW support is added to the kernel */ |
206 | olpc_platform_info.boardrev = 0xc2; | 206 | olpc_platform_info.boardrev = olpc_board(0xc2); |
207 | } | 207 | } |
208 | #endif | 208 | #endif |
209 | 209 | ||
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index e4c8fb608873..c6520a4e85d4 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c | |||
@@ -268,6 +268,32 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void) | |||
268 | return __get_cpu_var(paravirt_lazy_mode); | 268 | return __get_cpu_var(paravirt_lazy_mode); |
269 | } | 269 | } |
270 | 270 | ||
271 | void arch_flush_lazy_mmu_mode(void) | ||
272 | { | ||
273 | preempt_disable(); | ||
274 | |||
275 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) { | ||
276 | WARN_ON(preempt_count() == 1); | ||
277 | arch_leave_lazy_mmu_mode(); | ||
278 | arch_enter_lazy_mmu_mode(); | ||
279 | } | ||
280 | |||
281 | preempt_enable(); | ||
282 | } | ||
283 | |||
284 | void arch_flush_lazy_cpu_mode(void) | ||
285 | { | ||
286 | preempt_disable(); | ||
287 | |||
288 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) { | ||
289 | WARN_ON(preempt_count() == 1); | ||
290 | arch_leave_lazy_cpu_mode(); | ||
291 | arch_enter_lazy_cpu_mode(); | ||
292 | } | ||
293 | |||
294 | preempt_enable(); | ||
295 | } | ||
296 | |||
271 | struct pv_info pv_info = { | 297 | struct pv_info pv_info = { |
272 | .name = "bare hardware", | 298 | .name = "bare hardware", |
273 | .paravirt_enabled = 0, | 299 | .paravirt_enabled = 0, |
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index 00c2bcd41463..d5768b1af080 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * This allows to use PCI devices that only support 32bit addresses on systems | 5 | * This allows to use PCI devices that only support 32bit addresses on systems |
6 | * with more than 4GB. | 6 | * with more than 4GB. |
7 | * | 7 | * |
8 | * See Documentation/DMA-mapping.txt for the interface specification. | 8 | * See Documentation/PCI/PCI-DMA-mapping.txt for the interface specification. |
9 | * | 9 | * |
10 | * Copyright 2002 Andi Kleen, SuSE Labs. | 10 | * Copyright 2002 Andi Kleen, SuSE Labs. |
11 | * Subject to the GNU General Public License v2 only. | 11 | * Subject to the GNU General Public License v2 only. |
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index e68bb9e30864..6d12f7e37f8c 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -180,6 +180,9 @@ void mwait_idle_with_hints(unsigned long ax, unsigned long cx) | |||
180 | 180 | ||
181 | trace_power_start(&it, POWER_CSTATE, (ax>>4)+1); | 181 | trace_power_start(&it, POWER_CSTATE, (ax>>4)+1); |
182 | if (!need_resched()) { | 182 | if (!need_resched()) { |
183 | if (cpu_has(¤t_cpu_data, X86_FEATURE_CLFLUSH_MONITOR)) | ||
184 | clflush((void *)¤t_thread_info()->flags); | ||
185 | |||
183 | __monitor((void *)¤t_thread_info()->flags, 0, 0); | 186 | __monitor((void *)¤t_thread_info()->flags, 0, 0); |
184 | smp_mb(); | 187 | smp_mb(); |
185 | if (!need_resched()) | 188 | if (!need_resched()) |
@@ -194,6 +197,9 @@ static void mwait_idle(void) | |||
194 | struct power_trace it; | 197 | struct power_trace it; |
195 | if (!need_resched()) { | 198 | if (!need_resched()) { |
196 | trace_power_start(&it, POWER_CSTATE, 1); | 199 | trace_power_start(&it, POWER_CSTATE, 1); |
200 | if (cpu_has(¤t_cpu_data, X86_FEATURE_CLFLUSH_MONITOR)) | ||
201 | clflush((void *)¤t_thread_info()->flags); | ||
202 | |||
197 | __monitor((void *)¤t_thread_info()->flags, 0, 0); | 203 | __monitor((void *)¤t_thread_info()->flags, 0, 0); |
198 | smp_mb(); | 204 | smp_mb(); |
199 | if (!need_resched()) | 205 | if (!need_resched()) |
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index a546f55c77b4..bd4da2af08ae 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -104,9 +104,6 @@ void cpu_idle(void) | |||
104 | check_pgt_cache(); | 104 | check_pgt_cache(); |
105 | rmb(); | 105 | rmb(); |
106 | 106 | ||
107 | if (rcu_pending(cpu)) | ||
108 | rcu_check_callbacks(cpu, 0); | ||
109 | |||
110 | if (cpu_is_offline(cpu)) | 107 | if (cpu_is_offline(cpu)) |
111 | play_dead(); | 108 | play_dead(); |
112 | 109 | ||
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 416fb9282f4f..85b4cb5c1980 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <linux/uaccess.h> | 40 | #include <linux/uaccess.h> |
41 | #include <linux/io.h> | 41 | #include <linux/io.h> |
42 | #include <linux/ftrace.h> | 42 | #include <linux/ftrace.h> |
43 | #include <linux/dmi.h> | ||
43 | 44 | ||
44 | #include <asm/pgtable.h> | 45 | #include <asm/pgtable.h> |
45 | #include <asm/system.h> | 46 | #include <asm/system.h> |
@@ -151,14 +152,18 @@ void __show_regs(struct pt_regs *regs, int all) | |||
151 | unsigned long d0, d1, d2, d3, d6, d7; | 152 | unsigned long d0, d1, d2, d3, d6, d7; |
152 | unsigned int fsindex, gsindex; | 153 | unsigned int fsindex, gsindex; |
153 | unsigned int ds, cs, es; | 154 | unsigned int ds, cs, es; |
155 | const char *board; | ||
154 | 156 | ||
155 | printk("\n"); | 157 | printk("\n"); |
156 | print_modules(); | 158 | print_modules(); |
157 | printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s\n", | 159 | board = dmi_get_system_info(DMI_PRODUCT_NAME); |
160 | if (!board) | ||
161 | board = ""; | ||
162 | printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n", | ||
158 | current->pid, current->comm, print_tainted(), | 163 | current->pid, current->comm, print_tainted(), |
159 | init_utsname()->release, | 164 | init_utsname()->release, |
160 | (int)strcspn(init_utsname()->version, " "), | 165 | (int)strcspn(init_utsname()->version, " "), |
161 | init_utsname()->version); | 166 | init_utsname()->version, board); |
162 | printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip); | 167 | printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip); |
163 | printk_address(regs->ip, 1); | 168 | printk_address(regs->ip, 1); |
164 | printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, | 169 | printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, |
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 0a5df5f82fb9..5a4c23d89892 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
@@ -810,12 +810,16 @@ static void ptrace_bts_untrace(struct task_struct *child) | |||
810 | 810 | ||
811 | static void ptrace_bts_detach(struct task_struct *child) | 811 | static void ptrace_bts_detach(struct task_struct *child) |
812 | { | 812 | { |
813 | if (unlikely(child->bts)) { | 813 | /* |
814 | ds_release_bts(child->bts); | 814 | * Ptrace_detach() races with ptrace_untrace() in case |
815 | child->bts = NULL; | 815 | * the child dies and is reaped by another thread. |
816 | 816 | * | |
817 | ptrace_bts_free_buffer(child); | 817 | * We only do the memory accounting at this point and |
818 | } | 818 | * leave the buffer deallocation and the bts tracer |
819 | * release to ptrace_bts_untrace() which will be called | ||
820 | * later on with tasklist_lock held. | ||
821 | */ | ||
822 | release_locked_buffer(child->bts_buffer, child->bts_size); | ||
819 | } | 823 | } |
820 | #else | 824 | #else |
821 | static inline void ptrace_bts_fork(struct task_struct *tsk) {} | 825 | static inline void ptrace_bts_fork(struct task_struct *tsk) {} |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index ae0d8042cf69..c461f6d69074 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -607,7 +607,7 @@ struct x86_quirks *x86_quirks __initdata = &default_x86_quirks; | |||
607 | static int __init dmi_low_memory_corruption(const struct dmi_system_id *d) | 607 | static int __init dmi_low_memory_corruption(const struct dmi_system_id *d) |
608 | { | 608 | { |
609 | printk(KERN_NOTICE | 609 | printk(KERN_NOTICE |
610 | "%s detected: BIOS may corrupt low RAM, working it around.\n", | 610 | "%s detected: BIOS may corrupt low RAM, working around it.\n", |
611 | d->ident); | 611 | d->ident); |
612 | 612 | ||
613 | e820_update_range(0, 0x10000, E820_RAM, E820_RESERVED); | 613 | e820_update_range(0, 0x10000, E820_RAM, E820_RESERVED); |
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 55c46074eba0..01161077a49c 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c | |||
@@ -136,7 +136,7 @@ static void __init setup_cpu_pda_map(void) | |||
136 | #ifdef CONFIG_X86_64 | 136 | #ifdef CONFIG_X86_64 |
137 | 137 | ||
138 | /* correctly size the local cpu masks */ | 138 | /* correctly size the local cpu masks */ |
139 | static void setup_cpu_local_masks(void) | 139 | static void __init setup_cpu_local_masks(void) |
140 | { | 140 | { |
141 | alloc_bootmem_cpumask_var(&cpu_initialized_mask); | 141 | alloc_bootmem_cpumask_var(&cpu_initialized_mask); |
142 | alloc_bootmem_cpumask_var(&cpu_callin_mask); | 142 | alloc_bootmem_cpumask_var(&cpu_callin_mask); |
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 89bb7668041d..df0587f24c54 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c | |||
@@ -632,9 +632,16 @@ badframe: | |||
632 | } | 632 | } |
633 | 633 | ||
634 | #ifdef CONFIG_X86_32 | 634 | #ifdef CONFIG_X86_32 |
635 | asmlinkage int sys_rt_sigreturn(struct pt_regs regs) | 635 | /* |
636 | * Note: do not pass in pt_regs directly as with tail-call optimization | ||
637 | * GCC will incorrectly stomp on the caller's frame and corrupt user-space | ||
638 | * register state: | ||
639 | */ | ||
640 | asmlinkage int sys_rt_sigreturn(unsigned long __unused) | ||
636 | { | 641 | { |
637 | return do_rt_sigreturn(®s); | 642 | struct pt_regs *regs = (struct pt_regs *)&__unused; |
643 | |||
644 | return do_rt_sigreturn(regs); | ||
638 | } | 645 | } |
639 | #else /* !CONFIG_X86_32 */ | 646 | #else /* !CONFIG_X86_32 */ |
640 | asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) | 647 | asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) |
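The sys_rt_sigreturn() comment is the heart of this change: with struct pt_regs passed by value, GCC is free to tail-call do_rt_sigreturn() and reuse the caller's argument area, so writes through the regs pointer corrupt the user-space register image. Declaring one dummy stack argument and taking its address recovers a pointer to the same frame while giving GCC nothing to optimize away. The shape of the trick as a deliberately non-portable user-space sketch (it leans on stack argument layout the way asmlinkage code does, and only the first field is ever read):

    #include <stdio.h>

    struct regs { long ax, bx, cx; };

    static long handler(struct regs *r)
    {
            return r->ax;           /* reads only the first field */
    }

    /* fragile form: the by-value copy lives in a frame a tail call may reuse */
    static long sigreturn_byval(struct regs r)
    {
            return handler(&r);
    }

    /* fixed form: treat the first stacked argument as the start of the frame */
    static long sigreturn_fixed(long first_word)
    {
            struct regs *r = (struct regs *)&first_word;

            return handler(r);
    }

    int main(void)
    {
            struct regs r = { 42, 0, 0 };

            printf("%ld\n", sigreturn_byval(r));    /* 42 */
            printf("%ld\n", sigreturn_fixed(42));   /* 42 */
            return 0;
    }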
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S index d44395ff34c3..e2e86a08f31d 100644 --- a/arch/x86/kernel/syscall_table_32.S +++ b/arch/x86/kernel/syscall_table_32.S | |||
@@ -88,7 +88,7 @@ ENTRY(sys_call_table) | |||
88 | .long sys_uselib | 88 | .long sys_uselib |
89 | .long sys_swapon | 89 | .long sys_swapon |
90 | .long sys_reboot | 90 | .long sys_reboot |
91 | .long old_readdir | 91 | .long sys_old_readdir |
92 | .long old_mmap /* 90 */ | 92 | .long old_mmap /* 90 */ |
93 | .long sys_munmap | 93 | .long sys_munmap |
94 | .long sys_truncate | 94 | .long sys_truncate |
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c index f885023167e0..6812b829ed83 100644 --- a/arch/x86/kernel/tlb_uv.c +++ b/arch/x86/kernel/tlb_uv.c | |||
@@ -200,6 +200,7 @@ static int uv_wait_completion(struct bau_desc *bau_desc, | |||
200 | destination_timeouts = 0; | 200 | destination_timeouts = 0; |
201 | } | 201 | } |
202 | } | 202 | } |
203 | cpu_relax(); | ||
203 | } | 204 | } |
204 | return FLUSH_COMPLETE; | 205 | return FLUSH_COMPLETE; |
205 | } | 206 | } |
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 98c2d055284b..a9e7548e1790 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -99,6 +99,12 @@ static inline void preempt_conditional_sti(struct pt_regs *regs) | |||
99 | local_irq_enable(); | 99 | local_irq_enable(); |
100 | } | 100 | } |
101 | 101 | ||
102 | static inline void conditional_cli(struct pt_regs *regs) | ||
103 | { | ||
104 | if (regs->flags & X86_EFLAGS_IF) | ||
105 | local_irq_disable(); | ||
106 | } | ||
107 | |||
102 | static inline void preempt_conditional_cli(struct pt_regs *regs) | 108 | static inline void preempt_conditional_cli(struct pt_regs *regs) |
103 | { | 109 | { |
104 | if (regs->flags & X86_EFLAGS_IF) | 110 | if (regs->flags & X86_EFLAGS_IF) |
@@ -626,8 +632,10 @@ clear_dr7: | |||
626 | 632 | ||
627 | #ifdef CONFIG_X86_32 | 633 | #ifdef CONFIG_X86_32 |
628 | debug_vm86: | 634 | debug_vm86: |
635 | /* reenable preemption: handle_vm86_trap() might sleep */ | ||
636 | dec_preempt_count(); | ||
629 | handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1); | 637 | handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1); |
630 | preempt_conditional_cli(regs); | 638 | conditional_cli(regs); |
631 | return; | 639 | return; |
632 | #endif | 640 | #endif |
633 | 641 | ||
@@ -896,7 +904,7 @@ asmlinkage void math_state_restore(void) | |||
896 | EXPORT_SYMBOL_GPL(math_state_restore); | 904 | EXPORT_SYMBOL_GPL(math_state_restore); |
897 | 905 | ||
898 | #ifndef CONFIG_MATH_EMULATION | 906 | #ifndef CONFIG_MATH_EMULATION |
899 | asmlinkage void math_emulate(long arg) | 907 | void math_emulate(struct math_emu_info *info) |
900 | { | 908 | { |
901 | printk(KERN_EMERG | 909 | printk(KERN_EMERG |
902 | "math-emulation not enabled and no coprocessor found.\n"); | 910 | "math-emulation not enabled and no coprocessor found.\n"); |
@@ -906,16 +914,19 @@ asmlinkage void math_emulate(long arg) | |||
906 | } | 914 | } |
907 | #endif /* CONFIG_MATH_EMULATION */ | 915 | #endif /* CONFIG_MATH_EMULATION */ |
908 | 916 | ||
909 | dotraplinkage void __kprobes | 917 | dotraplinkage void __kprobes do_device_not_available(struct pt_regs regs) |
910 | do_device_not_available(struct pt_regs *regs, long error) | ||
911 | { | 918 | { |
912 | #ifdef CONFIG_X86_32 | 919 | #ifdef CONFIG_X86_32 |
913 | if (read_cr0() & X86_CR0_EM) { | 920 | if (read_cr0() & X86_CR0_EM) { |
914 | conditional_sti(regs); | 921 | struct math_emu_info info = { }; |
915 | math_emulate(0); | 922 | |
923 | conditional_sti(®s); | ||
924 | |||
925 | info.regs = ®s; | ||
926 | math_emulate(&info); | ||
916 | } else { | 927 | } else { |
917 | math_state_restore(); /* interrupts still off */ | 928 | math_state_restore(); /* interrupts still off */ |
918 | conditional_sti(regs); | 929 | conditional_sti(®s); |
919 | } | 930 | } |
920 | #else | 931 | #else |
921 | math_state_restore(); | 932 | math_state_restore(); |
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c index 23206ba16874..bef58b4982db 100644 --- a/arch/x86/kernel/vmi_32.c +++ b/arch/x86/kernel/vmi_32.c | |||
@@ -321,6 +321,16 @@ static void vmi_release_pmd(unsigned long pfn) | |||
321 | } | 321 | } |
322 | 322 | ||
323 | /* | 323 | /* |
324 | * We use the pgd_free hook for releasing the pgd page: | ||
325 | */ | ||
326 | static void vmi_pgd_free(struct mm_struct *mm, pgd_t *pgd) | ||
327 | { | ||
328 | unsigned long pfn = __pa(pgd) >> PAGE_SHIFT; | ||
329 | |||
330 | vmi_ops.release_page(pfn, VMI_PAGE_L2); | ||
331 | } | ||
332 | |||
333 | /* | ||
324 | * Helper macros for MMU update flags. We can defer updates until a flush | 334 | * Helper macros for MMU update flags. We can defer updates until a flush |
325 | * or page invalidation only if the update is to the current address space | 335 | * or page invalidation only if the update is to the current address space |
326 | * (otherwise, there is no flush). We must check against init_mm, since | 336 | * (otherwise, there is no flush). We must check against init_mm, since |
@@ -762,6 +772,7 @@ static inline int __init activate_vmi(void) | |||
762 | if (vmi_ops.release_page) { | 772 | if (vmi_ops.release_page) { |
763 | pv_mmu_ops.release_pte = vmi_release_pte; | 773 | pv_mmu_ops.release_pte = vmi_release_pte; |
764 | pv_mmu_ops.release_pmd = vmi_release_pmd; | 774 | pv_mmu_ops.release_pmd = vmi_release_pmd; |
775 | pv_mmu_ops.pgd_free = vmi_pgd_free; | ||
765 | } | 776 | } |
766 | 777 | ||
767 | /* Set linear is needed in all cases */ | 778 | /* Set linear is needed in all cases */ |
@@ -858,7 +869,7 @@ void __init vmi_init(void) | |||
858 | #endif | 869 | #endif |
859 | } | 870 | } |
860 | 871 | ||
861 | void vmi_activate(void) | 872 | void __init vmi_activate(void) |
862 | { | 873 | { |
863 | unsigned long flags; | 874 | unsigned long flags; |
864 | 875 | ||
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c index c4c1f9e09402..bde106cae0a9 100644 --- a/arch/x86/kernel/vmiclock_32.c +++ b/arch/x86/kernel/vmiclock_32.c | |||
@@ -283,10 +283,13 @@ void __devinit vmi_time_ap_init(void) | |||
283 | #endif | 283 | #endif |
284 | 284 | ||
285 | /** vmi clocksource */ | 285 | /** vmi clocksource */ |
286 | static struct clocksource clocksource_vmi; | ||
286 | 287 | ||
287 | static cycle_t read_real_cycles(void) | 288 | static cycle_t read_real_cycles(void) |
288 | { | 289 | { |
289 | return vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL); | 290 | cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL); |
291 | return ret >= clocksource_vmi.cycle_last ? | ||
292 | ret : clocksource_vmi.cycle_last; | ||
290 | } | 293 | } |
291 | 294 | ||
292 | static struct clocksource clocksource_vmi = { | 295 | static struct clocksource clocksource_vmi = { |
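The read_real_cycles() change clamps every hardware read to clocksource_vmi.cycle_last (hence the new forward declaration), so the VMI clocksource can never hand the timekeeping core a value behind the previous one, which would otherwise surface as time jumping backwards. The clamp in isolation (in the kernel, cycle_last is maintained by the clocksource core; this sketch folds the update in for brevity):

    #include <stdio.h>
    #include <stdint.h>

    /* never report a value behind the last one handed out */
    static uint64_t read_clamped(uint64_t hw_now, uint64_t *cycle_last)
    {
            if (hw_now < *cycle_last)
                    hw_now = *cycle_last;
            *cycle_last = hw_now;
            return hw_now;
    }

    int main(void)
    {
            uint64_t last = 0;

            printf("%llu\n", (unsigned long long)read_clamped(100, &last));
            printf("%llu\n", (unsigned long long)read_clamped(90, &last)); /* clamped: 100 */
            printf("%llu\n", (unsigned long long)read_clamped(150, &last));
            return 0;
    }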
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index e665d1c623ca..72bd275a9b5c 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c | |||
@@ -207,7 +207,7 @@ static int __pit_timer_fn(struct kvm_kpit_state *ps) | |||
207 | hrtimer_add_expires_ns(&pt->timer, pt->period); | 207 | hrtimer_add_expires_ns(&pt->timer, pt->period); |
208 | pt->scheduled = hrtimer_get_expires_ns(&pt->timer); | 208 | pt->scheduled = hrtimer_get_expires_ns(&pt->timer); |
209 | if (pt->period) | 209 | if (pt->period) |
210 | ps->channels[0].count_load_time = hrtimer_get_expires(&pt->timer); | 210 | ps->channels[0].count_load_time = ktime_get(); |
211 | 211 | ||
212 | return (pt->period == 0 ? 0 : 1); | 212 | return (pt->period == 0 ? 0 : 1); |
213 | } | 213 | } |
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c index c019b8edcdb7..cf17ed52f6fb 100644 --- a/arch/x86/kvm/irq.c +++ b/arch/x86/kvm/irq.c | |||
@@ -87,13 +87,6 @@ void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu) | |||
87 | } | 87 | } |
88 | EXPORT_SYMBOL_GPL(kvm_inject_pending_timer_irqs); | 88 | EXPORT_SYMBOL_GPL(kvm_inject_pending_timer_irqs); |
89 | 89 | ||
90 | void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec) | ||
91 | { | ||
92 | kvm_apic_timer_intr_post(vcpu, vec); | ||
93 | /* TODO: PIT, RTC etc. */ | ||
94 | } | ||
95 | EXPORT_SYMBOL_GPL(kvm_timer_intr_post); | ||
96 | |||
97 | void __kvm_migrate_timers(struct kvm_vcpu *vcpu) | 90 | void __kvm_migrate_timers(struct kvm_vcpu *vcpu) |
98 | { | 91 | { |
99 | __kvm_migrate_apic_timer(vcpu); | 92 | __kvm_migrate_apic_timer(vcpu); |
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h index 2bf32a03ceec..82579ee538d0 100644 --- a/arch/x86/kvm/irq.h +++ b/arch/x86/kvm/irq.h | |||
@@ -89,7 +89,6 @@ static inline int irqchip_in_kernel(struct kvm *kvm) | |||
89 | 89 | ||
90 | void kvm_pic_reset(struct kvm_kpic_state *s); | 90 | void kvm_pic_reset(struct kvm_kpic_state *s); |
91 | 91 | ||
92 | void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec); | ||
93 | void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu); | 92 | void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu); |
94 | void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu); | 93 | void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu); |
95 | void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu); | 94 | void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu); |
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index afac68c0815c..f0b67f2cdd69 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -35,6 +35,12 @@ | |||
35 | #include "kvm_cache_regs.h" | 35 | #include "kvm_cache_regs.h" |
36 | #include "irq.h" | 36 | #include "irq.h" |
37 | 37 | ||
38 | #ifndef CONFIG_X86_64 | ||
39 | #define mod_64(x, y) ((x) - (y) * div64_u64(x, y)) | ||
40 | #else | ||
41 | #define mod_64(x, y) ((x) % (y)) | ||
42 | #endif | ||
43 | |||
38 | #define PRId64 "d" | 44 | #define PRId64 "d" |
39 | #define PRIx64 "llx" | 45 | #define PRIx64 "llx" |
40 | #define PRIu64 "u" | 46 | #define PRIu64 "u" |
@@ -511,52 +517,22 @@ static void apic_send_ipi(struct kvm_lapic *apic) | |||
511 | 517 | ||
512 | static u32 apic_get_tmcct(struct kvm_lapic *apic) | 518 | static u32 apic_get_tmcct(struct kvm_lapic *apic) |
513 | { | 519 | { |
514 | u64 counter_passed; | 520 | ktime_t remaining; |
515 | ktime_t passed, now; | 521 | s64 ns; |
516 | u32 tmcct; | 522 | u32 tmcct; |
517 | 523 | ||
518 | ASSERT(apic != NULL); | 524 | ASSERT(apic != NULL); |
519 | 525 | ||
520 | now = apic->timer.dev.base->get_time(); | ||
521 | tmcct = apic_get_reg(apic, APIC_TMICT); | ||
522 | |||
523 | /* if initial count is 0, current count should also be 0 */ | 526 | /* if initial count is 0, current count should also be 0 */ |
524 | if (tmcct == 0) | 527 | if (apic_get_reg(apic, APIC_TMICT) == 0) |
525 | return 0; | 528 | return 0; |
526 | 529 | ||
527 | if (unlikely(ktime_to_ns(now) <= | 530 | remaining = hrtimer_expires_remaining(&apic->timer.dev); |
528 | ktime_to_ns(apic->timer.last_update))) { | 531 | if (ktime_to_ns(remaining) < 0) |
529 | /* Wrap around */ | 532 | remaining = ktime_set(0, 0); |
530 | passed = ktime_add(( { | 533 | |
531 | (ktime_t) { | 534 | ns = mod_64(ktime_to_ns(remaining), apic->timer.period); |
532 | .tv64 = KTIME_MAX - | 535 | tmcct = div64_u64(ns, (APIC_BUS_CYCLE_NS * apic->timer.divide_count)); |
533 | (apic->timer.last_update).tv64}; } | ||
534 | ), now); | ||
535 | apic_debug("time elapsed\n"); | ||
536 | } else | ||
537 | passed = ktime_sub(now, apic->timer.last_update); | ||
538 | |||
539 | counter_passed = div64_u64(ktime_to_ns(passed), | ||
540 | (APIC_BUS_CYCLE_NS * apic->timer.divide_count)); | ||
541 | |||
542 | if (counter_passed > tmcct) { | ||
543 | if (unlikely(!apic_lvtt_period(apic))) { | ||
544 | /* one-shot timers stick at 0 until reset */ | ||
545 | tmcct = 0; | ||
546 | } else { | ||
547 | /* | ||
548 | * periodic timers reset to APIC_TMICT when they | ||
549 | * hit 0. The while loop simulates this happening N | ||
550 | * times. (counter_passed %= tmcct) would also work, | ||
551 | * but might be slower or not work on 32-bit?? | ||
552 | */ | ||
553 | while (counter_passed > tmcct) | ||
554 | counter_passed -= tmcct; | ||
555 | tmcct -= counter_passed; | ||
556 | } | ||
557 | } else { | ||
558 | tmcct -= counter_passed; | ||
559 | } | ||
560 | 536 | ||
561 | return tmcct; | 537 | return tmcct; |
562 | } | 538 | } |
@@ -653,8 +629,6 @@ static void start_apic_timer(struct kvm_lapic *apic) | |||
653 | { | 629 | { |
654 | ktime_t now = apic->timer.dev.base->get_time(); | 630 | ktime_t now = apic->timer.dev.base->get_time(); |
655 | 631 | ||
656 | apic->timer.last_update = now; | ||
657 | |||
658 | apic->timer.period = apic_get_reg(apic, APIC_TMICT) * | 632 | apic->timer.period = apic_get_reg(apic, APIC_TMICT) * |
659 | APIC_BUS_CYCLE_NS * apic->timer.divide_count; | 633 | APIC_BUS_CYCLE_NS * apic->timer.divide_count; |
660 | atomic_set(&apic->timer.pending, 0); | 634 | atomic_set(&apic->timer.pending, 0); |
@@ -1110,16 +1084,6 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu) | |||
1110 | } | 1084 | } |
1111 | } | 1085 | } |
1112 | 1086 | ||
1113 | void kvm_apic_timer_intr_post(struct kvm_vcpu *vcpu, int vec) | ||
1114 | { | ||
1115 | struct kvm_lapic *apic = vcpu->arch.apic; | ||
1116 | |||
1117 | if (apic && apic_lvt_vector(apic, APIC_LVTT) == vec) | ||
1118 | apic->timer.last_update = ktime_add_ns( | ||
1119 | apic->timer.last_update, | ||
1120 | apic->timer.period); | ||
1121 | } | ||
1122 | |||
1123 | int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu) | 1087 | int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu) |
1124 | { | 1088 | { |
1125 | int vector = kvm_apic_has_interrupt(vcpu); | 1089 | int vector = kvm_apic_has_interrupt(vcpu); |
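The rewritten apic_get_tmcct() derives the current-count register purely from the hrtimer state: take the time remaining until the next expiry, reduce it modulo the programmed period (mod_64() exists because 32-bit kernels lack a native 64-bit %), and divide by the bus-cycle length scaled by the divide count. That retires the old last_update bookkeeping and its wrap-around special case, along with kvm_timer_intr_post(). A standalone sketch with made-up numbers (the bus cycle is taken as 1 ns here, an assumption):

    #include <stdio.h>
    #include <stdint.h>

    #define BUS_CYCLE_NS 1ULL   /* hypothetical stand-in for APIC_BUS_CYCLE_NS */

    static uint32_t tmcct_sketch(uint64_t remaining_ns, uint64_t period_ns,
                                 uint32_t divide_count)
    {
            uint64_t ns = remaining_ns % period_ns;  /* what mod_64() computes */

            return (uint32_t)(ns / (BUS_CYCLE_NS * divide_count));
    }

    int main(void)
    {
            /* periodic timer: period 1000 ns, pending expiries folded away,
             * so only the in-progress period is visible in the count */
            printf("%u\n", tmcct_sketch(2500, 1000, 1));    /* 500 */
            return 0;
    }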
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index 81858881287e..45ab6ee71209 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h | |||
@@ -12,7 +12,6 @@ struct kvm_lapic { | |||
12 | atomic_t pending; | 12 | atomic_t pending; |
13 | s64 period; /* unit: ns */ | 13 | s64 period; /* unit: ns */ |
14 | u32 divide_count; | 14 | u32 divide_count; |
15 | ktime_t last_update; | ||
16 | struct hrtimer dev; | 15 | struct hrtimer dev; |
17 | } timer; | 16 | } timer; |
18 | struct kvm_vcpu *vcpu; | 17 | struct kvm_vcpu *vcpu; |
@@ -42,7 +41,6 @@ void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data); | |||
42 | void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu); | 41 | void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu); |
43 | int kvm_lapic_enabled(struct kvm_vcpu *vcpu); | 42 | int kvm_lapic_enabled(struct kvm_vcpu *vcpu); |
44 | int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu); | 43 | int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu); |
45 | void kvm_apic_timer_intr_post(struct kvm_vcpu *vcpu, int vec); | ||
46 | 44 | ||
47 | void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr); | 45 | void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr); |
48 | void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu); | 46 | void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu); |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 83f11c7474a1..2d4477c71473 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -1698,8 +1698,13 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, | |||
1698 | if (largepage) | 1698 | if (largepage) |
1699 | spte |= PT_PAGE_SIZE_MASK; | 1699 | spte |= PT_PAGE_SIZE_MASK; |
1700 | if (mt_mask) { | 1700 | if (mt_mask) { |
1701 | mt_mask = get_memory_type(vcpu, gfn) << | 1701 | if (!kvm_is_mmio_pfn(pfn)) { |
1702 | kvm_x86_ops->get_mt_mask_shift(); | 1702 | mt_mask = get_memory_type(vcpu, gfn) << |
1703 | kvm_x86_ops->get_mt_mask_shift(); | ||
1704 | mt_mask |= VMX_EPT_IGMT_BIT; | ||
1705 | } else | ||
1706 | mt_mask = MTRR_TYPE_UNCACHABLE << | ||
1707 | kvm_x86_ops->get_mt_mask_shift(); | ||
1703 | spte |= mt_mask; | 1708 | spte |= mt_mask; |
1704 | } | 1709 | } |
1705 | 1710 | ||
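The new branch in set_spte() chooses the EPT memory type per page: RAM pages get the guest-derived type plus the "ignore guest memory type" bit, while MMIO pages are forced uncachable so device accesses never hit the cache. Condensed from the hunk itself:

        if (mt_mask) {
                if (!kvm_is_mmio_pfn(pfn)) {
                        /* RAM: honor the guest's memory type, ignore guest PAT. */
                        mt_mask = get_memory_type(vcpu, gfn) <<
                                  kvm_x86_ops->get_mt_mask_shift();
                        mt_mask |= VMX_EPT_IGMT_BIT;
                } else
                        /* MMIO: force uncachable regardless of guest MTRRs. */
                        mt_mask = MTRR_TYPE_UNCACHABLE <<
                                  kvm_x86_ops->get_mt_mask_shift();
                spte |= mt_mask;
        }

This pairs with the vmx.c hunk further down, which drops VMX_EPT_IGMT_BIT from the base PTE mask now that the bit is set per PTE, and only for RAM.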
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 1452851ae258..a9e769e4e251 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -1600,7 +1600,6 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu) | |||
1600 | /* Okay, we can deliver the interrupt: grab it and update PIC state. */ | 1600 | /* Okay, we can deliver the interrupt: grab it and update PIC state. */ |
1601 | intr_vector = kvm_cpu_get_interrupt(vcpu); | 1601 | intr_vector = kvm_cpu_get_interrupt(vcpu); |
1602 | svm_inject_irq(svm, intr_vector); | 1602 | svm_inject_irq(svm, intr_vector); |
1603 | kvm_timer_intr_post(vcpu, intr_vector); | ||
1604 | out: | 1603 | out: |
1605 | update_cr8_intercept(vcpu); | 1604 | update_cr8_intercept(vcpu); |
1606 | } | 1605 | } |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 6259d7467648..7611af576829 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -903,6 +903,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) | |||
903 | data = vmcs_readl(GUEST_SYSENTER_ESP); | 903 | data = vmcs_readl(GUEST_SYSENTER_ESP); |
904 | break; | 904 | break; |
905 | default: | 905 | default: |
906 | vmx_load_host_state(to_vmx(vcpu)); | ||
906 | msr = find_msr_entry(to_vmx(vcpu), msr_index); | 907 | msr = find_msr_entry(to_vmx(vcpu), msr_index); |
907 | if (msr) { | 908 | if (msr) { |
908 | data = msr->data; | 909 | data = msr->data; |
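The added vmx_load_host_state() call makes the cached MSR array current before it is read: while guest state is loaded, the live MSRs hold the guest's values and vmx->guest_msrs can be stale, and loading host state writes them back. An annotated excerpt of the patched default case (not standalone code):

        default:
                /* Flush live guest MSR values into the guest_msrs[] cache. */
                vmx_load_host_state(to_vmx(vcpu));
                msr = find_msr_entry(to_vmx(vcpu), msr_index);
                if (msr) {
                        data = msr->data;
                        break;
                }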
@@ -3285,7 +3286,6 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu) | |||
3285 | } | 3286 | } |
3286 | if (vcpu->arch.interrupt.pending) { | 3287 | if (vcpu->arch.interrupt.pending) { |
3287 | vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr); | 3288 | vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr); |
3288 | kvm_timer_intr_post(vcpu, vcpu->arch.interrupt.nr); | ||
3289 | if (kvm_cpu_has_interrupt(vcpu)) | 3289 | if (kvm_cpu_has_interrupt(vcpu)) |
3290 | enable_irq_window(vcpu); | 3290 | enable_irq_window(vcpu); |
3291 | } | 3291 | } |
@@ -3687,8 +3687,7 @@ static int __init vmx_init(void) | |||
3687 | if (vm_need_ept()) { | 3687 | if (vm_need_ept()) { |
3688 | bypass_guest_pf = 0; | 3688 | bypass_guest_pf = 0; |
3689 | kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK | | 3689 | kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK | |
3690 | VMX_EPT_WRITABLE_MASK | | 3690 | VMX_EPT_WRITABLE_MASK); |
3691 | VMX_EPT_IGMT_BIT); | ||
3692 | kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull, | 3691 | kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull, |
3693 | VMX_EPT_EXECUTABLE_MASK, | 3692 | VMX_EPT_EXECUTABLE_MASK, |
3694 | VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT); | 3693 | VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT); |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index cc17546a2406..758b7a155ae9 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -967,7 +967,6 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
967 | case KVM_CAP_MMU_SHADOW_CACHE_CONTROL: | 967 | case KVM_CAP_MMU_SHADOW_CACHE_CONTROL: |
968 | case KVM_CAP_SET_TSS_ADDR: | 968 | case KVM_CAP_SET_TSS_ADDR: |
969 | case KVM_CAP_EXT_CPUID: | 969 | case KVM_CAP_EXT_CPUID: |
970 | case KVM_CAP_CLOCKSOURCE: | ||
971 | case KVM_CAP_PIT: | 970 | case KVM_CAP_PIT: |
972 | case KVM_CAP_NOP_IO_DELAY: | 971 | case KVM_CAP_NOP_IO_DELAY: |
973 | case KVM_CAP_MP_STATE: | 972 | case KVM_CAP_MP_STATE: |
@@ -992,6 +991,9 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
992 | case KVM_CAP_IOMMU: | 991 | case KVM_CAP_IOMMU: |
993 | r = iommu_found(); | 992 | r = iommu_found(); |
994 | break; | 993 | break; |
994 | case KVM_CAP_CLOCKSOURCE: | ||
995 | r = boot_cpu_has(X86_FEATURE_CONSTANT_TSC); | ||
996 | break; | ||
995 | default: | 997 | default: |
996 | r = 0; | 998 | r = 0; |
997 | break; | 999 | break; |
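KVM_CAP_CLOCKSOURCE moves from the unconditionally-reported capability list into the computed cases: the paravirtual clock is now only advertised on hosts with a constant-rate TSC, presumably because kvmclock at this point scales raw TSC deltas. The new case, annotated:

        case KVM_CAP_CLOCKSOURCE:
                /* Advertise kvmclock only where the TSC frequency is stable. */
                r = boot_cpu_has(X86_FEATURE_CONSTANT_TSC);
                break;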
@@ -4127,9 +4129,13 @@ static void kvm_free_vcpus(struct kvm *kvm) | |||
4127 | 4129 | ||
4128 | } | 4130 | } |
4129 | 4131 | ||
4130 | void kvm_arch_destroy_vm(struct kvm *kvm) | 4132 | void kvm_arch_sync_events(struct kvm *kvm) |
4131 | { | 4133 | { |
4132 | kvm_free_all_assigned_devices(kvm); | 4134 | kvm_free_all_assigned_devices(kvm); |
4135 | } | ||
4136 | |||
4137 | void kvm_arch_destroy_vm(struct kvm *kvm) | ||
4138 | { | ||
4133 | kvm_iommu_unmap_guest(kvm); | 4139 | kvm_iommu_unmap_guest(kvm); |
4134 | kvm_free_pit(kvm); | 4140 | kvm_free_pit(kvm); |
4135 | kfree(kvm->arch.vpic); | 4141 | kfree(kvm->arch.vpic); |
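kvm_free_all_assigned_devices() is split out of kvm_arch_destroy_vm() into a new kvm_arch_sync_events() hook, presumably so that generic code can quiesce outstanding work such as assigned-device interrupts before VM teardown proper begins. Post-patch shape:

        void kvm_arch_sync_events(struct kvm *kvm)
        {
                /* Detach assigned devices (and their interrupt work) early. */
                kvm_free_all_assigned_devices(kvm);
        }

        void kvm_arch_destroy_vm(struct kvm *kvm)
        {
                kvm_iommu_unmap_guest(kvm);
                kvm_free_pit(kvm);
                kfree(kvm->arch.vpic);
                /* remainder unchanged */
        }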
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index a7ed208f81e3..92f1c6f3e19d 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
@@ -931,7 +931,7 @@ static void lguest_restart(char *reason) | |||
931 | * that we can fit comfortably. | 931 | * that we can fit comfortably. |
932 | * | 932 | * |
933 | * First we need assembly templates of each of the patchable Guest operations, | 933 | * First we need assembly templates of each of the patchable Guest operations, |
934 | * and these are in lguest_asm.S. */ | 934 | * and these are in i386_head.S. */ |
935 | 935 | ||
936 | /*G:060 We construct a table from the assembler templates: */ | 936 | /*G:060 We construct a table from the assembler templates: */ |
937 | static const struct lguest_insns | 937 | static const struct lguest_insns |
@@ -1093,7 +1093,7 @@ __init void lguest_init(void) | |||
1093 | acpi_ht = 0; | 1093 | acpi_ht = 0; |
1094 | #endif | 1094 | #endif |
1095 | 1095 | ||
1096 | /* We set the perferred console to "hvc". This is the "hypervisor | 1096 | /* We set the preferred console to "hvc". This is the "hypervisor |
1097 | * virtual console" driver written by the PowerPC people, which we also | 1097 | * virtual console" driver written by the PowerPC people, which we also |
1098 | * adapted for lguest's use. */ | 1098 | * adapted for lguest's use. */ |
1099 | add_preferred_console("hvc", 0, NULL); | 1099 | add_preferred_console("hvc", 0, NULL); |
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c index 4a20b2f9a381..7c8ca91bb9ec 100644 --- a/arch/x86/lib/usercopy_32.c +++ b/arch/x86/lib/usercopy_32.c | |||
@@ -56,7 +56,7 @@ do { \ | |||
56 | " jmp 2b\n" \ | 56 | " jmp 2b\n" \ |
57 | ".previous\n" \ | 57 | ".previous\n" \ |
58 | _ASM_EXTABLE(0b,3b) \ | 58 | _ASM_EXTABLE(0b,3b) \ |
59 | : "=d"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1), \ | 59 | : "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \ |
60 | "=&D" (__d2) \ | 60 | "=&D" (__d2) \ |
61 | : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \ | 61 | : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \ |
62 | : "memory"); \ | 62 | : "memory"); \ |
@@ -218,7 +218,7 @@ long strnlen_user(const char __user *s, long n) | |||
218 | " .align 4\n" | 218 | " .align 4\n" |
219 | " .long 0b,2b\n" | 219 | " .long 0b,2b\n" |
220 | ".previous" | 220 | ".previous" |
221 | :"=r" (n), "=D" (s), "=a" (res), "=c" (tmp) | 221 | :"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp) |
222 | :"0" (n), "1" (s), "2" (0), "3" (mask) | 222 | :"0" (n), "1" (s), "2" (0), "3" (mask) |
223 | :"cc"); | 223 | :"cc"); |
224 | return res & mask; | 224 | return res & mask; |
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index 64d6c84e6353..ec13cb5f17ed 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c | |||
@@ -32,7 +32,7 @@ do { \ | |||
32 | " jmp 2b\n" \ | 32 | " jmp 2b\n" \ |
33 | ".previous\n" \ | 33 | ".previous\n" \ |
34 | _ASM_EXTABLE(0b,3b) \ | 34 | _ASM_EXTABLE(0b,3b) \ |
35 | : "=r"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1), \ | 35 | : "=&r"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \ |
36 | "=&D" (__d2) \ | 36 | "=&D" (__d2) \ |
37 | : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \ | 37 | : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \ |
38 | : "memory"); \ | 38 | : "memory"); \ |
@@ -86,7 +86,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size) | |||
86 | ".previous\n" | 86 | ".previous\n" |
87 | _ASM_EXTABLE(0b,3b) | 87 | _ASM_EXTABLE(0b,3b) |
88 | _ASM_EXTABLE(1b,2b) | 88 | _ASM_EXTABLE(1b,2b) |
89 | : [size8] "=c"(size), [dst] "=&D" (__d0) | 89 | : [size8] "=&c"(size), [dst] "=&D" (__d0) |
90 | : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr), | 90 | : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr), |
91 | [zero] "r" (0UL), [eight] "r" (8UL)); | 91 | [zero] "r" (0UL), [eight] "r" (8UL)); |
92 | return size; | 92 | return size; |
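All three usercopy hunks apply the same fix: output operands that the asm writes before it has consumed every input gain GCC's earlyclobber modifier "&". Without it, the register allocator may place an output in the same register as an unrelated input, and the early write then corrupts that input. A minimal standalone illustration (hypothetical code, not from the kernel):

        /* Computes 2*in1 + in2. "out" is written by the first mov while
         * in2 is still unread, so "=&r" forbids sharing a register with
         * any input; a plain "=r" would let GCC alias out and in2. */
        static inline long twice_plus(long in1, long in2)
        {
                long out;

                asm("mov %1, %0\n\t"
                    "add %0, %0\n\t"
                    "add %2, %0"
                    : "=&r" (out)
                    : "r" (in1), "r" (in2));
                return out;
        }

The "=&d"(res), "=&c"(count) and similar changes above express exactly that: those outputs can be stored to before the copy loops have finished reading their inputs.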
diff --git a/arch/x86/mach-default/setup.c b/arch/x86/mach-default/setup.c index df167f265622..a265a7c63190 100644 --- a/arch/x86/mach-default/setup.c +++ b/arch/x86/mach-default/setup.c | |||
@@ -38,6 +38,15 @@ void __init pre_intr_init_hook(void) | |||
38 | init_ISA_irqs(); | 38 | init_ISA_irqs(); |
39 | } | 39 | } |
40 | 40 | ||
41 | /* | ||
42 | * IRQ2 is cascade interrupt to second interrupt controller | ||
43 | */ | ||
44 | static struct irqaction irq2 = { | ||
45 | .handler = no_action, | ||
46 | .mask = CPU_MASK_NONE, | ||
47 | .name = "cascade", | ||
48 | }; | ||
49 | |||
41 | /** | 50 | /** |
42 | * intr_init_hook - post gate setup interrupt initialisation | 51 | * intr_init_hook - post gate setup interrupt initialisation |
43 | * | 52 | * |
@@ -53,6 +62,9 @@ void __init intr_init_hook(void) | |||
53 | if (x86_quirks->arch_intr_init()) | 62 | if (x86_quirks->arch_intr_init()) |
54 | return; | 63 | return; |
55 | } | 64 | } |
65 | if (!acpi_ioapic) | ||
66 | setup_irq(2, &irq2); | ||
67 | |||
56 | } | 68 | } |
57 | 69 | ||
58 | /** | 70 | /** |
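The default machine setup now reserves IRQ2 whenever ACPI has not switched the box into IOAPIC mode: on a legacy dual-8259 system IRQ2 is the cascade line from the slave PIC, so it must be claimed with a do-nothing handler rather than left for drivers. A sketch of the post-patch hook (parts of the function outside the hunk, including the quirks guard, are reconstructed from surrounding context):

        /* IRQ2 is the cascade line between the two legacy 8259 PICs. */
        static struct irqaction irq2 = {
                .handler = no_action,   /* never delivers a real event */
                .mask    = CPU_MASK_NONE,
                .name    = "cascade",
        };

        void __init intr_init_hook(void)
        {
                /* ... earlier init elided ... */
                if (x86_quirks->arch_intr_init) {
                        if (x86_quirks->arch_intr_init())
                                return;
                }
                /* Claim the cascade only while we stay in legacy PIC mode. */
                if (!acpi_ioapic)
                        setup_irq(2, &irq2);
        }

This mirrors what mach-voyager already does; its existing setup_irq(2, &irq2) shows up as context in the next hunk.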
diff --git a/arch/x86/mach-voyager/setup.c b/arch/x86/mach-voyager/setup.c index a580b9562e76..d914a7996a66 100644 --- a/arch/x86/mach-voyager/setup.c +++ b/arch/x86/mach-voyager/setup.c | |||
@@ -33,13 +33,23 @@ void __init intr_init_hook(void) | |||
33 | setup_irq(2, &irq2); | 33 | setup_irq(2, &irq2); |
34 | } | 34 | } |
35 | 35 | ||
36 | void __init pre_setup_arch_hook(void) | 36 | static void voyager_disable_tsc(void) |
37 | { | 37 | { |
38 | /* Voyagers run their CPUs from independent clocks, so disable | 38 | /* Voyagers run their CPUs from independent clocks, so disable |
39 | * the TSC code because we can't sync them */ | 39 | * the TSC code because we can't sync them */ |
40 | setup_clear_cpu_cap(X86_FEATURE_TSC); | 40 | setup_clear_cpu_cap(X86_FEATURE_TSC); |
41 | } | 41 | } |
42 | 42 | ||
43 | void __init pre_setup_arch_hook(void) | ||
44 | { | ||
45 | voyager_disable_tsc(); | ||
46 | } | ||
47 | |||
48 | void __init pre_time_init_hook(void) | ||
49 | { | ||
50 | voyager_disable_tsc(); | ||
51 | } | ||
52 | |||
43 | void __init trap_init_hook(void) | 53 | void __init trap_init_hook(void) |
44 | { | 54 | { |
45 | } | 55 | } |
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index 9840b7ec749a..7ffcdeec4631 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c | |||
@@ -81,7 +81,7 @@ static void enable_local_vic_irq(unsigned int irq); | |||
81 | static void disable_local_vic_irq(unsigned int irq); | 81 | static void disable_local_vic_irq(unsigned int irq); |
82 | static void before_handle_vic_irq(unsigned int irq); | 82 | static void before_handle_vic_irq(unsigned int irq); |
83 | static void after_handle_vic_irq(unsigned int irq); | 83 | static void after_handle_vic_irq(unsigned int irq); |
84 | static void set_vic_irq_affinity(unsigned int irq, cpumask_t mask); | 84 | static void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask); |
85 | static void ack_vic_irq(unsigned int irq); | 85 | static void ack_vic_irq(unsigned int irq); |
86 | static void vic_enable_cpi(void); | 86 | static void vic_enable_cpi(void); |
87 | static void do_boot_cpu(__u8 cpuid); | 87 | static void do_boot_cpu(__u8 cpuid); |
@@ -211,8 +211,6 @@ static __u32 cpu_booted_map; | |||
211 | static cpumask_t smp_commenced_mask = CPU_MASK_NONE; | 211 | static cpumask_t smp_commenced_mask = CPU_MASK_NONE; |
212 | 212 | ||
213 | /* This is for the new dynamic CPU boot code */ | 213 | /* This is for the new dynamic CPU boot code */ |
214 | cpumask_t cpu_callin_map = CPU_MASK_NONE; | ||
215 | cpumask_t cpu_callout_map = CPU_MASK_NONE; | ||
216 | 214 | ||
217 | /* The per processor IRQ masks (these are usually kept in sync) */ | 215 | /* The per processor IRQ masks (these are usually kept in sync) */ |
218 | static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned; | 216 | static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned; |
@@ -378,7 +376,7 @@ void __init find_smp_config(void) | |||
378 | cpus_addr(phys_cpu_present_map)[0] |= | 376 | cpus_addr(phys_cpu_present_map)[0] |= |
379 | voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + | 377 | voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + |
380 | 3) << 24; | 378 | 3) << 24; |
381 | cpu_possible_map = phys_cpu_present_map; | 379 | init_cpu_possible(&phys_cpu_present_map); |
382 | printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", | 380 | printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", |
383 | cpus_addr(phys_cpu_present_map)[0]); | 381 | cpus_addr(phys_cpu_present_map)[0]); |
384 | /* Here we set up the VIC to enable SMP */ | 382 | /* Here we set up the VIC to enable SMP */ |
@@ -1599,16 +1597,16 @@ static void after_handle_vic_irq(unsigned int irq) | |||
1599 | * change the mask and then do an interrupt enable CPI to re-enable on | 1597 | * change the mask and then do an interrupt enable CPI to re-enable on |
1600 | * the selected processors */ | 1598 | * the selected processors */ |
1601 | 1599 | ||
1602 | void set_vic_irq_affinity(unsigned int irq, cpumask_t mask) | 1600 | void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask) |
1603 | { | 1601 | { |
1604 | /* Only extended processors handle interrupts */ | 1602 | /* Only extended processors handle interrupts */ |
1605 | unsigned long real_mask; | 1603 | unsigned long real_mask; |
1606 | unsigned long irq_mask = 1 << irq; | 1604 | unsigned long irq_mask = 1 << irq; |
1607 | int cpu; | 1605 | int cpu; |
1608 | 1606 | ||
1609 | real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors; | 1607 | real_mask = cpus_addr(*mask)[0] & voyager_extended_vic_processors; |
1610 | 1608 | ||
1611 | if (cpus_addr(mask)[0] == 0) | 1609 | if (cpus_addr(*mask)[0] == 0) |
1612 | /* can't have no CPUs to accept the interrupt -- extremely | 1610 | /* can't have no CPUs to accept the interrupt -- extremely |
1613 | * bad things will happen */ | 1611 | * bad things will happen */ |
1614 | return; | 1612 | return; |
@@ -1750,10 +1748,11 @@ static void __cpuinit voyager_smp_prepare_boot_cpu(void) | |||
1750 | init_gdt(smp_processor_id()); | 1748 | init_gdt(smp_processor_id()); |
1751 | switch_to_new_gdt(); | 1749 | switch_to_new_gdt(); |
1752 | 1750 | ||
1753 | cpu_set(smp_processor_id(), cpu_online_map); | 1751 | cpu_online_map = cpumask_of_cpu(smp_processor_id()); |
1754 | cpu_set(smp_processor_id(), cpu_callout_map); | 1752 | cpu_callout_map = cpumask_of_cpu(smp_processor_id()); |
1755 | cpu_set(smp_processor_id(), cpu_possible_map); | 1753 | cpu_callin_map = CPU_MASK_NONE; |
1756 | cpu_set(smp_processor_id(), cpu_present_map); | 1754 | cpu_present_map = cpumask_of_cpu(smp_processor_id()); |
1755 | |||
1757 | } | 1756 | } |
1758 | 1757 | ||
1759 | static int __cpuinit voyager_cpu_up(unsigned int cpu) | 1758 | static int __cpuinit voyager_cpu_up(unsigned int cpu) |
@@ -1783,9 +1782,9 @@ void __init smp_setup_processor_id(void) | |||
1783 | x86_write_percpu(cpu_number, hard_smp_processor_id()); | 1782 | x86_write_percpu(cpu_number, hard_smp_processor_id()); |
1784 | } | 1783 | } |
1785 | 1784 | ||
1786 | static void voyager_send_call_func(cpumask_t callmask) | 1785 | static void voyager_send_call_func(const struct cpumask *callmask) |
1787 | { | 1786 | { |
1788 | __u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id()); | 1787 | __u32 mask = cpus_addr(*callmask)[0] & ~(1 << smp_processor_id()); |
1789 | send_CPI(mask, VIC_CALL_FUNCTION_CPI); | 1788 | send_CPI(mask, VIC_CALL_FUNCTION_CPI); |
1790 | } | 1789 | } |
1791 | 1790 | ||
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c index c7b06feb139b..5d87f586f8d7 100644 --- a/arch/x86/math-emu/fpu_entry.c +++ b/arch/x86/math-emu/fpu_entry.c | |||
@@ -131,7 +131,7 @@ u_char emulating = 0; | |||
131 | static int valid_prefix(u_char *Byte, u_char __user ** fpu_eip, | 131 | static int valid_prefix(u_char *Byte, u_char __user ** fpu_eip, |
132 | overrides * override); | 132 | overrides * override); |
133 | 133 | ||
134 | asmlinkage void math_emulate(long arg) | 134 | void math_emulate(struct math_emu_info *info) |
135 | { | 135 | { |
136 | u_char FPU_modrm, byte1; | 136 | u_char FPU_modrm, byte1; |
137 | unsigned short code; | 137 | unsigned short code; |
@@ -161,7 +161,7 @@ asmlinkage void math_emulate(long arg) | |||
161 | RE_ENTRANT_CHECK_ON; | 161 | RE_ENTRANT_CHECK_ON; |
162 | #endif /* RE_ENTRANT_CHECKING */ | 162 | #endif /* RE_ENTRANT_CHECKING */ |
163 | 163 | ||
164 | SETUP_DATA_AREA(arg); | 164 | FPU_info = info; |
165 | 165 | ||
166 | FPU_ORIG_EIP = FPU_EIP; | 166 | FPU_ORIG_EIP = FPU_EIP; |
167 | 167 | ||
@@ -659,7 +659,7 @@ static int valid_prefix(u_char *Byte, u_char __user **fpu_eip, | |||
659 | } | 659 | } |
660 | } | 660 | } |
661 | 661 | ||
662 | void math_abort(struct info *info, unsigned int signal) | 662 | void math_abort(struct math_emu_info *info, unsigned int signal) |
663 | { | 663 | { |
664 | FPU_EIP = FPU_ORIG_EIP; | 664 | FPU_EIP = FPU_ORIG_EIP; |
665 | current->thread.trap_no = 16; | 665 | current->thread.trap_no = 16; |
diff --git a/arch/x86/math-emu/fpu_proto.h b/arch/x86/math-emu/fpu_proto.h index aa49b6a0d850..9779df436b7d 100644 --- a/arch/x86/math-emu/fpu_proto.h +++ b/arch/x86/math-emu/fpu_proto.h | |||
@@ -51,8 +51,8 @@ extern void ffreep(void); | |||
51 | extern void fst_i_(void); | 51 | extern void fst_i_(void); |
52 | extern void fstp_i(void); | 52 | extern void fstp_i(void); |
53 | /* fpu_entry.c */ | 53 | /* fpu_entry.c */ |
54 | asmlinkage extern void math_emulate(long arg); | 54 | extern void math_emulate(struct math_emu_info *info); |
55 | extern void math_abort(struct info *info, unsigned int signal); | 55 | extern void math_abort(struct math_emu_info *info, unsigned int signal); |
56 | /* fpu_etc.c */ | 56 | /* fpu_etc.c */ |
57 | extern void FPU_etc(void); | 57 | extern void FPU_etc(void); |
58 | /* fpu_tags.c */ | 58 | /* fpu_tags.c */ |
diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h index 13488fa153e0..50fa0ec2c8a5 100644 --- a/arch/x86/math-emu/fpu_system.h +++ b/arch/x86/math-emu/fpu_system.h | |||
@@ -16,10 +16,6 @@ | |||
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/mm.h> | 17 | #include <linux/mm.h> |
18 | 18 | ||
19 | /* This sets the pointer FPU_info to point to the argument part | ||
20 | of the stack frame of math_emulate() */ | ||
21 | #define SETUP_DATA_AREA(arg) FPU_info = (struct info *) &arg | ||
22 | |||
23 | /* s is always from a cpu register, and the cpu does bounds checking | 19 | /* s is always from a cpu register, and the cpu does bounds checking |
24 | * during register load --> no further bounds checks needed */ | 20 | * during register load --> no further bounds checks needed */ |
25 | #define LDT_DESCRIPTOR(s) (((struct desc_struct *)current->mm->context.ldt)[(s) >> 3]) | 21 | #define LDT_DESCRIPTOR(s) (((struct desc_struct *)current->mm->context.ldt)[(s) >> 3]) |
@@ -38,12 +34,12 @@ | |||
38 | #define I387 (current->thread.xstate) | 34 | #define I387 (current->thread.xstate) |
39 | #define FPU_info (I387->soft.info) | 35 | #define FPU_info (I387->soft.info) |
40 | 36 | ||
41 | #define FPU_CS (*(unsigned short *) &(FPU_info->___cs)) | 37 | #define FPU_CS (*(unsigned short *) &(FPU_info->regs->cs)) |
42 | #define FPU_SS (*(unsigned short *) &(FPU_info->___ss)) | 38 | #define FPU_SS (*(unsigned short *) &(FPU_info->regs->ss)) |
43 | #define FPU_DS (*(unsigned short *) &(FPU_info->___ds)) | 39 | #define FPU_DS (*(unsigned short *) &(FPU_info->regs->ds)) |
44 | #define FPU_EAX (FPU_info->___eax) | 40 | #define FPU_EAX (FPU_info->regs->ax) |
45 | #define FPU_EFLAGS (FPU_info->___eflags) | 41 | #define FPU_EFLAGS (FPU_info->regs->flags) |
46 | #define FPU_EIP (FPU_info->___eip) | 42 | #define FPU_EIP (FPU_info->regs->ip) |
47 | #define FPU_ORIG_EIP (FPU_info->___orig_eip) | 43 | #define FPU_ORIG_EIP (FPU_info->___orig_eip) |
48 | 44 | ||
49 | #define FPU_lookahead (I387->soft.lookahead) | 45 | #define FPU_lookahead (I387->soft.lookahead) |
diff --git a/arch/x86/math-emu/get_address.c b/arch/x86/math-emu/get_address.c index d701e2b39e44..420b3b6e3915 100644 --- a/arch/x86/math-emu/get_address.c +++ b/arch/x86/math-emu/get_address.c | |||
@@ -29,46 +29,43 @@ | |||
29 | #define FPU_WRITE_BIT 0x10 | 29 | #define FPU_WRITE_BIT 0x10 |
30 | 30 | ||
31 | static int reg_offset[] = { | 31 | static int reg_offset[] = { |
32 | offsetof(struct info, ___eax), | 32 | offsetof(struct pt_regs, ax), |
33 | offsetof(struct info, ___ecx), | 33 | offsetof(struct pt_regs, cx), |
34 | offsetof(struct info, ___edx), | 34 | offsetof(struct pt_regs, dx), |
35 | offsetof(struct info, ___ebx), | 35 | offsetof(struct pt_regs, bx), |
36 | offsetof(struct info, ___esp), | 36 | offsetof(struct pt_regs, sp), |
37 | offsetof(struct info, ___ebp), | 37 | offsetof(struct pt_regs, bp), |
38 | offsetof(struct info, ___esi), | 38 | offsetof(struct pt_regs, si), |
39 | offsetof(struct info, ___edi) | 39 | offsetof(struct pt_regs, di) |
40 | }; | 40 | }; |
41 | 41 | ||
42 | #define REG_(x) (*(long *)(reg_offset[(x)]+(u_char *) FPU_info)) | 42 | #define REG_(x) (*(long *)(reg_offset[(x)] + (u_char *)FPU_info->regs)) |
43 | 43 | ||
44 | static int reg_offset_vm86[] = { | 44 | static int reg_offset_vm86[] = { |
45 | offsetof(struct info, ___cs), | 45 | offsetof(struct pt_regs, cs), |
46 | offsetof(struct info, ___vm86_ds), | 46 | offsetof(struct kernel_vm86_regs, ds), |
47 | offsetof(struct info, ___vm86_es), | 47 | offsetof(struct kernel_vm86_regs, es), |
48 | offsetof(struct info, ___vm86_fs), | 48 | offsetof(struct kernel_vm86_regs, fs), |
49 | offsetof(struct info, ___vm86_gs), | 49 | offsetof(struct kernel_vm86_regs, gs), |
50 | offsetof(struct info, ___ss), | 50 | offsetof(struct pt_regs, ss), |
51 | offsetof(struct info, ___vm86_ds) | 51 | offsetof(struct kernel_vm86_regs, ds) |
52 | }; | 52 | }; |
53 | 53 | ||
54 | #define VM86_REG_(x) (*(unsigned short *) \ | 54 | #define VM86_REG_(x) (*(unsigned short *) \ |
55 | (reg_offset_vm86[((unsigned)x)]+(u_char *) FPU_info)) | 55 | (reg_offset_vm86[((unsigned)x)] + (u_char *)FPU_info->regs)) |
56 | |||
57 | /* This dummy, gs is not saved on the stack. */ | ||
58 | #define ___GS ___ds | ||
59 | 56 | ||
60 | static int reg_offset_pm[] = { | 57 | static int reg_offset_pm[] = { |
61 | offsetof(struct info, ___cs), | 58 | offsetof(struct pt_regs, cs), |
62 | offsetof(struct info, ___ds), | 59 | offsetof(struct pt_regs, ds), |
63 | offsetof(struct info, ___es), | 60 | offsetof(struct pt_regs, es), |
64 | offsetof(struct info, ___fs), | 61 | offsetof(struct pt_regs, fs), |
65 | offsetof(struct info, ___GS), | 62 | offsetof(struct pt_regs, ds), /* dummy, not saved on stack */ |
66 | offsetof(struct info, ___ss), | 63 | offsetof(struct pt_regs, ss), |
67 | offsetof(struct info, ___ds) | 64 | offsetof(struct pt_regs, ds) |
68 | }; | 65 | }; |
69 | 66 | ||
70 | #define PM_REG_(x) (*(unsigned short *) \ | 67 | #define PM_REG_(x) (*(unsigned short *) \ |
71 | (reg_offset_pm[((unsigned)x)]+(u_char *) FPU_info)) | 68 | (reg_offset_pm[((unsigned)x)] + (u_char *)FPU_info->regs)) |
72 | 69 | ||
73 | /* Decode the SIB byte. This function assumes mod != 0 */ | 70 | /* Decode the SIB byte. This function assumes mod != 0 */ |
74 | static int sib(int mod, unsigned long *fpu_eip) | 71 | static int sib(int mod, unsigned long *fpu_eip) |
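With struct info gone, the emulator's register tables index directly into the saved struct pt_regs (plus struct kernel_vm86_regs for the vm86 segment slots), and the REG_()/VM86_REG_()/PM_REG_() macros turn a table entry into an lvalue by adding the offset to FPU_info->regs. A self-contained sketch of the offsetof technique, using hypothetical names:

        #include <stddef.h>

        struct fake_regs {
                long ax, cx, dx, bx;
        };

        /* Offset table: one entry per encodable register. */
        static const size_t reg_off[] = {
                offsetof(struct fake_regs, ax),
                offsetof(struct fake_regs, cx),
                offsetof(struct fake_regs, dx),
                offsetof(struct fake_regs, bx),
        };

        /* Add the offset to the frame base and reinterpret the result as
         * an lvalue, the same cast chain the kernel's REG_() macro uses. */
        #define FAKE_REG(regs, i) \
                (*(long *)((unsigned char *)(regs) + reg_off[(i)]))

FAKE_REG(&r, 2) then reads or writes r.dx; the emulator does the same against the pt_regs frame, which is why only the table contents had to change here.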
@@ -349,34 +346,34 @@ void __user *FPU_get_address_16(u_char FPU_modrm, unsigned long *fpu_eip, | |||
349 | } | 346 | } |
350 | switch (rm) { | 347 | switch (rm) { |
351 | case 0: | 348 | case 0: |
352 | address += FPU_info->___ebx + FPU_info->___esi; | 349 | address += FPU_info->regs->bx + FPU_info->regs->si; |
353 | break; | 350 | break; |
354 | case 1: | 351 | case 1: |
355 | address += FPU_info->___ebx + FPU_info->___edi; | 352 | address += FPU_info->regs->bx + FPU_info->regs->di; |
356 | break; | 353 | break; |
357 | case 2: | 354 | case 2: |
358 | address += FPU_info->___ebp + FPU_info->___esi; | 355 | address += FPU_info->regs->bp + FPU_info->regs->si; |
359 | if (addr_modes.override.segment == PREFIX_DEFAULT) | 356 | if (addr_modes.override.segment == PREFIX_DEFAULT) |
360 | addr_modes.override.segment = PREFIX_SS_; | 357 | addr_modes.override.segment = PREFIX_SS_; |
361 | break; | 358 | break; |
362 | case 3: | 359 | case 3: |
363 | address += FPU_info->___ebp + FPU_info->___edi; | 360 | address += FPU_info->regs->bp + FPU_info->regs->di; |
364 | if (addr_modes.override.segment == PREFIX_DEFAULT) | 361 | if (addr_modes.override.segment == PREFIX_DEFAULT) |
365 | addr_modes.override.segment = PREFIX_SS_; | 362 | addr_modes.override.segment = PREFIX_SS_; |
366 | break; | 363 | break; |
367 | case 4: | 364 | case 4: |
368 | address += FPU_info->___esi; | 365 | address += FPU_info->regs->si; |
369 | break; | 366 | break; |
370 | case 5: | 367 | case 5: |
371 | address += FPU_info->___edi; | 368 | address += FPU_info->regs->di; |
372 | break; | 369 | break; |
373 | case 6: | 370 | case 6: |
374 | address += FPU_info->___ebp; | 371 | address += FPU_info->regs->bp; |
375 | if (addr_modes.override.segment == PREFIX_DEFAULT) | 372 | if (addr_modes.override.segment == PREFIX_DEFAULT) |
376 | addr_modes.override.segment = PREFIX_SS_; | 373 | addr_modes.override.segment = PREFIX_SS_; |
377 | break; | 374 | break; |
378 | case 7: | 375 | case 7: |
379 | address += FPU_info->___ebx; | 376 | address += FPU_info->regs->bx; |
380 | break; | 377 | break; |
381 | } | 378 | } |
382 | 379 | ||
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 9e268b6b204e..c76ef1d701c9 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -534,7 +534,7 @@ static int vmalloc_fault(unsigned long address) | |||
534 | happen within a race in page table update. In the later | 534 | happen within a race in page table update. In the later |
535 | case just flush. */ | 535 | case just flush. */ |
536 | 536 | ||
537 | pgd = pgd_offset(current->mm ?: &init_mm, address); | 537 | pgd = pgd_offset(current->active_mm, address); |
538 | pgd_ref = pgd_offset_k(address); | 538 | pgd_ref = pgd_offset_k(address); |
539 | if (pgd_none(*pgd_ref)) | 539 | if (pgd_none(*pgd_ref)) |
540 | return -1; | 540 | return -1; |
@@ -603,8 +603,6 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) | |||
603 | 603 | ||
604 | si_code = SEGV_MAPERR; | 604 | si_code = SEGV_MAPERR; |
605 | 605 | ||
606 | if (notify_page_fault(regs)) | ||
607 | return; | ||
608 | if (unlikely(kmmio_fault(regs, address))) | 606 | if (unlikely(kmmio_fault(regs, address))) |
609 | return; | 607 | return; |
610 | 608 | ||
@@ -634,6 +632,9 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) | |||
634 | if (spurious_fault(address, error_code)) | 632 | if (spurious_fault(address, error_code)) |
635 | return; | 633 | return; |
636 | 634 | ||
635 | /* kprobes don't want to hook the spurious faults. */ | ||
636 | if (notify_page_fault(regs)) | ||
637 | return; | ||
637 | /* | 638 | /* |
638 | * Don't take the mm semaphore here. If we fixup a prefetch | 639 | * Don't take the mm semaphore here. If we fixup a prefetch |
639 | * fault we could otherwise deadlock. | 640 | * fault we could otherwise deadlock. |
@@ -641,6 +642,9 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) | |||
641 | goto bad_area_nosemaphore; | 642 | goto bad_area_nosemaphore; |
642 | } | 643 | } |
643 | 644 | ||
645 | /* kprobes don't want to hook the spurious faults. */ | ||
646 | if (notify_page_fault(regs)) | ||
647 | return; | ||
644 | 648 | ||
645 | /* | 649 | /* |
646 | * It's safe to allow irq's after cr2 has been saved and the | 650 | * It's safe to allow irq's after cr2 has been saved and the |
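Two fixes land in the fault path. First, vmalloc_fault() resolves the pgd through current->active_mm, which is valid even for kernel threads (they borrow the previous task's mm), instead of the current->mm ?: &init_mm fallback. Second, the kprobes notification moves below the spurious-fault check, so probes never fire for faults that a lazy TLB flush already resolved; the same check is also added on the user-address path. The resulting order for kernel addresses, in sketch form:

        if (unlikely(kmmio_fault(regs, address)))       /* mmiotrace first */
                return;
        /* ... */
        if (spurious_fault(address, error_code))        /* lazy-TLB artifacts */
                return;
        /* kprobes don't want to hook the spurious faults. */
        if (notify_page_fault(regs))
                return;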
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 88f1b10de3be..2cef05074413 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c | |||
@@ -138,6 +138,47 @@ static pte_t * __init one_page_table_init(pmd_t *pmd) | |||
138 | return pte_offset_kernel(pmd, 0); | 138 | return pte_offset_kernel(pmd, 0); |
139 | } | 139 | } |
140 | 140 | ||
141 | static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd, | ||
142 | unsigned long vaddr, pte_t *lastpte) | ||
143 | { | ||
144 | #ifdef CONFIG_HIGHMEM | ||
145 | /* | ||
146 | * Something (early fixmap) may already have put a pte | ||
147 | * page here, which causes the page table allocation | ||
148 | * to become nonlinear. Attempt to fix it, and if it | ||
149 | * is still nonlinear then we have to bug. | ||
150 | */ | ||
151 | int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT; | ||
152 | int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT; | ||
153 | |||
154 | if (pmd_idx_kmap_begin != pmd_idx_kmap_end | ||
155 | && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin | ||
156 | && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end | ||
157 | && ((__pa(pte) >> PAGE_SHIFT) < table_start | ||
158 | || (__pa(pte) >> PAGE_SHIFT) >= table_end)) { | ||
159 | pte_t *newpte; | ||
160 | int i; | ||
161 | |||
162 | BUG_ON(after_init_bootmem); | ||
163 | newpte = alloc_low_page(); | ||
164 | for (i = 0; i < PTRS_PER_PTE; i++) | ||
165 | set_pte(newpte + i, pte[i]); | ||
166 | |||
167 | paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT); | ||
168 | set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE)); | ||
169 | BUG_ON(newpte != pte_offset_kernel(pmd, 0)); | ||
170 | __flush_tlb_all(); | ||
171 | |||
172 | paravirt_release_pte(__pa(pte) >> PAGE_SHIFT); | ||
173 | pte = newpte; | ||
174 | } | ||
175 | BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1) | ||
176 | && vaddr > fix_to_virt(FIX_KMAP_END) | ||
177 | && lastpte && lastpte + PTRS_PER_PTE != pte); | ||
178 | #endif | ||
179 | return pte; | ||
180 | } | ||
181 | |||
141 | /* | 182 | /* |
142 | * This function initializes a certain range of kernel virtual memory | 183 | * This function initializes a certain range of kernel virtual memory |
143 | * with new bootmem page tables, everywhere page tables are missing in | 184 | * with new bootmem page tables, everywhere page tables are missing in |
@@ -154,6 +195,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) | |||
154 | unsigned long vaddr; | 195 | unsigned long vaddr; |
155 | pgd_t *pgd; | 196 | pgd_t *pgd; |
156 | pmd_t *pmd; | 197 | pmd_t *pmd; |
198 | pte_t *pte = NULL; | ||
157 | 199 | ||
158 | vaddr = start; | 200 | vaddr = start; |
159 | pgd_idx = pgd_index(vaddr); | 201 | pgd_idx = pgd_index(vaddr); |
@@ -165,7 +207,8 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) | |||
165 | pmd = pmd + pmd_index(vaddr); | 207 | pmd = pmd + pmd_index(vaddr); |
166 | for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); | 208 | for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); |
167 | pmd++, pmd_idx++) { | 209 | pmd++, pmd_idx++) { |
168 | one_page_table_init(pmd); | 210 | pte = page_table_kmap_check(one_page_table_init(pmd), |
211 | pmd, vaddr, pte); | ||
169 | 212 | ||
170 | vaddr += PMD_SIZE; | 213 | vaddr += PMD_SIZE; |
171 | } | 214 | } |
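page_table_kmap_check() covers the case where the early fixmap code has already installed a pte page inside the kmap pmd range; such a page lives outside the boot table area and would make the kmap pte pages nonlinear. When detected, the ptes are copied into a freshly allocated low page and the pmd is repointed; the trailing BUG_ON then asserts the linearity invariant. page_table_range_init() threads each pte page through as lastpte so consecutive iterations can be checked against one another:

        /* Invariant inside the kmap window (fixmap addresses grow downward,
         * hence the reversed-looking bounds): each pmd's pte page must
         * directly follow the previous one. */
        BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
               && vaddr > fix_to_virt(FIX_KMAP_END)
               && lastpte && lastpte + PTRS_PER_PTE != pte);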
@@ -508,7 +551,6 @@ static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base) | |||
508 | * Fixed mappings, only the page table structure has to be | 551 | * Fixed mappings, only the page table structure has to be |
509 | * created - mappings will be set by set_fixmap(): | 552 | * created - mappings will be set by set_fixmap(): |
510 | */ | 553 | */ |
511 | early_ioremap_clear(); | ||
512 | vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; | 554 | vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; |
513 | end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK; | 555 | end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK; |
514 | page_table_range_init(vaddr, end, pgd_base); | 556 | page_table_range_init(vaddr, end, pgd_base); |
@@ -801,7 +843,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse) | |||
801 | tables += PAGE_ALIGN(ptes * sizeof(pte_t)); | 843 | tables += PAGE_ALIGN(ptes * sizeof(pte_t)); |
802 | 844 | ||
803 | /* for fixmap */ | 845 | /* for fixmap */ |
804 | tables += PAGE_SIZE * 2; | 846 | tables += PAGE_ALIGN(__end_of_fixed_addresses * sizeof(pte_t)); |
805 | 847 | ||
806 | /* | 848 | /* |
807 | * RED-PEN putting page tables only on node 0 could | 849 | * RED-PEN putting page tables only on node 0 could |
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 23f68e77ad1f..e6d36b490250 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -596,7 +596,7 @@ static void __init init_gbpages(void) | |||
596 | direct_gbpages = 0; | 596 | direct_gbpages = 0; |
597 | } | 597 | } |
598 | 598 | ||
599 | static unsigned long __init kernel_physical_mapping_init(unsigned long start, | 599 | static unsigned long __meminit kernel_physical_mapping_init(unsigned long start, |
600 | unsigned long end, | 600 | unsigned long end, |
601 | unsigned long page_size_mask) | 601 | unsigned long page_size_mask) |
602 | { | 602 | { |
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c index d0151d8ce452..ca53224fc56c 100644 --- a/arch/x86/mm/iomap_32.c +++ b/arch/x86/mm/iomap_32.c | |||
@@ -17,6 +17,7 @@ | |||
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <asm/iomap.h> | 19 | #include <asm/iomap.h> |
20 | #include <asm/pat.h> | ||
20 | #include <linux/module.h> | 21 | #include <linux/module.h> |
21 | 22 | ||
22 | /* Map 'pfn' using fixed map 'type' and protections 'prot' | 23 | /* Map 'pfn' using fixed map 'type' and protections 'prot' |
@@ -29,6 +30,15 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) | |||
29 | 30 | ||
30 | pagefault_disable(); | 31 | pagefault_disable(); |
31 | 32 | ||
33 | /* | ||
34 | * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS. | ||
35 | * PAGE_KERNEL_WC maps to PWT, which translates to uncached if the | ||
36 | * MTRR is UC or WC. UC_MINUS gets the real intention of the | ||
37 | * user, which is "WC if the MTRR is WC, UC if you can't do that." | ||
38 | */ | ||
39 | if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC)) | ||
40 | prot = PAGE_KERNEL_UC_MINUS; | ||
41 | |||
32 | idx = type + KM_TYPE_NR*smp_processor_id(); | 42 | idx = type + KM_TYPE_NR*smp_processor_id(); |
33 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | 43 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); |
34 | set_pte(kmap_pte-idx, pfn_pte(pfn, prot)); | 44 | set_pte(kmap_pte-idx, pfn_pte(pfn, prot)); |
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index bd85d42819e1..f45d5e29a72e 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c | |||
@@ -134,25 +134,6 @@ int page_is_ram(unsigned long pagenr) | |||
134 | return 0; | 134 | return 0; |
135 | } | 135 | } |
136 | 136 | ||
137 | int pagerange_is_ram(unsigned long start, unsigned long end) | ||
138 | { | ||
139 | int ram_page = 0, not_rampage = 0; | ||
140 | unsigned long page_nr; | ||
141 | |||
142 | for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT); | ||
143 | ++page_nr) { | ||
144 | if (page_is_ram(page_nr)) | ||
145 | ram_page = 1; | ||
146 | else | ||
147 | not_rampage = 1; | ||
148 | |||
149 | if (ram_page == not_rampage) | ||
150 | return -1; | ||
151 | } | ||
152 | |||
153 | return ram_page; | ||
154 | } | ||
155 | |||
156 | /* | 137 | /* |
157 | * Fix up the linear direct mapping of the kernel to avoid cache attribute | 138 | * Fix up the linear direct mapping of the kernel to avoid cache attribute |
158 | * conflicts. | 139 | * conflicts. |
@@ -557,34 +538,9 @@ void __init early_ioremap_init(void) | |||
557 | } | 538 | } |
558 | } | 539 | } |
559 | 540 | ||
560 | void __init early_ioremap_clear(void) | ||
561 | { | ||
562 | pmd_t *pmd; | ||
563 | |||
564 | if (early_ioremap_debug) | ||
565 | printk(KERN_INFO "early_ioremap_clear()\n"); | ||
566 | |||
567 | pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); | ||
568 | pmd_clear(pmd); | ||
569 | paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT); | ||
570 | __flush_tlb_all(); | ||
571 | } | ||
572 | |||
573 | void __init early_ioremap_reset(void) | 541 | void __init early_ioremap_reset(void) |
574 | { | 542 | { |
575 | enum fixed_addresses idx; | ||
576 | unsigned long addr, phys; | ||
577 | pte_t *pte; | ||
578 | |||
579 | after_paging_init = 1; | 543 | after_paging_init = 1; |
580 | for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) { | ||
581 | addr = fix_to_virt(idx); | ||
582 | pte = early_ioremap_pte(addr); | ||
583 | if (pte_present(*pte)) { | ||
584 | phys = pte_val(*pte) & PAGE_MASK; | ||
585 | set_fixmap(idx, phys); | ||
586 | } | ||
587 | } | ||
588 | } | 544 | } |
589 | 545 | ||
590 | static void __init __early_set_fixmap(enum fixed_addresses idx, | 546 | static void __init __early_set_fixmap(enum fixed_addresses idx, |
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 71a14f89f89e..f3516da035d1 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c | |||
@@ -145,7 +145,7 @@ int __init compute_hash_shift(struct bootnode *nodes, int numnodes, | |||
145 | return shift; | 145 | return shift; |
146 | } | 146 | } |
147 | 147 | ||
148 | int early_pfn_to_nid(unsigned long pfn) | 148 | int __meminit __early_pfn_to_nid(unsigned long pfn) |
149 | { | 149 | { |
150 | return phys_to_nid(pfn << PAGE_SHIFT); | 150 | return phys_to_nid(pfn << PAGE_SHIFT); |
151 | } | 151 | } |
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index e89d24815f26..7be47d1a97e4 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -508,18 +508,13 @@ static int split_large_page(pte_t *kpte, unsigned long address) | |||
508 | #endif | 508 | #endif |
509 | 509 | ||
510 | /* | 510 | /* |
511 | * Install the new, split up pagetable. Important details here: | 511 | * Install the new, split up pagetable. |
512 | * | 512 | * |
513 | * On Intel the NX bit of all levels must be cleared to make a | 513 | * We use the standard kernel pagetable protections for the new |
514 | * page executable. See section 4.13.2 of Intel 64 and IA-32 | 514 | * pagetable protections, the actual ptes set above control the |
515 | * Architectures Software Developer's Manual). | 515 | * primary protection behavior: |
516 | * | ||
517 | * Mark the entry present. The current mapping might be | ||
518 | * set to not present, which we preserved above. | ||
519 | */ | 516 | */ |
520 | ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte))); | 517 | __set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE))); |
521 | pgprot_val(ref_prot) |= _PAGE_PRESENT; | ||
522 | __set_pmd_pte(kpte, address, mk_pte(base, ref_prot)); | ||
523 | base = NULL; | 518 | base = NULL; |
524 | 519 | ||
525 | out_unlock: | 520 | out_unlock: |
@@ -534,6 +529,36 @@ out_unlock: | |||
534 | return 0; | 529 | return 0; |
535 | } | 530 | } |
536 | 531 | ||
532 | static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr, | ||
533 | int primary) | ||
534 | { | ||
535 | /* | ||
536 | * Ignore all non-primary paths. | ||
537 | */ | ||
538 | if (!primary) | ||
539 | return 0; | ||
540 | |||
541 | /* | ||
542 | * Ignore the NULL PTE for kernel identity mapping, as it is expected | ||
543 | * to have holes. | ||
544 | * Also set numpages to '1' indicating that we processed cpa req for | ||
545 | * one virtual address page and its pfn. TBD: numpages can be set based | ||
546 | * on the initial value and the level returned by lookup_address(). | ||
547 | */ | ||
548 | if (within(vaddr, PAGE_OFFSET, | ||
549 | PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) { | ||
550 | cpa->numpages = 1; | ||
551 | cpa->pfn = __pa(vaddr) >> PAGE_SHIFT; | ||
552 | return 0; | ||
553 | } else { | ||
554 | WARN(1, KERN_WARNING "CPA: called for zero pte. " | ||
555 | "vaddr = %lx cpa->vaddr = %lx\n", vaddr, | ||
556 | *cpa->vaddr); | ||
557 | |||
558 | return -EFAULT; | ||
559 | } | ||
560 | } | ||
561 | |||
537 | static int __change_page_attr(struct cpa_data *cpa, int primary) | 562 | static int __change_page_attr(struct cpa_data *cpa, int primary) |
538 | { | 563 | { |
539 | unsigned long address; | 564 | unsigned long address; |
@@ -545,21 +570,14 @@ static int __change_page_attr(struct cpa_data *cpa, int primary) | |||
545 | address = cpa->vaddr[cpa->curpage]; | 570 | address = cpa->vaddr[cpa->curpage]; |
546 | else | 571 | else |
547 | address = *cpa->vaddr; | 572 | address = *cpa->vaddr; |
548 | |||
549 | repeat: | 573 | repeat: |
550 | kpte = lookup_address(address, &level); | 574 | kpte = lookup_address(address, &level); |
551 | if (!kpte) | 575 | if (!kpte) |
552 | return 0; | 576 | return __cpa_process_fault(cpa, address, primary); |
553 | 577 | ||
554 | old_pte = *kpte; | 578 | old_pte = *kpte; |
555 | if (!pte_val(old_pte)) { | 579 | if (!pte_val(old_pte)) |
556 | if (!primary) | 580 | return __cpa_process_fault(cpa, address, primary); |
557 | return 0; | ||
558 | WARN(1, KERN_WARNING "CPA: called for zero pte. " | ||
559 | "vaddr = %lx cpa->vaddr = %lx\n", address, | ||
560 | *cpa->vaddr); | ||
561 | return -EINVAL; | ||
562 | } | ||
563 | 581 | ||
564 | if (level == PG_LEVEL_4K) { | 582 | if (level == PG_LEVEL_4K) { |
565 | pte_t new_pte; | 583 | pte_t new_pte; |
@@ -657,12 +675,7 @@ static int cpa_process_alias(struct cpa_data *cpa) | |||
657 | vaddr = *cpa->vaddr; | 675 | vaddr = *cpa->vaddr; |
658 | 676 | ||
659 | if (!(within(vaddr, PAGE_OFFSET, | 677 | if (!(within(vaddr, PAGE_OFFSET, |
660 | PAGE_OFFSET + (max_low_pfn_mapped << PAGE_SHIFT)) | 678 | PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) { |
661 | #ifdef CONFIG_X86_64 | ||
662 | || within(vaddr, PAGE_OFFSET + (1UL<<32), | ||
663 | PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)) | ||
664 | #endif | ||
665 | )) { | ||
666 | 679 | ||
667 | alias_cpa = *cpa; | 680 | alias_cpa = *cpa; |
668 | temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT); | 681 | temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT); |
@@ -793,6 +806,13 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, | |||
793 | 806 | ||
794 | vm_unmap_aliases(); | 807 | vm_unmap_aliases(); |
795 | 808 | ||
809 | /* | ||
810 | * If we're called with lazy mmu updates enabled, the | ||
811 | * in-memory pte state may be stale. Flush pending updates to | ||
812 | * bring them up to date. | ||
813 | */ | ||
814 | arch_flush_lazy_mmu_mode(); | ||
815 | |||
796 | cpa.vaddr = addr; | 816 | cpa.vaddr = addr; |
797 | cpa.numpages = numpages; | 817 | cpa.numpages = numpages; |
798 | cpa.mask_set = mask_set; | 818 | cpa.mask_set = mask_set; |
@@ -835,6 +855,13 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, | |||
835 | } else | 855 | } else |
836 | cpa_flush_all(cache); | 856 | cpa_flush_all(cache); |
837 | 857 | ||
858 | /* | ||
859 | * If we've been called with lazy mmu updates enabled, then | ||
860 | * make sure that everything gets flushed out before we | ||
861 | * return. | ||
862 | */ | ||
863 | arch_flush_lazy_mmu_mode(); | ||
864 | |||
838 | out: | 865 | out: |
839 | return ret; | 866 | return ret; |
840 | } | 867 | } |
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 85cbd3cd3723..aebbf67a79d0 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c | |||
@@ -211,6 +211,33 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type) | |||
211 | static struct memtype *cached_entry; | 211 | static struct memtype *cached_entry; |
212 | static u64 cached_start; | 212 | static u64 cached_start; |
213 | 213 | ||
214 | static int pat_pagerange_is_ram(unsigned long start, unsigned long end) | ||
215 | { | ||
216 | int ram_page = 0, not_rampage = 0; | ||
217 | unsigned long page_nr; | ||
218 | |||
219 | for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT); | ||
220 | ++page_nr) { | ||
221 | /* | ||
222 | * For legacy reasons, physical address range in the legacy ISA | ||
223 | * region is tracked as non-RAM. This will allow users of | ||
224 | * /dev/mem to map portions of legacy ISA region, even when | ||
225 | * some of those portions are listed (or not even listed) with | ||
226 | * different e820 types (RAM/reserved/..) | ||
227 | */ | ||
228 | if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) && | ||
229 | page_is_ram(page_nr)) | ||
230 | ram_page = 1; | ||
231 | else | ||
232 | not_rampage = 1; | ||
233 | |||
234 | if (ram_page == not_rampage) | ||
235 | return -1; | ||
236 | } | ||
237 | |||
238 | return ram_page; | ||
239 | } | ||
240 | |||
214 | /* | 241 | /* |
215 | * For RAM pages, mark the pages as non WB memory type using | 242 | * For RAM pages, mark the pages as non WB memory type using |
216 | * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or | 243 | * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or |
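pat_pagerange_is_ram() keeps the tri-state convention of the pagerange_is_ram() removed from ioremap.c above: 1 for a pure RAM range, 0 for pure non-RAM, and -1 for a mixed range. The one behavioral addition is the ISA carve-out: pages below ISA_END_ADDRESS always count as non-RAM, so /dev/mem users can keep mapping the legacy ISA region whatever e820 says about it. Callers branch on the tri-state, as reserve_memtype() does below:

        is_range_ram = pat_pagerange_is_ram(start, end);
        if (is_range_ram == 1)          /* all RAM: track via page flags */
                return reserve_ram_pages_type(start, end, req_type, new_type);
        else if (is_range_ram < 0)      /* mixed range: refuse outright */
                return -EINVAL;
        /* all non-RAM: fall through to the memtype list */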
@@ -333,9 +360,13 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, | |||
333 | req_type & _PAGE_CACHE_MASK); | 360 | req_type & _PAGE_CACHE_MASK); |
334 | } | 361 | } |
335 | 362 | ||
336 | is_range_ram = pagerange_is_ram(start, end); | 363 | if (new_type) |
364 | *new_type = actual_type; | ||
365 | |||
366 | is_range_ram = pat_pagerange_is_ram(start, end); | ||
337 | if (is_range_ram == 1) | 367 | if (is_range_ram == 1) |
338 | return reserve_ram_pages_type(start, end, req_type, new_type); | 368 | return reserve_ram_pages_type(start, end, req_type, |
369 | new_type); | ||
339 | else if (is_range_ram < 0) | 370 | else if (is_range_ram < 0) |
340 | return -EINVAL; | 371 | return -EINVAL; |
341 | 372 | ||
@@ -347,9 +378,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, | |||
347 | new->end = end; | 378 | new->end = end; |
348 | new->type = actual_type; | 379 | new->type = actual_type; |
349 | 380 | ||
350 | if (new_type) | ||
351 | *new_type = actual_type; | ||
352 | |||
353 | spin_lock(&memtype_lock); | 381 | spin_lock(&memtype_lock); |
354 | 382 | ||
355 | if (cached_entry && start >= cached_start) | 383 | if (cached_entry && start >= cached_start) |
@@ -437,7 +465,7 @@ int free_memtype(u64 start, u64 end) | |||
437 | if (is_ISA_range(start, end - 1)) | 465 | if (is_ISA_range(start, end - 1)) |
438 | return 0; | 466 | return 0; |
439 | 467 | ||
440 | is_range_ram = pagerange_is_ram(start, end); | 468 | is_range_ram = pat_pagerange_is_ram(start, end); |
441 | if (is_range_ram == 1) | 469 | if (is_range_ram == 1) |
442 | return free_ram_pages_type(start, end); | 470 | return free_ram_pages_type(start, end); |
443 | else if (is_range_ram < 0) | 471 | else if (is_range_ram < 0) |
@@ -601,39 +629,45 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot) | |||
601 | * Reserved non RAM regions only and after successful reserve_memtype, | 629 | * Reserved non RAM regions only and after successful reserve_memtype, |
602 | * this func also keeps identity mapping (if any) in sync with this new prot. | 630 | * this func also keeps identity mapping (if any) in sync with this new prot. |
603 | */ | 631 | */ |
604 | static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t vma_prot) | 632 | static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, |
633 | int strict_prot) | ||
605 | { | 634 | { |
606 | int is_ram = 0; | 635 | int is_ram = 0; |
607 | int id_sz, ret; | 636 | int id_sz, ret; |
608 | unsigned long flags; | 637 | unsigned long flags; |
609 | unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK); | 638 | unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK); |
610 | 639 | ||
611 | is_ram = pagerange_is_ram(paddr, paddr + size); | 640 | is_ram = pat_pagerange_is_ram(paddr, paddr + size); |
612 | 641 | ||
613 | if (is_ram != 0) { | 642 | /* |
614 | /* | 643 | * reserve_pfn_range() doesn't support RAM pages. |
615 | * For mapping RAM pages, drivers need to call | 644 | */ |
616 | * set_memory_[uc|wc|wb] directly, for reserve and free, before | 645 | if (is_ram != 0) |
617 | * setting up the PTE. | 646 | return -EINVAL; |
618 | */ | ||
619 | WARN_ON_ONCE(1); | ||
620 | return 0; | ||
621 | } | ||
622 | 647 | ||
623 | ret = reserve_memtype(paddr, paddr + size, want_flags, &flags); | 648 | ret = reserve_memtype(paddr, paddr + size, want_flags, &flags); |
624 | if (ret) | 649 | if (ret) |
625 | return ret; | 650 | return ret; |
626 | 651 | ||
627 | if (flags != want_flags) { | 652 | if (flags != want_flags) { |
628 | free_memtype(paddr, paddr + size); | 653 | if (strict_prot || !is_new_memtype_allowed(want_flags, flags)) { |
629 | printk(KERN_ERR | 654 | free_memtype(paddr, paddr + size); |
630 | "%s:%d map pfn expected mapping type %s for %Lx-%Lx, got %s\n", | 655 | printk(KERN_ERR "%s:%d map pfn expected mapping type %s" |
631 | current->comm, current->pid, | 656 | " for %Lx-%Lx, got %s\n", |
632 | cattr_name(want_flags), | 657 | current->comm, current->pid, |
633 | (unsigned long long)paddr, | 658 | cattr_name(want_flags), |
634 | (unsigned long long)(paddr + size), | 659 | (unsigned long long)paddr, |
635 | cattr_name(flags)); | 660 | (unsigned long long)(paddr + size), |
636 | return -EINVAL; | 661 | cattr_name(flags)); |
662 | return -EINVAL; | ||
663 | } | ||
664 | /* | ||
665 | * We allow returning a different type than the one requested in | ||
666 | * the non-strict case. | ||
667 | */ | ||
668 | *vma_prot = __pgprot((pgprot_val(*vma_prot) & | ||
669 | (~_PAGE_CACHE_MASK)) | | ||
670 | flags); | ||
637 | } | 671 | } |
638 | 672 | ||
639 | /* Need to keep identity mapping in sync */ | 673 | /* Need to keep identity mapping in sync */ |
@@ -666,7 +700,7 @@ static void free_pfn_range(u64 paddr, unsigned long size) | |||
666 | { | 700 | { |
667 | int is_ram; | 701 | int is_ram; |
668 | 702 | ||
669 | is_ram = pagerange_is_ram(paddr, paddr + size); | 703 | is_ram = pat_pagerange_is_ram(paddr, paddr + size); |
670 | if (is_ram == 0) | 704 | if (is_ram == 0) |
671 | free_memtype(paddr, paddr + size); | 705 | free_memtype(paddr, paddr + size); |
672 | } | 706 | } |
@@ -689,6 +723,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma) | |||
689 | unsigned long vma_start = vma->vm_start; | 723 | unsigned long vma_start = vma->vm_start; |
690 | unsigned long vma_end = vma->vm_end; | 724 | unsigned long vma_end = vma->vm_end; |
691 | unsigned long vma_size = vma_end - vma_start; | 725 | unsigned long vma_size = vma_end - vma_start; |
726 | pgprot_t pgprot; | ||
692 | 727 | ||
693 | if (!pat_enabled) | 728 | if (!pat_enabled) |
694 | return 0; | 729 | return 0; |
@@ -702,7 +737,8 @@ int track_pfn_vma_copy(struct vm_area_struct *vma) | |||
702 | WARN_ON_ONCE(1); | 737 | WARN_ON_ONCE(1); |
703 | return -EINVAL; | 738 | return -EINVAL; |
704 | } | 739 | } |
705 | return reserve_pfn_range(paddr, vma_size, __pgprot(prot)); | 740 | pgprot = __pgprot(prot); |
741 | return reserve_pfn_range(paddr, vma_size, &pgprot, 1); | ||
706 | } | 742 | } |
707 | 743 | ||
708 | /* reserve entire vma page by page, using pfn and prot from pte */ | 744 | /* reserve entire vma page by page, using pfn and prot from pte */ |
@@ -710,7 +746,8 @@ int track_pfn_vma_copy(struct vm_area_struct *vma) | |||
710 | if (follow_phys(vma, vma_start + i, 0, &prot, &paddr)) | 746 | if (follow_phys(vma, vma_start + i, 0, &prot, &paddr)) |
711 | continue; | 747 | continue; |
712 | 748 | ||
713 | retval = reserve_pfn_range(paddr, PAGE_SIZE, __pgprot(prot)); | 749 | pgprot = __pgprot(prot); |
750 | retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1); | ||
714 | if (retval) | 751 | if (retval) |
715 | goto cleanup_ret; | 752 | goto cleanup_ret; |
716 | } | 753 | } |
@@ -741,7 +778,7 @@ cleanup_ret: | |||
741 | * Note that this function can be called with caller trying to map only a | 778 | * Note that this function can be called with caller trying to map only a |
742 | * subrange/page inside the vma. | 779 | * subrange/page inside the vma. |
743 | */ | 780 | */ |
744 | int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot, | 781 | int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot, |
745 | unsigned long pfn, unsigned long size) | 782 | unsigned long pfn, unsigned long size) |
746 | { | 783 | { |
747 | int retval = 0; | 784 | int retval = 0; |
@@ -758,14 +795,14 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot, | |||
758 | if (is_linear_pfn_mapping(vma)) { | 795 | if (is_linear_pfn_mapping(vma)) { |
759 | /* reserve the whole chunk starting from vm_pgoff */ | 796 | /* reserve the whole chunk starting from vm_pgoff */ |
760 | paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; | 797 | paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; |
761 | return reserve_pfn_range(paddr, vma_size, prot); | 798 | return reserve_pfn_range(paddr, vma_size, prot, 0); |
762 | } | 799 | } |
763 | 800 | ||
764 | /* reserve page by page using pfn and size */ | 801 | /* reserve page by page using pfn and size */ |
765 | base_paddr = (resource_size_t)pfn << PAGE_SHIFT; | 802 | base_paddr = (resource_size_t)pfn << PAGE_SHIFT; |
766 | for (i = 0; i < size; i += PAGE_SIZE) { | 803 | for (i = 0; i < size; i += PAGE_SIZE) { |
767 | paddr = base_paddr + i; | 804 | paddr = base_paddr + i; |
768 | retval = reserve_pfn_range(paddr, PAGE_SIZE, prot); | 805 | retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0); |
769 | if (retval) | 806 | if (retval) |
770 | goto cleanup_ret; | 807 | goto cleanup_ret; |
771 | } | 808 | } |
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index f884740da318..5ead808dd70c 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c | |||
@@ -314,17 +314,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | |||
314 | return retval; | 314 | return retval; |
315 | 315 | ||
316 | if (flags != new_flags) { | 316 | if (flags != new_flags) { |
317 | /* | 317 | if (!is_new_memtype_allowed(flags, new_flags)) { |
318 | * Do not fallback to certain memory types with certain | ||
319 | * requested type: | ||
320 | * - request is uncached, return cannot be write-back | ||
321 | * - request is uncached, return cannot be write-combine | ||
322 | * - request is write-combine, return cannot be write-back | ||
323 | */ | ||
324 | if ((flags == _PAGE_CACHE_UC_MINUS && | ||
325 | (new_flags == _PAGE_CACHE_WB)) || | ||
326 | (flags == _PAGE_CACHE_WC && | ||
327 | new_flags == _PAGE_CACHE_WB)) { | ||
328 | free_memtype(addr, addr+len); | 318 | free_memtype(addr, addr+len); |
329 | return -EINVAL; | 319 | return -EINVAL; |
330 | } | 320 | } |
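The open-coded fallback rules are replaced by is_new_memtype_allowed(), the same helper reserve_pfn_range() now relies on in the pat.c hunk above. Going by the comment deleted here, the helper encodes exactly these two rules; a sketch of a plausible implementation (it lives in a header outside this diff, so this is a reconstruction, not a quote):

        static inline int is_new_memtype_allowed(unsigned long flags,
                                                 unsigned long new_flags)
        {
                /* An uncached or write-combine request must not silently
                 * degrade to write-back. */
                if ((flags == _PAGE_CACHE_UC_MINUS &&
                     new_flags == _PAGE_CACHE_WB) ||
                    (flags == _PAGE_CACHE_WC &&
                     new_flags == _PAGE_CACHE_WB))
                        return 0;

                return 1;
        }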
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index 4064345cf144..fecbce6e7d7c 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c | |||
@@ -572,6 +572,7 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route | |||
572 | case PCI_DEVICE_ID_INTEL_ICH7_1: | 572 | case PCI_DEVICE_ID_INTEL_ICH7_1: |
573 | case PCI_DEVICE_ID_INTEL_ICH7_30: | 573 | case PCI_DEVICE_ID_INTEL_ICH7_30: |
574 | case PCI_DEVICE_ID_INTEL_ICH7_31: | 574 | case PCI_DEVICE_ID_INTEL_ICH7_31: |
575 | case PCI_DEVICE_ID_INTEL_TGP_LPC: | ||
575 | case PCI_DEVICE_ID_INTEL_ESB2_0: | 576 | case PCI_DEVICE_ID_INTEL_ESB2_0: |
576 | case PCI_DEVICE_ID_INTEL_ICH8_0: | 577 | case PCI_DEVICE_ID_INTEL_ICH8_0: |
577 | case PCI_DEVICE_ID_INTEL_ICH8_1: | 578 | case PCI_DEVICE_ID_INTEL_ICH8_1: |
diff --git a/arch/x86/scripts/strip-symbols b/arch/x86/scripts/strip-symbols deleted file mode 100644 index a2f1ccb827c7..000000000000 --- a/arch/x86/scripts/strip-symbols +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | __cpu_vendor_dev_X86_VENDOR_* | ||
diff --git a/arch/x86/xen/multicalls.h b/arch/x86/xen/multicalls.h index 858938241616..fa3e10725d98 100644 --- a/arch/x86/xen/multicalls.h +++ b/arch/x86/xen/multicalls.h | |||
@@ -19,8 +19,10 @@ DECLARE_PER_CPU(unsigned long, xen_mc_irq_flags); | |||
19 | paired with xen_mc_issue() */ | 19 | paired with xen_mc_issue() */ |
20 | static inline void xen_mc_batch(void) | 20 | static inline void xen_mc_batch(void) |
21 | { | 21 | { |
22 | unsigned long flags; | ||
22 | /* need to disable interrupts until this entry is complete */ | 23 | /* need to disable interrupts until this entry is complete */ |
23 | local_irq_save(__get_cpu_var(xen_mc_irq_flags)); | 24 | local_irq_save(flags); |
25 | __get_cpu_var(xen_mc_irq_flags) = flags; | ||
24 | } | 26 | } |
25 | 27 | ||
26 | static inline struct multicall_space xen_mc_entry(size_t args) | 28 | static inline struct multicall_space xen_mc_entry(size_t args) |
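The xen_mc_batch() change is an ordering fix. With local_irq_save(__get_cpu_var(xen_mc_irq_flags)), the address of the per-cpu slot can be evaluated before interrupts are actually disabled, so a preemption-and-migration in that window can store the flags into another CPU's variable. Saving into a local first and assigning only once IRQs are off closes the window; the patched function in full:

        static inline void xen_mc_batch(void)
        {
                unsigned long flags;

                /* Disable interrupts first; only then is "this CPU" pinned
                 * and the per-cpu slot safe to address. */
                local_irq_save(flags);
                __get_cpu_var(xen_mc_irq_flags) = flags;
        }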