author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-05 17:55:20 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-05 17:55:20 -0400
commit		ea62ccd00fd0b6720b033adfc9984f31130ce195 (patch)
tree		9837b797b2466fffcb0af96c388b06eae9c3df18 /include
parent		886a0768affe9a32f18c45f8e1393bca9ece5392 (diff)
parent		35060b6a9a4e1c89bc6fbea61090e302dbc61847 (diff)
Merge branch 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6
* 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6: (231 commits)
  [PATCH] i386: Don't delete cpu_devs data to identify different x86 types in late_initcall
  [PATCH] i386: type may be unused
  [PATCH] i386: Some additional chipset register values validation.
  [PATCH] i386: Add missing !X86_PAE dependincy to the 2G/2G split.
  [PATCH] x86-64: Don't exclude asm-offsets.c in Documentation/dontdiff
  [PATCH] i386: avoid redundant preempt_disable in __unlazy_fpu
  [PATCH] i386: white space fixes in i387.h
  [PATCH] i386: Drop noisy e820 debugging printks
  [PATCH] x86-64: Fix allnoconfig error in genapic_flat.c
  [PATCH] x86-64: Shut up warnings for vfat compat ioctls on other file systems
  [PATCH] x86-64: Share identical video.S between i386 and x86-64
  [PATCH] x86-64: Remove CONFIG_REORDER
  [PATCH] x86-64: Print type and size correctly for unknown compat ioctls
  [PATCH] i386: Remove copy_*_user BUG_ONs for (size < 0)
  [PATCH] i386: Little cleanups in smpboot.c
  [PATCH] x86-64: Don't enable NUMA for a single node in K8 NUMA scanning
  [PATCH] x86: Use RDTSCP for synchronous get_cycles if possible
  [PATCH] i386: Add X86_FEATURE_RDTSCP
  [PATCH] i386: Implement X86_FEATURE_SYNC_RDTSC on i386
  [PATCH] i386: Implement alternative_io for i386
  ...

Fix up trivial conflict in include/linux/highmem.h manually.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include')
-rw-r--r--	include/asm-alpha/mmu_context.h	1
-rw-r--r--	include/asm-alpha/percpu.h	14
-rw-r--r--	include/asm-arm/mmu_context.h	1
-rw-r--r--	include/asm-arm26/mmu_context.h	2
-rw-r--r--	include/asm-avr32/mmu_context.h	1
-rw-r--r--	include/asm-cris/mmu_context.h	2
-rw-r--r--	include/asm-frv/mmu_context.h	1
-rw-r--r--	include/asm-generic/mm_hooks.h	18
-rw-r--r--	include/asm-generic/percpu.h	1
-rw-r--r--	include/asm-generic/vmlinux.lds.h	2
-rw-r--r--	include/asm-h8300/mmu_context.h	1
-rw-r--r--	include/asm-i386/Kbuild	2
-rw-r--r--	include/asm-i386/alternative.h	34
-rw-r--r--	include/asm-i386/apic.h	9
-rw-r--r--	include/asm-i386/bugs.h	194
-rw-r--r--	include/asm-i386/cpufeature.h	13
-rw-r--r--	include/asm-i386/current.h	5
-rw-r--r--	include/asm-i386/desc.h	95
-rw-r--r--	include/asm-i386/e820.h	1
-rw-r--r--	include/asm-i386/elf.h	28
-rw-r--r--	include/asm-i386/fixmap.h	11
-rw-r--r--	include/asm-i386/genapic.h	6
-rw-r--r--	include/asm-i386/highmem.h	6
-rw-r--r--	include/asm-i386/hpet.h	2
-rw-r--r--	include/asm-i386/i387.h	17
-rw-r--r--	include/asm-i386/io.h	15
-rw-r--r--	include/asm-i386/irq.h	2
-rw-r--r--	include/asm-i386/irq_regs.h	12
-rw-r--r--	include/asm-i386/irqflags.h	64
-rw-r--r--	include/asm-i386/kexec.h	8
-rw-r--r--	include/asm-i386/mach-bigsmp/mach_apic.h	2
-rw-r--r--	include/asm-i386/mach-default/mach_apic.h	2
-rw-r--r--	include/asm-i386/mach-es7000/mach_apic.h	9
-rw-r--r--	include/asm-i386/mach-es7000/mach_mpparse.h	32
-rw-r--r--	include/asm-i386/mach-generic/mach_apic.h	2
-rw-r--r--	include/asm-i386/mach-numaq/mach_apic.h	2
-rw-r--r--	include/asm-i386/mach-summit/mach_apic.h	2
-rw-r--r--	include/asm-i386/mach-summit/mach_mpparse.h	4
-rw-r--r--	include/asm-i386/mach-visws/mach_apic.h	2
-rw-r--r--	include/asm-i386/mmu_context.h	17
-rw-r--r--	include/asm-i386/module.h	2
-rw-r--r--	include/asm-i386/msr-index.h	278
-rw-r--r--	include/asm-i386/msr.h	400
-rw-r--r--	include/asm-i386/mtrr.h	4
-rw-r--r--	include/asm-i386/nmi.h	8
-rw-r--r--	include/asm-i386/page.h	81
-rw-r--r--	include/asm-i386/paravirt.h	957
-rw-r--r--	include/asm-i386/pda.h	100
-rw-r--r--	include/asm-i386/percpu.h	136
-rw-r--r--	include/asm-i386/pgalloc.h	1
-rw-r--r--	include/asm-i386/pgtable-2level-defs.h	2
-rw-r--r--	include/asm-i386/pgtable-2level.h	37
-rw-r--r--	include/asm-i386/pgtable-3level-defs.h	6
-rw-r--r--	include/asm-i386/pgtable-3level.h	69
-rw-r--r--	include/asm-i386/pgtable.h	62
-rw-r--r--	include/asm-i386/processor-flags.h	91
-rw-r--r--	include/asm-i386/processor.h	187
-rw-r--r--	include/asm-i386/reboot.h	20
-rw-r--r--	include/asm-i386/reboot_fixups.h (renamed from include/linux/reboot_fixups.h)	4
-rw-r--r--	include/asm-i386/required-features.h	34
-rw-r--r--	include/asm-i386/segment.h	10
-rw-r--r--	include/asm-i386/smp.h	64
-rw-r--r--	include/asm-i386/system.h	139
-rw-r--r--	include/asm-i386/timer.h	2
-rw-r--r--	include/asm-i386/tlbflush.h	19
-rw-r--r--	include/asm-i386/tsc.h	15
-rw-r--r--	include/asm-i386/uaccess.h	14
-rw-r--r--	include/asm-i386/vmi_time.h	18
-rw-r--r--	include/asm-ia64/mmu_context.h	1
-rw-r--r--	include/asm-m32r/mmu_context.h	1
-rw-r--r--	include/asm-m68k/mmu_context.h	1
-rw-r--r--	include/asm-m68knommu/mmu_context.h	1
-rw-r--r--	include/asm-mips/mmu_context.h	1
-rw-r--r--	include/asm-parisc/mmu_context.h	1
-rw-r--r--	include/asm-powerpc/mmu_context.h	1
-rw-r--r--	include/asm-ppc/mmu_context.h	1
-rw-r--r--	include/asm-s390/mmu_context.h	2
-rw-r--r--	include/asm-sh/mmu_context.h	1
-rw-r--r--	include/asm-sh64/mmu_context.h	2
-rw-r--r--	include/asm-sparc/mmu_context.h	2
-rw-r--r--	include/asm-sparc64/mmu_context.h	1
-rw-r--r--	include/asm-sparc64/percpu.h	10
-rw-r--r--	include/asm-um/mmu_context.h	2
-rw-r--r--	include/asm-v850/mmu_context.h	2
-rw-r--r--	include/asm-x86_64/Kbuild	4
-rw-r--r--	include/asm-x86_64/alternative.h	5
-rw-r--r--	include/asm-x86_64/apic.h	10
-rw-r--r--	include/asm-x86_64/bugs.h	30
-rw-r--r--	include/asm-x86_64/const.h	20
-rw-r--r--	include/asm-x86_64/desc.h	21
-rw-r--r--	include/asm-x86_64/dma-mapping.h	2
-rw-r--r--	include/asm-x86_64/fixmap.h	1
-rw-r--r--	include/asm-x86_64/genapic.h	4
-rw-r--r--	include/asm-x86_64/ipi.h	61
-rw-r--r--	include/asm-x86_64/irqflags.h	9
-rw-r--r--	include/asm-x86_64/mmu_context.h	1
-rw-r--r--	include/asm-x86_64/mmzone.h	2
-rw-r--r--	include/asm-x86_64/msr-index.h	1
-rw-r--r--	include/asm-x86_64/msr.h	274
-rw-r--r--	include/asm-x86_64/mtrr.h	12
-rw-r--r--	include/asm-x86_64/nmi.h	9
-rw-r--r--	include/asm-x86_64/page.h	39
-rw-r--r--	include/asm-x86_64/percpu.h	10
-rw-r--r--	include/asm-x86_64/pgalloc.h	15
-rw-r--r--	include/asm-x86_64/pgtable.h	42
-rw-r--r--	include/asm-x86_64/processor-flags.h	1
-rw-r--r--	include/asm-x86_64/processor.h	55
-rw-r--r--	include/asm-x86_64/proto.h	15
-rw-r--r--	include/asm-x86_64/segment.h	2
-rw-r--r--	include/asm-x86_64/smp.h	4
-rw-r--r--	include/asm-x86_64/suspend.h	13
-rw-r--r--	include/asm-x86_64/system.h	7
-rw-r--r--	include/asm-x86_64/timex.h	2
-rw-r--r--	include/asm-x86_64/tlbflush.h	33
-rw-r--r--	include/asm-x86_64/unistd.h	3
-rw-r--r--	include/asm-xtensa/mmu_context.h	1
-rw-r--r--	include/linux/bootmem.h	4
-rw-r--r--	include/linux/cpufreq.h	19
-rw-r--r--	include/linux/crash_dump.h	8
-rw-r--r--	include/linux/elf.h	17
-rw-r--r--	include/linux/elfnote.h	4
-rw-r--r--	include/linux/highmem.h	5
-rw-r--r--	include/linux/init.h	7
-rw-r--r--	include/linux/percpu.h	9
-rw-r--r--	include/linux/poison.h	3
125 files changed, 2421 insertions, 1795 deletions
diff --git a/include/asm-alpha/mmu_context.h b/include/asm-alpha/mmu_context.h
index fe249e9d3360..0bd7bd2ccb90 100644
--- a/include/asm-alpha/mmu_context.h
+++ b/include/asm-alpha/mmu_context.h
@@ -10,6 +10,7 @@
 #include <asm/system.h>
 #include <asm/machvec.h>
 #include <asm/compiler.h>
+#include <asm-generic/mm_hooks.h>
 
 /*
  * Force a context reload. This is needed when we change the page
diff --git a/include/asm-alpha/percpu.h b/include/asm-alpha/percpu.h
index 651ebb141b24..48348fe34c19 100644
--- a/include/asm-alpha/percpu.h
+++ b/include/asm-alpha/percpu.h
@@ -1,20 +1,6 @@
 #ifndef __ALPHA_PERCPU_H
 #define __ALPHA_PERCPU_H
 
-/*
- * Increase the per cpu area for Alpha so that
- * modules using percpu area can load.
- */
-#ifdef CONFIG_MODULES
-# define PERCPU_MODULE_RESERVE 8192
-#else
-# define PERCPU_MODULE_RESERVE 0
-#endif
-
-#define PERCPU_ENOUGH_ROOM \
-	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
-	 PERCPU_MODULE_RESERVE)
-
 #include <asm-generic/percpu.h>
 
 #endif /* __ALPHA_PERCPU_H */
diff --git a/include/asm-arm/mmu_context.h b/include/asm-arm/mmu_context.h
index d1a65b1edcaa..f8755c818b54 100644
--- a/include/asm-arm/mmu_context.h
+++ b/include/asm-arm/mmu_context.h
@@ -16,6 +16,7 @@
 #include <linux/compiler.h>
 #include <asm/cacheflush.h>
 #include <asm/proc-fns.h>
+#include <asm-generic/mm_hooks.h>
 
 void __check_kvm_seq(struct mm_struct *mm);
 
diff --git a/include/asm-arm26/mmu_context.h b/include/asm-arm26/mmu_context.h
index 1a929bfe5c3a..16c821f81b8d 100644
--- a/include/asm-arm26/mmu_context.h
+++ b/include/asm-arm26/mmu_context.h
@@ -13,6 +13,8 @@
 #ifndef __ASM_ARM_MMU_CONTEXT_H
 #define __ASM_ARM_MMU_CONTEXT_H
 
+#include <asm-generic/mm_hooks.h>
+
 #define init_new_context(tsk,mm)	0
 #define destroy_context(mm)		do { } while(0)
 
diff --git a/include/asm-avr32/mmu_context.h b/include/asm-avr32/mmu_context.h
index 31add1ae8089..c37c391faef6 100644
--- a/include/asm-avr32/mmu_context.h
+++ b/include/asm-avr32/mmu_context.h
@@ -15,6 +15,7 @@
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
 #include <asm/sysreg.h>
+#include <asm-generic/mm_hooks.h>
 
 /*
  * The MMU "context" consists of two things:
diff --git a/include/asm-cris/mmu_context.h b/include/asm-cris/mmu_context.h
index e6e659dc757b..72ba08dcfd18 100644
--- a/include/asm-cris/mmu_context.h
+++ b/include/asm-cris/mmu_context.h
@@ -1,6 +1,8 @@
 #ifndef __CRIS_MMU_CONTEXT_H
 #define __CRIS_MMU_CONTEXT_H
 
+#include <asm-generic/mm_hooks.h>
+
 extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 extern void get_mmu_context(struct mm_struct *mm);
 extern void destroy_context(struct mm_struct *mm);
diff --git a/include/asm-frv/mmu_context.h b/include/asm-frv/mmu_context.h
index 72edcaaccd5d..c7daa395156a 100644
--- a/include/asm-frv/mmu_context.h
+++ b/include/asm-frv/mmu_context.h
@@ -15,6 +15,7 @@
 #include <asm/setup.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
+#include <asm-generic/mm_hooks.h>
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h
new file mode 100644
index 000000000000..67dea8123683
--- /dev/null
+++ b/include/asm-generic/mm_hooks.h
@@ -0,0 +1,18 @@
+/*
+ * Define generic no-op hooks for arch_dup_mmap and arch_exit_mmap, to
+ * be included in asm-FOO/mmu_context.h for any arch FOO which doesn't
+ * need to hook these.
+ */
+#ifndef _ASM_GENERIC_MM_HOOKS_H
+#define _ASM_GENERIC_MM_HOOKS_H
+
+static inline void arch_dup_mmap(struct mm_struct *oldmm,
+				 struct mm_struct *mm)
+{
+}
+
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+}
+
+#endif	/* _ASM_GENERIC_MM_HOOKS_H */
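[Annotation] For context, a minimal sketch of how these no-op hooks are meant to be consumed; the caller below is an assumption modeled on the mm fork path, not part of this diff. Core code can invoke the hooks unconditionally, and an arch that needs real behaviour provides its own definitions instead of including this header:

	/* Hypothetical caller: on arches that include asm-generic/mm_hooks.h,
	 * this call compiles away entirely. */
	static inline void demo_dup_mm(struct mm_struct *oldmm, struct mm_struct *mm)
	{
		arch_dup_mmap(oldmm, mm);	/* no-op unless the arch overrides it */
	}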
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 196376262240..d984a9041436 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_GENERIC_PERCPU_H_
 #define _ASM_GENERIC_PERCPU_H_
 #include <linux/compiler.h>
+#include <linux/threads.h>
 
 #define __GENERIC_PER_CPU
 #ifdef CONFIG_SMP
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 9fcc8d9fbb14..f3806a74c478 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -208,7 +208,7 @@
 	}
 
 #define NOTES								\
-	.notes : { *(.note.*) } :note
+	.notes : { *(.note.*) } :note
 
 #define INITCALLS							\
 	*(.initcall0.init)						\
diff --git a/include/asm-h8300/mmu_context.h b/include/asm-h8300/mmu_context.h
index 5c165f7bee0e..f44b730da54d 100644
--- a/include/asm-h8300/mmu_context.h
+++ b/include/asm-h8300/mmu_context.h
@@ -4,6 +4,7 @@
 #include <asm/setup.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
+#include <asm-generic/mm_hooks.h>
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
diff --git a/include/asm-i386/Kbuild b/include/asm-i386/Kbuild
index 5ae93afc67e1..cbf6e8f1087b 100644
--- a/include/asm-i386/Kbuild
+++ b/include/asm-i386/Kbuild
@@ -3,8 +3,10 @@ include include/asm-generic/Kbuild.asm
 header-y += boot.h
 header-y += debugreg.h
 header-y += ldt.h
+header-y += msr-index.h
 header-y += ptrace-abi.h
 header-y += ucontext.h
 
+unifdef-y += msr.h
 unifdef-y += mtrr.h
 unifdef-y += vm86.h
diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h
index b8fa9557c532..0f70b379b029 100644
--- a/include/asm-i386/alternative.h
+++ b/include/asm-i386/alternative.h
@@ -1,8 +1,6 @@
 #ifndef _I386_ALTERNATIVE_H
 #define _I386_ALTERNATIVE_H
 
-#ifdef __KERNEL__
-
 #include <asm/types.h>
 #include <linux/stddef.h>
 #include <linux/types.h>
@@ -16,6 +14,7 @@ struct alt_instr {
 	u8 pad;
 };
 
+extern void alternative_instructions(void);
 extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
 
 struct module;
@@ -31,9 +30,7 @@ static inline void alternatives_smp_module_add(struct module *mod, char *name,
 					void *text, void *text_end) {}
 static inline void alternatives_smp_module_del(struct module *mod) {}
 static inline void alternatives_smp_switch(int smp) {}
-#endif
-
-#endif
+#endif	/* CONFIG_SMP */
 
 /*
  * Alternative instructions for different CPU types or capabilities.
@@ -85,6 +82,21 @@ static inline void alternatives_smp_switch(int smp) {}
 	     "663:\n\t" newinstr "\n664:\n"   /* replacement */	\
 	     ".previous" :: "i" (feature), ##input)
 
+/* Like alternative_input, but with a single output argument */
+#define alternative_io(oldinstr, newinstr, feature, output, input...)	\
+	asm volatile ("661:\n\t" oldinstr "\n662:\n"			\
+		      ".section .altinstructions,\"a\"\n"		\
+		      "  .align 4\n"					\
+		      "  .long 661b\n"		/* label */		\
+		      "  .long 663f\n"		/* new instruction */	\
+		      "  .byte %c[feat]\n"	/* feature bit */	\
+		      "  .byte 662b-661b\n"	/* sourcelen */		\
+		      "  .byte 664f-663f\n"	/* replacementlen */	\
+		      ".previous\n"					\
+		      ".section .altinstr_replacement,\"ax\"\n"		\
+		      "663:\n\t" newinstr "\n664:\n"   /* replacement */\
+		      ".previous" : output : [feat] "i" (feature), ##input)
+
 /*
  * Alternative inline assembly for SMP.
  *
@@ -118,15 +130,17 @@ static inline void alternatives_smp_switch(int smp) {}
 #define LOCK_PREFIX ""
 #endif
 
-struct paravirt_patch;
+struct paravirt_patch_site;
 #ifdef CONFIG_PARAVIRT
-void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end);
+void apply_paravirt(struct paravirt_patch_site *start,
+		    struct paravirt_patch_site *end);
 #else
 static inline void
-apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
+apply_paravirt(struct paravirt_patch_site *start,
+	       struct paravirt_patch_site *end)
 {}
-#define __start_parainstructions NULL
-#define __stop_parainstructions NULL
+#define __parainstructions	NULL
+#define __parainstructions_end	NULL
 #endif
 
 #endif /* _I386_ALTERNATIVE_H */
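[Annotation] The new alternative_io macro is what the "Implement alternative_io for i386" and RDTSC-sync patches in this merge build on. A sketch of the intended use, modeled on the x86-64 get_cycles_sync() pattern (the function name here is hypothetical; ASM_NOP2 and rdtscll come from processor.h/msr.h):

	/* Patch out the serializing CPUID on CPUs that advertise
	 * X86_FEATURE_SYNC_RDTSC; keep it everywhere else. */
	static inline unsigned long long demo_get_cycles_sync(void)
	{
		unsigned long long ret;
		unsigned eax;
		alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC,
			       "=a" (eax), "0" (1) : "ebx", "ecx", "edx", "memory");
		rdtscll(ret);
		return ret;
	}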
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
index a19810a08ae9..1e8f6f252dd3 100644
--- a/include/asm-i386/apic.h
+++ b/include/asm-i386/apic.h
@@ -2,6 +2,7 @@
 #define __ASM_APIC_H
 
 #include <linux/pm.h>
+#include <linux/delay.h>
 #include <asm/fixmap.h>
 #include <asm/apicdef.h>
 #include <asm/processor.h>
@@ -64,12 +65,8 @@ static __inline fastcall unsigned long native_apic_read(unsigned long reg)
 	return *((volatile unsigned long *)(APIC_BASE+reg));
 }
 
-static __inline__ void apic_wait_icr_idle(void)
-{
-	while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY )
-		cpu_relax();
-}
-
+void apic_wait_icr_idle(void);
+unsigned long safe_apic_wait_icr_idle(void);
 int get_physical_broadcast(void);
 
 #ifdef CONFIG_X86_GOOD_APIC
diff --git a/include/asm-i386/bugs.h b/include/asm-i386/bugs.h
index c90c7c499302..d28979ff73be 100644
--- a/include/asm-i386/bugs.h
+++ b/include/asm-i386/bugs.h
@@ -1,198 +1,12 @@
 /*
- * include/asm-i386/bugs.h
- *
- * Copyright (C) 1994 Linus Torvalds
- *
- * Cyrix stuff, June 1998 by:
- * - Rafael R. Reilova (moved everything from head.S),
- *   <rreilova@ececs.uc.edu>
- * - Channing Corn (tests & fixes),
- * - Andrew D. Balsa (code cleanup).
- */
-
-/*
  * This is included by init/main.c to check for architecture-dependent bugs.
  *
  * Needs:
  *	void check_bugs(void);
  */
+#ifndef _ASM_I386_BUG_H
+#define _ASM_I386_BUG_H
 
-#include <linux/init.h>
-#include <asm/processor.h>
-#include <asm/i387.h>
-#include <asm/msr.h>
-#include <asm/paravirt.h>
-
-static int __init no_halt(char *s)
-{
-	boot_cpu_data.hlt_works_ok = 0;
-	return 1;
-}
-
-__setup("no-hlt", no_halt);
-
-static int __init mca_pentium(char *s)
-{
-	mca_pentium_flag = 1;
-	return 1;
-}
-
-__setup("mca-pentium", mca_pentium);
-
-static int __init no_387(char *s)
-{
-	boot_cpu_data.hard_math = 0;
-	write_cr0(0xE | read_cr0());
-	return 1;
-}
-
-__setup("no387", no_387);
-
-static double __initdata x = 4195835.0;
-static double __initdata y = 3145727.0;
-
-/*
- * This used to check for exceptions..
- * However, it turns out that to support that,
- * the XMM trap handlers basically had to
- * be buggy. So let's have a correct XMM trap
- * handler, and forget about printing out
- * some status at boot.
- *
- * We should really only care about bugs here
- * anyway. Not features.
- */
-static void __init check_fpu(void)
-{
-	if (!boot_cpu_data.hard_math) {
-#ifndef CONFIG_MATH_EMULATION
-		printk(KERN_EMERG "No coprocessor found and no math emulation present.\n");
-		printk(KERN_EMERG "Giving up.\n");
-		for (;;) ;
-#endif
-		return;
-	}
-
-/* trap_init() enabled FXSR and company _before_ testing for FP problems here. */
-	/* Test for the divl bug.. */
-	__asm__("fninit\n\t"
-		"fldl %1\n\t"
-		"fdivl %2\n\t"
-		"fmull %2\n\t"
-		"fldl %1\n\t"
-		"fsubp %%st,%%st(1)\n\t"
-		"fistpl %0\n\t"
-		"fwait\n\t"
-		"fninit"
-		: "=m" (*&boot_cpu_data.fdiv_bug)
-		: "m" (*&x), "m" (*&y));
-	if (boot_cpu_data.fdiv_bug)
-		printk("Hmm, FPU with FDIV bug.\n");
-}
-
-static void __init check_hlt(void)
-{
-	if (paravirt_enabled())
-		return;
-
-	printk(KERN_INFO "Checking 'hlt' instruction... ");
-	if (!boot_cpu_data.hlt_works_ok) {
-		printk("disabled\n");
-		return;
-	}
-	halt();
-	halt();
-	halt();
-	halt();
-	printk("OK.\n");
-}
-
-/*
- * Most 386 processors have a bug where a POPAD can lock the
- * machine even from user space.
- */
-
-static void __init check_popad(void)
-{
-#ifndef CONFIG_X86_POPAD_OK
-	int res, inp = (int) &res;
-
-	printk(KERN_INFO "Checking for popad bug... ");
-	__asm__ __volatile__(
-	  "movl $12345678,%%eax; movl $0,%%edi; pusha; popa; movl (%%edx,%%edi),%%ecx "
-	  : "=&a" (res)
-	  : "d" (inp)
-	  : "ecx", "edi" );
-	/* If this fails, it means that any user program may lock the CPU hard. Too bad. */
-	if (res != 12345678) printk( "Buggy.\n" );
-	else printk( "OK.\n" );
-#endif
-}
-
-/*
- * Check whether we are able to run this kernel safely on SMP.
- *
- * - In order to run on a i386, we need to be compiled for i386
- *   (for due to lack of "invlpg" and working WP on a i386)
- * - In order to run on anything without a TSC, we need to be
- *   compiled for a i486.
- * - In order to support the local APIC on a buggy Pentium machine,
- *   we need to be compiled with CONFIG_X86_GOOD_APIC disabled,
- *   which happens implicitly if compiled for a Pentium or lower
- *   (unless an advanced selection of CPU features is used) as an
- *   otherwise config implies a properly working local APIC without
- *   the need to do extra reads from the APIC.
- */
-
-static void __init check_config(void)
-{
-/*
- * We'd better not be a i386 if we're configured to use some
- * i486+ only features! (WP works in supervisor mode and the
- * new "invlpg" and "bswap" instructions)
- */
-#if defined(CONFIG_X86_WP_WORKS_OK) || defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_BSWAP)
-	if (boot_cpu_data.x86 == 3)
-		panic("Kernel requires i486+ for 'invlpg' and other features");
-#endif
-
-/*
- * If we configured ourselves for a TSC, we'd better have one!
- */
-#ifdef CONFIG_X86_TSC
-	if (!cpu_has_tsc && !tsc_disable)
-		panic("Kernel compiled for Pentium+, requires TSC feature!");
-#endif
-
-/*
- * If we were told we had a good local APIC, check for buggy Pentia,
- * i.e. all B steppings and the C2 stepping of P54C when using their
- * integrated APIC (see 11AP erratum in "Pentium Processor
- * Specification Update").
- */
-#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_GOOD_APIC)
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL
-	    && cpu_has_apic
-	    && boot_cpu_data.x86 == 5
-	    && boot_cpu_data.x86_model == 2
-	    && (boot_cpu_data.x86_mask < 6 || boot_cpu_data.x86_mask == 11))
-		panic("Kernel compiled for PMMX+, assumes a local APIC without the read-before-write bug!");
-#endif
-}
-
-extern void alternative_instructions(void);
+void check_bugs(void);
 
-static void __init check_bugs(void)
-{
-	identify_cpu(&boot_cpu_data);
-#ifndef CONFIG_SMP
-	printk("CPU: ");
-	print_cpu_info(&boot_cpu_data);
-#endif
-	check_config();
-	check_fpu();
-	check_hlt();
-	check_popad();
-	init_utsname()->machine[1] = '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
-	alternative_instructions();
-}
+#endif /* _ASM_I386_BUG_H */
diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h
index d1b8e4ab6c1a..f514e906643a 100644
--- a/include/asm-i386/cpufeature.h
+++ b/include/asm-i386/cpufeature.h
@@ -7,7 +7,10 @@
 #ifndef __ASM_I386_CPUFEATURE_H
 #define __ASM_I386_CPUFEATURE_H
 
+#ifndef __ASSEMBLY__
 #include <linux/bitops.h>
+#endif
+#include <asm/required-features.h>
 
 #define NCAPINTS	7	/* N 32-bit words worth of info */
 
@@ -49,6 +52,7 @@
 #define X86_FEATURE_MP		(1*32+19) /* MP Capable. */
 #define X86_FEATURE_NX		(1*32+20) /* Execute Disable */
 #define X86_FEATURE_MMXEXT	(1*32+22) /* AMD MMX extensions */
+#define X86_FEATURE_RDTSCP	(1*32+27) /* RDTSCP */
 #define X86_FEATURE_LM		(1*32+29) /* Long Mode (x86-64) */
 #define X86_FEATURE_3DNOWEXT	(1*32+30) /* AMD 3DNow! extensions */
 #define X86_FEATURE_3DNOW	(1*32+31) /* 3DNow! */
@@ -76,6 +80,7 @@
 #define X86_FEATURE_PEBS	(3*32+12)  /* Precise-Event Based Sampling */
 #define X86_FEATURE_BTS		(3*32+13)  /* Branch Trace Store */
 #define X86_FEATURE_LAPIC_TIMER_BROKEN	(3*32+ 14) /* lapic timer broken in C1 */
+#define X86_FEATURE_SYNC_RDTSC	(3*32+15)  /* RDTSC synchronizes the CPU */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* Streaming SIMD Extensions-3 */
@@ -103,8 +108,12 @@
 #define X86_FEATURE_LAHF_LM	(6*32+ 0) /* LAHF/SAHF in long mode */
 #define X86_FEATURE_CMP_LEGACY	(6*32+ 1) /* If yes HyperThreading not valid */
 
-#define cpu_has(c, bit)		test_bit(bit, (c)->x86_capability)
-#define boot_cpu_has(bit)	test_bit(bit, boot_cpu_data.x86_capability)
+#define cpu_has(c, bit)						\
+	((__builtin_constant_p(bit) && (bit) < 32 &&		\
+	  (1UL << (bit)) & REQUIRED_MASK1) ?			\
+		1 :						\
+		test_bit(bit, (c)->x86_capability))
+#define boot_cpu_has(bit)	cpu_has(&boot_cpu_data, bit)
 
 #define cpu_has_fpu		boot_cpu_has(X86_FEATURE_FPU)
 #define cpu_has_vme		boot_cpu_has(X86_FEATURE_VME)
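[Annotation] The point of the new cpu_has() is constant folding: when the Kconfig-derived masks in the new asm-i386/required-features.h guarantee a bit, the whole test collapses to 1 at compile time and the test_bit() call (plus the dead branch) disappears. A sketch of the effect (the function is hypothetical; which bits end up required depends on the kernel configuration):

	static inline void demo_feature_check(void)
	{
		/* If the build requires an FPU, this condition is the
		 * compile-time constant 1 and no runtime test is emitted;
		 * otherwise it falls back to test_bit() on x86_capability. */
		if (boot_cpu_has(X86_FEATURE_FPU))
			/* fast path */;
	}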
diff --git a/include/asm-i386/current.h b/include/asm-i386/current.h
index 5252ee0f6d7a..d35248539912 100644
--- a/include/asm-i386/current.h
+++ b/include/asm-i386/current.h
@@ -1,14 +1,15 @@
 #ifndef _I386_CURRENT_H
 #define _I386_CURRENT_H
 
-#include <asm/pda.h>
 #include <linux/compiler.h>
+#include <asm/percpu.h>
 
 struct task_struct;
 
+DECLARE_PER_CPU(struct task_struct *, current_task);
 static __always_inline struct task_struct *get_current(void)
 {
-	return read_pda(pcurrent);
+	return x86_read_percpu(current_task);
 }
 
 #define current get_current()
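[Annotation] Callers are unchanged by the PDA-to-percpu conversion: current is still a single segment-relative load, it just comes from the per-cpu area now. A throwaway illustration (hypothetical function, assumed printk context):

	static void demo_who_am_i(void)
	{
		printk(KERN_DEBUG "pid %d on cpu %d\n",
		       current->pid, smp_processor_id());
	}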
diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h
index 050831f34f71..c547403f341d 100644
--- a/include/asm-i386/desc.h
+++ b/include/asm-i386/desc.h
@@ -12,23 +12,24 @@
 
 #include <asm/mmu.h>
 
-extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
-
 struct Xgt_desc_struct {
 	unsigned short size;
 	unsigned long address __attribute__((packed));
 	unsigned short pad;
 } __attribute__ ((packed));
 
-extern struct Xgt_desc_struct idt_descr;
-DECLARE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
-extern struct Xgt_desc_struct early_gdt_descr;
+struct gdt_page
+{
+	struct desc_struct gdt[GDT_ENTRIES];
+} __attribute__((aligned(PAGE_SIZE)));
+DECLARE_PER_CPU(struct gdt_page, gdt_page);
 
 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
 {
-	return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address;
+	return per_cpu(gdt_page, cpu).gdt;
 }
 
+extern struct Xgt_desc_struct idt_descr;
 extern struct desc_struct idt_table[];
 extern void set_intr_gate(unsigned int irq, void * addr);
 
@@ -58,45 +59,33 @@ static inline void pack_gate(__u32 *a, __u32 *b,
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
-#define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
-
-#define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
-#define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
+#define load_TR_desc() native_load_tr_desc()
+#define load_gdt(dtr) native_load_gdt(dtr)
+#define load_idt(dtr) native_load_idt(dtr)
 #define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr))
 #define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt))
 
-#define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr))
-#define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr))
-#define store_tr(tr) __asm__ ("str %0":"=m" (tr))
+#define store_gdt(dtr) native_store_gdt(dtr)
+#define store_idt(dtr) native_store_idt(dtr)
+#define store_tr(tr) (tr = native_store_tr())
 #define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt))
 
-#if TLS_SIZE != 24
-# error update this code.
-#endif
-
-static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
-{
-#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
-	C(0); C(1); C(2);
-#undef C
-}
+#define load_TLS(t, cpu) native_load_tls(t, cpu)
+#define set_ldt native_set_ldt
 
 #define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
 #define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
 #define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+#endif
 
-static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entry_b)
+static inline void write_dt_entry(struct desc_struct *dt,
+				  int entry, u32 entry_low, u32 entry_high)
 {
-	__u32 *lp = (__u32 *)((char *)dt + entry*8);
-	*lp = entry_a;
-	*(lp+1) = entry_b;
+	dt[entry].a = entry_low;
+	dt[entry].b = entry_high;
 }
 
-#define set_ldt native_set_ldt
-#endif /* CONFIG_PARAVIRT */
-
-static inline fastcall void native_set_ldt(const void *addr,
-					   unsigned int entries)
+static inline void native_set_ldt(const void *addr, unsigned int entries)
 {
 	if (likely(entries == 0))
 		__asm__ __volatile__("lldt %w0"::"q" (0));
@@ -112,6 +101,48 @@ static inline fastcall void native_set_ldt(const void *addr,
 	}
 }
 
+
+static inline void native_load_tr_desc(void)
+{
+	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
+}
+
+static inline void native_load_gdt(const struct Xgt_desc_struct *dtr)
+{
+	asm volatile("lgdt %0"::"m" (*dtr));
+}
+
+static inline void native_load_idt(const struct Xgt_desc_struct *dtr)
+{
+	asm volatile("lidt %0"::"m" (*dtr));
+}
+
+static inline void native_store_gdt(struct Xgt_desc_struct *dtr)
+{
+	asm ("sgdt %0":"=m" (*dtr));
+}
+
+static inline void native_store_idt(struct Xgt_desc_struct *dtr)
+{
+	asm ("sidt %0":"=m" (*dtr));
+}
+
+static inline unsigned long native_store_tr(void)
+{
+	unsigned long tr;
+	asm ("str %0":"=r" (tr));
+	return tr;
+}
+
+static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
+{
+	unsigned int i;
+	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+
+	for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
+		gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
+}
+
 static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg)
 {
 	__u32 a, b;
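[Annotation] The native_* helpers above are now plain functions so a paravirt backend can substitute its own; they compose with get_cpu_gdt_table(). A hypothetical helper (not in this diff) showing the direct-descriptor-update path:

	static void demo_poke_gdt(unsigned int cpu, int slot, u32 lo, u32 hi)
	{
		struct desc_struct *gdt = get_cpu_gdt_table(cpu);

		write_dt_entry(gdt, slot, lo, hi);	/* paravirt may route this elsewhere */
	}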
diff --git a/include/asm-i386/e820.h b/include/asm-i386/e820.h
index c5b8fc6109d6..096a2a8eb1da 100644
--- a/include/asm-i386/e820.h
+++ b/include/asm-i386/e820.h
@@ -38,6 +38,7 @@ extern struct e820map e820;
 
 extern int e820_all_mapped(unsigned long start, unsigned long end,
 			   unsigned type);
+extern int e820_any_mapped(u64 start, u64 end, unsigned type);
 extern void find_max_pfn(void);
 extern void register_bootmem_low_pages(unsigned long max_low_pfn);
 extern void e820_register_memory(void);
diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h
index 952b3ee3c9bb..d304ab4161ff 100644
--- a/include/asm-i386/elf.h
+++ b/include/asm-i386/elf.h
@@ -133,39 +133,31 @@ extern int dump_task_extended_fpu (struct task_struct *, struct user_fxsr_struct
 #define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) dump_task_extended_fpu(tsk, elf_xfpregs)
 
 #define VDSO_HIGH_BASE		(__fix_to_virt(FIX_VDSO))
-#define VDSO_BASE		((unsigned long)current->mm->context.vdso)
-
-#ifdef CONFIG_COMPAT_VDSO
-# define VDSO_COMPAT_BASE	VDSO_HIGH_BASE
-# define VDSO_PRELINK		VDSO_HIGH_BASE
-#else
-# define VDSO_COMPAT_BASE	VDSO_BASE
-# define VDSO_PRELINK		0
-#endif
+#define VDSO_CURRENT_BASE	((unsigned long)current->mm->context.vdso)
+#define VDSO_PRELINK		0
 
 #define VDSO_SYM(x) \
-		(VDSO_COMPAT_BASE + (unsigned long)(x) - VDSO_PRELINK)
+		(VDSO_CURRENT_BASE + (unsigned long)(x) - VDSO_PRELINK)
 
 #define VDSO_HIGH_EHDR		((const struct elfhdr *) VDSO_HIGH_BASE)
-#define VDSO_EHDR		((const struct elfhdr *) VDSO_COMPAT_BASE)
+#define VDSO_EHDR		((const struct elfhdr *) VDSO_CURRENT_BASE)
 
 extern void __kernel_vsyscall;
 
 #define VDSO_ENTRY		VDSO_SYM(&__kernel_vsyscall)
 
-#ifndef CONFIG_COMPAT_VDSO
-#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
 struct linux_binprm;
+
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
 				       int executable_stack);
-#endif
 
 extern unsigned int vdso_enabled;
 
 #define ARCH_DLINFO \
 do if (vdso_enabled) { \
 		NEW_AUX_ENT(AT_SYSINFO,	VDSO_ENTRY); \
-		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_COMPAT_BASE); \
+		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \
 } while (0)
 
 #endif
diff --git a/include/asm-i386/fixmap.h b/include/asm-i386/fixmap.h
index 3e9f610c35df..80ea052ee3a4 100644
--- a/include/asm-i386/fixmap.h
+++ b/include/asm-i386/fixmap.h
@@ -19,13 +19,9 @@
  * Leave one empty page between vmalloc'ed areas and
  * the start of the fixmap.
  */
-#ifndef CONFIG_COMPAT_VDSO
 extern unsigned long __FIXADDR_TOP;
-#else
-#define __FIXADDR_TOP 0xfffff000
-#define FIXADDR_USER_START __fix_to_virt(FIX_VDSO)
-#define FIXADDR_USER_END __fix_to_virt(FIX_VDSO - 1)
-#endif
+#define FIXADDR_USER_START	__fix_to_virt(FIX_VDSO)
+#define FIXADDR_USER_END	__fix_to_virt(FIX_VDSO - 1)
 
 #ifndef __ASSEMBLY__
 #include <linux/kernel.h>
@@ -88,6 +84,9 @@ enum fixed_addresses {
 #ifdef CONFIG_PCI_MMCONFIG
 	FIX_PCIE_MCFG,
 #endif
+#ifdef CONFIG_PARAVIRT
+	FIX_PARAVIRT_BOOTMAP,
+#endif
 	__end_of_permanent_fixed_addresses,
 	/* temporary boot-time mappings, used before ioremap() is functional */
 #define NR_FIX_BTMAPS	16
diff --git a/include/asm-i386/genapic.h b/include/asm-i386/genapic.h
index fd2be593b06e..33e3ffe1766c 100644
--- a/include/asm-i386/genapic.h
+++ b/include/asm-i386/genapic.h
@@ -36,7 +36,7 @@ struct genapic {
 	void (*init_apic_ldr)(void);
 	physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
 
-	void (*clustered_apic_check)(void);
+	void (*setup_apic_routing)(void);
 	int (*multi_timer_check)(int apic, int irq);
 	int (*apicid_to_node)(int logical_apicid);
 	int (*cpu_to_logical_apicid)(int cpu);
@@ -99,7 +99,7 @@ struct genapic {
 	APICFUNC(check_apicid_present) \
 	APICFUNC(init_apic_ldr) \
 	APICFUNC(ioapic_phys_id_map) \
-	APICFUNC(clustered_apic_check) \
+	APICFUNC(setup_apic_routing) \
 	APICFUNC(multi_timer_check) \
 	APICFUNC(apicid_to_node) \
 	APICFUNC(cpu_to_logical_apicid) \
@@ -122,6 +122,6 @@ struct genapic {
 	APICFUNC(phys_pkg_id) \
 	}
 
-extern struct genapic *genapic, apic_default;
+extern struct genapic *genapic;
 
 #endif
diff --git a/include/asm-i386/highmem.h b/include/asm-i386/highmem.h
index e9a34ebc25d5..13cdcd66fff2 100644
--- a/include/asm-i386/highmem.h
+++ b/include/asm-i386/highmem.h
@@ -24,6 +24,7 @@
 #include <linux/threads.h>
 #include <asm/kmap_types.h>
 #include <asm/tlbflush.h>
+#include <asm/paravirt.h>
 
 /* declarations for highmem.c */
 extern unsigned long highstart_pfn, highend_pfn;
@@ -67,11 +68,16 @@ extern void FASTCALL(kunmap_high(struct page *page));
 
 void *kmap(struct page *page);
 void kunmap(struct page *page);
+void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
 void *kmap_atomic(struct page *page, enum km_type type);
 void kunmap_atomic(void *kvaddr, enum km_type type);
 void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
 struct page *kmap_atomic_to_page(void *ptr);
 
+#ifndef CONFIG_PARAVIRT
+#define kmap_atomic_pte(page, type)	kmap_atomic(page, type)
+#endif
+
 #define flush_cache_kmaps()	do { } while (0)
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-i386/hpet.h b/include/asm-i386/hpet.h
index fc03cf9de5c4..dddeedf504b7 100644
--- a/include/asm-i386/hpet.h
+++ b/include/asm-i386/hpet.h
@@ -28,8 +28,6 @@
 
 #include <linux/timex.h>
 
-#include <asm/fixmap.h>
-
 /*
  * Documentation on HPET can be found at:
  *      http://www.intel.com/ial/home/sp/pcmmspec.htm
diff --git a/include/asm-i386/i387.h b/include/asm-i386/i387.h
index 434936c732d6..cdd1e248e3b4 100644
--- a/include/asm-i386/i387.h
+++ b/include/asm-i386/i387.h
@@ -74,17 +74,18 @@ static inline void __save_init_fpu( struct task_struct *tsk )
 	task_thread_info(tsk)->status &= ~TS_USEDFPU;
 }
 
 #define __unlazy_fpu( tsk ) do {				\
-	if (task_thread_info(tsk)->status & TS_USEDFPU)		\
-		save_init_fpu( tsk );				\
-	else							\
-		tsk->fpu_counter = 0;				\
+	if (task_thread_info(tsk)->status & TS_USEDFPU) {	\
+		__save_init_fpu(tsk);				\
+		stts();						\
+	} else							\
+		tsk->fpu_counter = 0;				\
 } while (0)
 
 #define __clear_fpu( tsk )					\
 do {								\
 	if (task_thread_info(tsk)->status & TS_USEDFPU) {	\
 		asm volatile("fnclex ; fwait");			\
 		task_thread_info(tsk)->status &= ~TS_USEDFPU;	\
 		stts();						\
 	} \
@@ -113,7 +114,7 @@ static inline void save_init_fpu( struct task_struct *tsk )
 	__clear_fpu( tsk ); \
 	preempt_enable(); \
 } while (0)
-\
+
 /*
  * FPU state interaction...
  */
diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h
index 59fe616933c4..e797586a5bfc 100644
--- a/include/asm-i386/io.h
+++ b/include/asm-i386/io.h
@@ -250,19 +250,22 @@ static inline void flush_write_buffers(void)
 
 #endif /* __KERNEL__ */
 
+static inline void native_io_delay(void)
+{
+	asm volatile("outb %%al,$0x80" : : : "memory");
+}
+
 #if defined(CONFIG_PARAVIRT)
 #include <asm/paravirt.h>
 #else
 
-#define __SLOW_DOWN_IO "outb %%al,$0x80;"
-
 static inline void slow_down_io(void) {
-	__asm__ __volatile__(
-		__SLOW_DOWN_IO
+	native_io_delay();
 #ifdef REALLY_SLOW_IO
-		__SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
+	native_io_delay();
+	native_io_delay();
+	native_io_delay();
 #endif
-		: : );
 }
 
 #endif
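[Annotation] slow_down_io() is the pause step behind the classic *_p port helpers; a sketch of that pattern (hypothetical function, mirroring how outb_p() is usually built):

	static inline void demo_outb_p(unsigned char value, unsigned short port)
	{
		asm volatile("outb %0,%1" : : "a" (value), "Nd" (port));
		slow_down_io();	/* one native_io_delay(), more with REALLY_SLOW_IO */
	}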
diff --git a/include/asm-i386/irq.h b/include/asm-i386/irq.h
index 11761cdaae19..9e15ce0006eb 100644
--- a/include/asm-i386/irq.h
+++ b/include/asm-i386/irq.h
@@ -37,8 +37,6 @@ static __inline__ int irq_canonicalize(int irq)
 extern int irqbalance_disable(char *str);
 #endif
 
-extern void quirk_intel_irqbalance(void);
-
 #ifdef CONFIG_HOTPLUG_CPU
 extern void fixup_irqs(cpumask_t map);
 #endif
diff --git a/include/asm-i386/irq_regs.h b/include/asm-i386/irq_regs.h
index a1b3f7f594a2..3368b20c0b48 100644
--- a/include/asm-i386/irq_regs.h
+++ b/include/asm-i386/irq_regs.h
@@ -1,25 +1,27 @@
 /*
  * Per-cpu current frame pointer - the location of the last exception frame on
- * the stack, stored in the PDA.
+ * the stack, stored in the per-cpu area.
  *
  * Jeremy Fitzhardinge <jeremy@goop.org>
  */
 #ifndef _ASM_I386_IRQ_REGS_H
 #define _ASM_I386_IRQ_REGS_H
 
-#include <asm/pda.h>
+#include <asm/percpu.h>
+
+DECLARE_PER_CPU(struct pt_regs *, irq_regs);
 
 static inline struct pt_regs *get_irq_regs(void)
 {
-	return read_pda(irq_regs);
+	return x86_read_percpu(irq_regs);
 }
 
 static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
 {
 	struct pt_regs *old_regs;
 
-	old_regs = read_pda(irq_regs);
-	write_pda(irq_regs, new_regs);
+	old_regs = get_irq_regs();
+	x86_write_percpu(irq_regs, new_regs);
 
 	return old_regs;
 }
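[Annotation] A sketch of the usual caller (a do_IRQ-style dispatcher; this function is an assumption, not part of the diff): stash the exception frame on entry so nested code can fetch it with get_irq_regs(), then restore on exit:

	unsigned int demo_do_IRQ(struct pt_regs *regs)
	{
		struct pt_regs *old_regs = set_irq_regs(regs);

		/* ... dispatch; handlers may call get_irq_regs() ... */

		set_irq_regs(old_regs);
		return 1;
	}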
diff --git a/include/asm-i386/irqflags.h b/include/asm-i386/irqflags.h
index 17b18cf4fe9d..eff8585cb741 100644
--- a/include/asm-i386/irqflags.h
+++ b/include/asm-i386/irqflags.h
@@ -9,6 +9,43 @@
  */
 #ifndef _ASM_IRQFLAGS_H
 #define _ASM_IRQFLAGS_H
+#include <asm/processor-flags.h>
+
+#ifndef __ASSEMBLY__
+static inline unsigned long native_save_fl(void)
+{
+	unsigned long f;
+	asm volatile("pushfl ; popl %0":"=g" (f): /* no input */);
+	return f;
+}
+
+static inline void native_restore_fl(unsigned long f)
+{
+	asm volatile("pushl %0 ; popfl": /* no output */
+			     :"g" (f)
+			     :"memory", "cc");
+}
+
+static inline void native_irq_disable(void)
+{
+	asm volatile("cli": : :"memory");
+}
+
+static inline void native_irq_enable(void)
+{
+	asm volatile("sti": : :"memory");
+}
+
+static inline void native_safe_halt(void)
+{
+	asm volatile("sti; hlt": : :"memory");
+}
+
+static inline void native_halt(void)
+{
+	asm volatile("hlt": : :"memory");
+}
+#endif	/* __ASSEMBLY__ */
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
@@ -17,35 +54,22 @@
 
 static inline unsigned long __raw_local_save_flags(void)
 {
-	unsigned long flags;
-
-	__asm__ __volatile__(
-		"pushfl ; popl %0"
-		: "=g" (flags)
-		: /* no input */
-	);
-
-	return flags;
+	return native_save_fl();
 }
 
 static inline void raw_local_irq_restore(unsigned long flags)
 {
-	__asm__ __volatile__(
-		"pushl %0 ; popfl"
-		: /* no output */
-		:"g" (flags)
-		:"memory", "cc"
-	);
+	native_restore_fl(flags);
 }
 
 static inline void raw_local_irq_disable(void)
 {
-	__asm__ __volatile__("cli" : : : "memory");
+	native_irq_disable();
 }
 
 static inline void raw_local_irq_enable(void)
 {
-	__asm__ __volatile__("sti" : : : "memory");
+	native_irq_enable();
 }
 
 /*
@@ -54,7 +78,7 @@ static inline void raw_local_irq_enable(void)
  */
 static inline void raw_safe_halt(void)
 {
-	__asm__ __volatile__("sti; hlt" : : : "memory");
+	native_safe_halt();
 }
 
 /*
@@ -63,7 +87,7 @@ static inline void raw_safe_halt(void)
  */
 static inline void halt(void)
 {
-	__asm__ __volatile__("hlt": : :"memory");
+	native_halt();
 }
 
 /*
@@ -96,7 +120,7 @@ static inline unsigned long __raw_local_irq_save(void)
 
 static inline int raw_irqs_disabled_flags(unsigned long flags)
 {
-	return !(flags & (1 << 9));
+	return !(flags & X86_EFLAGS_IF);
 }
 
 static inline int raw_irqs_disabled(void)
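[Annotation] The raw wrappers keep their usual save/restore discipline; the refactor only gives CONFIG_PARAVIRT a set of native_* fallbacks to point at. A sketch of an assumed caller:

	static void demo_critical_section(void)
	{
		unsigned long flags = __raw_local_irq_save();

		/* ... interrupts off: touch per-cpu state safely ... */

		raw_local_irq_restore(flags);
	}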
diff --git a/include/asm-i386/kexec.h b/include/asm-i386/kexec.h
index 4dfc9f5ed031..bcb5b21de2d2 100644
--- a/include/asm-i386/kexec.h
+++ b/include/asm-i386/kexec.h
@@ -21,7 +21,6 @@
 
 #ifndef __ASSEMBLY__
 
-#include <asm/fixmap.h>
 #include <asm/ptrace.h>
 #include <asm/string.h>
 
@@ -29,10 +28,6 @@
  * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return.
  * I.e. Maximum page that is mapped directly into kernel memory,
  * and kmap is not required.
- *
- * Someone correct me if FIXADDR_START - PAGEOFFSET is not the correct
- * calculation for the amount of memory directly mappable into the
- * kernel memory space.
  */
 
 /* Maximum physical address we can use pages from */
@@ -47,6 +42,9 @@
 /* The native architecture */
 #define KEXEC_ARCH KEXEC_ARCH_386
 
+/* We can also handle crash dumps from 64 bit kernel. */
+#define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
+
 #define MAX_NOTE_BYTES 1024
 
 /* CPU does not save ss and esp on stack if execution is already
diff --git a/include/asm-i386/mach-bigsmp/mach_apic.h b/include/asm-i386/mach-bigsmp/mach_apic.h
index 18b19a773440..ebd319f838ab 100644
--- a/include/asm-i386/mach-bigsmp/mach_apic.h
+++ b/include/asm-i386/mach-bigsmp/mach_apic.h
@@ -71,7 +71,7 @@ static inline void init_apic_ldr(void)
 	apic_write_around(APIC_LDR, val);
 }
 
-static inline void clustered_apic_check(void)
+static inline void setup_apic_routing(void)
 {
 	printk("Enabling APIC mode:  %s.  Using %d I/O APICs\n",
 		"Physflat", nr_ioapics);
diff --git a/include/asm-i386/mach-default/mach_apic.h b/include/asm-i386/mach-default/mach_apic.h
index 3ef6292db780..6db1c3babe9a 100644
--- a/include/asm-i386/mach-default/mach_apic.h
+++ b/include/asm-i386/mach-default/mach_apic.h
@@ -54,7 +54,7 @@ static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
 	return phys_map;
 }
 
-static inline void clustered_apic_check(void)
+static inline void setup_apic_routing(void)
 {
 	printk("Enabling APIC mode:  %s.  Using %d I/O APICs\n",
 		"Flat", nr_ioapics);
diff --git a/include/asm-i386/mach-es7000/mach_apic.h b/include/asm-i386/mach-es7000/mach_apic.h
index 26333685a7fb..2d978928a395 100644
--- a/include/asm-i386/mach-es7000/mach_apic.h
+++ b/include/asm-i386/mach-es7000/mach_apic.h
@@ -73,15 +73,8 @@ static inline void init_apic_ldr(void)
73 apic_write_around(APIC_LDR, val); 73 apic_write_around(APIC_LDR, val);
74} 74}
75 75
76extern void es7000_sw_apic(void);
77static inline void enable_apic_mode(void)
78{
79 es7000_sw_apic();
80 return;
81}
82
83extern int apic_version [MAX_APICS]; 76extern int apic_version [MAX_APICS];
84static inline void clustered_apic_check(void) 77static inline void setup_apic_routing(void)
85{ 78{
86 int apic = bios_cpu_apicid[smp_processor_id()]; 79 int apic = bios_cpu_apicid[smp_processor_id()];
87 printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", 80 printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
diff --git a/include/asm-i386/mach-es7000/mach_mpparse.h b/include/asm-i386/mach-es7000/mach_mpparse.h
index 24990e546da3..b9fb784e1fd5 100644
--- a/include/asm-i386/mach-es7000/mach_mpparse.h
+++ b/include/asm-i386/mach-es7000/mach_mpparse.h
@@ -18,18 +18,6 @@ extern int parse_unisys_oem (char *oemptr);
 extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
 extern void setup_unisys(void);
 
-static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
-		char *productid)
-{
-	if (mpc->mpc_oemptr) {
-		struct mp_config_oemtable *oem_table =
-			(struct mp_config_oemtable *)mpc->mpc_oemptr;
-		if (!strncmp(oem, "UNISYS", 6))
-			return parse_unisys_oem((char *)oem_table);
-	}
-	return 0;
-}
-
 #ifdef CONFIG_ACPI
 
 static inline int es7000_check_dsdt(void)
@@ -41,26 +29,6 @@ static inline int es7000_check_dsdt(void)
 		return 1;
 	return 0;
 }
-
-/* Hook from generic ACPI tables.c */
-static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
-	unsigned long oem_addr;
-	if (!find_unisys_acpi_oem_table(&oem_addr)) {
-		if (es7000_check_dsdt())
-			return parse_unisys_oem((char *)oem_addr);
-		else {
-			setup_unisys();
-			return 1;
-		}
-	}
-	return 0;
-}
-#else
-static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
-	return 0;
-}
 #endif
 
 #endif /* __ASM_MACH_MPPARSE_H */
diff --git a/include/asm-i386/mach-generic/mach_apic.h b/include/asm-i386/mach-generic/mach_apic.h
index d9dc039da94a..a236e7021528 100644
--- a/include/asm-i386/mach-generic/mach_apic.h
+++ b/include/asm-i386/mach-generic/mach_apic.h
@@ -13,7 +13,7 @@
 #define apic_id_registered (genapic->apic_id_registered)
 #define init_apic_ldr (genapic->init_apic_ldr)
 #define ioapic_phys_id_map (genapic->ioapic_phys_id_map)
-#define clustered_apic_check (genapic->clustered_apic_check)
+#define setup_apic_routing (genapic->setup_apic_routing)
 #define multi_timer_check (genapic->multi_timer_check)
 #define apicid_to_node (genapic->apicid_to_node)
 #define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid)
diff --git a/include/asm-i386/mach-numaq/mach_apic.h b/include/asm-i386/mach-numaq/mach_apic.h
index 9d158095da82..5e5e7dd2692e 100644
--- a/include/asm-i386/mach-numaq/mach_apic.h
+++ b/include/asm-i386/mach-numaq/mach_apic.h
@@ -34,7 +34,7 @@ static inline void init_apic_ldr(void)
 	/* Already done in NUMA-Q firmware */
 }
 
-static inline void clustered_apic_check(void)
+static inline void setup_apic_routing(void)
 {
 	printk("Enabling APIC mode:  %s.  Using %d I/O APICs\n",
 		"NUMA-Q", nr_ioapics);
diff --git a/include/asm-i386/mach-summit/mach_apic.h b/include/asm-i386/mach-summit/mach_apic.h
index 43e5bd8f4a19..732f776aab8e 100644
--- a/include/asm-i386/mach-summit/mach_apic.h
+++ b/include/asm-i386/mach-summit/mach_apic.h
@@ -80,7 +80,7 @@ static inline int apic_id_registered(void)
 	return 1;
 }
 
-static inline void clustered_apic_check(void)
+static inline void setup_apic_routing(void)
 {
 	printk("Enabling APIC mode: Summit.  Using %d I/O APICs\n",
 		nr_ioapics);
diff --git a/include/asm-i386/mach-summit/mach_mpparse.h b/include/asm-i386/mach-summit/mach_mpparse.h
index 94268399170d..c2520539d934 100644
--- a/include/asm-i386/mach-summit/mach_mpparse.h
+++ b/include/asm-i386/mach-summit/mach_mpparse.h
@@ -30,7 +30,7 @@ static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
30 (!strncmp(productid, "VIGIL SMP", 9) 30 (!strncmp(productid, "VIGIL SMP", 9)
31 || !strncmp(productid, "EXA", 3) 31 || !strncmp(productid, "EXA", 3)
32 || !strncmp(productid, "RUTHLESS SMP", 12))){ 32 || !strncmp(productid, "RUTHLESS SMP", 12))){
33 mark_tsc_unstable();
33 mark_tsc_unstable("Summit based system");
34 use_cyclone = 1; /*enable cyclone-timer*/ 34 use_cyclone = 1; /*enable cyclone-timer*/
35 setup_summit(); 35 setup_summit();
36 return 1; 36 return 1;
@@ -44,7 +44,7 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
44 if (!strncmp(oem_id, "IBM", 3) && 44 if (!strncmp(oem_id, "IBM", 3) &&
45 (!strncmp(oem_table_id, "SERVIGIL", 8) 45 (!strncmp(oem_table_id, "SERVIGIL", 8)
46 || !strncmp(oem_table_id, "EXA", 3))){ 46 || !strncmp(oem_table_id, "EXA", 3))){
47 mark_tsc_unstable();
47 mark_tsc_unstable("Summit based system");
48 use_cyclone = 1; /*enable cyclone-timer*/ 48 use_cyclone = 1; /*enable cyclone-timer*/
49 setup_summit(); 49 setup_summit();
50 return 1; 50 return 1;
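mark_tsc_unstable() now takes a reason string, so the first caller to disqualify the TSC gets recorded in the log. A hedged userspace miniature of that latch-once-with-reason pattern (the message text is approximated, not quoted from the kernel):

#include <stdio.h>

static int tsc_unstable;

static void mark_tsc_unstable(const char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		printf("Marking TSC unstable due to: %s\n", reason);
	}
}

int main(void)
{
	mark_tsc_unstable("Summit based system");
	mark_tsc_unstable("a later reason");	/* already latched, stays silent */
	return 0;
}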
diff --git a/include/asm-i386/mach-visws/mach_apic.h b/include/asm-i386/mach-visws/mach_apic.h
index 18afe6b6fc4d..efac6f0d139f 100644
--- a/include/asm-i386/mach-visws/mach_apic.h
+++ b/include/asm-i386/mach-visws/mach_apic.h
@@ -47,7 +47,7 @@ static inline void summit_check(char *oem, char *productid)
47{ 47{
48} 48}
49 49
50static inline void clustered_apic_check(void)
50static inline void setup_apic_routing(void)
51{ 51{
52} 52}
53 53
diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h
index e6aa30f8de5b..8198d1cca1f3 100644
--- a/include/asm-i386/mmu_context.h
+++ b/include/asm-i386/mmu_context.h
@@ -5,6 +5,16 @@
5#include <asm/atomic.h> 5#include <asm/atomic.h>
6#include <asm/pgalloc.h> 6#include <asm/pgalloc.h>
7#include <asm/tlbflush.h> 7#include <asm/tlbflush.h>
8#include <asm/paravirt.h>
9#ifndef CONFIG_PARAVIRT
10#include <asm-generic/mm_hooks.h>
11
12static inline void paravirt_activate_mm(struct mm_struct *prev,
13 struct mm_struct *next)
14{
15}
16#endif /* !CONFIG_PARAVIRT */
17
8 18
9/* 19/*
10 * Used for LDT copy/destruction. 20 * Used for LDT copy/destruction.
@@ -65,7 +75,10 @@ static inline void switch_mm(struct mm_struct *prev,
65#define deactivate_mm(tsk, mm) \ 75#define deactivate_mm(tsk, mm) \
66 asm("movl %0,%%gs": :"r" (0)); 76 asm("movl %0,%%gs": :"r" (0));
67 77
68#define activate_mm(prev, next) \ 78#define activate_mm(prev, next) \
69 switch_mm((prev),(next),NULL)
79 do { \
80 paravirt_activate_mm(prev, next); \
81 switch_mm((prev),(next),NULL); \
82 } while(0);
70 83
71#endif 84#endif
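activate_mm() now runs a paravirt hook before the real switch, with the pair wrapped in do/while so it behaves as a single statement (note the patch's "} while(0);" keeps a stray trailing semicolon that the canonical idiom omits). A small self-contained sketch of the idiom, with hypothetical hook names:

#include <stdio.h>

static void hook(int prev, int next)      { printf("paravirt hook %d->%d\n", prev, next); }
static void do_switch(int prev, int next) { printf("switch_mm %d->%d\n", prev, next); }

/* hook first, then the switch, usable safely in if/else */
#define ACTIVATE_MM(prev, next)			\
	do {					\
		hook(prev, next);		\
		do_switch(prev, next);		\
	} while (0)

int main(void)
{
	if (1)
		ACTIVATE_MM(0, 1);
	else
		ACTIVATE_MM(1, 0);
	return 0;
}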
diff --git a/include/asm-i386/module.h b/include/asm-i386/module.h
index 02f8f541cbe0..7e5fda6c3976 100644
--- a/include/asm-i386/module.h
+++ b/include/asm-i386/module.h
@@ -54,6 +54,8 @@ struct mod_arch_specific
54#define MODULE_PROC_FAMILY "CYRIXIII " 54#define MODULE_PROC_FAMILY "CYRIXIII "
55#elif defined CONFIG_MVIAC3_2 55#elif defined CONFIG_MVIAC3_2
56#define MODULE_PROC_FAMILY "VIAC3-2 " 56#define MODULE_PROC_FAMILY "VIAC3-2 "
57#elif defined CONFIG_MVIAC7
58#define MODULE_PROC_FAMILY "VIAC7 "
57#elif defined CONFIG_MGEODEGX1 59#elif defined CONFIG_MGEODEGX1
58#define MODULE_PROC_FAMILY "GEODEGX1 " 60#define MODULE_PROC_FAMILY "GEODEGX1 "
59#elif defined CONFIG_MGEODE_LX 61#elif defined CONFIG_MGEODE_LX
diff --git a/include/asm-i386/msr-index.h b/include/asm-i386/msr-index.h
new file mode 100644
index 000000000000..a02eb2991349
--- /dev/null
+++ b/include/asm-i386/msr-index.h
@@ -0,0 +1,278 @@
1#ifndef __ASM_MSR_INDEX_H
2#define __ASM_MSR_INDEX_H
3
4/* CPU model specific register (MSR) numbers */
5
6/* x86-64 specific MSRs */
7#define MSR_EFER 0xc0000080 /* extended feature register */
8#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */
9#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */
10#define MSR_CSTAR 0xc0000083 /* compat mode SYSCALL target */
11#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */
12#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */
13#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */
14#define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow */
15
16/* EFER bits: */
17#define _EFER_SCE 0 /* SYSCALL/SYSRET */
18#define _EFER_LME 8 /* Long mode enable */
19#define _EFER_LMA 10 /* Long mode active (read-only) */
20#define _EFER_NX 11 /* No execute enable */
21
22#define EFER_SCE (1<<_EFER_SCE)
23#define EFER_LME (1<<_EFER_LME)
24#define EFER_LMA (1<<_EFER_LMA)
25#define EFER_NX (1<<_EFER_NX)
26
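Each EFER flag above is derived from its bit number, keeping the mask and the bit index in sync. A userspace sketch of the resulting read-modify-write style (the value is faked here rather than read with rdmsrl):

#include <stdio.h>

#define _EFER_NX 11
#define EFER_NX (1ULL << _EFER_NX)	/* the header spells it (1<<_EFER_NX) */

int main(void)
{
	unsigned long long efer = 0x500ULL;	/* stand-in for rdmsrl(MSR_EFER, efer) */
	efer |= EFER_NX;			/* enable No-Execute */
	printf("EFER = %#llx, NX %s\n", efer, (efer & EFER_NX) ? "on" : "off");
	return 0;
}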
27/* Intel MSRs. Some also available on other CPUs */
28#define MSR_IA32_PERFCTR0 0x000000c1
29#define MSR_IA32_PERFCTR1 0x000000c2
30#define MSR_FSB_FREQ 0x000000cd
31
32#define MSR_MTRRcap 0x000000fe
33#define MSR_IA32_BBL_CR_CTL 0x00000119
34
35#define MSR_IA32_SYSENTER_CS 0x00000174
36#define MSR_IA32_SYSENTER_ESP 0x00000175
37#define MSR_IA32_SYSENTER_EIP 0x00000176
38
39#define MSR_IA32_MCG_CAP 0x00000179
40#define MSR_IA32_MCG_STATUS 0x0000017a
41#define MSR_IA32_MCG_CTL 0x0000017b
42
43#define MSR_IA32_PEBS_ENABLE 0x000003f1
44#define MSR_IA32_DS_AREA 0x00000600
45#define MSR_IA32_PERF_CAPABILITIES 0x00000345
46
47#define MSR_MTRRfix64K_00000 0x00000250
48#define MSR_MTRRfix16K_80000 0x00000258
49#define MSR_MTRRfix16K_A0000 0x00000259
50#define MSR_MTRRfix4K_C0000 0x00000268
51#define MSR_MTRRfix4K_C8000 0x00000269
52#define MSR_MTRRfix4K_D0000 0x0000026a
53#define MSR_MTRRfix4K_D8000 0x0000026b
54#define MSR_MTRRfix4K_E0000 0x0000026c
55#define MSR_MTRRfix4K_E8000 0x0000026d
56#define MSR_MTRRfix4K_F0000 0x0000026e
57#define MSR_MTRRfix4K_F8000 0x0000026f
58#define MSR_MTRRdefType 0x000002ff
59
60#define MSR_IA32_DEBUGCTLMSR 0x000001d9
61#define MSR_IA32_LASTBRANCHFROMIP 0x000001db
62#define MSR_IA32_LASTBRANCHTOIP 0x000001dc
63#define MSR_IA32_LASTINTFROMIP 0x000001dd
64#define MSR_IA32_LASTINTTOIP 0x000001de
65
66#define MSR_IA32_MC0_CTL 0x00000400
67#define MSR_IA32_MC0_STATUS 0x00000401
68#define MSR_IA32_MC0_ADDR 0x00000402
69#define MSR_IA32_MC0_MISC 0x00000403
70
71#define MSR_P6_PERFCTR0 0x000000c1
72#define MSR_P6_PERFCTR1 0x000000c2
73#define MSR_P6_EVNTSEL0 0x00000186
74#define MSR_P6_EVNTSEL1 0x00000187
75
76/* K7/K8 MSRs. Not complete. See the architecture manual for a more
77 complete list. */
78#define MSR_K7_EVNTSEL0 0xc0010000
79#define MSR_K7_PERFCTR0 0xc0010004
80#define MSR_K7_EVNTSEL1 0xc0010001
81#define MSR_K7_PERFCTR1 0xc0010005
82#define MSR_K7_EVNTSEL2 0xc0010002
83#define MSR_K7_PERFCTR2 0xc0010006
84#define MSR_K7_EVNTSEL3 0xc0010003
85#define MSR_K7_PERFCTR3 0xc0010007
86#define MSR_K8_TOP_MEM1 0xc001001a
87#define MSR_K7_CLK_CTL 0xc001001b
88#define MSR_K8_TOP_MEM2 0xc001001d
89#define MSR_K8_SYSCFG 0xc0010010
90
91#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */
92#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */
93#define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */
94
95#define MSR_K7_HWCR 0xc0010015
96#define MSR_K8_HWCR 0xc0010015
97#define MSR_K7_FID_VID_CTL 0xc0010041
98#define MSR_K7_FID_VID_STATUS 0xc0010042
99#define MSR_K8_ENABLE_C1E 0xc0010055
100
101/* K6 MSRs */
102#define MSR_K6_EFER 0xc0000080
103#define MSR_K6_STAR 0xc0000081
104#define MSR_K6_WHCR 0xc0000082
105#define MSR_K6_UWCCR 0xc0000085
106#define MSR_K6_EPMR 0xc0000086
107#define MSR_K6_PSOR 0xc0000087
108#define MSR_K6_PFIR 0xc0000088
109
110/* Centaur-Hauls/IDT defined MSRs. */
111#define MSR_IDT_FCR1 0x00000107
112#define MSR_IDT_FCR2 0x00000108
113#define MSR_IDT_FCR3 0x00000109
114#define MSR_IDT_FCR4 0x0000010a
115
116#define MSR_IDT_MCR0 0x00000110
117#define MSR_IDT_MCR1 0x00000111
118#define MSR_IDT_MCR2 0x00000112
119#define MSR_IDT_MCR3 0x00000113
120#define MSR_IDT_MCR4 0x00000114
121#define MSR_IDT_MCR5 0x00000115
122#define MSR_IDT_MCR6 0x00000116
123#define MSR_IDT_MCR7 0x00000117
124#define MSR_IDT_MCR_CTRL 0x00000120
125
126/* VIA Cyrix defined MSRs*/
127#define MSR_VIA_FCR 0x00001107
128#define MSR_VIA_LONGHAUL 0x0000110a
129#define MSR_VIA_RNG 0x0000110b
130#define MSR_VIA_BCR2 0x00001147
131
132/* Transmeta defined MSRs */
133#define MSR_TMTA_LONGRUN_CTRL 0x80868010
134#define MSR_TMTA_LONGRUN_FLAGS 0x80868011
135#define MSR_TMTA_LRTI_READOUT 0x80868018
136#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a
137
138/* Intel defined MSRs. */
139#define MSR_IA32_P5_MC_ADDR 0x00000000
140#define MSR_IA32_P5_MC_TYPE 0x00000001
141#define MSR_IA32_TSC 0x00000010
142#define MSR_IA32_PLATFORM_ID 0x00000017
143#define MSR_IA32_EBL_CR_POWERON 0x0000002a
144
145#define MSR_IA32_APICBASE 0x0000001b
146#define MSR_IA32_APICBASE_BSP (1<<8)
147#define MSR_IA32_APICBASE_ENABLE (1<<11)
148#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
149
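The APICBASE sub-field masks above decode one 32-bit MSR word. A hedged sketch of pulling the fields apart (constants copied from the header with unsigned suffixes added; the raw value is hardcoded instead of coming from rdmsr):

#include <stdio.h>

#define MSR_IA32_APICBASE_BSP		(1u << 8)
#define MSR_IA32_APICBASE_ENABLE	(1u << 11)
#define MSR_IA32_APICBASE_BASE		(0xfffffu << 12)

int main(void)
{
	unsigned int lo = 0xfee00900u;	/* a typical boot-CPU value */
	printf("enabled=%d bsp=%d base=%#x\n",
	       !!(lo & MSR_IA32_APICBASE_ENABLE),
	       !!(lo & MSR_IA32_APICBASE_BSP),
	       lo & MSR_IA32_APICBASE_BASE);
	return 0;
}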
150#define MSR_IA32_UCODE_WRITE 0x00000079
151#define MSR_IA32_UCODE_REV 0x0000008b
152
153#define MSR_IA32_PERF_STATUS 0x00000198
154#define MSR_IA32_PERF_CTL 0x00000199
155
156#define MSR_IA32_MPERF 0x000000e7
157#define MSR_IA32_APERF 0x000000e8
158
159#define MSR_IA32_THERM_CONTROL 0x0000019a
160#define MSR_IA32_THERM_INTERRUPT 0x0000019b
161#define MSR_IA32_THERM_STATUS 0x0000019c
162#define MSR_IA32_MISC_ENABLE 0x000001a0
163
164/* Intel Model 6 */
165#define MSR_P6_EVNTSEL0 0x00000186
166#define MSR_P6_EVNTSEL1 0x00000187
167
168/* P4/Xeon+ specific */
169#define MSR_IA32_MCG_EAX 0x00000180
170#define MSR_IA32_MCG_EBX 0x00000181
171#define MSR_IA32_MCG_ECX 0x00000182
172#define MSR_IA32_MCG_EDX 0x00000183
173#define MSR_IA32_MCG_ESI 0x00000184
174#define MSR_IA32_MCG_EDI 0x00000185
175#define MSR_IA32_MCG_EBP 0x00000186
176#define MSR_IA32_MCG_ESP 0x00000187
177#define MSR_IA32_MCG_EFLAGS 0x00000188
178#define MSR_IA32_MCG_EIP 0x00000189
179#define MSR_IA32_MCG_RESERVED 0x0000018a
180
181/* Pentium IV performance counter MSRs */
182#define MSR_P4_BPU_PERFCTR0 0x00000300
183#define MSR_P4_BPU_PERFCTR1 0x00000301
184#define MSR_P4_BPU_PERFCTR2 0x00000302
185#define MSR_P4_BPU_PERFCTR3 0x00000303
186#define MSR_P4_MS_PERFCTR0 0x00000304
187#define MSR_P4_MS_PERFCTR1 0x00000305
188#define MSR_P4_MS_PERFCTR2 0x00000306
189#define MSR_P4_MS_PERFCTR3 0x00000307
190#define MSR_P4_FLAME_PERFCTR0 0x00000308
191#define MSR_P4_FLAME_PERFCTR1 0x00000309
192#define MSR_P4_FLAME_PERFCTR2 0x0000030a
193#define MSR_P4_FLAME_PERFCTR3 0x0000030b
194#define MSR_P4_IQ_PERFCTR0 0x0000030c
195#define MSR_P4_IQ_PERFCTR1 0x0000030d
196#define MSR_P4_IQ_PERFCTR2 0x0000030e
197#define MSR_P4_IQ_PERFCTR3 0x0000030f
198#define MSR_P4_IQ_PERFCTR4 0x00000310
199#define MSR_P4_IQ_PERFCTR5 0x00000311
200#define MSR_P4_BPU_CCCR0 0x00000360
201#define MSR_P4_BPU_CCCR1 0x00000361
202#define MSR_P4_BPU_CCCR2 0x00000362
203#define MSR_P4_BPU_CCCR3 0x00000363
204#define MSR_P4_MS_CCCR0 0x00000364
205#define MSR_P4_MS_CCCR1 0x00000365
206#define MSR_P4_MS_CCCR2 0x00000366
207#define MSR_P4_MS_CCCR3 0x00000367
208#define MSR_P4_FLAME_CCCR0 0x00000368
209#define MSR_P4_FLAME_CCCR1 0x00000369
210#define MSR_P4_FLAME_CCCR2 0x0000036a
211#define MSR_P4_FLAME_CCCR3 0x0000036b
212#define MSR_P4_IQ_CCCR0 0x0000036c
213#define MSR_P4_IQ_CCCR1 0x0000036d
214#define MSR_P4_IQ_CCCR2 0x0000036e
215#define MSR_P4_IQ_CCCR3 0x0000036f
216#define MSR_P4_IQ_CCCR4 0x00000370
217#define MSR_P4_IQ_CCCR5 0x00000371
218#define MSR_P4_ALF_ESCR0 0x000003ca
219#define MSR_P4_ALF_ESCR1 0x000003cb
220#define MSR_P4_BPU_ESCR0 0x000003b2
221#define MSR_P4_BPU_ESCR1 0x000003b3
222#define MSR_P4_BSU_ESCR0 0x000003a0
223#define MSR_P4_BSU_ESCR1 0x000003a1
224#define MSR_P4_CRU_ESCR0 0x000003b8
225#define MSR_P4_CRU_ESCR1 0x000003b9
226#define MSR_P4_CRU_ESCR2 0x000003cc
227#define MSR_P4_CRU_ESCR3 0x000003cd
228#define MSR_P4_CRU_ESCR4 0x000003e0
229#define MSR_P4_CRU_ESCR5 0x000003e1
230#define MSR_P4_DAC_ESCR0 0x000003a8
231#define MSR_P4_DAC_ESCR1 0x000003a9
232#define MSR_P4_FIRM_ESCR0 0x000003a4
233#define MSR_P4_FIRM_ESCR1 0x000003a5
234#define MSR_P4_FLAME_ESCR0 0x000003a6
235#define MSR_P4_FLAME_ESCR1 0x000003a7
236#define MSR_P4_FSB_ESCR0 0x000003a2
237#define MSR_P4_FSB_ESCR1 0x000003a3
238#define MSR_P4_IQ_ESCR0 0x000003ba
239#define MSR_P4_IQ_ESCR1 0x000003bb
240#define MSR_P4_IS_ESCR0 0x000003b4
241#define MSR_P4_IS_ESCR1 0x000003b5
242#define MSR_P4_ITLB_ESCR0 0x000003b6
243#define MSR_P4_ITLB_ESCR1 0x000003b7
244#define MSR_P4_IX_ESCR0 0x000003c8
245#define MSR_P4_IX_ESCR1 0x000003c9
246#define MSR_P4_MOB_ESCR0 0x000003aa
247#define MSR_P4_MOB_ESCR1 0x000003ab
248#define MSR_P4_MS_ESCR0 0x000003c0
249#define MSR_P4_MS_ESCR1 0x000003c1
250#define MSR_P4_PMH_ESCR0 0x000003ac
251#define MSR_P4_PMH_ESCR1 0x000003ad
252#define MSR_P4_RAT_ESCR0 0x000003bc
253#define MSR_P4_RAT_ESCR1 0x000003bd
254#define MSR_P4_SAAT_ESCR0 0x000003ae
255#define MSR_P4_SAAT_ESCR1 0x000003af
256#define MSR_P4_SSU_ESCR0 0x000003be
257#define MSR_P4_SSU_ESCR1 0x000003bf /* guess: not in manual */
258
259#define MSR_P4_TBPU_ESCR0 0x000003c2
260#define MSR_P4_TBPU_ESCR1 0x000003c3
261#define MSR_P4_TC_ESCR0 0x000003c4
262#define MSR_P4_TC_ESCR1 0x000003c5
263#define MSR_P4_U2L_ESCR0 0x000003b0
264#define MSR_P4_U2L_ESCR1 0x000003b1
265
266/* Intel Core-based CPU performance counters */
267#define MSR_CORE_PERF_FIXED_CTR0 0x00000309
268#define MSR_CORE_PERF_FIXED_CTR1 0x0000030a
269#define MSR_CORE_PERF_FIXED_CTR2 0x0000030b
270#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x0000038d
271#define MSR_CORE_PERF_GLOBAL_STATUS 0x0000038e
272#define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f
273#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390
274
275/* Geode defined MSRs */
276#define MSR_GEODE_BUSCONT_CONF0 0x00001900
277
278#endif /* __ASM_MSR_INDEX_H */
diff --git a/include/asm-i386/msr.h b/include/asm-i386/msr.h
index 2ad3f30b1a68..9559894c7658 100644
--- a/include/asm-i386/msr.h
+++ b/include/asm-i386/msr.h
@@ -1,6 +1,79 @@
1#ifndef __ASM_MSR_H 1#ifndef __ASM_MSR_H
2#define __ASM_MSR_H 2#define __ASM_MSR_H
3 3
4#include <asm/msr-index.h>
5
6#ifdef __KERNEL__
7#ifndef __ASSEMBLY__
8
9#include <asm/errno.h>
10
11static inline unsigned long long native_read_msr(unsigned int msr)
12{
13 unsigned long long val;
14
15 asm volatile("rdmsr" : "=A" (val) : "c" (msr));
16 return val;
17}
18
19static inline unsigned long long native_read_msr_safe(unsigned int msr,
20 int *err)
21{
22 unsigned long long val;
23
24 asm volatile("2: rdmsr ; xorl %0,%0\n"
25 "1:\n\t"
26 ".section .fixup,\"ax\"\n\t"
27 "3: movl %3,%0 ; jmp 1b\n\t"
28 ".previous\n\t"
29 ".section __ex_table,\"a\"\n"
30 " .align 4\n\t"
31 " .long 2b,3b\n\t"
32 ".previous"
33 : "=r" (*err), "=A" (val)
34 : "c" (msr), "i" (-EFAULT));
35
36 return val;
37}
38
39static inline void native_write_msr(unsigned int msr, unsigned long long val)
40{
41 asm volatile("wrmsr" : : "c" (msr), "A"(val));
42}
43
44static inline int native_write_msr_safe(unsigned int msr,
45 unsigned long long val)
46{
47 int err;
48 asm volatile("2: wrmsr ; xorl %0,%0\n"
49 "1:\n\t"
50 ".section .fixup,\"ax\"\n\t"
51 "3: movl %4,%0 ; jmp 1b\n\t"
52 ".previous\n\t"
53 ".section __ex_table,\"a\"\n"
54 " .align 4\n\t"
55 " .long 2b,3b\n\t"
56 ".previous"
57 : "=a" (err)
58 : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)),
59 "i" (-EFAULT));
60 return err;
61}
62
63static inline unsigned long long native_read_tsc(void)
64{
65 unsigned long long val;
66 asm volatile("rdtsc" : "=A" (val));
67 return val;
68}
69
70static inline unsigned long long native_read_pmc(void)
71{
72 unsigned long long val;
73 asm volatile("rdpmc" : "=A" (val));
74 return val;
75}
76
4#ifdef CONFIG_PARAVIRT 77#ifdef CONFIG_PARAVIRT
5#include <asm/paravirt.h> 78#include <asm/paravirt.h>
6#else 79#else
@@ -11,22 +84,20 @@
11 * pointer indirection), this allows gcc to optimize better 84 * pointer indirection), this allows gcc to optimize better
12 */ 85 */
13 86
14#define rdmsr(msr,val1,val2) \ 87#define rdmsr(msr,val1,val2) \
15 __asm__ __volatile__("rdmsr" \ 88 do { \
16 : "=a" (val1), "=d" (val2) \ 89 unsigned long long __val = native_read_msr(msr); \
17 : "c" (msr)) 90 val1 = __val; \
91 val2 = __val >> 32; \
92 } while(0)
18 93
19#define wrmsr(msr,val1,val2) \ 94#define wrmsr(msr,val1,val2) \
20 __asm__ __volatile__("wrmsr" \ 95 native_write_msr(msr, ((unsigned long long)val2 << 32) | val1)
21 : /* no outputs */ \
22 : "c" (msr), "a" (val1), "d" (val2))
23 96
24#define rdmsrl(msr,val) do { \
25 unsigned long l__,h__; \
26 rdmsr (msr, l__, h__); \
27 val = l__; \
28 val |= ((u64)h__<<32); \
29} while(0)
97#define rdmsrl(msr,val) \
98 do { \
99 (val) = native_read_msr(msr); \
100 } while(0)
30 101
31static inline void wrmsrl (unsigned long msr, unsigned long long val) 102static inline void wrmsrl (unsigned long msr, unsigned long long val)
32{ 103{
@@ -37,50 +108,41 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
37} 108}
38 109
39/* wrmsr with exception handling */ 110/* wrmsr with exception handling */
40#define wrmsr_safe(msr,a,b) ({ int ret__; \
41 asm volatile("2: wrmsr ; xorl %0,%0\n" \
42 "1:\n\t" \
43 ".section .fixup,\"ax\"\n\t" \
44 "3: movl %4,%0 ; jmp 1b\n\t" \
45 ".previous\n\t" \
46 ".section __ex_table,\"a\"\n" \
47 " .align 4\n\t" \
48 " .long 2b,3b\n\t" \
49 ".previous" \
50 : "=a" (ret__) \
51 : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT));\
52 ret__; })
111#define wrmsr_safe(msr,val1,val2) \
112 (native_write_msr_safe(msr, ((unsigned long long)val2 << 32) | val1))
53 113
54/* rdmsr with exception handling */ 114/* rdmsr with exception handling */
55#define rdmsr_safe(msr,a,b) ({ int ret__; \
56 asm volatile("2: rdmsr ; xorl %0,%0\n" \
57 "1:\n\t" \
58 ".section .fixup,\"ax\"\n\t" \
59 "3: movl %4,%0 ; jmp 1b\n\t" \
60 ".previous\n\t" \
61 ".section __ex_table,\"a\"\n" \
62 " .align 4\n\t" \
63 " .long 2b,3b\n\t" \
64 ".previous" \
65 : "=r" (ret__), "=a" (*(a)), "=d" (*(b)) \
66 : "c" (msr), "i" (-EFAULT));\
67 ret__; })
68
69#define rdtsc(low,high) \
70 __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
71
72#define rdtscl(low) \
73 __asm__ __volatile__("rdtsc" : "=a" (low) : : "edx")
74
75#define rdtscll(val) \
76 __asm__ __volatile__("rdtsc" : "=A" (val))
115#define rdmsr_safe(msr,p1,p2) \
116 ({ \
117 int __err; \
118 unsigned long long __val = native_read_msr_safe(msr, &__err);\
119 (*p1) = __val; \
120 (*p2) = __val >> 32; \
121 __err; \
122 })
123
124#define rdtsc(low,high) \
125 do { \
126 u64 _l = native_read_tsc(); \
127 (low) = (u32)_l; \
128 (high) = _l >> 32; \
129 } while(0)
130
131#define rdtscl(low) \
132 do { \
133 (low) = native_read_tsc(); \
134 } while(0)
135
136#define rdtscll(val) ((val) = native_read_tsc())
77 137
78#define write_tsc(val1,val2) wrmsr(0x10, val1, val2) 138#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
79 139
80#define rdpmc(counter,low,high) \ 140#define rdpmc(counter,low,high) \
81 __asm__ __volatile__("rdpmc" \ 141 do { \
82 : "=a" (low), "=d" (high) \ 142 u64 _l = native_read_pmc(); \
83 : "c" (counter)) 143 low = (u32)_l; \
144 high = _l >> 32; \
145 } while(0)
84#endif /* !CONFIG_PARAVIRT */ 146#endif /* !CONFIG_PARAVIRT */
85 147
86#ifdef CONFIG_SMP 148#ifdef CONFIG_SMP
@@ -96,234 +158,6 @@ static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
96 wrmsr(msr_no, l, h); 158 wrmsr(msr_no, l, h);
97} 159}
98#endif /* CONFIG_SMP */ 160#endif /* CONFIG_SMP */
161#endif
162#endif
99
100/* symbolic names for some interesting MSRs */
101/* Intel defined MSRs. */
102#define MSR_IA32_P5_MC_ADDR 0
103#define MSR_IA32_P5_MC_TYPE 1
104#define MSR_IA32_PLATFORM_ID 0x17
105#define MSR_IA32_EBL_CR_POWERON 0x2a
106
107#define MSR_IA32_APICBASE 0x1b
108#define MSR_IA32_APICBASE_BSP (1<<8)
109#define MSR_IA32_APICBASE_ENABLE (1<<11)
110#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
111
112#define MSR_IA32_UCODE_WRITE 0x79
113#define MSR_IA32_UCODE_REV 0x8b
114
115#define MSR_P6_PERFCTR0 0xc1
116#define MSR_P6_PERFCTR1 0xc2
117#define MSR_FSB_FREQ 0xcd
118
119
120#define MSR_IA32_BBL_CR_CTL 0x119
121
122#define MSR_IA32_SYSENTER_CS 0x174
123#define MSR_IA32_SYSENTER_ESP 0x175
124#define MSR_IA32_SYSENTER_EIP 0x176
125
126#define MSR_IA32_MCG_CAP 0x179
127#define MSR_IA32_MCG_STATUS 0x17a
128#define MSR_IA32_MCG_CTL 0x17b
129
130/* P4/Xeon+ specific */
131#define MSR_IA32_MCG_EAX 0x180
132#define MSR_IA32_MCG_EBX 0x181
133#define MSR_IA32_MCG_ECX 0x182
134#define MSR_IA32_MCG_EDX 0x183
135#define MSR_IA32_MCG_ESI 0x184
136#define MSR_IA32_MCG_EDI 0x185
137#define MSR_IA32_MCG_EBP 0x186
138#define MSR_IA32_MCG_ESP 0x187
139#define MSR_IA32_MCG_EFLAGS 0x188
140#define MSR_IA32_MCG_EIP 0x189
141#define MSR_IA32_MCG_RESERVED 0x18A
142
143#define MSR_P6_EVNTSEL0 0x186
144#define MSR_P6_EVNTSEL1 0x187
145
146#define MSR_IA32_PERF_STATUS 0x198
147#define MSR_IA32_PERF_CTL 0x199
148
149#define MSR_IA32_MPERF 0xE7
150#define MSR_IA32_APERF 0xE8
151
152#define MSR_IA32_THERM_CONTROL 0x19a
153#define MSR_IA32_THERM_INTERRUPT 0x19b
154#define MSR_IA32_THERM_STATUS 0x19c
155#define MSR_IA32_MISC_ENABLE 0x1a0
156
157#define MSR_IA32_DEBUGCTLMSR 0x1d9
158#define MSR_IA32_LASTBRANCHFROMIP 0x1db
159#define MSR_IA32_LASTBRANCHTOIP 0x1dc
160#define MSR_IA32_LASTINTFROMIP 0x1dd
161#define MSR_IA32_LASTINTTOIP 0x1de
162
163#define MSR_IA32_MC0_CTL 0x400
164#define MSR_IA32_MC0_STATUS 0x401
165#define MSR_IA32_MC0_ADDR 0x402
166#define MSR_IA32_MC0_MISC 0x403
167
168#define MSR_IA32_PEBS_ENABLE 0x3f1
169#define MSR_IA32_DS_AREA 0x600
170#define MSR_IA32_PERF_CAPABILITIES 0x345
171
172/* Pentium IV performance counter MSRs */
173#define MSR_P4_BPU_PERFCTR0 0x300
174#define MSR_P4_BPU_PERFCTR1 0x301
175#define MSR_P4_BPU_PERFCTR2 0x302
176#define MSR_P4_BPU_PERFCTR3 0x303
177#define MSR_P4_MS_PERFCTR0 0x304
178#define MSR_P4_MS_PERFCTR1 0x305
179#define MSR_P4_MS_PERFCTR2 0x306
180#define MSR_P4_MS_PERFCTR3 0x307
181#define MSR_P4_FLAME_PERFCTR0 0x308
182#define MSR_P4_FLAME_PERFCTR1 0x309
183#define MSR_P4_FLAME_PERFCTR2 0x30a
184#define MSR_P4_FLAME_PERFCTR3 0x30b
185#define MSR_P4_IQ_PERFCTR0 0x30c
186#define MSR_P4_IQ_PERFCTR1 0x30d
187#define MSR_P4_IQ_PERFCTR2 0x30e
188#define MSR_P4_IQ_PERFCTR3 0x30f
189#define MSR_P4_IQ_PERFCTR4 0x310
190#define MSR_P4_IQ_PERFCTR5 0x311
191#define MSR_P4_BPU_CCCR0 0x360
192#define MSR_P4_BPU_CCCR1 0x361
193#define MSR_P4_BPU_CCCR2 0x362
194#define MSR_P4_BPU_CCCR3 0x363
195#define MSR_P4_MS_CCCR0 0x364
196#define MSR_P4_MS_CCCR1 0x365
197#define MSR_P4_MS_CCCR2 0x366
198#define MSR_P4_MS_CCCR3 0x367
199#define MSR_P4_FLAME_CCCR0 0x368
200#define MSR_P4_FLAME_CCCR1 0x369
201#define MSR_P4_FLAME_CCCR2 0x36a
202#define MSR_P4_FLAME_CCCR3 0x36b
203#define MSR_P4_IQ_CCCR0 0x36c
204#define MSR_P4_IQ_CCCR1 0x36d
205#define MSR_P4_IQ_CCCR2 0x36e
206#define MSR_P4_IQ_CCCR3 0x36f
207#define MSR_P4_IQ_CCCR4 0x370
208#define MSR_P4_IQ_CCCR5 0x371
209#define MSR_P4_ALF_ESCR0 0x3ca
210#define MSR_P4_ALF_ESCR1 0x3cb
211#define MSR_P4_BPU_ESCR0 0x3b2
212#define MSR_P4_BPU_ESCR1 0x3b3
213#define MSR_P4_BSU_ESCR0 0x3a0
214#define MSR_P4_BSU_ESCR1 0x3a1
215#define MSR_P4_CRU_ESCR0 0x3b8
216#define MSR_P4_CRU_ESCR1 0x3b9
217#define MSR_P4_CRU_ESCR2 0x3cc
218#define MSR_P4_CRU_ESCR3 0x3cd
219#define MSR_P4_CRU_ESCR4 0x3e0
220#define MSR_P4_CRU_ESCR5 0x3e1
221#define MSR_P4_DAC_ESCR0 0x3a8
222#define MSR_P4_DAC_ESCR1 0x3a9
223#define MSR_P4_FIRM_ESCR0 0x3a4
224#define MSR_P4_FIRM_ESCR1 0x3a5
225#define MSR_P4_FLAME_ESCR0 0x3a6
226#define MSR_P4_FLAME_ESCR1 0x3a7
227#define MSR_P4_FSB_ESCR0 0x3a2
228#define MSR_P4_FSB_ESCR1 0x3a3
229#define MSR_P4_IQ_ESCR0 0x3ba
230#define MSR_P4_IQ_ESCR1 0x3bb
231#define MSR_P4_IS_ESCR0 0x3b4
232#define MSR_P4_IS_ESCR1 0x3b5
233#define MSR_P4_ITLB_ESCR0 0x3b6
234#define MSR_P4_ITLB_ESCR1 0x3b7
235#define MSR_P4_IX_ESCR0 0x3c8
236#define MSR_P4_IX_ESCR1 0x3c9
237#define MSR_P4_MOB_ESCR0 0x3aa
238#define MSR_P4_MOB_ESCR1 0x3ab
239#define MSR_P4_MS_ESCR0 0x3c0
240#define MSR_P4_MS_ESCR1 0x3c1
241#define MSR_P4_PMH_ESCR0 0x3ac
242#define MSR_P4_PMH_ESCR1 0x3ad
243#define MSR_P4_RAT_ESCR0 0x3bc
244#define MSR_P4_RAT_ESCR1 0x3bd
245#define MSR_P4_SAAT_ESCR0 0x3ae
246#define MSR_P4_SAAT_ESCR1 0x3af
247#define MSR_P4_SSU_ESCR0 0x3be
248#define MSR_P4_SSU_ESCR1 0x3bf /* guess: not defined in manual */
249#define MSR_P4_TBPU_ESCR0 0x3c2
250#define MSR_P4_TBPU_ESCR1 0x3c3
251#define MSR_P4_TC_ESCR0 0x3c4
252#define MSR_P4_TC_ESCR1 0x3c5
253#define MSR_P4_U2L_ESCR0 0x3b0
254#define MSR_P4_U2L_ESCR1 0x3b1
255
256/* AMD Defined MSRs */
257#define MSR_K6_EFER 0xC0000080
258#define MSR_K6_STAR 0xC0000081
259#define MSR_K6_WHCR 0xC0000082
260#define MSR_K6_UWCCR 0xC0000085
261#define MSR_K6_EPMR 0xC0000086
262#define MSR_K6_PSOR 0xC0000087
263#define MSR_K6_PFIR 0xC0000088
264
265#define MSR_K7_EVNTSEL0 0xC0010000
266#define MSR_K7_EVNTSEL1 0xC0010001
267#define MSR_K7_EVNTSEL2 0xC0010002
268#define MSR_K7_EVNTSEL3 0xC0010003
269#define MSR_K7_PERFCTR0 0xC0010004
270#define MSR_K7_PERFCTR1 0xC0010005
271#define MSR_K7_PERFCTR2 0xC0010006
272#define MSR_K7_PERFCTR3 0xC0010007
273#define MSR_K7_HWCR 0xC0010015
274#define MSR_K7_CLK_CTL 0xC001001b
275#define MSR_K7_FID_VID_CTL 0xC0010041
276#define MSR_K7_FID_VID_STATUS 0xC0010042
277
278#define MSR_K8_ENABLE_C1E 0xC0010055
279
280/* extended feature register */
281#define MSR_EFER 0xc0000080
282
283/* EFER bits: */
284
285/* Execute Disable enable */
286#define _EFER_NX 11
287#define EFER_NX (1<<_EFER_NX)
288
289/* Centaur-Hauls/IDT defined MSRs. */
290#define MSR_IDT_FCR1 0x107
291#define MSR_IDT_FCR2 0x108
292#define MSR_IDT_FCR3 0x109
293#define MSR_IDT_FCR4 0x10a
294
295#define MSR_IDT_MCR0 0x110
296#define MSR_IDT_MCR1 0x111
297#define MSR_IDT_MCR2 0x112
298#define MSR_IDT_MCR3 0x113
299#define MSR_IDT_MCR4 0x114
300#define MSR_IDT_MCR5 0x115
301#define MSR_IDT_MCR6 0x116
302#define MSR_IDT_MCR7 0x117
303#define MSR_IDT_MCR_CTRL 0x120
304
305/* VIA Cyrix defined MSRs*/
306#define MSR_VIA_FCR 0x1107
307#define MSR_VIA_LONGHAUL 0x110a
308#define MSR_VIA_RNG 0x110b
309#define MSR_VIA_BCR2 0x1147
310
311/* Transmeta defined MSRs */
312#define MSR_TMTA_LONGRUN_CTRL 0x80868010
313#define MSR_TMTA_LONGRUN_FLAGS 0x80868011
314#define MSR_TMTA_LRTI_READOUT 0x80868018
315#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a
316
317/* Intel Core-based CPU performance counters */
318#define MSR_CORE_PERF_FIXED_CTR0 0x309
319#define MSR_CORE_PERF_FIXED_CTR1 0x30a
320#define MSR_CORE_PERF_FIXED_CTR2 0x30b
321#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d
322#define MSR_CORE_PERF_GLOBAL_STATUS 0x38e
323#define MSR_CORE_PERF_GLOBAL_CTRL 0x38f
324#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390
325
326/* Geode defined MSRs */
327#define MSR_GEODE_BUSCONT_CONF0 0x1900
328
329#endif /* __ASM_MSR_H */ 163#endif /* __ASM_MSR_H */
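The new native_*_safe helpers above give rdmsr_safe()/wrmsr_safe() a plain error-return contract (-EFAULT when the MSR traps) instead of an unhandled fault. A userspace stand-in for how callers probe with it (the fake accessor and its MSR numbers are hypothetical):

#include <stdio.h>

#define EFAULT 14

static int fake_rdmsr_safe(unsigned int msr, unsigned int *lo, unsigned int *hi)
{
	if (msr != 0x10)		/* pretend only the TSC MSR exists */
		return -EFAULT;
	*lo = 0x12345678u;
	*hi = 0x9abcu;
	return 0;
}

int main(void)
{
	unsigned int lo, hi;

	if (fake_rdmsr_safe(0xdead, &lo, &hi))
		printf("MSR 0xdead absent, handled without faulting\n");
	if (!fake_rdmsr_safe(0x10, &lo, &hi))
		printf("TSC = %#x%08x\n", hi, lo);
	return 0;
}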
diff --git a/include/asm-i386/mtrr.h b/include/asm-i386/mtrr.h
index 07f063ae26ea..7e9c7ccbdcfe 100644
--- a/include/asm-i386/mtrr.h
+++ b/include/asm-i386/mtrr.h
@@ -69,6 +69,8 @@ struct mtrr_gentry
69 69
70/* The following functions are for use by other drivers */ 70/* The following functions are for use by other drivers */
71# ifdef CONFIG_MTRR 71# ifdef CONFIG_MTRR
72extern void mtrr_save_fixed_ranges(void *);
73extern void mtrr_save_state(void);
72extern int mtrr_add (unsigned long base, unsigned long size, 74extern int mtrr_add (unsigned long base, unsigned long size,
73 unsigned int type, char increment); 75 unsigned int type, char increment);
74extern int mtrr_add_page (unsigned long base, unsigned long size, 76extern int mtrr_add_page (unsigned long base, unsigned long size,
@@ -79,6 +81,8 @@ extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
79extern void mtrr_ap_init(void); 81extern void mtrr_ap_init(void);
80extern void mtrr_bp_init(void); 82extern void mtrr_bp_init(void);
81# else 83# else
84#define mtrr_save_fixed_ranges(arg) do {} while (0)
85#define mtrr_save_state() do {} while (0)
82static __inline__ int mtrr_add (unsigned long base, unsigned long size, 86static __inline__ int mtrr_add (unsigned long base, unsigned long size,
83 unsigned int type, char increment) 87 unsigned int type, char increment)
84{ 88{
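When CONFIG_MTRR is off, the new mtrr_save_fixed_ranges()/mtrr_save_state() entries collapse to do-nothing macros so callers stay free of #ifdefs. A tiny sketch of that stub pattern, under an invented config switch:

#include <stdio.h>

#define CONFIG_DEMO_MTRR 0	/* flip to 1 to get the real body */

#if CONFIG_DEMO_MTRR
static void mtrr_save_state(void) { printf("saving MTRR state\n"); }
#else
#define mtrr_save_state() do {} while (0)	/* compiles away entirely */
#endif

int main(void)
{
	mtrr_save_state();	/* a legal statement either way */
	return 0;
}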
diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h
index b04333ea6f31..fb1e133efd9f 100644
--- a/include/asm-i386/nmi.h
+++ b/include/asm-i386/nmi.h
@@ -50,4 +50,12 @@ void __trigger_all_cpu_backtrace(void);
50 50
51#endif 51#endif
52 52
53void lapic_watchdog_stop(void);
54int lapic_watchdog_init(unsigned nmi_hz);
55int lapic_wd_event(unsigned nmi_hz);
56unsigned lapic_adjust_nmi_hz(unsigned hz);
57int lapic_watchdog_ok(void);
58void disable_lapic_nmi_watchdog(void);
59void enable_lapic_nmi_watchdog(void);
60
53#endif /* ASM_NMI_H */ 61#endif /* ASM_NMI_H */
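The watchdog entry points added above suggest a simple lifecycle: initialize at a target NMI rate, account ticks via lapic_wd_event(), stop on teardown. A stubbed sketch of that call order (the bodies are placeholders; the real implementations live in the perfctr watchdog code this merge introduces, which is an assumption here):

#include <stdio.h>

static int  lapic_watchdog_init(unsigned nmi_hz) { printf("watchdog armed at %u Hz\n", nmi_hz); return 0; }
static int  lapic_wd_event(unsigned nmi_hz)      { (void)nmi_hz; return 1; /* 1: the NMI was ours */ }
static void lapic_watchdog_stop(void)            { printf("watchdog stopped\n"); }

int main(void)
{
	unsigned hz = 1;

	if (lapic_watchdog_init(hz) == 0) {
		if (lapic_wd_event(hz))
			printf("NMI accounted to the watchdog\n");
		lapic_watchdog_stop();
	}
	return 0;
}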
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h
index 7b19f454761d..818ac8bf01e2 100644
--- a/include/asm-i386/page.h
+++ b/include/asm-i386/page.h
@@ -12,7 +12,6 @@
12#ifdef __KERNEL__ 12#ifdef __KERNEL__
13#ifndef __ASSEMBLY__ 13#ifndef __ASSEMBLY__
14 14
15
16#ifdef CONFIG_X86_USE_3DNOW 15#ifdef CONFIG_X86_USE_3DNOW
17 16
18#include <asm/mmx.h> 17#include <asm/mmx.h>
@@ -42,26 +41,81 @@
42 * These are used to make use of C type-checking.. 41 * These are used to make use of C type-checking..
43 */ 42 */
44extern int nx_enabled; 43extern int nx_enabled;
44
45#ifdef CONFIG_X86_PAE 45#ifdef CONFIG_X86_PAE
46extern unsigned long long __supported_pte_mask; 46extern unsigned long long __supported_pte_mask;
47typedef struct { unsigned long pte_low, pte_high; } pte_t; 47typedef struct { unsigned long pte_low, pte_high; } pte_t;
48typedef struct { unsigned long long pmd; } pmd_t; 48typedef struct { unsigned long long pmd; } pmd_t;
49typedef struct { unsigned long long pgd; } pgd_t; 49typedef struct { unsigned long long pgd; } pgd_t;
50typedef struct { unsigned long long pgprot; } pgprot_t; 50typedef struct { unsigned long long pgprot; } pgprot_t;
51#define pmd_val(x) ((x).pmd)
52#define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
53#define __pmd(x) ((pmd_t) { (x) } )
51
52static inline unsigned long long native_pgd_val(pgd_t pgd)
53{
54 return pgd.pgd;
55}
56
57static inline unsigned long long native_pmd_val(pmd_t pmd)
58{
59 return pmd.pmd;
60}
61
62static inline unsigned long long native_pte_val(pte_t pte)
63{
64 return pte.pte_low | ((unsigned long long)pte.pte_high << 32);
65}
66
67static inline pgd_t native_make_pgd(unsigned long long val)
68{
69 return (pgd_t) { val };
70}
71
72static inline pmd_t native_make_pmd(unsigned long long val)
73{
74 return (pmd_t) { val };
75}
76
77static inline pte_t native_make_pte(unsigned long long val)
78{
79 return (pte_t) { .pte_low = val, .pte_high = (val >> 32) } ;
80}
81
82#ifndef CONFIG_PARAVIRT
83#define pmd_val(x) native_pmd_val(x)
84#define __pmd(x) native_make_pmd(x)
85#endif
86
54#define HPAGE_SHIFT 21 87#define HPAGE_SHIFT 21
55#include <asm-generic/pgtable-nopud.h> 88#include <asm-generic/pgtable-nopud.h>
56#else
89#else /* !CONFIG_X86_PAE */
57typedef struct { unsigned long pte_low; } pte_t; 90typedef struct { unsigned long pte_low; } pte_t;
58typedef struct { unsigned long pgd; } pgd_t; 91typedef struct { unsigned long pgd; } pgd_t;
59typedef struct { unsigned long pgprot; } pgprot_t; 92typedef struct { unsigned long pgprot; } pgprot_t;
60#define boot_pte_t pte_t /* or would you rather have a typedef */ 93#define boot_pte_t pte_t /* or would you rather have a typedef */
61#define pte_val(x) ((x).pte_low)
94
95static inline unsigned long native_pgd_val(pgd_t pgd)
96{
97 return pgd.pgd;
98}
99
100static inline unsigned long native_pte_val(pte_t pte)
101{
102 return pte.pte_low;
103}
104
105static inline pgd_t native_make_pgd(unsigned long val)
106{
107 return (pgd_t) { val };
108}
109
110static inline pte_t native_make_pte(unsigned long val)
111{
112 return (pte_t) { .pte_low = val };
113}
114
62#define HPAGE_SHIFT 22 115#define HPAGE_SHIFT 22
63#include <asm-generic/pgtable-nopmd.h> 116#include <asm-generic/pgtable-nopmd.h>
64#endif
117#endif /* CONFIG_X86_PAE */
118
65#define PTE_MASK PAGE_MASK 119#define PTE_MASK PAGE_MASK
66 120
67#ifdef CONFIG_HUGETLB_PAGE 121#ifdef CONFIG_HUGETLB_PAGE
@@ -71,13 +125,16 @@ typedef struct { unsigned long pgprot; } pgprot_t;
71#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA 125#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
72#endif 126#endif
73 127
74#define pgd_val(x) ((x).pgd)
75#define pgprot_val(x) ((x).pgprot) 128#define pgprot_val(x) ((x).pgprot)
76
77#define __pte(x) ((pte_t) { (x) } )
78#define __pgd(x) ((pgd_t) { (x) } )
79#define __pgprot(x) ((pgprot_t) { (x) } ) 129#define __pgprot(x) ((pgprot_t) { (x) } )
80 130
131#ifndef CONFIG_PARAVIRT
132#define pgd_val(x) native_pgd_val(x)
133#define __pgd(x) native_make_pgd(x)
134#define pte_val(x) native_pte_val(x)
135#define __pte(x) native_make_pte(x)
136#endif
137
81#endif /* !__ASSEMBLY__ */ 138#endif /* !__ASSEMBLY__ */
82 139
83/* to align the pointer to the (next) page boundary */ 140/* to align the pointer to the (next) page boundary */
@@ -143,9 +200,7 @@ extern int page_is_ram(unsigned long pagenr);
143#include <asm-generic/memory_model.h> 200#include <asm-generic/memory_model.h>
144#include <asm-generic/page.h> 201#include <asm-generic/page.h>
145 202
146#ifndef CONFIG_COMPAT_VDSO
147#define __HAVE_ARCH_GATE_AREA 1 203#define __HAVE_ARCH_GATE_AREA 1
148#endif
149#endif /* __KERNEL__ */ 204#endif /* __KERNEL__ */
150 205
151#endif /* _I386_PAGE_H */ 206#endif /* _I386_PAGE_H */
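Under PAE a pte is stored as two 32-bit words, and the native_pte_val()/native_make_pte() pair above splits and reassembles the 64-bit value. A self-contained round-trip sketch of the same accessors:

#include <stdio.h>

typedef struct { unsigned long pte_low, pte_high; } pte_t;

static pte_t make_pte(unsigned long long val)
{
	return (pte_t) { .pte_low = (unsigned long)val,
			 .pte_high = (unsigned long)(val >> 32) };
}

static unsigned long long pte_val(pte_t pte)
{
	return pte.pte_low | ((unsigned long long)pte.pte_high << 32);
}

int main(void)
{
	pte_t p = make_pte(0x8000000012345067ULL);	/* NX bit + frame + flags */
	printf("pte = %#llx\n", pte_val(p));		/* round-trips losslessly */
	return 0;
}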
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
index e63f1e444fcf..e2e7f98723c5 100644
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -2,20 +2,9 @@
2#define __ASM_PARAVIRT_H 2#define __ASM_PARAVIRT_H
3/* Various instructions on x86 need to be replaced for 3/* Various instructions on x86 need to be replaced for
4 * para-virtualization: those hooks are defined here. */ 4 * para-virtualization: those hooks are defined here. */
5#include <linux/linkage.h>
6#include <linux/stringify.h>
7#include <asm/page.h>
8 5
9#ifdef CONFIG_PARAVIRT 6#ifdef CONFIG_PARAVIRT
10/* These are the most performance critical ops, so we want to be able to patch
11 * callers */
12#define PARAVIRT_IRQ_DISABLE 0
13#define PARAVIRT_IRQ_ENABLE 1
14#define PARAVIRT_RESTORE_FLAGS 2
15#define PARAVIRT_SAVE_FLAGS 3
16#define PARAVIRT_SAVE_FLAGS_IRQ_DISABLE 4
17#define PARAVIRT_INTERRUPT_RETURN 5
18#define PARAVIRT_STI_SYSEXIT 6
7#include <asm/page.h>
19 8
20/* Bitmask of what can be clobbered: usually at least eax. */ 9/* Bitmask of what can be clobbered: usually at least eax. */
21#define CLBR_NONE 0x0 10#define CLBR_NONE 0x0
@@ -25,13 +14,29 @@
25#define CLBR_ANY 0x7 14#define CLBR_ANY 0x7
26 15
27#ifndef __ASSEMBLY__ 16#ifndef __ASSEMBLY__
17#include <linux/types.h>
18#include <linux/cpumask.h>
19#include <asm/kmap_types.h>
20
21struct page;
28struct thread_struct; 22struct thread_struct;
29struct Xgt_desc_struct; 23struct Xgt_desc_struct;
30struct tss_struct; 24struct tss_struct;
31struct mm_struct; 25struct mm_struct;
26struct desc_struct;
27
28/* Lazy mode for batching updates / context switch */
29enum paravirt_lazy_mode {
30 PARAVIRT_LAZY_NONE = 0,
31 PARAVIRT_LAZY_MMU = 1,
32 PARAVIRT_LAZY_CPU = 2,
33 PARAVIRT_LAZY_FLUSH = 3,
34};
35
32struct paravirt_ops 36struct paravirt_ops
33{ 37{
34 unsigned int kernel_rpl; 38 unsigned int kernel_rpl;
39 int shared_kernel_pmd;
35 int paravirt_enabled; 40 int paravirt_enabled;
36 const char *name; 41 const char *name;
37 42
@@ -44,24 +49,33 @@ struct paravirt_ops
44 */ 49 */
45 unsigned (*patch)(u8 type, u16 clobber, void *firstinsn, unsigned len); 50 unsigned (*patch)(u8 type, u16 clobber, void *firstinsn, unsigned len);
46 51
52 /* Basic arch-specific setup */
47 void (*arch_setup)(void); 53 void (*arch_setup)(void);
48 char *(*memory_setup)(void); 54 char *(*memory_setup)(void);
49 void (*init_IRQ)(void); 55 void (*init_IRQ)(void);
56 void (*time_init)(void);
50 57
58 /*
59 * Called before/after init_mm pagetable setup. setup_start
60 * may reset %cr3, and may pre-install parts of the pagetable;
61 * pagetable setup is expected to preserve any existing
62 * mapping.
63 */
64 void (*pagetable_setup_start)(pgd_t *pgd_base);
65 void (*pagetable_setup_done)(pgd_t *pgd_base);
66
67 /* Print a banner to identify the environment */
51 void (*banner)(void); 68 void (*banner)(void);
52 69
70 /* Get and set time of day */
53 unsigned long (*get_wallclock)(void); 71 unsigned long (*get_wallclock)(void);
54 int (*set_wallclock)(unsigned long); 72 int (*set_wallclock)(unsigned long);
55 void (*time_init)(void);
56
57 /* All the function pointers here are declared as "fastcall"
58 so that we get a specific register-based calling
59 convention. This makes it easier to implement inline
60 assembler replacements. */
61 73
74 /* cpuid emulation, mostly so that caps bits can be disabled */
62 void (*cpuid)(unsigned int *eax, unsigned int *ebx, 75 void (*cpuid)(unsigned int *eax, unsigned int *ebx,
63 unsigned int *ecx, unsigned int *edx); 76 unsigned int *ecx, unsigned int *edx);
64 77
78 /* hooks for various privileged instructions */
65 unsigned long (*get_debugreg)(int regno); 79 unsigned long (*get_debugreg)(int regno);
66 void (*set_debugreg)(int regno, unsigned long value); 80 void (*set_debugreg)(int regno, unsigned long value);
67 81
@@ -80,15 +94,23 @@ struct paravirt_ops
80 unsigned long (*read_cr4)(void); 94 unsigned long (*read_cr4)(void);
81 void (*write_cr4)(unsigned long); 95 void (*write_cr4)(unsigned long);
82 96
97 /*
98 * Get/set interrupt state. save_fl and restore_fl are only
99 * expected to use X86_EFLAGS_IF; all other bits
100 * returned from save_fl are undefined, and may be ignored by
101 * restore_fl.
102 */
83 unsigned long (*save_fl)(void); 103 unsigned long (*save_fl)(void);
84 void (*restore_fl)(unsigned long); 104 void (*restore_fl)(unsigned long);
85 void (*irq_disable)(void); 105 void (*irq_disable)(void);
86 void (*irq_enable)(void); 106 void (*irq_enable)(void);
87 void (*safe_halt)(void); 107 void (*safe_halt)(void);
88 void (*halt)(void); 108 void (*halt)(void);
109
89 void (*wbinvd)(void); 110 void (*wbinvd)(void);
90 111
91 /* err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
112 /* MSR, PMC and TSC operations.
113 err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
92 u64 (*read_msr)(unsigned int msr, int *err); 114 u64 (*read_msr)(unsigned int msr, int *err);
93 int (*write_msr)(unsigned int msr, u64 val); 115 int (*write_msr)(unsigned int msr, u64 val);
94 116
@@ -97,6 +119,7 @@ struct paravirt_ops
97 u64 (*get_scheduled_cycles)(void); 119 u64 (*get_scheduled_cycles)(void);
98 unsigned long (*get_cpu_khz)(void); 120 unsigned long (*get_cpu_khz)(void);
99 121
122 /* Segment descriptor handling */
100 void (*load_tr_desc)(void); 123 void (*load_tr_desc)(void);
101 void (*load_gdt)(const struct Xgt_desc_struct *); 124 void (*load_gdt)(const struct Xgt_desc_struct *);
102 void (*load_idt)(const struct Xgt_desc_struct *); 125 void (*load_idt)(const struct Xgt_desc_struct *);
@@ -105,59 +128,98 @@ struct paravirt_ops
105 void (*set_ldt)(const void *desc, unsigned entries); 128 void (*set_ldt)(const void *desc, unsigned entries);
106 unsigned long (*store_tr)(void); 129 unsigned long (*store_tr)(void);
107 void (*load_tls)(struct thread_struct *t, unsigned int cpu); 130 void (*load_tls)(struct thread_struct *t, unsigned int cpu);
108 void (*write_ldt_entry)(void *dt, int entrynum,
109 u32 low, u32 high);
110 void (*write_gdt_entry)(void *dt, int entrynum,
111 u32 low, u32 high);
112 void (*write_idt_entry)(void *dt, int entrynum,
113 u32 low, u32 high);
114 void (*load_esp0)(struct tss_struct *tss,
115 struct thread_struct *thread);
131 void (*write_ldt_entry)(struct desc_struct *,
132 int entrynum, u32 low, u32 high);
133 void (*write_gdt_entry)(struct desc_struct *,
134 int entrynum, u32 low, u32 high);
135 void (*write_idt_entry)(struct desc_struct *,
136 int entrynum, u32 low, u32 high);
137 void (*load_esp0)(struct tss_struct *tss, struct thread_struct *t);
116 138
117 void (*set_iopl_mask)(unsigned mask); 139 void (*set_iopl_mask)(unsigned mask);
118
119 void (*io_delay)(void); 140 void (*io_delay)(void);
120 141
142 /*
143 * Hooks for intercepting the creation/use/destruction of an
144 * mm_struct.
145 */
146 void (*activate_mm)(struct mm_struct *prev,
147 struct mm_struct *next);
148 void (*dup_mmap)(struct mm_struct *oldmm,
149 struct mm_struct *mm);
150 void (*exit_mmap)(struct mm_struct *mm);
151
121#ifdef CONFIG_X86_LOCAL_APIC 152#ifdef CONFIG_X86_LOCAL_APIC
153 /*
154 * Direct APIC operations, principally for VMI. Ideally
155 * these shouldn't be in this interface.
156 */
122 void (*apic_write)(unsigned long reg, unsigned long v); 157 void (*apic_write)(unsigned long reg, unsigned long v);
123 void (*apic_write_atomic)(unsigned long reg, unsigned long v); 158 void (*apic_write_atomic)(unsigned long reg, unsigned long v);
124 unsigned long (*apic_read)(unsigned long reg); 159 unsigned long (*apic_read)(unsigned long reg);
125 void (*setup_boot_clock)(void); 160 void (*setup_boot_clock)(void);
126 void (*setup_secondary_clock)(void); 161 void (*setup_secondary_clock)(void);
162
163 void (*startup_ipi_hook)(int phys_apicid,
164 unsigned long start_eip,
165 unsigned long start_esp);
127#endif 166#endif
128 167
168 /* TLB operations */
129 void (*flush_tlb_user)(void); 169 void (*flush_tlb_user)(void);
130 void (*flush_tlb_kernel)(void); 170 void (*flush_tlb_kernel)(void);
131 void (*flush_tlb_single)(u32 addr);
132
133 void (*map_pt_hook)(int type, pte_t *va, u32 pfn);
171 void (*flush_tlb_single)(unsigned long addr);
172 void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
173 unsigned long va);
134 174
175 /* Hooks for allocating/releasing pagetable pages */
135 void (*alloc_pt)(u32 pfn); 176 void (*alloc_pt)(u32 pfn);
136 void (*alloc_pd)(u32 pfn); 177 void (*alloc_pd)(u32 pfn);
137 void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count); 178 void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
138 void (*release_pt)(u32 pfn); 179 void (*release_pt)(u32 pfn);
139 void (*release_pd)(u32 pfn); 180 void (*release_pd)(u32 pfn);
140 181
182 /* Pagetable manipulation functions */
141 void (*set_pte)(pte_t *ptep, pte_t pteval); 183 void (*set_pte)(pte_t *ptep, pte_t pteval);
142 void (*set_pte_at)(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pteval);
184 void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
185 pte_t *ptep, pte_t pteval);
143 void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval); 186 void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
144 void (*pte_update)(struct mm_struct *mm, u32 addr, pte_t *ptep);
145 void (*pte_update_defer)(struct mm_struct *mm, u32 addr, pte_t *ptep);
187 void (*pte_update)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
188 void (*pte_update_defer)(struct mm_struct *mm,
189 unsigned long addr, pte_t *ptep);
190
191#ifdef CONFIG_HIGHPTE
192 void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
193#endif
194
146#ifdef CONFIG_X86_PAE 195#ifdef CONFIG_X86_PAE
147 void (*set_pte_atomic)(pte_t *ptep, pte_t pteval); 196 void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
148 void (*set_pte_present)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); 197 void (*set_pte_present)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte);
149 void (*set_pud)(pud_t *pudp, pud_t pudval); 198 void (*set_pud)(pud_t *pudp, pud_t pudval);
150 void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); 199 void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
151 void (*pmd_clear)(pmd_t *pmdp); 200 void (*pmd_clear)(pmd_t *pmdp);
201
202 unsigned long long (*pte_val)(pte_t);
203 unsigned long long (*pmd_val)(pmd_t);
204 unsigned long long (*pgd_val)(pgd_t);
205
206 pte_t (*make_pte)(unsigned long long pte);
207 pmd_t (*make_pmd)(unsigned long long pmd);
208 pgd_t (*make_pgd)(unsigned long long pgd);
209#else
210 unsigned long (*pte_val)(pte_t);
211 unsigned long (*pgd_val)(pgd_t);
212
213 pte_t (*make_pte)(unsigned long pte);
214 pgd_t (*make_pgd)(unsigned long pgd);
152#endif 215#endif
153 216
154 void (*set_lazy_mode)(int mode);
217 /* Set deferred update mode, used for batching operations. */
218 void (*set_lazy_mode)(enum paravirt_lazy_mode mode);
155 219
156 /* These two are jmp to, not actually called. */ 220 /* These two are jmp to, not actually called. */
157 void (*irq_enable_sysexit)(void); 221 void (*irq_enable_sysexit)(void);
158 void (*iret)(void); 222 void (*iret)(void);
159
160 void (*startup_ipi_hook)(int phys_apicid, unsigned long start_eip, unsigned long start_esp);
161}; 223};
162 224
163/* Mark a paravirt probe function. */ 225/* Mark a paravirt probe function. */
@@ -167,23 +229,202 @@ struct paravirt_ops
167 229
168extern struct paravirt_ops paravirt_ops; 230extern struct paravirt_ops paravirt_ops;
169 231
170#define paravirt_enabled() (paravirt_ops.paravirt_enabled)
232#define PARAVIRT_PATCH(x) \
233 (offsetof(struct paravirt_ops, x) / sizeof(void *))
234
235#define paravirt_type(type) \
236 [paravirt_typenum] "i" (PARAVIRT_PATCH(type))
237#define paravirt_clobber(clobber) \
238 [paravirt_clobber] "i" (clobber)
239
240/*
241 * Generate some code, and mark it as patchable by the
242 * apply_paravirt() alternate instruction patcher.
243 */
244#define _paravirt_alt(insn_string, type, clobber) \
245 "771:\n\t" insn_string "\n" "772:\n" \
246 ".pushsection .parainstructions,\"a\"\n" \
247 " .long 771b\n" \
248 " .byte " type "\n" \
249 " .byte 772b-771b\n" \
250 " .short " clobber "\n" \
251 ".popsection\n"
252
253/* Generate patchable code, with the default asm parameters. */
254#define paravirt_alt(insn_string) \
255 _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
256
257unsigned paravirt_patch_nop(void);
258unsigned paravirt_patch_ignore(unsigned len);
259unsigned paravirt_patch_call(void *target, u16 tgt_clobbers,
260 void *site, u16 site_clobbers,
261 unsigned len);
262unsigned paravirt_patch_jmp(void *target, void *site, unsigned len);
263unsigned paravirt_patch_default(u8 type, u16 clobbers, void *site, unsigned len);
264
265unsigned paravirt_patch_insns(void *site, unsigned len,
266 const char *start, const char *end);
267
268
269/*
270 * This generates an indirect call based on the operation type number.
271 * The type number, computed in PARAVIRT_PATCH, is derived from the
272 * offset into the paravirt_ops structure, and can therefore be freely
273 * converted back into a structure offset.
274 */
275#define PARAVIRT_CALL "call *(paravirt_ops+%c[paravirt_typenum]*4);"
276
277/*
278 * These macros are intended to wrap calls into a paravirt_ops
279 * operation, so that they can be later identified and patched at
280 * runtime.
281 *
282 * Normally, a call to a pv_op function is a simple indirect call:
283 * (paravirt_ops.operations)(args...).
284 *
285 * Unfortunately, this is a relatively slow operation for modern CPUs,
286 * because it cannot necessarily determine what the destination
287 * address is. In this case, the address is a runtime constant, so at
288 * the very least we can patch the call to be a simple direct call, or
289 * ideally, patch an inline implementation into the callsite. (Direct
290 * calls are essentially free, because the call and return addresses
291 * are completely predictable.)
292 *
293 * These macros rely on the standard gcc "regparm(3)" calling
294 * convention, in which the first three arguments are placed in %eax,
295 * %edx, %ecx (in that order), and the remaining arguments are placed
296 * on the stack. All caller-save registers (eax,edx,ecx) are expected
297 * to be modified (either clobbered or used for return values).
298 *
299 * The call instruction itself is marked by placing its start address
300 * and size into the .parainstructions section, so that
301 * apply_paravirt() in arch/i386/kernel/alternative.c can do the
302 * appropriate patching under the control of the backend paravirt_ops
303 * implementation.
304 *
305 * Unfortunately there's no way to get gcc to generate the args setup
306 * for the call, and then allow the call itself to be generated by an
307 * inline asm. Because of this, we must do the complete arg setup and
308 * return value handling from within these macros. This is fairly
309 * cumbersome.
310 *
311 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
312 * It could be extended to more arguments, but there would be little
313 * to be gained from that. For each number of arguments, there are
314 * the two VCALL and CALL variants for void and non-void functions.
315 *
316 * When there is a return value, the invoker of the macro must specify
317 * the return type. The macro then uses sizeof() on that type to
318 * determine whether it's a 32- or 64-bit value, and places the return
319 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
320 * 64-bit).
321 *
322 * 64-bit arguments are passed as a pair of adjacent 32-bit arguments
323 * in low,high order.
324 *
325 * Small structures are passed and returned in registers. The macro
326 * calling convention can't directly deal with this, so the wrapper
327 * functions must do this.
328 *
329 * These PVOP_* macros are only defined within this header. This
330 * means that all uses must be wrapped in inline functions. This also
331 * makes sure the incoming and outgoing types are always correct.
332 */
333#define __PVOP_CALL(rettype, op, pre, post, ...) \
334 ({ \
335 rettype __ret; \
336 unsigned long __eax, __edx, __ecx; \
337 if (sizeof(rettype) > sizeof(unsigned long)) { \
338 asm volatile(pre \
339 paravirt_alt(PARAVIRT_CALL) \
340 post \
341 : "=a" (__eax), "=d" (__edx), \
342 "=c" (__ecx) \
343 : paravirt_type(op), \
344 paravirt_clobber(CLBR_ANY), \
345 ##__VA_ARGS__ \
346 : "memory", "cc"); \
347 __ret = (rettype)((((u64)__edx) << 32) | __eax); \
348 } else { \
349 asm volatile(pre \
350 paravirt_alt(PARAVIRT_CALL) \
351 post \
352 : "=a" (__eax), "=d" (__edx), \
353 "=c" (__ecx) \
354 : paravirt_type(op), \
355 paravirt_clobber(CLBR_ANY), \
356 ##__VA_ARGS__ \
357 : "memory", "cc"); \
358 __ret = (rettype)__eax; \
359 } \
360 __ret; \
361 })
362#define __PVOP_VCALL(op, pre, post, ...) \
363 ({ \
364 unsigned long __eax, __edx, __ecx; \
365 asm volatile(pre \
366 paravirt_alt(PARAVIRT_CALL) \
367 post \
368 : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
369 : paravirt_type(op), \
370 paravirt_clobber(CLBR_ANY), \
371 ##__VA_ARGS__ \
372 : "memory", "cc"); \
373 })
374
375#define PVOP_CALL0(rettype, op) \
376 __PVOP_CALL(rettype, op, "", "")
377#define PVOP_VCALL0(op) \
378 __PVOP_VCALL(op, "", "")
379
380#define PVOP_CALL1(rettype, op, arg1) \
381 __PVOP_CALL(rettype, op, "", "", "0" ((u32)(arg1)))
382#define PVOP_VCALL1(op, arg1) \
383 __PVOP_VCALL(op, "", "", "0" ((u32)(arg1)))
384
385#define PVOP_CALL2(rettype, op, arg1, arg2) \
386 __PVOP_CALL(rettype, op, "", "", "0" ((u32)(arg1)), "1" ((u32)(arg2)))
387#define PVOP_VCALL2(op, arg1, arg2) \
388 __PVOP_VCALL(op, "", "", "0" ((u32)(arg1)), "1" ((u32)(arg2)))
389
390#define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \
391 __PVOP_CALL(rettype, op, "", "", "0" ((u32)(arg1)), \
392 "1"((u32)(arg2)), "2"((u32)(arg3)))
393#define PVOP_VCALL3(op, arg1, arg2, arg3) \
394 __PVOP_VCALL(op, "", "", "0" ((u32)(arg1)), "1"((u32)(arg2)), \
395 "2"((u32)(arg3)))
396
397#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
398 __PVOP_CALL(rettype, op, \
399 "push %[_arg4];", "lea 4(%%esp),%%esp;", \
400 "0" ((u32)(arg1)), "1" ((u32)(arg2)), \
401 "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
402#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
403 __PVOP_VCALL(op, \
404 "push %[_arg4];", "lea 4(%%esp),%%esp;", \
405 "0" ((u32)(arg1)), "1" ((u32)(arg2)), \
406 "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
407
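PARAVIRT_PATCH turns a paravirt_ops field into a small integer (its pointer-sized offset into the struct), which is what the patchable call sites embed. A userspace approximation of that offset-to-slot lookup — the cast through void ** mirrors the kernel trick and is not strictly portable, and all names here are invented:

#include <stdio.h>
#include <stddef.h>

struct ops {
	unsigned long (*read_cr2)(void);
	void (*write_cr2)(unsigned long);
};

static unsigned long fake_read_cr2(void)    { return 0xdeadbeefUL; }
static void fake_write_cr2(unsigned long v) { (void)v; }

static struct ops ops = { fake_read_cr2, fake_write_cr2 };

/* operation number = word offset into the ops structure */
#define PATCH_IDX(field) (offsetof(struct ops, field) / sizeof(void *))

int main(void)
{
	unsigned long (*fn)(void);

	fn = (unsigned long (*)(void))((void **)&ops)[PATCH_IDX(read_cr2)];
	printf("slot %zu -> %#lx\n", PATCH_IDX(read_cr2), fn());
	return 0;
}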
408static inline int paravirt_enabled(void)
409{
410 return paravirt_ops.paravirt_enabled;
411}
171 412
172static inline void load_esp0(struct tss_struct *tss, 413static inline void load_esp0(struct tss_struct *tss,
173 struct thread_struct *thread) 414 struct thread_struct *thread)
174{ 415{
175 paravirt_ops.load_esp0(tss, thread);
416 PVOP_VCALL2(load_esp0, tss, thread);
176} 417}
177 418
178#define ARCH_SETUP paravirt_ops.arch_setup(); 419#define ARCH_SETUP paravirt_ops.arch_setup();
179static inline unsigned long get_wallclock(void) 420static inline unsigned long get_wallclock(void)
180{ 421{
181 return paravirt_ops.get_wallclock();
422 return PVOP_CALL0(unsigned long, get_wallclock);
182} 423}
183 424
184static inline int set_wallclock(unsigned long nowtime) 425static inline int set_wallclock(unsigned long nowtime)
185{ 426{
186 return paravirt_ops.set_wallclock(nowtime);
427 return PVOP_CALL1(int, set_wallclock, nowtime);
187} 428}
188 429
189static inline void (*choose_time_init(void))(void) 430static inline void (*choose_time_init(void))(void)
@@ -195,113 +436,208 @@ static inline void (*choose_time_init(void))(void)
195static inline void __cpuid(unsigned int *eax, unsigned int *ebx, 436static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
196 unsigned int *ecx, unsigned int *edx) 437 unsigned int *ecx, unsigned int *edx)
197{ 438{
198 paravirt_ops.cpuid(eax, ebx, ecx, edx);
439 PVOP_VCALL4(cpuid, eax, ebx, ecx, edx);
199} 440}
200 441
201/* 442/*
202 * These special macros can be used to get or set a debugging register 443 * These special macros can be used to get or set a debugging register
203 */ 444 */
204#define get_debugreg(var, reg) var = paravirt_ops.get_debugreg(reg)
205#define set_debugreg(val, reg) paravirt_ops.set_debugreg(reg, val)
445static inline unsigned long paravirt_get_debugreg(int reg)
446{
447 return PVOP_CALL1(unsigned long, get_debugreg, reg);
448}
449#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
450static inline void set_debugreg(unsigned long val, int reg)
451{
452 PVOP_VCALL2(set_debugreg, reg, val);
453}
454
455static inline void clts(void)
456{
457 PVOP_VCALL0(clts);
458}
206 459
207#define clts() paravirt_ops.clts()
208
209#define read_cr0() paravirt_ops.read_cr0()
210#define write_cr0(x) paravirt_ops.write_cr0(x)
211
212#define read_cr2() paravirt_ops.read_cr2()
213#define write_cr2(x) paravirt_ops.write_cr2(x)
214
215#define read_cr3() paravirt_ops.read_cr3()
216#define write_cr3(x) paravirt_ops.write_cr3(x)
217
218#define read_cr4() paravirt_ops.read_cr4()
219#define read_cr4_safe(x) paravirt_ops.read_cr4_safe()
220#define write_cr4(x) paravirt_ops.write_cr4(x)
460static inline unsigned long read_cr0(void)
461{
462 return PVOP_CALL0(unsigned long, read_cr0);
463}
464
465static inline void write_cr0(unsigned long x)
466{
467 PVOP_VCALL1(write_cr0, x);
468}
469
470static inline unsigned long read_cr2(void)
471{
472 return PVOP_CALL0(unsigned long, read_cr2);
473}
474
475static inline void write_cr2(unsigned long x)
476{
477 PVOP_VCALL1(write_cr2, x);
478}
479
480static inline unsigned long read_cr3(void)
481{
482 return PVOP_CALL0(unsigned long, read_cr3);
483}
484
485static inline void write_cr3(unsigned long x)
486{
487 PVOP_VCALL1(write_cr3, x);
488}
489
490static inline unsigned long read_cr4(void)
491{
492 return PVOP_CALL0(unsigned long, read_cr4);
493}
494static inline unsigned long read_cr4_safe(void)
495{
496 return PVOP_CALL0(unsigned long, read_cr4_safe);
497}
498
499static inline void write_cr4(unsigned long x)
500{
501 PVOP_VCALL1(write_cr4, x);
502}
221 503
222static inline void raw_safe_halt(void) 504static inline void raw_safe_halt(void)
223{ 505{
224 paravirt_ops.safe_halt();
506 PVOP_VCALL0(safe_halt);
225} 507}
226 508
227static inline void halt(void) 509static inline void halt(void)
228{ 510{
229 paravirt_ops.safe_halt();
511 PVOP_VCALL0(safe_halt);
512}
513
514static inline void wbinvd(void)
515{
516 PVOP_VCALL0(wbinvd);
230} 517}
231#define wbinvd() paravirt_ops.wbinvd()
232 518
233#define get_kernel_rpl() (paravirt_ops.kernel_rpl) 519#define get_kernel_rpl() (paravirt_ops.kernel_rpl)
234 520
235#define rdmsr(msr,val1,val2) do { \
236 int _err; \
237 u64 _l = paravirt_ops.read_msr(msr,&_err); \
238 val1 = (u32)_l; \
239 val2 = _l >> 32; \
521static inline u64 paravirt_read_msr(unsigned msr, int *err)
522{
523 return PVOP_CALL2(u64, read_msr, msr, err);
524}
525static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
526{
527 return PVOP_CALL3(int, write_msr, msr, low, high);
528}
529
530/* These should all do BUG_ON(_err), but our headers are too tangled. */
531#define rdmsr(msr,val1,val2) do { \
532 int _err; \
533 u64 _l = paravirt_read_msr(msr, &_err); \
534 val1 = (u32)_l; \
535 val2 = _l >> 32; \
240} while(0) 536} while(0)
241 537
242#define wrmsr(msr,val1,val2) do { \ 538#define wrmsr(msr,val1,val2) do { \
243 u64 _l = ((u64)(val2) << 32) | (val1); \
244 paravirt_ops.write_msr((msr), _l); \
539 paravirt_write_msr(msr, val1, val2); \
245} while(0) 540} while(0)
246 541
247#define rdmsrl(msr,val) do { \ 542#define rdmsrl(msr,val) do { \
248 int _err; \ 543 int _err; \
249 val = paravirt_ops.read_msr((msr),&_err); \ 544 val = paravirt_read_msr(msr, &_err); \
250} while(0) 545} while(0)
251 546
252#define wrmsrl(msr,val) (paravirt_ops.write_msr((msr),(val))) 547#define wrmsrl(msr,val) ((void)paravirt_write_msr(msr, val, 0))
253#define wrmsr_safe(msr,a,b) ({ \ 548#define wrmsr_safe(msr,a,b) paravirt_write_msr(msr, a, b)
254 u64 _l = ((u64)(b) << 32) | (a); \
255 paravirt_ops.write_msr((msr),_l); \
256})
257 549
258/* rdmsr with exception handling */ 550/* rdmsr with exception handling */
259#define rdmsr_safe(msr,a,b) ({ \ 551#define rdmsr_safe(msr,a,b) ({ \
260 int _err; \ 552 int _err; \
261 u64 _l = paravirt_ops.read_msr(msr,&_err); \ 553 u64 _l = paravirt_read_msr(msr, &_err); \
262 (*a) = (u32)_l; \ 554 (*a) = (u32)_l; \
263 (*b) = _l >> 32; \ 555 (*b) = _l >> 32; \
264 _err; }) 556 _err; })
265 557
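
The rdmsr/wrmsr macros keep their old two-register interface while
routing through the new paravirt_read_msr()/paravirt_write_msr()
helpers, splitting and rejoining the 64-bit MSR value by hand. A usage
sketch (the MSR chosen is illustrative):

	u32 lo, hi;

	rdmsr(MSR_IA32_APICBASE, lo, hi);	/* lo = (u32)val, hi = val >> 32 */
	wrmsr(MSR_IA32_APICBASE, lo, hi);	/* rejoined as ((u64)hi << 32) | lo */
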
266#define rdtsc(low,high) do { \ 558
267 u64 _l = paravirt_ops.read_tsc(); \ 559static inline u64 paravirt_read_tsc(void)
268 low = (u32)_l; \ 560{
269 high = _l >> 32; \ 561 return PVOP_CALL0(u64, read_tsc);
562}
563#define rdtsc(low,high) do { \
564 u64 _l = paravirt_read_tsc(); \
565 low = (u32)_l; \
566 high = _l >> 32; \
270} while(0) 567} while(0)
271 568
272#define rdtscl(low) do { \ 569#define rdtscl(low) do { \
273 u64 _l = paravirt_ops.read_tsc(); \ 570 u64 _l = paravirt_read_tsc(); \
274 low = (int)_l; \ 571 low = (int)_l; \
275} while(0) 572} while(0)
276 573
277#define rdtscll(val) (val = paravirt_ops.read_tsc()) 574#define rdtscll(val) (val = paravirt_read_tsc())
278 575
279#define get_scheduled_cycles(val) (val = paravirt_ops.get_scheduled_cycles()) 576#define get_scheduled_cycles(val) (val = paravirt_ops.get_scheduled_cycles())
280#define calculate_cpu_khz() (paravirt_ops.get_cpu_khz()) 577#define calculate_cpu_khz() (paravirt_ops.get_cpu_khz())
281 578
282#define write_tsc(val1,val2) wrmsr(0x10, val1, val2) 579#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
283 580
284#define rdpmc(counter,low,high) do { \ 581static inline unsigned long long paravirt_read_pmc(int counter)
285 u64 _l = paravirt_ops.read_pmc(); \ 582{
286 low = (u32)_l; \ 583 return PVOP_CALL1(u64, read_pmc, counter);
287 high = _l >> 32; \ 584}
585
586#define rdpmc(counter,low,high) do { \
587 u64 _l = paravirt_read_pmc(counter); \
588 low = (u32)_l; \
589 high = _l >> 32; \
288} while(0) 590} while(0)
289 591
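
The TSC and PMC macros likewise stay source-compatible with the
non-paravirt versions, so existing callers need no change:

	unsigned long long t;
	unsigned long lo;

	rdtscll(t);	/* full 64-bit counter via paravirt_read_tsc() */
	rdtscl(lo);	/* low 32 bits only */
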
290#define load_TR_desc() (paravirt_ops.load_tr_desc()) 592static inline void load_TR_desc(void)
291#define load_gdt(dtr) (paravirt_ops.load_gdt(dtr)) 593{
292#define load_idt(dtr) (paravirt_ops.load_idt(dtr)) 594 PVOP_VCALL0(load_tr_desc);
293#define set_ldt(addr, entries) (paravirt_ops.set_ldt((addr), (entries))) 595}
294#define store_gdt(dtr) (paravirt_ops.store_gdt(dtr)) 596static inline void load_gdt(const struct Xgt_desc_struct *dtr)
295#define store_idt(dtr) (paravirt_ops.store_idt(dtr)) 597{
296#define store_tr(tr) ((tr) = paravirt_ops.store_tr()) 598 PVOP_VCALL1(load_gdt, dtr);
297#define load_TLS(t,cpu) (paravirt_ops.load_tls((t),(cpu))) 599}
298#define write_ldt_entry(dt, entry, low, high) \ 600static inline void load_idt(const struct Xgt_desc_struct *dtr)
299 (paravirt_ops.write_ldt_entry((dt), (entry), (low), (high))) 601{
300#define write_gdt_entry(dt, entry, low, high) \ 602 PVOP_VCALL1(load_idt, dtr);
301 (paravirt_ops.write_gdt_entry((dt), (entry), (low), (high))) 603}
302#define write_idt_entry(dt, entry, low, high) \ 604static inline void set_ldt(const void *addr, unsigned entries)
303 (paravirt_ops.write_idt_entry((dt), (entry), (low), (high))) 605{
304#define set_iopl_mask(mask) (paravirt_ops.set_iopl_mask(mask)) 606 PVOP_VCALL2(set_ldt, addr, entries);
607}
608static inline void store_gdt(struct Xgt_desc_struct *dtr)
609{
610 PVOP_VCALL1(store_gdt, dtr);
611}
612static inline void store_idt(struct Xgt_desc_struct *dtr)
613{
614 PVOP_VCALL1(store_idt, dtr);
615}
616static inline unsigned long paravirt_store_tr(void)
617{
618 return PVOP_CALL0(unsigned long, store_tr);
619}
620#define store_tr(tr) ((tr) = paravirt_store_tr())
621static inline void load_TLS(struct thread_struct *t, unsigned cpu)
622{
623 PVOP_VCALL2(load_tls, t, cpu);
624}
625static inline void write_ldt_entry(void *dt, int entry, u32 low, u32 high)
626{
627 PVOP_VCALL4(write_ldt_entry, dt, entry, low, high);
628}
629static inline void write_gdt_entry(void *dt, int entry, u32 low, u32 high)
630{
631 PVOP_VCALL4(write_gdt_entry, dt, entry, low, high);
632}
633static inline void write_idt_entry(void *dt, int entry, u32 low, u32 high)
634{
635 PVOP_VCALL4(write_idt_entry, dt, entry, low, high);
636}
637static inline void set_iopl_mask(unsigned mask)
638{
639 PVOP_VCALL1(set_iopl_mask, mask);
640}
305 641
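
Turning the descriptor-table macros into inline functions also gives
them real prototypes: load_gdt()/load_idt() now take a typed
const struct Xgt_desc_struct * rather than an unchecked macro argument.
Xgt_desc_struct (declared in <asm/desc.h>) is essentially the packed
limit/base pair that lgdt and lidt consume; the layout below is an
illustrative stand-in, not the header's exact definition:

	struct Xgt_desc_model {			/* illustrative only */
		unsigned short size;		/* table limit in bytes - 1 */
		unsigned long address;		/* linear base address */
	} __attribute__((packed));
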
306/* The paravirtualized I/O functions */ 642/* The paravirtualized I/O functions */
307static inline void slow_down_io(void) { 643static inline void slow_down_io(void) {
@@ -319,215 +655,390 @@ static inline void slow_down_io(void) {
319 */ 655 */
320static inline void apic_write(unsigned long reg, unsigned long v) 656static inline void apic_write(unsigned long reg, unsigned long v)
321{ 657{
322 paravirt_ops.apic_write(reg,v); 658 PVOP_VCALL2(apic_write, reg, v);
323} 659}
324 660
325static inline void apic_write_atomic(unsigned long reg, unsigned long v) 661static inline void apic_write_atomic(unsigned long reg, unsigned long v)
326{ 662{
327 paravirt_ops.apic_write_atomic(reg,v); 663 PVOP_VCALL2(apic_write_atomic, reg, v);
328} 664}
329 665
330static inline unsigned long apic_read(unsigned long reg) 666static inline unsigned long apic_read(unsigned long reg)
331{ 667{
332 return paravirt_ops.apic_read(reg); 668 return PVOP_CALL1(unsigned long, apic_read, reg);
333} 669}
334 670
335static inline void setup_boot_clock(void) 671static inline void setup_boot_clock(void)
336{ 672{
337 paravirt_ops.setup_boot_clock(); 673 PVOP_VCALL0(setup_boot_clock);
338} 674}
339 675
340static inline void setup_secondary_clock(void) 676static inline void setup_secondary_clock(void)
341{ 677{
342 paravirt_ops.setup_secondary_clock(); 678 PVOP_VCALL0(setup_secondary_clock);
343} 679}
344#endif 680#endif
345 681
682static inline void paravirt_pagetable_setup_start(pgd_t *base)
683{
684 if (paravirt_ops.pagetable_setup_start)
685 (*paravirt_ops.pagetable_setup_start)(base);
686}
687
688static inline void paravirt_pagetable_setup_done(pgd_t *base)
689{
690 if (paravirt_ops.pagetable_setup_done)
691 (*paravirt_ops.pagetable_setup_done)(base);
692}
693
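
pagetable_setup_start/pagetable_setup_done are the only ops here called
through an explicit NULL check, so a backend may leave the field empty
and skip the work entirely. A hypothetical backend that does want the
hooks would fill them in during its init code:

	/* Hypothetical backend init; the function names are placeholders. */
	paravirt_ops.pagetable_setup_start = my_pagetable_setup_start;
	paravirt_ops.pagetable_setup_done  = my_pagetable_setup_done;
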
346#ifdef CONFIG_SMP 694#ifdef CONFIG_SMP
347static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip, 695static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
348 unsigned long start_esp) 696 unsigned long start_esp)
349{ 697{
350 return paravirt_ops.startup_ipi_hook(phys_apicid, start_eip, start_esp); 698 PVOP_VCALL3(startup_ipi_hook, phys_apicid, start_eip, start_esp);
351} 699}
352#endif 700#endif
353 701
354#define __flush_tlb() paravirt_ops.flush_tlb_user() 702static inline void paravirt_activate_mm(struct mm_struct *prev,
355#define __flush_tlb_global() paravirt_ops.flush_tlb_kernel() 703 struct mm_struct *next)
356#define __flush_tlb_single(addr) paravirt_ops.flush_tlb_single(addr) 704{
705 PVOP_VCALL2(activate_mm, prev, next);
706}
357 707
358#define paravirt_map_pt_hook(type, va, pfn) paravirt_ops.map_pt_hook(type, va, pfn) 708static inline void arch_dup_mmap(struct mm_struct *oldmm,
709 struct mm_struct *mm)
710{
711 PVOP_VCALL2(dup_mmap, oldmm, mm);
712}
359 713
360#define paravirt_alloc_pt(pfn) paravirt_ops.alloc_pt(pfn) 714static inline void arch_exit_mmap(struct mm_struct *mm)
361#define paravirt_release_pt(pfn) paravirt_ops.release_pt(pfn) 715{
716 PVOP_VCALL1(exit_mmap, mm);
717}
362 718
363#define paravirt_alloc_pd(pfn) paravirt_ops.alloc_pd(pfn) 719static inline void __flush_tlb(void)
364#define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) \ 720{
365 paravirt_ops.alloc_pd_clone(pfn, clonepfn, start, count) 721 PVOP_VCALL0(flush_tlb_user);
366#define paravirt_release_pd(pfn) paravirt_ops.release_pd(pfn) 722}
723static inline void __flush_tlb_global(void)
724{
725 PVOP_VCALL0(flush_tlb_kernel);
726}
727static inline void __flush_tlb_single(unsigned long addr)
728{
729 PVOP_VCALL1(flush_tlb_single, addr);
730}
367 731
368static inline void set_pte(pte_t *ptep, pte_t pteval) 732static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
733 unsigned long va)
369{ 734{
370 paravirt_ops.set_pte(ptep, pteval); 735 PVOP_VCALL3(flush_tlb_others, &cpumask, mm, va);
371} 736}
372 737
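
Note the &cpumask in flush_tlb_others(): the wrapper takes the mask by
value for source compatibility but hands the op a pointer, presumably so
a large cpumask_t is not copied through the call. The op's signature is
then of the form (an assumption, inferred from the call above):

	void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
				 unsigned long va);
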
373static inline void set_pte_at(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pteval) 738static inline void paravirt_alloc_pt(unsigned pfn)
374{ 739{
375 paravirt_ops.set_pte_at(mm, addr, ptep, pteval); 740 PVOP_VCALL1(alloc_pt, pfn);
741}
742static inline void paravirt_release_pt(unsigned pfn)
743{
744 PVOP_VCALL1(release_pt, pfn);
376} 745}
377 746
378static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval) 747static inline void paravirt_alloc_pd(unsigned pfn)
379{ 748{
380 paravirt_ops.set_pmd(pmdp, pmdval); 749 PVOP_VCALL1(alloc_pd, pfn);
381} 750}
382 751
383static inline void pte_update(struct mm_struct *mm, u32 addr, pte_t *ptep) 752static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn,
753 unsigned start, unsigned count)
384{ 754{
385 paravirt_ops.pte_update(mm, addr, ptep); 755 PVOP_VCALL4(alloc_pd_clone, pfn, clonepfn, start, count);
756}
757static inline void paravirt_release_pd(unsigned pfn)
758{
759 PVOP_VCALL1(release_pd, pfn);
386} 760}
387 761
388static inline void pte_update_defer(struct mm_struct *mm, u32 addr, pte_t *ptep) 762#ifdef CONFIG_HIGHPTE
763static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
389{ 764{
390 paravirt_ops.pte_update_defer(mm, addr, ptep); 765 unsigned long ret;
766 ret = PVOP_CALL2(unsigned long, kmap_atomic_pte, page, type);
767 return (void *)ret;
768}
769#endif
770
771static inline void pte_update(struct mm_struct *mm, unsigned long addr,
772 pte_t *ptep)
773{
774 PVOP_VCALL3(pte_update, mm, addr, ptep);
775}
776
777static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
778 pte_t *ptep)
779{
780 PVOP_VCALL3(pte_update_defer, mm, addr, ptep);
391} 781}
392 782
393#ifdef CONFIG_X86_PAE 783#ifdef CONFIG_X86_PAE
784static inline pte_t __pte(unsigned long long val)
785{
786 unsigned long long ret = PVOP_CALL2(unsigned long long, make_pte,
787 val, val >> 32);
788 return (pte_t) { ret, ret >> 32 };
789}
790
791static inline pmd_t __pmd(unsigned long long val)
792{
793 return (pmd_t) { PVOP_CALL2(unsigned long long, make_pmd, val, val >> 32) };
794}
795
796static inline pgd_t __pgd(unsigned long long val)
797{
798 return (pgd_t) { PVOP_CALL2(unsigned long long, make_pgd, val, val >> 32) };
799}
800
801static inline unsigned long long pte_val(pte_t x)
802{
803 return PVOP_CALL2(unsigned long long, pte_val, x.pte_low, x.pte_high);
804}
805
806static inline unsigned long long pmd_val(pmd_t x)
807{
808 return PVOP_CALL2(unsigned long long, pmd_val, x.pmd, x.pmd >> 32);
809}
810
811static inline unsigned long long pgd_val(pgd_t x)
812{
813 return PVOP_CALL2(unsigned long long, pgd_val, x.pgd, x.pgd >> 32);
814}
815
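
Under PAE the pte/pmd/pgd values are 64-bit, but the 32-bit calling
convention passes them as two 32-bit words, so every wrapper here splits
the value into (low, high) halves and reassembles the result. A
hypothetical backend implementation of make_pte shows the receiving end
of that split:

	/* Hypothetical backend: the low word arrives first, matching the
	 * PVOP_CALL2(..., val, val >> 32) split used by the wrappers. */
	static unsigned long long my_make_pte(unsigned long low, unsigned long high)
	{
		return ((unsigned long long)high << 32) | low;
	}
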
816static inline void set_pte(pte_t *ptep, pte_t pteval)
817{
818 PVOP_VCALL3(set_pte, ptep, pteval.pte_low, pteval.pte_high);
819}
820
821static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
822 pte_t *ptep, pte_t pteval)
823{
824 /* 5 arg words */
825 paravirt_ops.set_pte_at(mm, addr, ptep, pteval);
826}
827
394static inline void set_pte_atomic(pte_t *ptep, pte_t pteval) 828static inline void set_pte_atomic(pte_t *ptep, pte_t pteval)
395{ 829{
396 paravirt_ops.set_pte_atomic(ptep, pteval); 830 PVOP_VCALL3(set_pte_atomic, ptep, pteval.pte_low, pteval.pte_high);
397} 831}
398 832
399static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) 833static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
834 pte_t *ptep, pte_t pte)
400{ 835{
836 /* 5 arg words */
401 paravirt_ops.set_pte_present(mm, addr, ptep, pte); 837 paravirt_ops.set_pte_present(mm, addr, ptep, pte);
402} 838}
403 839
840static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
841{
842 PVOP_VCALL3(set_pmd, pmdp, pmdval.pmd, pmdval.pmd >> 32);
843}
844
404static inline void set_pud(pud_t *pudp, pud_t pudval) 845static inline void set_pud(pud_t *pudp, pud_t pudval)
405{ 846{
406 paravirt_ops.set_pud(pudp, pudval); 847 PVOP_VCALL3(set_pud, pudp, pudval.pgd.pgd, pudval.pgd.pgd >> 32);
407} 848}
408 849
409static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 850static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
410{ 851{
411 paravirt_ops.pte_clear(mm, addr, ptep); 852 PVOP_VCALL3(pte_clear, mm, addr, ptep);
412} 853}
413 854
414static inline void pmd_clear(pmd_t *pmdp) 855static inline void pmd_clear(pmd_t *pmdp)
415{ 856{
416 paravirt_ops.pmd_clear(pmdp); 857 PVOP_VCALL1(pmd_clear, pmdp);
417} 858}
418#endif
419 859
420/* Lazy mode for batching updates / context switch */ 860#else /* !CONFIG_X86_PAE */
421#define PARAVIRT_LAZY_NONE 0 861
422#define PARAVIRT_LAZY_MMU 1 862static inline pte_t __pte(unsigned long val)
423#define PARAVIRT_LAZY_CPU 2 863{
424#define PARAVIRT_LAZY_FLUSH 3 864 return (pte_t) { PVOP_CALL1(unsigned long, make_pte, val) };
865}
866
867static inline pgd_t __pgd(unsigned long val)
868{
869 return (pgd_t) { PVOP_CALL1(unsigned long, make_pgd, val) };
870}
871
872static inline unsigned long pte_val(pte_t x)
873{
874 return PVOP_CALL1(unsigned long, pte_val, x.pte_low);
875}
876
877static inline unsigned long pgd_val(pgd_t x)
878{
879 return PVOP_CALL1(unsigned long, pgd_val, x.pgd);
880}
881
882static inline void set_pte(pte_t *ptep, pte_t pteval)
883{
884 PVOP_VCALL2(set_pte, ptep, pteval.pte_low);
885}
886
887static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
888 pte_t *ptep, pte_t pteval)
889{
890 PVOP_VCALL4(set_pte_at, mm, addr, ptep, pteval.pte_low);
891}
892
893static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
894{
895 PVOP_VCALL2(set_pmd, pmdp, pmdval.pud.pgd.pgd);
896}
897#endif /* CONFIG_X86_PAE */
425 898
426#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE 899#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE
427#define arch_enter_lazy_cpu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_CPU) 900static inline void arch_enter_lazy_cpu_mode(void)
428#define arch_leave_lazy_cpu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_NONE) 901{
429#define arch_flush_lazy_cpu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_FLUSH) 902 PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_CPU);
903}
904
905static inline void arch_leave_lazy_cpu_mode(void)
906{
907 PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_NONE);
908}
909
910static inline void arch_flush_lazy_cpu_mode(void)
911{
912 PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_FLUSH);
913}
914
430 915
431#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE 916#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
432#define arch_enter_lazy_mmu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_MMU) 917static inline void arch_enter_lazy_mmu_mode(void)
433#define arch_leave_lazy_mmu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_NONE) 918{
434#define arch_flush_lazy_mmu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_FLUSH) 919 PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_MMU);
920}
921
922static inline void arch_leave_lazy_mmu_mode(void)
923{
924 PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_NONE);
925}
926
927static inline void arch_flush_lazy_mmu_mode(void)
928{
929 PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_FLUSH);
930}
931
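
Both lazy modes funnel into the single set_lazy_mode op: the CPU variant
batches context-switch state updates, the MMU variant batches page-table
updates, and PARAVIRT_LAZY_FLUSH forces any queued work out. A usage
sketch (the variables are illustrative):

	arch_enter_lazy_mmu_mode();
	for (i = 0; i < n; i++)		/* a backend may queue these... */
		set_pte_at(mm, addr + i * PAGE_SIZE, ptep + i, ptes[i]);
	arch_leave_lazy_mmu_mode();	/* ...and apply them in one batch */
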
932void _paravirt_nop(void);
933#define paravirt_nop ((void *)_paravirt_nop)
435 934
436/* These all sit in the .parainstructions section to tell us what to patch. */ 935/* These all sit in the .parainstructions section to tell us what to patch. */
437struct paravirt_patch { 936struct paravirt_patch_site {
438 u8 *instr; /* original instructions */ 937 u8 *instr; /* original instructions */
439 u8 instrtype; /* type of this instruction */ 938 u8 instrtype; /* type of this instruction */
440 u8 len; /* length of original instruction */ 939 u8 len; /* length of original instruction */
441 u16 clobbers; /* what registers you may clobber */ 940 u16 clobbers; /* what registers you may clobber */
442}; 941};
443 942
444#define paravirt_alt(insn_string, typenum, clobber) \ 943extern struct paravirt_patch_site __parainstructions[],
445 "771:\n\t" insn_string "\n" "772:\n" \ 944 __parainstructions_end[];
446 ".pushsection .parainstructions,\"a\"\n" \
447 " .long 771b\n" \
448 " .byte " __stringify(typenum) "\n" \
449 " .byte 772b-771b\n" \
450 " .short " __stringify(clobber) "\n" \
451 ".popsection"
452 945
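
Every annotated call site leaves one of these records behind, and the
__parainstructions[] bounds let the patcher walk them at boot. A sketch
of the consuming loop (an assumption: this mirrors what apply_paravirt()
does on the alternatives path):

	struct paravirt_patch_site *p;

	for (p = __parainstructions; p < __parainstructions_end; p++) {
		/* rewrite p->len bytes at p->instr according to
		 * p->instrtype, staying within p->clobbers */
	}
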
453static inline unsigned long __raw_local_save_flags(void) 946static inline unsigned long __raw_local_save_flags(void)
454{ 947{
455 unsigned long f; 948 unsigned long f;
456 949
457 __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;" 950 asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
458 "call *%1;" 951 PARAVIRT_CALL
459 "popl %%edx; popl %%ecx", 952 "popl %%edx; popl %%ecx")
460 PARAVIRT_SAVE_FLAGS, CLBR_NONE) 953 : "=a"(f)
461 : "=a"(f): "m"(paravirt_ops.save_fl) 954 : paravirt_type(save_fl),
462 : "memory", "cc"); 955 paravirt_clobber(CLBR_EAX)
956 : "memory", "cc");
463 return f; 957 return f;
464} 958}
465 959
466static inline void raw_local_irq_restore(unsigned long f) 960static inline void raw_local_irq_restore(unsigned long f)
467{ 961{
468 __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;" 962 asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
469 "call *%1;" 963 PARAVIRT_CALL
470 "popl %%edx; popl %%ecx", 964 "popl %%edx; popl %%ecx")
471 PARAVIRT_RESTORE_FLAGS, CLBR_EAX) 965 : "=a"(f)
472 : "=a"(f) : "m" (paravirt_ops.restore_fl), "0"(f) 966 : "0"(f),
473 : "memory", "cc"); 967 paravirt_type(restore_fl),
968 paravirt_clobber(CLBR_EAX)
969 : "memory", "cc");
474} 970}
475 971
476static inline void raw_local_irq_disable(void) 972static inline void raw_local_irq_disable(void)
477{ 973{
478 __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;" 974 asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
479 "call *%0;" 975 PARAVIRT_CALL
480 "popl %%edx; popl %%ecx", 976 "popl %%edx; popl %%ecx")
481 PARAVIRT_IRQ_DISABLE, CLBR_EAX) 977 :
482 : : "m" (paravirt_ops.irq_disable) 978 : paravirt_type(irq_disable),
483 : "memory", "eax", "cc"); 979 paravirt_clobber(CLBR_EAX)
980 : "memory", "eax", "cc");
484} 981}
485 982
486static inline void raw_local_irq_enable(void) 983static inline void raw_local_irq_enable(void)
487{ 984{
488 __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;" 985 asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
489 "call *%0;" 986 PARAVIRT_CALL
490 "popl %%edx; popl %%ecx", 987 "popl %%edx; popl %%ecx")
491 PARAVIRT_IRQ_ENABLE, CLBR_EAX) 988 :
492 : : "m" (paravirt_ops.irq_enable) 989 : paravirt_type(irq_enable),
493 : "memory", "eax", "cc"); 990 paravirt_clobber(CLBR_EAX)
991 : "memory", "eax", "cc");
494} 992}
495 993
496static inline unsigned long __raw_local_irq_save(void) 994static inline unsigned long __raw_local_irq_save(void)
497{ 995{
498 unsigned long f; 996 unsigned long f;
499 997
500 __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;" 998 f = __raw_local_save_flags();
501 "call *%1; pushl %%eax;" 999 raw_local_irq_disable();
502 "call *%2; popl %%eax;"
503 "popl %%edx; popl %%ecx",
504 PARAVIRT_SAVE_FLAGS_IRQ_DISABLE,
505 CLBR_NONE)
506 : "=a"(f)
507 : "m" (paravirt_ops.save_fl),
508 "m" (paravirt_ops.irq_disable)
509 : "memory", "cc");
510 return f; 1000 return f;
511} 1001}
512 1002
513#define CLI_STRING paravirt_alt("pushl %%ecx; pushl %%edx;" \ 1003#define CLI_STRING \
514 "call *paravirt_ops+%c[irq_disable];" \ 1004 _paravirt_alt("pushl %%ecx; pushl %%edx;" \
515 "popl %%edx; popl %%ecx", \ 1005 "call *paravirt_ops+%c[paravirt_cli_type]*4;" \
516 PARAVIRT_IRQ_DISABLE, CLBR_EAX) 1006 "popl %%edx; popl %%ecx", \
1007 "%c[paravirt_cli_type]", "%c[paravirt_clobber]")
1008
1009#define STI_STRING \
1010 _paravirt_alt("pushl %%ecx; pushl %%edx;" \
1011 "call *paravirt_ops+%c[paravirt_sti_type]*4;" \
1012 "popl %%edx; popl %%ecx", \
1013 "%c[paravirt_sti_type]", "%c[paravirt_clobber]")
517 1014
518#define STI_STRING paravirt_alt("pushl %%ecx; pushl %%edx;" \
519 "call *paravirt_ops+%c[irq_enable];" \
520 "popl %%edx; popl %%ecx", \
521 PARAVIRT_IRQ_ENABLE, CLBR_EAX)
522#define CLI_STI_CLOBBERS , "%eax" 1015#define CLI_STI_CLOBBERS , "%eax"
523#define CLI_STI_INPUT_ARGS \ 1016#define CLI_STI_INPUT_ARGS \
524 , \ 1017 , \
525 [irq_disable] "i" (offsetof(struct paravirt_ops, irq_disable)), \ 1018 [paravirt_cli_type] "i" (PARAVIRT_PATCH(irq_disable)), \
526 [irq_enable] "i" (offsetof(struct paravirt_ops, irq_enable)) 1019 [paravirt_sti_type] "i" (PARAVIRT_PATCH(irq_enable)), \
1020 paravirt_clobber(CLBR_EAX)
1021
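
The [paravirt_cli_type] "i" (PARAVIRT_PATCH(irq_disable)) operands show
how a patch type is derived: it is the word index of the member inside
struct paravirt_ops, which is also why the call above scales it by *4 to
get back to a byte offset. Roughly (a reconstruction from that scaling,
not a quote of the header):

	#define PARAVIRT_PATCH(x) \
		(offsetof(struct paravirt_ops, x) / sizeof(void *))
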
1022/* Make sure as little as possible of this mess escapes. */
1023#undef PARAVIRT_CALL
1024#undef __PVOP_CALL
1025#undef __PVOP_VCALL
1026#undef PVOP_VCALL0
1027#undef PVOP_CALL0
1028#undef PVOP_VCALL1
1029#undef PVOP_CALL1
1030#undef PVOP_VCALL2
1031#undef PVOP_CALL2
1032#undef PVOP_VCALL3
1033#undef PVOP_CALL3
1034#undef PVOP_VCALL4
1035#undef PVOP_CALL4
527 1036
528#else /* __ASSEMBLY__ */ 1037#else /* __ASSEMBLY__ */
529 1038
530#define PARA_PATCH(ptype, clobbers, ops) \ 1039#define PARA_PATCH(off) ((off) / 4)
1040
1041#define PARA_SITE(ptype, clobbers, ops) \
531771:; \ 1042771:; \
532 ops; \ 1043 ops; \
533772:; \ 1044772:; \
@@ -538,28 +1049,30 @@ static inline unsigned long __raw_local_irq_save(void)
538 .short clobbers; \ 1049 .short clobbers; \
539 .popsection 1050 .popsection
540 1051
541#define INTERRUPT_RETURN \ 1052#define INTERRUPT_RETURN \
542 PARA_PATCH(PARAVIRT_INTERRUPT_RETURN, CLBR_ANY, \ 1053 PARA_SITE(PARA_PATCH(PARAVIRT_iret), CLBR_NONE, \
543 jmp *%cs:paravirt_ops+PARAVIRT_iret) 1054 jmp *%cs:paravirt_ops+PARAVIRT_iret)
544 1055
545#define DISABLE_INTERRUPTS(clobbers) \ 1056#define DISABLE_INTERRUPTS(clobbers) \
546 PARA_PATCH(PARAVIRT_IRQ_DISABLE, clobbers, \ 1057 PARA_SITE(PARA_PATCH(PARAVIRT_irq_disable), clobbers, \
547 pushl %ecx; pushl %edx; \ 1058 pushl %eax; pushl %ecx; pushl %edx; \
548 call *paravirt_ops+PARAVIRT_irq_disable; \ 1059 call *%cs:paravirt_ops+PARAVIRT_irq_disable; \
549 popl %edx; popl %ecx) \ 1060 popl %edx; popl %ecx; popl %eax) \
550 1061
551#define ENABLE_INTERRUPTS(clobbers) \ 1062#define ENABLE_INTERRUPTS(clobbers) \
552 PARA_PATCH(PARAVIRT_IRQ_ENABLE, clobbers, \ 1063 PARA_SITE(PARA_PATCH(PARAVIRT_irq_enable), clobbers, \
553 pushl %ecx; pushl %edx; \ 1064 pushl %eax; pushl %ecx; pushl %edx; \
554 call *%cs:paravirt_ops+PARAVIRT_irq_enable; \ 1065 call *%cs:paravirt_ops+PARAVIRT_irq_enable; \
555 popl %edx; popl %ecx) 1066 popl %edx; popl %ecx; popl %eax)
556 1067
557#define ENABLE_INTERRUPTS_SYSEXIT \ 1068#define ENABLE_INTERRUPTS_SYSEXIT \
558 PARA_PATCH(PARAVIRT_STI_SYSEXIT, CLBR_ANY, \ 1069 PARA_SITE(PARA_PATCH(PARAVIRT_irq_enable_sysexit), CLBR_NONE, \
559 jmp *%cs:paravirt_ops+PARAVIRT_irq_enable_sysexit) 1070 jmp *%cs:paravirt_ops+PARAVIRT_irq_enable_sysexit)
560 1071
561#define GET_CR0_INTO_EAX \ 1072#define GET_CR0_INTO_EAX \
562 call *paravirt_ops+PARAVIRT_read_cr0 1073 push %ecx; push %edx; \
1074 call *paravirt_ops+PARAVIRT_read_cr0; \
1075 pop %edx; pop %ecx
563 1076
564#endif /* __ASSEMBLY__ */ 1077#endif /* __ASSEMBLY__ */
565#endif /* CONFIG_PARAVIRT */ 1078#endif /* CONFIG_PARAVIRT */
diff --git a/include/asm-i386/pda.h b/include/asm-i386/pda.h
deleted file mode 100644
index b12d59a318b7..000000000000
--- a/include/asm-i386/pda.h
+++ /dev/null
@@ -1,100 +0,0 @@
1/*
2 Per-processor Data Areas
3 Jeremy Fitzhardinge <jeremy@goop.org> 2006
4 Based on asm-x86_64/pda.h by Andi Kleen.
5 */
6#ifndef _I386_PDA_H
7#define _I386_PDA_H
8
9#include <linux/stddef.h>
10#include <linux/types.h>
11
12struct i386_pda
13{
14 struct i386_pda *_pda; /* pointer to self */
15
16 int cpu_number;
17 struct task_struct *pcurrent; /* current process */
18 struct pt_regs *irq_regs;
19};
20
21extern struct i386_pda *_cpu_pda[];
22
23#define cpu_pda(i) (_cpu_pda[i])
24
25#define pda_offset(field) offsetof(struct i386_pda, field)
26
27extern void __bad_pda_field(void);
28
29/* This variable is never instantiated. It is only used as a stand-in
30 for the real per-cpu PDA memory, so that gcc can understand what
31 memory operations the inline asms() below are performing. This
32 eliminates the need to make the asms volatile or have memory
33 clobbers, so gcc can readily analyse them. */
34extern struct i386_pda _proxy_pda;
35
36#define pda_to_op(op,field,val) \
37 do { \
38 typedef typeof(_proxy_pda.field) T__; \
39 if (0) { T__ tmp__; tmp__ = (val); } \
40 switch (sizeof(_proxy_pda.field)) { \
41 case 1: \
42 asm(op "b %1,%%fs:%c2" \
43 : "+m" (_proxy_pda.field) \
44 :"ri" ((T__)val), \
45 "i"(pda_offset(field))); \
46 break; \
47 case 2: \
48 asm(op "w %1,%%fs:%c2" \
49 : "+m" (_proxy_pda.field) \
50 :"ri" ((T__)val), \
51 "i"(pda_offset(field))); \
52 break; \
53 case 4: \
54 asm(op "l %1,%%fs:%c2" \
55 : "+m" (_proxy_pda.field) \
56 :"ri" ((T__)val), \
57 "i"(pda_offset(field))); \
58 break; \
59 default: __bad_pda_field(); \
60 } \
61 } while (0)
62
63#define pda_from_op(op,field) \
64 ({ \
65 typeof(_proxy_pda.field) ret__; \
66 switch (sizeof(_proxy_pda.field)) { \
67 case 1: \
68 asm(op "b %%fs:%c1,%0" \
69 : "=r" (ret__) \
70 : "i" (pda_offset(field)), \
71 "m" (_proxy_pda.field)); \
72 break; \
73 case 2: \
74 asm(op "w %%fs:%c1,%0" \
75 : "=r" (ret__) \
76 : "i" (pda_offset(field)), \
77 "m" (_proxy_pda.field)); \
78 break; \
79 case 4: \
80 asm(op "l %%fs:%c1,%0" \
81 : "=r" (ret__) \
82 : "i" (pda_offset(field)), \
83 "m" (_proxy_pda.field)); \
84 break; \
85 default: __bad_pda_field(); \
86 } \
87 ret__; })
88
89/* Return a pointer to a pda field */
90#define pda_addr(field) \
91 ((typeof(_proxy_pda.field) *)((unsigned char *)read_pda(_pda) + \
92 pda_offset(field)))
93
94#define read_pda(field) pda_from_op("mov",field)
95#define write_pda(field,val) pda_to_op("mov",field,val)
96#define add_pda(field,val) pda_to_op("add",field,val)
97#define sub_pda(field,val) pda_to_op("sub",field,val)
98#define or_pda(field,val) pda_to_op("or",field,val)
99
100#endif /* _I386_PDA_H */
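
The PDA can go because this series moves its contents into ordinary
per-cpu variables reached through the same %fs segment (see the percpu.h
diff below). The access pattern translates mechanically; assuming
cpu_number became a DEFINE_PER_CPU(int, cpu_number) as the rest of the
series suggests:

	/* old PDA access */
	int cpu = read_pda(cpu_number);

	/* per-cpu replacement */
	int cpu2 = x86_read_percpu(cpu_number);
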
diff --git a/include/asm-i386/percpu.h b/include/asm-i386/percpu.h
index 510ae1d3486c..f54830b5d5ac 100644
--- a/include/asm-i386/percpu.h
+++ b/include/asm-i386/percpu.h
@@ -1,9 +1,32 @@
1#ifndef __ARCH_I386_PERCPU__ 1#ifndef __ARCH_I386_PERCPU__
2#define __ARCH_I386_PERCPU__ 2#define __ARCH_I386_PERCPU__
3 3
4#ifndef __ASSEMBLY__ 4#ifdef __ASSEMBLY__
5#include <asm-generic/percpu.h> 5
6#else 6/*
7 * PER_CPU finds an address of a per-cpu variable.
8 *
9 * Args:
10 * var - variable name
11 * reg - 32bit register
12 *
13 * The resulting address is stored in the "reg" argument.
14 *
15 * Example:
16 * PER_CPU(cpu_gdt_descr, %ebx)
17 */
18#ifdef CONFIG_SMP
19#define PER_CPU(var, reg) \
20 movl %fs:per_cpu__##this_cpu_off, reg; \
21 lea per_cpu__##var(reg), reg
22#define PER_CPU_VAR(var) %fs:per_cpu__##var
23#else /* ! SMP */
24#define PER_CPU(var, reg) \
25 movl $per_cpu__##var, reg
26#define PER_CPU_VAR(var) per_cpu__##var
27#endif /* SMP */
28
29#else /* ...!ASSEMBLY */
7 30
8/* 31/*
9 * PER_CPU finds an address of a per-cpu variable. 32 * PER_CPU finds an address of a per-cpu variable.
@@ -18,14 +41,109 @@
18 * PER_CPU(cpu_gdt_descr, %ebx) 41 * PER_CPU(cpu_gdt_descr, %ebx)
19 */ 42 */
20#ifdef CONFIG_SMP 43#ifdef CONFIG_SMP
21#define PER_CPU(var, cpu) \ 44/* Same as generic implementation except for optimized local access. */
22 movl __per_cpu_offset(,cpu,4), cpu; \ 45#define __GENERIC_PER_CPU
23 addl $per_cpu__/**/var, cpu; 46
24#else /* ! SMP */ 47/* This is used for other cpus to find our section. */
25#define PER_CPU(var, cpu) \ 48extern unsigned long __per_cpu_offset[];
26 movl $per_cpu__/**/var, cpu; 49
50#define per_cpu_offset(x) (__per_cpu_offset[x])
51
52/* Separate out the type, so (int[3], foo) works. */
53#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
54#define DEFINE_PER_CPU(type, name) \
55 __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
56
57/* We can use this directly for local CPU (faster). */
58DECLARE_PER_CPU(unsigned long, this_cpu_off);
59
60/* var is in discarded region: offset to particular copy we want */
61#define per_cpu(var, cpu) (*({ \
62 extern int simple_indentifier_##var(void); \
63 RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]); }))
64
65#define __raw_get_cpu_var(var) (*({ \
66 extern int simple_indentifier_##var(void); \
67 RELOC_HIDE(&per_cpu__##var, x86_read_percpu(this_cpu_off)); \
68}))
69
70#define __get_cpu_var(var) __raw_get_cpu_var(var)
71
72/* A macro to avoid #include hell... */
73#define percpu_modcopy(pcpudst, src, size) \
74do { \
75 unsigned int __i; \
76 for_each_possible_cpu(__i) \
77 memcpy((pcpudst)+__per_cpu_offset[__i], \
78 (src), (size)); \
79} while (0)
80
81#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
82#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
83
84/* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */
85#define __percpu_seg "%%fs:"
86#else /* !SMP */
87#include <asm-generic/percpu.h>
88#define __percpu_seg ""
27#endif /* SMP */ 89#endif /* SMP */
28 90
91/* For arch-specific code, we can use direct single-insn ops (they
92 * don't give an lvalue though). */
93extern void __bad_percpu_size(void);
94
95#define percpu_to_op(op,var,val) \
96 do { \
97 typedef typeof(var) T__; \
98 if (0) { T__ tmp__; tmp__ = (val); } \
99 switch (sizeof(var)) { \
100 case 1: \
101 asm(op "b %1,"__percpu_seg"%0" \
102 : "+m" (var) \
103 :"ri" ((T__)val)); \
104 break; \
105 case 2: \
106 asm(op "w %1,"__percpu_seg"%0" \
107 : "+m" (var) \
108 :"ri" ((T__)val)); \
109 break; \
110 case 4: \
111 asm(op "l %1,"__percpu_seg"%0" \
112 : "+m" (var) \
113 :"ri" ((T__)val)); \
114 break; \
115 default: __bad_percpu_size(); \
116 } \
117 } while (0)
118
119#define percpu_from_op(op,var) \
120 ({ \
121 typeof(var) ret__; \
122 switch (sizeof(var)) { \
123 case 1: \
124 asm(op "b "__percpu_seg"%1,%0" \
125 : "=r" (ret__) \
126 : "m" (var)); \
127 break; \
128 case 2: \
129 asm(op "w "__percpu_seg"%1,%0" \
130 : "=r" (ret__) \
131 : "m" (var)); \
132 break; \
133 case 4: \
134 asm(op "l "__percpu_seg"%1,%0" \
135 : "=r" (ret__) \
136 : "m" (var)); \
137 break; \
138 default: __bad_percpu_size(); \
139 } \
140 ret__; })
141
142#define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var)
143#define x86_write_percpu(var,val) percpu_to_op("mov", per_cpu__##var, val)
144#define x86_add_percpu(var,val) percpu_to_op("add", per_cpu__##var, val)
145#define x86_sub_percpu(var,val) percpu_to_op("sub", per_cpu__##var, val)
146#define x86_or_percpu(var,val) percpu_to_op("or", per_cpu__##var, val)
29#endif /* !__ASSEMBLY__ */ 147#endif /* !__ASSEMBLY__ */
30 148
31#endif /* __ARCH_I386_PERCPU__ */ 149#endif /* __ARCH_I386_PERCPU__ */
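
On SMP the x86_*_percpu() accessors compile to a single %fs-relative
instruction (and to a plain memory op on UP, where __percpu_seg is
empty). A usage sketch with a hypothetical variable:

	DEFINE_PER_CPU(int, hypothetical_count);	/* illustrative only */
	int v;

	x86_write_percpu(hypothetical_count, 0);
	x86_add_percpu(hypothetical_count, 2);
	v = x86_read_percpu(hypothetical_count);	/* v == 2 on this CPU */

Since percpu_from_op() yields a value rather than an lvalue, writes must
go through x86_write_percpu() rather than plain assignment.
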
diff --git a/include/asm-i386/pgalloc.h b/include/asm-i386/pgalloc.h
index c8dc2d0141a7..47430175b75f 100644
--- a/include/asm-i386/pgalloc.h
+++ b/include/asm-i386/pgalloc.h
@@ -1,7 +1,6 @@
1#ifndef _I386_PGALLOC_H 1#ifndef _I386_PGALLOC_H
2#define _I386_PGALLOC_H 2#define _I386_PGALLOC_H
3 3
4#include <asm/fixmap.h>
5#include <linux/threads.h> 4#include <linux/threads.h>
6#include <linux/mm.h> /* for struct page */ 5#include <linux/mm.h> /* for struct page */
7 6
diff --git a/include/asm-i386/pgtable-2level-defs.h b/include/asm-i386/pgtable-2level-defs.h
index 02518079f816..0f71c9f13da4 100644
--- a/include/asm-i386/pgtable-2level-defs.h
+++ b/include/asm-i386/pgtable-2level-defs.h
@@ -1,6 +1,8 @@
1#ifndef _I386_PGTABLE_2LEVEL_DEFS_H 1#ifndef _I386_PGTABLE_2LEVEL_DEFS_H
2#define _I386_PGTABLE_2LEVEL_DEFS_H 2#define _I386_PGTABLE_2LEVEL_DEFS_H
3 3
4#define SHARED_KERNEL_PMD 0
5
4/* 6/*
5 * traditional i386 two-level paging structure: 7 * traditional i386 two-level paging structure:
6 */ 8 */
diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h
index 38c3fcc0676d..a50fd1773de8 100644
--- a/include/asm-i386/pgtable-2level.h
+++ b/include/asm-i386/pgtable-2level.h
@@ -11,10 +11,23 @@
11 * within a page table are directly modified. Thus, the following 11 * within a page table are directly modified. Thus, the following
12 * hook is made available. 12 * hook is made available.
13 */ 13 */
14static inline void native_set_pte(pte_t *ptep , pte_t pte)
15{
16 *ptep = pte;
17}
18static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
19 pte_t *ptep , pte_t pte)
20{
21 native_set_pte(ptep, pte);
22}
23static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
24{
25 *pmdp = pmd;
26}
14#ifndef CONFIG_PARAVIRT 27#ifndef CONFIG_PARAVIRT
15#define set_pte(pteptr, pteval) (*(pteptr) = pteval) 28#define set_pte(pteptr, pteval) native_set_pte(pteptr, pteval)
16#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) 29#define set_pte_at(mm,addr,ptep,pteval) native_set_pte_at(mm, addr, ptep, pteval)
17#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval)) 30#define set_pmd(pmdptr, pmdval) native_set_pmd(pmdptr, pmdval)
18#endif 31#endif
19 32
20#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval) 33#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
@@ -23,11 +36,23 @@
23#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) 36#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
24#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) 37#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
25 38
26#define raw_ptep_get_and_clear(xp) __pte(xchg(&(xp)->pte_low, 0)) 39static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *xp)
40{
41 *xp = __pte(0);
42}
43
44#ifdef CONFIG_SMP
45static inline pte_t native_ptep_get_and_clear(pte_t *xp)
46{
47 return __pte(xchg(&xp->pte_low, 0));
48}
49#else
50#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
51#endif
27 52
28#define pte_page(x) pfn_to_page(pte_pfn(x)) 53#define pte_page(x) pfn_to_page(pte_pfn(x))
29#define pte_none(x) (!(x).pte_low) 54#define pte_none(x) (!(x).pte_low)
30#define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT))) 55#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
31#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) 56#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
32#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) 57#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
33 58
@@ -66,6 +91,4 @@ static inline int pte_exec_kernel(pte_t pte)
66#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) 91#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
67#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) 92#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
68 93
69void vmalloc_sync_all(void);
70
71#endif /* _I386_PGTABLE_2LEVEL_H */ 94#endif /* _I386_PGTABLE_2LEVEL_H */
diff --git a/include/asm-i386/pgtable-3level-defs.h b/include/asm-i386/pgtable-3level-defs.h
index eb3a1ea88671..c0df89f66e8b 100644
--- a/include/asm-i386/pgtable-3level-defs.h
+++ b/include/asm-i386/pgtable-3level-defs.h
@@ -1,6 +1,12 @@
1#ifndef _I386_PGTABLE_3LEVEL_DEFS_H 1#ifndef _I386_PGTABLE_3LEVEL_DEFS_H
2#define _I386_PGTABLE_3LEVEL_DEFS_H 2#define _I386_PGTABLE_3LEVEL_DEFS_H
3 3
4#ifdef CONFIG_PARAVIRT
5#define SHARED_KERNEL_PMD (paravirt_ops.shared_kernel_pmd)
6#else
7#define SHARED_KERNEL_PMD 1
8#endif
9
4/* 10/*
5 * PGDIR_SHIFT determines what a top-level page table entry can map 11 * PGDIR_SHIFT determines what a top-level page table entry can map
6 */ 12 */
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
index 7a2318f38303..eb0f1d7e96a1 100644
--- a/include/asm-i386/pgtable-3level.h
+++ b/include/asm-i386/pgtable-3level.h
@@ -42,20 +42,23 @@ static inline int pte_exec_kernel(pte_t pte)
42 return pte_x(pte); 42 return pte_x(pte);
43} 43}
44 44
45#ifndef CONFIG_PARAVIRT
46/* Rules for using set_pte: the pte being assigned *must* be 45/* Rules for using set_pte: the pte being assigned *must* be
47 * either not present or in a state where the hardware will 46 * either not present or in a state where the hardware will
48 * not attempt to update the pte. In places where this is 47 * not attempt to update the pte. In places where this is
49 * not possible, use pte_get_and_clear to obtain the old pte 48 * not possible, use pte_get_and_clear to obtain the old pte
50 * value and then use set_pte to update it. -ben 49 * value and then use set_pte to update it. -ben
51 */ 50 */
52static inline void set_pte(pte_t *ptep, pte_t pte) 51static inline void native_set_pte(pte_t *ptep, pte_t pte)
53{ 52{
54 ptep->pte_high = pte.pte_high; 53 ptep->pte_high = pte.pte_high;
55 smp_wmb(); 54 smp_wmb();
56 ptep->pte_low = pte.pte_low; 55 ptep->pte_low = pte.pte_low;
57} 56}
58#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) 57static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
58 pte_t *ptep , pte_t pte)
59{
60 native_set_pte(ptep, pte);
61}
59 62
60/* 63/*
61 * Since this is only called on user PTEs, and the page fault handler 64 * Since this is only called on user PTEs, and the page fault handler
@@ -63,7 +66,8 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
63 * we are justified in merely clearing the PTE present bit, followed 66 * we are justified in merely clearing the PTE present bit, followed
64 * by a set. The ordering here is important. 67 * by a set. The ordering here is important.
65 */ 68 */
66static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) 69static inline void native_set_pte_present(struct mm_struct *mm, unsigned long addr,
70 pte_t *ptep, pte_t pte)
67{ 71{
68 ptep->pte_low = 0; 72 ptep->pte_low = 0;
69 smp_wmb(); 73 smp_wmb();
@@ -72,32 +76,48 @@ static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte
72 ptep->pte_low = pte.pte_low; 76 ptep->pte_low = pte.pte_low;
73} 77}
74 78
75#define set_pte_atomic(pteptr,pteval) \ 79static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
76 set_64bit((unsigned long long *)(pteptr),pte_val(pteval)) 80{
77#define set_pmd(pmdptr,pmdval) \ 81 set_64bit((unsigned long long *)(ptep),native_pte_val(pte));
78 set_64bit((unsigned long long *)(pmdptr),pmd_val(pmdval)) 82}
79#define set_pud(pudptr,pudval) \ 83static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
80 (*(pudptr) = (pudval)) 84{
85 set_64bit((unsigned long long *)(pmdp),native_pmd_val(pmd));
86}
87static inline void native_set_pud(pud_t *pudp, pud_t pud)
88{
89 *pudp = pud;
90}
81 91
82/* 92/*
83 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table 93 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
84 * entry, so clear the bottom half first and enforce ordering with a compiler 94 * entry, so clear the bottom half first and enforce ordering with a compiler
85 * barrier. 95 * barrier.
86 */ 96 */
87static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 97static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
88{ 98{
89 ptep->pte_low = 0; 99 ptep->pte_low = 0;
90 smp_wmb(); 100 smp_wmb();
91 ptep->pte_high = 0; 101 ptep->pte_high = 0;
92} 102}
93 103
94static inline void pmd_clear(pmd_t *pmd) 104static inline void native_pmd_clear(pmd_t *pmd)
95{ 105{
96 u32 *tmp = (u32 *)pmd; 106 u32 *tmp = (u32 *)pmd;
97 *tmp = 0; 107 *tmp = 0;
98 smp_wmb(); 108 smp_wmb();
99 *(tmp + 1) = 0; 109 *(tmp + 1) = 0;
100} 110}
111
112#ifndef CONFIG_PARAVIRT
113#define set_pte(ptep, pte) native_set_pte(ptep, pte)
114#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
115#define set_pte_present(mm, addr, ptep, pte) native_set_pte_present(mm, addr, ptep, pte)
116#define set_pte_atomic(ptep, pte) native_set_pte_atomic(ptep, pte)
117#define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd)
118#define set_pud(pudp, pud) native_set_pud(pudp, pud)
119#define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep)
120#define pmd_clear(pmd) native_pmd_clear(pmd)
101#endif 121#endif
102 122
103/* 123/*
@@ -119,7 +139,8 @@ static inline void pud_clear (pud_t * pud) { }
119#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \ 139#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
120 pmd_index(address)) 140 pmd_index(address))
121 141
122static inline pte_t raw_ptep_get_and_clear(pte_t *ptep) 142#ifdef CONFIG_SMP
143static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
123{ 144{
124 pte_t res; 145 pte_t res;
125 146
@@ -130,6 +151,9 @@ static inline pte_t raw_ptep_get_and_clear(pte_t *ptep)
130 151
131 return res; 152 return res;
132} 153}
154#else
155#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
156#endif
133 157
134#define __HAVE_ARCH_PTE_SAME 158#define __HAVE_ARCH_PTE_SAME
135static inline int pte_same(pte_t a, pte_t b) 159static inline int pte_same(pte_t a, pte_t b)
@@ -146,28 +170,21 @@ static inline int pte_none(pte_t pte)
146 170
147static inline unsigned long pte_pfn(pte_t pte) 171static inline unsigned long pte_pfn(pte_t pte)
148{ 172{
149 return (pte.pte_low >> PAGE_SHIFT) | 173 return pte_val(pte) >> PAGE_SHIFT;
150 (pte.pte_high << (32 - PAGE_SHIFT));
151} 174}
152 175
153extern unsigned long long __supported_pte_mask; 176extern unsigned long long __supported_pte_mask;
154 177
155static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) 178static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
156{ 179{
157 pte_t pte; 180 return __pte((((unsigned long long)page_nr << PAGE_SHIFT) |
158 181 pgprot_val(pgprot)) & __supported_pte_mask);
159 pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) | \
160 (pgprot_val(pgprot) >> 32);
161 pte.pte_high &= (__supported_pte_mask >> 32);
162 pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & \
163 __supported_pte_mask;
164 return pte;
165} 182}
166 183
167static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) 184static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
168{ 185{
169 return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) | \ 186 return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) |
170 pgprot_val(pgprot)) & __supported_pte_mask); 187 pgprot_val(pgprot)) & __supported_pte_mask);
171} 188}
172 189
173/* 190/*
@@ -187,6 +204,4 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
187 204
188#define __pmd_free_tlb(tlb, x) do { } while (0) 205#define __pmd_free_tlb(tlb, x) do { } while (0)
189 206
190#define vmalloc_sync_all() ((void)0)
191
192#endif /* _I386_PGTABLE_3LEVEL_H */ 207#endif /* _I386_PGTABLE_3LEVEL_H */
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index c3b58d473a55..c6b8b944120c 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -159,6 +159,7 @@ void paging_init(void);
159 159
160extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC; 160extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
161#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW) 161#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
162#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
162#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD) 163#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD)
163#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) 164#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
164#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE) 165#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
@@ -166,6 +167,7 @@ extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
166#define PAGE_KERNEL __pgprot(__PAGE_KERNEL) 167#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
167#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO) 168#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
168#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC) 169#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
170#define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX)
169#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE) 171#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
170#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE) 172#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
171#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC) 173#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
@@ -241,6 +243,8 @@ static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; re
241static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; } 243static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; }
242static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return pte; } 244static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return pte; }
243 245
246extern void vmalloc_sync_all(void);
247
244#ifdef CONFIG_X86_PAE 248#ifdef CONFIG_X86_PAE
245# include <asm/pgtable-3level.h> 249# include <asm/pgtable-3level.h>
246#else 250#else
@@ -263,9 +267,18 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
263 */ 267 */
264#define pte_update(mm, addr, ptep) do { } while (0) 268#define pte_update(mm, addr, ptep) do { } while (0)
265#define pte_update_defer(mm, addr, ptep) do { } while (0) 269#define pte_update_defer(mm, addr, ptep) do { } while (0)
266#define paravirt_map_pt_hook(slot, va, pfn) do { } while (0)
267#endif 270#endif
268 271
272/* local pte updates need not use xchg for locking */
273static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
274{
275 pte_t res = *ptep;
276
277 /* Pure native function needs no input for mm, addr */
278 native_pte_clear(NULL, 0, ptep);
279 return res;
280}
281
269/* 282/*
270 * We only update the dirty/accessed state if we set 283 * We only update the dirty/accessed state if we set
271 * the dirty bit by hand in the kernel, since the hardware 284 * the dirty bit by hand in the kernel, since the hardware
@@ -330,7 +343,7 @@ do { \
330#define __HAVE_ARCH_PTEP_GET_AND_CLEAR 343#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
331static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 344static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
332{ 345{
333 pte_t pte = raw_ptep_get_and_clear(ptep); 346 pte_t pte = native_ptep_get_and_clear(ptep);
334 pte_update(mm, addr, ptep); 347 pte_update(mm, addr, ptep);
335 return pte; 348 return pte;
336} 349}
@@ -340,8 +353,11 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
340{ 353{
341 pte_t pte; 354 pte_t pte;
342 if (full) { 355 if (full) {
343 pte = *ptep; 356 /*
344 pte_clear(mm, addr, ptep); 357 * Full address destruction in progress; paravirt does not
358 * care about updates and native needs no locking
359 */
360 pte = native_local_ptep_get_and_clear(ptep);
345 } else { 361 } else {
346 pte = ptep_get_and_clear(mm, addr, ptep); 362 pte = ptep_get_and_clear(mm, addr, ptep);
347 } 363 }
@@ -470,24 +486,10 @@ extern pte_t *lookup_address(unsigned long address);
470#endif 486#endif
471 487
472#if defined(CONFIG_HIGHPTE) 488#if defined(CONFIG_HIGHPTE)
473#define pte_offset_map(dir, address) \ 489#define pte_offset_map(dir, address) \
474({ \ 490 ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
475 pte_t *__ptep; \ 491#define pte_offset_map_nested(dir, address) \
476 unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT; \ 492 ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
477 __ptep = (pte_t *)kmap_atomic(pfn_to_page(pfn),KM_PTE0);\
478 paravirt_map_pt_hook(KM_PTE0,__ptep, pfn); \
479 __ptep = __ptep + pte_index(address); \
480 __ptep; \
481})
482#define pte_offset_map_nested(dir, address) \
483({ \
484 pte_t *__ptep; \
485 unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT; \
486 __ptep = (pte_t *)kmap_atomic(pfn_to_page(pfn),KM_PTE1);\
487 paravirt_map_pt_hook(KM_PTE1,__ptep, pfn); \
488 __ptep = __ptep + pte_index(address); \
489 __ptep; \
490})
491#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) 493#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
492#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1) 494#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
493#else 495#else
@@ -510,6 +512,22 @@ do { \
510 * tables contain all the necessary information. 512 * tables contain all the necessary information.
511 */ 513 */
512#define update_mmu_cache(vma,address,pte) do { } while (0) 514#define update_mmu_cache(vma,address,pte) do { } while (0)
515
516void native_pagetable_setup_start(pgd_t *base);
517void native_pagetable_setup_done(pgd_t *base);
518
519#ifndef CONFIG_PARAVIRT
520static inline void paravirt_pagetable_setup_start(pgd_t *base)
521{
522 native_pagetable_setup_start(base);
523}
524
525static inline void paravirt_pagetable_setup_done(pgd_t *base)
526{
527 native_pagetable_setup_done(base);
528}
529#endif /* !CONFIG_PARAVIRT */
530
513#endif /* !__ASSEMBLY__ */ 531#endif /* !__ASSEMBLY__ */
514 532
515#ifdef CONFIG_FLATMEM 533#ifdef CONFIG_FLATMEM
diff --git a/include/asm-i386/processor-flags.h b/include/asm-i386/processor-flags.h
new file mode 100644
index 000000000000..5404e90edd57
--- /dev/null
+++ b/include/asm-i386/processor-flags.h
@@ -0,0 +1,91 @@
1#ifndef __ASM_I386_PROCESSOR_FLAGS_H
2#define __ASM_I386_PROCESSOR_FLAGS_H
3/* Various flags defined: can be included from assembler. */
4
5/*
6 * EFLAGS bits
7 */
8#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
9#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
10#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
11#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
12#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
13#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
14#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
15#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
16#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
17#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
18#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
19#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
20#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
21#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
22#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
23#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
24#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
25
26/*
27 * Basic CPU control in CR0
28 */
29#define X86_CR0_PE 0x00000001 /* Protection Enable */
30#define X86_CR0_MP 0x00000002 /* Monitor Coprocessor */
31#define X86_CR0_EM 0x00000004 /* Emulation */
32#define X86_CR0_TS 0x00000008 /* Task Switched */
33#define X86_CR0_ET 0x00000010 /* Extension Type */
34#define X86_CR0_NE 0x00000020 /* Numeric Error */
35#define X86_CR0_WP 0x00010000 /* Write Protect */
36#define X86_CR0_AM 0x00040000 /* Alignment Mask */
37#define X86_CR0_NW 0x20000000 /* Not Write-through */
38#define X86_CR0_CD 0x40000000 /* Cache Disable */
39#define X86_CR0_PG 0x80000000 /* Paging */
40
41/*
42 * Paging options in CR3
43 */
44#define X86_CR3_PWT 0x00000008 /* Page Write Through */
45#define X86_CR3_PCD 0x00000010 /* Page Cache Disable */
46
47/*
48 * Intel CPU features in CR4
49 */
50#define X86_CR4_VME 0x00000001 /* enable vm86 extensions */
51#define X86_CR4_PVI 0x00000002 /* virtual interrupts flag enable */
52#define X86_CR4_TSD 0x00000004 /* disable time stamp at ipl 3 */
53#define X86_CR4_DE 0x00000008 /* enable debugging extensions */
54#define X86_CR4_PSE 0x00000010 /* enable page size extensions */
55#define X86_CR4_PAE 0x00000020 /* enable physical address extensions */
56#define X86_CR4_MCE 0x00000040 /* Machine check enable */
57#define X86_CR4_PGE 0x00000080 /* enable global pages */
58#define X86_CR4_PCE 0x00000100 /* enable performance counters at ipl 3 */
59#define X86_CR4_OSFXSR 0x00000200 /* enable fast FPU save and restore */
60#define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */
61#define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */
62
63/*
64 * x86-64 Task Priority Register, CR8
65 */
66#define X86_CR8_TPR 0x00000007 /* task priority register */
67
68/*
69 * AMD and Transmeta use MSRs for configuration; see <asm/msr-index.h>
70 */
71
72/*
73 * NSC/Cyrix CPU configuration register indexes
74 */
75#define CX86_PCR0 0x20
76#define CX86_GCR 0xb8
77#define CX86_CCR0 0xc0
78#define CX86_CCR1 0xc1
79#define CX86_CCR2 0xc2
80#define CX86_CCR3 0xc3
81#define CX86_CCR4 0xe8
82#define CX86_CCR5 0xe9
83#define CX86_CCR6 0xea
84#define CX86_CCR7 0xeb
85#define CX86_PCR1 0xf0
86#define CX86_DIR0 0xfe
87#define CX86_DIR1 0xff
88#define CX86_ARR_BASE 0xc4
89#define CX86_RCR_BASE 0xdc
90
91#endif /* __ASM_I386_PROCESSOR_FLAGS_H */
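
With the flag bits in their own assembler-safe header, the same
constants now serve both C and asm code. A C-side sketch using the
control-register wrappers from the paravirt diff above:

	if (read_cr0() & X86_CR0_PG)
		/* paging is enabled */;

	if (read_cr4() & X86_CR4_PAE)
		/* PAE is enabled */;
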
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 11bf899de8aa..70f3515c3db0 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -21,6 +21,7 @@
21#include <asm/percpu.h> 21#include <asm/percpu.h>
22#include <linux/cpumask.h> 22#include <linux/cpumask.h>
23#include <linux/init.h> 23#include <linux/init.h>
24#include <asm/processor-flags.h>
24 25
25/* flag for disabling the tsc */ 26/* flag for disabling the tsc */
26extern int tsc_disable; 27extern int tsc_disable;
@@ -115,7 +116,8 @@ extern char ignore_fpu_irq;
115 116
116void __init cpu_detect(struct cpuinfo_x86 *c); 117void __init cpu_detect(struct cpuinfo_x86 *c);
117 118
118extern void identify_cpu(struct cpuinfo_x86 *); 119extern void identify_boot_cpu(void);
120extern void identify_secondary_cpu(struct cpuinfo_x86 *);
119extern void print_cpu_info(struct cpuinfo_x86 *); 121extern void print_cpu_info(struct cpuinfo_x86 *);
120extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); 122extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
121extern unsigned short num_cache_leaves; 123extern unsigned short num_cache_leaves;
@@ -126,28 +128,7 @@ extern void detect_ht(struct cpuinfo_x86 *c);
126static inline void detect_ht(struct cpuinfo_x86 *c) {} 128static inline void detect_ht(struct cpuinfo_x86 *c) {}
127#endif 129#endif
128 130
129/* 131static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
130 * EFLAGS bits
131 */
132#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
133#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
134#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
135#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
136#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
137#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
138#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
139#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
140#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
141#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
142#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
143#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
144#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
145#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
146#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
147#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
148#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
149
150static inline fastcall void native_cpuid(unsigned int *eax, unsigned int *ebx,
151 unsigned int *ecx, unsigned int *edx) 132 unsigned int *ecx, unsigned int *edx)
152{ 133{
153 /* ecx is often an input as well as an output. */ 134 /* ecx is often an input as well as an output. */
@@ -162,21 +143,6 @@ static inline fastcall void native_cpuid(unsigned int *eax, unsigned int *ebx,
162#define load_cr3(pgdir) write_cr3(__pa(pgdir)) 143#define load_cr3(pgdir) write_cr3(__pa(pgdir))
163 144
164/* 145/*
165 * Intel CPU features in CR4
166 */
167#define X86_CR4_VME 0x0001 /* enable vm86 extensions */
168#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
169#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
170#define X86_CR4_DE 0x0008 /* enable debugging extensions */
171#define X86_CR4_PSE 0x0010 /* enable page size extensions */
172#define X86_CR4_PAE 0x0020 /* enable physical address extensions */
173#define X86_CR4_MCE 0x0040 /* Machine check enable */
174#define X86_CR4_PGE 0x0080 /* enable global pages */
175#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */
176#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
177#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
178
179/*
180 * Save the cr4 feature set we're using (ie 146 * Save the cr4 feature set we're using (ie
181 * Pentium 4MB enable and PPro Global page 147 * Pentium 4MB enable and PPro Global page
182 * enable), so that any CPU's that boot up 148 * enable), so that any CPU's that boot up
@@ -203,26 +169,6 @@ static inline void clear_in_cr4 (unsigned long mask)
203} 169}
204 170
205/* 171/*
206 * NSC/Cyrix CPU configuration register indexes
207 */
208
209#define CX86_PCR0 0x20
210#define CX86_GCR 0xb8
211#define CX86_CCR0 0xc0
212#define CX86_CCR1 0xc1
213#define CX86_CCR2 0xc2
214#define CX86_CCR3 0xc3
215#define CX86_CCR4 0xe8
216#define CX86_CCR5 0xe9
217#define CX86_CCR6 0xea
218#define CX86_CCR7 0xeb
219#define CX86_PCR1 0xf0
220#define CX86_DIR0 0xfe
221#define CX86_DIR1 0xff
222#define CX86_ARR_BASE 0xc4
223#define CX86_RCR_BASE 0xdc
224
225/*
226 * NSC/Cyrix CPU indexed register access macros 172 * NSC/Cyrix CPU indexed register access macros
227 */ 173 */
228 174
@@ -345,7 +291,8 @@ typedef struct {
345 291
346struct thread_struct; 292struct thread_struct;
347 293
348struct tss_struct { 294/* This is the TSS defined by the hardware. */
295struct i386_hw_tss {
349 unsigned short back_link,__blh; 296 unsigned short back_link,__blh;
350 unsigned long esp0; 297 unsigned long esp0;
351 unsigned short ss0,__ss0h; 298 unsigned short ss0,__ss0h;
@@ -369,6 +316,11 @@ struct tss_struct {
369 unsigned short gs, __gsh; 316 unsigned short gs, __gsh;
370 unsigned short ldt, __ldth; 317 unsigned short ldt, __ldth;
371 unsigned short trace, io_bitmap_base; 318 unsigned short trace, io_bitmap_base;
319} __attribute__((packed));
320
321struct tss_struct {
322 struct i386_hw_tss x86_tss;
323
372 /* 324 /*
373 * The extra 1 is there because the CPU will access an 325 * The extra 1 is there because the CPU will access an
374 * additional byte beyond the end of the IO permission 326 * additional byte beyond the end of the IO permission
@@ -421,10 +373,11 @@ struct thread_struct {
421}; 373};
422 374
423#define INIT_THREAD { \ 375#define INIT_THREAD { \
376 .esp0 = sizeof(init_stack) + (long)&init_stack, \
424 .vm86_info = NULL, \ 377 .vm86_info = NULL, \
425 .sysenter_cs = __KERNEL_CS, \ 378 .sysenter_cs = __KERNEL_CS, \
426 .io_bitmap_ptr = NULL, \ 379 .io_bitmap_ptr = NULL, \
427 .fs = __KERNEL_PDA, \ 380 .fs = __KERNEL_PERCPU, \
428} 381}
429 382
430/* 383/*
@@ -434,10 +387,12 @@ struct thread_struct {
434 * be within the limit. 387 * be within the limit.
435 */ 388 */
436#define INIT_TSS { \ 389#define INIT_TSS { \
437 .esp0 = sizeof(init_stack) + (long)&init_stack, \ 390 .x86_tss = { \
438 .ss0 = __KERNEL_DS, \ 391 .esp0 = sizeof(init_stack) + (long)&init_stack, \
439 .ss1 = __KERNEL_CS, \ 392 .ss0 = __KERNEL_DS, \
440 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ 393 .ss1 = __KERNEL_CS, \
394 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
395 }, \
441 .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \ 396 .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \
442} 397}
443 398
@@ -544,40 +499,70 @@ static inline void rep_nop(void)
544 499
545#define cpu_relax() rep_nop() 500#define cpu_relax() rep_nop()
546 501
547#ifdef CONFIG_PARAVIRT 502static inline void native_load_esp0(struct tss_struct *tss, struct thread_struct *thread)
548#include <asm/paravirt.h>
549#else
550#define paravirt_enabled() 0
551#define __cpuid native_cpuid
552
553static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
554{ 503{
555 tss->esp0 = thread->esp0; 504 tss->x86_tss.esp0 = thread->esp0;
556 /* This can only happen when SEP is enabled, no need to test "SEP"arately */ 505 /* This can only happen when SEP is enabled, no need to test "SEP"arately */
557 if (unlikely(tss->ss1 != thread->sysenter_cs)) { 506 if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
558 tss->ss1 = thread->sysenter_cs; 507 tss->x86_tss.ss1 = thread->sysenter_cs;
559 wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); 508 wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
560 } 509 }
561} 510}
562 511
563/*
564 * These special macros can be used to get or set a debugging register
565 */
566#define get_debugreg(var, register) \
567 __asm__("movl %%db" #register ", %0" \
568 :"=r" (var))
569#define set_debugreg(value, register) \
570 __asm__("movl %0,%%db" #register \
571 : /* no output */ \
572 :"r" (value))
573 512
574#define set_iopl_mask native_set_iopl_mask 513static inline unsigned long native_get_debugreg(int regno)
575#endif /* CONFIG_PARAVIRT */ 514{
515 unsigned long val = 0; /* Damn you, gcc! */
516
517 switch (regno) {
518 case 0:
519 asm("movl %%db0, %0" :"=r" (val)); break;
520 case 1:
521 asm("movl %%db1, %0" :"=r" (val)); break;
522 case 2:
523 asm("movl %%db2, %0" :"=r" (val)); break;
524 case 3:
525 asm("movl %%db3, %0" :"=r" (val)); break;
526 case 6:
527 asm("movl %%db6, %0" :"=r" (val)); break;
528 case 7:
529 asm("movl %%db7, %0" :"=r" (val)); break;
530 default:
531 BUG();
532 }
533 return val;
534}
535
536static inline void native_set_debugreg(int regno, unsigned long value)
537{
538 switch (regno) {
539 case 0:
540 asm("movl %0,%%db0" : /* no output */ :"r" (value));
541 break;
542 case 1:
543 asm("movl %0,%%db1" : /* no output */ :"r" (value));
544 break;
545 case 2:
546 asm("movl %0,%%db2" : /* no output */ :"r" (value));
547 break;
548 case 3:
549 asm("movl %0,%%db3" : /* no output */ :"r" (value));
550 break;
551 case 6:
552 asm("movl %0,%%db6" : /* no output */ :"r" (value));
553 break;
554 case 7:
555 asm("movl %0,%%db7" : /* no output */ :"r" (value));
556 break;
557 default:
558 BUG();
559 }
560}
576 561
577/* 562/*
578 * Set IOPL bits in EFLAGS from given mask 563 * Set IOPL bits in EFLAGS from given mask
579 */ 564 */
580static fastcall inline void native_set_iopl_mask(unsigned mask) 565static inline void native_set_iopl_mask(unsigned mask)
581{ 566{
582 unsigned int reg; 567 unsigned int reg;
583 __asm__ __volatile__ ("pushfl;" 568 __asm__ __volatile__ ("pushfl;"
@@ -590,6 +575,28 @@ static fastcall inline void native_set_iopl_mask(unsigned mask)
590 : "i" (~X86_EFLAGS_IOPL), "r" (mask)); 575 : "i" (~X86_EFLAGS_IOPL), "r" (mask));
591} 576}
592 577
578#ifdef CONFIG_PARAVIRT
579#include <asm/paravirt.h>
580#else
581#define paravirt_enabled() 0
582#define __cpuid native_cpuid
583
584static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
585{
586 native_load_esp0(tss, thread);
587}
588
589/*
590 * These special macros can be used to get or set a debugging register
591 */
592#define get_debugreg(var, register) \
593 (var) = native_get_debugreg(register)
594#define set_debugreg(value, register) \
595 native_set_debugreg(register, value)
596
597#define set_iopl_mask native_set_iopl_mask
598#endif /* CONFIG_PARAVIRT */
599
593/* 600/*
594 * Generic CPUID function 601 * Generic CPUID function
595 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx 602 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
@@ -742,8 +749,10 @@ extern unsigned long boot_option_idle_override;
742extern void enable_sep_cpu(void); 749extern void enable_sep_cpu(void);
743extern int sysenter_setup(void); 750extern int sysenter_setup(void);
744 751
745extern int init_gdt(int cpu, struct task_struct *idle);
746extern void cpu_set_gdt(int); 752extern void cpu_set_gdt(int);
747extern void secondary_cpu_init(void); 753extern void switch_to_new_gdt(void);
754extern void cpu_init(void);
755
756extern int force_mwait;
748 757
749#endif /* __ASM_I386_PROCESSOR_H */ 758#endif /* __ASM_I386_PROCESSOR_H */
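
The get/set_debugreg macros above now route through native_get_debugreg()/native_set_debugreg(), so CONFIG_PARAVIRT can patch in hypervisor versions instead of open-coded asm at every call site. A minimal sketch of a caller, assuming the DR7 local-enable bit for DR0 (the helper name is hypothetical):

	/* Hypothetical: arm a hardware breakpoint; works unchanged
	 * whether the macros expand to the native helpers or to the
	 * paravirt-patched ones. */
	static void set_hw_breakpoint(unsigned long addr)
	{
		unsigned long dr7;

		set_debugreg(addr, 0);	/* breakpoint address into %db0 */
		get_debugreg(dr7, 7);	/* current control bits */
		dr7 |= 0x1;		/* local-enable bit for DR0 */
		set_debugreg(dr7, 7);
	}
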
diff --git a/include/asm-i386/reboot.h b/include/asm-i386/reboot.h
new file mode 100644
index 000000000000..e9e3ffc22c07
--- /dev/null
+++ b/include/asm-i386/reboot.h
@@ -0,0 +1,20 @@
1#ifndef _ASM_REBOOT_H
2#define _ASM_REBOOT_H
3
4struct pt_regs;
5
6struct machine_ops
7{
8 void (*restart)(char *cmd);
9 void (*halt)(void);
10 void (*power_off)(void);
11 void (*shutdown)(void);
12 void (*crash_shutdown)(struct pt_regs *);
13 void (*emergency_restart)(void);
14};
15
16extern struct machine_ops machine_ops;
17
18void machine_real_restart(unsigned char *code, int length);
19
20#endif /* _ASM_REBOOT_H */
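
The new machine_ops table makes the reboot path overridable: kexec and paravirt code can install their own handlers in place of the native ones. A hedged sketch of such an override (the hypervisor_* names are hypothetical):

	/* Hypothetical backend: replace restart and power_off, keep the
	 * defaults for the other hooks. */
	static void hypervisor_restart(char *cmd)
	{
		/* ask the hypervisor to reboot this guest */
	}

	static void hypervisor_power_off(void)
	{
		/* ask the hypervisor to tear the guest down */
	}

	static void __init hypervisor_reboot_setup(void)
	{
		machine_ops.restart = hypervisor_restart;
		machine_ops.power_off = hypervisor_power_off;
	}
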
diff --git a/include/linux/reboot_fixups.h b/include/asm-i386/reboot_fixups.h
index 480ea2d489d8..0cb7d87c2b68 100644
--- a/include/linux/reboot_fixups.h
+++ b/include/asm-i386/reboot_fixups.h
@@ -1,10 +1,6 @@
1#ifndef _LINUX_REBOOT_FIXUPS_H 1#ifndef _LINUX_REBOOT_FIXUPS_H
2#define _LINUX_REBOOT_FIXUPS_H 2#define _LINUX_REBOOT_FIXUPS_H
3 3
4#ifdef CONFIG_X86_REBOOTFIXUPS
5extern void mach_reboot_fixups(void); 4extern void mach_reboot_fixups(void);
6#else
7#define mach_reboot_fixups() ((void)(0))
8#endif
9 5
10#endif /* _LINUX_REBOOT_FIXUPS_H */ 6#endif /* _LINUX_REBOOT_FIXUPS_H */
diff --git a/include/asm-i386/required-features.h b/include/asm-i386/required-features.h
new file mode 100644
index 000000000000..9db866c1e64c
--- /dev/null
+++ b/include/asm-i386/required-features.h
@@ -0,0 +1,34 @@
1#ifndef _ASM_REQUIRED_FEATURES_H
2#define _ASM_REQUIRED_FEATURES_H 1
3
4/* Define the minimum CPUID feature set for the kernel. These bits are checked
5 really early to actually display a visible error message before the
6 kernel dies. Only add word 0 bits here.
7
8 Some requirements that are not in CPUID yet are also in the
9 CONFIG_X86_MINIMUM_CPU mode which is checked too.
10
11 The real information is in arch/i386/Kconfig.cpu; this just converts
12 the CONFIGs into a bitmask. */
13
14#ifdef CONFIG_X86_PAE
15#define NEED_PAE (1<<X86_FEATURE_PAE)
16#else
17#define NEED_PAE 0
18#endif
19
20#ifdef CONFIG_X86_CMOV
21#define NEED_CMOV (1<<X86_FEATURE_CMOV)
22#else
23#define NEED_CMOV 0
24#endif
25
26#ifdef CONFIG_X86_CMPXCHG64
27#define NEED_CMPXCHG64 (1<<X86_FEATURE_CX8)
28#else
29#define NEED_CMPXCHG64 0
30#endif
31
32#define REQUIRED_MASK1 (NEED_PAE|NEED_CMOV|NEED_CMPXCHG64)
33
34#endif
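
REQUIRED_MASK1 is meant to be compared against the CPUID leaf-1 EDX feature word very early in boot. The real test lives in startup code, before printk/panic are usable; as a C rendering of the idea only, assuming the cpuid_edx() helper:

	/* Sketch only: the actual check is done by early boot code. */
	if ((cpuid_edx(1) & REQUIRED_MASK1) != REQUIRED_MASK1)
		panic("CPU lacks features this kernel was built to require");
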
diff --git a/include/asm-i386/segment.h b/include/asm-i386/segment.h
index 065f10bfa487..597a47c2515f 100644
--- a/include/asm-i386/segment.h
+++ b/include/asm-i386/segment.h
@@ -39,7 +39,7 @@
39 * 25 - APM BIOS support 39 * 25 - APM BIOS support
40 * 40 *
41 * 26 - ESPFIX small SS 41 * 26 - ESPFIX small SS
42 * 27 - PDA [ per-cpu private data area ] 42 * 27 - per-cpu [ offset to per-cpu data area ]
43 * 28 - unused 43 * 28 - unused
44 * 29 - unused 44 * 29 - unused
45 * 30 - unused 45 * 30 - unused
@@ -74,8 +74,12 @@
74#define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14) 74#define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
75#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8) 75#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
76 76
77#define GDT_ENTRY_PDA (GDT_ENTRY_KERNEL_BASE + 15) 77#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
78#define __KERNEL_PDA (GDT_ENTRY_PDA * 8) 78#ifdef CONFIG_SMP
79#define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
80#else
81#define __KERNEL_PERCPU 0
82#endif
79 83
80#define GDT_ENTRY_DOUBLEFAULT_TSS 31 84#define GDT_ENTRY_DOUBLEFAULT_TSS 31
81 85
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index 6bf0033a301c..090abc1da32a 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -8,19 +8,15 @@
8#include <linux/kernel.h> 8#include <linux/kernel.h>
9#include <linux/threads.h> 9#include <linux/threads.h>
10#include <linux/cpumask.h> 10#include <linux/cpumask.h>
11#include <asm/pda.h>
12#endif 11#endif
13 12
14#ifdef CONFIG_X86_LOCAL_APIC 13#if defined(CONFIG_X86_LOCAL_APIC) && !defined(__ASSEMBLY__)
15#ifndef __ASSEMBLY__
16#include <asm/fixmap.h>
17#include <asm/bitops.h> 14#include <asm/bitops.h>
18#include <asm/mpspec.h> 15#include <asm/mpspec.h>
16#include <asm/apic.h>
19#ifdef CONFIG_X86_IO_APIC 17#ifdef CONFIG_X86_IO_APIC
20#include <asm/io_apic.h> 18#include <asm/io_apic.h>
21#endif 19#endif
22#include <asm/apic.h>
23#endif
24#endif 20#endif
25 21
26#define BAD_APICID 0xFFu 22#define BAD_APICID 0xFFu
@@ -52,6 +48,59 @@ extern void cpu_exit_clear(void);
52extern void cpu_uninit(void); 48extern void cpu_uninit(void);
53#endif 49#endif
54 50
51struct smp_ops
52{
53 void (*smp_prepare_boot_cpu)(void);
54 void (*smp_prepare_cpus)(unsigned max_cpus);
55 int (*cpu_up)(unsigned cpu);
56 void (*smp_cpus_done)(unsigned max_cpus);
57
58 void (*smp_send_stop)(void);
59 void (*smp_send_reschedule)(int cpu);
60 int (*smp_call_function_mask)(cpumask_t mask,
61 void (*func)(void *info), void *info,
62 int wait);
63};
64
65extern struct smp_ops smp_ops;
66
67static inline void smp_prepare_boot_cpu(void)
68{
69 smp_ops.smp_prepare_boot_cpu();
70}
71static inline void smp_prepare_cpus(unsigned int max_cpus)
72{
73 smp_ops.smp_prepare_cpus(max_cpus);
74}
75static inline int __cpu_up(unsigned int cpu)
76{
77 return smp_ops.cpu_up(cpu);
78}
79static inline void smp_cpus_done(unsigned int max_cpus)
80{
81 smp_ops.smp_cpus_done(max_cpus);
82}
83
84static inline void smp_send_stop(void)
85{
86 smp_ops.smp_send_stop();
87}
88static inline void smp_send_reschedule(int cpu)
89{
90 smp_ops.smp_send_reschedule(cpu);
91}
92static inline int smp_call_function_mask(cpumask_t mask,
93 void (*func) (void *info), void *info,
94 int wait)
95{
96 return smp_ops.smp_call_function_mask(mask, func, info, wait);
97}
98
99void native_smp_prepare_boot_cpu(void);
100void native_smp_prepare_cpus(unsigned int max_cpus);
101int native_cpu_up(unsigned int cpunum);
102void native_smp_cpus_done(unsigned int max_cpus);
103
55#ifndef CONFIG_PARAVIRT 104#ifndef CONFIG_PARAVIRT
56#define startup_ipi_hook(phys_apicid, start_eip, start_esp) \ 105#define startup_ipi_hook(phys_apicid, start_eip, start_esp) \
57do { } while (0) 106do { } while (0)
@@ -62,7 +111,8 @@ do { } while (0)
62 * from the initial startup. We map APIC_BASE very early in page_setup(), 111 * from the initial startup. We map APIC_BASE very early in page_setup(),
63 * so this is correct in the x86 case. 112 * so this is correct in the x86 case.
64 */ 113 */
65#define raw_smp_processor_id() (read_pda(cpu_number)) 114DECLARE_PER_CPU(int, cpu_number);
115#define raw_smp_processor_id() (x86_read_percpu(cpu_number))
66 116
67extern cpumask_t cpu_callout_map; 117extern cpumask_t cpu_callout_map;
68extern cpumask_t cpu_callin_map; 118extern cpumask_t cpu_callin_map;
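
With smp_ops, the generic smp_* entry points above become one-line trampolines through a patchable table, so a paravirt backend can replace individual hooks at boot. A hedged sketch (the pv_* names are hypothetical; the native_* functions are the ones declared above):

	/* Hypothetical backend: reuse native code for everything except
	 * CPU bring-up. */
	static int pv_cpu_up(unsigned int cpu)
	{
		/* hypervisor-assisted AP startup, then the common path */
		return native_cpu_up(cpu);
	}

	static void __init pv_smp_init(void)
	{
		smp_ops.smp_prepare_boot_cpu = native_smp_prepare_boot_cpu;
		smp_ops.smp_prepare_cpus = native_smp_prepare_cpus;
		smp_ops.cpu_up = pv_cpu_up;
		smp_ops.smp_cpus_done = native_smp_cpus_done;
	}
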
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index a6d20d9a1a30..c3a58c08c495 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -88,65 +88,96 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
88#define savesegment(seg, value) \ 88#define savesegment(seg, value) \
89 asm volatile("mov %%" #seg ",%0":"=rm" (value)) 89 asm volatile("mov %%" #seg ",%0":"=rm" (value))
90 90
91
92static inline void native_clts(void)
93{
94 asm volatile ("clts");
95}
96
97static inline unsigned long native_read_cr0(void)
98{
99 unsigned long val;
100 asm volatile("movl %%cr0,%0\n\t" :"=r" (val));
101 return val;
102}
103
104static inline void native_write_cr0(unsigned long val)
105{
106 asm volatile("movl %0,%%cr0": :"r" (val));
107}
108
109static inline unsigned long native_read_cr2(void)
110{
111 unsigned long val;
112 asm volatile("movl %%cr2,%0\n\t" :"=r" (val));
113 return val;
114}
115
116static inline void native_write_cr2(unsigned long val)
117{
118 asm volatile("movl %0,%%cr2": :"r" (val));
119}
120
121static inline unsigned long native_read_cr3(void)
122{
123 unsigned long val;
124 asm volatile("movl %%cr3,%0\n\t" :"=r" (val));
125 return val;
126}
127
128static inline void native_write_cr3(unsigned long val)
129{
130 asm volatile("movl %0,%%cr3": :"r" (val));
131}
132
133static inline unsigned long native_read_cr4(void)
134{
135 unsigned long val;
136 asm volatile("movl %%cr4,%0\n\t" :"=r" (val));
137 return val;
138}
139
140static inline unsigned long native_read_cr4_safe(void)
141{
142 unsigned long val;
143 /* This could fault if %cr4 does not exist */
144 asm("1: movl %%cr4, %0 \n"
145 "2: \n"
146 ".section __ex_table,\"a\" \n"
147 ".long 1b,2b \n"
148 ".previous \n"
149 : "=r" (val): "0" (0));
150 return val;
151}
152
153static inline void native_write_cr4(unsigned long val)
154{
155 asm volatile("movl %0,%%cr4": :"r" (val));
156}
157
158static inline void native_wbinvd(void)
159{
160 asm volatile("wbinvd": : :"memory");
161}
162
163
91#ifdef CONFIG_PARAVIRT 164#ifdef CONFIG_PARAVIRT
92#include <asm/paravirt.h> 165#include <asm/paravirt.h>
93#else 166#else
94#define read_cr0() ({ \ 167#define read_cr0() (native_read_cr0())
95 unsigned int __dummy; \ 168#define write_cr0(x) (native_write_cr0(x))
96 __asm__ __volatile__( \ 169#define read_cr2() (native_read_cr2())
97 "movl %%cr0,%0\n\t" \ 170#define write_cr2(x) (native_write_cr2(x))
98 :"=r" (__dummy)); \ 171#define read_cr3() (native_read_cr3())
99 __dummy; \ 172#define write_cr3(x) (native_write_cr3(x))
100}) 173#define read_cr4() (native_read_cr4())
101#define write_cr0(x) \ 174#define read_cr4_safe() (native_read_cr4_safe())
102 __asm__ __volatile__("movl %0,%%cr0": :"r" (x)) 175#define write_cr4(x) (native_write_cr4(x))
103 176#define wbinvd() (native_wbinvd())
104#define read_cr2() ({ \
105 unsigned int __dummy; \
106 __asm__ __volatile__( \
107 "movl %%cr2,%0\n\t" \
108 :"=r" (__dummy)); \
109 __dummy; \
110})
111#define write_cr2(x) \
112 __asm__ __volatile__("movl %0,%%cr2": :"r" (x))
113
114#define read_cr3() ({ \
115 unsigned int __dummy; \
116 __asm__ ( \
117 "movl %%cr3,%0\n\t" \
118 :"=r" (__dummy)); \
119 __dummy; \
120})
121#define write_cr3(x) \
122 __asm__ __volatile__("movl %0,%%cr3": :"r" (x))
123
124#define read_cr4() ({ \
125 unsigned int __dummy; \
126 __asm__( \
127 "movl %%cr4,%0\n\t" \
128 :"=r" (__dummy)); \
129 __dummy; \
130})
131#define read_cr4_safe() ({ \
132 unsigned int __dummy; \
133 /* This could fault if %cr4 does not exist */ \
134 __asm__("1: movl %%cr4, %0 \n" \
135 "2: \n" \
136 ".section __ex_table,\"a\" \n" \
137 ".long 1b,2b \n" \
138 ".previous \n" \
139 : "=r" (__dummy): "0" (0)); \
140 __dummy; \
141})
142#define write_cr4(x) \
143 __asm__ __volatile__("movl %0,%%cr4": :"r" (x))
144
145#define wbinvd() \
146 __asm__ __volatile__ ("wbinvd": : :"memory")
147 177
148/* Clear the 'TS' bit */ 178/* Clear the 'TS' bit */
149#define clts() __asm__ __volatile__ ("clts") 179#define clts() (native_clts())
180
150#endif/* CONFIG_PARAVIRT */ 181#endif/* CONFIG_PARAVIRT */
151 182
152/* Set the 'TS' bit */ 183/* Set the 'TS' bit */
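
native_read_cr4_safe() is worth a note: %cr4 does not exist on i386/i486-class CPUs, so the mov carries an __ex_table entry that resumes at label 2 with the preloaded 0 if it faults. Callers can therefore probe CR4 unconditionally; a minimal sketch:

	/* Sketch: 0 means either "no CR4 on this CPU" or no bits set. */
	unsigned long cr4 = read_cr4_safe();

	if (cr4 & X86_CR4_PAE)
		printk(KERN_INFO "PAE is enabled\n");
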
diff --git a/include/asm-i386/timer.h b/include/asm-i386/timer.h
index 12dd67bf760f..153770e25faa 100644
--- a/include/asm-i386/timer.h
+++ b/include/asm-i386/timer.h
@@ -9,8 +9,6 @@ void setup_pit_timer(void);
9unsigned long long native_sched_clock(void); 9unsigned long long native_sched_clock(void);
10unsigned long native_calculate_cpu_khz(void); 10unsigned long native_calculate_cpu_khz(void);
11 11
12/* Modifiers for buggy PIT handling */
13extern int pit_latch_buggy;
14extern int timer_ack; 12extern int timer_ack;
15extern int no_timer_check; 13extern int no_timer_check;
16extern int no_sync_cmos_clock; 14extern int no_sync_cmos_clock;
diff --git a/include/asm-i386/tlbflush.h b/include/asm-i386/tlbflush.h
index 4dd82840d53b..db7f77eacfa0 100644
--- a/include/asm-i386/tlbflush.h
+++ b/include/asm-i386/tlbflush.h
@@ -79,11 +79,15 @@
79 * - flush_tlb_range(vma, start, end) flushes a range of pages 79 * - flush_tlb_range(vma, start, end) flushes a range of pages
80 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages 80 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
81 * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables 81 * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
82 * - flush_tlb_others(cpumask, mm, va) flushes the TLBs on other cpus
82 * 83 *
83 * ..but the i386 has somewhat limited tlb flushing capabilities, 84 * ..but the i386 has somewhat limited tlb flushing capabilities,
84 * and page-granular flushes are available only on i486 and up. 85 * and page-granular flushes are available only on i486 and up.
85 */ 86 */
86 87
88#define TLB_FLUSH_ALL 0xffffffff
89
90
87#ifndef CONFIG_SMP 91#ifndef CONFIG_SMP
88 92
89#define flush_tlb() __flush_tlb() 93#define flush_tlb() __flush_tlb()
@@ -110,7 +114,12 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
110 __flush_tlb(); 114 __flush_tlb();
111} 115}
112 116
113#else 117static inline void native_flush_tlb_others(const cpumask_t *cpumask,
118 struct mm_struct *mm, unsigned long va)
119{
120}
121
122#else /* SMP */
114 123
115#include <asm/smp.h> 124#include <asm/smp.h>
116 125
@@ -129,6 +138,9 @@ static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long st
129 flush_tlb_mm(vma->vm_mm); 138 flush_tlb_mm(vma->vm_mm);
130} 139}
131 140
141void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm,
142 unsigned long va);
143
132#define TLBSTATE_OK 1 144#define TLBSTATE_OK 1
133#define TLBSTATE_LAZY 2 145#define TLBSTATE_LAZY 2
134 146
@@ -139,8 +151,11 @@ struct tlb_state
139 char __cacheline_padding[L1_CACHE_BYTES-8]; 151 char __cacheline_padding[L1_CACHE_BYTES-8];
140}; 152};
141DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate); 153DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
154#endif /* SMP */
142 155
143 156#ifndef CONFIG_PARAVIRT
157#define flush_tlb_others(mask, mm, va) \
158 native_flush_tlb_others(&mask, mm, va)
144#endif 159#endif
145 160
146#define flush_tlb_kernel_range(start, end) flush_tlb_all() 161#define flush_tlb_kernel_range(start, end) flush_tlb_all()
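
flush_tlb_others() gives paravirt a hook for remote TLB shootdown; without CONFIG_PARAVIRT it resolves to native_flush_tlb_others(), which is an empty stub on UP. Note the macro takes the cpumask by value and forwards its address. A usage sketch, assuming the 2.6.21-era cpumask API (cpu_clear/cpus_empty) and an mm whose cpu_vm_mask is populated:

	/* Sketch: flush an mm's translations on every other CPU that may
	 * cache them, then flush the local TLB. */
	cpumask_t others = mm->cpu_vm_mask;

	cpu_clear(smp_processor_id(), others);
	if (!cpus_empty(others))
		flush_tlb_others(others, mm, TLB_FLUSH_ALL);
	__flush_tlb();
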
diff --git a/include/asm-i386/tsc.h b/include/asm-i386/tsc.h
index 84016ff481b9..3f3c1fa000b4 100644
--- a/include/asm-i386/tsc.h
+++ b/include/asm-i386/tsc.h
@@ -35,25 +35,30 @@ static inline cycles_t get_cycles(void)
35static __always_inline cycles_t get_cycles_sync(void) 35static __always_inline cycles_t get_cycles_sync(void)
36{ 36{
37 unsigned long long ret; 37 unsigned long long ret;
38#ifdef X86_FEATURE_SYNC_RDTSC
39 unsigned eax; 38 unsigned eax;
40 39
41 /* 40 /*
41 * Use RDTSCP if possible; it is guaranteed to be synchronous
42 * and doesn't cause a VMEXIT on hypervisors
43 */
44 alternative_io(ASM_NOP3, ".byte 0x0f,0x01,0xf9", X86_FEATURE_RDTSCP,
45 "=A" (ret), "0" (0ULL) : "ecx", "memory");
46 if (ret)
47 return ret;
48
49 /*
42 * Don't do an additional sync on CPUs where we know 50 * Don't do an additional sync on CPUs where we know
43 * RDTSC is already synchronous: 51 * RDTSC is already synchronous:
44 */ 52 */
45 alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC, 53 alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC,
46 "=a" (eax), "0" (1) : "ebx","ecx","edx","memory"); 54 "=a" (eax), "0" (1) : "ebx","ecx","edx","memory");
47#else
48 sync_core();
49#endif
50 rdtscll(ret); 55 rdtscll(ret);
51 56
52 return ret; 57 return ret;
53} 58}
54 59
55extern void tsc_init(void); 60extern void tsc_init(void);
56extern void mark_tsc_unstable(void); 61extern void mark_tsc_unstable(char *reason);
57extern int unsynchronized_tsc(void); 62extern int unsynchronized_tsc(void);
58extern void init_tsc_clocksource(void); 63extern void init_tsc_clocksource(void);
59 64
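
The bytes 0x0f,0x01,0xf9 patched in by alternative_io() are the RDTSCP opcode, spelled numerically because older assemblers lack the mnemonic. RDTSCP loads the TSC into EDX:EAX (the "=A" pair constraint) and the TSC_AUX MSR into ECX, hence the "ecx" clobber. As a standalone illustration of the patched instruction:

	/* Sketch of what the patched-in instruction does by itself. */
	static inline unsigned long long rdtscp(void)
	{
		unsigned long long tsc;

		asm volatile(".byte 0x0f,0x01,0xf9"	/* rdtscp */
			     : "=A" (tsc)		/* edx:eax */
			     : /* no inputs */
			     : "ecx");			/* ecx <- TSC_AUX */
		return tsc;
	}
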
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index 70829ae3ad52..e2aa5e0d0cc7 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -397,7 +397,19 @@ unsigned long __must_check __copy_from_user_ll_nocache(void *to,
397unsigned long __must_check __copy_from_user_ll_nocache_nozero(void *to, 397unsigned long __must_check __copy_from_user_ll_nocache_nozero(void *to,
398 const void __user *from, unsigned long n); 398 const void __user *from, unsigned long n);
399 399
400/* 400/**
401 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
402 * @to: Destination address, in user space.
403 * @from: Source address, in kernel space.
404 * @n: Number of bytes to copy.
405 *
406 * Context: User context only.
407 *
408 * Copy data from kernel space to user space. Caller must check
409 * the specified block with access_ok() before calling this function.
410 * The caller should also make sure to pin the user space address
411 * so that we don't take a page fault and sleep.
412 *
401 * Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault 413 * Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
402 * we return the initial request size (1, 2 or 4), as copy_*_user should do. 414 * we return the initial request size (1, 2 or 4), as copy_*_user should do.
403 * If a store crosses a page boundary and gets a fault, the x86 will not write 415 * If a store crosses a page boundary and gets a fault, the x86 will not write
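
A hedged usage sketch for __copy_to_user_inatomic(): the caller performs access_ok() itself and must not sleep, so the copy is attempted with page faults disabled, and a nonzero return (bytes left uncopied) triggers a sleeping fallback. pagefault_disable()/pagefault_enable() and slow_path_copy() are assumptions here, not part of this header:

	/* Sketch: try the non-sleeping copy-out first, fall back on fault. */
	unsigned long left;

	if (!access_ok(VERIFY_WRITE, uptr, sizeof(val)))
		return -EFAULT;

	pagefault_disable();
	left = __copy_to_user_inatomic(uptr, &val, sizeof(val));
	pagefault_enable();

	if (left)
		return slow_path_copy(uptr, &val, sizeof(val));	/* may sleep */
	return 0;
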
diff --git a/include/asm-i386/vmi_time.h b/include/asm-i386/vmi_time.h
index c3a1fcf66c96..213930b995cb 100644
--- a/include/asm-i386/vmi_time.h
+++ b/include/asm-i386/vmi_time.h
@@ -53,22 +53,8 @@ extern unsigned long long vmi_get_sched_cycles(void);
53extern unsigned long vmi_cpu_khz(void); 53extern unsigned long vmi_cpu_khz(void);
54 54
55#ifdef CONFIG_X86_LOCAL_APIC 55#ifdef CONFIG_X86_LOCAL_APIC
56extern void __init vmi_timer_setup_boot_alarm(void); 56extern void __devinit vmi_time_bsp_init(void);
57extern void __devinit vmi_timer_setup_secondary_alarm(void); 57extern void __devinit vmi_time_ap_init(void);
58extern void apic_vmi_timer_interrupt(void);
59#endif
60
61#ifdef CONFIG_NO_IDLE_HZ
62extern int vmi_stop_hz_timer(void);
63extern void vmi_account_time_restart_hz_timer(void);
64#else
65static inline int vmi_stop_hz_timer(void)
66{
67 return 0;
68}
69static inline void vmi_account_time_restart_hz_timer(void)
70{
71}
72#endif 58#endif
73 59
74/* 60/*
diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h
index b5c65081a3aa..cef2400983fa 100644
--- a/include/asm-ia64/mmu_context.h
+++ b/include/asm-ia64/mmu_context.h
@@ -29,6 +29,7 @@
29#include <linux/spinlock.h> 29#include <linux/spinlock.h>
30 30
31#include <asm/processor.h> 31#include <asm/processor.h>
32#include <asm-generic/mm_hooks.h>
32 33
33struct ia64_ctx { 34struct ia64_ctx {
34 spinlock_t lock; 35 spinlock_t lock;
diff --git a/include/asm-m32r/mmu_context.h b/include/asm-m32r/mmu_context.h
index 1f40d4a0acf1..91909e5dd9d0 100644
--- a/include/asm-m32r/mmu_context.h
+++ b/include/asm-m32r/mmu_context.h
@@ -15,6 +15,7 @@
15#include <asm/pgalloc.h> 15#include <asm/pgalloc.h>
16#include <asm/mmu.h> 16#include <asm/mmu.h>
17#include <asm/tlbflush.h> 17#include <asm/tlbflush.h>
18#include <asm-generic/mm_hooks.h>
18 19
19/* 20/*
20 * Cache of MMU context last used. 21 * Cache of MMU context last used.
diff --git a/include/asm-m68k/mmu_context.h b/include/asm-m68k/mmu_context.h
index 231d11bd8e32..894dacbcee14 100644
--- a/include/asm-m68k/mmu_context.h
+++ b/include/asm-m68k/mmu_context.h
@@ -1,6 +1,7 @@
1#ifndef __M68K_MMU_CONTEXT_H 1#ifndef __M68K_MMU_CONTEXT_H
2#define __M68K_MMU_CONTEXT_H 2#define __M68K_MMU_CONTEXT_H
3 3
4#include <asm-generic/mm_hooks.h>
4 5
5static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 6static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
6{ 7{
diff --git a/include/asm-m68knommu/mmu_context.h b/include/asm-m68knommu/mmu_context.h
index 6c077d3a2572..9ccee4278c97 100644
--- a/include/asm-m68knommu/mmu_context.h
+++ b/include/asm-m68knommu/mmu_context.h
@@ -4,6 +4,7 @@
4#include <asm/setup.h> 4#include <asm/setup.h>
5#include <asm/page.h> 5#include <asm/page.h>
6#include <asm/pgalloc.h> 6#include <asm/pgalloc.h>
7#include <asm-generic/mm_hooks.h>
7 8
8static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 9static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
9{ 10{
diff --git a/include/asm-mips/mmu_context.h b/include/asm-mips/mmu_context.h
index fe065d6070ca..65024ffd7879 100644
--- a/include/asm-mips/mmu_context.h
+++ b/include/asm-mips/mmu_context.h
@@ -20,6 +20,7 @@
20#include <asm/mipsmtregs.h> 20#include <asm/mipsmtregs.h>
21#include <asm/smtc.h> 21#include <asm/smtc.h>
22#endif /* SMTC */ 22#endif /* SMTC */
23#include <asm-generic/mm_hooks.h>
23 24
24/* 25/*
25 * For the fast tlb miss handlers, we keep a per cpu array of pointers 26 * For the fast tlb miss handlers, we keep a per cpu array of pointers
diff --git a/include/asm-parisc/mmu_context.h b/include/asm-parisc/mmu_context.h
index 9c05836239a2..bad690298f0c 100644
--- a/include/asm-parisc/mmu_context.h
+++ b/include/asm-parisc/mmu_context.h
@@ -5,6 +5,7 @@
5#include <asm/atomic.h> 5#include <asm/atomic.h>
6#include <asm/pgalloc.h> 6#include <asm/pgalloc.h>
7#include <asm/pgtable.h> 7#include <asm/pgtable.h>
8#include <asm-generic/mm_hooks.h>
8 9
9static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 10static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10{ 11{
diff --git a/include/asm-powerpc/mmu_context.h b/include/asm-powerpc/mmu_context.h
index 083ac917bd29..c0d7795e3d25 100644
--- a/include/asm-powerpc/mmu_context.h
+++ b/include/asm-powerpc/mmu_context.h
@@ -10,6 +10,7 @@
10#include <linux/mm.h> 10#include <linux/mm.h>
11#include <asm/mmu.h> 11#include <asm/mmu.h>
12#include <asm/cputable.h> 12#include <asm/cputable.h>
13#include <asm-generic/mm_hooks.h>
13 14
14/* 15/*
15 * Copyright (C) 2001 PPC 64 Team, IBM Corp 16 * Copyright (C) 2001 PPC 64 Team, IBM Corp
diff --git a/include/asm-ppc/mmu_context.h b/include/asm-ppc/mmu_context.h
index 2bc8589cc451..a6441a063e5d 100644
--- a/include/asm-ppc/mmu_context.h
+++ b/include/asm-ppc/mmu_context.h
@@ -6,6 +6,7 @@
6#include <asm/bitops.h> 6#include <asm/bitops.h>
7#include <asm/mmu.h> 7#include <asm/mmu.h>
8#include <asm/cputable.h> 8#include <asm/cputable.h>
9#include <asm-generic/mm_hooks.h>
9 10
10/* 11/*
11 * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs 12 * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h
index 1d21da220d49..501cb9b06314 100644
--- a/include/asm-s390/mmu_context.h
+++ b/include/asm-s390/mmu_context.h
@@ -10,6 +10,8 @@
10#define __S390_MMU_CONTEXT_H 10#define __S390_MMU_CONTEXT_H
11 11
12#include <asm/pgalloc.h> 12#include <asm/pgalloc.h>
13#include <asm-generic/mm_hooks.h>
14
13/* 15/*
14 * get a new mmu context.. S390 don't know about contexts. 16 * get a new mmu context.. S390 don't know about contexts.
15 */ 17 */
diff --git a/include/asm-sh/mmu_context.h b/include/asm-sh/mmu_context.h
index 342024425b7d..01acaaae9751 100644
--- a/include/asm-sh/mmu_context.h
+++ b/include/asm-sh/mmu_context.h
@@ -12,6 +12,7 @@
12#include <asm/tlbflush.h> 12#include <asm/tlbflush.h>
13#include <asm/uaccess.h> 13#include <asm/uaccess.h>
14#include <asm/io.h> 14#include <asm/io.h>
15#include <asm-generic/mm_hooks.h>
15 16
16/* 17/*
17 * The MMU "context" consists of two things: 18 * The MMU "context" consists of two things:
diff --git a/include/asm-sh64/mmu_context.h b/include/asm-sh64/mmu_context.h
index 8c860dab2d0e..507bf72bb8e1 100644
--- a/include/asm-sh64/mmu_context.h
+++ b/include/asm-sh64/mmu_context.h
@@ -27,7 +27,7 @@
27extern unsigned long mmu_context_cache; 27extern unsigned long mmu_context_cache;
28 28
29#include <asm/page.h> 29#include <asm/page.h>
30 30#include <asm-generic/mm_hooks.h>
31 31
32/* Current mm's pgd */ 32/* Current mm's pgd */
33extern pgd_t *mmu_pdtp_cache; 33extern pgd_t *mmu_pdtp_cache;
diff --git a/include/asm-sparc/mmu_context.h b/include/asm-sparc/mmu_context.h
index ed1e01d04d21..671a997b9e69 100644
--- a/include/asm-sparc/mmu_context.h
+++ b/include/asm-sparc/mmu_context.h
@@ -5,6 +5,8 @@
5 5
6#ifndef __ASSEMBLY__ 6#ifndef __ASSEMBLY__
7 7
8#include <asm-generic/mm_hooks.h>
9
8static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 10static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
9{ 11{
10} 12}
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 2337eb487719..8d129032013e 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -9,6 +9,7 @@
9#include <linux/spinlock.h> 9#include <linux/spinlock.h>
10#include <asm/system.h> 10#include <asm/system.h>
11#include <asm/spitfire.h> 11#include <asm/spitfire.h>
12#include <asm-generic/mm_hooks.h>
12 13
13static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 14static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
14{ 15{
diff --git a/include/asm-sparc64/percpu.h b/include/asm-sparc64/percpu.h
index 0d3df76aa47f..ced8cbde046d 100644
--- a/include/asm-sparc64/percpu.h
+++ b/include/asm-sparc64/percpu.h
@@ -5,16 +5,6 @@
5 5
6#ifdef CONFIG_SMP 6#ifdef CONFIG_SMP
7 7
8#ifdef CONFIG_MODULES
9# define PERCPU_MODULE_RESERVE 8192
10#else
11# define PERCPU_MODULE_RESERVE 0
12#endif
13
14#define PERCPU_ENOUGH_ROOM \
15 (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
16 PERCPU_MODULE_RESERVE)
17
18extern void setup_per_cpu_areas(void); 8extern void setup_per_cpu_areas(void);
19 9
20extern unsigned long __per_cpu_base; 10extern unsigned long __per_cpu_base;
diff --git a/include/asm-um/mmu_context.h b/include/asm-um/mmu_context.h
index f709c784bf12..9aa4b44e8cc1 100644
--- a/include/asm-um/mmu_context.h
+++ b/include/asm-um/mmu_context.h
@@ -6,6 +6,8 @@
6#ifndef __UM_MMU_CONTEXT_H 6#ifndef __UM_MMU_CONTEXT_H
7#define __UM_MMU_CONTEXT_H 7#define __UM_MMU_CONTEXT_H
8 8
9#include <asm-generic/mm_hooks.h>
10
9#include "linux/sched.h" 11#include "linux/sched.h"
10#include "choose-mode.h" 12#include "choose-mode.h"
11#include "um_mmu.h" 13#include "um_mmu.h"
diff --git a/include/asm-v850/mmu_context.h b/include/asm-v850/mmu_context.h
index f521c8050d3c..01daacd5474e 100644
--- a/include/asm-v850/mmu_context.h
+++ b/include/asm-v850/mmu_context.h
@@ -1,6 +1,8 @@
1#ifndef __V850_MMU_CONTEXT_H__ 1#ifndef __V850_MMU_CONTEXT_H__
2#define __V850_MMU_CONTEXT_H__ 2#define __V850_MMU_CONTEXT_H__
3 3
4#include <asm-generic/mm_hooks.h>
5
4#define destroy_context(mm) ((void)0) 6#define destroy_context(mm) ((void)0)
5#define init_new_context(tsk,mm) 0 7#define init_new_context(tsk,mm) 0
6#define switch_mm(prev,next,tsk) ((void)0) 8#define switch_mm(prev,next,tsk) ((void)0)
diff --git a/include/asm-x86_64/Kbuild b/include/asm-x86_64/Kbuild
index ebd7117782a6..89ad1fc27c8b 100644
--- a/include/asm-x86_64/Kbuild
+++ b/include/asm-x86_64/Kbuild
@@ -8,7 +8,7 @@ header-y += boot.h
8header-y += bootsetup.h 8header-y += bootsetup.h
9header-y += debugreg.h 9header-y += debugreg.h
10header-y += ldt.h 10header-y += ldt.h
11header-y += msr.h 11header-y += msr-index.h
12header-y += prctl.h 12header-y += prctl.h
13header-y += ptrace-abi.h 13header-y += ptrace-abi.h
14header-y += sigcontext32.h 14header-y += sigcontext32.h
@@ -16,5 +16,7 @@ header-y += ucontext.h
16header-y += vsyscall32.h 16header-y += vsyscall32.h
17 17
18unifdef-y += mce.h 18unifdef-y += mce.h
19unifdef-y += msr.h
19unifdef-y += mtrr.h 20unifdef-y += mtrr.h
20unifdef-y += vsyscall.h 21unifdef-y += vsyscall.h
22unifdef-y += const.h
diff --git a/include/asm-x86_64/alternative.h b/include/asm-x86_64/alternative.h
index a6657b4f3e0e..a09fe85c268e 100644
--- a/include/asm-x86_64/alternative.h
+++ b/include/asm-x86_64/alternative.h
@@ -16,6 +16,7 @@ struct alt_instr {
16 u8 pad[5]; 16 u8 pad[5];
17}; 17};
18 18
19extern void alternative_instructions(void);
19extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); 20extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
20 21
21struct module; 22struct module;
@@ -141,8 +142,8 @@ void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end);
141static inline void 142static inline void
142apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end) 143apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
143{} 144{}
144#define __start_parainstructions NULL 145#define __parainstructions NULL
145#define __stop_parainstructions NULL 146#define __parainstructions_end NULL
146#endif 147#endif
147 148
148#endif /* _X86_64_ALTERNATIVE_H */ 149#endif /* _X86_64_ALTERNATIVE_H */
diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h
index 7cfb39cbd918..45e9fca1febc 100644
--- a/include/asm-x86_64/apic.h
+++ b/include/asm-x86_64/apic.h
@@ -2,6 +2,7 @@
2#define __ASM_APIC_H 2#define __ASM_APIC_H
3 3
4#include <linux/pm.h> 4#include <linux/pm.h>
5#include <linux/delay.h>
5#include <asm/fixmap.h> 6#include <asm/fixmap.h>
6#include <asm/apicdef.h> 7#include <asm/apicdef.h>
7#include <asm/system.h> 8#include <asm/system.h>
@@ -47,11 +48,8 @@ static __inline unsigned int apic_read(unsigned long reg)
47 return *((volatile unsigned int *)(APIC_BASE+reg)); 48 return *((volatile unsigned int *)(APIC_BASE+reg));
48} 49}
49 50
50static __inline__ void apic_wait_icr_idle(void) 51extern void apic_wait_icr_idle(void);
51{ 52extern unsigned int safe_apic_wait_icr_idle(void);
52 while (apic_read( APIC_ICR ) & APIC_ICR_BUSY)
53 cpu_relax();
54}
55 53
56static inline void ack_APIC_irq(void) 54static inline void ack_APIC_irq(void)
57{ 55{
@@ -83,7 +81,7 @@ extern void setup_secondary_APIC_clock (void);
83extern int APIC_init_uniprocessor (void); 81extern int APIC_init_uniprocessor (void);
84extern void disable_APIC_timer(void); 82extern void disable_APIC_timer(void);
85extern void enable_APIC_timer(void); 83extern void enable_APIC_timer(void);
86extern void clustered_apic_check(void); 84extern void setup_apic_routing(void);
87 85
88extern void setup_APIC_extened_lvt(unsigned char lvt_off, unsigned char vector, 86extern void setup_APIC_extened_lvt(unsigned char lvt_off, unsigned char vector,
89 unsigned char msg_type, unsigned char mask); 87 unsigned char msg_type, unsigned char mask);
diff --git a/include/asm-x86_64/bugs.h b/include/asm-x86_64/bugs.h
index d86c5dd689fa..b33dc04d8f42 100644
--- a/include/asm-x86_64/bugs.h
+++ b/include/asm-x86_64/bugs.h
@@ -1,28 +1,6 @@
1/* 1#ifndef _ASM_X86_64_BUGS_H
2 * include/asm-x86_64/bugs.h 2#define _ASM_X86_64_BUGS_H
3 *
4 * Copyright (C) 1994 Linus Torvalds
5 * Copyright (C) 2000 SuSE
6 *
7 * This is included by init/main.c to check for architecture-dependent bugs.
8 *
9 * Needs:
10 * void check_bugs(void);
11 */
12 3
13#include <asm/processor.h> 4void check_bugs(void);
14#include <asm/i387.h>
15#include <asm/msr.h>
16#include <asm/pda.h>
17 5
18extern void alternative_instructions(void); 6#endif /* _ASM_X86_64_BUGS_H */
19
20static void __init check_bugs(void)
21{
22 identify_cpu(&boot_cpu_data);
23#if !defined(CONFIG_SMP)
24 printk("CPU: ");
25 print_cpu_info(&boot_cpu_data);
26#endif
27 alternative_instructions();
28}
diff --git a/include/asm-x86_64/const.h b/include/asm-x86_64/const.h
new file mode 100644
index 000000000000..54fb08f3db9b
--- /dev/null
+++ b/include/asm-x86_64/const.h
@@ -0,0 +1,20 @@
1/* const.h: Macros for dealing with constants. */
2
3#ifndef _X86_64_CONST_H
4#define _X86_64_CONST_H
5
6/* Some constant macros are used in both assembler and
7 * C code. Therefore we cannot annotate them always with
8 * 'UL' and other type specifiers unilaterally. We
9 * use the following macros to deal with this.
10 */
11
12#ifdef __ASSEMBLY__
13#define _AC(X,Y) X
14#else
15#define __AC(X,Y) (X##Y)
16#define _AC(X,Y) __AC(X,Y)
17#endif
18
19
20#endif /* !(_X86_64_CONST_H) */
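
The payoff of _AC() is that a single definition serves both the assembler and C. With page.h's definition below, for instance, PAGE_SIZE is usable from .S files and still properly typed in C:

	#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)

	/* in C           : (1UL << 12)  -- unsigned long       */
	/* in __ASSEMBLY__: (1 << 12)    -- the UL suffix drops */
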
diff --git a/include/asm-x86_64/desc.h b/include/asm-x86_64/desc.h
index 913d6ac00033..ac991b5ca0fd 100644
--- a/include/asm-x86_64/desc.h
+++ b/include/asm-x86_64/desc.h
@@ -107,16 +107,6 @@ static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
107 DESC_LDT, size * 8 - 1); 107 DESC_LDT, size * 8 - 1);
108} 108}
109 109
110static inline void set_seg_base(unsigned cpu, int entry, void *base)
111{
112 struct desc_struct *d = &cpu_gdt(cpu)[entry];
113 u32 addr = (u32)(u64)base;
114 BUG_ON((u64)base >> 32);
115 d->base0 = addr & 0xffff;
116 d->base1 = (addr >> 16) & 0xff;
117 d->base2 = (addr >> 24) & 0xff;
118}
119
120#define LDT_entry_a(info) \ 110#define LDT_entry_a(info) \
121 ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff)) 111 ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
122/* Don't allow setting of the lm bit. It is useless anyways because 112/* Don't allow setting of the lm bit. It is useless anyways because
@@ -145,16 +135,13 @@ static inline void set_seg_base(unsigned cpu, int entry, void *base)
145 (info)->useable == 0 && \ 135 (info)->useable == 0 && \
146 (info)->lm == 0) 136 (info)->lm == 0)
147 137
148#if TLS_SIZE != 24
149# error update this code.
150#endif
151
152static inline void load_TLS(struct thread_struct *t, unsigned int cpu) 138static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
153{ 139{
140 unsigned int i;
154 u64 *gdt = (u64 *)(cpu_gdt(cpu) + GDT_ENTRY_TLS_MIN); 141 u64 *gdt = (u64 *)(cpu_gdt(cpu) + GDT_ENTRY_TLS_MIN);
155 gdt[0] = t->tls_array[0]; 142
156 gdt[1] = t->tls_array[1]; 143 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
157 gdt[2] = t->tls_array[2]; 144 gdt[i] = t->tls_array[i];
158} 145}
159 146
160/* 147/*
diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h
index d2af227f06d0..6897e2a436e5 100644
--- a/include/asm-x86_64/dma-mapping.h
+++ b/include/asm-x86_64/dma-mapping.h
@@ -52,7 +52,7 @@ struct dma_mapping_ops {
52}; 52};
53 53
54extern dma_addr_t bad_dma_address; 54extern dma_addr_t bad_dma_address;
55extern struct dma_mapping_ops* dma_ops; 55extern const struct dma_mapping_ops* dma_ops;
56extern int iommu_merge; 56extern int iommu_merge;
57 57
58static inline int dma_mapping_error(dma_addr_t dma_addr) 58static inline int dma_mapping_error(dma_addr_t dma_addr)
diff --git a/include/asm-x86_64/fixmap.h b/include/asm-x86_64/fixmap.h
index 1b620db5b9e3..e90e1677531b 100644
--- a/include/asm-x86_64/fixmap.h
+++ b/include/asm-x86_64/fixmap.h
@@ -15,7 +15,6 @@
15#include <asm/apicdef.h> 15#include <asm/apicdef.h>
16#include <asm/page.h> 16#include <asm/page.h>
17#include <asm/vsyscall.h> 17#include <asm/vsyscall.h>
18#include <asm/vsyscall32.h>
19 18
20/* 19/*
21 * Here we define all the compile-time 'special' virtual 20 * Here we define all the compile-time 'special' virtual
diff --git a/include/asm-x86_64/genapic.h b/include/asm-x86_64/genapic.h
index b80f4bb5f273..d7e516ccbaa4 100644
--- a/include/asm-x86_64/genapic.h
+++ b/include/asm-x86_64/genapic.h
@@ -29,7 +29,9 @@ struct genapic {
29 unsigned int (*phys_pkg_id)(int index_msb); 29 unsigned int (*phys_pkg_id)(int index_msb);
30}; 30};
31 31
32extern struct genapic *genapic;
32 33
33extern struct genapic *genapic, *genapic_force, apic_flat; 34extern struct genapic apic_flat;
35extern struct genapic apic_physflat;
34 36
35#endif 37#endif
diff --git a/include/asm-x86_64/ipi.h b/include/asm-x86_64/ipi.h
index 2a5c162b7d92..a7c75ea408a8 100644
--- a/include/asm-x86_64/ipi.h
+++ b/include/asm-x86_64/ipi.h
@@ -18,10 +18,8 @@
18 * Subject to the GNU Public License, v.2 18 * Subject to the GNU Public License, v.2
19 */ 19 */
20 20
21#include <asm/fixmap.h>
22#include <asm/hw_irq.h> 21#include <asm/hw_irq.h>
23#include <asm/apicdef.h> 22#include <asm/apic.h>
24#include <asm/genapic.h>
25 23
26/* 24/*
27 * the following functions deal with sending IPIs between CPUs. 25 * the following functions deal with sending IPIs between CPUs.
@@ -76,10 +74,42 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, unsign
76 apic_write(APIC_ICR, cfg); 74 apic_write(APIC_ICR, cfg);
77} 75}
78 76
77/*
78 * This is used to send an IPI with no shorthand notation (the destination is
79 * specified in bits 56 to 63 of the ICR).
80 */
81static inline void __send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
82{
83 unsigned long cfg;
84
85 /*
86 * Wait for idle.
87 */
88 if (unlikely(vector == NMI_VECTOR))
89 safe_apic_wait_icr_idle();
90 else
91 apic_wait_icr_idle();
92
93 /*
94 * prepare target chip field
95 */
96 cfg = __prepare_ICR2(mask);
97 apic_write(APIC_ICR2, cfg);
98
99 /*
100 * program the ICR
101 */
102 cfg = __prepare_ICR(0, vector, dest);
103
104 /*
105 * Send the IPI. The write to APIC_ICR fires this off.
106 */
107 apic_write(APIC_ICR, cfg);
108}
79 109
80static inline void send_IPI_mask_sequence(cpumask_t mask, int vector) 110static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
81{ 111{
82 unsigned long cfg, flags; 112 unsigned long flags;
83 unsigned long query_cpu; 113 unsigned long query_cpu;
84 114
85 /* 115 /*
@@ -88,28 +118,9 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
88 * - mbligh 118 * - mbligh
89 */ 119 */
90 local_irq_save(flags); 120 local_irq_save(flags);
91
92 for_each_cpu_mask(query_cpu, mask) { 121 for_each_cpu_mask(query_cpu, mask) {
93 /* 122 __send_IPI_dest_field(x86_cpu_to_apicid[query_cpu],
94 * Wait for idle. 123 vector, APIC_DEST_PHYSICAL);
95 */
96 apic_wait_icr_idle();
97
98 /*
99 * prepare target chip field
100 */
101 cfg = __prepare_ICR2(x86_cpu_to_apicid[query_cpu]);
102 apic_write(APIC_ICR2, cfg);
103
104 /*
105 * program the ICR
106 */
107 cfg = __prepare_ICR(0, vector, APIC_DEST_PHYSICAL);
108
109 /*
110 * Send the IPI. The write to APIC_ICR fires this off.
111 */
112 apic_write(APIC_ICR, cfg);
113 } 124 }
114 local_irq_restore(flags); 125 local_irq_restore(flags);
115} 126}
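
The refactor above hoists the wait/program/fire sequence out of send_IPI_mask_sequence() into __send_IPI_dest_field(), which also lets NMI senders take the fault-tolerant safe_apic_wait_icr_idle() path. Sending a single directed IPI then reduces to roughly (cpu and vector assumed set by the caller):

	/* Sketch: one vector to one CPU, addressed by physical APIC ID. */
	unsigned long flags;

	local_irq_save(flags);
	__send_IPI_dest_field(x86_cpu_to_apicid[cpu], vector,
			      APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
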
diff --git a/include/asm-x86_64/irqflags.h b/include/asm-x86_64/irqflags.h
index cce6937e87c0..86e70fe23659 100644
--- a/include/asm-x86_64/irqflags.h
+++ b/include/asm-x86_64/irqflags.h
@@ -9,6 +9,7 @@
9 */ 9 */
10#ifndef _ASM_IRQFLAGS_H 10#ifndef _ASM_IRQFLAGS_H
11#define _ASM_IRQFLAGS_H 11#define _ASM_IRQFLAGS_H
12#include <asm/processor-flags.h>
12 13
13#ifndef __ASSEMBLY__ 14#ifndef __ASSEMBLY__
14/* 15/*
@@ -53,19 +54,19 @@ static inline void raw_local_irq_disable(void)
53{ 54{
54 unsigned long flags = __raw_local_save_flags(); 55 unsigned long flags = __raw_local_save_flags();
55 56
56 raw_local_irq_restore((flags & ~(1 << 9)) | (1 << 18)); 57 raw_local_irq_restore((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
57} 58}
58 59
59static inline void raw_local_irq_enable(void) 60static inline void raw_local_irq_enable(void)
60{ 61{
61 unsigned long flags = __raw_local_save_flags(); 62 unsigned long flags = __raw_local_save_flags();
62 63
63 raw_local_irq_restore((flags | (1 << 9)) & ~(1 << 18)); 64 raw_local_irq_restore((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
64} 65}
65 66
66static inline int raw_irqs_disabled_flags(unsigned long flags) 67static inline int raw_irqs_disabled_flags(unsigned long flags)
67{ 68{
68 return !(flags & (1<<9)) || (flags & (1 << 18)); 69 return !(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC);
69} 70}
70 71
71#else /* CONFIG_X86_VSMP */ 72#else /* CONFIG_X86_VSMP */
@@ -82,7 +83,7 @@ static inline void raw_local_irq_enable(void)
82 83
83static inline int raw_irqs_disabled_flags(unsigned long flags) 84static inline int raw_irqs_disabled_flags(unsigned long flags)
84{ 85{
85 return !(flags & (1 << 9)); 86 return !(flags & X86_EFLAGS_IF);
86} 87}
87 88
88#endif 89#endif
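
Under CONFIG_X86_VSMP the kernel reuses EFLAGS.AC as a second interrupt-disable bit, so "disabled" means IF clear or AC set; the rewrite above only swaps the magic (1 << 9)/(1 << 18) constants for their processor-flags.h names. A worked check:

	/* Sketch: expected results on a CONFIG_X86_VSMP build. */
	raw_irqs_disabled_flags(0);				/* 1: IF clear */
	raw_irqs_disabled_flags(X86_EFLAGS_IF | X86_EFLAGS_AC);	/* 1: AC set   */
	raw_irqs_disabled_flags(X86_EFLAGS_IF);			/* 0: enabled  */
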
diff --git a/include/asm-x86_64/mmu_context.h b/include/asm-x86_64/mmu_context.h
index af03b9f852d6..0cce83a78378 100644
--- a/include/asm-x86_64/mmu_context.h
+++ b/include/asm-x86_64/mmu_context.h
@@ -7,6 +7,7 @@
7#include <asm/pda.h> 7#include <asm/pda.h>
8#include <asm/pgtable.h> 8#include <asm/pgtable.h>
9#include <asm/tlbflush.h> 9#include <asm/tlbflush.h>
10#include <asm-generic/mm_hooks.h>
10 11
11/* 12/*
12 * possibly do the LDT unload here? 13 * possibly do the LDT unload here?
diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h
index fb558fb1d211..19a89377b123 100644
--- a/include/asm-x86_64/mmzone.h
+++ b/include/asm-x86_64/mmzone.h
@@ -49,7 +49,7 @@ extern int pfn_valid(unsigned long pfn);
49 49
50#ifdef CONFIG_NUMA_EMU 50#ifdef CONFIG_NUMA_EMU
51#define FAKE_NODE_MIN_SIZE (64*1024*1024) 51#define FAKE_NODE_MIN_SIZE (64*1024*1024)
52#define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1ul)) 52#define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1uL))
53#endif 53#endif
54 54
55#endif 55#endif
diff --git a/include/asm-x86_64/msr-index.h b/include/asm-x86_64/msr-index.h
new file mode 100644
index 000000000000..d77a63f1ddf2
--- /dev/null
+++ b/include/asm-x86_64/msr-index.h
@@ -0,0 +1 @@
#include <asm-i386/msr-index.h>
diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h
index 902f9a58617e..a524f0325673 100644
--- a/include/asm-x86_64/msr.h
+++ b/include/asm-x86_64/msr.h
@@ -1,6 +1,8 @@
1#ifndef X86_64_MSR_H 1#ifndef X86_64_MSR_H
2#define X86_64_MSR_H 1 2#define X86_64_MSR_H 1
3 3
4#include <asm/msr-index.h>
5
4#ifndef __ASSEMBLY__ 6#ifndef __ASSEMBLY__
5/* 7/*
6 * Access to machine-specific registers (available on 586 and better only) 8 * Access to machine-specific registers (available on 586 and better only)
@@ -157,9 +159,6 @@ static inline unsigned int cpuid_edx(unsigned int op)
157 return edx; 159 return edx;
158} 160}
159 161
160#define MSR_IA32_UCODE_WRITE 0x79
161#define MSR_IA32_UCODE_REV 0x8b
162
163#ifdef CONFIG_SMP 162#ifdef CONFIG_SMP
164void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); 163void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
165void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); 164void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
@@ -172,269 +171,6 @@ static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
172{ 171{
173 wrmsr(msr_no, l, h); 172 wrmsr(msr_no, l, h);
174} 173}
175#endif /* CONFIG_SMP */ 174#endif /* CONFIG_SMP */
176 175#endif /* __ASSEMBLY__ */
177#endif 176#endif /* X86_64_MSR_H */
178
179/* AMD/K8 specific MSRs */
180#define MSR_EFER 0xc0000080 /* extended feature register */
181#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */
182#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */
183#define MSR_CSTAR 0xc0000083 /* compatibility mode SYSCALL target */
184#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */
185#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */
186#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */
187#define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow (or USER_GS from kernel) */
188/* EFER bits: */
189#define _EFER_SCE 0 /* SYSCALL/SYSRET */
190#define _EFER_LME 8 /* Long mode enable */
191#define _EFER_LMA 10 /* Long mode active (read-only) */
192#define _EFER_NX 11 /* No execute enable */
193
194#define EFER_SCE (1<<_EFER_SCE)
195#define EFER_LME (1<<_EFER_LME)
196#define EFER_LMA (1<<_EFER_LMA)
197#define EFER_NX (1<<_EFER_NX)
198
199/* Intel MSRs. Some also available on other CPUs */
200#define MSR_IA32_TSC 0x10
201#define MSR_IA32_PLATFORM_ID 0x17
202
203#define MSR_IA32_PERFCTR0 0xc1
204#define MSR_IA32_PERFCTR1 0xc2
205#define MSR_FSB_FREQ 0xcd
206
207#define MSR_MTRRcap 0x0fe
208#define MSR_IA32_BBL_CR_CTL 0x119
209
210#define MSR_IA32_SYSENTER_CS 0x174
211#define MSR_IA32_SYSENTER_ESP 0x175
212#define MSR_IA32_SYSENTER_EIP 0x176
213
214#define MSR_IA32_MCG_CAP 0x179
215#define MSR_IA32_MCG_STATUS 0x17a
216#define MSR_IA32_MCG_CTL 0x17b
217
218#define MSR_IA32_EVNTSEL0 0x186
219#define MSR_IA32_EVNTSEL1 0x187
220
221#define MSR_IA32_DEBUGCTLMSR 0x1d9
222#define MSR_IA32_LASTBRANCHFROMIP 0x1db
223#define MSR_IA32_LASTBRANCHTOIP 0x1dc
224#define MSR_IA32_LASTINTFROMIP 0x1dd
225#define MSR_IA32_LASTINTTOIP 0x1de
226
227#define MSR_IA32_PEBS_ENABLE 0x3f1
228#define MSR_IA32_DS_AREA 0x600
229#define MSR_IA32_PERF_CAPABILITIES 0x345
230
231#define MSR_MTRRfix64K_00000 0x250
232#define MSR_MTRRfix16K_80000 0x258
233#define MSR_MTRRfix16K_A0000 0x259
234#define MSR_MTRRfix4K_C0000 0x268
235#define MSR_MTRRfix4K_C8000 0x269
236#define MSR_MTRRfix4K_D0000 0x26a
237#define MSR_MTRRfix4K_D8000 0x26b
238#define MSR_MTRRfix4K_E0000 0x26c
239#define MSR_MTRRfix4K_E8000 0x26d
240#define MSR_MTRRfix4K_F0000 0x26e
241#define MSR_MTRRfix4K_F8000 0x26f
242#define MSR_MTRRdefType 0x2ff
243
244#define MSR_IA32_MC0_CTL 0x400
245#define MSR_IA32_MC0_STATUS 0x401
246#define MSR_IA32_MC0_ADDR 0x402
247#define MSR_IA32_MC0_MISC 0x403
248
249#define MSR_P6_PERFCTR0 0xc1
250#define MSR_P6_PERFCTR1 0xc2
251#define MSR_P6_EVNTSEL0 0x186
252#define MSR_P6_EVNTSEL1 0x187
253
254/* K7/K8 MSRs. Not complete. See the architecture manual for a more complete list. */
255#define MSR_K7_EVNTSEL0 0xC0010000
256#define MSR_K7_PERFCTR0 0xC0010004
257#define MSR_K7_EVNTSEL1 0xC0010001
258#define MSR_K7_PERFCTR1 0xC0010005
259#define MSR_K7_EVNTSEL2 0xC0010002
260#define MSR_K7_PERFCTR2 0xC0010006
261#define MSR_K7_EVNTSEL3 0xC0010003
262#define MSR_K7_PERFCTR3 0xC0010007
263#define MSR_K8_TOP_MEM1 0xC001001A
264#define MSR_K8_TOP_MEM2 0xC001001D
265#define MSR_K8_SYSCFG 0xC0010010
266#define MSR_K8_HWCR 0xC0010015
267
268/* K6 MSRs */
269#define MSR_K6_EFER 0xC0000080
270#define MSR_K6_STAR 0xC0000081
271#define MSR_K6_WHCR 0xC0000082
272#define MSR_K6_UWCCR 0xC0000085
273#define MSR_K6_PSOR 0xC0000087
274#define MSR_K6_PFIR 0xC0000088
275
276/* Centaur-Hauls/IDT defined MSRs. */
277#define MSR_IDT_FCR1 0x107
278#define MSR_IDT_FCR2 0x108
279#define MSR_IDT_FCR3 0x109
280#define MSR_IDT_FCR4 0x10a
281
282#define MSR_IDT_MCR0 0x110
283#define MSR_IDT_MCR1 0x111
284#define MSR_IDT_MCR2 0x112
285#define MSR_IDT_MCR3 0x113
+#define MSR_IDT_MCR4 0x114
+#define MSR_IDT_MCR5 0x115
+#define MSR_IDT_MCR6 0x116
+#define MSR_IDT_MCR7 0x117
+#define MSR_IDT_MCR_CTRL 0x120
+
+/* VIA Cyrix defined MSRs*/
+#define MSR_VIA_FCR 0x1107
+#define MSR_VIA_LONGHAUL 0x110a
+#define MSR_VIA_RNG 0x110b
+#define MSR_VIA_BCR2 0x1147
+
+/* Intel defined MSRs. */
+#define MSR_IA32_P5_MC_ADDR 0
+#define MSR_IA32_P5_MC_TYPE 1
+#define MSR_IA32_PLATFORM_ID 0x17
+#define MSR_IA32_EBL_CR_POWERON 0x2a
+
+#define MSR_IA32_APICBASE 0x1b
+#define MSR_IA32_APICBASE_BSP (1<<8)
+#define MSR_IA32_APICBASE_ENABLE (1<<11)
+#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
+
+/* P4/Xeon+ specific */
+#define MSR_IA32_MCG_EAX 0x180
+#define MSR_IA32_MCG_EBX 0x181
+#define MSR_IA32_MCG_ECX 0x182
+#define MSR_IA32_MCG_EDX 0x183
+#define MSR_IA32_MCG_ESI 0x184
+#define MSR_IA32_MCG_EDI 0x185
+#define MSR_IA32_MCG_EBP 0x186
+#define MSR_IA32_MCG_ESP 0x187
+#define MSR_IA32_MCG_EFLAGS 0x188
+#define MSR_IA32_MCG_EIP 0x189
+#define MSR_IA32_MCG_RESERVED 0x18A
+
+#define MSR_P6_EVNTSEL0 0x186
+#define MSR_P6_EVNTSEL1 0x187
+
+#define MSR_IA32_PERF_STATUS 0x198
+#define MSR_IA32_PERF_CTL 0x199
+
+#define MSR_IA32_MPERF 0xE7
+#define MSR_IA32_APERF 0xE8
+
+#define MSR_IA32_THERM_CONTROL 0x19a
+#define MSR_IA32_THERM_INTERRUPT 0x19b
+#define MSR_IA32_THERM_STATUS 0x19c
+#define MSR_IA32_MISC_ENABLE 0x1a0
+
+#define MSR_IA32_DEBUGCTLMSR 0x1d9
+#define MSR_IA32_LASTBRANCHFROMIP 0x1db
+#define MSR_IA32_LASTBRANCHTOIP 0x1dc
+#define MSR_IA32_LASTINTFROMIP 0x1dd
+#define MSR_IA32_LASTINTTOIP 0x1de
+
+#define MSR_IA32_MC0_CTL 0x400
+#define MSR_IA32_MC0_STATUS 0x401
+#define MSR_IA32_MC0_ADDR 0x402
+#define MSR_IA32_MC0_MISC 0x403
+
+/* Pentium IV performance counter MSRs */
+#define MSR_P4_BPU_PERFCTR0 0x300
+#define MSR_P4_BPU_PERFCTR1 0x301
+#define MSR_P4_BPU_PERFCTR2 0x302
+#define MSR_P4_BPU_PERFCTR3 0x303
+#define MSR_P4_MS_PERFCTR0 0x304
+#define MSR_P4_MS_PERFCTR1 0x305
+#define MSR_P4_MS_PERFCTR2 0x306
+#define MSR_P4_MS_PERFCTR3 0x307
+#define MSR_P4_FLAME_PERFCTR0 0x308
+#define MSR_P4_FLAME_PERFCTR1 0x309
+#define MSR_P4_FLAME_PERFCTR2 0x30a
+#define MSR_P4_FLAME_PERFCTR3 0x30b
+#define MSR_P4_IQ_PERFCTR0 0x30c
+#define MSR_P4_IQ_PERFCTR1 0x30d
+#define MSR_P4_IQ_PERFCTR2 0x30e
+#define MSR_P4_IQ_PERFCTR3 0x30f
+#define MSR_P4_IQ_PERFCTR4 0x310
+#define MSR_P4_IQ_PERFCTR5 0x311
+#define MSR_P4_BPU_CCCR0 0x360
+#define MSR_P4_BPU_CCCR1 0x361
+#define MSR_P4_BPU_CCCR2 0x362
+#define MSR_P4_BPU_CCCR3 0x363
+#define MSR_P4_MS_CCCR0 0x364
+#define MSR_P4_MS_CCCR1 0x365
+#define MSR_P4_MS_CCCR2 0x366
+#define MSR_P4_MS_CCCR3 0x367
+#define MSR_P4_FLAME_CCCR0 0x368
+#define MSR_P4_FLAME_CCCR1 0x369
+#define MSR_P4_FLAME_CCCR2 0x36a
+#define MSR_P4_FLAME_CCCR3 0x36b
+#define MSR_P4_IQ_CCCR0 0x36c
+#define MSR_P4_IQ_CCCR1 0x36d
+#define MSR_P4_IQ_CCCR2 0x36e
+#define MSR_P4_IQ_CCCR3 0x36f
+#define MSR_P4_IQ_CCCR4 0x370
+#define MSR_P4_IQ_CCCR5 0x371
+#define MSR_P4_ALF_ESCR0 0x3ca
+#define MSR_P4_ALF_ESCR1 0x3cb
+#define MSR_P4_BPU_ESCR0 0x3b2
+#define MSR_P4_BPU_ESCR1 0x3b3
+#define MSR_P4_BSU_ESCR0 0x3a0
+#define MSR_P4_BSU_ESCR1 0x3a1
+#define MSR_P4_CRU_ESCR0 0x3b8
+#define MSR_P4_CRU_ESCR1 0x3b9
+#define MSR_P4_CRU_ESCR2 0x3cc
+#define MSR_P4_CRU_ESCR3 0x3cd
+#define MSR_P4_CRU_ESCR4 0x3e0
+#define MSR_P4_CRU_ESCR5 0x3e1
+#define MSR_P4_DAC_ESCR0 0x3a8
+#define MSR_P4_DAC_ESCR1 0x3a9
+#define MSR_P4_FIRM_ESCR0 0x3a4
+#define MSR_P4_FIRM_ESCR1 0x3a5
+#define MSR_P4_FLAME_ESCR0 0x3a6
+#define MSR_P4_FLAME_ESCR1 0x3a7
+#define MSR_P4_FSB_ESCR0 0x3a2
+#define MSR_P4_FSB_ESCR1 0x3a3
+#define MSR_P4_IQ_ESCR0 0x3ba
+#define MSR_P4_IQ_ESCR1 0x3bb
+#define MSR_P4_IS_ESCR0 0x3b4
+#define MSR_P4_IS_ESCR1 0x3b5
+#define MSR_P4_ITLB_ESCR0 0x3b6
+#define MSR_P4_ITLB_ESCR1 0x3b7
+#define MSR_P4_IX_ESCR0 0x3c8
+#define MSR_P4_IX_ESCR1 0x3c9
+#define MSR_P4_MOB_ESCR0 0x3aa
+#define MSR_P4_MOB_ESCR1 0x3ab
+#define MSR_P4_MS_ESCR0 0x3c0
+#define MSR_P4_MS_ESCR1 0x3c1
+#define MSR_P4_PMH_ESCR0 0x3ac
+#define MSR_P4_PMH_ESCR1 0x3ad
+#define MSR_P4_RAT_ESCR0 0x3bc
+#define MSR_P4_RAT_ESCR1 0x3bd
+#define MSR_P4_SAAT_ESCR0 0x3ae
+#define MSR_P4_SAAT_ESCR1 0x3af
+#define MSR_P4_SSU_ESCR0 0x3be
+#define MSR_P4_SSU_ESCR1 0x3bf /* guess: not defined in manual */
+#define MSR_P4_TBPU_ESCR0 0x3c2
+#define MSR_P4_TBPU_ESCR1 0x3c3
+#define MSR_P4_TC_ESCR0 0x3c4
+#define MSR_P4_TC_ESCR1 0x3c5
+#define MSR_P4_U2L_ESCR0 0x3b0
+#define MSR_P4_U2L_ESCR1 0x3b1
+
+/* Intel Core-based CPU performance counters */
+#define MSR_CORE_PERF_FIXED_CTR0 0x309
+#define MSR_CORE_PERF_FIXED_CTR1 0x30a
+#define MSR_CORE_PERF_FIXED_CTR2 0x30b
+#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d
+#define MSR_CORE_PERF_GLOBAL_STATUS 0x38e
+#define MSR_CORE_PERF_GLOBAL_CTRL 0x38f
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390
+
+#endif
diff --git a/include/asm-x86_64/mtrr.h b/include/asm-x86_64/mtrr.h
index d6135b2549bf..b557c486bef8 100644
--- a/include/asm-x86_64/mtrr.h
+++ b/include/asm-x86_64/mtrr.h
@@ -135,6 +135,18 @@ struct mtrr_gentry32
 
 #endif /* CONFIG_COMPAT */
 
+#ifdef CONFIG_MTRR
+extern void mtrr_ap_init(void);
+extern void mtrr_bp_init(void);
+extern void mtrr_save_fixed_ranges(void *);
+extern void mtrr_save_state(void);
+#else
+#define mtrr_ap_init() do {} while (0)
+#define mtrr_bp_init() do {} while (0)
+#define mtrr_save_fixed_ranges(arg) do {} while (0)
+#define mtrr_save_state() do {} while (0)
+#endif
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_MTRR_H */
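The CONFIG_MTRR block added above is the standard kernel idiom for optional subsystems: real prototypes when the option is on, empty do {} while (0) stubs when it is off, so call sites stay free of #ifdef clutter. A minimal, self-contained sketch of the idiom (hypothetical names, not part of this patch):

	#include <stdio.h>

	#define CONFIG_FOO 1	/* comment out to emulate the option being disabled */

	#ifdef CONFIG_FOO
	void foo_init(void);			/* real implementation elsewhere */
	#else
	/* do {} while (0) soaks up the trailing semicolon safely,
	 * even as the body of an unbraced if/else. */
	#define foo_init() do {} while (0)
	#endif

	#ifdef CONFIG_FOO
	void foo_init(void) { puts("foo initialized"); }
	#endif

	int main(void)
	{
		foo_init();	/* compiles either way; a no-op when disabled */
		return 0;
	}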
diff --git a/include/asm-x86_64/nmi.h b/include/asm-x86_64/nmi.h
index 72375e7d32a8..d0a7f53b1497 100644
--- a/include/asm-x86_64/nmi.h
+++ b/include/asm-x86_64/nmi.h
@@ -80,4 +80,13 @@ extern int unknown_nmi_panic;
 void __trigger_all_cpu_backtrace(void);
 #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
 
+
+void lapic_watchdog_stop(void);
+int lapic_watchdog_init(unsigned nmi_hz);
+int lapic_wd_event(unsigned nmi_hz);
+unsigned lapic_adjust_nmi_hz(unsigned hz);
+int lapic_watchdog_ok(void);
+void disable_lapic_nmi_watchdog(void);
+void enable_lapic_nmi_watchdog(void);
+
 #endif /* ASM_NMI_H */
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index 10f346165cab..b17fc16ec2eb 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -1,14 +1,11 @@
 #ifndef _X86_64_PAGE_H
 #define _X86_64_PAGE_H
 
+#include <asm/const.h>
 
 /* PAGE_SHIFT determines the page size */
 #define PAGE_SHIFT 12
-#ifdef __ASSEMBLY__
-#define PAGE_SIZE (0x1 << PAGE_SHIFT)
-#else
-#define PAGE_SIZE (1UL << PAGE_SHIFT)
-#endif
+#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
 #define PAGE_MASK (~(PAGE_SIZE-1))
 #define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK)
 
@@ -33,10 +30,10 @@
 #define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
 
 #define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
-#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
+#define LARGE_PAGE_SIZE (_AC(1,UL) << PMD_SHIFT)
 
 #define HPAGE_SHIFT PMD_SHIFT
-#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
+#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
 #define HPAGE_MASK (~(HPAGE_SIZE - 1))
 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
 
@@ -64,6 +61,8 @@ typedef struct { unsigned long pgd; } pgd_t;
 
 typedef struct { unsigned long pgprot; } pgprot_t;
 
+extern unsigned long phys_base;
+
 #define pte_val(x) ((x).pte)
 #define pmd_val(x) ((x).pmd)
 #define pud_val(x) ((x).pud)
@@ -76,29 +75,25 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 #define __pgd(x) ((pgd_t) { (x) } )
 #define __pgprot(x) ((pgprot_t) { (x) } )
 
-#define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START)
-#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
-#define __START_KERNEL_map 0xffffffff80000000UL
-#define __PAGE_OFFSET 0xffff810000000000UL
+#endif /* !__ASSEMBLY__ */
 
-#else
 #define __PHYSICAL_START CONFIG_PHYSICAL_START
+#define __KERNEL_ALIGN 0x200000
 #define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
 #define __START_KERNEL_map 0xffffffff80000000
 #define __PAGE_OFFSET 0xffff810000000000
-#endif /* !__ASSEMBLY__ */
 
 /* to align the pointer to the (next) page boundary */
 #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
 
 /* See Documentation/x86_64/mm.txt for a description of the memory map. */
 #define __PHYSICAL_MASK_SHIFT 46
-#define __PHYSICAL_MASK ((1UL << __PHYSICAL_MASK_SHIFT) - 1)
+#define __PHYSICAL_MASK ((_AC(1,UL) << __PHYSICAL_MASK_SHIFT) - 1)
 #define __VIRTUAL_MASK_SHIFT 48
-#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
+#define __VIRTUAL_MASK ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - 1)
 
-#define KERNEL_TEXT_SIZE (40UL*1024*1024)
-#define KERNEL_TEXT_START 0xffffffff80000000UL
+#define KERNEL_TEXT_SIZE (40*1024*1024)
+#define KERNEL_TEXT_START 0xffffffff80000000
 
 #ifndef __ASSEMBLY__
 
@@ -106,21 +101,19 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 
 #endif /* __ASSEMBLY__ */
 
-#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
+#define PAGE_OFFSET __PAGE_OFFSET
 
 /* Note: __pa(&symbol_visible_to_c) should be always replaced with __pa_symbol.
    Otherwise you risk miscompilation. */
-#define __pa(x) (((unsigned long)(x)>=__START_KERNEL_map)?(unsigned long)(x) - (unsigned long)__START_KERNEL_map:(unsigned long)(x) - PAGE_OFFSET)
+#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
 /* __pa_symbol should be used for C visible symbols.
    This seems to be the official gcc blessed way to do such arithmetic. */
 #define __pa_symbol(x) \
 	({unsigned long v;  \
 	  asm("" : "=r" (v) : "0" (x)); \
-	  __pa(v); })
+	  ((v - __START_KERNEL_map) + phys_base); })
 
 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
-#define __boot_va(x) __va(x)
-#define __boot_pa(x) __pa(x)
 #ifdef CONFIG_FLATMEM
 #define pfn_valid(pfn) ((pfn) < end_pfn)
 #endif
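Several hunks above replace plain 1UL with _AC(1,UL) from the newly included <asm/const.h>. The motivation: a UL suffix is C syntax that the assembler rejects, so constants like PAGE_SIZE previously needed separate #ifdef __ASSEMBLY__ variants. _AC pastes the suffix on only when compiling C. A sketch of the idea (not a verbatim copy of the header):

	/* Emit just "X" for the assembler, "XY" (e.g. 1UL) for C. */
	#ifdef __ASSEMBLY__
	#define _AC(X,Y)	X
	#else
	#define __AC(X,Y)	(X##Y)
	#define _AC(X,Y)	__AC(X,Y)
	#endif

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)	/* usable from .c and .S */

This is why the #ifdef __ASSEMBLY__ split around PAGE_SIZE in the first hunk could be deleted outright.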
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
index 5ed0ef340842..c6fbb67eac90 100644
--- a/include/asm-x86_64/percpu.h
+++ b/include/asm-x86_64/percpu.h
@@ -11,16 +11,6 @@
 
 #include <asm/pda.h>
 
-#ifdef CONFIG_MODULES
-# define PERCPU_MODULE_RESERVE 8192
-#else
-# define PERCPU_MODULE_RESERVE 0
-#endif
-
-#define PERCPU_ENOUGH_ROOM \
-	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
-	 PERCPU_MODULE_RESERVE)
-
 #define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
 #define __my_cpu_offset() read_pda(data_offset)
 
diff --git a/include/asm-x86_64/pgalloc.h b/include/asm-x86_64/pgalloc.h
index 4e28b6060a5e..8bb564687860 100644
--- a/include/asm-x86_64/pgalloc.h
+++ b/include/asm-x86_64/pgalloc.h
@@ -1,7 +1,6 @@
 #ifndef _X86_64_PGALLOC_H
 #define _X86_64_PGALLOC_H
 
-#include <asm/fixmap.h>
 #include <asm/pda.h>
 #include <linux/threads.h>
 #include <linux/mm.h>
@@ -45,24 +44,16 @@ static inline void pgd_list_add(pgd_t *pgd)
 	struct page *page = virt_to_page(pgd);
 
 	spin_lock(&pgd_lock);
-	page->index = (pgoff_t)pgd_list;
-	if (pgd_list)
-		pgd_list->private = (unsigned long)&page->index;
-	pgd_list = page;
-	page->private = (unsigned long)&pgd_list;
+	list_add(&page->lru, &pgd_list);
 	spin_unlock(&pgd_lock);
 }
 
 static inline void pgd_list_del(pgd_t *pgd)
 {
-	struct page *next, **pprev, *page = virt_to_page(pgd);
+	struct page *page = virt_to_page(pgd);
 
 	spin_lock(&pgd_lock);
-	next = (struct page *)page->index;
-	pprev = (struct page **)page->private;
-	*pprev = next;
-	if (next)
-		next->private = (unsigned long)pprev;
+	list_del(&page->lru);
 	spin_unlock(&pgd_lock);
 }
 
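The pgd_list hunks drop a hand-rolled singly linked list threaded through page->index and page->private in favor of the generic struct list_head, shrinking the add and del paths to one call each. The same pattern in miniature (standalone userspace sketch, not kernel code):

	#include <stddef.h>
	#include <stdio.h>

	struct list_head { struct list_head *next, *prev; };
	#define LIST_HEAD_INIT(name) { &(name), &(name) }

	static void list_add(struct list_head *new, struct list_head *head)
	{
		new->next = head->next;
		new->prev = head;
		head->next->prev = new;
		head->next = new;
	}

	static void list_del(struct list_head *entry)
	{
		entry->prev->next = entry->next;
		entry->next->prev = entry->prev;
	}

	struct page { int id; struct list_head lru; };

	int main(void)
	{
		struct list_head pgd_list = LIST_HEAD_INIT(pgd_list);
		struct page a = { 1 }, b = { 2 };

		list_add(&a.lru, &pgd_list);	/* what pgd_list_add() now does */
		list_add(&b.lru, &pgd_list);
		list_del(&a.lru);		/* what pgd_list_del() now does */

		/* container_of by hand: back from the embedded list_head */
		printf("first page: %d\n",
		       ((struct page *)((char *)pgd_list.next -
					offsetof(struct page, lru)))->id);
		return 0;
	}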
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 730bd6028416..599993f6ba84 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -1,25 +1,25 @@
 #ifndef _X86_64_PGTABLE_H
 #define _X86_64_PGTABLE_H
 
+#include <asm/const.h>
+#ifndef __ASSEMBLY__
+
 /*
  * This file contains the functions and defines necessary to modify and use
  * the x86-64 page table tree.
  */
 #include <asm/processor.h>
-#include <asm/fixmap.h>
 #include <asm/bitops.h>
 #include <linux/threads.h>
 #include <asm/pda.h>
 
 extern pud_t level3_kernel_pgt[512];
-extern pud_t level3_physmem_pgt[512];
 extern pud_t level3_ident_pgt[512];
 extern pmd_t level2_kernel_pgt[512];
 extern pgd_t init_level4_pgt[];
-extern pgd_t boot_level4_pgt[];
 extern unsigned long __supported_pte_mask;
 
-#define swapper_pg_dir init_level4_pgt
+#define swapper_pg_dir ((pgd_t *)NULL)
 
 extern void paging_init(void);
 extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
@@ -29,7 +29,9 @@ extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
  * for zero-mapped memory areas etc..
  */
 extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+#define ZERO_PAGE(vaddr) (pfn_to_page(__pa_symbol(&empty_zero_page) >> PAGE_SHIFT))
+
+#endif /* !__ASSEMBLY__ */
 
 /*
  * PGDIR_SHIFT determines what a top-level page table entry can map
@@ -55,6 +57,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
  */
 #define PTRS_PER_PTE 512
 
+#ifndef __ASSEMBLY__
+
 #define pte_ERROR(e) \
 	printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
 #define pmd_ERROR(e) \
@@ -118,22 +122,23 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
 
 #define pte_pgprot(a) (__pgprot((a).pte & ~PHYSICAL_PAGE_MASK))
 
-#define PMD_SIZE (1UL << PMD_SHIFT)
+#endif /* !__ASSEMBLY__ */
+
+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
 #define PMD_MASK (~(PMD_SIZE-1))
-#define PUD_SIZE (1UL << PUD_SHIFT)
+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
 #define PUD_MASK (~(PUD_SIZE-1))
-#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT)
 #define PGDIR_MASK (~(PGDIR_SIZE-1))
 
 #define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1)
 #define FIRST_USER_ADDRESS 0
 
-#ifndef __ASSEMBLY__
-#define MAXMEM 0x3fffffffffffUL
-#define VMALLOC_START 0xffffc20000000000UL
-#define VMALLOC_END 0xffffe1ffffffffffUL
-#define MODULES_VADDR 0xffffffff88000000UL
-#define MODULES_END 0xfffffffffff00000UL
+#define MAXMEM 0x3fffffffffff
+#define VMALLOC_START 0xffffc20000000000
+#define VMALLOC_END 0xffffe1ffffffffff
+#define MODULES_VADDR 0xffffffff88000000
+#define MODULES_END 0xfffffffffff00000
 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
 
 #define _PAGE_BIT_PRESENT 0
@@ -159,7 +164,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
 #define _PAGE_GLOBAL 0x100 /* Global TLB entry */
 
 #define _PAGE_PROTNONE 0x080 /* If not present */
-#define _PAGE_NX (1UL<<_PAGE_BIT_NX)
+#define _PAGE_NX (_AC(1,UL)<<_PAGE_BIT_NX)
 
 #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
 #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
@@ -221,6 +226,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
 #define __S110 PAGE_SHARED_EXEC
 #define __S111 PAGE_SHARED_EXEC
 
+#ifndef __ASSEMBLY__
+
 static inline unsigned long pgd_bad(pgd_t pgd)
 {
 	return pgd_val(pgd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
@@ -403,11 +410,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
 
 extern spinlock_t pgd_lock;
-extern struct page *pgd_list;
+extern struct list_head pgd_list;
 void vmalloc_sync_all(void);
 
-#endif /* !__ASSEMBLY__ */
-
 extern int kern_addr_valid(unsigned long addr);
 
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
@@ -437,5 +442,6 @@ extern int kern_addr_valid(unsigned long addr);
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 #define __HAVE_ARCH_PTE_SAME
 #include <asm-generic/pgtable.h>
+#endif /* !__ASSEMBLY__ */
 
 #endif /* _X86_64_PGTABLE_H */
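Most of the churn in this file is re-bracketing: only genuine C (prototypes, typedefs, inline functions) stays inside #ifndef __ASSEMBLY__, while bare constants such as PMD_SIZE move outside it so that assembly sources can include the same header. A minimal shared header following that layout might look like this (sketch, hypothetical names):

	/* shared.h - safe to include from both .c and .S files */
	#define STACK_ORDER	2			/* plain tokens: fine in asm */
	#define STACK_SIZE	(_AC(4096,UL) << STACK_ORDER)

	#ifndef __ASSEMBLY__
	/* The assembler cannot parse these, so they stay behind the guard. */
	extern unsigned long init_stack[];
	static inline unsigned long stack_size(void) { return STACK_SIZE; }
	#endif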
diff --git a/include/asm-x86_64/processor-flags.h b/include/asm-x86_64/processor-flags.h
new file mode 100644
index 000000000000..ec99a57b2c6a
--- /dev/null
+++ b/include/asm-x86_64/processor-flags.h
@@ -0,0 +1 @@
+#include <asm-i386/processor-flags.h>
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 76552d72804c..461ffe4c1fcc 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -20,6 +20,7 @@
 #include <asm/percpu.h>
 #include <linux/personality.h>
 #include <linux/cpumask.h>
+#include <asm/processor-flags.h>
 
 #define TF_MASK 0x00000100
 #define IF_MASK 0x00000200
@@ -103,42 +104,6 @@ extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
 extern unsigned short num_cache_leaves;
 
 /*
- * EFLAGS bits
- */
-#define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
-#define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
-#define X86_EFLAGS_AF	0x00000010 /* Auxillary carry Flag */
-#define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
-#define X86_EFLAGS_SF	0x00000080 /* Sign Flag */
-#define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
-#define X86_EFLAGS_IF	0x00000200 /* Interrupt Flag */
-#define X86_EFLAGS_DF	0x00000400 /* Direction Flag */
-#define X86_EFLAGS_OF	0x00000800 /* Overflow Flag */
-#define X86_EFLAGS_IOPL	0x00003000 /* IOPL mask */
-#define X86_EFLAGS_NT	0x00004000 /* Nested Task */
-#define X86_EFLAGS_RF	0x00010000 /* Resume Flag */
-#define X86_EFLAGS_VM	0x00020000 /* Virtual Mode */
-#define X86_EFLAGS_AC	0x00040000 /* Alignment Check */
-#define X86_EFLAGS_VIF	0x00080000 /* Virtual Interrupt Flag */
-#define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
-#define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
-
-/*
- * Intel CPU features in CR4
- */
-#define X86_CR4_VME	0x0001	/* enable vm86 extensions */
-#define X86_CR4_PVI	0x0002	/* virtual interrupts flag enable */
-#define X86_CR4_TSD	0x0004	/* disable time stamp at ipl 3 */
-#define X86_CR4_DE	0x0008	/* enable debugging extensions */
-#define X86_CR4_PSE	0x0010	/* enable page size extensions */
-#define X86_CR4_PAE	0x0020	/* enable physical address extensions */
-#define X86_CR4_MCE	0x0040	/* Machine check enable */
-#define X86_CR4_PGE	0x0080	/* enable global pages */
-#define X86_CR4_PCE	0x0100	/* enable performance counters at ipl 3 */
-#define X86_CR4_OSFXSR	0x0200	/* enable fast FPU save and restore */
-#define X86_CR4_OSXMMEXCPT	0x0400	/* enable unmasked SSE exceptions */
-
-/*
  * Save the cr4 feature set we're using (ie
  * Pentium 4MB enable and PPro Global page
  * enable), so that any CPU's that boot up
@@ -201,7 +166,7 @@ struct i387_fxsave_struct {
 	u32	mxcsr;
 	u32	mxcsr_mask;
 	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
-	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 128 bytes */
+	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
 	u32	padding[24];
 } __attribute__ ((aligned (16)));
 
@@ -427,22 +392,6 @@ static inline void prefetchw(void *x)
 #define cpu_relax() rep_nop()
 
 /*
- * NSC/Cyrix CPU configuration register indexes
- */
-#define CX86_CCR0 0xc0
-#define CX86_CCR1 0xc1
-#define CX86_CCR2 0xc2
-#define CX86_CCR3 0xc3
-#define CX86_CCR4 0xe8
-#define CX86_CCR5 0xe9
-#define CX86_CCR6 0xea
-#define CX86_CCR7 0xeb
-#define CX86_DIR0 0xfe
-#define CX86_DIR1 0xff
-#define CX86_ARR_BASE 0xc4
-#define CX86_RCR_BASE 0xdc
-
-/*
  * NSC/Cyrix CPU indexed register access macros
  */
 
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index b6e65a699f2a..85255db1e82d 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -11,18 +11,9 @@ struct pt_regs;
 extern void start_kernel(void);
 extern void pda_init(int);
 
-extern void zap_low_mappings(int cpu);
-
 extern void early_idt_handler(void);
 
 extern void mcheck_init(struct cpuinfo_x86 *c);
-#ifdef CONFIG_MTRR
-extern void mtrr_ap_init(void);
-extern void mtrr_bp_init(void);
-#else
-#define mtrr_ap_init() do {} while (0)
-#define mtrr_bp_init() do {} while (0)
-#endif
 extern void init_memory_mapping(unsigned long start, unsigned long end);
 
 extern void system_call(void);
@@ -82,7 +73,6 @@ extern void syscall32_cpu_init(void);
 extern void setup_node_bootmem(int nodeid, unsigned long start, unsigned long end);
 
 extern void early_quirks(void);
-extern void quirk_intel_irqbalance(void);
 extern void check_efer(void);
 
 extern int unhandled_signal(struct task_struct *tsk, int sig);
@@ -93,6 +83,7 @@ extern unsigned long table_start, table_end;
 
 extern int exception_trace;
 extern unsigned cpu_khz;
+extern unsigned tsc_khz;
 
 extern void no_iommu_init(void);
 extern int force_iommu, no_iommu;
@@ -121,8 +112,12 @@ extern int gsi_irq_sharing(int gsi);
 
 extern void smp_local_timer_interrupt(void);
 
+extern int force_mwait;
+
 long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
 
+void i8254_timer_resume(void);
+
 #define round_up(x,y) (((x) + (y) - 1) & ~((y)-1))
 #define round_down(x,y) ((x) & ~((y)-1))
 
diff --git a/include/asm-x86_64/segment.h b/include/asm-x86_64/segment.h
index 334ddcdd8f92..adf2bf1e187c 100644
--- a/include/asm-x86_64/segment.h
+++ b/include/asm-x86_64/segment.h
@@ -6,7 +6,7 @@
 #define __KERNEL_CS 0x10
 #define __KERNEL_DS 0x18
 
-#define __KERNEL32_CS 0x38
+#define __KERNEL32_CS 0x08
 
 /*
  * we cannot use the same code segment descriptor for user and kernel
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index de592a408c07..d5704421456b 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -10,10 +10,9 @@
 #include <linux/init.h>
 extern int disable_apic;
 
-#include <asm/fixmap.h>
 #include <asm/mpspec.h>
-#include <asm/io_apic.h>
 #include <asm/apic.h>
+#include <asm/io_apic.h>
 #include <asm/thread_info.h>
 
 #ifdef CONFIG_SMP
@@ -38,7 +37,6 @@ extern void lock_ipi_call_lock(void);
 extern void unlock_ipi_call_lock(void);
 extern int smp_num_siblings;
 extern void smp_send_reschedule(int cpu);
-void smp_stop_cpu(void);
 
 extern cpumask_t cpu_sibling_map[NR_CPUS];
 extern cpumask_t cpu_core_map[NR_CPUS];
diff --git a/include/asm-x86_64/suspend.h b/include/asm-x86_64/suspend.h
index bc7f81715e5e..9c3f8de90d2d 100644
--- a/include/asm-x86_64/suspend.h
+++ b/include/asm-x86_64/suspend.h
@@ -17,6 +17,7 @@ struct saved_context {
 	u16 ds, es, fs, gs, ss;
 	unsigned long gs_base, gs_kernel_base, fs_base;
 	unsigned long cr0, cr2, cr3, cr4, cr8;
+	unsigned long efer;
 	u16 gdt_pad;
 	u16 gdt_limit;
 	unsigned long gdt_base;
@@ -44,12 +45,12 @@ extern unsigned long saved_context_eflags;
 extern void fix_processor_context(void);
 
 #ifdef CONFIG_ACPI_SLEEP
-extern unsigned long saved_eip;
-extern unsigned long saved_esp;
-extern unsigned long saved_ebp;
-extern unsigned long saved_ebx;
-extern unsigned long saved_esi;
-extern unsigned long saved_edi;
+extern unsigned long saved_rip;
+extern unsigned long saved_rsp;
+extern unsigned long saved_rbp;
+extern unsigned long saved_rbx;
+extern unsigned long saved_rsi;
+extern unsigned long saved_rdi;
 
 /* routines for saving/restoring kernel state */
 extern int acpi_save_state_mem(void);
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index bd376bc8c4ab..213b7fe5d998 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -89,6 +89,11 @@ static inline unsigned long read_cr3(void)
 	return cr3;
 }
 
+static inline void write_cr3(unsigned long val)
+{
+	asm volatile("movq %0,%%cr3" :: "r" (val) : "memory");
+}
+
 static inline unsigned long read_cr4(void)
 {
 	unsigned long cr4;
@@ -98,7 +103,7 @@ static inline unsigned long read_cr4(void)
 
 static inline void write_cr4(unsigned long val)
 {
-	asm volatile("movq %0,%%cr4" :: "r" (val));
+	asm volatile("movq %0,%%cr4" :: "r" (val) : "memory");
 }
 
 #define stts() write_cr0(8 | read_cr0())
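Both the new write_cr3() and the amended write_cr4() carry a "memory" clobber. That tells the compiler the asm may read or modify memory behind its back, so cached values must be flushed beforehand and memory accesses must not be moved across the statement; for writes that change address translation, that ordering is the whole point. The clobber alone is a useful tool in its own right (a userspace sketch for GCC-style inline asm):

	static int flag;

	static inline void compiler_barrier(void)
	{
		asm volatile("" ::: "memory");	/* no code, just an ordering fence */
	}

	void publish(void)
	{
		flag = 1;
		compiler_barrier();	/* the store above cannot be deferred past here */
		/* ... later code is guaranteed to see flag == 1 in memory ... */
	}

Note this orders only the compiler, not the CPU; the control-register moves themselves are architecturally serializing on x86, which is why no further fence is needed there.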
diff --git a/include/asm-x86_64/timex.h b/include/asm-x86_64/timex.h
index 8c6808a3fba4..f6527e1b6c1c 100644
--- a/include/asm-x86_64/timex.h
+++ b/include/asm-x86_64/timex.h
@@ -27,6 +27,6 @@ extern int read_current_timer(unsigned long *timer_value);
 #define NS_SCALE	10	/* 2^10, carefully chosen */
 #define US_SCALE	32	/* 2^32, arbitralrily chosen */
 
-extern void mark_tsc_unstable(void);
+extern void mark_tsc_unstable(char *msg);
 extern void set_cyc2ns_scale(unsigned long khz);
 #endif
diff --git a/include/asm-x86_64/tlbflush.h b/include/asm-x86_64/tlbflush.h
index 983bd296c81a..512401b8725f 100644
--- a/include/asm-x86_64/tlbflush.h
+++ b/include/asm-x86_64/tlbflush.h
@@ -3,41 +3,18 @@
 
 #include <linux/mm.h>
 #include <asm/processor.h>
-
-static inline unsigned long get_cr3(void)
-{
-	unsigned long cr3;
-	asm volatile("mov %%cr3,%0" : "=r" (cr3));
-	return cr3;
-}
-
-static inline void set_cr3(unsigned long cr3)
-{
-	asm volatile("mov %0,%%cr3" :: "r" (cr3) : "memory");
-}
+#include <asm/system.h>
 
 static inline void __flush_tlb(void)
 {
-	set_cr3(get_cr3());
-}
-
-static inline unsigned long get_cr4(void)
-{
-	unsigned long cr4;
-	asm volatile("mov %%cr4,%0" : "=r" (cr4));
-	return cr4;
-}
-
-static inline void set_cr4(unsigned long cr4)
-{
-	asm volatile("mov %0,%%cr4" :: "r" (cr4) : "memory");
+	write_cr3(read_cr3());
 }
 
 static inline void __flush_tlb_all(void)
 {
-	unsigned long cr4 = get_cr4();
-	set_cr4(cr4 & ~X86_CR4_PGE);	/* clear PGE */
-	set_cr4(cr4);			/* write old PGE again and flush TLBs */
+	unsigned long cr4 = read_cr4();
+	write_cr4(cr4 & ~X86_CR4_PGE);	/* clear PGE */
+	write_cr4(cr4);			/* write old PGE again and flush TLBs */
 }
 
 #define __flush_tlb_one(addr) \
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index c5f596e71faa..26e23e01c54a 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -620,8 +620,6 @@ __SYSCALL(__NR_vmsplice, sys_vmsplice)
 #define __NR_move_pages		279
 __SYSCALL(__NR_move_pages, sys_move_pages)
 
-#define __NR_syscall_max __NR_move_pages
-
 #ifndef __NO_STUBS
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
@@ -655,7 +653,6 @@ __SYSCALL(__NR_move_pages, sys_move_pages)
 #include <asm/ptrace.h>
 
 asmlinkage long sys_iopl(unsigned int level, struct pt_regs *regs);
-asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on);
 struct sigaction;
 asmlinkage long sys_rt_sigaction(int sig,
 			const struct sigaction __user *act,
diff --git a/include/asm-xtensa/mmu_context.h b/include/asm-xtensa/mmu_context.h
index f14851f086c3..92f948392ebc 100644
--- a/include/asm-xtensa/mmu_context.h
+++ b/include/asm-xtensa/mmu_context.h
@@ -18,6 +18,7 @@
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
+#include <asm-generic/mm_hooks.h>
 
 #define XCHAL_MMU_ASID_BITS	8
 
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 81c07cd18643..0365ec9fc0c9 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -122,9 +122,9 @@ extern void *alloc_large_system_hash(const char *tablename,
 #define HASH_EARLY	0x00000001	/* Allocating during early boot? */
 
 /* Only NUMA needs hash distribution.
- * IA64 is known to have sufficient vmalloc space.
+ * IA64 and x86_64 have sufficient vmalloc space.
  */
-#if defined(CONFIG_NUMA) && defined(CONFIG_IA64)
+#if defined(CONFIG_NUMA) && (defined(CONFIG_IA64) || defined(CONFIG_X86_64))
 #define HASHDIST_DEFAULT 1
 #else
 #define HASHDIST_DEFAULT 0
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 3ec6e7ff5fbd..963051a967d6 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -32,7 +32,15 @@
  *                     CPUFREQ NOTIFIER INTERFACE                    *
  *********************************************************************/
 
+#ifdef CONFIG_CPU_FREQ
 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
+#else
+static inline int cpufreq_register_notifier(struct notifier_block *nb,
+						unsigned int list)
+{
+	return 0;
+}
+#endif
 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
 
 #define CPUFREQ_TRANSITION_NOTIFIER	(0)
@@ -260,17 +268,22 @@ struct freq_attr {
 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
 int cpufreq_update_policy(unsigned int cpu);
 
-/* query the current CPU frequency (in kHz). If zero, cpufreq couldn't detect it */
-unsigned int cpufreq_get(unsigned int cpu);
 
-/* query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it */
+/*
+ * query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it
+ */
 #ifdef CONFIG_CPU_FREQ
 unsigned int cpufreq_quick_get(unsigned int cpu);
+unsigned int cpufreq_get(unsigned int cpu);
 #else
 static inline unsigned int cpufreq_quick_get(unsigned int cpu)
 {
 	return 0;
 }
+static inline unsigned int cpufreq_get(unsigned int cpu)
+{
+	return 0;
+}
 #endif
 
 
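Note the contrast with the do {} while (0) macro stubs in mtrr.h earlier in this diff: because cpufreq_register_notifier() and cpufreq_get() return values, their !CONFIG_CPU_FREQ fallbacks are static inline functions, which keep full argument type checking and a real return value while still compiling to nothing. In outline (hypothetical subsystem name):

	struct notifier_block;

	#ifdef CONFIG_BAR
	int bar_register_notifier(struct notifier_block *nb, unsigned int list);
	#else
	static inline int bar_register_notifier(struct notifier_block *nb,
						unsigned int list)
	{
		return 0;	/* callers may still check the result */
	}
	#endif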
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 32503657f14f..22c7ac5cd80c 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -14,5 +14,13 @@ extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
 extern const struct file_operations proc_vmcore_operations;
 extern struct proc_dir_entry *proc_vmcore;
 
+/* Architecture code defines this if there are other possible ELF
+ * machine types, e.g. on bi-arch capable hardware. */
+#ifndef vmcore_elf_check_arch_cross
+#define vmcore_elf_check_arch_cross(x) 0
+#endif
+
+#define vmcore_elf_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x))
+
 #endif /* CONFIG_CRASH_DUMP */
 #endif /* LINUX_CRASHDUMP_H */
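The vmcore_elf_check_arch_cross() addition uses the #ifndef-override idiom: an architecture header included earlier may define the macro to accept a second ELF machine type on bi-arch hardware, and anyone who does not gets the always-false default. In miniature (the EXAMPLE_* names are hypothetical):

	/* arch header, seen first, may supply a stronger test ... */
	#ifdef EXAMPLE_BIARCH
	#define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EXAMPLE_OTHER_MACHINE)
	#endif

	/* ... and the generic header fills in the default otherwise. */
	#ifndef vmcore_elf_check_arch_cross
	#define vmcore_elf_check_arch_cross(x) 0
	#endif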
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 60713e6ea297..8b17ffe222c4 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -83,6 +83,23 @@ typedef __s64 Elf64_Sxword;
 #define DT_DEBUG	21
 #define DT_TEXTREL	22
 #define DT_JMPREL	23
+#define DT_ENCODING	32
+#define OLD_DT_LOOS	0x60000000
+#define DT_LOOS		0x6000000d
+#define DT_HIOS		0x6ffff000
+#define DT_VALRNGLO	0x6ffffd00
+#define DT_VALRNGHI	0x6ffffdff
+#define DT_ADDRRNGLO	0x6ffffe00
+#define DT_ADDRRNGHI	0x6ffffeff
+#define DT_VERSYM	0x6ffffff0
+#define DT_RELACOUNT	0x6ffffff9
+#define DT_RELCOUNT	0x6ffffffa
+#define DT_FLAGS_1	0x6ffffffb
+#define DT_VERDEF	0x6ffffffc
+#define DT_VERDEFNUM	0x6ffffffd
+#define DT_VERNEED	0x6ffffffe
+#define DT_VERNEEDNUM	0x6fffffff
+#define OLD_DT_HIOS	0x6fffffff
 #define DT_LOPROC	0x70000000
 #define DT_HIPROC	0x7fffffff
 
diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h
index 67396db141e8..9a1e0674e56c 100644
--- a/include/linux/elfnote.h
+++ b/include/linux/elfnote.h
@@ -39,12 +39,12 @@
  *	ELFNOTE(XYZCo, 12, .long, 0xdeadbeef)
  */
 #define ELFNOTE(name, type, desctype, descdata)	\
-.pushsection .note.name			;	\
+.pushsection .note.name, "",@note	;	\
   .align 4				;	\
   .long 2f - 1f		/* namesz */	;	\
   .long 4f - 3f		/* descsz */	;	\
   .long type				;	\
-1:.asciz "name"				;	\
+1:.asciz #name				;	\
 2:.align 4				;	\
 3:desctype descdata			;	\
 4:.align 4				;	\
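The two elfnote.h fixes are preprocessor subtleties. `.asciz "name"` emitted the literal bytes n-a-m-e into every note, because macro parameters are not expanded inside string literals; `#name` stringizes the actual argument. (In `.note.name` the parameter does expand, since there it is ordinary token text.) A quick demonstration of the difference:

	#include <stdio.h>

	#define EMIT_WRONG(name)	puts("name")	/* always prints: name */
	#define EMIT_RIGHT(name)	puts(#name)	/* prints the argument  */

	int main(void)
	{
		EMIT_WRONG(XYZCo);	/* -> name  (the old ELFNOTE bug)  */
		EMIT_RIGHT(XYZCo);	/* -> XYZCo (what #name produces)  */
		return 0;
	}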
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 7bab8eae2341..a515eb0afdfb 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -27,6 +27,8 @@ static inline void flush_kernel_dcache_page(struct page *page)
 unsigned int nr_free_highpages(void);
 extern unsigned long totalhigh_pages;
 
+void kmap_flush_unused(void);
+
 #else /* CONFIG_HIGHMEM */
 
 static inline unsigned int nr_free_highpages(void) { return 0; }
@@ -49,10 +51,13 @@ static inline void *kmap_atomic(struct page *page, enum km_type idx)
 	pagefault_disable();
 	return page_address(page);
 }
+#define kmap_atomic_prot(page, idx, prot)	kmap_atomic(page, idx)
 
 #define kunmap_atomic(addr, idx)	do { pagefault_enable(); } while (0)
 #define kmap_atomic_pfn(pfn, idx)	kmap_atomic(pfn_to_page(pfn), (idx))
 #define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
+
+#define kmap_flush_unused()	do {} while(0)
 #endif
 
 #endif /* CONFIG_HIGHMEM */
diff --git a/include/linux/init.h b/include/linux/init.h
index e290a010e3f2..9abf120ec9f8 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -52,9 +52,14 @@
 #endif
 
 /* For assembly routines */
+#ifdef CONFIG_HOTPLUG_CPU
+#define __INIT		.section	".text","ax"
+#define __INITDATA	.section	".data","aw"
+#else
 #define __INIT		.section	".init.text","ax"
-#define __FINIT		.previous
 #define __INITDATA	.section	".init.data","aw"
+#endif
+#define __FINIT		.previous
 
 #ifndef __ASSEMBLY__
 /*
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 600e3d387ffc..b72be2f79e6a 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -11,9 +11,16 @@
 
 /* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
 #ifndef PERCPU_ENOUGH_ROOM
-#define PERCPU_ENOUGH_ROOM 32768
+#ifdef CONFIG_MODULES
+#define PERCPU_MODULE_RESERVE	8192
+#else
+#define PERCPU_MODULE_RESERVE	0
 #endif
 
+#define PERCPU_ENOUGH_ROOM \
+	(__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE)
+#endif	/* PERCPU_ENOUGH_ROOM */
+
 /*
  * Must be an lvalue. Since @var must be a simple identifier,
  * we force a syntax error here if it isn't.
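The replacement PERCPU_ENOUGH_ROOM is no longer a hard-coded 32768 but is computed from __per_cpu_start and __per_cpu_end. Those are not C variables; they are symbols the linker script places at the boundaries of the per-CPU data section, and only their addresses are meaningful. The usual way C code consumes such symbols (a sketch):

	/* Provided by the linker script, not defined in any .c file. */
	extern char __per_cpu_start[], __per_cpu_end[];

	static unsigned long percpu_section_size(void)
	{
		/* pointer difference of the section's two edge addresses */
		return (unsigned long)(__per_cpu_end - __per_cpu_start);
	}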
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 3e628f990fdf..89580b764959 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -26,9 +26,6 @@
 /********** arch/$ARCH/mm/init.c **********/
 #define POISON_FREE_INITMEM	0xcc
 
-/********** arch/x86_64/mm/init.c **********/
-#define POISON_FREE_INITDATA	0xba
-
 /********** arch/ia64/hp/common/sba_iommu.c **********/
 /*
  * arch/ia64/hp/common/sba_iommu.c uses a 16-byte poison string with a